VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/VMMR0.cpp@71775

Last change on this file since 71775 was 71223, checked in by vboxsync on 2018-03-05

NEM/win,VMM,PGM: Ported NEM runloop to ring-0. bugref:9044 [build fixes]

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 88.4 KB
1/* $Id: VMMR0.cpp 71223 2018-03-05 22:19:22Z vboxsync $ */
2/** @file
3 * VMM - Host Context Ring 0.
4 */
5
6/*
7 * Copyright (C) 2006-2017 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_VMM
23#include <VBox/vmm/vmm.h>
24#include <VBox/sup.h>
25#include <VBox/vmm/trpm.h>
26#include <VBox/vmm/cpum.h>
27#include <VBox/vmm/pdmapi.h>
28#include <VBox/vmm/pgm.h>
29#ifdef VBOX_WITH_NEM_R0
30# include <VBox/vmm/nem.h>
31#endif
32#include <VBox/vmm/stam.h>
33#include <VBox/vmm/tm.h>
34#include "VMMInternal.h"
35#include <VBox/vmm/vm.h>
36#include <VBox/vmm/gvm.h>
37#ifdef VBOX_WITH_PCI_PASSTHROUGH
38# include <VBox/vmm/pdmpci.h>
39#endif
40#include <VBox/vmm/apic.h>
41
42#include <VBox/vmm/gvmm.h>
43#include <VBox/vmm/gmm.h>
44#include <VBox/vmm/gim.h>
45#include <VBox/intnet.h>
46#include <VBox/vmm/hm.h>
47#include <VBox/param.h>
48#include <VBox/err.h>
49#include <VBox/version.h>
50#include <VBox/log.h>
51
52#include <iprt/asm-amd64-x86.h>
53#include <iprt/assert.h>
54#include <iprt/crc.h>
55#include <iprt/mp.h>
56#include <iprt/once.h>
57#include <iprt/stdarg.h>
58#include <iprt/string.h>
59#include <iprt/thread.h>
60#include <iprt/timer.h>
61
62#include "dtrace/VBoxVMM.h"
63
64
65#if defined(_MSC_VER) && defined(RT_ARCH_AMD64) /** @todo check this with VC7! */
66# pragma intrinsic(_AddressOfReturnAddress)
67#endif
68
69#if defined(RT_OS_DARWIN) && ARCH_BITS == 32
70# error "32-bit darwin is no longer supported. Go back to 4.3 or earlier!"
71#endif
72
73
74
75/*********************************************************************************************************************************
76* Defined Constants And Macros *
77*********************************************************************************************************************************/
78/** @def VMM_CHECK_SMAP_SETUP
79 * SMAP check setup. */
80/** @def VMM_CHECK_SMAP_CHECK
81 * Checks that the AC flag is set if SMAP is enabled. If AC is not set,
82 * it will be logged and @a a_BadExpr is executed. */
83/** @def VMM_CHECK_SMAP_CHECK2
84 * Checks that the AC flag is set if SMAP is enabled. If AC is not set, it will
85 * be logged, written to the VM's assertion text buffer, and @a a_BadExpr is
86 * executed. */
87#if defined(VBOX_STRICT) || 1
88# define VMM_CHECK_SMAP_SETUP() uint32_t const fKernelFeatures = SUPR0GetKernelFeatures()
89# define VMM_CHECK_SMAP_CHECK(a_BadExpr) \
90 do { \
91 if (fKernelFeatures & SUPKERNELFEATURES_SMAP) \
92 { \
93 RTCCUINTREG fEflCheck = ASMGetFlags(); \
94 if (RT_LIKELY(fEflCheck & X86_EFL_AC)) \
95 { /* likely */ } \
96 else \
97 { \
98 SUPR0Printf("%s, line %d: EFLAGS.AC is clear! (%#x)\n", __FUNCTION__, __LINE__, (uint32_t)fEflCheck); \
99 a_BadExpr; \
100 } \
101 } \
102 } while (0)
103# define VMM_CHECK_SMAP_CHECK2(a_pVM, a_BadExpr) \
104 do { \
105 if (fKernelFeatures & SUPKERNELFEATURES_SMAP) \
106 { \
107 RTCCUINTREG fEflCheck = ASMGetFlags(); \
108 if (RT_LIKELY(fEflCheck & X86_EFL_AC)) \
109 { /* likely */ } \
110 else \
111 { \
112 SUPR0BadContext((a_pVM) ? (a_pVM)->pSession : NULL, __FILE__, __LINE__, "EFLAGS.AC is zero!"); \
113 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg1, sizeof(pVM->vmm.s.szRing0AssertMsg1), \
114 "%s, line %d: EFLAGS.AC is clear! (%#x)\n", __FUNCTION__, __LINE__, (uint32_t)fEflCheck); \
115 a_BadExpr; \
116 } \
117 } \
118 } while (0)
119#else
120# define VMM_CHECK_SMAP_SETUP() uint32_t const fKernelFeatures = 0
121# define VMM_CHECK_SMAP_CHECK(a_BadExpr) NOREF(fKernelFeatures)
122# define VMM_CHECK_SMAP_CHECK2(a_pVM, a_BadExpr) NOREF(fKernelFeatures)
123#endif
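/* Editor's note (illustrative, not part of the original file): a minimal usage sketch
 * of the SMAP check macros above, assuming a hypothetical ring-0 worker; SomeR0Worker
 * and DoSomeWork are made-up names. The pattern mirrors what ModuleInit and
 * vmmR0InitVM below actually do: snapshot the kernel features once per function,
 * then re-verify EFLAGS.AC around each unit of work.
 *
 *     static int SomeR0Worker(PVM pVM)
 *     {
 *         VMM_CHECK_SMAP_SETUP();                                       // snapshot fKernelFeatures
 *         VMM_CHECK_SMAP_CHECK(return VERR_VMM_SMAP_BUT_AC_CLEAR);      // log and bail if AC is clear
 *         int rc = DoSomeWork(pVM);                                     // hypothetical helper
 *         VMM_CHECK_SMAP_CHECK2(pVM, rc = VERR_VMM_SMAP_BUT_AC_CLEAR);  // also records it in the VM
 *         return rc;
 *     }
 */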
124
125
126/*********************************************************************************************************************************
127* Internal Functions *
128*********************************************************************************************************************************/
129RT_C_DECLS_BEGIN
130#if defined(RT_ARCH_X86) && (defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD))
131extern uint64_t __udivdi3(uint64_t, uint64_t);
132extern uint64_t __umoddi3(uint64_t, uint64_t);
133#endif
134RT_C_DECLS_END
135
136
137/*********************************************************************************************************************************
138* Global Variables *
139*********************************************************************************************************************************/
140/** Drag in necessary library bits.
141 * The runtime lives here (in VMMR0.r0) and VBoxDD*R0.r0 links against us. */
142PFNRT g_VMMR0Deps[] =
143{
144 (PFNRT)RTCrc32,
145 (PFNRT)RTOnce,
146#if defined(RT_ARCH_X86) && (defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD))
147 (PFNRT)__udivdi3,
148 (PFNRT)__umoddi3,
149#endif
150 NULL
151};
152
153#ifdef RT_OS_SOLARIS
154/* Dependency information for the native solaris loader. */
155extern "C" { char _depends_on[] = "vboxdrv"; }
156#endif
157
158/** The result of SUPR0GetRawModeUsability(), set by ModuleInit(). */
159int g_rcRawModeUsability = VINF_SUCCESS;
160
161
162/**
163 * Initialize the module.
164 * This is called when we're first loaded.
165 *
166 * @returns 0 on success.
167 * @returns VBox status on failure.
168 * @param hMod Image handle for use in APIs.
169 */
170DECLEXPORT(int) ModuleInit(void *hMod)
171{
172 VMM_CHECK_SMAP_SETUP();
173 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
174
175#ifdef VBOX_WITH_DTRACE_R0
176 /*
177 * The first thing to do is register the static tracepoints.
178 * (Deregistration is automatic.)
179 */
180 int rc2 = SUPR0TracerRegisterModule(hMod, &g_VTGObjHeader);
181 if (RT_FAILURE(rc2))
182 return rc2;
183#endif
184 LogFlow(("ModuleInit:\n"));
185
186#ifdef VBOX_WITH_64ON32_CMOS_DEBUG
187 /*
188 * Display the CMOS debug code.
189 */
190 ASMOutU8(0x72, 0x03);
191 uint8_t bDebugCode = ASMInU8(0x73);
192 LogRel(("CMOS Debug Code: %#x (%d)\n", bDebugCode, bDebugCode));
193 RTLogComPrintf("CMOS Debug Code: %#x (%d)\n", bDebugCode, bDebugCode);
194#endif
195
196 /*
197 * Initialize the VMM, GVMM, GMM, HM, PGM (Darwin) and INTNET.
198 */
199 int rc = vmmInitFormatTypes();
200 if (RT_SUCCESS(rc))
201 {
202 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
203 rc = GVMMR0Init();
204 if (RT_SUCCESS(rc))
205 {
206 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
207 rc = GMMR0Init();
208 if (RT_SUCCESS(rc))
209 {
210 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
211 rc = HMR0Init();
212 if (RT_SUCCESS(rc))
213 {
214 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
215 rc = PGMRegisterStringFormatTypes();
216 if (RT_SUCCESS(rc))
217 {
218 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
219#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
220 rc = PGMR0DynMapInit();
221#endif
222 if (RT_SUCCESS(rc))
223 {
224 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
225 rc = IntNetR0Init();
226 if (RT_SUCCESS(rc))
227 {
228#ifdef VBOX_WITH_PCI_PASSTHROUGH
229 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
230 rc = PciRawR0Init();
231#endif
232 if (RT_SUCCESS(rc))
233 {
234 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
235 rc = CPUMR0ModuleInit();
236 if (RT_SUCCESS(rc))
237 {
238#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
239 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
240 rc = vmmR0TripleFaultHackInit();
241 if (RT_SUCCESS(rc))
242#endif
243 {
244 VMM_CHECK_SMAP_CHECK(rc = VERR_VMM_SMAP_BUT_AC_CLEAR);
245 if (RT_SUCCESS(rc))
246 {
247 g_rcRawModeUsability = SUPR0GetRawModeUsability();
248 if (g_rcRawModeUsability != VINF_SUCCESS)
249 SUPR0Printf("VMMR0!ModuleInit: SUPR0GetRawModeUsability -> %Rrc\n",
250 g_rcRawModeUsability);
251 LogFlow(("ModuleInit: returns success\n"));
252 return VINF_SUCCESS;
253 }
254 }
255
256 /*
257 * Bail out.
258 */
259#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
260 vmmR0TripleFaultHackTerm();
261#endif
262 }
263 else
264 LogRel(("ModuleInit: CPUMR0ModuleInit -> %Rrc\n", rc));
265#ifdef VBOX_WITH_PCI_PASSTHROUGH
266 PciRawR0Term();
267#endif
268 }
269 else
270 LogRel(("ModuleInit: PciRawR0Init -> %Rrc\n", rc));
271 IntNetR0Term();
272 }
273 else
274 LogRel(("ModuleInit: IntNetR0Init -> %Rrc\n", rc));
275#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
276 PGMR0DynMapTerm();
277#endif
278 }
279 else
280 LogRel(("ModuleInit: PGMR0DynMapInit -> %Rrc\n", rc));
281 PGMDeregisterStringFormatTypes();
282 }
283 else
284 LogRel(("ModuleInit: PGMRegisterStringFormatTypes -> %Rrc\n", rc));
285 HMR0Term();
286 }
287 else
288 LogRel(("ModuleInit: HMR0Init -> %Rrc\n", rc));
289 GMMR0Term();
290 }
291 else
292 LogRel(("ModuleInit: GMMR0Init -> %Rrc\n", rc));
293 GVMMR0Term();
294 }
295 else
296 LogRel(("ModuleInit: GVMMR0Init -> %Rrc\n", rc));
297 vmmTermFormatTypes();
298 }
299 else
300 LogRel(("ModuleInit: vmmInitFormatTypes -> %Rrc\n", rc));
301
302 LogFlow(("ModuleInit: failed %Rrc\n", rc));
303 return rc;
304}
305
306
307/**
308 * Terminate the module.
309 * This is called when we're finally unloaded.
310 *
311 * @param hMod Image handle for use in APIs.
312 */
313DECLEXPORT(void) ModuleTerm(void *hMod)
314{
315 NOREF(hMod);
316 LogFlow(("ModuleTerm:\n"));
317
318 /*
319 * Terminate the CPUM module (Local APIC cleanup).
320 */
321 CPUMR0ModuleTerm();
322
323 /*
324 * Terminate the internal network service.
325 */
326 IntNetR0Term();
327
328 /*
329 * PGM (Darwin), HM and PciRaw global cleanup.
330 */
331#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
332 PGMR0DynMapTerm();
333#endif
334#ifdef VBOX_WITH_PCI_PASSTHROUGH
335 PciRawR0Term();
336#endif
337 PGMDeregisterStringFormatTypes();
338 HMR0Term();
339#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
340 vmmR0TripleFaultHackTerm();
341#endif
342
343 /*
344 * Destroy the GMM and GVMM instances.
345 */
346 GMMR0Term();
347 GVMMR0Term();
348
349 vmmTermFormatTypes();
350
351 LogFlow(("ModuleTerm: returns\n"));
352}
353
354
355/**
356 * Initiates the R0 driver for a particular VM instance.
357 *
358 * @returns VBox status code.
359 *
360 * @param pGVM The global (ring-0) VM structure.
361 * @param pVM The cross context VM structure.
362 * @param uSvnRev The SVN revision of the ring-3 part.
363 * @param uBuildType Build type indicator.
364 * @thread EMT(0)
365 */
366static int vmmR0InitVM(PGVM pGVM, PVM pVM, uint32_t uSvnRev, uint32_t uBuildType)
367{
368 VMM_CHECK_SMAP_SETUP();
369 VMM_CHECK_SMAP_CHECK(return VERR_VMM_SMAP_BUT_AC_CLEAR);
370
371 /*
372 * Match the SVN revisions and build type.
373 */
374 if (uSvnRev != VMMGetSvnRev())
375 {
376 LogRel(("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev()));
377 SUPR0Printf("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev());
378 return VERR_VMM_R0_VERSION_MISMATCH;
379 }
380 if (uBuildType != vmmGetBuildType())
381 {
382 LogRel(("VMMR0InitVM: Build type mismatch, r3=%#x r0=%#x\n", uBuildType, vmmGetBuildType()));
383 SUPR0Printf("VMMR0InitVM: Build type mismatch, r3=%#x r0=%#x\n", uBuildType, vmmGetBuildType());
384 return VERR_VMM_R0_VERSION_MISMATCH;
385 }
386
387 int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, 0 /*idCpu*/);
388 if (RT_FAILURE(rc))
389 return rc;
390
391
392#ifdef LOG_ENABLED
393 /*
394 * Register the EMT R0 logger instance for VCPU 0.
395 */
396 PVMCPU pVCpu = &pVM->aCpus[0];
397
398 PVMMR0LOGGER pR0Logger = pVCpu->vmm.s.pR0LoggerR0;
399 if (pR0Logger)
400 {
401# if 0 /* testing of the logger. */
402 LogCom(("vmmR0InitVM: before %p\n", RTLogDefaultInstance()));
403 LogCom(("vmmR0InitVM: pfnFlush=%p actual=%p\n", pR0Logger->Logger.pfnFlush, vmmR0LoggerFlush));
404 LogCom(("vmmR0InitVM: pfnLogger=%p actual=%p\n", pR0Logger->Logger.pfnLogger, vmmR0LoggerWrapper));
405 LogCom(("vmmR0InitVM: offScratch=%d fFlags=%#x fDestFlags=%#x\n", pR0Logger->Logger.offScratch, pR0Logger->Logger.fFlags, pR0Logger->Logger.fDestFlags));
406
407 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
408 LogCom(("vmmR0InitVM: after %p reg\n", RTLogDefaultInstance()));
409 RTLogSetDefaultInstanceThread(NULL, pVM->pSession);
410 LogCom(("vmmR0InitVM: after %p dereg\n", RTLogDefaultInstance()));
411
412 pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
413 LogCom(("vmmR0InitVM: returned successfully from direct logger call.\n"));
414 pR0Logger->Logger.pfnFlush(&pR0Logger->Logger);
415 LogCom(("vmmR0InitVM: returned successfully from direct flush call.\n"));
416
417 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
418 LogCom(("vmmR0InitVM: after %p reg2\n", RTLogDefaultInstance()));
419 pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
420 LogCom(("vmmR0InitVM: returned successfully from direct logger call (2). offScratch=%d\n", pR0Logger->Logger.offScratch));
421 RTLogSetDefaultInstanceThread(NULL, pVM->pSession);
422 LogCom(("vmmR0InitVM: after %p dereg2\n", RTLogDefaultInstance()));
423
424 RTLogLoggerEx(&pR0Logger->Logger, 0, ~0U, "hello ring-0 logger (RTLogLoggerEx)\n");
425 LogCom(("vmmR0InitVM: RTLogLoggerEx returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));
426
427 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
428 RTLogPrintf("hello ring-0 logger (RTLogPrintf)\n");
429 LogCom(("vmmR0InitVM: RTLogPrintf returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));
430# endif
431 Log(("Switching to per-thread logging instance %p (key=%p)\n", &pR0Logger->Logger, pVM->pSession));
432 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
433 pR0Logger->fRegistered = true;
434 }
435#endif /* LOG_ENABLED */
436
437 /*
438 * Check if the host supports high resolution timers or not.
439 */
440 if ( pVM->vmm.s.fUsePeriodicPreemptionTimers
441 && !RTTimerCanDoHighResolution())
442 pVM->vmm.s.fUsePeriodicPreemptionTimers = false;
443
444 /*
445 * Initialize the per VM data for GVMM and GMM.
446 */
447 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
448 rc = GVMMR0InitVM(pGVM);
449// if (RT_SUCCESS(rc))
450// rc = GMMR0InitPerVMData(pVM);
451 if (RT_SUCCESS(rc))
452 {
453 /*
454 * Init HM, CPUM and PGM (Darwin only).
455 */
456 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
457 rc = HMR0InitVM(pVM);
458 if (RT_SUCCESS(rc))
459 VMM_CHECK_SMAP_CHECK2(pVM, rc = VERR_VMM_RING0_ASSERTION); /* CPUMR0InitVM will otherwise panic the host */
460 if (RT_SUCCESS(rc))
461 {
462 rc = CPUMR0InitVM(pVM);
463 if (RT_SUCCESS(rc))
464 {
465 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
466#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
467 rc = PGMR0DynMapInitVM(pVM);
468#endif
469 if (RT_SUCCESS(rc))
470 {
471 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
472#ifdef VBOX_WITH_PCI_PASSTHROUGH
473 rc = PciRawR0InitVM(pGVM, pVM);
474#endif
475 if (RT_SUCCESS(rc))
476 {
477 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
478 rc = GIMR0InitVM(pVM);
479 if (RT_SUCCESS(rc))
480 {
481 VMM_CHECK_SMAP_CHECK2(pVM, rc = VERR_VMM_RING0_ASSERTION);
482 if (RT_SUCCESS(rc))
483 {
484 GVMMR0DoneInitVM(pGVM);
485 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
486 return rc;
487 }
488
489 /* bail out */
490 GIMR0TermVM(pVM);
491 }
492#ifdef VBOX_WITH_PCI_PASSTHROUGH
493 PciRawR0TermVM(pGVM, pVM);
494#endif
495 }
496 }
497 }
498 HMR0TermVM(pVM);
499 }
500 }
501
502 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pVM->pSession);
503 return rc;
504}
505
506
507/**
508 * Does EMT specific VM initialization.
509 *
510 * @returns VBox status code.
511 * @param pGVM The ring-0 VM structure.
512 * @param pVM The cross context VM structure.
513 * @param idCpu The EMT that's calling.
514 */
515static int vmmR0InitVMEmt(PGVM pGVM, PVM pVM, VMCPUID idCpu)
516{
517 /* Paranoia (caller checked these already). */
518 AssertReturn(idCpu < pGVM->cCpus, VERR_INVALID_CPU_ID);
519 AssertReturn(pGVM->aCpus[idCpu].hEMT == RTThreadNativeSelf(), VERR_INVALID_CPU_ID);
520
521#ifdef LOG_ENABLED
522 /*
523 * Registration of ring 0 loggers.
524 */
525 PVMCPU pVCpu = &pVM->aCpus[idCpu];
526 PVMMR0LOGGER pR0Logger = pVCpu->vmm.s.pR0LoggerR0;
527 if ( pR0Logger
528 && !pR0Logger->fRegistered)
529 {
530 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
531 pR0Logger->fRegistered = true;
532 }
533#endif
534 RT_NOREF(pVM);
535
536 return VINF_SUCCESS;
537}
538
539
540
541/**
542 * Terminates the R0 bits for a particular VM instance.
543 *
544 * This is normally called by ring-3 as part of the VM termination process, but
545 * may alternatively be called during the support driver session cleanup when
546 * the VM object is destroyed (see GVMM).
547 *
548 * @returns VBox status code.
549 *
550 * @param pGVM The global (ring-0) VM structure.
551 * @param pVM The cross context VM structure.
552 * @param idCpu Set to 0 if EMT(0) or NIL_VMCPUID if session cleanup
553 * thread.
554 * @thread EMT(0) or session clean up thread.
555 */
556VMMR0_INT_DECL(int) VMMR0TermVM(PGVM pGVM, PVM pVM, VMCPUID idCpu)
557{
558 /*
559 * Check EMT(0) claim if we're called from userland.
560 */
561 if (idCpu != NIL_VMCPUID)
562 {
563 AssertReturn(idCpu == 0, VERR_INVALID_CPU_ID);
564 int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
565 if (RT_FAILURE(rc))
566 return rc;
567 }
568
569#ifdef VBOX_WITH_PCI_PASSTHROUGH
570 PciRawR0TermVM(pGVM, pVM);
571#endif
572
573 /*
574 * Tell GVMM what we're up to and check that we only do this once.
575 */
576 if (GVMMR0DoingTermVM(pGVM))
577 {
578 GIMR0TermVM(pVM);
579
580 /** @todo I wish to call PGMR0PhysFlushHandyPages(pVM, &pVM->aCpus[idCpu])
581 * here to make sure we don't leak any shared pages if we crash... */
582#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
583 PGMR0DynMapTermVM(pVM);
584#endif
585 HMR0TermVM(pVM);
586 }
587
588 /*
589 * Deregister the logger.
590 */
591 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pVM->pSession);
592 return VINF_SUCCESS;
593}
594
595
596/**
597 * VMM ring-0 thread-context callback.
598 *
599 * This does common HM state updating and calls the HM-specific thread-context
600 * callback.
601 *
602 * @param enmEvent The thread-context event.
603 * @param pvUser Opaque pointer to the VMCPU.
604 *
605 * @thread EMT(pvUser)
606 */
607static DECLCALLBACK(void) vmmR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, void *pvUser)
608{
609 PVMCPU pVCpu = (PVMCPU)pvUser;
610
611 switch (enmEvent)
612 {
613 case RTTHREADCTXEVENT_IN:
614 {
615 /*
616 * Linux may call us with preemption enabled (really!) but technically we
617 * cannot get preempted here, otherwise we end up in an infinite recursion
618 * scenario (i.e. preempted in resume hook -> preempt hook -> resume hook...
619 * ad infinitum). Let's just disable preemption for now...
620 */
621 /** @todo r=bird: I don't believe the above. The linux code is clearly enabling
622 * preemption after doing the callout (one or two functions up the
623 * call chain). */
624 /** @todo r=ramshankar: See @bugref{5313#c30}. */
625 RTTHREADPREEMPTSTATE ParanoidPreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
626 RTThreadPreemptDisable(&ParanoidPreemptState);
627
628 /* We need to update the VCPU <-> host CPU mapping. */
629 RTCPUID idHostCpu;
630 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
631 pVCpu->iHostCpuSet = iHostCpuSet;
632 ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
633
634 /* In the very unlikely event that the GIP delta for the CPU we're
635 rescheduled onto needs calculating, try to force a return to ring-3.
636 We unfortunately cannot do the measurements right here. */
637 if (RT_UNLIKELY(SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
638 VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
639
640 /* Invoke the HM-specific thread-context callback. */
641 HMR0ThreadCtxCallback(enmEvent, pvUser);
642
643 /* Restore preemption. */
644 RTThreadPreemptRestore(&ParanoidPreemptState);
645 break;
646 }
647
648 case RTTHREADCTXEVENT_OUT:
649 {
650 /* Invoke the HM-specific thread-context callback. */
651 HMR0ThreadCtxCallback(enmEvent, pvUser);
652
653 /*
654 * Sigh. See VMMGetCpu() used by VMCPU_ASSERT_EMT(). We cannot let several VCPUs
655 * have the same host CPU associated with them.
656 */
657 pVCpu->iHostCpuSet = UINT32_MAX;
658 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
659 break;
660 }
661
662 default:
663 /* Invoke the HM-specific thread-context callback. */
664 HMR0ThreadCtxCallback(enmEvent, pvUser);
665 break;
666 }
667}
668
669
670/**
671 * Creates thread switching hook for the current EMT thread.
672 *
673 * This is called by GVMMR0CreateVM and GVMMR0RegisterVCpu. If the host
674 * platform does not implement switcher hooks, no hooks will be created and the
675 * member set to NIL_RTTHREADCTXHOOK.
676 *
677 * @returns VBox status code.
678 * @param pVCpu The cross context virtual CPU structure.
679 * @thread EMT(pVCpu)
680 */
681VMMR0_INT_DECL(int) VMMR0ThreadCtxHookCreateForEmt(PVMCPU pVCpu)
682{
683 VMCPU_ASSERT_EMT(pVCpu);
684 Assert(pVCpu->vmm.s.hCtxHook == NIL_RTTHREADCTXHOOK);
685
686#if 1 /* To disable this stuff change to zero. */
687 int rc = RTThreadCtxHookCreate(&pVCpu->vmm.s.hCtxHook, 0, vmmR0ThreadCtxCallback, pVCpu);
688 if (RT_SUCCESS(rc))
689 return rc;
690#else
691 RT_NOREF(vmmR0ThreadCtxCallback);
692 int rc = VERR_NOT_SUPPORTED;
693#endif
694
695 pVCpu->vmm.s.hCtxHook = NIL_RTTHREADCTXHOOK;
696 if (rc == VERR_NOT_SUPPORTED)
697 return VINF_SUCCESS;
698
699 LogRelMax(32, ("RTThreadCtxHookCreate failed! rc=%Rrc pVCpu=%p idCpu=%RU32\n", rc, pVCpu, pVCpu->idCpu));
700 return VINF_SUCCESS; /* Just ignore it, we can live without context hooks. */
701}
702
703
704/**
705 * Destroys the thread switching hook for the specified VCPU.
706 *
707 * @param pVCpu The cross context virtual CPU structure.
708 * @remarks Can be called from any thread.
709 */
710VMMR0_INT_DECL(void) VMMR0ThreadCtxHookDestroyForEmt(PVMCPU pVCpu)
711{
712 int rc = RTThreadCtxHookDestroy(pVCpu->vmm.s.hCtxHook);
713 AssertRC(rc);
714 pVCpu->vmm.s.hCtxHook = NIL_RTTHREADCTXHOOK;
715}
716
717
718/**
719 * Disables the thread switching hook for this VCPU (if we got one).
720 *
721 * @param pVCpu The cross context virtual CPU structure.
722 * @thread EMT(pVCpu)
723 *
724 * @remarks This also clears VMCPU::idHostCpu, so the mapping is invalid after
725 * this call. This means you have to be careful with what you do!
726 */
727VMMR0_INT_DECL(void) VMMR0ThreadCtxHookDisable(PVMCPU pVCpu)
728{
729 /*
730 * Clear the VCPU <-> host CPU mapping as we've left HM context.
731 * @bugref{7726#c19} explains the need for this trick:
732 *
733 * hmR0VmxCallRing3Callback/hmR0SvmCallRing3Callback &
734 * hmR0VmxLeaveSession/hmR0SvmLeaveSession disable context hooks during
735 * longjmp & normal return to ring-3, which opens a window where we may be
736 * rescheduled without changing VMCPU::idHostCpu and cause confusion if
737 * the CPU starts executing a different EMT. Both functions first disable
738 * preemption and then call HMR0LeaveCpu, which invalidates idHostCpu, leaving
739 * an opening for getting preempted.
740 */
741 /** @todo Make HM not need this API! Then we could leave the hooks enabled
742 * all the time. */
743 /** @todo move this into the context hook disabling if(). */
744 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
745
746 /*
747 * Disable the context hook, if we got one.
748 */
749 if (pVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
750 {
751 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
752 int rc = RTThreadCtxHookDisable(pVCpu->vmm.s.hCtxHook);
753 AssertRC(rc);
754 }
755}
756
757
758/**
759 * Internal version of VMMR0ThreadCtxHookIsEnabled.
760 *
761 * @returns true if registered, false otherwise.
762 * @param pVCpu The cross context virtual CPU structure.
763 */
764DECLINLINE(bool) vmmR0ThreadCtxHookIsEnabled(PVMCPU pVCpu)
765{
766 return RTThreadCtxHookIsEnabled(pVCpu->vmm.s.hCtxHook);
767}
768
769
770/**
771 * Whether thread-context hooks are registered for this VCPU.
772 *
773 * @returns true if registered, false otherwise.
774 * @param pVCpu The cross context virtual CPU structure.
775 */
776VMMR0_INT_DECL(bool) VMMR0ThreadCtxHookIsEnabled(PVMCPU pVCpu)
777{
778 return vmmR0ThreadCtxHookIsEnabled(pVCpu);
779}
780
781
782#ifdef VBOX_WITH_STATISTICS
783/**
784 * Record return code statistics
785 * @param pVM The cross context VM structure.
786 * @param pVCpu The cross context virtual CPU structure.
787 * @param rc The status code.
788 */
789static void vmmR0RecordRC(PVM pVM, PVMCPU pVCpu, int rc)
790{
791 /*
792 * Collect statistics.
793 */
794 switch (rc)
795 {
796 case VINF_SUCCESS:
797 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetNormal);
798 break;
799 case VINF_EM_RAW_INTERRUPT:
800 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterrupt);
801 break;
802 case VINF_EM_RAW_INTERRUPT_HYPER:
803 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptHyper);
804 break;
805 case VINF_EM_RAW_GUEST_TRAP:
806 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGuestTrap);
807 break;
808 case VINF_EM_RAW_RING_SWITCH:
809 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitch);
810 break;
811 case VINF_EM_RAW_RING_SWITCH_INT:
812 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitchInt);
813 break;
814 case VINF_EM_RAW_STALE_SELECTOR:
815 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetStaleSelector);
816 break;
817 case VINF_EM_RAW_IRET_TRAP:
818 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIRETTrap);
819 break;
820 case VINF_IOM_R3_IOPORT_READ:
821 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIORead);
822 break;
823 case VINF_IOM_R3_IOPORT_WRITE:
824 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOWrite);
825 break;
826 case VINF_IOM_R3_IOPORT_COMMIT_WRITE:
827 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOCommitWrite);
828 break;
829 case VINF_IOM_R3_MMIO_READ:
830 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIORead);
831 break;
832 case VINF_IOM_R3_MMIO_WRITE:
833 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOWrite);
834 break;
835 case VINF_IOM_R3_MMIO_COMMIT_WRITE:
836 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOCommitWrite);
837 break;
838 case VINF_IOM_R3_MMIO_READ_WRITE:
839 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOReadWrite);
840 break;
841 case VINF_PATM_HC_MMIO_PATCH_READ:
842 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchRead);
843 break;
844 case VINF_PATM_HC_MMIO_PATCH_WRITE:
845 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchWrite);
846 break;
847 case VINF_CPUM_R3_MSR_READ:
848 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMSRRead);
849 break;
850 case VINF_CPUM_R3_MSR_WRITE:
851 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMSRWrite);
852 break;
853 case VINF_EM_RAW_EMULATE_INSTR:
854 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetEmulate);
855 break;
856 case VINF_EM_RAW_EMULATE_IO_BLOCK:
857 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOBlockEmulate);
858 break;
859 case VINF_PATCH_EMULATE_INSTR:
860 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchEmulate);
861 break;
862 case VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT:
863 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetLDTFault);
864 break;
865 case VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT:
866 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGDTFault);
867 break;
868 case VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT:
869 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIDTFault);
870 break;
871 case VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT:
872 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTSSFault);
873 break;
874 case VINF_CSAM_PENDING_ACTION:
875 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCSAMTask);
876 break;
877 case VINF_PGM_SYNC_CR3:
878 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetSyncCR3);
879 break;
880 case VINF_PATM_PATCH_INT3:
881 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchInt3);
882 break;
883 case VINF_PATM_PATCH_TRAP_PF:
884 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchPF);
885 break;
886 case VINF_PATM_PATCH_TRAP_GP:
887 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchGP);
888 break;
889 case VINF_PATM_PENDING_IRQ_AFTER_IRET:
890 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchIretIRQ);
891 break;
892 case VINF_EM_RESCHEDULE_REM:
893 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRescheduleREM);
894 break;
895 case VINF_EM_RAW_TO_R3:
896 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Total);
897 if (VM_FF_IS_PENDING(pVM, VM_FF_TM_VIRTUAL_SYNC))
898 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3TMVirt);
899 else if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_NEED_HANDY_PAGES))
900 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3HandyPages);
901 else if (VM_FF_IS_PENDING(pVM, VM_FF_PDM_QUEUES))
902 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3PDMQueues);
903 else if (VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS))
904 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Rendezvous);
905 else if (VM_FF_IS_PENDING(pVM, VM_FF_PDM_DMA))
906 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3DMA);
907 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TIMER))
908 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Timer);
909 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PDM_CRITSECT))
910 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3CritSect);
911 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TO_R3))
912 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3FF);
913 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_IEM))
914 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Iem);
915 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_IOM))
916 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Iom);
917 else
918 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Unknown);
919 break;
920
921 case VINF_EM_RAW_TIMER_PENDING:
922 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTimerPending);
923 break;
924 case VINF_EM_RAW_INTERRUPT_PENDING:
925 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptPending);
926 break;
927 case VINF_VMM_CALL_HOST:
928 switch (pVCpu->vmm.s.enmCallRing3Operation)
929 {
930 case VMMCALLRING3_PDM_CRIT_SECT_ENTER:
931 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPDMCritSectEnter);
932 break;
933 case VMMCALLRING3_PDM_LOCK:
934 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPDMLock);
935 break;
936 case VMMCALLRING3_PGM_POOL_GROW:
937 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMPoolGrow);
938 break;
939 case VMMCALLRING3_PGM_LOCK:
940 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMLock);
941 break;
942 case VMMCALLRING3_PGM_MAP_CHUNK:
943 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMMapChunk);
944 break;
945 case VMMCALLRING3_PGM_ALLOCATE_HANDY_PAGES:
946 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMAllocHandy);
947 break;
948 case VMMCALLRING3_REM_REPLAY_HANDLER_NOTIFICATIONS:
949 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallRemReplay);
950 break;
951 case VMMCALLRING3_VMM_LOGGER_FLUSH:
952 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallLogFlush);
953 break;
954 case VMMCALLRING3_VM_SET_ERROR:
955 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallVMSetError);
956 break;
957 case VMMCALLRING3_VM_SET_RUNTIME_ERROR:
958 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallVMSetRuntimeError);
959 break;
960 case VMMCALLRING3_VM_R0_ASSERTION:
961 default:
962 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCallRing3);
963 break;
964 }
965 break;
966 case VINF_PATM_DUPLICATE_FUNCTION:
967 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPATMDuplicateFn);
968 break;
969 case VINF_PGM_CHANGE_MODE:
970 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMChangeMode);
971 break;
972 case VINF_PGM_POOL_FLUSH_PENDING:
973 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMFlushPending);
974 break;
975 case VINF_EM_PENDING_REQUEST:
976 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPendingRequest);
977 break;
978 case VINF_EM_HM_PATCH_TPR_INSTR:
979 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchTPR);
980 break;
981 default:
982 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMisc);
983 break;
984 }
985}
986#endif /* VBOX_WITH_STATISTICS */
987
988
989/**
990 * The Ring 0 entry point, called by the fast-ioctl path.
991 *
992 * @param pGVM The global (ring-0) VM structure.
993 * @param pVM The cross context VM structure.
994 * The return code is stored in pVM->vmm.s.iLastGZRc.
995 * @param idCpu The Virtual CPU ID of the calling EMT.
996 * @param enmOperation Which operation to execute.
997 * @remarks Assume called with interrupts _enabled_.
998 */
999VMMR0DECL(void) VMMR0EntryFast(PGVM pGVM, PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation)
1000{
1001 /*
1002 * Validation.
1003 */
1004 if ( idCpu < pGVM->cCpus
1005 && pGVM->cCpus == pVM->cCpus)
1006 { /*likely*/ }
1007 else
1008 {
1009 SUPR0Printf("VMMR0EntryFast: Bad idCpu=%#x cCpus=%#x/%#x\n", idCpu, pGVM->cCpus, pVM->cCpus);
1010 return;
1011 }
1012
1013 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
1014 PVMCPU pVCpu = &pVM->aCpus[idCpu];
1015 RTNATIVETHREAD const hNativeThread = RTThreadNativeSelf();
1016 if (RT_LIKELY( pGVCpu->hEMT == hNativeThread
1017 && pVCpu->hNativeThreadR0 == hNativeThread))
1018 { /* likely */ }
1019 else
1020 {
1021 SUPR0Printf("VMMR0EntryFast: Bad thread idCpu=%#x hNativeSelf=%p pGVCpu->hEmt=%p pVCpu->hNativeThreadR0=%p\n",
1022 idCpu, hNativeThread, pGVCpu->hEMT, pVCpu->hNativeThreadR0);
1023 return;
1024 }
1025
1026 /*
1027 * SMAP fun.
1028 */
1029 VMM_CHECK_SMAP_SETUP();
1030 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1031
1032 /*
1033 * Perform requested operation.
1034 */
1035 switch (enmOperation)
1036 {
1037 /*
1038 * Switch to GC and run guest raw mode code.
1039 * Disable interrupts before doing the world switch.
1040 */
1041 case VMMR0_DO_RAW_RUN:
1042 {
1043#ifdef VBOX_WITH_RAW_MODE
1044# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
1045 /* Some safety precautions first. */
1046 if (RT_UNLIKELY(!PGMGetHyperCR3(pVCpu)))
1047 {
1048 pVCpu->vmm.s.iLastGZRc = VERR_PGM_NO_CR3_SHADOW_ROOT;
1049 break;
1050 }
1051# endif
1052 if (RT_SUCCESS(g_rcRawModeUsability))
1053 { /* likely */ }
1054 else
1055 {
1056 pVCpu->vmm.s.iLastGZRc = g_rcRawModeUsability;
1057 break;
1058 }
1059
1060 /*
1061 * Disable preemption.
1062 */
1063 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
1064 RTThreadPreemptDisable(&PreemptState);
1065
1066 /*
1067 * Get the host CPU identifiers, make sure they are valid and that
1068 * we've got a TSC delta for the CPU.
1069 */
1070 RTCPUID idHostCpu;
1071 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
1072 if (RT_LIKELY( iHostCpuSet < RTCPUSET_MAX_CPUS
1073 && SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
1074 {
1075 /*
1076 * Commit the CPU identifiers and update the periodic preemption timer if it's active.
1077 */
1078# ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
1079 CPUMR0SetLApic(pVCpu, iHostCpuSet);
1080# endif
1081 pVCpu->iHostCpuSet = iHostCpuSet;
1082 ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
1083
1084 if (pVM->vmm.s.fUsePeriodicPreemptionTimers)
1085 GVMMR0SchedUpdatePeriodicPreemptionTimer(pVM, pVCpu->idHostCpu, TMCalcHostTimerFrequency(pVM, pVCpu));
1086
1087 /*
1088 * We might need to disable VT-x if the active switcher turns off paging.
1089 */
1090 bool fVTxDisabled;
1091 int rc = HMR0EnterSwitcher(pVM, pVM->vmm.s.enmSwitcher, &fVTxDisabled);
1092 if (RT_SUCCESS(rc))
1093 {
1094 /*
1095 * Disable interrupts and run raw-mode code. The loop is for efficiently
1096 * dispatching tracepoints that fired in raw-mode context.
1097 */
1098 RTCCUINTREG uFlags = ASMIntDisableFlags();
1099
1100 for (;;)
1101 {
1102 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);
1103 TMNotifyStartOfExecution(pVCpu);
1104
1105 rc = pVM->vmm.s.pfnR0ToRawMode(pVM);
1106 pVCpu->vmm.s.iLastGZRc = rc;
1107
1108 TMNotifyEndOfExecution(pVCpu);
1109 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
1110
1111 if (rc != VINF_VMM_CALL_TRACER)
1112 break;
1113 SUPR0TracerUmodProbeFire(pVM->pSession, &pVCpu->vmm.s.TracerCtx);
1114 }
1115
1116 /*
1117 * Re-enable VT-x before we dispatch any pending host interrupts and
1118 * re-enable interrupts.
1119 */
1120 HMR0LeaveSwitcher(pVM, fVTxDisabled);
1121
1122 if ( rc == VINF_EM_RAW_INTERRUPT
1123 || rc == VINF_EM_RAW_INTERRUPT_HYPER)
1124 TRPMR0DispatchHostInterrupt(pVM);
1125
1126 ASMSetFlags(uFlags);
1127
1128 /* Fire dtrace probe and collect statistics. */
1129 VBOXVMM_R0_VMM_RETURN_TO_RING3_RC(pVCpu, CPUMQueryGuestCtxPtr(pVCpu), rc);
1130# ifdef VBOX_WITH_STATISTICS
1131 STAM_COUNTER_INC(&pVM->vmm.s.StatRunRC);
1132 vmmR0RecordRC(pVM, pVCpu, rc);
1133# endif
1134 }
1135 else
1136 pVCpu->vmm.s.iLastGZRc = rc;
1137
1138 /*
1139 * Invalidate the host CPU identifiers as we restore preemption.
1140 */
1141 pVCpu->iHostCpuSet = UINT32_MAX;
1142 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1143
1144 RTThreadPreemptRestore(&PreemptState);
1145 }
1146 /*
1147 * Invalid CPU set index or TSC delta in need of measuring.
1148 */
1149 else
1150 {
1151 RTThreadPreemptRestore(&PreemptState);
1152 if (iHostCpuSet < RTCPUSET_MAX_CPUS)
1153 {
1154 int rc = SUPR0TscDeltaMeasureBySetIndex(pVM->pSession, iHostCpuSet, 0 /*fFlags*/,
1155 2 /*cMsWaitRetry*/, 5*RT_MS_1SEC /*cMsWaitThread*/,
1156 0 /*default cTries*/);
1157 if (RT_SUCCESS(rc) || rc == VERR_CPU_OFFLINE)
1158 pVCpu->vmm.s.iLastGZRc = VINF_EM_RAW_TO_R3;
1159 else
1160 pVCpu->vmm.s.iLastGZRc = rc;
1161 }
1162 else
1163 pVCpu->vmm.s.iLastGZRc = VERR_INVALID_CPU_INDEX;
1164 }
1165
1166#else /* !VBOX_WITH_RAW_MODE */
1167 pVCpu->vmm.s.iLastGZRc = VERR_RAW_MODE_NOT_SUPPORTED;
1168#endif
1169 break;
1170 }
1171
1172 /*
1173 * Run guest code using the available hardware acceleration technology.
1174 */
1175 case VMMR0_DO_HM_RUN:
1176 {
1177 /*
1178 * Disable preemption.
1179 */
1180 Assert(!vmmR0ThreadCtxHookIsEnabled(pVCpu));
1181 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
1182 RTThreadPreemptDisable(&PreemptState);
1183
1184 /*
1185 * Get the host CPU identifiers, make sure they are valid and that
1186 * we've got a TSC delta for the CPU.
1187 */
1188 RTCPUID idHostCpu;
1189 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
1190 if (RT_LIKELY( iHostCpuSet < RTCPUSET_MAX_CPUS
1191 && SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
1192 {
1193 pVCpu->iHostCpuSet = iHostCpuSet;
1194 ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
1195
1196 /*
1197 * Update the periodic preemption timer if it's active.
1198 */
1199 if (pVM->vmm.s.fUsePeriodicPreemptionTimers)
1200 GVMMR0SchedUpdatePeriodicPreemptionTimer(pVM, pVCpu->idHostCpu, TMCalcHostTimerFrequency(pVM, pVCpu));
1201 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1202
1203#ifdef VMM_R0_TOUCH_FPU
1204 /*
1205 * Make sure we've got the FPU state loaded so we don't need to clear
1206 * CR0.TS and get out of sync with the host kernel when loading the guest
1207 * FPU state. @ref sec_cpum_fpu (CPUM.cpp) and @bugref{4053}.
1208 */
1209 CPUMR0TouchHostFpu();
1210#endif
1211 int rc;
1212 bool fPreemptRestored = false;
1213 if (!HMR0SuspendPending())
1214 {
1215 /*
1216 * Enable the context switching hook.
1217 */
1218 if (pVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1219 {
1220 Assert(!RTThreadCtxHookIsEnabled(pVCpu->vmm.s.hCtxHook));
1221 int rc2 = RTThreadCtxHookEnable(pVCpu->vmm.s.hCtxHook); AssertRC(rc2);
1222 }
1223
1224 /*
1225 * Enter HM context.
1226 */
1227 rc = HMR0Enter(pVM, pVCpu);
1228 if (RT_SUCCESS(rc))
1229 {
1230 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
1231
1232 /*
1233 * When preemption hooks are in place, enable preemption now that
1234 * we're in HM context.
1235 */
1236 if (vmmR0ThreadCtxHookIsEnabled(pVCpu))
1237 {
1238 fPreemptRestored = true;
1239 RTThreadPreemptRestore(&PreemptState);
1240 }
1241
1242 /*
1243 * Setup the longjmp machinery and execute guest code (calls HMR0RunGuestCode).
1244 */
1245 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1246 rc = vmmR0CallRing3SetJmp(&pVCpu->vmm.s.CallRing3JmpBufR0, HMR0RunGuestCode, pVM, pVCpu);
1247 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1248
1249 /*
1250 * Assert sanity on the way out. Using manual assertion code here as normal
1251 * assertions are going to panic the host since we're outside the setjmp/longjmp zone.
1252 */
1253 if (RT_UNLIKELY( VMCPU_GET_STATE(pVCpu) != VMCPUSTATE_STARTED_HM
1254 && RT_SUCCESS_NP(rc) && rc != VINF_VMM_CALL_HOST ))
1255 {
1256 pVM->vmm.s.szRing0AssertMsg1[0] = '\0';
1257 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2),
1258 "Got VMCPU state %d expected %d.\n", VMCPU_GET_STATE(pVCpu), VMCPUSTATE_STARTED_HM);
1259 rc = VERR_VMM_WRONG_HM_VMCPU_STATE;
1260 }
1261 /** @todo Get rid of this. HM shouldn't disable the context hook. */
1262 else if (RT_UNLIKELY(vmmR0ThreadCtxHookIsEnabled(pVCpu)))
1263 {
1264 pVM->vmm.s.szRing0AssertMsg1[0] = '\0';
1265 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2),
1266 "Thread-context hooks still enabled! VCPU=%p Id=%u rc=%d.\n", pVCpu, pVCpu->idCpu, rc);
1267 rc = VERR_INVALID_STATE;
1268 }
1269
1270 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
1271 }
1272 STAM_COUNTER_INC(&pVM->vmm.s.StatRunRC);
1273
1274 /*
1275 * Invalidate the host CPU identifiers before we disable the context
1276 * hook / restore preemption.
1277 */
1278 pVCpu->iHostCpuSet = UINT32_MAX;
1279 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1280
1281 /*
1282 * Disable context hooks. Due to unresolved cleanup issues, we
1283 * cannot leave the hooks enabled when we return to ring-3.
1284 *
1285 * Note! At the moment HM may also have disabled the hook
1286 * when we get here, but the IPRT API handles that.
1287 */
1288 if (pVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1289 {
1290 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1291 RTThreadCtxHookDisable(pVCpu->vmm.s.hCtxHook);
1292 }
1293 }
1294 /*
1295 * The system is about to go into suspend mode; go back to ring 3.
1296 */
1297 else
1298 {
1299 rc = VINF_EM_RAW_INTERRUPT;
1300 pVCpu->iHostCpuSet = UINT32_MAX;
1301 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1302 }
1303
1304 /** @todo When HM stops messing with the context hook state, we'll disable
1305 * preemption again before the RTThreadCtxHookDisable call. */
1306 if (!fPreemptRestored)
1307 RTThreadPreemptRestore(&PreemptState);
1308
1309 pVCpu->vmm.s.iLastGZRc = rc;
1310
1311 /* Fire dtrace probe and collect statistics. */
1312 VBOXVMM_R0_VMM_RETURN_TO_RING3_HM(pVCpu, CPUMQueryGuestCtxPtr(pVCpu), rc);
1313#ifdef VBOX_WITH_STATISTICS
1314 vmmR0RecordRC(pVM, pVCpu, rc);
1315#endif
1316 }
1317 /*
1318 * Invalid CPU set index or TSC delta in need of measuring.
1319 */
1320 else
1321 {
1322 pVCpu->iHostCpuSet = UINT32_MAX;
1323 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1324 RTThreadPreemptRestore(&PreemptState);
1325 if (iHostCpuSet < RTCPUSET_MAX_CPUS)
1326 {
1327 int rc = SUPR0TscDeltaMeasureBySetIndex(pVM->pSession, iHostCpuSet, 0 /*fFlags*/,
1328 2 /*cMsWaitRetry*/, 5*RT_MS_1SEC /*cMsWaitThread*/,
1329 0 /*default cTries*/);
1330 if (RT_SUCCESS(rc) || rc == VERR_CPU_OFFLINE)
1331 pVCpu->vmm.s.iLastGZRc = VINF_EM_RAW_TO_R3;
1332 else
1333 pVCpu->vmm.s.iLastGZRc = rc;
1334 }
1335 else
1336 pVCpu->vmm.s.iLastGZRc = VERR_INVALID_CPU_INDEX;
1337 }
1338 break;
1339 }
1340
1341#ifdef VBOX_WITH_NEM_R0
1342# if defined(RT_ARCH_AMD64) && defined(RT_OS_WINDOWS)
1343 case VMMR0_DO_NEM_RUN:
1344 {
1345 /*
1346 * Setup the longjmp machinery and execute guest code (calls NEMR0RunGuestCode).
1347 */
1348 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1349 int rc = vmmR0CallRing3SetJmp2(&pVCpu->vmm.s.CallRing3JmpBufR0, NEMR0RunGuestCode, pGVM, idCpu);
1350 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1351 STAM_COUNTER_INC(&pVM->vmm.s.StatRunRC);
1352
1353 pVCpu->vmm.s.iLastGZRc = rc;
1354
1355 /*
1356 * Fire dtrace probe and collect statistics.
1357 */
1358 VBOXVMM_R0_VMM_RETURN_TO_RING3_NEM(pVCpu, CPUMQueryGuestCtxPtr(pVCpu), rc);
1359# ifdef VBOX_WITH_STATISTICS
1360 vmmR0RecordRC(pVM, pVCpu, rc);
1361# endif
1362 break;
1363 }
1364# endif
1365#endif
1366
1367
1368 /*
1369 * For profiling.
1370 */
1371 case VMMR0_DO_NOP:
1372 pVCpu->vmm.s.iLastGZRc = VINF_SUCCESS;
1373 break;
1374
1375 /*
1376 * Shouldn't happen.
1377 */
1378 default:
1379 AssertMsgFailed(("%#x\n", enmOperation));
1380 pVCpu->vmm.s.iLastGZRc = VERR_NOT_SUPPORTED;
1381 break;
1382 }
1383 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1384}
1385
1386
1387/**
1388 * Validates a session or VM session argument.
1389 *
1390 * @returns true / false accordingly.
1391 * @param pVM The cross context VM structure.
1392 * @param pClaimedSession The session claim to validate.
1393 * @param pSession The session argument.
1394 */
1395DECLINLINE(bool) vmmR0IsValidSession(PVM pVM, PSUPDRVSESSION pClaimedSession, PSUPDRVSESSION pSession)
1396{
1397 /* This must be set! */
1398 if (!pSession)
1399 return false;
1400
1401 /* Only one out of the two. */
1402 if (pVM && pClaimedSession)
1403 return false;
1404 if (pVM)
1405 pClaimedSession = pVM->pSession;
1406 return pClaimedSession == pSession;
1407}
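/* Editor's note (illustrative, not part of the original file): a hypothetical caller
 * sketch for vmmR0IsValidSession(); SOMEREQ/pReq stand in for a concrete request
 * structure carrying a pSession member. A session-scoped request handler would
 * typically guard itself like this before touching session state:
 *
 *     PSOMEREQ pReq = (PSOMEREQ)pReqHdr;
 *     if (!vmmR0IsValidSession(pVM, pReq->pSession, pSession))
 *         return VERR_PERMISSION_DENIED;
 */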
1408
1409
1410/**
1411 * VMMR0EntryEx worker function, either called directly or, whenever possible,
1412 * called through a longjmp so we can exit safely on failure.
1413 *
1414 * @returns VBox status code.
1415 * @param pGVM The global (ring-0) VM structure.
1416 * @param pVM The cross context VM structure.
1417 * @param idCpu Virtual CPU ID argument. Must be NIL_VMCPUID if pVM
1418 * is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't.
1419 * @param enmOperation Which operation to execute.
1420 * @param pReqHdr This points to a SUPVMMR0REQHDR packet. Optional.
1421 * The support driver validates this if it's present.
1422 * @param u64Arg Some simple constant argument.
1423 * @param pSession The session of the caller.
1424 *
1425 * @remarks Assume called with interrupts _enabled_.
1426 */
1427static int vmmR0EntryExWorker(PGVM pGVM, PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation,
1428 PSUPVMMR0REQHDR pReqHdr, uint64_t u64Arg, PSUPDRVSESSION pSession)
1429{
1430 /*
1431 * Validate pGVM, pVM and idCpu for consistency and validity.
1432 */
1433 if ( pGVM != NULL
1434 || pVM != NULL)
1435 {
1436 if (RT_LIKELY( RT_VALID_PTR(pGVM)
1437 && RT_VALID_PTR(pVM)
1438 && ((uintptr_t)pVM & PAGE_OFFSET_MASK) == 0))
1439 { /* likely */ }
1440 else
1441 {
1442 SUPR0Printf("vmmR0EntryExWorker: Invalid pGVM=%p and/or pVM=%p! (op=%d)\n", pGVM, pVM, enmOperation);
1443 return VERR_INVALID_POINTER;
1444 }
1445
1446 if (RT_LIKELY(pGVM->pVM == pVM))
1447 { /* likely */ }
1448 else
1449 {
1450 SUPR0Printf("vmmR0EntryExWorker: pVM mismatch: got %p, pGVM->pVM=%p\n", pVM, pGVM->pVM);
1451 return VERR_INVALID_PARAMETER;
1452 }
1453
1454 if (RT_LIKELY(idCpu == NIL_VMCPUID || idCpu < pGVM->cCpus))
1455 { /* likely */ }
1456 else
1457 {
1458 SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu %#x (cCpus=%#x)\n", idCpu, pGVM->cCpus);
1459 return VERR_INVALID_PARAMETER;
1460 }
1461
1462 if (RT_LIKELY( pVM->enmVMState >= VMSTATE_CREATING
1463 && pVM->enmVMState <= VMSTATE_TERMINATED
1464 && pVM->cCpus == pGVM->cCpus
1465 && pVM->pSession == pSession
1466 && pVM->pVMR0 == pVM))
1467 { /* likely */ }
1468 else
1469 {
1470 SUPR0Printf("vmmR0EntryExWorker: Invalid pVM=%p:{.enmVMState=%d, .cCpus=%#x(==%#x), .pSession=%p(==%p), .pVMR0=%p(==%p)}! (op=%d)\n",
1471 pVM, pVM->enmVMState, pVM->cCpus, pGVM->cCpus, pVM->pSession, pSession, pVM->pVMR0, pVM, enmOperation);
1472 return VERR_INVALID_POINTER;
1473 }
1474 }
1475 else if (RT_LIKELY(idCpu == NIL_VMCPUID))
1476 { /* likely */ }
1477 else
1478 {
1479 SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu=%u\n", idCpu);
1480 return VERR_INVALID_PARAMETER;
1481 }
1482
1483 /*
1484 * SMAP fun.
1485 */
1486 VMM_CHECK_SMAP_SETUP();
1487 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
1488
1489 /*
1490 * Process the request.
1491 */
1492 int rc;
1493 switch (enmOperation)
1494 {
1495 /*
1496 * GVM requests
1497 */
1498 case VMMR0_DO_GVMM_CREATE_VM:
1499 if (pGVM == NULL && pVM == NULL && u64Arg == 0 && idCpu == NIL_VMCPUID)
1500 rc = GVMMR0CreateVMReq((PGVMMCREATEVMREQ)pReqHdr, pSession);
1501 else
1502 rc = VERR_INVALID_PARAMETER;
1503 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
1504 break;
1505
1506 case VMMR0_DO_GVMM_DESTROY_VM:
1507 if (pReqHdr == NULL && u64Arg == 0)
1508 rc = GVMMR0DestroyVM(pGVM, pVM);
1509 else
1510 rc = VERR_INVALID_PARAMETER;
1511 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
1512 break;
1513
1514 case VMMR0_DO_GVMM_REGISTER_VMCPU:
1515 if (pGVM != NULL && pVM != NULL)
1516 rc = GVMMR0RegisterVCpu(pGVM, pVM, idCpu);
1517 else
1518 rc = VERR_INVALID_PARAMETER;
1519 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1520 break;
1521
1522 case VMMR0_DO_GVMM_DEREGISTER_VMCPU:
1523 if (pGVM != NULL && pVM != NULL)
1524 rc = GVMMR0DeregisterVCpu(pGVM, pVM, idCpu);
1525 else
1526 rc = VERR_INVALID_PARAMETER;
1527 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1528 break;
1529
1530 case VMMR0_DO_GVMM_SCHED_HALT:
1531 if (pReqHdr)
1532 return VERR_INVALID_PARAMETER;
1533 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1534 rc = GVMMR0SchedHalt(pGVM, pVM, idCpu, u64Arg);
1535 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1536 break;
1537
1538 case VMMR0_DO_GVMM_SCHED_WAKE_UP:
1539 if (pReqHdr || u64Arg)
1540 return VERR_INVALID_PARAMETER;
1541 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1542 rc = GVMMR0SchedWakeUp(pGVM, pVM, idCpu);
1543 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1544 break;
1545
1546 case VMMR0_DO_GVMM_SCHED_POKE:
1547 if (pReqHdr || u64Arg)
1548 return VERR_INVALID_PARAMETER;
1549 rc = GVMMR0SchedPoke(pGVM, pVM, idCpu);
1550 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1551 break;
1552
1553 case VMMR0_DO_GVMM_SCHED_WAKE_UP_AND_POKE_CPUS:
1554 if (u64Arg)
1555 return VERR_INVALID_PARAMETER;
1556 rc = GVMMR0SchedWakeUpAndPokeCpusReq(pGVM, pVM, (PGVMMSCHEDWAKEUPANDPOKECPUSREQ)pReqHdr);
1557 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1558 break;
1559
1560 case VMMR0_DO_GVMM_SCHED_POLL:
1561 if (pReqHdr || u64Arg > 1)
1562 return VERR_INVALID_PARAMETER;
1563 rc = GVMMR0SchedPoll(pGVM, pVM, idCpu, !!u64Arg);
1564 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1565 break;
1566
1567 case VMMR0_DO_GVMM_QUERY_STATISTICS:
1568 if (u64Arg)
1569 return VERR_INVALID_PARAMETER;
1570 rc = GVMMR0QueryStatisticsReq(pGVM, pVM, (PGVMMQUERYSTATISTICSSREQ)pReqHdr, pSession);
1571 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1572 break;
1573
1574 case VMMR0_DO_GVMM_RESET_STATISTICS:
1575 if (u64Arg)
1576 return VERR_INVALID_PARAMETER;
1577 rc = GVMMR0ResetStatisticsReq(pGVM, pVM, (PGVMMRESETSTATISTICSSREQ)pReqHdr, pSession);
1578 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1579 break;
1580
1581 /*
1582 * Initialize the R0 part of a VM instance.
1583 */
1584 case VMMR0_DO_VMMR0_INIT:
1585 rc = vmmR0InitVM(pGVM, pVM, RT_LODWORD(u64Arg), RT_HIDWORD(u64Arg));
1586 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1587 break;
1588
1589 /*
1590 * Does EMT specific ring-0 init.
1591 */
1592 case VMMR0_DO_VMMR0_INIT_EMT:
1593 rc = vmmR0InitVMEmt(pGVM, pVM, idCpu);
1594 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1595 break;
1596
1597 /*
1598 * Terminate the R0 part of a VM instance.
1599 */
1600 case VMMR0_DO_VMMR0_TERM:
1601 rc = VMMR0TermVM(pGVM, pVM, 0 /*idCpu*/);
1602 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1603 break;
1604
1605 /*
1606 * Attempt to enable hm mode and check the current setting.
1607 */
1608 case VMMR0_DO_HM_ENABLE:
1609 rc = HMR0EnableAllCpus(pVM);
1610 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1611 break;
1612
1613 /*
1614 * Setup the hardware accelerated session.
1615 */
1616 case VMMR0_DO_HM_SETUP_VM:
1617 rc = HMR0SetupVM(pVM);
1618 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1619 break;
1620
1621 /*
1622 * Switch to RC to execute Hypervisor function.
1623 */
1624 case VMMR0_DO_CALL_HYPERVISOR:
1625 {
1626#ifdef VBOX_WITH_RAW_MODE
1627 /*
1628 * Validate input / context.
1629 */
1630 if (RT_UNLIKELY(idCpu != 0))
1631 return VERR_INVALID_CPU_ID;
1632 if (RT_UNLIKELY(pVM->cCpus != 1))
1633 return VERR_INVALID_PARAMETER;
1634 PVMCPU pVCpu = &pVM->aCpus[idCpu];
1635# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
1636 if (RT_UNLIKELY(!PGMGetHyperCR3(pVCpu)))
1637 return VERR_PGM_NO_CR3_SHADOW_ROOT;
1638# endif
1639 if (RT_FAILURE(g_rcRawModeUsability))
1640 return g_rcRawModeUsability;
1641
1642 /*
1643 * Disable interrupts.
1644 */
1645 RTCCUINTREG fFlags = ASMIntDisableFlags();
1646
1647 /*
1648 * Get the host CPU identifiers, make sure they are valid and that
1649 * we've got a TSC delta for the CPU.
1650 */
1651 RTCPUID idHostCpu;
1652 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
1653 if (RT_UNLIKELY(iHostCpuSet >= RTCPUSET_MAX_CPUS))
1654 {
1655 ASMSetFlags(fFlags);
1656 return VERR_INVALID_CPU_INDEX;
1657 }
1658 if (RT_UNLIKELY(!SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
1659 {
1660 ASMSetFlags(fFlags);
1661 rc = SUPR0TscDeltaMeasureBySetIndex(pVM->pSession, iHostCpuSet, 0 /*fFlags*/,
1662 2 /*cMsWaitRetry*/, 5*RT_MS_1SEC /*cMsWaitThread*/,
1663 0 /*default cTries*/);
1664 if (RT_FAILURE(rc) && rc != VERR_CPU_OFFLINE)
1665 {
1666 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1667 return rc;
1668 }
1669 }
1670
1671 /*
1672 * Commit the CPU identifiers.
1673 */
1674# ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
1675 CPUMR0SetLApic(pVCpu, iHostCpuSet);
1676# endif
1677 pVCpu->iHostCpuSet = iHostCpuSet;
1678 ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
1679
1680 /*
1681 * We might need to disable VT-x if the active switcher turns off paging.
1682 */
1683 bool fVTxDisabled;
1684 rc = HMR0EnterSwitcher(pVM, pVM->vmm.s.enmSwitcher, &fVTxDisabled);
1685 if (RT_SUCCESS(rc))
1686 {
1687 /*
1688 * Go through the wormhole...
1689 */
1690 rc = pVM->vmm.s.pfnR0ToRawMode(pVM);
1691
1692 /*
1693 * Re-enable VT-x before we dispatch any pending host interrupts.
1694 */
1695 HMR0LeaveSwitcher(pVM, fVTxDisabled);
1696
1697 if ( rc == VINF_EM_RAW_INTERRUPT
1698 || rc == VINF_EM_RAW_INTERRUPT_HYPER)
1699 TRPMR0DispatchHostInterrupt(pVM);
1700 }
1701
1702 /*
1703 * Invalidate the host CPU identifiers as we restore interrupts.
1704 */
1705 pVCpu->iHostCpuSet = UINT32_MAX;
1706 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1707 ASMSetFlags(fFlags);
1708
1709#else /* !VBOX_WITH_RAW_MODE */
1710 rc = VERR_RAW_MODE_NOT_SUPPORTED;
1711#endif
1712 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1713 break;
1714 }
1715
1716 /*
1717 * PGM wrappers.
1718 */
1719 case VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES:
1720 if (idCpu == NIL_VMCPUID)
1721 return VERR_INVALID_CPU_ID;
1722 rc = PGMR0PhysAllocateHandyPages(pGVM, pVM, idCpu);
1723 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1724 break;
1725
1726 case VMMR0_DO_PGM_FLUSH_HANDY_PAGES:
1727 if (idCpu == NIL_VMCPUID)
1728 return VERR_INVALID_CPU_ID;
1729 rc = PGMR0PhysFlushHandyPages(pGVM, pVM, idCpu);
1730 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1731 break;
1732
1733 case VMMR0_DO_PGM_ALLOCATE_LARGE_HANDY_PAGE:
1734 if (idCpu == NIL_VMCPUID)
1735 return VERR_INVALID_CPU_ID;
1736 rc = PGMR0PhysAllocateLargeHandyPage(pGVM, pVM, idCpu);
1737 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1738 break;
1739
1740 case VMMR0_DO_PGM_PHYS_SETUP_IOMMU:
1741 if (idCpu != 0)
1742 return VERR_INVALID_CPU_ID;
1743 rc = PGMR0PhysSetupIoMmu(pGVM, pVM);
1744 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1745 break;
1746
1747 /*
1748 * GMM wrappers.
1749 */
1750 case VMMR0_DO_GMM_INITIAL_RESERVATION:
1751 if (u64Arg)
1752 return VERR_INVALID_PARAMETER;
1753 rc = GMMR0InitialReservationReq(pGVM, pVM, idCpu, (PGMMINITIALRESERVATIONREQ)pReqHdr);
1754 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1755 break;
1756
1757 case VMMR0_DO_GMM_UPDATE_RESERVATION:
1758 if (u64Arg)
1759 return VERR_INVALID_PARAMETER;
1760 rc = GMMR0UpdateReservationReq(pGVM, pVM, idCpu, (PGMMUPDATERESERVATIONREQ)pReqHdr);
1761 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1762 break;
1763
1764 case VMMR0_DO_GMM_ALLOCATE_PAGES:
1765 if (u64Arg)
1766 return VERR_INVALID_PARAMETER;
1767 rc = GMMR0AllocatePagesReq(pGVM, pVM, idCpu, (PGMMALLOCATEPAGESREQ)pReqHdr);
1768 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1769 break;
1770
1771 case VMMR0_DO_GMM_FREE_PAGES:
1772 if (u64Arg)
1773 return VERR_INVALID_PARAMETER;
1774 rc = GMMR0FreePagesReq(pGVM, pVM, idCpu, (PGMMFREEPAGESREQ)pReqHdr);
1775 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1776 break;
1777
1778 case VMMR0_DO_GMM_FREE_LARGE_PAGE:
1779 if (u64Arg)
1780 return VERR_INVALID_PARAMETER;
1781 rc = GMMR0FreeLargePageReq(pGVM, pVM, idCpu, (PGMMFREELARGEPAGEREQ)pReqHdr);
1782 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1783 break;
1784
1785 case VMMR0_DO_GMM_QUERY_HYPERVISOR_MEM_STATS:
1786 if (u64Arg)
1787 return VERR_INVALID_PARAMETER;
1788 rc = GMMR0QueryHypervisorMemoryStatsReq((PGMMMEMSTATSREQ)pReqHdr);
1789 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1790 break;
1791
1792 case VMMR0_DO_GMM_QUERY_MEM_STATS:
1793 if (idCpu == NIL_VMCPUID)
1794 return VERR_INVALID_CPU_ID;
1795 if (u64Arg)
1796 return VERR_INVALID_PARAMETER;
1797 rc = GMMR0QueryMemoryStatsReq(pGVM, pVM, idCpu, (PGMMMEMSTATSREQ)pReqHdr);
1798 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1799 break;
1800
1801 case VMMR0_DO_GMM_BALLOONED_PAGES:
1802 if (u64Arg)
1803 return VERR_INVALID_PARAMETER;
1804 rc = GMMR0BalloonedPagesReq(pGVM, pVM, idCpu, (PGMMBALLOONEDPAGESREQ)pReqHdr);
1805 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1806 break;
1807
1808 case VMMR0_DO_GMM_MAP_UNMAP_CHUNK:
1809 if (u64Arg)
1810 return VERR_INVALID_PARAMETER;
1811 rc = GMMR0MapUnmapChunkReq(pGVM, pVM, (PGMMMAPUNMAPCHUNKREQ)pReqHdr);
1812 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1813 break;
1814
1815 case VMMR0_DO_GMM_SEED_CHUNK:
1816 if (pReqHdr)
1817 return VERR_INVALID_PARAMETER;
1818 rc = GMMR0SeedChunk(pGVM, pVM, idCpu, (RTR3PTR)u64Arg);
1819 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1820 break;
1821
1822 case VMMR0_DO_GMM_REGISTER_SHARED_MODULE:
1823 if (idCpu == NIL_VMCPUID)
1824 return VERR_INVALID_CPU_ID;
1825 if (u64Arg)
1826 return VERR_INVALID_PARAMETER;
1827 rc = GMMR0RegisterSharedModuleReq(pGVM, pVM, idCpu, (PGMMREGISTERSHAREDMODULEREQ)pReqHdr);
1828 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1829 break;
1830
1831 case VMMR0_DO_GMM_UNREGISTER_SHARED_MODULE:
1832 if (idCpu == NIL_VMCPUID)
1833 return VERR_INVALID_CPU_ID;
1834 if (u64Arg)
1835 return VERR_INVALID_PARAMETER;
1836 rc = GMMR0UnregisterSharedModuleReq(pGVM, pVM, idCpu, (PGMMUNREGISTERSHAREDMODULEREQ)pReqHdr);
1837 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1838 break;
1839
1840 case VMMR0_DO_GMM_RESET_SHARED_MODULES:
1841 if (idCpu == NIL_VMCPUID)
1842 return VERR_INVALID_CPU_ID;
1843 if ( u64Arg
1844 || pReqHdr)
1845 return VERR_INVALID_PARAMETER;
1846 rc = GMMR0ResetSharedModules(pGVM, pVM, idCpu);
1847 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1848 break;
1849
1850#ifdef VBOX_WITH_PAGE_SHARING
1851 case VMMR0_DO_GMM_CHECK_SHARED_MODULES:
1852 {
1853 if (idCpu == NIL_VMCPUID)
1854 return VERR_INVALID_CPU_ID;
1855 if ( u64Arg
1856 || pReqHdr)
1857 return VERR_INVALID_PARAMETER;
1858 rc = GMMR0CheckSharedModules(pGVM, pVM, idCpu);
1859 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1860 break;
1861 }
1862#endif
1863
1864#if defined(VBOX_STRICT) && HC_ARCH_BITS == 64
1865 case VMMR0_DO_GMM_FIND_DUPLICATE_PAGE:
1866 if (u64Arg)
1867 return VERR_INVALID_PARAMETER;
1868 rc = GMMR0FindDuplicatePageReq(pGVM, pVM, (PGMMFINDDUPLICATEPAGEREQ)pReqHdr);
1869 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1870 break;
1871#endif
1872
1873 case VMMR0_DO_GMM_QUERY_STATISTICS:
1874 if (u64Arg)
1875 return VERR_INVALID_PARAMETER;
1876 rc = GMMR0QueryStatisticsReq(pGVM, pVM, (PGMMQUERYSTATISTICSSREQ)pReqHdr);
1877 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1878 break;
1879
1880 case VMMR0_DO_GMM_RESET_STATISTICS:
1881 if (u64Arg)
1882 return VERR_INVALID_PARAMETER;
1883 rc = GMMR0ResetStatisticsReq(pGVM, pVM, (PGMMRESETSTATISTICSSREQ)pReqHdr);
1884 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1885 break;
1886
1887 /*
1888 * A quick GCFGM mock-up.
1889 */
1890 /** @todo GCFGM with proper access control, ring-3 management interface and all that. */
1891 case VMMR0_DO_GCFGM_SET_VALUE:
1892 case VMMR0_DO_GCFGM_QUERY_VALUE:
1893 {
1894 if (pGVM || pVM || !pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
1895 return VERR_INVALID_PARAMETER;
1896 PGCFGMVALUEREQ pReq = (PGCFGMVALUEREQ)pReqHdr;
1897 if (pReq->Hdr.cbReq != sizeof(*pReq))
1898 return VERR_INVALID_PARAMETER;
1899 if (enmOperation == VMMR0_DO_GCFGM_SET_VALUE)
1900 {
1901 rc = GVMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
1902 //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
1903 // rc = GMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
1904 }
1905 else
1906 {
1907 rc = GVMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
1908 //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
1909 // rc = GMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
1910 }
1911 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1912 break;
1913 }
1914
1915 /*
1916 * PDM Wrappers.
1917 */
1918 case VMMR0_DO_PDM_DRIVER_CALL_REQ_HANDLER:
1919 {
1920 if (!pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
1921 return VERR_INVALID_PARAMETER;
1922 rc = PDMR0DriverCallReqHandler(pGVM, pVM, (PPDMDRIVERCALLREQHANDLERREQ)pReqHdr);
1923 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1924 break;
1925 }
1926
1927 case VMMR0_DO_PDM_DEVICE_CALL_REQ_HANDLER:
1928 {
1929 if (!pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
1930 return VERR_INVALID_PARAMETER;
1931 rc = PDMR0DeviceCallReqHandler(pGVM, pVM, (PPDMDEVICECALLREQHANDLERREQ)pReqHdr);
1932 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1933 break;
1934 }
1935
1936 /*
1937 * Requests to the internal networking service.
1938 */
1939 case VMMR0_DO_INTNET_OPEN:
1940 {
1941 PINTNETOPENREQ pReq = (PINTNETOPENREQ)pReqHdr;
1942 if (u64Arg || !pReq || !vmmR0IsValidSession(pVM, pReq->pSession, pSession) || idCpu != NIL_VMCPUID)
1943 return VERR_INVALID_PARAMETER;
1944 rc = IntNetR0OpenReq(pSession, pReq);
1945 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1946 break;
1947 }
1948
1949 case VMMR0_DO_INTNET_IF_CLOSE:
1950 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFCLOSEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1951 return VERR_INVALID_PARAMETER;
1952 rc = IntNetR0IfCloseReq(pSession, (PINTNETIFCLOSEREQ)pReqHdr);
1953 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1954 break;
1955
1956
1957 case VMMR0_DO_INTNET_IF_GET_BUFFER_PTRS:
1958 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFGETBUFFERPTRSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1959 return VERR_INVALID_PARAMETER;
1960 rc = IntNetR0IfGetBufferPtrsReq(pSession, (PINTNETIFGETBUFFERPTRSREQ)pReqHdr);
1961 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1962 break;
1963
1964 case VMMR0_DO_INTNET_IF_SET_PROMISCUOUS_MODE:
1965 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1966 return VERR_INVALID_PARAMETER;
1967 rc = IntNetR0IfSetPromiscuousModeReq(pSession, (PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr);
1968 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1969 break;
1970
1971 case VMMR0_DO_INTNET_IF_SET_MAC_ADDRESS:
1972 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETMACADDRESSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1973 return VERR_INVALID_PARAMETER;
1974 rc = IntNetR0IfSetMacAddressReq(pSession, (PINTNETIFSETMACADDRESSREQ)pReqHdr);
1975 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1976 break;
1977
1978 case VMMR0_DO_INTNET_IF_SET_ACTIVE:
1979 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETACTIVEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1980 return VERR_INVALID_PARAMETER;
1981 rc = IntNetR0IfSetActiveReq(pSession, (PINTNETIFSETACTIVEREQ)pReqHdr);
1982 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1983 break;
1984
1985 case VMMR0_DO_INTNET_IF_SEND:
1986 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1987 return VERR_INVALID_PARAMETER;
1988 rc = IntNetR0IfSendReq(pSession, (PINTNETIFSENDREQ)pReqHdr);
1989 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1990 break;
1991
1992 case VMMR0_DO_INTNET_IF_WAIT:
1993 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1994 return VERR_INVALID_PARAMETER;
1995 rc = IntNetR0IfWaitReq(pSession, (PINTNETIFWAITREQ)pReqHdr);
1996 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1997 break;
1998
1999 case VMMR0_DO_INTNET_IF_ABORT_WAIT:
2000 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2001 return VERR_INVALID_PARAMETER;
2002 rc = IntNetR0IfAbortWaitReq(pSession, (PINTNETIFABORTWAITREQ)pReqHdr);
2003 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2004 break;
2005
2006#ifdef VBOX_WITH_PCI_PASSTHROUGH
2007 /*
2008 * Requests to host PCI driver service.
2009 */
2010 case VMMR0_DO_PCIRAW_REQ:
2011 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PPCIRAWSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2012 return VERR_INVALID_PARAMETER;
2013 rc = PciRawR0ProcessReq(pGVM, pVM, pSession, (PPCIRAWSENDREQ)pReqHdr);
2014 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2015 break;
2016#endif
2017
2018 /*
2019 * NEM requests.
2020 */
2021#ifdef VBOX_WITH_NEM_R0
2022# if defined(RT_ARCH_AMD64) && defined(RT_OS_WINDOWS)
2023 case VMMR0_DO_NEM_INIT_VM:
2024 if (u64Arg || pReqHdr || idCpu != 0)
2025 return VERR_INVALID_PARAMETER;
2026 rc = NEMR0InitVM(pGVM, pVM);
2027 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2028 break;
2029
2030 case VMMR0_DO_NEM_INIT_VM_PART_2:
2031 if (u64Arg || pReqHdr || idCpu != 0)
2032 return VERR_INVALID_PARAMETER;
2033 rc = NEMR0InitVMPart2(pGVM, pVM);
2034 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2035 break;
2036
2037 case VMMR0_DO_NEM_MAP_PAGES:
2038 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2039 return VERR_INVALID_PARAMETER;
2040 rc = NEMR0MapPages(pGVM, pVM, idCpu);
2041 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2042 break;
2043
2044 case VMMR0_DO_NEM_UNMAP_PAGES:
2045 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2046 return VERR_INVALID_PARAMETER;
2047 rc = NEMR0UnmapPages(pGVM, pVM, idCpu);
2048 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2049 break;
2050
2051 case VMMR0_DO_NEM_EXPORT_STATE:
2052 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2053 return VERR_INVALID_PARAMETER;
2054 rc = NEMR0ExportState(pGVM, pVM, idCpu);
2055 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2056 break;
2057
2058 case VMMR0_DO_NEM_IMPORT_STATE:
2059 if (pReqHdr || idCpu == NIL_VMCPUID)
2060 return VERR_INVALID_PARAMETER;
2061 rc = NEMR0ImportState(pGVM, pVM, idCpu, u64Arg);
2062 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2063 break;
2064# endif
2065#endif
2066
2067 /*
2068 * For profiling.
2069 */
2070 case VMMR0_DO_NOP:
2071 case VMMR0_DO_SLOW_NOP:
2072 return VINF_SUCCESS;
2073
2074 /*
2075 * For testing Ring-0 APIs invoked in this environment.
2076 */
2077 case VMMR0_DO_TESTS:
2078 /** @todo make new test */
2079 return VINF_SUCCESS;
2080
2081
2082#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
2083 case VMMR0_DO_TEST_SWITCHER3264:
2084 if (idCpu == NIL_VMCPUID)
2085 return VERR_INVALID_CPU_ID;
2086 rc = HMR0TestSwitcher3264(pVM);
2087 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2088 break;
2089#endif
2090 default:
2091 /*
2092         * We're returning VERR_NOT_SUPPORTED here so we've got something other
2093         * than -1, which the interrupt gate glue code might return.
2094 */
2095 Log(("operation %#x is not supported\n", enmOperation));
2096 return VERR_NOT_SUPPORTED;
2097 }
2098 return rc;
2099}
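
/*
 * Illustrative only: a minimal ring-3 sketch of driving the GCFGM cases handled
 * above.  The SUPR3CallVMMR0Ex entry point and the exact GCFGMVALUEREQ layout
 * are assumptions here, as is the "/GVMM/MinSleepMS" value name; the worker
 * itself only requires pGVM/pVM to be NULL, idCpu to be NIL_VMCPUID, u64Arg to
 * be zero, a non-NULL request header, and Hdr.cbReq matching the request size.
 */
#if 0 /* sketch, not built */
    GCFGMVALUEREQ Req;
    RT_ZERO(Req);
    Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;    /* assumed header magic constant */
    Req.Hdr.cbReq    = sizeof(Req);             /* checked against sizeof(*pReq) above */
    Req.pSession     = pSession;                /* the caller's support driver session */
    RTStrCopy(Req.szName, sizeof(Req.szName), "/GVMM/MinSleepMS"); /* hypothetical value name */
    int rc = SUPR3CallVMMR0Ex(NIL_RTR0PTR, NIL_VMCPUID, VMMR0_DO_GCFGM_QUERY_VALUE,
                              0 /*u64Arg*/, &Req.Hdr);
    if (RT_SUCCESS(rc))
        RTPrintf("GCFGM: %s=%#RX64\n", Req.szName, Req.u64Value);
#endif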
2100
2101
2102/**
2103 * Argument for vmmR0EntryExWrapper containing the arguments for VMMR0EntryEx.
2104 */
2105typedef struct VMMR0ENTRYEXARGS
2106{
2107 PGVM pGVM;
2108 PVM pVM;
2109 VMCPUID idCpu;
2110 VMMR0OPERATION enmOperation;
2111 PSUPVMMR0REQHDR pReq;
2112 uint64_t u64Arg;
2113 PSUPDRVSESSION pSession;
2114} VMMR0ENTRYEXARGS;
2115/** Pointer to a vmmR0EntryExWrapper argument package. */
2116typedef VMMR0ENTRYEXARGS *PVMMR0ENTRYEXARGS;
2117
2118/**
2119 * This is just a longjmp wrapper function for VMMR0EntryEx calls.
2120 *
2121 * @returns VBox status code.
2122 * @param pvArgs The argument package
2123 */
2124static DECLCALLBACK(int) vmmR0EntryExWrapper(void *pvArgs)
2125{
2126 return vmmR0EntryExWorker(((PVMMR0ENTRYEXARGS)pvArgs)->pGVM,
2127 ((PVMMR0ENTRYEXARGS)pvArgs)->pVM,
2128 ((PVMMR0ENTRYEXARGS)pvArgs)->idCpu,
2129 ((PVMMR0ENTRYEXARGS)pvArgs)->enmOperation,
2130 ((PVMMR0ENTRYEXARGS)pvArgs)->pReq,
2131 ((PVMMR0ENTRYEXARGS)pvArgs)->u64Arg,
2132 ((PVMMR0ENTRYEXARGS)pvArgs)->pSession);
2133}
2134
2135
2136/**
2137 * The Ring 0 entry point, called by the support library (SUP).
2138 *
2139 * @returns VBox status code.
2140 * @param pGVM The global (ring-0) VM structure.
2141 * @param pVM The cross context VM structure.
2142 * @param idCpu Virtual CPU ID argument. Must be NIL_VMCPUID if pVM
2143 * is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't
2144 * @param enmOperation Which operation to execute.
2145 * @param pReq Pointer to the SUPVMMR0REQHDR packet. Optional.
2146 * @param u64Arg Some simple constant argument.
2147 * @param pSession The session of the caller.
2148 * @remarks Assume called with interrupts _enabled_.
2149 */
2150VMMR0DECL(int) VMMR0EntryEx(PGVM pGVM, PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation,
2151 PSUPVMMR0REQHDR pReq, uint64_t u64Arg, PSUPDRVSESSION pSession)
2152{
2153 /*
2154 * Requests that should only happen on the EMT thread will be
2155 * wrapped in a setjmp so we can assert without causing trouble.
2156 */
2157 if ( pVM != NULL
2158 && pGVM != NULL
2159 && idCpu < pGVM->cCpus
2160 && pVM->pVMR0 != NULL)
2161 {
2162 switch (enmOperation)
2163 {
2164 /* These might/will be called before VMMR3Init. */
2165 case VMMR0_DO_GMM_INITIAL_RESERVATION:
2166 case VMMR0_DO_GMM_UPDATE_RESERVATION:
2167 case VMMR0_DO_GMM_ALLOCATE_PAGES:
2168 case VMMR0_DO_GMM_FREE_PAGES:
2169 case VMMR0_DO_GMM_BALLOONED_PAGES:
2170            /* On the Mac we might not have a valid jmp buf, so check these as well. */
2171 case VMMR0_DO_VMMR0_INIT:
2172 case VMMR0_DO_VMMR0_TERM:
2173 {
2174 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2175 PVMCPU pVCpu = &pVM->aCpus[idCpu];
2176 RTNATIVETHREAD hNativeThread = RTThreadNativeSelf();
2177 if (RT_LIKELY( pGVCpu->hEMT == hNativeThread
2178 && pVCpu->hNativeThreadR0 == hNativeThread))
2179 {
2180 if (!pVCpu->vmm.s.CallRing3JmpBufR0.pvSavedStack)
2181 break;
2182
2183 /** @todo validate this EMT claim... GVM knows. */
2184 VMMR0ENTRYEXARGS Args;
2185 Args.pGVM = pGVM;
2186 Args.pVM = pVM;
2187 Args.idCpu = idCpu;
2188 Args.enmOperation = enmOperation;
2189 Args.pReq = pReq;
2190 Args.u64Arg = u64Arg;
2191 Args.pSession = pSession;
2192 return vmmR0CallRing3SetJmpEx(&pVCpu->vmm.s.CallRing3JmpBufR0, vmmR0EntryExWrapper, &Args);
2193 }
2194 return VERR_VM_THREAD_NOT_EMT;
2195 }
2196
2197 default:
2198 break;
2199 }
2200 }
2201 return vmmR0EntryExWorker(pGVM, pVM, idCpu, enmOperation, pReq, u64Arg, pSession);
2202}
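
/*
 * The EMT-only path above arms the ring-0 jump buffer so that assertions and
 * ring-3 call-backs deep inside the operation can bail out to a well-defined
 * point.  Below is a conceptual, standalone analogue of that guard using the
 * standard C setjmp/longjmp; it is only a sketch and does not use the VMM's
 * own vmmR0CallRing3SetJmpEx machinery.
 */
#if 0 /* conceptual analogue, not built */
# include <setjmp.h>
# include <stdio.h>

static jmp_buf g_JmpBuf;

static void workerThatMayBailOut(int fFail)
{
    if (fFail)
        longjmp(g_JmpBuf, 42);          /* corresponds to an assertion taking the long jump */
    printf("worker completed normally\n");
}

static int runGuarded(int fFail)
{
    int rcJmp = setjmp(g_JmpBuf);       /* arm the jump buffer before calling the worker */
    if (rcJmp == 0)
    {
        workerThatMayBailOut(fFail);
        return 0;                       /* normal completion */
    }
    return rcJmp;                       /* resumed here after the long jump */
}
#endif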
2203
2204
2205/**
2206 * Checks whether we've armed the ring-0 long jump machinery.
2207 *
2208 * @returns @c true / @c false
2209 * @param pVCpu The cross context virtual CPU structure.
2210 * @thread EMT
2211 * @sa VMMIsLongJumpArmed
2212 */
2213VMMR0_INT_DECL(bool) VMMR0IsLongJumpArmed(PVMCPU pVCpu)
2214{
2215#ifdef RT_ARCH_X86
2216 return pVCpu->vmm.s.CallRing3JmpBufR0.eip
2217 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
2218#else
2219 return pVCpu->vmm.s.CallRing3JmpBufR0.rip
2220 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
2221#endif
2222}
2223
2224
2225/**
2226 * Checks whether we've done a ring-3 long jump.
2227 *
2228 * @returns @c true / @c false
2229 * @param pVCpu The cross context virtual CPU structure.
2230 * @thread EMT
2231 */
2232VMMR0_INT_DECL(bool) VMMR0IsInRing3LongJump(PVMCPU pVCpu)
2233{
2234 return pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
2235}
2236
2237
2238/**
2239 * Internal R0 logger worker: Flush logger.
2240 *
2241 * @param pLogger The logger instance to flush.
2242 * @remark This function must be exported!
2243 */
2244VMMR0DECL(void) vmmR0LoggerFlush(PRTLOGGER pLogger)
2245{
2246#ifdef LOG_ENABLED
2247 /*
2248 * Convert the pLogger into a VM handle and 'call' back to Ring-3.
2249     * (This code is a bit paranoid.)
2250 */
2251 PVMMR0LOGGER pR0Logger = (PVMMR0LOGGER)((uintptr_t)pLogger - RT_OFFSETOF(VMMR0LOGGER, Logger));
2252 if ( !VALID_PTR(pR0Logger)
2253 || !VALID_PTR(pR0Logger + 1)
2254 || pLogger->u32Magic != RTLOGGER_MAGIC)
2255 {
2256# ifdef DEBUG
2257 SUPR0Printf("vmmR0LoggerFlush: pLogger=%p!\n", pLogger);
2258# endif
2259 return;
2260 }
2261 if (pR0Logger->fFlushingDisabled)
2262 return; /* quietly */
2263
2264 PVM pVM = pR0Logger->pVM;
2265 if ( !VALID_PTR(pVM)
2266 || pVM->pVMR0 != pVM)
2267 {
2268# ifdef DEBUG
2269 SUPR0Printf("vmmR0LoggerFlush: pVM=%p! pVMR0=%p! pLogger=%p\n", pVM, pVM->pVMR0, pLogger);
2270# endif
2271 return;
2272 }
2273
2274 PVMCPU pVCpu = VMMGetCpu(pVM);
2275 if (pVCpu)
2276 {
2277 /*
2278 * Check that the jump buffer is armed.
2279 */
2280# ifdef RT_ARCH_X86
2281 if ( !pVCpu->vmm.s.CallRing3JmpBufR0.eip
2282 || pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2283# else
2284 if ( !pVCpu->vmm.s.CallRing3JmpBufR0.rip
2285 || pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2286# endif
2287 {
2288# ifdef DEBUG
2289 SUPR0Printf("vmmR0LoggerFlush: Jump buffer isn't armed!\n");
2290# endif
2291 return;
2292 }
2293 VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VMM_LOGGER_FLUSH, 0);
2294 }
2295# ifdef DEBUG
2296 else
2297 SUPR0Printf("vmmR0LoggerFlush: invalid VCPU context!\n");
2298# endif
2299#else
2300 NOREF(pLogger);
2301#endif /* LOG_ENABLED */
2302}
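
/*
 * vmmR0LoggerFlush recovers the owning VMMR0LOGGER from the embedded RTLOGGER
 * by subtracting the member offset.  The snippet below is a generic sketch of
 * that container-of idiom using hypothetical INNER/OUTER types, not VMM code.
 */
#if 0 /* generic illustration, not built */
# include <stddef.h>
# include <stdint.h>

typedef struct INNER { int iPayload; } INNER;
typedef struct OUTER { unsigned uMagic; INNER Inner; } OUTER;

static OUTER *outerFromInner(INNER *pInner)
{
    /* Same arithmetic as the RT_OFFSETOF() subtraction in the flush worker. */
    return (OUTER *)((uintptr_t)pInner - offsetof(OUTER, Inner));
}
#endif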
2303
2304/**
2305 * Internal R0 logger worker: Custom prefix.
2306 *
2307 * @returns Number of chars written.
2308 *
2309 * @param pLogger The logger instance.
2310 * @param pchBuf The output buffer.
2311 * @param cchBuf The size of the buffer.
2312 * @param pvUser User argument (ignored).
2313 */
2314VMMR0DECL(size_t) vmmR0LoggerPrefix(PRTLOGGER pLogger, char *pchBuf, size_t cchBuf, void *pvUser)
2315{
2316 NOREF(pvUser);
2317#ifdef LOG_ENABLED
2318 PVMMR0LOGGER pR0Logger = (PVMMR0LOGGER)((uintptr_t)pLogger - RT_OFFSETOF(VMMR0LOGGER, Logger));
2319 if ( !VALID_PTR(pR0Logger)
2320 || !VALID_PTR(pR0Logger + 1)
2321 || pLogger->u32Magic != RTLOGGER_MAGIC
2322 || cchBuf < 2)
2323 return 0;
2324
2325 static const char s_szHex[17] = "0123456789abcdef";
2326 VMCPUID const idCpu = pR0Logger->idCpu;
2327 pchBuf[1] = s_szHex[ idCpu & 15];
2328 pchBuf[0] = s_szHex[(idCpu >> 4) & 15];
2329
2330 return 2;
2331#else
2332 NOREF(pLogger); NOREF(pchBuf); NOREF(cchBuf);
2333 return 0;
2334#endif
2335}
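
/*
 * The prefix callback above tags every ring-0 log line with the low byte of
 * the VCPU id as two lowercase hex digits, e.g. "1a" for idCpu 0x1a (ids above
 * 0xff only show their low byte).  A standalone restatement of that encoding:
 */
#if 0 /* worked example, not built */
static void formatCpuPrefix(unsigned idCpu, char achPrefix[2])
{
    static const char s_szHex[17] = "0123456789abcdef";
    achPrefix[0] = s_szHex[(idCpu >> 4) & 15];  /* high nibble: '1' for idCpu 0x1a */
    achPrefix[1] = s_szHex[ idCpu       & 15];  /* low nibble:  'a' for idCpu 0x1a */
}
#endif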
2336
2337#ifdef LOG_ENABLED
2338
2339/**
2340 * Disables flushing of the ring-0 debug log.
2341 *
2342 * @param pVCpu The cross context virtual CPU structure.
2343 */
2344VMMR0_INT_DECL(void) VMMR0LogFlushDisable(PVMCPU pVCpu)
2345{
2346 if (pVCpu->vmm.s.pR0LoggerR0)
2347 pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled = true;
2348}
2349
2350
2351/**
2352 * Enables flushing of the ring-0 debug log.
2353 *
2354 * @param pVCpu The cross context virtual CPU structure.
2355 */
2356VMMR0_INT_DECL(void) VMMR0LogFlushEnable(PVMCPU pVCpu)
2357{
2358 if (pVCpu->vmm.s.pR0LoggerR0)
2359 pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled = false;
2360}
2361
2362
2363/**
2364 * Checks whether log flushing is disabled (also true when no ring-0 logger is registered).
2365 *
2366 * @param pVCpu The cross context virtual CPU structure.
2367 */
2368VMMR0_INT_DECL(bool) VMMR0IsLogFlushDisabled(PVMCPU pVCpu)
2369{
2370 if (pVCpu->vmm.s.pR0LoggerR0)
2371 return pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled;
2372 return true;
2373}
2374#endif /* LOG_ENABLED */
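
/*
 * Typical pairing of the helpers above: flushing goes through VMMRZCallRing3,
 * so it is disabled across sections where leaving ring-0 is not an option.
 * This is only a hedged usage sketch; doWorkThatMustNotLeaveRing0() is a
 * hypothetical placeholder for such a section.
 */
#if 0 /* usage sketch, not built */
    VMMR0LogFlushDisable(pVCpu);            /* no ring-3 flush call-backs from here on */
    doWorkThatMustNotLeaveRing0(pVCpu);     /* hypothetical critical section */
    VMMR0LogFlushEnable(pVCpu);             /* flushing becomes safe again */
#endif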
2375
2376/**
2377 * Jump back to ring-3 if we're the EMT and the longjmp is armed.
2378 *
2379 * @returns true if the breakpoint should be hit, false if it should be ignored.
2380 */
2381DECLEXPORT(bool) RTCALL RTAssertShouldPanic(void)
2382{
2383#if 0
2384 return true;
2385#else
2386 PVM pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
2387 if (pVM)
2388 {
2389 PVMCPU pVCpu = VMMGetCpu(pVM);
2390
2391 if (pVCpu)
2392 {
2393#ifdef RT_ARCH_X86
2394 if ( pVCpu->vmm.s.CallRing3JmpBufR0.eip
2395 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2396#else
2397 if ( pVCpu->vmm.s.CallRing3JmpBufR0.rip
2398 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2399#endif
2400 {
2401 int rc = VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VM_R0_ASSERTION, 0);
2402 return RT_FAILURE_NP(rc);
2403 }
2404 }
2405 }
2406#ifdef RT_OS_LINUX
2407 return true;
2408#else
2409 return false;
2410#endif
2411#endif
2412}
2413
2414
2415/**
2416 * Override this so we can push it up to ring-3.
2417 *
2418 * @param pszExpr Expression. Can be NULL.
2419 * @param uLine Location line number.
2420 * @param pszFile Location file name.
2421 * @param pszFunction Location function name.
2422 */
2423DECLEXPORT(void) RTCALL RTAssertMsg1Weak(const char *pszExpr, unsigned uLine, const char *pszFile, const char *pszFunction)
2424{
2425 /*
2426 * To the log.
2427 */
2428 LogAlways(("\n!!R0-Assertion Failed!!\n"
2429 "Expression: %s\n"
2430 "Location : %s(%d) %s\n",
2431 pszExpr, pszFile, uLine, pszFunction));
2432
2433 /*
2434 * To the global VMM buffer.
2435 */
2436 PVM pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
2437 if (pVM)
2438 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg1, sizeof(pVM->vmm.s.szRing0AssertMsg1),
2439 "\n!!R0-Assertion Failed!!\n"
2440 "Expression: %.*s\n"
2441 "Location : %s(%d) %s\n",
2442 sizeof(pVM->vmm.s.szRing0AssertMsg1) / 4 * 3, pszExpr,
2443 pszFile, uLine, pszFunction);
2444
2445 /*
2446 * Continue the normal way.
2447 */
2448 RTAssertMsg1(pszExpr, uLine, pszFile, pszFunction);
2449}
2450
2451
2452/**
2453 * Callback for RTLogFormatV which writes to the ring-3 log port.
2454 * See PFNLOGOUTPUT() for details.
2455 */
2456static DECLCALLBACK(size_t) rtLogOutput(void *pv, const char *pachChars, size_t cbChars)
2457{
2458 for (size_t i = 0; i < cbChars; i++)
2459 {
2460 LogAlways(("%c", pachChars[i])); NOREF(pachChars);
2461 }
2462
2463 NOREF(pv);
2464 return cbChars;
2465}
2466
2467
2468/**
2469 * Override this so we can push it up to ring-3.
2470 *
2471 * @param pszFormat The format string.
2472 * @param va Arguments.
2473 */
2474DECLEXPORT(void) RTCALL RTAssertMsg2WeakV(const char *pszFormat, va_list va)
2475{
2476 va_list vaCopy;
2477
2478 /*
2479 * Push the message to the loggers.
2480 */
2481 PRTLOGGER pLog = RTLogGetDefaultInstance(); /* Don't initialize it here... */
2482 if (pLog)
2483 {
2484 va_copy(vaCopy, va);
2485 RTLogFormatV(rtLogOutput, pLog, pszFormat, vaCopy);
2486 va_end(vaCopy);
2487 }
2488 pLog = RTLogRelGetDefaultInstance();
2489 if (pLog)
2490 {
2491 va_copy(vaCopy, va);
2492 RTLogFormatV(rtLogOutput, pLog, pszFormat, vaCopy);
2493 va_end(vaCopy);
2494 }
2495
2496 /*
2497 * Push it to the global VMM buffer.
2498 */
2499 PVM pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
2500 if (pVM)
2501 {
2502 va_copy(vaCopy, va);
2503 RTStrPrintfV(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2), pszFormat, vaCopy);
2504 va_end(vaCopy);
2505 }
2506
2507 /*
2508 * Continue the normal way.
2509 */
2510 RTAssertMsg2V(pszFormat, va);
2511}
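
/*
 * RTAssertMsg2WeakV above consumes the same va_list several times, which is
 * only valid because each consumer works on its own va_copy.  A minimal,
 * standalone illustration of that rule (independent of the VMM types):
 */
#if 0 /* minimal va_copy illustration, not built */
# include <stdarg.h>
# include <stdio.h>

static void printTwiceV(const char *pszFormat, va_list va)
{
    va_list vaCopy;
    va_copy(vaCopy, va);            /* the first pass gets a private copy */
    vprintf(pszFormat, vaCopy);
    va_end(vaCopy);
    vprintf(pszFormat, va);         /* the original list is still good for one pass */
}
#endif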
2512