VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/VMMR0.cpp@71214

Last change on this file since 71214 was 71198, checked in by vboxsync, 7 years ago

SUPDrv,VMMR0: Prepped for extending the fast I/O control interface a bit for NEM; SUPDRV version increment. bugref:9044

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 87.0 KB
1/* $Id: VMMR0.cpp 71198 2018-03-05 10:59:17Z vboxsync $ */
2/** @file
3 * VMM - Host Context Ring 0.
4 */
5
6/*
7 * Copyright (C) 2006-2017 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_VMM
23#include <VBox/vmm/vmm.h>
24#include <VBox/sup.h>
25#include <VBox/vmm/trpm.h>
26#include <VBox/vmm/cpum.h>
27#include <VBox/vmm/pdmapi.h>
28#include <VBox/vmm/pgm.h>
29#ifdef VBOX_WITH_NEM_R0
30# include <VBox/vmm/nem.h>
31#endif
32#include <VBox/vmm/stam.h>
33#include <VBox/vmm/tm.h>
34#include "VMMInternal.h"
35#include <VBox/vmm/vm.h>
36#include <VBox/vmm/gvm.h>
37#ifdef VBOX_WITH_PCI_PASSTHROUGH
38# include <VBox/vmm/pdmpci.h>
39#endif
40#include <VBox/vmm/apic.h>
41
42#include <VBox/vmm/gvmm.h>
43#include <VBox/vmm/gmm.h>
44#include <VBox/vmm/gim.h>
45#include <VBox/intnet.h>
46#include <VBox/vmm/hm.h>
47#include <VBox/param.h>
48#include <VBox/err.h>
49#include <VBox/version.h>
50#include <VBox/log.h>
51
52#include <iprt/asm-amd64-x86.h>
53#include <iprt/assert.h>
54#include <iprt/crc.h>
55#include <iprt/mp.h>
56#include <iprt/once.h>
57#include <iprt/stdarg.h>
58#include <iprt/string.h>
59#include <iprt/thread.h>
60#include <iprt/timer.h>
61
62#include "dtrace/VBoxVMM.h"
63
64
65#if defined(_MSC_VER) && defined(RT_ARCH_AMD64) /** @todo check this with VC7! */
66# pragma intrinsic(_AddressOfReturnAddress)
67#endif
68
69#if defined(RT_OS_DARWIN) && ARCH_BITS == 32
70# error "32-bit darwin is no longer supported. Go back to 4.3 or earlier!"
71#endif
72
73
74
75/*********************************************************************************************************************************
76* Defined Constants And Macros *
77*********************************************************************************************************************************/
78/** @def VMM_CHECK_SMAP_SETUP
79 * SMAP check setup. */
80/** @def VMM_CHECK_SMAP_CHECK
81 * Checks that the AC flag is set if SMAP is enabled. If AC is not set,
82 * it will be logged and @a a_BadExpr is executed. */
83/** @def VMM_CHECK_SMAP_CHECK2
84 * Checks that the AC flag is set if SMAP is enabled. If AC is not set, it will
85 * be logged, written to the VM's assertion text buffer, and @a a_BadExpr is
86 * executed. */
87#if defined(VBOX_STRICT) || 1
88# define VMM_CHECK_SMAP_SETUP() uint32_t const fKernelFeatures = SUPR0GetKernelFeatures()
89# define VMM_CHECK_SMAP_CHECK(a_BadExpr) \
90 do { \
91 if (fKernelFeatures & SUPKERNELFEATURES_SMAP) \
92 { \
93 RTCCUINTREG fEflCheck = ASMGetFlags(); \
94 if (RT_LIKELY(fEflCheck & X86_EFL_AC)) \
95 { /* likely */ } \
96 else \
97 { \
98 SUPR0Printf("%s, line %d: EFLAGS.AC is clear! (%#x)\n", __FUNCTION__, __LINE__, (uint32_t)fEflCheck); \
99 a_BadExpr; \
100 } \
101 } \
102 } while (0)
103# define VMM_CHECK_SMAP_CHECK2(a_pVM, a_BadExpr) \
104 do { \
105 if (fKernelFeatures & SUPKERNELFEATURES_SMAP) \
106 { \
107 RTCCUINTREG fEflCheck = ASMGetFlags(); \
108 if (RT_LIKELY(fEflCheck & X86_EFL_AC)) \
109 { /* likely */ } \
110 else \
111 { \
112 SUPR0BadContext((a_pVM) ? (a_pVM)->pSession : NULL, __FILE__, __LINE__, "EFLAGS.AC is zero!"); \
113 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg1, sizeof(pVM->vmm.s.szRing0AssertMsg1), \
114 "%s, line %d: EFLAGS.AC is clear! (%#x)\n", __FUNCTION__, __LINE__, (uint32_t)fEflCheck); \
115 a_BadExpr; \
116 } \
117 } \
118 } while (0)
119#else
120# define VMM_CHECK_SMAP_SETUP() uint32_t const fKernelFeatures = 0
121# define VMM_CHECK_SMAP_CHECK(a_BadExpr) NOREF(fKernelFeatures)
122# define VMM_CHECK_SMAP_CHECK2(a_pVM, a_BadExpr) NOREF(fKernelFeatures)
123#endif
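/*
 * Illustrative usage sketch (not part of the original file): how a ring-0
 * entry point is expected to combine the three macros above. The function and
 * helper names are hypothetical; the pattern mirrors ModuleInit() and
 * VMMR0EntryFast() further down.
 */
#if 0
static int vmmR0HypotheticalWorker(PVM pVM)
{
    VMM_CHECK_SMAP_SETUP();                                     /* Capture SUPR0GetKernelFeatures() once per function. */
    VMM_CHECK_SMAP_CHECK(return VERR_VMM_SMAP_BUT_AC_CLEAR);    /* Fail fast if EFLAGS.AC was cleared behind our back. */

    int rc = vmmR0HypotheticalHelper(pVM);                      /* Hypothetical ring-0 work. */

    VMM_CHECK_SMAP_CHECK2(pVM, rc = VERR_VMM_RING0_ASSERTION);  /* Variant that also records the failure in the VM. */
    return rc;
}
#endif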
124
125
126/*********************************************************************************************************************************
127* Internal Functions *
128*********************************************************************************************************************************/
129RT_C_DECLS_BEGIN
130#if defined(RT_ARCH_X86) && (defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD))
131extern uint64_t __udivdi3(uint64_t, uint64_t);
132extern uint64_t __umoddi3(uint64_t, uint64_t);
133#endif
134RT_C_DECLS_END
135
136
137/*********************************************************************************************************************************
138* Global Variables *
139*********************************************************************************************************************************/
140/** Drag in necessary library bits.
141 * The runtime lives here (in VMMR0.r0) and VBoxDD*R0.r0 links against us. */
142PFNRT g_VMMR0Deps[] =
143{
144 (PFNRT)RTCrc32,
145 (PFNRT)RTOnce,
146#if defined(RT_ARCH_X86) && (defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD))
147 (PFNRT)__udivdi3,
148 (PFNRT)__umoddi3,
149#endif
150 NULL
151};
152
153#ifdef RT_OS_SOLARIS
154/* Dependency information for the native Solaris loader. */
155extern "C" { char _depends_on[] = "vboxdrv"; }
156#endif
157
158/** The result of SUPR0GetRawModeUsability(), set by ModuleInit(). */
159int g_rcRawModeUsability = VINF_SUCCESS;
160
161
162/**
163 * Initialize the module.
164 * This is called when we're first loaded.
165 *
166 * @returns 0 on success.
167 * @returns VBox status code on failure.
168 * @param hMod Image handle for use in APIs.
169 */
170DECLEXPORT(int) ModuleInit(void *hMod)
171{
172 VMM_CHECK_SMAP_SETUP();
173 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
174
175#ifdef VBOX_WITH_DTRACE_R0
176 /*
177 * The first thing to do is register the static tracepoints.
178 * (Deregistration is automatic.)
179 */
180 int rc2 = SUPR0TracerRegisterModule(hMod, &g_VTGObjHeader);
181 if (RT_FAILURE(rc2))
182 return rc2;
183#endif
184 LogFlow(("ModuleInit:\n"));
185
186#ifdef VBOX_WITH_64ON32_CMOS_DEBUG
187 /*
188 * Display the CMOS debug code.
189 */
190 ASMOutU8(0x72, 0x03);
191 uint8_t bDebugCode = ASMInU8(0x73);
192 LogRel(("CMOS Debug Code: %#x (%d)\n", bDebugCode, bDebugCode));
193 RTLogComPrintf("CMOS Debug Code: %#x (%d)\n", bDebugCode, bDebugCode);
194#endif
195
196 /*
197 * Initialize the VMM, GVMM, GMM, HM, PGM (Darwin) and INTNET.
198 */
199 int rc = vmmInitFormatTypes();
200 if (RT_SUCCESS(rc))
201 {
202 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
203 rc = GVMMR0Init();
204 if (RT_SUCCESS(rc))
205 {
206 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
207 rc = GMMR0Init();
208 if (RT_SUCCESS(rc))
209 {
210 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
211 rc = HMR0Init();
212 if (RT_SUCCESS(rc))
213 {
214 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
215 rc = PGMRegisterStringFormatTypes();
216 if (RT_SUCCESS(rc))
217 {
218 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
219#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
220 rc = PGMR0DynMapInit();
221#endif
222 if (RT_SUCCESS(rc))
223 {
224 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
225 rc = IntNetR0Init();
226 if (RT_SUCCESS(rc))
227 {
228#ifdef VBOX_WITH_PCI_PASSTHROUGH
229 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
230 rc = PciRawR0Init();
231#endif
232 if (RT_SUCCESS(rc))
233 {
234 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
235 rc = CPUMR0ModuleInit();
236 if (RT_SUCCESS(rc))
237 {
238#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
239 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
240 rc = vmmR0TripleFaultHackInit();
241 if (RT_SUCCESS(rc))
242#endif
243 {
244 VMM_CHECK_SMAP_CHECK(rc = VERR_VMM_SMAP_BUT_AC_CLEAR);
245 if (RT_SUCCESS(rc))
246 {
247 g_rcRawModeUsability = SUPR0GetRawModeUsability();
248 if (g_rcRawModeUsability != VINF_SUCCESS)
249 SUPR0Printf("VMMR0!ModuleInit: SUPR0GetRawModeUsability -> %Rrc\n",
250 g_rcRawModeUsability);
251 LogFlow(("ModuleInit: returns success\n"));
252 return VINF_SUCCESS;
253 }
254 }
255
256 /*
257 * Bail out.
258 */
259#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
260 vmmR0TripleFaultHackTerm();
261#endif
262 }
263 else
264 LogRel(("ModuleInit: CPUMR0ModuleInit -> %Rrc\n", rc));
265#ifdef VBOX_WITH_PCI_PASSTHROUGH
266 PciRawR0Term();
267#endif
268 }
269 else
270 LogRel(("ModuleInit: PciRawR0Init -> %Rrc\n", rc));
271 IntNetR0Term();
272 }
273 else
274 LogRel(("ModuleInit: IntNetR0Init -> %Rrc\n", rc));
275#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
276 PGMR0DynMapTerm();
277#endif
278 }
279 else
280 LogRel(("ModuleInit: PGMR0DynMapInit -> %Rrc\n", rc));
281 PGMDeregisterStringFormatTypes();
282 }
283 else
284 LogRel(("ModuleInit: PGMRegisterStringFormatTypes -> %Rrc\n", rc));
285 HMR0Term();
286 }
287 else
288 LogRel(("ModuleInit: HMR0Init -> %Rrc\n", rc));
289 GMMR0Term();
290 }
291 else
292 LogRel(("ModuleInit: GMMR0Init -> %Rrc\n", rc));
293 GVMMR0Term();
294 }
295 else
296 LogRel(("ModuleInit: GVMMR0Init -> %Rrc\n", rc));
297 vmmTermFormatTypes();
298 }
299 else
300 LogRel(("ModuleInit: vmmInitFormatTypes -> %Rrc\n", rc));
301
302 LogFlow(("ModuleInit: failed %Rrc\n", rc));
303 return rc;
304}
305
306
307/**
308 * Terminate the module.
309 * This is called when we're finally unloaded.
310 *
311 * @param hMod Image handle for use in APIs.
312 */
313DECLEXPORT(void) ModuleTerm(void *hMod)
314{
315 NOREF(hMod);
316 LogFlow(("ModuleTerm:\n"));
317
318 /*
319 * Terminate the CPUM module (Local APIC cleanup).
320 */
321 CPUMR0ModuleTerm();
322
323 /*
324 * Terminate the internal network service.
325 */
326 IntNetR0Term();
327
328 /*
329 * PGM (Darwin), HM and PciRaw global cleanup.
330 */
331#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
332 PGMR0DynMapTerm();
333#endif
334#ifdef VBOX_WITH_PCI_PASSTHROUGH
335 PciRawR0Term();
336#endif
337 PGMDeregisterStringFormatTypes();
338 HMR0Term();
339#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
340 vmmR0TripleFaultHackTerm();
341#endif
342
343 /*
344 * Destroy the GMM and GVMM instances.
345 */
346 GMMR0Term();
347 GVMMR0Term();
348
349 vmmTermFormatTypes();
350
351 LogFlow(("ModuleTerm: returns\n"));
352}
353
354
355/**
356 * Initializes the R0 driver for a particular VM instance.
357 *
358 * @returns VBox status code.
359 *
360 * @param pGVM The global (ring-0) VM structure.
361 * @param pVM The cross context VM structure.
362 * @param uSvnRev The SVN revision of the ring-3 part.
363 * @param uBuildType Build type indicator.
364 * @thread EMT(0)
365 */
366static int vmmR0InitVM(PGVM pGVM, PVM pVM, uint32_t uSvnRev, uint32_t uBuildType)
367{
368 VMM_CHECK_SMAP_SETUP();
369 VMM_CHECK_SMAP_CHECK(return VERR_VMM_SMAP_BUT_AC_CLEAR);
370
371 /*
372 * Match the SVN revisions and build type.
373 */
374 if (uSvnRev != VMMGetSvnRev())
375 {
376 LogRel(("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev()));
377 SUPR0Printf("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev());
378 return VERR_VMM_R0_VERSION_MISMATCH;
379 }
380 if (uBuildType != vmmGetBuildType())
381 {
382 LogRel(("VMMR0InitVM: Build type mismatch, r3=%#x r0=%#x\n", uBuildType, vmmGetBuildType()));
383 SUPR0Printf("VMMR0InitVM: Build type mismatch, r3=%#x r0=%#x\n", uBuildType, vmmGetBuildType());
384 return VERR_VMM_R0_VERSION_MISMATCH;
385 }
386
387 int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, 0 /*idCpu*/);
388 if (RT_FAILURE(rc))
389 return rc;
390
391
392#ifdef LOG_ENABLED
393 /*
394 * Register the EMT R0 logger instance for VCPU 0.
395 */
396 PVMCPU pVCpu = &pVM->aCpus[0];
397
398 PVMMR0LOGGER pR0Logger = pVCpu->vmm.s.pR0LoggerR0;
399 if (pR0Logger)
400 {
401# if 0 /* testing of the logger. */
402 LogCom(("vmmR0InitVM: before %p\n", RTLogDefaultInstance()));
403 LogCom(("vmmR0InitVM: pfnFlush=%p actual=%p\n", pR0Logger->Logger.pfnFlush, vmmR0LoggerFlush));
404 LogCom(("vmmR0InitVM: pfnLogger=%p actual=%p\n", pR0Logger->Logger.pfnLogger, vmmR0LoggerWrapper));
405 LogCom(("vmmR0InitVM: offScratch=%d fFlags=%#x fDestFlags=%#x\n", pR0Logger->Logger.offScratch, pR0Logger->Logger.fFlags, pR0Logger->Logger.fDestFlags));
406
407 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
408 LogCom(("vmmR0InitVM: after %p reg\n", RTLogDefaultInstance()));
409 RTLogSetDefaultInstanceThread(NULL, pVM->pSession);
410 LogCom(("vmmR0InitVM: after %p dereg\n", RTLogDefaultInstance()));
411
412 pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
413 LogCom(("vmmR0InitVM: returned successfully from direct logger call.\n"));
414 pR0Logger->Logger.pfnFlush(&pR0Logger->Logger);
415 LogCom(("vmmR0InitVM: returned successfully from direct flush call.\n"));
416
417 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
418 LogCom(("vmmR0InitVM: after %p reg2\n", RTLogDefaultInstance()));
419 pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
420 LogCom(("vmmR0InitVM: returned successfully from direct logger call (2). offScratch=%d\n", pR0Logger->Logger.offScratch));
421 RTLogSetDefaultInstanceThread(NULL, pVM->pSession);
422 LogCom(("vmmR0InitVM: after %p dereg2\n", RTLogDefaultInstance()));
423
424 RTLogLoggerEx(&pR0Logger->Logger, 0, ~0U, "hello ring-0 logger (RTLogLoggerEx)\n");
425 LogCom(("vmmR0InitVM: RTLogLoggerEx returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));
426
427 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
428 RTLogPrintf("hello ring-0 logger (RTLogPrintf)\n");
429 LogCom(("vmmR0InitVM: RTLogPrintf returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));
430# endif
431 Log(("Switching to per-thread logging instance %p (key=%p)\n", &pR0Logger->Logger, pVM->pSession));
432 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
433 pR0Logger->fRegistered = true;
434 }
435#endif /* LOG_ENABLED */
436
437 /*
438 * Check whether the host supports high resolution timers.
439 */
440 if ( pVM->vmm.s.fUsePeriodicPreemptionTimers
441 && !RTTimerCanDoHighResolution())
442 pVM->vmm.s.fUsePeriodicPreemptionTimers = false;
443
444 /*
445 * Initialize the per VM data for GVMM and GMM.
446 */
447 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
448 rc = GVMMR0InitVM(pGVM);
449// if (RT_SUCCESS(rc))
450// rc = GMMR0InitPerVMData(pVM);
451 if (RT_SUCCESS(rc))
452 {
453 /*
454 * Init HM, CPUM and PGM (Darwin only).
455 */
456 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
457 rc = HMR0InitVM(pVM);
458 if (RT_SUCCESS(rc))
459 VMM_CHECK_SMAP_CHECK2(pVM, rc = VERR_VMM_RING0_ASSERTION); /* CPUMR0InitVM will otherwise panic the host */
460 if (RT_SUCCESS(rc))
461 {
462 rc = CPUMR0InitVM(pVM);
463 if (RT_SUCCESS(rc))
464 {
465 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
466#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
467 rc = PGMR0DynMapInitVM(pVM);
468#endif
469 if (RT_SUCCESS(rc))
470 {
471 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
472#ifdef VBOX_WITH_PCI_PASSTHROUGH
473 rc = PciRawR0InitVM(pGVM, pVM);
474#endif
475 if (RT_SUCCESS(rc))
476 {
477 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
478 rc = GIMR0InitVM(pVM);
479 if (RT_SUCCESS(rc))
480 {
481 VMM_CHECK_SMAP_CHECK2(pVM, rc = VERR_VMM_RING0_ASSERTION);
482 if (RT_SUCCESS(rc))
483 {
484 GVMMR0DoneInitVM(pGVM);
485 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
486 return rc;
487 }
488
489 /* Bail out. */
490 GIMR0TermVM(pVM);
491 }
492#ifdef VBOX_WITH_PCI_PASSTHROUGH
493 PciRawR0TermVM(pGVM, pVM);
494#endif
495 }
496 }
497 }
498 HMR0TermVM(pVM);
499 }
500 }
501
502 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pVM->pSession);
503 return rc;
504}
505
506
507/**
508 * Terminates the R0 bits for a particular VM instance.
509 *
510 * This is normally called by ring-3 as part of the VM termination process, but
511 * may alternatively be called during the support driver session cleanup when
512 * the VM object is destroyed (see GVMM).
513 *
514 * @returns VBox status code.
515 *
516 * @param pGVM The global (ring-0) VM structure.
517 * @param pVM The cross context VM structure.
518 * @param idCpu Set to 0 if EMT(0) or NIL_VMCPUID if session cleanup
519 * thread.
520 * @thread EMT(0) or session clean up thread.
521 */
522VMMR0_INT_DECL(int) VMMR0TermVM(PGVM pGVM, PVM pVM, VMCPUID idCpu)
523{
524 /*
525 * Check EMT(0) claim if we're called from userland.
526 */
527 if (idCpu != NIL_VMCPUID)
528 {
529 AssertReturn(idCpu == 0, VERR_INVALID_CPU_ID);
530 int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
531 if (RT_FAILURE(rc))
532 return rc;
533 }
534
535#ifdef VBOX_WITH_PCI_PASSTHROUGH
536 PciRawR0TermVM(pGVM, pVM);
537#endif
538
539 /*
540 * Tell GVMM what we're up to and check that we only do this once.
541 */
542 if (GVMMR0DoingTermVM(pGVM))
543 {
544 GIMR0TermVM(pVM);
545
546 /** @todo I wish to call PGMR0PhysFlushHandyPages(pVM, &pVM->aCpus[idCpu])
547 * here to make sure we don't leak any shared pages if we crash... */
548#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
549 PGMR0DynMapTermVM(pVM);
550#endif
551 HMR0TermVM(pVM);
552 }
553
554 /*
555 * Deregister the logger.
556 */
557 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pVM->pSession);
558 return VINF_SUCCESS;
559}
560
561
562/**
563 * VMM ring-0 thread-context callback.
564 *
565 * This does common HM state updating and calls the HM-specific thread-context
566 * callback.
567 *
568 * @param enmEvent The thread-context event.
569 * @param pvUser Opaque pointer to the VMCPU.
570 *
571 * @thread EMT(pvUser)
572 */
573static DECLCALLBACK(void) vmmR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, void *pvUser)
574{
575 PVMCPU pVCpu = (PVMCPU)pvUser;
576
577 switch (enmEvent)
578 {
579 case RTTHREADCTXEVENT_IN:
580 {
581 /*
582 * Linux may call us with preemption enabled (really!) but technically we
583 * cannot get preempted here, otherwise we end up in an infinite recursion
584 * scenario (i.e. preempted in resume hook -> preempt hook -> resume hook...
585 * ad infinitum). Let's just disable preemption for now...
586 */
587 /** @todo r=bird: I don't believe the above. The Linux code is clearly enabling
588 * preemption after doing the callout (one or two functions up the
589 * call chain). */
590 /** @todo r=ramshankar: See @bugref{5313#c30}. */
591 RTTHREADPREEMPTSTATE ParanoidPreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
592 RTThreadPreemptDisable(&ParanoidPreemptState);
593
594 /* We need to update the VCPU <-> host CPU mapping. */
595 RTCPUID idHostCpu;
596 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
597 pVCpu->iHostCpuSet = iHostCpuSet;
598 ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
599
600 /* In the very unlikely event that the GIP delta for the CPU we're
601 rescheduled on needs calculating, try to force a return to ring-3.
602 We unfortunately cannot do the measurements right here. */
603 if (RT_UNLIKELY(SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
604 VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
605
606 /* Invoke the HM-specific thread-context callback. */
607 HMR0ThreadCtxCallback(enmEvent, pvUser);
608
609 /* Restore preemption. */
610 RTThreadPreemptRestore(&ParanoidPreemptState);
611 break;
612 }
613
614 case RTTHREADCTXEVENT_OUT:
615 {
616 /* Invoke the HM-specific thread-context callback. */
617 HMR0ThreadCtxCallback(enmEvent, pvUser);
618
619 /*
620 * Sigh. See VMMGetCpu() used by VMCPU_ASSERT_EMT(). We cannot let several VCPUs
621 * have the same host CPU associated with them.
622 */
623 pVCpu->iHostCpuSet = UINT32_MAX;
624 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
625 break;
626 }
627
628 default:
629 /* Invoke the HM-specific thread-context callback. */
630 HMR0ThreadCtxCallback(enmEvent, pvUser);
631 break;
632 }
633}
634
635
636/**
637 * Creates thread switching hook for the current EMT thread.
638 *
639 * This is called by GVMMR0CreateVM and GVMMR0RegisterVCpu. If the host
640 * platform does not implement switcher hooks, no hooks will be created and the
641 * member is set to NIL_RTTHREADCTXHOOK.
642 *
643 * @returns VBox status code.
644 * @param pVCpu The cross context virtual CPU structure.
645 * @thread EMT(pVCpu)
646 */
647VMMR0_INT_DECL(int) VMMR0ThreadCtxHookCreateForEmt(PVMCPU pVCpu)
648{
649 VMCPU_ASSERT_EMT(pVCpu);
650 Assert(pVCpu->vmm.s.hCtxHook == NIL_RTTHREADCTXHOOK);
651
652#if 1 /* To disable this stuff change to zero. */
653 int rc = RTThreadCtxHookCreate(&pVCpu->vmm.s.hCtxHook, 0, vmmR0ThreadCtxCallback, pVCpu);
654 if (RT_SUCCESS(rc))
655 return rc;
656#else
657 RT_NOREF(vmmR0ThreadCtxCallback);
658 int rc = VERR_NOT_SUPPORTED;
659#endif
660
661 pVCpu->vmm.s.hCtxHook = NIL_RTTHREADCTXHOOK;
662 if (rc == VERR_NOT_SUPPORTED)
663 return VINF_SUCCESS;
664
665 LogRelMax(32, ("RTThreadCtxHookCreate failed! rc=%Rrc pVCpu=%p idCpu=%RU32\n", rc, pVCpu, pVCpu->idCpu));
666 return VINF_SUCCESS; /* Just ignore it, we can live without context hooks. */
667}
668
669
670/**
671 * Destroys the thread switching hook for the specified VCPU.
672 *
673 * @param pVCpu The cross context virtual CPU structure.
674 * @remarks Can be called from any thread.
675 */
676VMMR0_INT_DECL(void) VMMR0ThreadCtxHookDestroyForEmt(PVMCPU pVCpu)
677{
678 int rc = RTThreadCtxHookDestroy(pVCpu->vmm.s.hCtxHook);
679 AssertRC(rc);
680 pVCpu->vmm.s.hCtxHook = NIL_RTTHREADCTXHOOK;
681}
682
683
684/**
685 * Disables the thread switching hook for this VCPU (if we got one).
686 *
687 * @param pVCpu The cross context virtual CPU structure.
688 * @thread EMT(pVCpu)
689 *
690 * @remarks This also clears VMCPU::idHostCpu, so the mapping is invalid after
691 * this call. This means you have to be careful with what you do!
692 */
693VMMR0_INT_DECL(void) VMMR0ThreadCtxHookDisable(PVMCPU pVCpu)
694{
695 /*
696 * Clear the VCPU <-> host CPU mapping as we've left HM context.
697 * @bugref{7726#c19} explains the need for this trick:
698 *
699 * hmR0VmxCallRing3Callback/hmR0SvmCallRing3Callback &
700 * hmR0VmxLeaveSession/hmR0SvmLeaveSession disables context hooks during
701 * longjmp & normal return to ring-3, which opens a window where we may be
702 * rescheduled without changing VMCPU::idHostCpu and cause confusion if
703 * the CPU starts executing a different EMT. Both functions first disable
704 * preemption and then call HMR0LeaveCpu, which invalidates idHostCpu, leaving
705 * an opening for getting preempted.
706 */
707 /** @todo Make HM not need this API! Then we could leave the hooks enabled
708 * all the time. */
709 /** @todo move this into the context hook disabling if(). */
710 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
711
712 /*
713 * Disable the context hook, if we got one.
714 */
715 if (pVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
716 {
717 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
718 int rc = RTThreadCtxHookDisable(pVCpu->vmm.s.hCtxHook);
719 AssertRC(rc);
720 }
721}
722
723
724/**
725 * Internal version of VMMR0ThreadCtxHookIsEnabled.
726 *
727 * @returns true if registered, false otherwise.
728 * @param pVCpu The cross context virtual CPU structure.
729 */
730DECLINLINE(bool) vmmR0ThreadCtxHookIsEnabled(PVMCPU pVCpu)
731{
732 return RTThreadCtxHookIsEnabled(pVCpu->vmm.s.hCtxHook);
733}
734
735
736/**
737 * Whether thread-context hooks are registered for this VCPU.
738 *
739 * @returns true if registered, false otherwise.
740 * @param pVCpu The cross context virtual CPU structure.
741 */
742VMMR0_INT_DECL(bool) VMMR0ThreadCtxHookIsEnabled(PVMCPU pVCpu)
743{
744 return vmmR0ThreadCtxHookIsEnabled(pVCpu);
745}
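/*
 * Illustrative lifecycle sketch (not part of the original file): how the
 * thread-context hook API above is typically driven for an EMT. The wrapper
 * name is hypothetical; the individual calls mirror what GVMMR0RegisterVCpu,
 * the VMMR0_DO_HM_RUN path and HM tear-down do elsewhere in this file.
 */
#if 0
static void vmmR0HypotheticalEmtLifecycle(PVMCPU pVCpu)
{
    int rc = VMMR0ThreadCtxHookCreateForEmt(pVCpu);     /* Once per EMT; quietly succeeds with a NIL hook if unsupported. */
    AssertRC(rc);

    if (pVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
        RTThreadCtxHookEnable(pVCpu->vmm.s.hCtxHook);   /* Armed while running guest code in HM context. */

    /* ... guest execution; vmmR0ThreadCtxCallback fires on preemption/resumption ... */

    VMMR0ThreadCtxHookDisable(pVCpu);                   /* Also invalidates the VCPU <-> host CPU mapping. */
    VMMR0ThreadCtxHookDestroyForEmt(pVCpu);             /* Tear down when the EMT goes away. */
}
#endif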
746
747
748#ifdef VBOX_WITH_STATISTICS
749/**
750 * Record return code statistics
751 * @param pVM The cross context VM structure.
752 * @param pVCpu The cross context virtual CPU structure.
753 * @param rc The status code.
754 */
755static void vmmR0RecordRC(PVM pVM, PVMCPU pVCpu, int rc)
756{
757 /*
758 * Collect statistics.
759 */
760 switch (rc)
761 {
762 case VINF_SUCCESS:
763 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetNormal);
764 break;
765 case VINF_EM_RAW_INTERRUPT:
766 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterrupt);
767 break;
768 case VINF_EM_RAW_INTERRUPT_HYPER:
769 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptHyper);
770 break;
771 case VINF_EM_RAW_GUEST_TRAP:
772 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGuestTrap);
773 break;
774 case VINF_EM_RAW_RING_SWITCH:
775 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitch);
776 break;
777 case VINF_EM_RAW_RING_SWITCH_INT:
778 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitchInt);
779 break;
780 case VINF_EM_RAW_STALE_SELECTOR:
781 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetStaleSelector);
782 break;
783 case VINF_EM_RAW_IRET_TRAP:
784 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIRETTrap);
785 break;
786 case VINF_IOM_R3_IOPORT_READ:
787 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIORead);
788 break;
789 case VINF_IOM_R3_IOPORT_WRITE:
790 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOWrite);
791 break;
792 case VINF_IOM_R3_IOPORT_COMMIT_WRITE:
793 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOCommitWrite);
794 break;
795 case VINF_IOM_R3_MMIO_READ:
796 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIORead);
797 break;
798 case VINF_IOM_R3_MMIO_WRITE:
799 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOWrite);
800 break;
801 case VINF_IOM_R3_MMIO_COMMIT_WRITE:
802 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOCommitWrite);
803 break;
804 case VINF_IOM_R3_MMIO_READ_WRITE:
805 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOReadWrite);
806 break;
807 case VINF_PATM_HC_MMIO_PATCH_READ:
808 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchRead);
809 break;
810 case VINF_PATM_HC_MMIO_PATCH_WRITE:
811 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchWrite);
812 break;
813 case VINF_CPUM_R3_MSR_READ:
814 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMSRRead);
815 break;
816 case VINF_CPUM_R3_MSR_WRITE:
817 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMSRWrite);
818 break;
819 case VINF_EM_RAW_EMULATE_INSTR:
820 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetEmulate);
821 break;
822 case VINF_EM_RAW_EMULATE_IO_BLOCK:
823 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOBlockEmulate);
824 break;
825 case VINF_PATCH_EMULATE_INSTR:
826 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchEmulate);
827 break;
828 case VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT:
829 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetLDTFault);
830 break;
831 case VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT:
832 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGDTFault);
833 break;
834 case VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT:
835 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIDTFault);
836 break;
837 case VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT:
838 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTSSFault);
839 break;
840 case VINF_CSAM_PENDING_ACTION:
841 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCSAMTask);
842 break;
843 case VINF_PGM_SYNC_CR3:
844 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetSyncCR3);
845 break;
846 case VINF_PATM_PATCH_INT3:
847 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchInt3);
848 break;
849 case VINF_PATM_PATCH_TRAP_PF:
850 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchPF);
851 break;
852 case VINF_PATM_PATCH_TRAP_GP:
853 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchGP);
854 break;
855 case VINF_PATM_PENDING_IRQ_AFTER_IRET:
856 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchIretIRQ);
857 break;
858 case VINF_EM_RESCHEDULE_REM:
859 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRescheduleREM);
860 break;
861 case VINF_EM_RAW_TO_R3:
862 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Total);
863 if (VM_FF_IS_PENDING(pVM, VM_FF_TM_VIRTUAL_SYNC))
864 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3TMVirt);
865 else if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_NEED_HANDY_PAGES))
866 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3HandyPages);
867 else if (VM_FF_IS_PENDING(pVM, VM_FF_PDM_QUEUES))
868 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3PDMQueues);
869 else if (VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS))
870 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Rendezvous);
871 else if (VM_FF_IS_PENDING(pVM, VM_FF_PDM_DMA))
872 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3DMA);
873 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TIMER))
874 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Timer);
875 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PDM_CRITSECT))
876 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3CritSect);
877 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TO_R3))
878 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3FF);
879 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_IEM))
880 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Iem);
881 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_IOM))
882 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Iom);
883 else
884 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Unknown);
885 break;
886
887 case VINF_EM_RAW_TIMER_PENDING:
888 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTimerPending);
889 break;
890 case VINF_EM_RAW_INTERRUPT_PENDING:
891 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptPending);
892 break;
893 case VINF_VMM_CALL_HOST:
894 switch (pVCpu->vmm.s.enmCallRing3Operation)
895 {
896 case VMMCALLRING3_PDM_CRIT_SECT_ENTER:
897 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPDMCritSectEnter);
898 break;
899 case VMMCALLRING3_PDM_LOCK:
900 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPDMLock);
901 break;
902 case VMMCALLRING3_PGM_POOL_GROW:
903 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMPoolGrow);
904 break;
905 case VMMCALLRING3_PGM_LOCK:
906 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMLock);
907 break;
908 case VMMCALLRING3_PGM_MAP_CHUNK:
909 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMMapChunk);
910 break;
911 case VMMCALLRING3_PGM_ALLOCATE_HANDY_PAGES:
912 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMAllocHandy);
913 break;
914 case VMMCALLRING3_REM_REPLAY_HANDLER_NOTIFICATIONS:
915 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallRemReplay);
916 break;
917 case VMMCALLRING3_VMM_LOGGER_FLUSH:
918 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallLogFlush);
919 break;
920 case VMMCALLRING3_VM_SET_ERROR:
921 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallVMSetError);
922 break;
923 case VMMCALLRING3_VM_SET_RUNTIME_ERROR:
924 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallVMSetRuntimeError);
925 break;
926 case VMMCALLRING3_VM_R0_ASSERTION:
927 default:
928 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCallRing3);
929 break;
930 }
931 break;
932 case VINF_PATM_DUPLICATE_FUNCTION:
933 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPATMDuplicateFn);
934 break;
935 case VINF_PGM_CHANGE_MODE:
936 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMChangeMode);
937 break;
938 case VINF_PGM_POOL_FLUSH_PENDING:
939 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMFlushPending);
940 break;
941 case VINF_EM_PENDING_REQUEST:
942 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPendingRequest);
943 break;
944 case VINF_EM_HM_PATCH_TPR_INSTR:
945 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchTPR);
946 break;
947 default:
948 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMisc);
949 break;
950 }
951}
952#endif /* VBOX_WITH_STATISTICS */
953
954
955/**
956 * The Ring 0 entry point, called by the fast-ioctl path.
957 *
958 * @param pGVM The global (ring-0) VM structure.
959 * @param pVM The cross context VM structure.
960 * The return code is stored in pVM->vmm.s.iLastGZRc.
961 * @param idCpu The Virtual CPU ID of the calling EMT.
962 * @param enmOperation Which operation to execute.
963 * @remarks Assume called with interrupts _enabled_.
964 */
965VMMR0DECL(void) VMMR0EntryFast(PGVM pGVM, PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation)
966{
967 /*
968 * Validation.
969 */
970 if ( idCpu < pGVM->cCpus
971 && pGVM->cCpus == pVM->cCpus)
972 { /*likely*/ }
973 else
974 {
975 SUPR0Printf("VMMR0EntryFast: Bad idCpu=%#x cCpus=%#x/%#x\n", idCpu, pGVM->cCpus, pVM->cCpus);
976 return;
977 }
978
979 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
980 PVMCPU pVCpu = &pVM->aCpus[idCpu];
981 RTNATIVETHREAD const hNativeThread = RTThreadNativeSelf();
982 if (RT_LIKELY( pGVCpu->hEMT == hNativeThread
983 && pVCpu->hNativeThreadR0 == hNativeThread))
984 { /* likely */ }
985 else
986 {
987 SUPR0Printf("VMMR0EntryFast: Bad thread idCpu=%#x hNativeSelf=%p pGVCpu->hEmt=%p pVCpu->hNativeThreadR0=%p\n",
988 idCpu, hNativeThread, pGVCpu->hEMT, pVCpu->hNativeThreadR0);
989 return;
990 }
991
992 /*
993 * SMAP fun.
994 */
995 VMM_CHECK_SMAP_SETUP();
996 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
997
998 /*
999 * Perform requested operation.
1000 */
1001 switch (enmOperation)
1002 {
1003 /*
1004 * Switch to GC and run guest raw mode code.
1005 * Disable interrupts before doing the world switch.
1006 */
1007 case VMMR0_DO_RAW_RUN:
1008 {
1009#ifdef VBOX_WITH_RAW_MODE
1010# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
1011 /* Some safety precautions first. */
1012 if (RT_UNLIKELY(!PGMGetHyperCR3(pVCpu)))
1013 {
1014 pVCpu->vmm.s.iLastGZRc = VERR_PGM_NO_CR3_SHADOW_ROOT;
1015 break;
1016 }
1017# endif
1018 if (RT_SUCCESS(g_rcRawModeUsability))
1019 { /* likely */ }
1020 else
1021 {
1022 pVCpu->vmm.s.iLastGZRc = g_rcRawModeUsability;
1023 break;
1024 }
1025
1026 /*
1027 * Disable preemption.
1028 */
1029 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
1030 RTThreadPreemptDisable(&PreemptState);
1031
1032 /*
1033 * Get the host CPU identifiers, make sure they are valid and that
1034 * we've got a TSC delta for the CPU.
1035 */
1036 RTCPUID idHostCpu;
1037 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
1038 if (RT_LIKELY( iHostCpuSet < RTCPUSET_MAX_CPUS
1039 && SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
1040 {
1041 /*
1042 * Commit the CPU identifiers and update the periodic preemption timer if it's active.
1043 */
1044# ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
1045 CPUMR0SetLApic(pVCpu, iHostCpuSet);
1046# endif
1047 pVCpu->iHostCpuSet = iHostCpuSet;
1048 ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
1049
1050 if (pVM->vmm.s.fUsePeriodicPreemptionTimers)
1051 GVMMR0SchedUpdatePeriodicPreemptionTimer(pVM, pVCpu->idHostCpu, TMCalcHostTimerFrequency(pVM, pVCpu));
1052
1053 /*
1054 * We might need to disable VT-x if the active switcher turns off paging.
1055 */
1056 bool fVTxDisabled;
1057 int rc = HMR0EnterSwitcher(pVM, pVM->vmm.s.enmSwitcher, &fVTxDisabled);
1058 if (RT_SUCCESS(rc))
1059 {
1060 /*
1061 * Disable interrupts and run raw-mode code. The loop is for efficiently
1062 * dispatching tracepoints that fired in raw-mode context.
1063 */
1064 RTCCUINTREG uFlags = ASMIntDisableFlags();
1065
1066 for (;;)
1067 {
1068 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);
1069 TMNotifyStartOfExecution(pVCpu);
1070
1071 rc = pVM->vmm.s.pfnR0ToRawMode(pVM);
1072 pVCpu->vmm.s.iLastGZRc = rc;
1073
1074 TMNotifyEndOfExecution(pVCpu);
1075 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
1076
1077 if (rc != VINF_VMM_CALL_TRACER)
1078 break;
1079 SUPR0TracerUmodProbeFire(pVM->pSession, &pVCpu->vmm.s.TracerCtx);
1080 }
1081
1082 /*
1083 * Re-enable VT-x before we dispatch any pending host interrupts and
1084 * re-enable interrupts.
1085 */
1086 HMR0LeaveSwitcher(pVM, fVTxDisabled);
1087
1088 if ( rc == VINF_EM_RAW_INTERRUPT
1089 || rc == VINF_EM_RAW_INTERRUPT_HYPER)
1090 TRPMR0DispatchHostInterrupt(pVM);
1091
1092 ASMSetFlags(uFlags);
1093
1094 /* Fire dtrace probe and collect statistics. */
1095 VBOXVMM_R0_VMM_RETURN_TO_RING3_RC(pVCpu, CPUMQueryGuestCtxPtr(pVCpu), rc);
1096# ifdef VBOX_WITH_STATISTICS
1097 STAM_COUNTER_INC(&pVM->vmm.s.StatRunRC);
1098 vmmR0RecordRC(pVM, pVCpu, rc);
1099# endif
1100 }
1101 else
1102 pVCpu->vmm.s.iLastGZRc = rc;
1103
1104 /*
1105 * Invalidate the host CPU identifiers as we restore preemption.
1106 */
1107 pVCpu->iHostCpuSet = UINT32_MAX;
1108 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1109
1110 RTThreadPreemptRestore(&PreemptState);
1111 }
1112 /*
1113 * Invalid CPU set index or TSC delta in need of measuring.
1114 */
1115 else
1116 {
1117 RTThreadPreemptRestore(&PreemptState);
1118 if (iHostCpuSet < RTCPUSET_MAX_CPUS)
1119 {
1120 int rc = SUPR0TscDeltaMeasureBySetIndex(pVM->pSession, iHostCpuSet, 0 /*fFlags*/,
1121 2 /*cMsWaitRetry*/, 5*RT_MS_1SEC /*cMsWaitThread*/,
1122 0 /*default cTries*/);
1123 if (RT_SUCCESS(rc) || rc == VERR_CPU_OFFLINE)
1124 pVCpu->vmm.s.iLastGZRc = VINF_EM_RAW_TO_R3;
1125 else
1126 pVCpu->vmm.s.iLastGZRc = rc;
1127 }
1128 else
1129 pVCpu->vmm.s.iLastGZRc = VERR_INVALID_CPU_INDEX;
1130 }
1131
1132#else /* !VBOX_WITH_RAW_MODE */
1133 pVCpu->vmm.s.iLastGZRc = VERR_RAW_MODE_NOT_SUPPORTED;
1134#endif
1135 break;
1136 }
1137
1138 /*
1139 * Run guest code using the available hardware acceleration technology.
1140 */
1141 case VMMR0_DO_HM_RUN:
1142 {
1143 /*
1144 * Disable preemption.
1145 */
1146 Assert(!vmmR0ThreadCtxHookIsEnabled(pVCpu));
1147 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
1148 RTThreadPreemptDisable(&PreemptState);
1149
1150 /*
1151 * Get the host CPU identifiers, make sure they are valid and that
1152 * we've got a TSC delta for the CPU.
1153 */
1154 RTCPUID idHostCpu;
1155 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
1156 if (RT_LIKELY( iHostCpuSet < RTCPUSET_MAX_CPUS
1157 && SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
1158 {
1159 pVCpu->iHostCpuSet = iHostCpuSet;
1160 ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
1161
1162 /*
1163 * Update the periodic preemption timer if it's active.
1164 */
1165 if (pVM->vmm.s.fUsePeriodicPreemptionTimers)
1166 GVMMR0SchedUpdatePeriodicPreemptionTimer(pVM, pVCpu->idHostCpu, TMCalcHostTimerFrequency(pVM, pVCpu));
1167 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1168
1169#ifdef LOG_ENABLED
1170 /*
1171 * Ugly: Lazy registration of ring 0 loggers.
1172 */
1173 if (pVCpu->idCpu > 0)
1174 {
1175 PVMMR0LOGGER pR0Logger = pVCpu->vmm.s.pR0LoggerR0;
1176 if ( pR0Logger
1177 && RT_UNLIKELY(!pR0Logger->fRegistered))
1178 {
1179 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
1180 pR0Logger->fRegistered = true;
1181 }
1182 }
1183#endif
1184
1185#ifdef VMM_R0_TOUCH_FPU
1186 /*
1187 * Make sure we've got the FPU state loaded so we don't need to clear
1188 * CR0.TS and get out of sync with the host kernel when loading the guest
1189 * FPU state. @ref sec_cpum_fpu (CPUM.cpp) and @bugref{4053}.
1190 */
1191 CPUMR0TouchHostFpu();
1192#endif
1193 int rc;
1194 bool fPreemptRestored = false;
1195 if (!HMR0SuspendPending())
1196 {
1197 /*
1198 * Enable the context switching hook.
1199 */
1200 if (pVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1201 {
1202 Assert(!RTThreadCtxHookIsEnabled(pVCpu->vmm.s.hCtxHook));
1203 int rc2 = RTThreadCtxHookEnable(pVCpu->vmm.s.hCtxHook); AssertRC(rc2);
1204 }
1205
1206 /*
1207 * Enter HM context.
1208 */
1209 rc = HMR0Enter(pVM, pVCpu);
1210 if (RT_SUCCESS(rc))
1211 {
1212 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
1213
1214 /*
1215 * When preemption hooks are in place, enable preemption now that
1216 * we're in HM context.
1217 */
1218 if (vmmR0ThreadCtxHookIsEnabled(pVCpu))
1219 {
1220 fPreemptRestored = true;
1221 RTThreadPreemptRestore(&PreemptState);
1222 }
1223
1224 /*
1225 * Setup the longjmp machinery and execute guest code (calls HMR0RunGuestCode).
1226 */
1227 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1228 rc = vmmR0CallRing3SetJmp(&pVCpu->vmm.s.CallRing3JmpBufR0, HMR0RunGuestCode, pVM, pVCpu);
1229 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1230
1231 /*
1232 * Assert sanity on the way out. Using manual assertion code here as normal
1233 * assertions are going to panic the host since we're outside the setjmp/longjmp zone.
1234 */
1235 if (RT_UNLIKELY( VMCPU_GET_STATE(pVCpu) != VMCPUSTATE_STARTED_HM
1236 && RT_SUCCESS_NP(rc) && rc != VINF_VMM_CALL_HOST ))
1237 {
1238 pVM->vmm.s.szRing0AssertMsg1[0] = '\0';
1239 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2),
1240 "Got VMCPU state %d expected %d.\n", VMCPU_GET_STATE(pVCpu), VMCPUSTATE_STARTED_HM);
1241 rc = VERR_VMM_WRONG_HM_VMCPU_STATE;
1242 }
1243 /** @todo Get rid of this. HM shouldn't disable the context hook. */
1244 else if (RT_UNLIKELY(vmmR0ThreadCtxHookIsEnabled(pVCpu)))
1245 {
1246 pVM->vmm.s.szRing0AssertMsg1[0] = '\0';
1247 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2),
1248 "Thread-context hooks still enabled! VCPU=%p Id=%u rc=%d.\n", pVCpu, pVCpu->idCpu, rc);
1249 rc = VERR_INVALID_STATE;
1250 }
1251
1252 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
1253 }
1254 STAM_COUNTER_INC(&pVM->vmm.s.StatRunRC);
1255
1256 /*
1257 * Invalidate the host CPU identifiers before we disable the context
1258 * hook / restore preemption.
1259 */
1260 pVCpu->iHostCpuSet = UINT32_MAX;
1261 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1262
1263 /*
1264 * Disable context hooks. Due to unresolved cleanup issues, we
1265 * cannot leave the hooks enabled when we return to ring-3.
1266 *
1267 * Note! At the moment HM may also have disabled the hook
1268 * when we get here, but the IPRT API handles that.
1269 */
1270 if (pVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1271 {
1272 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1273 RTThreadCtxHookDisable(pVCpu->vmm.s.hCtxHook);
1274 }
1275 }
1276 /*
1277 * The system is about to go into suspend mode; go back to ring 3.
1278 */
1279 else
1280 {
1281 rc = VINF_EM_RAW_INTERRUPT;
1282 pVCpu->iHostCpuSet = UINT32_MAX;
1283 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1284 }
1285
1286 /** @todo When HM stops messing with the context hook state, we'll disable
1287 * preemption again before the RTThreadCtxHookDisable call. */
1288 if (!fPreemptRestored)
1289 RTThreadPreemptRestore(&PreemptState);
1290
1291 pVCpu->vmm.s.iLastGZRc = rc;
1292
1293 /* Fire dtrace probe and collect statistics. */
1294 VBOXVMM_R0_VMM_RETURN_TO_RING3_HM(pVCpu, CPUMQueryGuestCtxPtr(pVCpu), rc);
1295#ifdef VBOX_WITH_STATISTICS
1296 vmmR0RecordRC(pVM, pVCpu, rc);
1297#endif
1298 }
1299 /*
1300 * Invalid CPU set index or TSC delta in need of measuring.
1301 */
1302 else
1303 {
1304 pVCpu->iHostCpuSet = UINT32_MAX;
1305 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1306 RTThreadPreemptRestore(&PreemptState);
1307 if (iHostCpuSet < RTCPUSET_MAX_CPUS)
1308 {
1309 int rc = SUPR0TscDeltaMeasureBySetIndex(pVM->pSession, iHostCpuSet, 0 /*fFlags*/,
1310 2 /*cMsWaitRetry*/, 5*RT_MS_1SEC /*cMsWaitThread*/,
1311 0 /*default cTries*/);
1312 if (RT_SUCCESS(rc) || rc == VERR_CPU_OFFLINE)
1313 pVCpu->vmm.s.iLastGZRc = VINF_EM_RAW_TO_R3;
1314 else
1315 pVCpu->vmm.s.iLastGZRc = rc;
1316 }
1317 else
1318 pVCpu->vmm.s.iLastGZRc = VERR_INVALID_CPU_INDEX;
1319 }
1320 break;
1321 }
1322
1323 /*
1324 * For profiling.
1325 */
1326 case VMMR0_DO_NOP:
1327 pVCpu->vmm.s.iLastGZRc = VINF_SUCCESS;
1328 break;
1329
1330 /*
1331 * Shouldn't happen.
1332 */
1333 default:
1334 AssertMsgFailed(("%#x\n", enmOperation));
1335 pVCpu->vmm.s.iLastGZRc = VERR_NOT_SUPPORTED;
1336 break;
1337 }
1338 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1339}
1340
1341
1342/**
1343 * Validates a session or VM session argument.
1344 *
1345 * @returns true / false accordingly.
1346 * @param pVM The cross context VM structure.
1347 * @param pClaimedSession The session claim to validate.
1348 * @param pSession The session argument.
1349 */
1350DECLINLINE(bool) vmmR0IsValidSession(PVM pVM, PSUPDRVSESSION pClaimedSession, PSUPDRVSESSION pSession)
1351{
1352 /* This must be set! */
1353 if (!pSession)
1354 return false;
1355
1356 /* Only one out of the two. */
1357 if (pVM && pClaimedSession)
1358 return false;
1359 if (pVM)
1360 pClaimedSession = pVM->pSession;
1361 return pClaimedSession == pSession;
1362}
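/*
 * Illustrative examples (not part of the original file) of what the predicate
 * above accepts; pSession is the caller's support driver session:
 *
 *   vmmR0IsValidSession(NULL, pSession, pSession)      -> true   (VM-less request claiming the caller's session)
 *   vmmR0IsValidSession(pVM,  NULL,     pVM->pSession) -> true   (request tied to a VM owned by the caller)
 *   vmmR0IsValidSession(pVM,  pSession, pSession)      -> false  (only one of VM / claimed session may be given)
 *   vmmR0IsValidSession(NULL, NULL,     pSession)      -> false  (nothing to validate against)
 *   vmmR0IsValidSession(pVM,  NULL,     NULL)          -> false  (caller session must always be set)
 */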
1363
1364
1365/**
1366 * VMMR0EntryEx worker function, either called directly or, whenever possible,
1367 * called through a longjmp so we can exit safely on failure.
1368 *
1369 * @returns VBox status code.
1370 * @param pGVM The global (ring-0) VM structure.
1371 * @param pVM The cross context VM structure.
1372 * @param idCpu Virtual CPU ID argument. Must be NIL_VMCPUID if pVM
1373 * is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't.
1374 * @param enmOperation Which operation to execute.
1375 * @param pReqHdr This points to a SUPVMMR0REQHDR packet. Optional.
1376 * The support driver validates this if it's present.
1377 * @param u64Arg Some simple constant argument.
1378 * @param pSession The session of the caller.
1379 *
1380 * @remarks Assume called with interrupts _enabled_.
1381 */
1382static int vmmR0EntryExWorker(PGVM pGVM, PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation,
1383 PSUPVMMR0REQHDR pReqHdr, uint64_t u64Arg, PSUPDRVSESSION pSession)
1384{
1385 /*
1386 * Validate pGVM, pVM and idCpu for consistency and validity.
1387 */
1388 if ( pGVM != NULL
1389 || pVM != NULL)
1390 {
1391 if (RT_LIKELY( RT_VALID_PTR(pGVM)
1392 && RT_VALID_PTR(pVM)
1393 && ((uintptr_t)pVM & PAGE_OFFSET_MASK) == 0))
1394 { /* likely */ }
1395 else
1396 {
1397 SUPR0Printf("vmmR0EntryExWorker: Invalid pGVM=%p and/or pVM=%p! (op=%d)\n", pGVM, pVM, enmOperation);
1398 return VERR_INVALID_POINTER;
1399 }
1400
1401 if (RT_LIKELY(pGVM->pVM == pVM))
1402 { /* likely */ }
1403 else
1404 {
1405 SUPR0Printf("vmmR0EntryExWorker: pVM mismatch: got %p, pGVM->pVM=%p\n", pVM, pGVM->pVM);
1406 return VERR_INVALID_PARAMETER;
1407 }
1408
1409 if (RT_LIKELY(idCpu == NIL_VMCPUID || idCpu < pGVM->cCpus))
1410 { /* likely */ }
1411 else
1412 {
1413 SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu %#x (cCpus=%#x)\n", idCpu, pGVM->cCpus);
1414 return VERR_INVALID_PARAMETER;
1415 }
1416
1417 if (RT_LIKELY( pVM->enmVMState >= VMSTATE_CREATING
1418 && pVM->enmVMState <= VMSTATE_TERMINATED
1419 && pVM->cCpus == pGVM->cCpus
1420 && pVM->pSession == pSession
1421 && pVM->pVMR0 == pVM))
1422 { /* likely */ }
1423 else
1424 {
1425 SUPR0Printf("vmmR0EntryExWorker: Invalid pVM=%p:{.enmVMState=%d, .cCpus=%#x(==%#x), .pSession=%p(==%p), .pVMR0=%p(==%p)}! (op=%d)\n",
1426 pVM, pVM->enmVMState, pVM->cCpus, pGVM->cCpus, pVM->pSession, pSession, pVM->pVMR0, pVM, enmOperation);
1427 return VERR_INVALID_POINTER;
1428 }
1429 }
1430 else if (RT_LIKELY(idCpu == NIL_VMCPUID))
1431 { /* likely */ }
1432 else
1433 {
1434 SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu=%u\n", idCpu);
1435 return VERR_INVALID_PARAMETER;
1436 }
1437
1438 /*
1439 * SMAP fun.
1440 */
1441 VMM_CHECK_SMAP_SETUP();
1442 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
1443
1444 /*
1445 * Process the request.
1446 */
1447 int rc;
1448 switch (enmOperation)
1449 {
1450 /*
1451 * GVM requests
1452 */
1453 case VMMR0_DO_GVMM_CREATE_VM:
1454 if (pGVM == NULL && pVM == NULL && u64Arg == 0 && idCpu == NIL_VMCPUID)
1455 rc = GVMMR0CreateVMReq((PGVMMCREATEVMREQ)pReqHdr, pSession);
1456 else
1457 rc = VERR_INVALID_PARAMETER;
1458 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
1459 break;
1460
1461 case VMMR0_DO_GVMM_DESTROY_VM:
1462 if (pReqHdr == NULL && u64Arg == 0)
1463 rc = GVMMR0DestroyVM(pGVM, pVM);
1464 else
1465 rc = VERR_INVALID_PARAMETER;
1466 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
1467 break;
1468
1469 case VMMR0_DO_GVMM_REGISTER_VMCPU:
1470 if (pGVM != NULL && pVM != NULL)
1471 rc = GVMMR0RegisterVCpu(pGVM, pVM, idCpu);
1472 else
1473 rc = VERR_INVALID_PARAMETER;
1474 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1475 break;
1476
1477 case VMMR0_DO_GVMM_DEREGISTER_VMCPU:
1478 if (pGVM != NULL && pVM != NULL)
1479 rc = GVMMR0DeregisterVCpu(pGVM, pVM, idCpu);
1480 else
1481 rc = VERR_INVALID_PARAMETER;
1482 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1483 break;
1484
1485 case VMMR0_DO_GVMM_SCHED_HALT:
1486 if (pReqHdr)
1487 return VERR_INVALID_PARAMETER;
1488 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1489 rc = GVMMR0SchedHalt(pGVM, pVM, idCpu, u64Arg);
1490 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1491 break;
1492
1493 case VMMR0_DO_GVMM_SCHED_WAKE_UP:
1494 if (pReqHdr || u64Arg)
1495 return VERR_INVALID_PARAMETER;
1496 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1497 rc = GVMMR0SchedWakeUp(pGVM, pVM, idCpu);
1498 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1499 break;
1500
1501 case VMMR0_DO_GVMM_SCHED_POKE:
1502 if (pReqHdr || u64Arg)
1503 return VERR_INVALID_PARAMETER;
1504 rc = GVMMR0SchedPoke(pGVM, pVM, idCpu);
1505 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1506 break;
1507
1508 case VMMR0_DO_GVMM_SCHED_WAKE_UP_AND_POKE_CPUS:
1509 if (u64Arg)
1510 return VERR_INVALID_PARAMETER;
1511 rc = GVMMR0SchedWakeUpAndPokeCpusReq(pGVM, pVM, (PGVMMSCHEDWAKEUPANDPOKECPUSREQ)pReqHdr);
1512 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1513 break;
1514
1515 case VMMR0_DO_GVMM_SCHED_POLL:
1516 if (pReqHdr || u64Arg > 1)
1517 return VERR_INVALID_PARAMETER;
1518 rc = GVMMR0SchedPoll(pGVM, pVM, idCpu, !!u64Arg);
1519 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1520 break;
1521
1522 case VMMR0_DO_GVMM_QUERY_STATISTICS:
1523 if (u64Arg)
1524 return VERR_INVALID_PARAMETER;
1525 rc = GVMMR0QueryStatisticsReq(pGVM, pVM, (PGVMMQUERYSTATISTICSSREQ)pReqHdr, pSession);
1526 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1527 break;
1528
1529 case VMMR0_DO_GVMM_RESET_STATISTICS:
1530 if (u64Arg)
1531 return VERR_INVALID_PARAMETER;
1532 rc = GVMMR0ResetStatisticsReq(pGVM, pVM, (PGVMMRESETSTATISTICSSREQ)pReqHdr, pSession);
1533 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1534 break;
1535
1536 /*
1537 * Initialize the R0 part of a VM instance.
1538 */
1539 case VMMR0_DO_VMMR0_INIT:
1540 rc = vmmR0InitVM(pGVM, pVM, RT_LODWORD(u64Arg), RT_HIDWORD(u64Arg));
1541 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1542 break;
1543
1544 /*
1545 * Terminate the R0 part of a VM instance.
1546 */
1547 case VMMR0_DO_VMMR0_TERM:
1548 rc = VMMR0TermVM(pGVM, pVM, 0 /*idCpu*/);
1549 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1550 break;
1551
1552 /*
1553 * Attempt to enable HM mode and check the current setting.
1554 */
1555 case VMMR0_DO_HM_ENABLE:
1556 rc = HMR0EnableAllCpus(pVM);
1557 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1558 break;
1559
1560 /*
1561 * Setup the hardware accelerated session.
1562 */
1563 case VMMR0_DO_HM_SETUP_VM:
1564 rc = HMR0SetupVM(pVM);
1565 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1566 break;
1567
1568 /*
1569 * Switch to RC to execute Hypervisor function.
1570 */
1571 case VMMR0_DO_CALL_HYPERVISOR:
1572 {
1573#ifdef VBOX_WITH_RAW_MODE
1574 /*
1575 * Validate input / context.
1576 */
1577 if (RT_UNLIKELY(idCpu != 0))
1578 return VERR_INVALID_CPU_ID;
1579 if (RT_UNLIKELY(pVM->cCpus != 1))
1580 return VERR_INVALID_PARAMETER;
1581 PVMCPU pVCpu = &pVM->aCpus[idCpu];
1582# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
1583 if (RT_UNLIKELY(!PGMGetHyperCR3(pVCpu)))
1584 return VERR_PGM_NO_CR3_SHADOW_ROOT;
1585# endif
1586 if (RT_FAILURE(g_rcRawModeUsability))
1587 return g_rcRawModeUsability;
1588
1589 /*
1590 * Disable interrupts.
1591 */
1592 RTCCUINTREG fFlags = ASMIntDisableFlags();
1593
1594 /*
1595 * Get the host CPU identifiers, make sure they are valid and that
1596 * we've got a TSC delta for the CPU.
1597 */
1598 RTCPUID idHostCpu;
1599 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
1600 if (RT_UNLIKELY(iHostCpuSet >= RTCPUSET_MAX_CPUS))
1601 {
1602 ASMSetFlags(fFlags);
1603 return VERR_INVALID_CPU_INDEX;
1604 }
1605 if (RT_UNLIKELY(!SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
1606 {
1607 ASMSetFlags(fFlags);
1608 rc = SUPR0TscDeltaMeasureBySetIndex(pVM->pSession, iHostCpuSet, 0 /*fFlags*/,
1609 2 /*cMsWaitRetry*/, 5*RT_MS_1SEC /*cMsWaitThread*/,
1610 0 /*default cTries*/);
1611 if (RT_FAILURE(rc) && rc != VERR_CPU_OFFLINE)
1612 {
1613 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1614 return rc;
1615 }
1616 }
1617
1618 /*
1619 * Commit the CPU identifiers.
1620 */
1621# ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
1622 CPUMR0SetLApic(pVCpu, iHostCpuSet);
1623# endif
1624 pVCpu->iHostCpuSet = iHostCpuSet;
1625 ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
1626
1627 /*
1628 * We might need to disable VT-x if the active switcher turns off paging.
1629 */
1630 bool fVTxDisabled;
1631 rc = HMR0EnterSwitcher(pVM, pVM->vmm.s.enmSwitcher, &fVTxDisabled);
1632 if (RT_SUCCESS(rc))
1633 {
1634 /*
1635 * Go through the wormhole...
1636 */
1637 rc = pVM->vmm.s.pfnR0ToRawMode(pVM);
1638
1639 /*
1640 * Re-enable VT-x before we dispatch any pending host interrupts.
1641 */
1642 HMR0LeaveSwitcher(pVM, fVTxDisabled);
1643
1644 if ( rc == VINF_EM_RAW_INTERRUPT
1645 || rc == VINF_EM_RAW_INTERRUPT_HYPER)
1646 TRPMR0DispatchHostInterrupt(pVM);
1647 }
1648
1649 /*
1650 * Invalidate the host CPU identifiers as we restore interrupts.
1651 */
1652 pVCpu->iHostCpuSet = UINT32_MAX;
1653 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1654 ASMSetFlags(fFlags);
1655
1656#else /* !VBOX_WITH_RAW_MODE */
1657 rc = VERR_RAW_MODE_NOT_SUPPORTED;
1658#endif
1659 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1660 break;
1661 }
1662
1663 /*
1664 * PGM wrappers.
1665 */
1666 case VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES:
1667 if (idCpu == NIL_VMCPUID)
1668 return VERR_INVALID_CPU_ID;
1669 rc = PGMR0PhysAllocateHandyPages(pGVM, pVM, idCpu);
1670 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1671 break;
1672
1673 case VMMR0_DO_PGM_FLUSH_HANDY_PAGES:
1674 if (idCpu == NIL_VMCPUID)
1675 return VERR_INVALID_CPU_ID;
1676 rc = PGMR0PhysFlushHandyPages(pGVM, pVM, idCpu);
1677 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1678 break;
1679
1680 case VMMR0_DO_PGM_ALLOCATE_LARGE_HANDY_PAGE:
1681 if (idCpu == NIL_VMCPUID)
1682 return VERR_INVALID_CPU_ID;
1683 rc = PGMR0PhysAllocateLargeHandyPage(pGVM, pVM, idCpu);
1684 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1685 break;
1686
1687 case VMMR0_DO_PGM_PHYS_SETUP_IOMMU:
1688 if (idCpu != 0)
1689 return VERR_INVALID_CPU_ID;
1690 rc = PGMR0PhysSetupIoMmu(pGVM, pVM);
1691 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1692 break;
1693
1694 /*
1695 * GMM wrappers.
1696 */
1697 case VMMR0_DO_GMM_INITIAL_RESERVATION:
1698 if (u64Arg)
1699 return VERR_INVALID_PARAMETER;
1700 rc = GMMR0InitialReservationReq(pGVM, pVM, idCpu, (PGMMINITIALRESERVATIONREQ)pReqHdr);
1701 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1702 break;
1703
1704 case VMMR0_DO_GMM_UPDATE_RESERVATION:
1705 if (u64Arg)
1706 return VERR_INVALID_PARAMETER;
1707 rc = GMMR0UpdateReservationReq(pGVM, pVM, idCpu, (PGMMUPDATERESERVATIONREQ)pReqHdr);
1708 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1709 break;
1710
1711 case VMMR0_DO_GMM_ALLOCATE_PAGES:
1712 if (u64Arg)
1713 return VERR_INVALID_PARAMETER;
1714 rc = GMMR0AllocatePagesReq(pGVM, pVM, idCpu, (PGMMALLOCATEPAGESREQ)pReqHdr);
1715 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1716 break;
1717
1718 case VMMR0_DO_GMM_FREE_PAGES:
1719 if (u64Arg)
1720 return VERR_INVALID_PARAMETER;
1721 rc = GMMR0FreePagesReq(pGVM, pVM, idCpu, (PGMMFREEPAGESREQ)pReqHdr);
1722 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1723 break;
1724
1725 case VMMR0_DO_GMM_FREE_LARGE_PAGE:
1726 if (u64Arg)
1727 return VERR_INVALID_PARAMETER;
1728 rc = GMMR0FreeLargePageReq(pGVM, pVM, idCpu, (PGMMFREELARGEPAGEREQ)pReqHdr);
1729 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1730 break;
1731
1732 case VMMR0_DO_GMM_QUERY_HYPERVISOR_MEM_STATS:
1733 if (u64Arg)
1734 return VERR_INVALID_PARAMETER;
1735 rc = GMMR0QueryHypervisorMemoryStatsReq((PGMMMEMSTATSREQ)pReqHdr);
1736 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1737 break;
1738
1739 case VMMR0_DO_GMM_QUERY_MEM_STATS:
1740 if (idCpu == NIL_VMCPUID)
1741 return VERR_INVALID_CPU_ID;
1742 if (u64Arg)
1743 return VERR_INVALID_PARAMETER;
1744 rc = GMMR0QueryMemoryStatsReq(pGVM, pVM, idCpu, (PGMMMEMSTATSREQ)pReqHdr);
1745 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1746 break;
1747
1748 case VMMR0_DO_GMM_BALLOONED_PAGES:
1749 if (u64Arg)
1750 return VERR_INVALID_PARAMETER;
1751 rc = GMMR0BalloonedPagesReq(pGVM, pVM, idCpu, (PGMMBALLOONEDPAGESREQ)pReqHdr);
1752 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1753 break;
1754
1755 case VMMR0_DO_GMM_MAP_UNMAP_CHUNK:
1756 if (u64Arg)
1757 return VERR_INVALID_PARAMETER;
1758 rc = GMMR0MapUnmapChunkReq(pGVM, pVM, (PGMMMAPUNMAPCHUNKREQ)pReqHdr);
1759 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1760 break;
1761
1762 case VMMR0_DO_GMM_SEED_CHUNK:
1763 if (pReqHdr)
1764 return VERR_INVALID_PARAMETER;
1765 rc = GMMR0SeedChunk(pGVM, pVM, idCpu, (RTR3PTR)u64Arg);
1766 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1767 break;
1768
1769 case VMMR0_DO_GMM_REGISTER_SHARED_MODULE:
1770 if (idCpu == NIL_VMCPUID)
1771 return VERR_INVALID_CPU_ID;
1772 if (u64Arg)
1773 return VERR_INVALID_PARAMETER;
1774 rc = GMMR0RegisterSharedModuleReq(pGVM, pVM, idCpu, (PGMMREGISTERSHAREDMODULEREQ)pReqHdr);
1775 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1776 break;
1777
1778 case VMMR0_DO_GMM_UNREGISTER_SHARED_MODULE:
1779 if (idCpu == NIL_VMCPUID)
1780 return VERR_INVALID_CPU_ID;
1781 if (u64Arg)
1782 return VERR_INVALID_PARAMETER;
1783 rc = GMMR0UnregisterSharedModuleReq(pGVM, pVM, idCpu, (PGMMUNREGISTERSHAREDMODULEREQ)pReqHdr);
1784 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1785 break;
1786
1787 case VMMR0_DO_GMM_RESET_SHARED_MODULES:
1788 if (idCpu == NIL_VMCPUID)
1789 return VERR_INVALID_CPU_ID;
1790 if ( u64Arg
1791 || pReqHdr)
1792 return VERR_INVALID_PARAMETER;
1793 rc = GMMR0ResetSharedModules(pGVM, pVM, idCpu);
1794 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1795 break;
1796
1797#ifdef VBOX_WITH_PAGE_SHARING
1798 case VMMR0_DO_GMM_CHECK_SHARED_MODULES:
1799 {
1800 if (idCpu == NIL_VMCPUID)
1801 return VERR_INVALID_CPU_ID;
1802 if ( u64Arg
1803 || pReqHdr)
1804 return VERR_INVALID_PARAMETER;
1805 rc = GMMR0CheckSharedModules(pGVM, pVM, idCpu);
1806 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1807 break;
1808 }
1809#endif
1810
1811#if defined(VBOX_STRICT) && HC_ARCH_BITS == 64
1812 case VMMR0_DO_GMM_FIND_DUPLICATE_PAGE:
1813 if (u64Arg)
1814 return VERR_INVALID_PARAMETER;
1815 rc = GMMR0FindDuplicatePageReq(pGVM, pVM, (PGMMFINDDUPLICATEPAGEREQ)pReqHdr);
1816 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1817 break;
1818#endif
1819
1820 case VMMR0_DO_GMM_QUERY_STATISTICS:
1821 if (u64Arg)
1822 return VERR_INVALID_PARAMETER;
1823 rc = GMMR0QueryStatisticsReq(pGVM, pVM, (PGMMQUERYSTATISTICSSREQ)pReqHdr);
1824 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1825 break;
1826
1827 case VMMR0_DO_GMM_RESET_STATISTICS:
1828 if (u64Arg)
1829 return VERR_INVALID_PARAMETER;
1830 rc = GMMR0ResetStatisticsReq(pGVM, pVM, (PGMMRESETSTATISTICSSREQ)pReqHdr);
1831 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1832 break;
1833
1834 /*
1835 * A quick GCFGM mock-up.
1836 */
1837 /** @todo GCFGM with proper access control, ring-3 management interface and all that. */
1838 case VMMR0_DO_GCFGM_SET_VALUE:
1839 case VMMR0_DO_GCFGM_QUERY_VALUE:
1840 {
1841 if (pGVM || pVM || !pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
1842 return VERR_INVALID_PARAMETER;
1843 PGCFGMVALUEREQ pReq = (PGCFGMVALUEREQ)pReqHdr;
1844 if (pReq->Hdr.cbReq != sizeof(*pReq))
1845 return VERR_INVALID_PARAMETER;
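            /* The same request packet serves both directions: SET pushes pReq->u64Value
               for the value named in pReq->szName, while QUERY writes the current value
               back into pReq->u64Value. */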
1846 if (enmOperation == VMMR0_DO_GCFGM_SET_VALUE)
1847 {
1848 rc = GVMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
1849 //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
1850 // rc = GMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
1851 }
1852 else
1853 {
1854 rc = GVMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
1855 //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
1856 // rc = GMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
1857 }
1858 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1859 break;
1860 }
1861
1862 /*
1863 * PDM Wrappers.
1864 */
1865 case VMMR0_DO_PDM_DRIVER_CALL_REQ_HANDLER:
1866 {
1867 if (!pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
1868 return VERR_INVALID_PARAMETER;
1869 rc = PDMR0DriverCallReqHandler(pGVM, pVM, (PPDMDRIVERCALLREQHANDLERREQ)pReqHdr);
1870 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1871 break;
1872 }
1873
1874 case VMMR0_DO_PDM_DEVICE_CALL_REQ_HANDLER:
1875 {
1876 if (!pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
1877 return VERR_INVALID_PARAMETER;
1878 rc = PDMR0DeviceCallReqHandler(pGVM, pVM, (PPDMDEVICECALLREQHANDLERREQ)pReqHdr);
1879 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1880 break;
1881 }
1882
1883 /*
1884 * Requests to the internal networking service.
1885 */
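        /* Each INTNET request below is only accepted if the session stored in the
           request matches the caller's session (vmmR0IsValidSession); it is then
           forwarded directly to the IntNetR0 service. */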
1886 case VMMR0_DO_INTNET_OPEN:
1887 {
1888 PINTNETOPENREQ pReq = (PINTNETOPENREQ)pReqHdr;
1889 if (u64Arg || !pReq || !vmmR0IsValidSession(pVM, pReq->pSession, pSession) || idCpu != NIL_VMCPUID)
1890 return VERR_INVALID_PARAMETER;
1891 rc = IntNetR0OpenReq(pSession, pReq);
1892 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1893 break;
1894 }
1895
1896 case VMMR0_DO_INTNET_IF_CLOSE:
1897 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFCLOSEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1898 return VERR_INVALID_PARAMETER;
1899 rc = IntNetR0IfCloseReq(pSession, (PINTNETIFCLOSEREQ)pReqHdr);
1900 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1901 break;
1902
1903
1904 case VMMR0_DO_INTNET_IF_GET_BUFFER_PTRS:
1905 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFGETBUFFERPTRSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1906 return VERR_INVALID_PARAMETER;
1907 rc = IntNetR0IfGetBufferPtrsReq(pSession, (PINTNETIFGETBUFFERPTRSREQ)pReqHdr);
1908 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1909 break;
1910
1911 case VMMR0_DO_INTNET_IF_SET_PROMISCUOUS_MODE:
1912 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1913 return VERR_INVALID_PARAMETER;
1914 rc = IntNetR0IfSetPromiscuousModeReq(pSession, (PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr);
1915 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1916 break;
1917
1918 case VMMR0_DO_INTNET_IF_SET_MAC_ADDRESS:
1919 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETMACADDRESSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1920 return VERR_INVALID_PARAMETER;
1921 rc = IntNetR0IfSetMacAddressReq(pSession, (PINTNETIFSETMACADDRESSREQ)pReqHdr);
1922 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1923 break;
1924
1925 case VMMR0_DO_INTNET_IF_SET_ACTIVE:
1926 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETACTIVEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1927 return VERR_INVALID_PARAMETER;
1928 rc = IntNetR0IfSetActiveReq(pSession, (PINTNETIFSETACTIVEREQ)pReqHdr);
1929 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1930 break;
1931
1932 case VMMR0_DO_INTNET_IF_SEND:
1933 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1934 return VERR_INVALID_PARAMETER;
1935 rc = IntNetR0IfSendReq(pSession, (PINTNETIFSENDREQ)pReqHdr);
1936 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1937 break;
1938
1939 case VMMR0_DO_INTNET_IF_WAIT:
1940 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1941 return VERR_INVALID_PARAMETER;
1942 rc = IntNetR0IfWaitReq(pSession, (PINTNETIFWAITREQ)pReqHdr);
1943 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1944 break;
1945
1946 case VMMR0_DO_INTNET_IF_ABORT_WAIT:
1947 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1948 return VERR_INVALID_PARAMETER;
1949 rc = IntNetR0IfAbortWaitReq(pSession, (PINTNETIFABORTWAITREQ)pReqHdr);
1950 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1951 break;
1952
1953#ifdef VBOX_WITH_PCI_PASSTHROUGH
1954 /*
1955 * Requests to host PCI driver service.
1956 */
1957 case VMMR0_DO_PCIRAW_REQ:
1958 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PPCIRAWSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1959 return VERR_INVALID_PARAMETER;
1960 rc = PciRawR0ProcessReq(pGVM, pVM, pSession, (PPCIRAWSENDREQ)pReqHdr);
1961 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1962 break;
1963#endif
1964
1965 /*
1966 * NEM requests.
1967 */
1968#ifdef VBOX_WITH_NEM_R0
1969# if defined(RT_ARCH_AMD64) && defined(RT_OS_WINDOWS)
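        /* The ring-0 NEM backend is currently only built for 64-bit Windows hosts. */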
1970 case VMMR0_DO_NEM_INIT_VM:
1971 if (u64Arg || pReqHdr || idCpu != 0)
1972 return VERR_INVALID_PARAMETER;
1973 rc = NEMR0InitVM(pGVM, pVM);
1974 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1975 break;
1976
1977 case VMMR0_DO_NEM_INIT_VM_PART_2:
1978 if (u64Arg || pReqHdr || idCpu != 0)
1979 return VERR_INVALID_PARAMETER;
1980 rc = NEMR0InitVMPart2(pGVM, pVM);
1981 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1982 break;
1983
1984 case VMMR0_DO_NEM_MAP_PAGES:
1985 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
1986 return VERR_INVALID_PARAMETER;
1987 rc = NEMR0MapPages(pGVM, pVM, idCpu);
1988 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1989 break;
1990
1991 case VMMR0_DO_NEM_UNMAP_PAGES:
1992 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
1993 return VERR_INVALID_PARAMETER;
1994 rc = NEMR0UnmapPages(pGVM, pVM, idCpu);
1995 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1996 break;
1997
1998 case VMMR0_DO_NEM_EXPORT_STATE:
1999 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2000 return VERR_INVALID_PARAMETER;
2001 rc = NEMR0ExportState(pGVM, pVM, idCpu);
2002 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2003 break;
2004
2005 case VMMR0_DO_NEM_IMPORT_STATE:
2006 if (pReqHdr || idCpu == NIL_VMCPUID)
2007 return VERR_INVALID_PARAMETER;
2008 rc = NEMR0ImportState(pGVM, pVM, idCpu, u64Arg);
2009 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2010 break;
2011# endif
2012#endif
2013
2014 /*
2015 * For profiling.
2016 */
2017 case VMMR0_DO_NOP:
2018 case VMMR0_DO_SLOW_NOP:
2019 return VINF_SUCCESS;
2020
2021 /*
2022 * For testing Ring-0 APIs invoked in this environment.
2023 */
2024 case VMMR0_DO_TESTS:
2025 /** @todo make new test */
2026 return VINF_SUCCESS;
2027
2028
2029#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
2030 case VMMR0_DO_TEST_SWITCHER3264:
2031 if (idCpu == NIL_VMCPUID)
2032 return VERR_INVALID_CPU_ID;
2033 rc = HMR0TestSwitcher3264(pVM);
2034 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2035 break;
2036#endif
2037 default:
2038 /*
2039             * We're returning VERR_NOT_SUPPORTED here so that we've got something
2040             * other than -1, which the interrupt gate glue code might return.
2041 */
2042 Log(("operation %#x is not supported\n", enmOperation));
2043 return VERR_NOT_SUPPORTED;
2044 }
2045 return rc;
2046}
2047
2048
2049/**
2050 * Argument for vmmR0EntryExWrapper containing the arguments for VMMR0EntryEx.
2051 */
2052typedef struct VMMR0ENTRYEXARGS
2053{
2054 PGVM pGVM;
2055 PVM pVM;
2056 VMCPUID idCpu;
2057 VMMR0OPERATION enmOperation;
2058 PSUPVMMR0REQHDR pReq;
2059 uint64_t u64Arg;
2060 PSUPDRVSESSION pSession;
2061} VMMR0ENTRYEXARGS;
2062/** Pointer to a vmmR0EntryExWrapper argument package. */
2063typedef VMMR0ENTRYEXARGS *PVMMR0ENTRYEXARGS;
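/* All VMMR0EntryEx parameters are bundled into this package because the setjmp
   helper used below passes only a single user pointer through to its callback,
   which then unpacks the arguments again. */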
2064
2065/**
2066 * This is just a longjmp wrapper function for VMMR0EntryEx calls.
2067 *
2068 * @returns VBox status code.
2069 * @param pvArgs The argument package.
2070 */
2071static DECLCALLBACK(int) vmmR0EntryExWrapper(void *pvArgs)
2072{
2073 return vmmR0EntryExWorker(((PVMMR0ENTRYEXARGS)pvArgs)->pGVM,
2074 ((PVMMR0ENTRYEXARGS)pvArgs)->pVM,
2075 ((PVMMR0ENTRYEXARGS)pvArgs)->idCpu,
2076 ((PVMMR0ENTRYEXARGS)pvArgs)->enmOperation,
2077 ((PVMMR0ENTRYEXARGS)pvArgs)->pReq,
2078 ((PVMMR0ENTRYEXARGS)pvArgs)->u64Arg,
2079 ((PVMMR0ENTRYEXARGS)pvArgs)->pSession);
2080}
2081
2082
2083/**
2084 * The Ring 0 entry point, called by the support library (SUP).
2085 *
2086 * @returns VBox status code.
2087 * @param pGVM The global (ring-0) VM structure.
2088 * @param pVM The cross context VM structure.
2089 * @param idCpu Virtual CPU ID argument. Must be NIL_VMCPUID if pVM
2090 * is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't.
2091 * @param enmOperation Which operation to execute.
2092 * @param pReq Pointer to the SUPVMMR0REQHDR packet. Optional.
2093 * @param u64Arg Some simple constant argument.
2094 * @param pSession The session of the caller.
2095 * @remarks Assume called with interrupts _enabled_.
2096 */
2097VMMR0DECL(int) VMMR0EntryEx(PGVM pGVM, PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation,
2098 PSUPVMMR0REQHDR pReq, uint64_t u64Arg, PSUPDRVSESSION pSession)
2099{
2100 /*
2101 * Requests that should only happen on the EMT thread will be
2102 * wrapped in a setjmp so we can assert without causing trouble.
2103 */
2104 if ( pVM != NULL
2105 && pGVM != NULL
2106 && idCpu < pGVM->cCpus
2107 && pVM->pVMR0 != NULL)
2108 {
2109 switch (enmOperation)
2110 {
2111 /* These might/will be called before VMMR3Init. */
2112 case VMMR0_DO_GMM_INITIAL_RESERVATION:
2113 case VMMR0_DO_GMM_UPDATE_RESERVATION:
2114 case VMMR0_DO_GMM_ALLOCATE_PAGES:
2115 case VMMR0_DO_GMM_FREE_PAGES:
2116 case VMMR0_DO_GMM_BALLOONED_PAGES:
2117            /* On the Mac we might not have a valid jmp buf, so check these as well. */
2118 case VMMR0_DO_VMMR0_INIT:
2119 case VMMR0_DO_VMMR0_TERM:
2120 {
2121 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2122 PVMCPU pVCpu = &pVM->aCpus[idCpu];
2123 RTNATIVETHREAD hNativeThread = RTThreadNativeSelf();
2124 if (RT_LIKELY( pGVCpu->hEMT == hNativeThread
2125 && pVCpu->hNativeThreadR0 == hNativeThread))
2126 {
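                    /* No stack-save buffer has been allocated for the jump buffer yet
                       (see the early init/term cases above), so skip the setjmp wrapping
                       and fall through to the plain worker call at the end of the function. */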
2127 if (!pVCpu->vmm.s.CallRing3JmpBufR0.pvSavedStack)
2128 break;
2129
2130 /** @todo validate this EMT claim... GVM knows. */
2131 VMMR0ENTRYEXARGS Args;
2132 Args.pGVM = pGVM;
2133 Args.pVM = pVM;
2134 Args.idCpu = idCpu;
2135 Args.enmOperation = enmOperation;
2136 Args.pReq = pReq;
2137 Args.u64Arg = u64Arg;
2138 Args.pSession = pSession;
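                    /* Arm the jump buffer and invoke the worker through the wrapper, so
                       assertions and ring-3 calls made on this EMT can long jump back out. */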
2139 return vmmR0CallRing3SetJmpEx(&pVCpu->vmm.s.CallRing3JmpBufR0, vmmR0EntryExWrapper, &Args);
2140 }
2141 return VERR_VM_THREAD_NOT_EMT;
2142 }
2143
2144 default:
2145 break;
2146 }
2147 }
2148 return vmmR0EntryExWorker(pGVM, pVM, idCpu, enmOperation, pReq, u64Arg, pSession);
2149}
2150
2151
2152/**
2153 * Checks whether we've armed the ring-0 long jump machinery.
2154 *
2155 * @returns @c true / @c false
2156 * @param pVCpu The cross context virtual CPU structure.
2157 * @thread EMT
2158 * @sa VMMIsLongJumpArmed
2159 */
2160VMMR0_INT_DECL(bool) VMMR0IsLongJumpArmed(PVMCPU pVCpu)
2161{
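    /* 'Armed' means a resume address has been captured (eip/rip is non-zero) and the
       EMT is not currently parked in a ring-3 call. */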
2162#ifdef RT_ARCH_X86
2163 return pVCpu->vmm.s.CallRing3JmpBufR0.eip
2164 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
2165#else
2166 return pVCpu->vmm.s.CallRing3JmpBufR0.rip
2167 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
2168#endif
2169}
2170
2171
2172/**
2173 * Checks whether we've done a ring-3 long jump.
2174 *
2175 * @returns @c true / @c false
2176 * @param pVCpu The cross context virtual CPU structure.
2177 * @thread EMT
2178 */
2179VMMR0_INT_DECL(bool) VMMR0IsInRing3LongJump(PVMCPU pVCpu)
2180{
2181 return pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
2182}
2183
2184
2185/**
2186 * Internal R0 logger worker: Flush logger.
2187 *
2188 * @param pLogger The logger instance to flush.
2189 * @remark This function must be exported!
2190 */
2191VMMR0DECL(void) vmmR0LoggerFlush(PRTLOGGER pLogger)
2192{
2193#ifdef LOG_ENABLED
2194 /*
2195 * Convert the pLogger into a VM handle and 'call' back to Ring-3.
2196     * (This code is deliberately paranoid.)
2197 */
2198 PVMMR0LOGGER pR0Logger = (PVMMR0LOGGER)((uintptr_t)pLogger - RT_OFFSETOF(VMMR0LOGGER, Logger));
2199 if ( !VALID_PTR(pR0Logger)
2200 || !VALID_PTR(pR0Logger + 1)
2201 || pLogger->u32Magic != RTLOGGER_MAGIC)
2202 {
2203# ifdef DEBUG
2204 SUPR0Printf("vmmR0LoggerFlush: pLogger=%p!\n", pLogger);
2205# endif
2206 return;
2207 }
2208 if (pR0Logger->fFlushingDisabled)
2209 return; /* quietly */
2210
2211 PVM pVM = pR0Logger->pVM;
2212 if ( !VALID_PTR(pVM)
2213 || pVM->pVMR0 != pVM)
2214 {
2215# ifdef DEBUG
2216 SUPR0Printf("vmmR0LoggerFlush: pVM=%p! pVMR0=%p! pLogger=%p\n", pVM, pVM->pVMR0, pLogger);
2217# endif
2218 return;
2219 }
2220
2221 PVMCPU pVCpu = VMMGetCpu(pVM);
2222 if (pVCpu)
2223 {
2224 /*
2225 * Check that the jump buffer is armed.
2226 */
2227# ifdef RT_ARCH_X86
2228 if ( !pVCpu->vmm.s.CallRing3JmpBufR0.eip
2229 || pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2230# else
2231 if ( !pVCpu->vmm.s.CallRing3JmpBufR0.rip
2232 || pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2233# endif
2234 {
2235# ifdef DEBUG
2236 SUPR0Printf("vmmR0LoggerFlush: Jump buffer isn't armed!\n");
2237# endif
2238 return;
2239 }
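        /* The jump buffer is armed, so it is safe to go to ring-3 and let it flush
           the log buffer for us. */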
2240 VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VMM_LOGGER_FLUSH, 0);
2241 }
2242# ifdef DEBUG
2243 else
2244 SUPR0Printf("vmmR0LoggerFlush: invalid VCPU context!\n");
2245# endif
2246#else
2247 NOREF(pLogger);
2248#endif /* LOG_ENABLED */
2249}
2250
2251/**
2252 * Internal R0 logger worker: Custom prefix.
2253 *
2254 * @returns Number of chars written.
2255 *
2256 * @param pLogger The logger instance.
2257 * @param pchBuf The output buffer.
2258 * @param cchBuf The size of the buffer.
2259 * @param pvUser User argument (ignored).
2260 */
2261VMMR0DECL(size_t) vmmR0LoggerPrefix(PRTLOGGER pLogger, char *pchBuf, size_t cchBuf, void *pvUser)
2262{
2263 NOREF(pvUser);
2264#ifdef LOG_ENABLED
2265 PVMMR0LOGGER pR0Logger = (PVMMR0LOGGER)((uintptr_t)pLogger - RT_OFFSETOF(VMMR0LOGGER, Logger));
2266 if ( !VALID_PTR(pR0Logger)
2267 || !VALID_PTR(pR0Logger + 1)
2268 || pLogger->u32Magic != RTLOGGER_MAGIC
2269 || cchBuf < 2)
2270 return 0;
2271
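    /* The prefix is simply the virtual CPU id rendered as two lowercase hex digits. */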
2272 static const char s_szHex[17] = "0123456789abcdef";
2273 VMCPUID const idCpu = pR0Logger->idCpu;
2274 pchBuf[1] = s_szHex[ idCpu & 15];
2275 pchBuf[0] = s_szHex[(idCpu >> 4) & 15];
2276
2277 return 2;
2278#else
2279 NOREF(pLogger); NOREF(pchBuf); NOREF(cchBuf);
2280 return 0;
2281#endif
2282}
2283
2284#ifdef LOG_ENABLED
2285
2286/**
2287 * Disables flushing of the ring-0 debug log.
2288 *
2289 * @param pVCpu The cross context virtual CPU structure.
2290 */
2291VMMR0_INT_DECL(void) VMMR0LogFlushDisable(PVMCPU pVCpu)
2292{
2293 if (pVCpu->vmm.s.pR0LoggerR0)
2294 pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled = true;
2295}
2296
2297
2298/**
2299 * Enables flushing of the ring-0 debug log.
2300 *
2301 * @param pVCpu The cross context virtual CPU structure.
2302 */
2303VMMR0_INT_DECL(void) VMMR0LogFlushEnable(PVMCPU pVCpu)
2304{
2305 if (pVCpu->vmm.s.pR0LoggerR0)
2306 pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled = false;
2307}
2308
2309
2310/**
2311 * Checks if log flushing is disabled or not.
2312 *
2313 * @param pVCpu The cross context virtual CPU structure.
2314 */
2315VMMR0_INT_DECL(bool) VMMR0IsLogFlushDisabled(PVMCPU pVCpu)
2316{
2317 if (pVCpu->vmm.s.pR0LoggerR0)
2318 return pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled;
2319 return true;
2320}
2321#endif /* LOG_ENABLED */
2322
2323/**
2324 * Jump back to ring-3 if we're the EMT and the longjmp is armed.
2325 *
2326 * @returns true if the breakpoint should be hit, false if it should be ignored.
2327 */
2328DECLEXPORT(bool) RTCALL RTAssertShouldPanic(void)
2329{
2330#if 0
2331 return true;
2332#else
2333 PVM pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
2334 if (pVM)
2335 {
2336 PVMCPU pVCpu = VMMGetCpu(pVM);
2337
2338 if (pVCpu)
2339 {
2340#ifdef RT_ARCH_X86
2341 if ( pVCpu->vmm.s.CallRing3JmpBufR0.eip
2342 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2343#else
2344 if ( pVCpu->vmm.s.CallRing3JmpBufR0.rip
2345 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2346#endif
2347 {
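                /* The long jump is armed: hand the assertion over to ring-3 and only
                   report 'should panic' if that ring-3 call itself fails. */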
2348 int rc = VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VM_R0_ASSERTION, 0);
2349 return RT_FAILURE_NP(rc);
2350 }
2351 }
2352 }
2353#ifdef RT_OS_LINUX
2354 return true;
2355#else
2356 return false;
2357#endif
2358#endif
2359}
2360
2361
2362/**
2363 * Override this so we can push it up to ring-3.
2364 *
2365 * @param pszExpr Expression. Can be NULL.
2366 * @param uLine Location line number.
2367 * @param pszFile Location file name.
2368 * @param pszFunction Location function name.
2369 */
2370DECLEXPORT(void) RTCALL RTAssertMsg1Weak(const char *pszExpr, unsigned uLine, const char *pszFile, const char *pszFunction)
2371{
2372 /*
2373 * To the log.
2374 */
2375 LogAlways(("\n!!R0-Assertion Failed!!\n"
2376 "Expression: %s\n"
2377 "Location : %s(%d) %s\n",
2378 pszExpr, pszFile, uLine, pszFunction));
2379
2380 /*
2381 * To the global VMM buffer.
2382 */
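    /* Note: the %.*s width below limits the expression to roughly three quarters of
       the buffer so that the location line which follows still fits. */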
2383 PVM pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
2384 if (pVM)
2385 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg1, sizeof(pVM->vmm.s.szRing0AssertMsg1),
2386 "\n!!R0-Assertion Failed!!\n"
2387 "Expression: %.*s\n"
2388 "Location : %s(%d) %s\n",
2389 sizeof(pVM->vmm.s.szRing0AssertMsg1) / 4 * 3, pszExpr,
2390 pszFile, uLine, pszFunction);
2391
2392 /*
2393 * Continue the normal way.
2394 */
2395 RTAssertMsg1(pszExpr, uLine, pszFile, pszFunction);
2396}
2397
2398
2399/**
2400 * Callback for RTLogFormatV which writes to the ring-3 log port.
2401 * See PFNLOGOUTPUT() for details.
2402 */
2403static DECLCALLBACK(size_t) rtLogOutput(void *pv, const char *pachChars, size_t cbChars)
2404{
2405 for (size_t i = 0; i < cbChars; i++)
2406 {
2407 LogAlways(("%c", pachChars[i])); NOREF(pachChars);
2408 }
2409
2410 NOREF(pv);
2411 return cbChars;
2412}
2413
2414
2415/**
2416 * Override this so we can push it up to ring-3.
2417 *
2418 * @param pszFormat The format string.
2419 * @param va Arguments.
2420 */
2421DECLEXPORT(void) RTCALL RTAssertMsg2WeakV(const char *pszFormat, va_list va)
2422{
2423 va_list vaCopy;
2424
2425 /*
2426 * Push the message to the loggers.
2427 */
2428 PRTLOGGER pLog = RTLogGetDefaultInstance(); /* Don't initialize it here... */
2429 if (pLog)
2430 {
2431 va_copy(vaCopy, va);
2432 RTLogFormatV(rtLogOutput, pLog, pszFormat, vaCopy);
2433 va_end(vaCopy);
2434 }
2435 pLog = RTLogRelGetDefaultInstance();
2436 if (pLog)
2437 {
2438 va_copy(vaCopy, va);
2439 RTLogFormatV(rtLogOutput, pLog, pszFormat, vaCopy);
2440 va_end(vaCopy);
2441 }
2442
2443 /*
2444 * Push it to the global VMM buffer.
2445 */
2446 PVM pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
2447 if (pVM)
2448 {
2449 va_copy(vaCopy, va);
2450 RTStrPrintfV(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2), pszFormat, vaCopy);
2451 va_end(vaCopy);
2452 }
2453
2454 /*
2455 * Continue the normal way.
2456 */
2457 RTAssertMsg2V(pszFormat, va);
2458}
2459