VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/VMMR0.cpp@72816

Last change on this file since 72816 was 72805, checked in by vboxsync, 6 years ago

VMM/HM: bugref:9193 Fixes honoring pending VMCPU_FF_HM_UPDATE_CR3 and VMCPU_FF_HM_UPDATE_PAE_PDPES
before re-entering guest execution with VT-x R0 code.
Avoid a couple of VMWRITEs because we already have caches of the values (CR4 mask, CR0 mask).
Parameter cleanup.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 90.8 KB
1/* $Id: VMMR0.cpp 72805 2018-07-03 04:05:43Z vboxsync $ */
2/** @file
3 * VMM - Host Context Ring 0.
4 */
5
6/*
7 * Copyright (C) 2006-2017 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_VMM
23#include <VBox/vmm/vmm.h>
24#include <VBox/sup.h>
25#include <VBox/vmm/trpm.h>
26#include <VBox/vmm/cpum.h>
27#include <VBox/vmm/pdmapi.h>
28#include <VBox/vmm/pgm.h>
29#ifdef VBOX_WITH_NEM_R0
30# include <VBox/vmm/nem.h>
31#endif
32#include <VBox/vmm/em.h>
33#include <VBox/vmm/stam.h>
34#include <VBox/vmm/tm.h>
35#include "VMMInternal.h"
36#include <VBox/vmm/vm.h>
37#include <VBox/vmm/gvm.h>
38#ifdef VBOX_WITH_PCI_PASSTHROUGH
39# include <VBox/vmm/pdmpci.h>
40#endif
41#include <VBox/vmm/apic.h>
42
43#include <VBox/vmm/gvmm.h>
44#include <VBox/vmm/gmm.h>
45#include <VBox/vmm/gim.h>
46#include <VBox/intnet.h>
47#include <VBox/vmm/hm.h>
48#include <VBox/param.h>
49#include <VBox/err.h>
50#include <VBox/version.h>
51#include <VBox/log.h>
52
53#include <iprt/asm-amd64-x86.h>
54#include <iprt/assert.h>
55#include <iprt/crc.h>
56#include <iprt/mp.h>
57#include <iprt/once.h>
58#include <iprt/stdarg.h>
59#include <iprt/string.h>
60#include <iprt/thread.h>
61#include <iprt/timer.h>
62
63#include "dtrace/VBoxVMM.h"
64
65
66#if defined(_MSC_VER) && defined(RT_ARCH_AMD64) /** @todo check this with VC7! */
67# pragma intrinsic(_AddressOfReturnAddress)
68#endif
69
70#if defined(RT_OS_DARWIN) && ARCH_BITS == 32
71# error "32-bit darwin is no longer supported. Go back to 4.3 or earlier!"
72#endif
73
74
75
76/*********************************************************************************************************************************
77* Defined Constants And Macros *
78*********************************************************************************************************************************/
79/** @def VMM_CHECK_SMAP_SETUP
80 * SMAP check setup. */
81/** @def VMM_CHECK_SMAP_CHECK
82 * Checks that the AC flag is set if SMAP is enabled. If AC is not set,
83 * it will be logged and @a a_BadExpr is executed. */
84/** @def VMM_CHECK_SMAP_CHECK2
85 * Checks that the AC flag is set if SMAP is enabled. If AC is not set, it will
86 * be logged, written to the VM's assertion text buffer, and @a a_BadExpr is
87 * executed. */
88#if defined(VBOX_STRICT) || 1
89# define VMM_CHECK_SMAP_SETUP() uint32_t const fKernelFeatures = SUPR0GetKernelFeatures()
90# define VMM_CHECK_SMAP_CHECK(a_BadExpr) \
91 do { \
92 if (fKernelFeatures & SUPKERNELFEATURES_SMAP) \
93 { \
94 RTCCUINTREG fEflCheck = ASMGetFlags(); \
95 if (RT_LIKELY(fEflCheck & X86_EFL_AC)) \
96 { /* likely */ } \
97 else \
98 { \
99 SUPR0Printf("%s, line %d: EFLAGS.AC is clear! (%#x)\n", __FUNCTION__, __LINE__, (uint32_t)fEflCheck); \
100 a_BadExpr; \
101 } \
102 } \
103 } while (0)
104# define VMM_CHECK_SMAP_CHECK2(a_pVM, a_BadExpr) \
105 do { \
106 if (fKernelFeatures & SUPKERNELFEATURES_SMAP) \
107 { \
108 RTCCUINTREG fEflCheck = ASMGetFlags(); \
109 if (RT_LIKELY(fEflCheck & X86_EFL_AC)) \
110 { /* likely */ } \
111 else \
112 { \
113 SUPR0BadContext((a_pVM) ? (a_pVM)->pSession : NULL, __FILE__, __LINE__, "EFLAGS.AC is zero!"); \
114 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg1, sizeof(pVM->vmm.s.szRing0AssertMsg1), \
115 "%s, line %d: EFLAGS.AC is clear! (%#x)\n", __FUNCTION__, __LINE__, (uint32_t)fEflCheck); \
116 a_BadExpr; \
117 } \
118 } \
119 } while (0)
120#else
121# define VMM_CHECK_SMAP_SETUP() uint32_t const fKernelFeatures = 0
122# define VMM_CHECK_SMAP_CHECK(a_BadExpr) NOREF(fKernelFeatures)
123# define VMM_CHECK_SMAP_CHECK2(a_pVM, a_BadExpr) NOREF(fKernelFeatures)
124#endif
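
/*
 * Editorial sketch (not part of the original source): minimal usage of the
 * SMAP check macros above in a hypothetical ring-0 entry function. The
 * function name vmmR0SmapUsageSketch is made up for illustration only.
 */
#if 0 /* illustration only */
static int vmmR0SmapUsageSketch(PVM pVM)
{
    VMM_CHECK_SMAP_SETUP();                                   /* snapshot SUPR0GetKernelFeatures() once per function */
    VMM_CHECK_SMAP_CHECK(return VERR_VMM_SMAP_BUT_AC_CLEAR);  /* log + bail if SMAP is on but EFLAGS.AC is clear */
    /* ... work that may touch user-mode memory ... */
    VMM_CHECK_SMAP_CHECK2(pVM, return VERR_VMM_SMAP_BUT_AC_CLEAR); /* same check, but also records the failure in the VM's assertion buffer */
    return VINF_SUCCESS;
}
#endif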
125
126
127/*********************************************************************************************************************************
128* Internal Functions *
129*********************************************************************************************************************************/
130RT_C_DECLS_BEGIN
131#if defined(RT_ARCH_X86) && (defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD))
132extern uint64_t __udivdi3(uint64_t, uint64_t);
133extern uint64_t __umoddi3(uint64_t, uint64_t);
134#endif
135RT_C_DECLS_END
136
137
138/*********************************************************************************************************************************
139* Global Variables *
140*********************************************************************************************************************************/
141/** Drag in necessary library bits.
142 * The runtime lives here (in VMMR0.r0) and VBoxDD*R0.r0 links against us. */
143PFNRT g_VMMR0Deps[] =
144{
145 (PFNRT)RTCrc32,
146 (PFNRT)RTOnce,
147#if defined(RT_ARCH_X86) && (defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD))
148 (PFNRT)__udivdi3,
149 (PFNRT)__umoddi3,
150#endif
151 NULL
152};
153
154#ifdef RT_OS_SOLARIS
155/* Dependency information for the native solaris loader. */
156extern "C" { char _depends_on[] = "vboxdrv"; }
157#endif
158
159/** The result of SUPR0GetRawModeUsability(), set by ModuleInit(). */
160int g_rcRawModeUsability = VINF_SUCCESS;
161
162
163/**
164 * Initialize the module.
165 * This is called when we're first loaded.
166 *
167 * @returns 0 on success.
168 * @returns VBox status on failure.
169 * @param hMod Image handle for use in APIs.
170 */
171DECLEXPORT(int) ModuleInit(void *hMod)
172{
173 VMM_CHECK_SMAP_SETUP();
174 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
175
176#ifdef VBOX_WITH_DTRACE_R0
177 /*
178 * The first thing to do is register the static tracepoints.
179 * (Deregistration is automatic.)
180 */
181 int rc2 = SUPR0TracerRegisterModule(hMod, &g_VTGObjHeader);
182 if (RT_FAILURE(rc2))
183 return rc2;
184#endif
185 LogFlow(("ModuleInit:\n"));
186
187#ifdef VBOX_WITH_64ON32_CMOS_DEBUG
188 /*
189 * Display the CMOS debug code.
190 */
191 ASMOutU8(0x72, 0x03);
192 uint8_t bDebugCode = ASMInU8(0x73);
193 LogRel(("CMOS Debug Code: %#x (%d)\n", bDebugCode, bDebugCode));
194 RTLogComPrintf("CMOS Debug Code: %#x (%d)\n", bDebugCode, bDebugCode);
195#endif
196
197 /*
198 * Initialize the VMM, GVMM, GMM, HM, PGM (Darwin) and INTNET.
199 */
200 int rc = vmmInitFormatTypes();
201 if (RT_SUCCESS(rc))
202 {
203 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
204 rc = GVMMR0Init();
205 if (RT_SUCCESS(rc))
206 {
207 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
208 rc = GMMR0Init();
209 if (RT_SUCCESS(rc))
210 {
211 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
212 rc = HMR0Init();
213 if (RT_SUCCESS(rc))
214 {
215 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
216 rc = PGMRegisterStringFormatTypes();
217 if (RT_SUCCESS(rc))
218 {
219 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
220#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
221 rc = PGMR0DynMapInit();
222#endif
223 if (RT_SUCCESS(rc))
224 {
225 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
226 rc = IntNetR0Init();
227 if (RT_SUCCESS(rc))
228 {
229#ifdef VBOX_WITH_PCI_PASSTHROUGH
230 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
231 rc = PciRawR0Init();
232#endif
233 if (RT_SUCCESS(rc))
234 {
235 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
236 rc = CPUMR0ModuleInit();
237 if (RT_SUCCESS(rc))
238 {
239#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
240 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
241 rc = vmmR0TripleFaultHackInit();
242 if (RT_SUCCESS(rc))
243#endif
244 {
245 VMM_CHECK_SMAP_CHECK(rc = VERR_VMM_SMAP_BUT_AC_CLEAR);
246 if (RT_SUCCESS(rc))
247 {
248 g_rcRawModeUsability = SUPR0GetRawModeUsability();
249 if (g_rcRawModeUsability != VINF_SUCCESS)
250 SUPR0Printf("VMMR0!ModuleInit: SUPR0GetRawModeUsability -> %Rrc\n",
251 g_rcRawModeUsability);
252 LogFlow(("ModuleInit: returns success\n"));
253 return VINF_SUCCESS;
254 }
255 }
256
257 /*
258 * Bail out.
259 */
260#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
261 vmmR0TripleFaultHackTerm();
262#endif
263 }
264 else
265 LogRel(("ModuleInit: CPUMR0ModuleInit -> %Rrc\n", rc));
266#ifdef VBOX_WITH_PCI_PASSTHROUGH
267 PciRawR0Term();
268#endif
269 }
270 else
271 LogRel(("ModuleInit: PciRawR0Init -> %Rrc\n", rc));
272 IntNetR0Term();
273 }
274 else
275 LogRel(("ModuleInit: IntNetR0Init -> %Rrc\n", rc));
276#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
277 PGMR0DynMapTerm();
278#endif
279 }
280 else
281 LogRel(("ModuleInit: PGMR0DynMapInit -> %Rrc\n", rc));
282 PGMDeregisterStringFormatTypes();
283 }
284 else
285 LogRel(("ModuleInit: PGMRegisterStringFormatTypes -> %Rrc\n", rc));
286 HMR0Term();
287 }
288 else
289 LogRel(("ModuleInit: HMR0Init -> %Rrc\n", rc));
290 GMMR0Term();
291 }
292 else
293 LogRel(("ModuleInit: GMMR0Init -> %Rrc\n", rc));
294 GVMMR0Term();
295 }
296 else
297 LogRel(("ModuleInit: GVMMR0Init -> %Rrc\n", rc));
298 vmmTermFormatTypes();
299 }
300 else
301 LogRel(("ModuleInit: vmmInitFormatTypes -> %Rrc\n", rc));
302
303 LogFlow(("ModuleInit: failed %Rrc\n", rc));
304 return rc;
305}
306
307
308/**
309 * Terminate the module.
310 * This is called when we're finally unloaded.
311 *
312 * @param hMod Image handle for use in APIs.
313 */
314DECLEXPORT(void) ModuleTerm(void *hMod)
315{
316 NOREF(hMod);
317 LogFlow(("ModuleTerm:\n"));
318
319 /*
320 * Terminate the CPUM module (Local APIC cleanup).
321 */
322 CPUMR0ModuleTerm();
323
324 /*
325 * Terminate the internal network service.
326 */
327 IntNetR0Term();
328
329 /*
330 * PGM (Darwin), HM and PciRaw global cleanup.
331 */
332#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
333 PGMR0DynMapTerm();
334#endif
335#ifdef VBOX_WITH_PCI_PASSTHROUGH
336 PciRawR0Term();
337#endif
338 PGMDeregisterStringFormatTypes();
339 HMR0Term();
340#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
341 vmmR0TripleFaultHackTerm();
342#endif
343
344 /*
345 * Destroy the GMM and GVMM instances.
346 */
347 GMMR0Term();
348 GVMMR0Term();
349
350 vmmTermFormatTypes();
351
352 LogFlow(("ModuleTerm: returns\n"));
353}
354
355
356/**
357 * Initiates the R0 driver for a particular VM instance.
358 *
359 * @returns VBox status code.
360 *
361 * @param pGVM The global (ring-0) VM structure.
362 * @param pVM The cross context VM structure.
363 * @param uSvnRev The SVN revision of the ring-3 part.
364 * @param uBuildType Build type indicator.
365 * @thread EMT(0)
366 */
367static int vmmR0InitVM(PGVM pGVM, PVM pVM, uint32_t uSvnRev, uint32_t uBuildType)
368{
369 VMM_CHECK_SMAP_SETUP();
370 VMM_CHECK_SMAP_CHECK(return VERR_VMM_SMAP_BUT_AC_CLEAR);
371
372 /*
373 * Match the SVN revisions and build type.
374 */
375 if (uSvnRev != VMMGetSvnRev())
376 {
377 LogRel(("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev()));
378 SUPR0Printf("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev());
379 return VERR_VMM_R0_VERSION_MISMATCH;
380 }
381 if (uBuildType != vmmGetBuildType())
382 {
383 LogRel(("VMMR0InitVM: Build type mismatch, r3=%#x r0=%#x\n", uBuildType, vmmGetBuildType()));
384 SUPR0Printf("VMMR0InitVM: Build type mismatch, r3=%#x r0=%#x\n", uBuildType, vmmGetBuildType());
385 return VERR_VMM_R0_VERSION_MISMATCH;
386 }
387
388 int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, 0 /*idCpu*/);
389 if (RT_FAILURE(rc))
390 return rc;
391
392#ifdef LOG_ENABLED
393 /*
394 * Register the EMT R0 logger instance for VCPU 0.
395 */
396 PVMCPU pVCpu = &pVM->aCpus[0];
397
398 PVMMR0LOGGER pR0Logger = pVCpu->vmm.s.pR0LoggerR0;
399 if (pR0Logger)
400 {
401# if 0 /* testing of the logger. */
402 LogCom(("vmmR0InitVM: before %p\n", RTLogDefaultInstance()));
403 LogCom(("vmmR0InitVM: pfnFlush=%p actual=%p\n", pR0Logger->Logger.pfnFlush, vmmR0LoggerFlush));
404 LogCom(("vmmR0InitVM: pfnLogger=%p actual=%p\n", pR0Logger->Logger.pfnLogger, vmmR0LoggerWrapper));
405 LogCom(("vmmR0InitVM: offScratch=%d fFlags=%#x fDestFlags=%#x\n", pR0Logger->Logger.offScratch, pR0Logger->Logger.fFlags, pR0Logger->Logger.fDestFlags));
406
407 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
408 LogCom(("vmmR0InitVM: after %p reg\n", RTLogDefaultInstance()));
409 RTLogSetDefaultInstanceThread(NULL, pVM->pSession);
410 LogCom(("vmmR0InitVM: after %p dereg\n", RTLogDefaultInstance()));
411
412 pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
413 LogCom(("vmmR0InitVM: returned successfully from direct logger call.\n"));
414 pR0Logger->Logger.pfnFlush(&pR0Logger->Logger);
415 LogCom(("vmmR0InitVM: returned successfully from direct flush call.\n"));
416
417 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
418 LogCom(("vmmR0InitVM: after %p reg2\n", RTLogDefaultInstance()));
419 pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
420 LogCom(("vmmR0InitVM: returned successfully from direct logger call (2). offScratch=%d\n", pR0Logger->Logger.offScratch));
421 RTLogSetDefaultInstanceThread(NULL, pVM->pSession);
422 LogCom(("vmmR0InitVM: after %p dereg2\n", RTLogDefaultInstance()));
423
424 RTLogLoggerEx(&pR0Logger->Logger, 0, ~0U, "hello ring-0 logger (RTLogLoggerEx)\n");
425 LogCom(("vmmR0InitVM: RTLogLoggerEx returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));
426
427 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
428 RTLogPrintf("hello ring-0 logger (RTLogPrintf)\n");
429 LogCom(("vmmR0InitVM: RTLogPrintf returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));
430# endif
431 Log(("Switching to per-thread logging instance %p (key=%p)\n", &pR0Logger->Logger, pVM->pSession));
432 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
433 pR0Logger->fRegistered = true;
434 }
435#endif /* LOG_ENABLED */
436
437 /*
438 * Check whether the host supports high-resolution timers.
439 */
440 if ( pVM->vmm.s.fUsePeriodicPreemptionTimers
441 && !RTTimerCanDoHighResolution())
442 pVM->vmm.s.fUsePeriodicPreemptionTimers = false;
443
444 /*
445 * Initialize the per VM data for GVMM and GMM.
446 */
447 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
448 rc = GVMMR0InitVM(pGVM);
449// if (RT_SUCCESS(rc))
450// rc = GMMR0InitPerVMData(pVM);
451 if (RT_SUCCESS(rc))
452 {
453 /*
454 * Init HM, CPUM and PGM (Darwin only).
455 */
456 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
457 rc = HMR0InitVM(pVM);
458 if (RT_SUCCESS(rc))
459 VMM_CHECK_SMAP_CHECK2(pVM, rc = VERR_VMM_RING0_ASSERTION); /* CPUMR0InitVM will otherwise panic the host */
460 if (RT_SUCCESS(rc))
461 {
462 rc = CPUMR0InitVM(pVM);
463 if (RT_SUCCESS(rc))
464 {
465 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
466#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
467 rc = PGMR0DynMapInitVM(pVM);
468#endif
469 if (RT_SUCCESS(rc))
470 {
471 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
472 rc = EMR0InitVM(pGVM, pVM);
473 if (RT_SUCCESS(rc))
474 {
475 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
476#ifdef VBOX_WITH_PCI_PASSTHROUGH
477 rc = PciRawR0InitVM(pGVM, pVM);
478#endif
479 if (RT_SUCCESS(rc))
480 {
481 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
482 rc = GIMR0InitVM(pVM);
483 if (RT_SUCCESS(rc))
484 {
485 VMM_CHECK_SMAP_CHECK2(pVM, rc = VERR_VMM_RING0_ASSERTION);
486 if (RT_SUCCESS(rc))
487 {
488 GVMMR0DoneInitVM(pGVM);
489
490 /*
491 * Collect a bit of info for the VM release log.
492 */
493 pVM->vmm.s.fIsPreemptPendingApiTrusty = RTThreadPreemptIsPendingTrusty();
494 pVM->vmm.s.fIsPreemptPossible = RTThreadPreemptIsPossible();
495
496 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
497 return rc;
498 }
499
500 /* bail out */
501 GIMR0TermVM(pVM);
502 }
503#ifdef VBOX_WITH_PCI_PASSTHROUGH
504 PciRawR0TermVM(pGVM, pVM);
505#endif
506 }
507 }
508 }
509 }
510 HMR0TermVM(pVM);
511 }
512 }
513
514 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pVM->pSession);
515 return rc;
516}
517
518
519/**
520 * Does EMT-specific VM initialization.
521 *
522 * @returns VBox status code.
523 * @param pGVM The ring-0 VM structure.
524 * @param pVM The cross context VM structure.
525 * @param idCpu The EMT that's calling.
526 */
527static int vmmR0InitVMEmt(PGVM pGVM, PVM pVM, VMCPUID idCpu)
528{
529 /* Paranoia (caller checked these already). */
530 AssertReturn(idCpu < pGVM->cCpus, VERR_INVALID_CPU_ID);
531 AssertReturn(pGVM->aCpus[idCpu].hEMT == RTThreadNativeSelf(), VERR_INVALID_CPU_ID);
532
533#ifdef LOG_ENABLED
534 /*
535 * Registration of ring 0 loggers.
536 */
537 PVMCPU pVCpu = &pVM->aCpus[idCpu];
538 PVMMR0LOGGER pR0Logger = pVCpu->vmm.s.pR0LoggerR0;
539 if ( pR0Logger
540 && !pR0Logger->fRegistered)
541 {
542 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
543 pR0Logger->fRegistered = true;
544 }
545#endif
546 RT_NOREF(pVM);
547
548 return VINF_SUCCESS;
549}
550
551
552
553/**
554 * Terminates the R0 bits for a particular VM instance.
555 *
556 * This is normally called by ring-3 as part of the VM termination process, but
557 * may alternatively be called during the support driver session cleanup when
558 * the VM object is destroyed (see GVMM).
559 *
560 * @returns VBox status code.
561 *
562 * @param pGVM The global (ring-0) VM structure.
563 * @param pVM The cross context VM structure.
564 * @param idCpu Set to 0 if EMT(0) or NIL_VMCPUID if session cleanup
565 * thread.
566 * @thread EMT(0) or session clean up thread.
567 */
568VMMR0_INT_DECL(int) VMMR0TermVM(PGVM pGVM, PVM pVM, VMCPUID idCpu)
569{
570 /*
571 * Check EMT(0) claim if we're called from userland.
572 */
573 if (idCpu != NIL_VMCPUID)
574 {
575 AssertReturn(idCpu == 0, VERR_INVALID_CPU_ID);
576 int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
577 if (RT_FAILURE(rc))
578 return rc;
579 }
580
581#ifdef VBOX_WITH_PCI_PASSTHROUGH
582 PciRawR0TermVM(pGVM, pVM);
583#endif
584
585 /*
586 * Tell GVMM what we're up to and check that we only do this once.
587 */
588 if (GVMMR0DoingTermVM(pGVM))
589 {
590 GIMR0TermVM(pVM);
591
592 /** @todo I wish to call PGMR0PhysFlushHandyPages(pVM, &pVM->aCpus[idCpu])
593 * here to make sure we don't leak any shared pages if we crash... */
594#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
595 PGMR0DynMapTermVM(pVM);
596#endif
597 HMR0TermVM(pVM);
598 }
599
600 /*
601 * Deregister the logger.
602 */
603 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pVM->pSession);
604 return VINF_SUCCESS;
605}
606
607
608/**
609 * VMM ring-0 thread-context callback.
610 *
611 * This does common HM state updating and calls the HM-specific thread-context
612 * callback.
613 *
614 * @param enmEvent The thread-context event.
615 * @param pvUser Opaque pointer to the VMCPU.
616 *
617 * @thread EMT(pvUser)
618 */
619static DECLCALLBACK(void) vmmR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, void *pvUser)
620{
621 PVMCPU pVCpu = (PVMCPU)pvUser;
622
623 switch (enmEvent)
624 {
625 case RTTHREADCTXEVENT_IN:
626 {
627 /*
628 * Linux may call us with preemption enabled (really!) but technically we
629 * cannot get preempted here, otherwise we end up in an infinite recursion
630 * scenario (i.e. preempted in resume hook -> preempt hook -> resume hook...
631 * ad infinitum). Let's just disable preemption for now...
632 */
633 /** @todo r=bird: I don't believe the above. The linux code is clearly enabling
634 * preemption after doing the callout (one or two functions up the
635 * call chain). */
636 /** @todo r=ramshankar: See @bugref{5313#c30}. */
637 RTTHREADPREEMPTSTATE ParanoidPreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
638 RTThreadPreemptDisable(&ParanoidPreemptState);
639
640 /* We need to update the VCPU <-> host CPU mapping. */
641 RTCPUID idHostCpu;
642 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
643 pVCpu->iHostCpuSet = iHostCpuSet;
644 ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
645
646 /* In the very unlikely event that the GIP delta for the CPU we're
647 rescheduled on needs calculating, try to force a return to ring-3.
648 We unfortunately cannot do the measurements right here. */
649 if (RT_UNLIKELY(SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
650 VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
651
652 /* Invoke the HM-specific thread-context callback. */
653 HMR0ThreadCtxCallback(enmEvent, pvUser);
654
655 /* Restore preemption. */
656 RTThreadPreemptRestore(&ParanoidPreemptState);
657 break;
658 }
659
660 case RTTHREADCTXEVENT_OUT:
661 {
662 /* Invoke the HM-specific thread-context callback. */
663 HMR0ThreadCtxCallback(enmEvent, pvUser);
664
665 /*
666 * Sigh. See VMMGetCpu() used by VMCPU_ASSERT_EMT(). We cannot let several VCPUs
667 * have the same host CPU associated with them.
668 */
669 pVCpu->iHostCpuSet = UINT32_MAX;
670 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
671 break;
672 }
673
674 default:
675 /* Invoke the HM-specific thread-context callback. */
676 HMR0ThreadCtxCallback(enmEvent, pvUser);
677 break;
678 }
679}
680
681
682/**
683 * Creates thread switching hook for the current EMT thread.
684 *
685 * This is called by GVMMR0CreateVM and GVMMR0RegisterVCpu. If the host
686 * platform does not implement switcher hooks, no hooks will be created and the
687 * member set to NIL_RTTHREADCTXHOOK.
688 *
689 * @returns VBox status code.
690 * @param pVCpu The cross context virtual CPU structure.
691 * @thread EMT(pVCpu)
692 */
693VMMR0_INT_DECL(int) VMMR0ThreadCtxHookCreateForEmt(PVMCPU pVCpu)
694{
695 VMCPU_ASSERT_EMT(pVCpu);
696 Assert(pVCpu->vmm.s.hCtxHook == NIL_RTTHREADCTXHOOK);
697
698#if 1 /* To disable this stuff change to zero. */
699 int rc = RTThreadCtxHookCreate(&pVCpu->vmm.s.hCtxHook, 0, vmmR0ThreadCtxCallback, pVCpu);
700 if (RT_SUCCESS(rc))
701 return rc;
702#else
703 RT_NOREF(vmmR0ThreadCtxCallback);
704 int rc = VERR_NOT_SUPPORTED;
705#endif
706
707 pVCpu->vmm.s.hCtxHook = NIL_RTTHREADCTXHOOK;
708 if (rc == VERR_NOT_SUPPORTED)
709 return VINF_SUCCESS;
710
711 LogRelMax(32, ("RTThreadCtxHookCreate failed! rc=%Rrc pVCpu=%p idCpu=%RU32\n", rc, pVCpu, pVCpu->idCpu));
712 return VINF_SUCCESS; /* Just ignore it, we can live without context hooks. */
713}
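
/*
 * Editorial sketch (not part of the original source): the intended lifecycle
 * of the thread-context hook around a guest-execution burst on the EMT. The
 * enable/disable calls mirror what the VMMR0_DO_HM_RUN path and
 * VMMR0ThreadCtxHookDisable do; vmmR0CtxHookLifecycleSketch is a made-up name.
 */
#if 0 /* illustration only */
static void vmmR0CtxHookLifecycleSketch(PVMCPU pVCpu)
{
    VMMR0ThreadCtxHookCreateForEmt(pVCpu);            /* once per EMT; may legitimately leave a NIL handle */
    if (pVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
        RTThreadCtxHookEnable(pVCpu->vmm.s.hCtxHook); /* arm before entering HM context */
    /* ... guest code runs; vmmR0ThreadCtxCallback fires on preempt/resume ... */
    VMMR0ThreadCtxHookDisable(pVCpu);                 /* disarm (and clear idHostCpu) before returning to ring-3 */
    VMMR0ThreadCtxHookDestroyForEmt(pVCpu);           /* on EMT teardown */
}
#endif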
714
715
716/**
717 * Destroys the thread switching hook for the specified VCPU.
718 *
719 * @param pVCpu The cross context virtual CPU structure.
720 * @remarks Can be called from any thread.
721 */
722VMMR0_INT_DECL(void) VMMR0ThreadCtxHookDestroyForEmt(PVMCPU pVCpu)
723{
724 int rc = RTThreadCtxHookDestroy(pVCpu->vmm.s.hCtxHook);
725 AssertRC(rc);
726 pVCpu->vmm.s.hCtxHook = NIL_RTTHREADCTXHOOK;
727}
728
729
730/**
731 * Disables the thread switching hook for this VCPU (if we got one).
732 *
733 * @param pVCpu The cross context virtual CPU structure.
734 * @thread EMT(pVCpu)
735 *
736 * @remarks This also clears VMCPU::idHostCpu, so the mapping is invalid after
737 * this call. This means you have to be careful with what you do!
738 */
739VMMR0_INT_DECL(void) VMMR0ThreadCtxHookDisable(PVMCPU pVCpu)
740{
741 /*
742 * Clear the VCPU <-> host CPU mapping as we've left HM context.
743 * @bugref{7726#c19} explains the need for this trick:
744 *
745 * hmR0VmxCallRing3Callback/hmR0SvmCallRing3Callback &
746 * hmR0VmxLeaveSession/hmR0SvmLeaveSession disable context hooks during
747 * longjmp & normal return to ring-3, which opens a window where we may be
748 * rescheduled without changing VMCPU::idHostCpu, causing confusion if
749 * the CPU starts executing a different EMT. Both functions first disable
750 * preemption and then call HMR0LeaveCpu, which invalidates idHostCpu, leaving
751 * an opening for getting preempted.
752 */
753 /** @todo Make HM not need this API! Then we could leave the hooks enabled
754 * all the time. */
755 /** @todo move this into the context hook disabling if(). */
756 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
757
758 /*
759 * Disable the context hook, if we got one.
760 */
761 if (pVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
762 {
763 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
764 int rc = RTThreadCtxHookDisable(pVCpu->vmm.s.hCtxHook);
765 AssertRC(rc);
766 }
767}
768
769
770/**
771 * Internal version of VMMR0ThreadCtxHookIsEnabled.
772 *
773 * @returns true if registered, false otherwise.
774 * @param pVCpu The cross context virtual CPU structure.
775 */
776DECLINLINE(bool) vmmR0ThreadCtxHookIsEnabled(PVMCPU pVCpu)
777{
778 return RTThreadCtxHookIsEnabled(pVCpu->vmm.s.hCtxHook);
779}
780
781
782/**
783 * Whether thread-context hooks are registered for this VCPU.
784 *
785 * @returns true if registered, false otherwise.
786 * @param pVCpu The cross context virtual CPU structure.
787 */
788VMMR0_INT_DECL(bool) VMMR0ThreadCtxHookIsEnabled(PVMCPU pVCpu)
789{
790 return vmmR0ThreadCtxHookIsEnabled(pVCpu);
791}
792
793
794#ifdef VBOX_WITH_STATISTICS
795/**
796 * Record return code statistics.
797 * @param pVM The cross context VM structure.
798 * @param pVCpu The cross context virtual CPU structure.
799 * @param rc The status code.
800 */
801static void vmmR0RecordRC(PVM pVM, PVMCPU pVCpu, int rc)
802{
803 /*
804 * Collect statistics.
805 */
806 switch (rc)
807 {
808 case VINF_SUCCESS:
809 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetNormal);
810 break;
811 case VINF_EM_RAW_INTERRUPT:
812 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterrupt);
813 break;
814 case VINF_EM_RAW_INTERRUPT_HYPER:
815 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptHyper);
816 break;
817 case VINF_EM_RAW_GUEST_TRAP:
818 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGuestTrap);
819 break;
820 case VINF_EM_RAW_RING_SWITCH:
821 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitch);
822 break;
823 case VINF_EM_RAW_RING_SWITCH_INT:
824 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitchInt);
825 break;
826 case VINF_EM_RAW_STALE_SELECTOR:
827 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetStaleSelector);
828 break;
829 case VINF_EM_RAW_IRET_TRAP:
830 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIRETTrap);
831 break;
832 case VINF_IOM_R3_IOPORT_READ:
833 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIORead);
834 break;
835 case VINF_IOM_R3_IOPORT_WRITE:
836 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOWrite);
837 break;
838 case VINF_IOM_R3_IOPORT_COMMIT_WRITE:
839 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOCommitWrite);
840 break;
841 case VINF_IOM_R3_MMIO_READ:
842 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIORead);
843 break;
844 case VINF_IOM_R3_MMIO_WRITE:
845 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOWrite);
846 break;
847 case VINF_IOM_R3_MMIO_COMMIT_WRITE:
848 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOCommitWrite);
849 break;
850 case VINF_IOM_R3_MMIO_READ_WRITE:
851 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOReadWrite);
852 break;
853 case VINF_PATM_HC_MMIO_PATCH_READ:
854 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchRead);
855 break;
856 case VINF_PATM_HC_MMIO_PATCH_WRITE:
857 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchWrite);
858 break;
859 case VINF_CPUM_R3_MSR_READ:
860 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMSRRead);
861 break;
862 case VINF_CPUM_R3_MSR_WRITE:
863 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMSRWrite);
864 break;
865 case VINF_EM_RAW_EMULATE_INSTR:
866 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetEmulate);
867 break;
868 case VINF_EM_RAW_EMULATE_IO_BLOCK:
869 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOBlockEmulate);
870 break;
871 case VINF_PATCH_EMULATE_INSTR:
872 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchEmulate);
873 break;
874 case VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT:
875 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetLDTFault);
876 break;
877 case VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT:
878 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGDTFault);
879 break;
880 case VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT:
881 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIDTFault);
882 break;
883 case VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT:
884 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTSSFault);
885 break;
886 case VINF_CSAM_PENDING_ACTION:
887 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCSAMTask);
888 break;
889 case VINF_PGM_SYNC_CR3:
890 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetSyncCR3);
891 break;
892 case VINF_PATM_PATCH_INT3:
893 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchInt3);
894 break;
895 case VINF_PATM_PATCH_TRAP_PF:
896 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchPF);
897 break;
898 case VINF_PATM_PATCH_TRAP_GP:
899 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchGP);
900 break;
901 case VINF_PATM_PENDING_IRQ_AFTER_IRET:
902 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchIretIRQ);
903 break;
904 case VINF_EM_RESCHEDULE_REM:
905 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRescheduleREM);
906 break;
907 case VINF_EM_RAW_TO_R3:
908 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Total);
909 if (VM_FF_IS_PENDING(pVM, VM_FF_TM_VIRTUAL_SYNC))
910 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3TMVirt);
911 else if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_NEED_HANDY_PAGES))
912 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3HandyPages);
913 else if (VM_FF_IS_PENDING(pVM, VM_FF_PDM_QUEUES))
914 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3PDMQueues);
915 else if (VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS))
916 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Rendezvous);
917 else if (VM_FF_IS_PENDING(pVM, VM_FF_PDM_DMA))
918 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3DMA);
919 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TIMER))
920 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Timer);
921 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PDM_CRITSECT))
922 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3CritSect);
923 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TO_R3))
924 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3FF);
925 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_IEM))
926 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Iem);
927 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_IOM))
928 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Iom);
929 else
930 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Unknown);
931 break;
932
933 case VINF_EM_RAW_TIMER_PENDING:
934 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTimerPending);
935 break;
936 case VINF_EM_RAW_INTERRUPT_PENDING:
937 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptPending);
938 break;
939 case VINF_VMM_CALL_HOST:
940 switch (pVCpu->vmm.s.enmCallRing3Operation)
941 {
942 case VMMCALLRING3_PDM_CRIT_SECT_ENTER:
943 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPDMCritSectEnter);
944 break;
945 case VMMCALLRING3_PDM_LOCK:
946 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPDMLock);
947 break;
948 case VMMCALLRING3_PGM_POOL_GROW:
949 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMPoolGrow);
950 break;
951 case VMMCALLRING3_PGM_LOCK:
952 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMLock);
953 break;
954 case VMMCALLRING3_PGM_MAP_CHUNK:
955 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMMapChunk);
956 break;
957 case VMMCALLRING3_PGM_ALLOCATE_HANDY_PAGES:
958 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMAllocHandy);
959 break;
960 case VMMCALLRING3_REM_REPLAY_HANDLER_NOTIFICATIONS:
961 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallRemReplay);
962 break;
963 case VMMCALLRING3_VMM_LOGGER_FLUSH:
964 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallLogFlush);
965 break;
966 case VMMCALLRING3_VM_SET_ERROR:
967 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallVMSetError);
968 break;
969 case VMMCALLRING3_VM_SET_RUNTIME_ERROR:
970 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallVMSetRuntimeError);
971 break;
972 case VMMCALLRING3_VM_R0_ASSERTION:
973 default:
974 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCallRing3);
975 break;
976 }
977 break;
978 case VINF_PATM_DUPLICATE_FUNCTION:
979 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPATMDuplicateFn);
980 break;
981 case VINF_PGM_CHANGE_MODE:
982 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMChangeMode);
983 break;
984 case VINF_PGM_POOL_FLUSH_PENDING:
985 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMFlushPending);
986 break;
987 case VINF_EM_PENDING_REQUEST:
988 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPendingRequest);
989 break;
990 case VINF_EM_HM_PATCH_TPR_INSTR:
991 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchTPR);
992 break;
993 default:
994 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMisc);
995 break;
996 }
997}
998#endif /* VBOX_WITH_STATISTICS */
999
1000
1001/**
1002 * The Ring 0 entry point, called by the fast-ioctl path.
1003 *
1004 * @param pGVM The global (ring-0) VM structure.
1005 * @param pVM The cross context VM structure.
1006 * The return code is stored in pVM->vmm.s.iLastGZRc.
1007 * @param idCpu The Virtual CPU ID of the calling EMT.
1008 * @param enmOperation Which operation to execute.
1009 * @remarks Assume called with interrupts _enabled_.
1010 */
1011VMMR0DECL(void) VMMR0EntryFast(PGVM pGVM, PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation)
1012{
1013 /*
1014 * Validation.
1015 */
1016 if ( idCpu < pGVM->cCpus
1017 && pGVM->cCpus == pVM->cCpus)
1018 { /*likely*/ }
1019 else
1020 {
1021 SUPR0Printf("VMMR0EntryFast: Bad idCpu=%#x cCpus=%#x/%#x\n", idCpu, pGVM->cCpus, pVM->cCpus);
1022 return;
1023 }
1024
1025 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
1026 PVMCPU pVCpu = &pVM->aCpus[idCpu];
1027 RTNATIVETHREAD const hNativeThread = RTThreadNativeSelf();
1028 if (RT_LIKELY( pGVCpu->hEMT == hNativeThread
1029 && pVCpu->hNativeThreadR0 == hNativeThread))
1030 { /* likely */ }
1031 else
1032 {
1033 SUPR0Printf("VMMR0EntryFast: Bad thread idCpu=%#x hNativeSelf=%p pGVCpu->hEmt=%p pVCpu->hNativeThreadR0=%p\n",
1034 idCpu, hNativeThread, pGVCpu->hEMT, pVCpu->hNativeThreadR0);
1035 return;
1036 }
1037
1038 /*
1039 * SMAP fun.
1040 */
1041 VMM_CHECK_SMAP_SETUP();
1042 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1043
1044 /*
1045 * Perform requested operation.
1046 */
1047 switch (enmOperation)
1048 {
1049 /*
1050 * Switch to GC and run guest raw mode code.
1051 * Disable interrupts before doing the world switch.
1052 */
1053 case VMMR0_DO_RAW_RUN:
1054 {
1055#ifdef VBOX_WITH_RAW_MODE
1056# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
1057 /* Some safety precautions first. */
1058 if (RT_UNLIKELY(!PGMGetHyperCR3(pVCpu)))
1059 {
1060 pVCpu->vmm.s.iLastGZRc = VERR_PGM_NO_CR3_SHADOW_ROOT;
1061 break;
1062 }
1063# endif
1064 if (RT_SUCCESS(g_rcRawModeUsability))
1065 { /* likely */ }
1066 else
1067 {
1068 pVCpu->vmm.s.iLastGZRc = g_rcRawModeUsability;
1069 break;
1070 }
1071
1072 /*
1073 * Disable preemption.
1074 */
1075 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
1076 RTThreadPreemptDisable(&PreemptState);
1077
1078 /*
1079 * Get the host CPU identifiers, make sure they are valid and that
1080 * we've got a TSC delta for the CPU.
1081 */
1082 RTCPUID idHostCpu;
1083 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
1084 if (RT_LIKELY( iHostCpuSet < RTCPUSET_MAX_CPUS
1085 && SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
1086 {
1087 /*
1088 * Commit the CPU identifiers and update the periodic preemption timer if it's active.
1089 */
1090# ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
1091 CPUMR0SetLApic(pVCpu, iHostCpuSet);
1092# endif
1093 pVCpu->iHostCpuSet = iHostCpuSet;
1094 ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
1095
1096 if (pVM->vmm.s.fUsePeriodicPreemptionTimers)
1097 GVMMR0SchedUpdatePeriodicPreemptionTimer(pVM, pVCpu->idHostCpu, TMCalcHostTimerFrequency(pVM, pVCpu));
1098
1099 /*
1100 * We might need to disable VT-x if the active switcher turns off paging.
1101 */
1102 bool fVTxDisabled;
1103 int rc = HMR0EnterSwitcher(pVM, pVM->vmm.s.enmSwitcher, &fVTxDisabled);
1104 if (RT_SUCCESS(rc))
1105 {
1106 /*
1107 * Disable interrupts and run raw-mode code. The loop is for efficiently
1108 * dispatching tracepoints that fired in raw-mode context.
1109 */
1110 RTCCUINTREG uFlags = ASMIntDisableFlags();
1111
1112 for (;;)
1113 {
1114 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);
1115 TMNotifyStartOfExecution(pVCpu);
1116
1117 rc = pVM->vmm.s.pfnR0ToRawMode(pVM);
1118 pVCpu->vmm.s.iLastGZRc = rc;
1119
1120 TMNotifyEndOfExecution(pVCpu);
1121 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
1122
1123 if (rc != VINF_VMM_CALL_TRACER)
1124 break;
1125 SUPR0TracerUmodProbeFire(pVM->pSession, &pVCpu->vmm.s.TracerCtx);
1126 }
1127
1128 /*
1129 * Re-enable VT-x before we dispatch any pending host interrupts and
1130 * re-enable interrupts.
1131 */
1132 HMR0LeaveSwitcher(pVM, fVTxDisabled);
1133
1134 if ( rc == VINF_EM_RAW_INTERRUPT
1135 || rc == VINF_EM_RAW_INTERRUPT_HYPER)
1136 TRPMR0DispatchHostInterrupt(pVM);
1137
1138 ASMSetFlags(uFlags);
1139
1140 /* Fire dtrace probe and collect statistics. */
1141 VBOXVMM_R0_VMM_RETURN_TO_RING3_RC(pVCpu, CPUMQueryGuestCtxPtr(pVCpu), rc);
1142# ifdef VBOX_WITH_STATISTICS
1143 STAM_COUNTER_INC(&pVM->vmm.s.StatRunRC);
1144 vmmR0RecordRC(pVM, pVCpu, rc);
1145# endif
1146 }
1147 else
1148 pVCpu->vmm.s.iLastGZRc = rc;
1149
1150 /*
1151 * Invalidate the host CPU identifiers as we restore preemption.
1152 */
1153 pVCpu->iHostCpuSet = UINT32_MAX;
1154 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1155
1156 RTThreadPreemptRestore(&PreemptState);
1157 }
1158 /*
1159 * Invalid CPU set index or TSC delta in need of measuring.
1160 */
1161 else
1162 {
1163 RTThreadPreemptRestore(&PreemptState);
1164 if (iHostCpuSet < RTCPUSET_MAX_CPUS)
1165 {
1166 int rc = SUPR0TscDeltaMeasureBySetIndex(pVM->pSession, iHostCpuSet, 0 /*fFlags*/,
1167 2 /*cMsWaitRetry*/, 5*RT_MS_1SEC /*cMsWaitThread*/,
1168 0 /*default cTries*/);
1169 if (RT_SUCCESS(rc) || rc == VERR_CPU_OFFLINE)
1170 pVCpu->vmm.s.iLastGZRc = VINF_EM_RAW_TO_R3;
1171 else
1172 pVCpu->vmm.s.iLastGZRc = rc;
1173 }
1174 else
1175 pVCpu->vmm.s.iLastGZRc = VERR_INVALID_CPU_INDEX;
1176 }
1177
1178#else /* !VBOX_WITH_RAW_MODE */
1179 pVCpu->vmm.s.iLastGZRc = VERR_RAW_MODE_NOT_SUPPORTED;
1180#endif
1181 break;
1182 }
1183
1184 /*
1185 * Run guest code using the available hardware acceleration technology.
1186 */
1187 case VMMR0_DO_HM_RUN:
1188 {
1189 /*
1190 * Disable preemption.
1191 */
1192 Assert(!vmmR0ThreadCtxHookIsEnabled(pVCpu));
1193 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
1194 RTThreadPreemptDisable(&PreemptState);
1195
1196 /*
1197 * Get the host CPU identifiers, make sure they are valid and that
1198 * we've got a TSC delta for the CPU.
1199 */
1200 RTCPUID idHostCpu;
1201 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
1202 if (RT_LIKELY( iHostCpuSet < RTCPUSET_MAX_CPUS
1203 && SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
1204 {
1205 pVCpu->iHostCpuSet = iHostCpuSet;
1206 ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
1207
1208 /*
1209 * Update the periodic preemption timer if it's active.
1210 */
1211 if (pVM->vmm.s.fUsePeriodicPreemptionTimers)
1212 GVMMR0SchedUpdatePeriodicPreemptionTimer(pVM, pVCpu->idHostCpu, TMCalcHostTimerFrequency(pVM, pVCpu));
1213 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1214
1215#ifdef VMM_R0_TOUCH_FPU
1216 /*
1217 * Make sure we've got the FPU state loaded so we don't need to clear
1218 * CR0.TS and get out of sync with the host kernel when loading the guest
1219 * FPU state. See @ref sec_cpum_fpu (CPUM.cpp) and @bugref{4053}.
1220 */
1221 CPUMR0TouchHostFpu();
1222#endif
1223 int rc;
1224 bool fPreemptRestored = false;
1225 if (!HMR0SuspendPending())
1226 {
1227 /*
1228 * Enable the context switching hook.
1229 */
1230 if (pVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1231 {
1232 Assert(!RTThreadCtxHookIsEnabled(pVCpu->vmm.s.hCtxHook));
1233 int rc2 = RTThreadCtxHookEnable(pVCpu->vmm.s.hCtxHook); AssertRC(rc2);
1234 }
1235
1236 /*
1237 * Enter HM context.
1238 */
1239 rc = HMR0Enter(pVCpu);
1240 if (RT_SUCCESS(rc))
1241 {
1242 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
1243
1244 /*
1245 * When preemption hooks are in place, enable preemption now that
1246 * we're in HM context.
1247 */
1248 if (vmmR0ThreadCtxHookIsEnabled(pVCpu))
1249 {
1250 fPreemptRestored = true;
1251 RTThreadPreemptRestore(&PreemptState);
1252 }
1253
1254 /*
1255 * Setup the longjmp machinery and execute guest code (calls HMR0RunGuestCode).
1256 */
1257 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1258 rc = vmmR0CallRing3SetJmp(&pVCpu->vmm.s.CallRing3JmpBufR0, HMR0RunGuestCode, pVM, pVCpu);
1259 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1260
1261 /*
1262 * Assert sanity on the way out. Using manual assertion code here, as normal
1263 * assertions would panic the host since we're outside the setjmp/longjmp zone.
1264 */
1265 if (RT_UNLIKELY( VMCPU_GET_STATE(pVCpu) != VMCPUSTATE_STARTED_HM
1266 && RT_SUCCESS_NP(rc) && rc != VINF_VMM_CALL_HOST ))
1267 {
1268 pVM->vmm.s.szRing0AssertMsg1[0] = '\0';
1269 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2),
1270 "Got VMCPU state %d expected %d.\n", VMCPU_GET_STATE(pVCpu), VMCPUSTATE_STARTED_HM);
1271 rc = VERR_VMM_WRONG_HM_VMCPU_STATE;
1272 }
1273 /** @todo Get rid of this. HM shouldn't disable the context hook. */
1274 else if (RT_UNLIKELY(vmmR0ThreadCtxHookIsEnabled(pVCpu)))
1275 {
1276 pVM->vmm.s.szRing0AssertMsg1[0] = '\0';
1277 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2),
1278 "Thread-context hooks still enabled! VCPU=%p Id=%u rc=%d.\n", pVCpu, pVCpu->idCpu, rc);
1279 rc = VERR_INVALID_STATE;
1280 }
1281
1282 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
1283 }
1284 STAM_COUNTER_INC(&pVM->vmm.s.StatRunRC);
1285
1286 /*
1287 * Invalidate the host CPU identifiers before we disable the context
1288 * hook / restore preemption.
1289 */
1290 pVCpu->iHostCpuSet = UINT32_MAX;
1291 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1292
1293 /*
1294 * Disable context hooks. Due to unresolved cleanup issues, we
1295 * cannot leave the hooks enabled when we return to ring-3.
1296 *
1297 * Note! At the moment HM may also have disabled the hook
1298 * when we get here, but the IPRT API handles that.
1299 */
1300 if (pVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1301 {
1302 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1303 RTThreadCtxHookDisable(pVCpu->vmm.s.hCtxHook);
1304 }
1305 }
1306 /*
1307 * The system is about to go into suspend mode; go back to ring 3.
1308 */
1309 else
1310 {
1311 rc = VINF_EM_RAW_INTERRUPT;
1312 pVCpu->iHostCpuSet = UINT32_MAX;
1313 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1314 }
1315
1316 /** @todo When HM stops messing with the context hook state, we'll disable
1317 * preemption again before the RTThreadCtxHookDisable call. */
1318 if (!fPreemptRestored)
1319 RTThreadPreemptRestore(&PreemptState);
1320
1321 pVCpu->vmm.s.iLastGZRc = rc;
1322
1323 /* Fire dtrace probe and collect statistics. */
1324 VBOXVMM_R0_VMM_RETURN_TO_RING3_HM(pVCpu, CPUMQueryGuestCtxPtr(pVCpu), rc);
1325#ifdef VBOX_WITH_STATISTICS
1326 vmmR0RecordRC(pVM, pVCpu, rc);
1327#endif
1328 }
1329 /*
1330 * Invalid CPU set index or TSC delta in need of measuring.
1331 */
1332 else
1333 {
1334 pVCpu->iHostCpuSet = UINT32_MAX;
1335 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1336 RTThreadPreemptRestore(&PreemptState);
1337 if (iHostCpuSet < RTCPUSET_MAX_CPUS)
1338 {
1339 int rc = SUPR0TscDeltaMeasureBySetIndex(pVM->pSession, iHostCpuSet, 0 /*fFlags*/,
1340 2 /*cMsWaitRetry*/, 5*RT_MS_1SEC /*cMsWaitThread*/,
1341 0 /*default cTries*/);
1342 if (RT_SUCCESS(rc) || rc == VERR_CPU_OFFLINE)
1343 pVCpu->vmm.s.iLastGZRc = VINF_EM_RAW_TO_R3;
1344 else
1345 pVCpu->vmm.s.iLastGZRc = rc;
1346 }
1347 else
1348 pVCpu->vmm.s.iLastGZRc = VERR_INVALID_CPU_INDEX;
1349 }
1350 break;
1351 }
1352
1353#ifdef VBOX_WITH_NEM_R0
1354# if defined(RT_ARCH_AMD64) && defined(RT_OS_WINDOWS)
1355 case VMMR0_DO_NEM_RUN:
1356 {
1357 /*
1358 * Setup the longjmp machinery and execute guest code (calls NEMR0RunGuestCode).
1359 */
1360 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1361 int rc = vmmR0CallRing3SetJmp2(&pVCpu->vmm.s.CallRing3JmpBufR0, NEMR0RunGuestCode, pGVM, idCpu);
1362 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1363 STAM_COUNTER_INC(&pVM->vmm.s.StatRunRC);
1364
1365 pVCpu->vmm.s.iLastGZRc = rc;
1366
1367 /*
1368 * Fire dtrace probe and collect statistics.
1369 */
1370 VBOXVMM_R0_VMM_RETURN_TO_RING3_NEM(pVCpu, CPUMQueryGuestCtxPtr(pVCpu), rc);
1371# ifdef VBOX_WITH_STATISTICS
1372 vmmR0RecordRC(pVM, pVCpu, rc);
1373# endif
1374 break;
1375 }
1376# endif
1377#endif
1378
1379
1380 /*
1381 * For profiling.
1382 */
1383 case VMMR0_DO_NOP:
1384 pVCpu->vmm.s.iLastGZRc = VINF_SUCCESS;
1385 break;
1386
1387 /*
1388 * Shouldn't happen.
1389 */
1390 default:
1391 AssertMsgFailed(("%#x\n", enmOperation));
1392 pVCpu->vmm.s.iLastGZRc = VERR_NOT_SUPPORTED;
1393 break;
1394 }
1395 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1396}
1397
1398
1399/**
1400 * Validates a session or VM session argument.
1401 *
1402 * @returns true / false accordingly.
1403 * @param pVM The cross context VM structure.
1404 * @param pClaimedSession The session claim to validate.
1405 * @param pSession The session argument.
1406 */
1407DECLINLINE(bool) vmmR0IsValidSession(PVM pVM, PSUPDRVSESSION pClaimedSession, PSUPDRVSESSION pSession)
1408{
1409 /* This must be set! */
1410 if (!pSession)
1411 return false;
1412
1413 /* Only one out of the two. */
1414 if (pVM && pClaimedSession)
1415 return false;
1416 if (pVM)
1417 pClaimedSession = pVM->pSession;
1418 return pClaimedSession == pSession;
1419}
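
/*
 * Editorial sketch (not part of the original source): the two call shapes
 * vmmR0IsValidSession accepts. Variable names are made up for illustration.
 */
#if 0 /* illustration only */
    /* VM-bound request: no separately claimed session; pVM->pSession must match. */
    bool fOkVm   = vmmR0IsValidSession(pVM, NULL, pSession);
    /* VM-less request: the explicitly claimed session must match the caller's. */
    bool fOkNoVm = vmmR0IsValidSession(NULL, pClaimedSession, pSession);
#endif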
1420
1421
1422/**
1423 * VMMR0EntryEx worker function, either called directly or, whenever possible,
1424 * called through a longjmp so we can exit safely on failure.
1425 *
1426 * @returns VBox status code.
1427 * @param pGVM The global (ring-0) VM structure.
1428 * @param pVM The cross context VM structure.
1429 * @param idCpu Virtual CPU ID argument. Must be NIL_VMCPUID if pVM
1430 * is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't.
1431 * @param enmOperation Which operation to execute.
1432 * @param pReqHdr This points to a SUPVMMR0REQHDR packet. Optional.
1433 * The support driver validates this if it's present.
1434 * @param u64Arg Some simple constant argument.
1435 * @param pSession The session of the caller.
1436 *
1437 * @remarks Assume called with interrupts _enabled_.
1438 */
1439static int vmmR0EntryExWorker(PGVM pGVM, PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation,
1440 PSUPVMMR0REQHDR pReqHdr, uint64_t u64Arg, PSUPDRVSESSION pSession)
1441{
1442 /*
1443 * Validate pGVM, pVM and idCpu for consistency and validity.
1444 */
1445 if ( pGVM != NULL
1446 || pVM != NULL)
1447 {
1448 if (RT_LIKELY( RT_VALID_PTR(pGVM)
1449 && RT_VALID_PTR(pVM)
1450 && ((uintptr_t)pVM & PAGE_OFFSET_MASK) == 0))
1451 { /* likely */ }
1452 else
1453 {
1454 SUPR0Printf("vmmR0EntryExWorker: Invalid pGVM=%p and/or pVM=%p! (op=%d)\n", pGVM, pVM, enmOperation);
1455 return VERR_INVALID_POINTER;
1456 }
1457
1458 if (RT_LIKELY(pGVM->pVM == pVM))
1459 { /* likely */ }
1460 else
1461 {
1462 SUPR0Printf("vmmR0EntryExWorker: pVM mismatch: got %p, pGVM->pVM=%p\n", pVM, pGVM->pVM);
1463 return VERR_INVALID_PARAMETER;
1464 }
1465
1466 if (RT_LIKELY(idCpu == NIL_VMCPUID || idCpu < pGVM->cCpus))
1467 { /* likely */ }
1468 else
1469 {
1470 SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu %#x (cCpus=%#x)\n", idCpu, pGVM->cCpus);
1471 return VERR_INVALID_PARAMETER;
1472 }
1473
1474 if (RT_LIKELY( pVM->enmVMState >= VMSTATE_CREATING
1475 && pVM->enmVMState <= VMSTATE_TERMINATED
1476 && pVM->cCpus == pGVM->cCpus
1477 && pVM->pSession == pSession
1478 && pVM->pVMR0 == pVM))
1479 { /* likely */ }
1480 else
1481 {
1482 SUPR0Printf("vmmR0EntryExWorker: Invalid pVM=%p:{.enmVMState=%d, .cCpus=%#x(==%#x), .pSession=%p(==%p), .pVMR0=%p(==%p)}! (op=%d)\n",
1483 pVM, pVM->enmVMState, pVM->cCpus, pGVM->cCpus, pVM->pSession, pSession, pVM->pVMR0, pVM, enmOperation);
1484 return VERR_INVALID_POINTER;
1485 }
1486 }
1487 else if (RT_LIKELY(idCpu == NIL_VMCPUID))
1488 { /* likely */ }
1489 else
1490 {
1491 SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu=%u\n", idCpu);
1492 return VERR_INVALID_PARAMETER;
1493 }
1494
1495 /*
1496 * SMAP fun.
1497 */
1498 VMM_CHECK_SMAP_SETUP();
1499 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
1500
1501 /*
1502 * Process the request.
1503 */
1504 int rc;
1505 switch (enmOperation)
1506 {
1507 /*
1508 * GVM requests
1509 */
1510 case VMMR0_DO_GVMM_CREATE_VM:
1511 if (pGVM == NULL && pVM == NULL && u64Arg == 0 && idCpu == NIL_VMCPUID)
1512 rc = GVMMR0CreateVMReq((PGVMMCREATEVMREQ)pReqHdr, pSession);
1513 else
1514 rc = VERR_INVALID_PARAMETER;
1515 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
1516 break;
1517
1518 case VMMR0_DO_GVMM_DESTROY_VM:
1519 if (pReqHdr == NULL && u64Arg == 0)
1520 rc = GVMMR0DestroyVM(pGVM, pVM);
1521 else
1522 rc = VERR_INVALID_PARAMETER;
1523 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
1524 break;
1525
1526 case VMMR0_DO_GVMM_REGISTER_VMCPU:
1527 if (pGVM != NULL && pVM != NULL)
1528 rc = GVMMR0RegisterVCpu(pGVM, pVM, idCpu);
1529 else
1530 rc = VERR_INVALID_PARAMETER;
1531 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1532 break;
1533
1534 case VMMR0_DO_GVMM_DEREGISTER_VMCPU:
1535 if (pGVM != NULL && pVM != NULL)
1536 rc = GVMMR0DeregisterVCpu(pGVM, pVM, idCpu);
1537 else
1538 rc = VERR_INVALID_PARAMETER;
1539 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1540 break;
1541
1542 case VMMR0_DO_GVMM_SCHED_HALT:
1543 if (pReqHdr)
1544 return VERR_INVALID_PARAMETER;
1545 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1546 rc = GVMMR0SchedHalt(pGVM, pVM, idCpu, u64Arg);
1547 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1548 break;
1549
1550 case VMMR0_DO_GVMM_SCHED_WAKE_UP:
1551 if (pReqHdr || u64Arg)
1552 return VERR_INVALID_PARAMETER;
1553 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1554 rc = GVMMR0SchedWakeUp(pGVM, pVM, idCpu);
1555 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1556 break;
1557
1558 case VMMR0_DO_GVMM_SCHED_POKE:
1559 if (pReqHdr || u64Arg)
1560 return VERR_INVALID_PARAMETER;
1561 rc = GVMMR0SchedPoke(pGVM, pVM, idCpu);
1562 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1563 break;
1564
1565 case VMMR0_DO_GVMM_SCHED_WAKE_UP_AND_POKE_CPUS:
1566 if (u64Arg)
1567 return VERR_INVALID_PARAMETER;
1568 rc = GVMMR0SchedWakeUpAndPokeCpusReq(pGVM, pVM, (PGVMMSCHEDWAKEUPANDPOKECPUSREQ)pReqHdr);
1569 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1570 break;
1571
1572 case VMMR0_DO_GVMM_SCHED_POLL:
1573 if (pReqHdr || u64Arg > 1)
1574 return VERR_INVALID_PARAMETER;
1575 rc = GVMMR0SchedPoll(pGVM, pVM, idCpu, !!u64Arg);
1576 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1577 break;
1578
1579 case VMMR0_DO_GVMM_QUERY_STATISTICS:
1580 if (u64Arg)
1581 return VERR_INVALID_PARAMETER;
1582 rc = GVMMR0QueryStatisticsReq(pGVM, pVM, (PGVMMQUERYSTATISTICSSREQ)pReqHdr, pSession);
1583 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1584 break;
1585
1586 case VMMR0_DO_GVMM_RESET_STATISTICS:
1587 if (u64Arg)
1588 return VERR_INVALID_PARAMETER;
1589 rc = GVMMR0ResetStatisticsReq(pGVM, pVM, (PGVMMRESETSTATISTICSSREQ)pReqHdr, pSession);
1590 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1591 break;
1592
1593 /*
1594 * Initialize the R0 part of a VM instance.
1595 */
1596 case VMMR0_DO_VMMR0_INIT:
1597 rc = vmmR0InitVM(pGVM, pVM, RT_LODWORD(u64Arg), RT_HIDWORD(u64Arg));
1598 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1599 break;
1600
1601 /*
1602 * Does EMT specific ring-0 init.
1603 */
1604 case VMMR0_DO_VMMR0_INIT_EMT:
1605 rc = vmmR0InitVMEmt(pGVM, pVM, idCpu);
1606 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1607 break;
1608
1609 /*
1610 * Terminate the R0 part of a VM instance.
1611 */
1612 case VMMR0_DO_VMMR0_TERM:
1613 rc = VMMR0TermVM(pGVM, pVM, 0 /*idCpu*/);
1614 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1615 break;
1616
1617 /*
1618 * Attempt to enable hm mode and check the current setting.
1619 */
1620 case VMMR0_DO_HM_ENABLE:
1621 rc = HMR0EnableAllCpus(pVM);
1622 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1623 break;
1624
1625 /*
1626 * Setup the hardware accelerated session.
1627 */
1628 case VMMR0_DO_HM_SETUP_VM:
1629 rc = HMR0SetupVM(pVM);
1630 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1631 break;
1632
1633 /*
1634 * Switch to RC to execute Hypervisor function.
1635 */
1636 case VMMR0_DO_CALL_HYPERVISOR:
1637 {
1638#ifdef VBOX_WITH_RAW_MODE
1639 /*
1640 * Validate input / context.
1641 */
1642 if (RT_UNLIKELY(idCpu != 0))
1643 return VERR_INVALID_CPU_ID;
1644 if (RT_UNLIKELY(pVM->cCpus != 1))
1645 return VERR_INVALID_PARAMETER;
1646 PVMCPU pVCpu = &pVM->aCpus[idCpu];
1647# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
1648 if (RT_UNLIKELY(!PGMGetHyperCR3(pVCpu)))
1649 return VERR_PGM_NO_CR3_SHADOW_ROOT;
1650# endif
1651 if (RT_FAILURE(g_rcRawModeUsability))
1652 return g_rcRawModeUsability;
1653
1654 /*
1655 * Disable interrupts.
1656 */
1657 RTCCUINTREG fFlags = ASMIntDisableFlags();
1658
1659 /*
1660 * Get the host CPU identifiers, make sure they are valid and that
1661 * we've got a TSC delta for the CPU.
1662 */
1663 RTCPUID idHostCpu;
1664 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
1665 if (RT_UNLIKELY(iHostCpuSet >= RTCPUSET_MAX_CPUS))
1666 {
1667 ASMSetFlags(fFlags);
1668 return VERR_INVALID_CPU_INDEX;
1669 }
1670 if (RT_UNLIKELY(!SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
1671 {
1672 ASMSetFlags(fFlags);
1673 rc = SUPR0TscDeltaMeasureBySetIndex(pVM->pSession, iHostCpuSet, 0 /*fFlags*/,
1674 2 /*cMsWaitRetry*/, 5*RT_MS_1SEC /*cMsWaitThread*/,
1675 0 /*default cTries*/);
1676 if (RT_FAILURE(rc) && rc != VERR_CPU_OFFLINE)
1677 {
1678 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1679 return rc;
1680 }
1681 }
1682
1683 /*
1684 * Commit the CPU identifiers.
1685 */
1686# ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
1687 CPUMR0SetLApic(pVCpu, iHostCpuSet);
1688# endif
1689 pVCpu->iHostCpuSet = iHostCpuSet;
1690 ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
1691
1692 /*
1693 * We might need to disable VT-x if the active switcher turns off paging.
1694 */
1695 bool fVTxDisabled;
1696 rc = HMR0EnterSwitcher(pVM, pVM->vmm.s.enmSwitcher, &fVTxDisabled);
1697 if (RT_SUCCESS(rc))
1698 {
1699 /*
1700 * Go through the wormhole...
1701 */
1702 rc = pVM->vmm.s.pfnR0ToRawMode(pVM);
1703
1704 /*
1705 * Re-enable VT-x before we dispatch any pending host interrupts.
1706 */
1707 HMR0LeaveSwitcher(pVM, fVTxDisabled);
1708
1709 if ( rc == VINF_EM_RAW_INTERRUPT
1710 || rc == VINF_EM_RAW_INTERRUPT_HYPER)
1711 TRPMR0DispatchHostInterrupt(pVM);
1712 }
1713
1714 /*
1715 * Invalidate the host CPU identifiers as we restore interrupts.
1716 */
1717 pVCpu->iHostCpuSet = UINT32_MAX;
1718 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1719 ASMSetFlags(fFlags);
1720
1721#else /* !VBOX_WITH_RAW_MODE */
1722 rc = VERR_RAW_MODE_NOT_SUPPORTED;
1723#endif
1724 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1725 break;
1726 }
1727
1728 /*
1729 * PGM wrappers.
1730 */
1731 case VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES:
1732 if (idCpu == NIL_VMCPUID)
1733 return VERR_INVALID_CPU_ID;
1734 rc = PGMR0PhysAllocateHandyPages(pGVM, pVM, idCpu);
1735 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1736 break;
1737
1738 case VMMR0_DO_PGM_FLUSH_HANDY_PAGES:
1739 if (idCpu == NIL_VMCPUID)
1740 return VERR_INVALID_CPU_ID;
1741 rc = PGMR0PhysFlushHandyPages(pGVM, pVM, idCpu);
1742 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1743 break;
1744
1745 case VMMR0_DO_PGM_ALLOCATE_LARGE_HANDY_PAGE:
1746 if (idCpu == NIL_VMCPUID)
1747 return VERR_INVALID_CPU_ID;
1748 rc = PGMR0PhysAllocateLargeHandyPage(pGVM, pVM, idCpu);
1749 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1750 break;
1751
1752 case VMMR0_DO_PGM_PHYS_SETUP_IOMMU:
1753 if (idCpu != 0)
1754 return VERR_INVALID_CPU_ID;
1755 rc = PGMR0PhysSetupIoMmu(pGVM, pVM);
1756 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1757 break;
1758
1759 /*
1760 * GMM wrappers.
1761 */
1762 case VMMR0_DO_GMM_INITIAL_RESERVATION:
1763 if (u64Arg)
1764 return VERR_INVALID_PARAMETER;
1765 rc = GMMR0InitialReservationReq(pGVM, pVM, idCpu, (PGMMINITIALRESERVATIONREQ)pReqHdr);
1766 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1767 break;
1768
1769 case VMMR0_DO_GMM_UPDATE_RESERVATION:
1770 if (u64Arg)
1771 return VERR_INVALID_PARAMETER;
1772 rc = GMMR0UpdateReservationReq(pGVM, pVM, idCpu, (PGMMUPDATERESERVATIONREQ)pReqHdr);
1773 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1774 break;
1775
1776 case VMMR0_DO_GMM_ALLOCATE_PAGES:
1777 if (u64Arg)
1778 return VERR_INVALID_PARAMETER;
1779 rc = GMMR0AllocatePagesReq(pGVM, pVM, idCpu, (PGMMALLOCATEPAGESREQ)pReqHdr);
1780 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1781 break;
1782
1783 case VMMR0_DO_GMM_FREE_PAGES:
1784 if (u64Arg)
1785 return VERR_INVALID_PARAMETER;
1786 rc = GMMR0FreePagesReq(pGVM, pVM, idCpu, (PGMMFREEPAGESREQ)pReqHdr);
1787 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1788 break;
1789
1790 case VMMR0_DO_GMM_FREE_LARGE_PAGE:
1791 if (u64Arg)
1792 return VERR_INVALID_PARAMETER;
1793 rc = GMMR0FreeLargePageReq(pGVM, pVM, idCpu, (PGMMFREELARGEPAGEREQ)pReqHdr);
1794 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1795 break;
1796
1797 case VMMR0_DO_GMM_QUERY_HYPERVISOR_MEM_STATS:
1798 if (u64Arg)
1799 return VERR_INVALID_PARAMETER;
1800 rc = GMMR0QueryHypervisorMemoryStatsReq((PGMMMEMSTATSREQ)pReqHdr);
1801 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1802 break;
1803
1804 case VMMR0_DO_GMM_QUERY_MEM_STATS:
1805 if (idCpu == NIL_VMCPUID)
1806 return VERR_INVALID_CPU_ID;
1807 if (u64Arg)
1808 return VERR_INVALID_PARAMETER;
1809 rc = GMMR0QueryMemoryStatsReq(pGVM, pVM, idCpu, (PGMMMEMSTATSREQ)pReqHdr);
1810 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1811 break;
1812
1813 case VMMR0_DO_GMM_BALLOONED_PAGES:
1814 if (u64Arg)
1815 return VERR_INVALID_PARAMETER;
1816 rc = GMMR0BalloonedPagesReq(pGVM, pVM, idCpu, (PGMMBALLOONEDPAGESREQ)pReqHdr);
1817 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1818 break;
1819
1820 case VMMR0_DO_GMM_MAP_UNMAP_CHUNK:
1821 if (u64Arg)
1822 return VERR_INVALID_PARAMETER;
1823 rc = GMMR0MapUnmapChunkReq(pGVM, pVM, (PGMMMAPUNMAPCHUNKREQ)pReqHdr);
1824 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1825 break;
1826
1827 case VMMR0_DO_GMM_SEED_CHUNK:
1828 if (pReqHdr)
1829 return VERR_INVALID_PARAMETER;
1830 rc = GMMR0SeedChunk(pGVM, pVM, idCpu, (RTR3PTR)u64Arg);
1831 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1832 break;
1833
1834 case VMMR0_DO_GMM_REGISTER_SHARED_MODULE:
1835 if (idCpu == NIL_VMCPUID)
1836 return VERR_INVALID_CPU_ID;
1837 if (u64Arg)
1838 return VERR_INVALID_PARAMETER;
1839 rc = GMMR0RegisterSharedModuleReq(pGVM, pVM, idCpu, (PGMMREGISTERSHAREDMODULEREQ)pReqHdr);
1840 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1841 break;
1842
1843 case VMMR0_DO_GMM_UNREGISTER_SHARED_MODULE:
1844 if (idCpu == NIL_VMCPUID)
1845 return VERR_INVALID_CPU_ID;
1846 if (u64Arg)
1847 return VERR_INVALID_PARAMETER;
1848 rc = GMMR0UnregisterSharedModuleReq(pGVM, pVM, idCpu, (PGMMUNREGISTERSHAREDMODULEREQ)pReqHdr);
1849 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1850 break;
1851
1852 case VMMR0_DO_GMM_RESET_SHARED_MODULES:
1853 if (idCpu == NIL_VMCPUID)
1854 return VERR_INVALID_CPU_ID;
1855 if ( u64Arg
1856 || pReqHdr)
1857 return VERR_INVALID_PARAMETER;
1858 rc = GMMR0ResetSharedModules(pGVM, pVM, idCpu);
1859 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1860 break;
1861
1862#ifdef VBOX_WITH_PAGE_SHARING
1863 case VMMR0_DO_GMM_CHECK_SHARED_MODULES:
1864 {
1865 if (idCpu == NIL_VMCPUID)
1866 return VERR_INVALID_CPU_ID;
1867 if ( u64Arg
1868 || pReqHdr)
1869 return VERR_INVALID_PARAMETER;
1870 rc = GMMR0CheckSharedModules(pGVM, pVM, idCpu);
1871 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1872 break;
1873 }
1874#endif
1875
1876#if defined(VBOX_STRICT) && HC_ARCH_BITS == 64
1877 case VMMR0_DO_GMM_FIND_DUPLICATE_PAGE:
1878 if (u64Arg)
1879 return VERR_INVALID_PARAMETER;
1880 rc = GMMR0FindDuplicatePageReq(pGVM, pVM, (PGMMFINDDUPLICATEPAGEREQ)pReqHdr);
1881 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1882 break;
1883#endif
1884
1885 case VMMR0_DO_GMM_QUERY_STATISTICS:
1886 if (u64Arg)
1887 return VERR_INVALID_PARAMETER;
1888 rc = GMMR0QueryStatisticsReq(pGVM, pVM, (PGMMQUERYSTATISTICSSREQ)pReqHdr);
1889 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1890 break;
1891
1892 case VMMR0_DO_GMM_RESET_STATISTICS:
1893 if (u64Arg)
1894 return VERR_INVALID_PARAMETER;
1895 rc = GMMR0ResetStatisticsReq(pGVM, pVM, (PGMMRESETSTATISTICSSREQ)pReqHdr);
1896 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1897 break;
1898
1899 /*
1900 * A quick GCFGM mock-up.
1901 */
1902 /** @todo GCFGM with proper access control, ring-3 management interface and all that. */
1903 case VMMR0_DO_GCFGM_SET_VALUE:
1904 case VMMR0_DO_GCFGM_QUERY_VALUE:
1905 {
1906 if (pGVM || pVM || !pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
1907 return VERR_INVALID_PARAMETER;
1908 PGCFGMVALUEREQ pReq = (PGCFGMVALUEREQ)pReqHdr;
1909 if (pReq->Hdr.cbReq != sizeof(*pReq))
1910 return VERR_INVALID_PARAMETER;
1911 if (enmOperation == VMMR0_DO_GCFGM_SET_VALUE)
1912 {
1913 rc = GVMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
1914 //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
1915 // rc = GMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
1916 }
1917 else
1918 {
1919 rc = GVMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
1920 //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
1921 // rc = GMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
1922 }
1923 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1924 break;
1925 }
1926
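/*
 * Editor's note: a minimal ring-3 sketch of driving the GCFGM mock-up above.
 * The worker insists on pGVM/pVM being NULL and idCpu being NIL_VMCPUID, so
 * no VM handle is passed. SUPR3CallVMMR0Ex and SUPVMMR0REQHDR_MAGIC are
 * assumed from VBox/sup.h, and the value name is purely illustrative:
 *
 *     GCFGMVALUEREQ Req;
 *     RT_ZERO(Req);
 *     Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
 *     Req.Hdr.cbReq    = sizeof(Req);
 *     Req.pSession     = pSession;
 *     RTStrCopy(Req.szName, sizeof(Req.szName), "/GVMM/SomeValue");
 *     int rc = SUPR3CallVMMR0Ex(NIL_RTR0PTR, NIL_VMCPUID,
 *                               VMMR0_DO_GCFGM_QUERY_VALUE, 0, &Req.Hdr);
 *     if (RT_SUCCESS(rc))
 *         RTPrintf("%s = %#RX64\n", Req.szName, Req.u64Value);
 */
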
1927 /*
1928 * PDM Wrappers.
1929 */
1930 case VMMR0_DO_PDM_DRIVER_CALL_REQ_HANDLER:
1931 {
1932 if (!pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
1933 return VERR_INVALID_PARAMETER;
1934 rc = PDMR0DriverCallReqHandler(pGVM, pVM, (PPDMDRIVERCALLREQHANDLERREQ)pReqHdr);
1935 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1936 break;
1937 }
1938
1939 case VMMR0_DO_PDM_DEVICE_CALL_REQ_HANDLER:
1940 {
1941 if (!pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
1942 return VERR_INVALID_PARAMETER;
1943 rc = PDMR0DeviceCallReqHandler(pGVM, pVM, (PPDMDEVICECALLREQHANDLERREQ)pReqHdr);
1944 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1945 break;
1946 }
1947
1948 /*
1949 * Requests to the internal networking service.
1950 */
1951 case VMMR0_DO_INTNET_OPEN:
1952 {
1953 PINTNETOPENREQ pReq = (PINTNETOPENREQ)pReqHdr;
1954 if (u64Arg || !pReq || !vmmR0IsValidSession(pVM, pReq->pSession, pSession) || idCpu != NIL_VMCPUID)
1955 return VERR_INVALID_PARAMETER;
1956 rc = IntNetR0OpenReq(pSession, pReq);
1957 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1958 break;
1959 }
1960
1961 case VMMR0_DO_INTNET_IF_CLOSE:
1962 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFCLOSEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1963 return VERR_INVALID_PARAMETER;
1964 rc = IntNetR0IfCloseReq(pSession, (PINTNETIFCLOSEREQ)pReqHdr);
1965 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1966 break;
1967
1968
1969 case VMMR0_DO_INTNET_IF_GET_BUFFER_PTRS:
1970 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFGETBUFFERPTRSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1971 return VERR_INVALID_PARAMETER;
1972 rc = IntNetR0IfGetBufferPtrsReq(pSession, (PINTNETIFGETBUFFERPTRSREQ)pReqHdr);
1973 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1974 break;
1975
1976 case VMMR0_DO_INTNET_IF_SET_PROMISCUOUS_MODE:
1977 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1978 return VERR_INVALID_PARAMETER;
1979 rc = IntNetR0IfSetPromiscuousModeReq(pSession, (PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr);
1980 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1981 break;
1982
1983 case VMMR0_DO_INTNET_IF_SET_MAC_ADDRESS:
1984 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETMACADDRESSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1985 return VERR_INVALID_PARAMETER;
1986 rc = IntNetR0IfSetMacAddressReq(pSession, (PINTNETIFSETMACADDRESSREQ)pReqHdr);
1987 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1988 break;
1989
1990 case VMMR0_DO_INTNET_IF_SET_ACTIVE:
1991 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETACTIVEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1992 return VERR_INVALID_PARAMETER;
1993 rc = IntNetR0IfSetActiveReq(pSession, (PINTNETIFSETACTIVEREQ)pReqHdr);
1994 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1995 break;
1996
1997 case VMMR0_DO_INTNET_IF_SEND:
1998 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1999 return VERR_INVALID_PARAMETER;
2000 rc = IntNetR0IfSendReq(pSession, (PINTNETIFSENDREQ)pReqHdr);
2001 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2002 break;
2003
2004 case VMMR0_DO_INTNET_IF_WAIT:
2005 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2006 return VERR_INVALID_PARAMETER;
2007 rc = IntNetR0IfWaitReq(pSession, (PINTNETIFWAITREQ)pReqHdr);
2008 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2009 break;
2010
2011 case VMMR0_DO_INTNET_IF_ABORT_WAIT:
2012 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2013 return VERR_INVALID_PARAMETER;
2014 rc = IntNetR0IfAbortWaitReq(pSession, (PINTNETIFABORTWAITREQ)pReqHdr);
2015 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2016 break;
2017
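/*
 * Editor's note: every INTNET request above is checked against the caller's
 * session before it is forwarded to the internal networking service. A rough
 * ring-3 sketch of VMMR0_DO_INTNET_OPEN follows; the INTNETOPENREQ field
 * names are recalled from VBox/intnet.h and should be treated as
 * assumptions:
 *
 *     INTNETOPENREQ OpenReq;
 *     RT_ZERO(OpenReq);
 *     OpenReq.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
 *     OpenReq.Hdr.cbReq    = sizeof(OpenReq);
 *     OpenReq.pSession     = pSession;
 *     RTStrCopy(OpenReq.szNetwork, sizeof(OpenReq.szNetwork), "intnet-demo");
 *     OpenReq.hIf          = INTNET_HANDLE_INVALID;
 *     int rc = SUPR3CallVMMR0Ex(pVM->pVMR0, NIL_VMCPUID, VMMR0_DO_INTNET_OPEN,
 *                               0, &OpenReq.Hdr);
 */
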
2018#ifdef VBOX_WITH_PCI_PASSTHROUGH
2019 /*
2020 * Requests to host PCI driver service.
2021 */
2022 case VMMR0_DO_PCIRAW_REQ:
2023 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PPCIRAWSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2024 return VERR_INVALID_PARAMETER;
2025 rc = PciRawR0ProcessReq(pGVM, pVM, pSession, (PPCIRAWSENDREQ)pReqHdr);
2026 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2027 break;
2028#endif
2029
2030 /*
2031 * NEM requests.
2032 */
2033#ifdef VBOX_WITH_NEM_R0
2034# if defined(RT_ARCH_AMD64) && defined(RT_OS_WINDOWS)
2035 case VMMR0_DO_NEM_INIT_VM:
2036 if (u64Arg || pReqHdr || idCpu != 0)
2037 return VERR_INVALID_PARAMETER;
2038 rc = NEMR0InitVM(pGVM, pVM);
2039 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2040 break;
2041
2042 case VMMR0_DO_NEM_INIT_VM_PART_2:
2043 if (u64Arg || pReqHdr || idCpu != 0)
2044 return VERR_INVALID_PARAMETER;
2045 rc = NEMR0InitVMPart2(pGVM, pVM);
2046 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2047 break;
2048
2049 case VMMR0_DO_NEM_MAP_PAGES:
2050 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2051 return VERR_INVALID_PARAMETER;
2052 rc = NEMR0MapPages(pGVM, pVM, idCpu);
2053 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2054 break;
2055
2056 case VMMR0_DO_NEM_UNMAP_PAGES:
2057 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2058 return VERR_INVALID_PARAMETER;
2059 rc = NEMR0UnmapPages(pGVM, pVM, idCpu);
2060 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2061 break;
2062
2063 case VMMR0_DO_NEM_EXPORT_STATE:
2064 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2065 return VERR_INVALID_PARAMETER;
2066 rc = NEMR0ExportState(pGVM, pVM, idCpu);
2067 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2068 break;
2069
2070 case VMMR0_DO_NEM_IMPORT_STATE:
2071 if (pReqHdr || idCpu == NIL_VMCPUID)
2072 return VERR_INVALID_PARAMETER;
2073 rc = NEMR0ImportState(pGVM, pVM, idCpu, u64Arg);
2074 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2075 break;
2076
2077 case VMMR0_DO_NEM_QUERY_CPU_TICK:
2078 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2079 return VERR_INVALID_PARAMETER;
2080 rc = NEMR0QueryCpuTick(pGVM, pVM, idCpu);
2081 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2082 break;
2083
2084 case VMMR0_DO_NEM_RESUME_CPU_TICK_ON_ALL:
2085 if (pReqHdr || idCpu == NIL_VMCPUID)
2086 return VERR_INVALID_PARAMETER;
2087 rc = NEMR0ResumeCpuTickOnAll(pGVM, pVM, idCpu, u64Arg);
2088 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2089 break;
2090
2091 case VMMR0_DO_NEM_UPDATE_STATISTICS:
2092 if (u64Arg || pReqHdr)
2093 return VERR_INVALID_PARAMETER;
2094 rc = NEMR0UpdateStatistics(pGVM, pVM, idCpu);
2095 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2096 break;
2097
2098# if 1 && defined(DEBUG_bird)
2099 case VMMR0_DO_NEM_EXPERIMENT:
2100 if (pReqHdr)
2101 return VERR_INVALID_PARAMETER;
2102 rc = NEMR0DoExperiment(pGVM, pVM, idCpu, u64Arg);
2103 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2104 break;
2105# endif
2106# endif
2107#endif
2108
2109 /*
2110 * For profiling.
2111 */
2112 case VMMR0_DO_NOP:
2113 case VMMR0_DO_SLOW_NOP:
2114 return VINF_SUCCESS;
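
/*
 * Editor's note: a quick ring-3 loop for profiling the ring-0 round trip via
 * these no-op operations; a sketch assuming SUPR3CallVMMR0Ex accepts
 * NIL_VMCPUID for VMMR0_DO_NOP (an assumption):
 *
 *     uint64_t const nsStart = RTTimeNanoTS();
 *     for (uint32_t i = 0; i < _64K; i++)
 *         SUPR3CallVMMR0Ex(pVM->pVMR0, NIL_VMCPUID, VMMR0_DO_NOP, 0, NULL);
 *     RTPrintf("VMMR0_DO_NOP: %RU64 ns/call\n",
 *              (RTTimeNanoTS() - nsStart) / _64K);
 */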
2115
2116 /*
2117 * For testing Ring-0 APIs invoked in this environment.
2118 */
2119 case VMMR0_DO_TESTS:
2120 /** @todo make new test */
2121 return VINF_SUCCESS;
2122
2123
2124#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
2125 case VMMR0_DO_TEST_SWITCHER3264:
2126 if (idCpu == NIL_VMCPUID)
2127 return VERR_INVALID_CPU_ID;
2128 rc = HMR0TestSwitcher3264(pVM);
2129 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2130 break;
2131#endif
2132 default:
2133 /*
2134 * We're returning VERR_NOT_SUPPORTED here so we've got something
2135 * other than -1, which the interrupt gate glue code might return.
2136 */
2137 Log(("operation %#x is not supported\n", enmOperation));
2138 return VERR_NOT_SUPPORTED;
2139 }
2140 return rc;
2141}
2142
2143
2144/**
2145 * Argument for vmmR0EntryExWrapper containing the arguments for VMMR0EntryEx.
2146 */
2147typedef struct VMMR0ENTRYEXARGS
2148{
2149 PGVM pGVM;
2150 PVM pVM;
2151 VMCPUID idCpu;
2152 VMMR0OPERATION enmOperation;
2153 PSUPVMMR0REQHDR pReq;
2154 uint64_t u64Arg;
2155 PSUPDRVSESSION pSession;
2156} VMMR0ENTRYEXARGS;
2157/** Pointer to a vmmR0EntryExWrapper argument package. */
2158typedef VMMR0ENTRYEXARGS *PVMMR0ENTRYEXARGS;
2159
2160/**
2161 * This is just a longjmp wrapper function for VMMR0EntryEx calls.
2162 *
2163 * @returns VBox status code.
2164 * @param pvArgs The argument package.
2165 */
2166static DECLCALLBACK(int) vmmR0EntryExWrapper(void *pvArgs)
2167{
2168 return vmmR0EntryExWorker(((PVMMR0ENTRYEXARGS)pvArgs)->pGVM,
2169 ((PVMMR0ENTRYEXARGS)pvArgs)->pVM,
2170 ((PVMMR0ENTRYEXARGS)pvArgs)->idCpu,
2171 ((PVMMR0ENTRYEXARGS)pvArgs)->enmOperation,
2172 ((PVMMR0ENTRYEXARGS)pvArgs)->pReq,
2173 ((PVMMR0ENTRYEXARGS)pvArgs)->u64Arg,
2174 ((PVMMR0ENTRYEXARGS)pvArgs)->pSession);
2175}
2176
2177
2178/**
2179 * The Ring 0 entry point, called by the support library (SUP).
2180 *
2181 * @returns VBox status code.
2182 * @param pGVM The global (ring-0) VM structure.
2183 * @param pVM The cross context VM structure.
2184 * @param idCpu Virtual CPU ID argument. Must be NIL_VMCPUID if pVM
2185 * is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't.
2186 * @param enmOperation Which operation to execute.
2187 * @param pReq Pointer to the SUPVMMR0REQHDR packet. Optional.
2188 * @param u64Arg Some simple constant argument.
2189 * @param pSession The session of the caller.
2190 * @remarks Assumes it is called with interrupts _enabled_.
2191 */
2192VMMR0DECL(int) VMMR0EntryEx(PGVM pGVM, PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation,
2193 PSUPVMMR0REQHDR pReq, uint64_t u64Arg, PSUPDRVSESSION pSession)
2194{
2195 /*
2196 * Requests that should only happen on the EMT thread will be
2197 * wrapped in a setjmp so we can assert without causing trouble.
2198 */
2199 if ( pVM != NULL
2200 && pGVM != NULL
2201 && idCpu < pGVM->cCpus
2202 && pVM->pVMR0 != NULL)
2203 {
2204 switch (enmOperation)
2205 {
2206 /* These might/will be called before VMMR3Init. */
2207 case VMMR0_DO_GMM_INITIAL_RESERVATION:
2208 case VMMR0_DO_GMM_UPDATE_RESERVATION:
2209 case VMMR0_DO_GMM_ALLOCATE_PAGES:
2210 case VMMR0_DO_GMM_FREE_PAGES:
2211 case VMMR0_DO_GMM_BALLOONED_PAGES:
2212 /* On the Mac we might not have a valid jmp buf, so check these as well. */
2213 case VMMR0_DO_VMMR0_INIT:
2214 case VMMR0_DO_VMMR0_TERM:
2215 {
2216 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2217 PVMCPU pVCpu = &pVM->aCpus[idCpu];
2218 RTNATIVETHREAD hNativeThread = RTThreadNativeSelf();
2219 if (RT_LIKELY( pGVCpu->hEMT == hNativeThread
2220 && pVCpu->hNativeThreadR0 == hNativeThread))
2221 {
2222 if (!pVCpu->vmm.s.CallRing3JmpBufR0.pvSavedStack)
2223 break;
2224
2225 /** @todo validate this EMT claim... GVM knows. */
2226 VMMR0ENTRYEXARGS Args;
2227 Args.pGVM = pGVM;
2228 Args.pVM = pVM;
2229 Args.idCpu = idCpu;
2230 Args.enmOperation = enmOperation;
2231 Args.pReq = pReq;
2232 Args.u64Arg = u64Arg;
2233 Args.pSession = pSession;
2234 return vmmR0CallRing3SetJmpEx(&pVCpu->vmm.s.CallRing3JmpBufR0, vmmR0EntryExWrapper, &Args);
2235 }
2236 return VERR_VM_THREAD_NOT_EMT;
2237 }
2238
2239 default:
2240 break;
2241 }
2242 }
2243 return vmmR0EntryExWorker(pGVM, pVM, idCpu, enmOperation, pReq, u64Arg, pSession);
2244}
2245
2246
2247/**
2248 * Checks whether we've armed the ring-0 long jump machinery.
2249 *
2250 * @returns @c true / @c false
2251 * @param pVCpu The cross context virtual CPU structure.
2252 * @thread EMT
2253 * @sa VMMIsLongJumpArmed
2254 */
2255VMMR0_INT_DECL(bool) VMMR0IsLongJumpArmed(PVMCPU pVCpu)
2256{
2257#ifdef RT_ARCH_X86
2258 return pVCpu->vmm.s.CallRing3JmpBufR0.eip
2259 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
2260#else
2261 return pVCpu->vmm.s.CallRing3JmpBufR0.rip
2262 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
2263#endif
2264}
2265
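/*
 * Editor's note: a sketch of the guard pattern this predicate supports; only
 * attempt a ring-3 call when the jump buffer is armed (mirroring the manual
 * check vmmR0LoggerFlush performs further down):
 *
 *     if (VMMR0IsLongJumpArmed(pVCpu))
 *         rc = VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VMM_LOGGER_FLUSH, 0);
 */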
2266
2267/**
2268 * Checks whether we've done a ring-3 long jump.
2269 *
2270 * @returns @c true / @c false
2271 * @param pVCpu The cross context virtual CPU structure.
2272 * @thread EMT
2273 */
2274VMMR0_INT_DECL(bool) VMMR0IsInRing3LongJump(PVMCPU pVCpu)
2275{
2276 return pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
2277}
2278
2279
2280/**
2281 * Internal R0 logger worker: Flush logger.
2282 *
2283 * @param pLogger The logger instance to flush.
2284 * @remark This function must be exported!
2285 */
2286VMMR0DECL(void) vmmR0LoggerFlush(PRTLOGGER pLogger)
2287{
2288#ifdef LOG_ENABLED
2289 /*
2290 * Convert the pLogger into a VM handle and 'call' back to Ring-3.
2291 * (This code is a bit paranoid.)
2292 */
2293 PVMMR0LOGGER pR0Logger = (PVMMR0LOGGER)((uintptr_t)pLogger - RT_OFFSETOF(VMMR0LOGGER, Logger));
2294 if ( !VALID_PTR(pR0Logger)
2295 || !VALID_PTR(pR0Logger + 1)
2296 || pLogger->u32Magic != RTLOGGER_MAGIC)
2297 {
2298# ifdef DEBUG
2299 SUPR0Printf("vmmR0LoggerFlush: pLogger=%p!\n", pLogger);
2300# endif
2301 return;
2302 }
2303 if (pR0Logger->fFlushingDisabled)
2304 return; /* quietly */
2305
2306 PVM pVM = pR0Logger->pVM;
2307 if ( !VALID_PTR(pVM)
2308 || pVM->pVMR0 != pVM)
2309 {
2310# ifdef DEBUG
2311 SUPR0Printf("vmmR0LoggerFlush: pVM=%p! pVMR0=%p! pLogger=%p\n", pVM, pVM->pVMR0, pLogger);
2312# endif
2313 return;
2314 }
2315
2316 PVMCPU pVCpu = VMMGetCpu(pVM);
2317 if (pVCpu)
2318 {
2319 /*
2320 * Check that the jump buffer is armed.
2321 */
2322# ifdef RT_ARCH_X86
2323 if ( !pVCpu->vmm.s.CallRing3JmpBufR0.eip
2324 || pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2325# else
2326 if ( !pVCpu->vmm.s.CallRing3JmpBufR0.rip
2327 || pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2328# endif
2329 {
2330# ifdef DEBUG
2331 SUPR0Printf("vmmR0LoggerFlush: Jump buffer isn't armed!\n");
2332# endif
2333 return;
2334 }
2335 VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VMM_LOGGER_FLUSH, 0);
2336 }
2337# ifdef DEBUG
2338 else
2339 SUPR0Printf("vmmR0LoggerFlush: invalid VCPU context!\n");
2340# endif
2341#else
2342 NOREF(pLogger);
2343#endif /* LOG_ENABLED */
2344}
2345
2346#ifdef LOG_ENABLED
2347
2348/**
2349 * Disables flushing of the ring-0 debug log.
2350 *
2351 * @param pVCpu The cross context virtual CPU structure.
2352 */
2353VMMR0_INT_DECL(void) VMMR0LogFlushDisable(PVMCPU pVCpu)
2354{
2355 if (pVCpu->vmm.s.pR0LoggerR0)
2356 pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled = true;
2357 if (pVCpu->vmm.s.pR0RelLoggerR0)
2358 pVCpu->vmm.s.pR0RelLoggerR0->fFlushingDisabled = true;
2359}
2360
2361
2362/**
2363 * Enables flushing of the ring-0 debug log.
2364 *
2365 * @param pVCpu The cross context virtual CPU structure.
2366 */
2367VMMR0_INT_DECL(void) VMMR0LogFlushEnable(PVMCPU pVCpu)
2368{
2369 if (pVCpu->vmm.s.pR0LoggerR0)
2370 pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled = false;
2371 if (pVCpu->vmm.s.pR0RelLoggerR0)
2372 pVCpu->vmm.s.pR0RelLoggerR0->fFlushingDisabled = false;
2373}
2374
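/*
 * Editor's note: a minimal usage sketch; bracket code that must not trigger
 * a ring-3 logger flush (for instance the flush path itself) like this:
 *
 *     VMMR0LogFlushDisable(pVCpu);
 *     ... work that may log ...
 *     VMMR0LogFlushEnable(pVCpu);
 */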
2375
2376/**
2377 * Checks if log flushing is disabled or not.
2378 *
2379 * @param pVCpu The cross context virtual CPU structure.
2380 */
2381VMMR0_INT_DECL(bool) VMMR0IsLogFlushDisabled(PVMCPU pVCpu)
2382{
2383 if (pVCpu->vmm.s.pR0LoggerR0)
2384 return pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled;
2385 if (pVCpu->vmm.s.pR0RelLoggerR0)
2386 return pVCpu->vmm.s.pR0RelLoggerR0->fFlushingDisabled;
2387 return true;
2388}
2389
2390#endif /* LOG_ENABLED */
2391
2392/**
2393 * Override RTLogRelGetDefaultInstanceEx so we can do LogRel to VBox.log from EMTs in ring-0.
2394 */
2395DECLEXPORT(PRTLOGGER) RTLogRelGetDefaultInstanceEx(uint32_t fFlagsAndGroup)
2396{
2397 PGVMCPU pGVCpu = GVMMR0GetGVCpuByEMT(NIL_RTNATIVETHREAD);
2398 if (pGVCpu)
2399 {
2400 PVMCPU pVCpu = pGVCpu->pVCpu;
2401 if (RT_VALID_PTR(pVCpu))
2402 {
2403 PVMMR0LOGGER pVmmLogger = pVCpu->vmm.s.pR0RelLoggerR0;
2404 if (RT_VALID_PTR(pVmmLogger))
2405 {
2406 if ( pVmmLogger->fCreated
2407 && pVmmLogger->pVM == pGVCpu->pVM)
2408 {
2409 if (pVmmLogger->Logger.fFlags & RTLOGFLAGS_DISABLED)
2410 return NULL;
2411 uint16_t const fFlags = RT_LO_U16(fFlagsAndGroup);
2412 uint16_t const iGroup = RT_HI_U16(fFlagsAndGroup);
2413 if ( iGroup != UINT16_MAX
2414 && ( ( pVmmLogger->Logger.afGroups[iGroup < pVmmLogger->Logger.cGroups ? iGroup : 0]
2415 & (fFlags | (uint32_t)RTLOGGRPFLAGS_ENABLED))
2416 != (fFlags | (uint32_t)RTLOGGRPFLAGS_ENABLED)))
2417 return NULL;
2418 return &pVmmLogger->Logger;
2419 }
2420 }
2421 }
2422 }
2423 return SUPR0GetDefaultLogRelInstanceEx(fFlagsAndGroup);
2424}
2425
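/*
 * Editor's note: fFlagsAndGroup packs the group flags into the low 16 bits
 * and the group index into the high 16 bits (cf. the RT_LO_U16/RT_HI_U16
 * unpacking above). The LogRel* macros in iprt/log.h build it roughly like
 * this (a sketch, not verified against this tree):
 *
 *     PRTLOGGER pLogger = RTLogRelGetDefaultInstanceEx(
 *                             RT_MAKE_U32(RTLOGGRPFLAGS_LEVEL_1, LOG_GROUP_VMM));
 *     if (pLogger)
 *         RTLogLoggerEx(pLogger, RTLOGGRPFLAGS_LEVEL_1, LOG_GROUP_VMM,
 *                       "example message\n");
 */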
2426
2427/**
2428 * Jump back to ring-3 if we're the EMT and the longjmp is armed.
2429 *
2430 * @returns true if the breakpoint should be hit, false if it should be ignored.
2431 */
2432DECLEXPORT(bool) RTCALL RTAssertShouldPanic(void)
2433{
2434#if 0
2435 return true;
2436#else
2437 PVM pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
2438 if (pVM)
2439 {
2440 PVMCPU pVCpu = VMMGetCpu(pVM);
2441
2442 if (pVCpu)
2443 {
2444#ifdef RT_ARCH_X86
2445 if ( pVCpu->vmm.s.CallRing3JmpBufR0.eip
2446 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2447#else
2448 if ( pVCpu->vmm.s.CallRing3JmpBufR0.rip
2449 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2450#endif
2451 {
2452 int rc = VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VM_R0_ASSERTION, 0);
2453 return RT_FAILURE_NP(rc);
2454 }
2455 }
2456 }
2457#ifdef RT_OS_LINUX
2458 return true;
2459#else
2460 return false;
2461#endif
2462#endif
2463}
2464
2465
2466/**
2467 * Override this so we can push it up to ring-3.
2468 *
2469 * @param pszExpr Expression. Can be NULL.
2470 * @param uLine Location line number.
2471 * @param pszFile Location file name.
2472 * @param pszFunction Location function name.
2473 */
2474DECLEXPORT(void) RTCALL RTAssertMsg1Weak(const char *pszExpr, unsigned uLine, const char *pszFile, const char *pszFunction)
2475{
2476 /*
2477 * To the log.
2478 */
2479 LogAlways(("\n!!R0-Assertion Failed!!\n"
2480 "Expression: %s\n"
2481 "Location : %s(%d) %s\n",
2482 pszExpr, pszFile, uLine, pszFunction));
2483
2484 /*
2485 * To the global VMM buffer.
2486 */
2487 PVM pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
2488 if (pVM)
2489 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg1, sizeof(pVM->vmm.s.szRing0AssertMsg1),
2490 "\n!!R0-Assertion Failed!!\n"
2491 "Expression: %.*s\n"
2492 "Location : %s(%d) %s\n",
2493 sizeof(pVM->vmm.s.szRing0AssertMsg1) / 4 * 3, pszExpr,
2494 pszFile, uLine, pszFunction);
2495
2496 /*
2497 * Continue the normal way.
2498 */
2499 RTAssertMsg1(pszExpr, uLine, pszFile, pszFunction);
2500}
2501
2502
2503/**
2504 * Callback for RTLogFormatV which writes to the ring-3 log port.
2505 * See PFNLOGOUTPUT() for details.
2506 */
2507static DECLCALLBACK(size_t) rtLogOutput(void *pv, const char *pachChars, size_t cbChars)
2508{
2509 for (size_t i = 0; i < cbChars; i++)
2510 {
2511 LogAlways(("%c", pachChars[i])); NOREF(pachChars);
2512 }
2513
2514 NOREF(pv);
2515 return cbChars;
2516}
2517
2518
2519/**
2520 * Override this so we can push it up to ring-3.
2521 *
2522 * @param pszFormat The format string.
2523 * @param va Arguments.
2524 */
2525DECLEXPORT(void) RTCALL RTAssertMsg2WeakV(const char *pszFormat, va_list va)
2526{
2527 va_list vaCopy;
2528
2529 /*
2530 * Push the message to the loggers.
2531 */
2532 PRTLOGGER pLog = RTLogGetDefaultInstance(); /* Don't initialize it here... */
2533 if (pLog)
2534 {
2535 va_copy(vaCopy, va);
2536 RTLogFormatV(rtLogOutput, pLog, pszFormat, vaCopy);
2537 va_end(vaCopy);
2538 }
2539 pLog = RTLogRelGetDefaultInstance();
2540 if (pLog)
2541 {
2542 va_copy(vaCopy, va);
2543 RTLogFormatV(rtLogOutput, pLog, pszFormat, vaCopy);
2544 va_end(vaCopy);
2545 }
2546
2547 /*
2548 * Push it to the global VMM buffer.
2549 */
2550 PVM pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
2551 if (pVM)
2552 {
2553 va_copy(vaCopy, va);
2554 RTStrPrintfV(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2), pszFormat, vaCopy);
2555 va_end(vaCopy);
2556 }
2557
2558 /*
2559 * Continue the normal way.
2560 */
2561 RTAssertMsg2V(pszFormat, va);
2562}
2563