VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/VMMR0.cpp@80273

Last change on this file since 80273 was 80052, checked in by vboxsync, 5 years ago

Main: Kicking out 32-bit host support - Some HM bits using VMMSwitcher & CPUMHyper. bugref:9511

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 95.7 KB
1/* $Id: VMMR0.cpp 80052 2019-07-29 20:36:52Z vboxsync $ */
2/** @file
3 * VMM - Host Context Ring 0.
4 */
5
6/*
7 * Copyright (C) 2006-2019 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_VMM
23#include <VBox/vmm/vmm.h>
24#include <VBox/sup.h>
25#include <VBox/vmm/trpm.h>
26#include <VBox/vmm/cpum.h>
27#include <VBox/vmm/pdmapi.h>
28#include <VBox/vmm/pgm.h>
29#ifdef VBOX_WITH_NEM_R0
30# include <VBox/vmm/nem.h>
31#endif
32#include <VBox/vmm/em.h>
33#include <VBox/vmm/stam.h>
34#include <VBox/vmm/tm.h>
35#include "VMMInternal.h"
36#include <VBox/vmm/vm.h>
37#include <VBox/vmm/gvm.h>
38#ifdef VBOX_WITH_PCI_PASSTHROUGH
39# include <VBox/vmm/pdmpci.h>
40#endif
41#include <VBox/vmm/apic.h>
42
43#include <VBox/vmm/gvmm.h>
44#include <VBox/vmm/gmm.h>
45#include <VBox/vmm/gim.h>
46#include <VBox/intnet.h>
47#include <VBox/vmm/hm.h>
48#include <VBox/param.h>
49#include <VBox/err.h>
50#include <VBox/version.h>
51#include <VBox/log.h>
52
53#include <iprt/asm-amd64-x86.h>
54#include <iprt/assert.h>
55#include <iprt/crc.h>
56#include <iprt/mp.h>
57#include <iprt/once.h>
58#include <iprt/stdarg.h>
59#include <iprt/string.h>
60#include <iprt/thread.h>
61#include <iprt/timer.h>
62#include <iprt/time.h>
63
64#include "dtrace/VBoxVMM.h"
65
66
67#if defined(_MSC_VER) && defined(RT_ARCH_AMD64) /** @todo check this with VC7! */
68# pragma intrinsic(_AddressOfReturnAddress)
69#endif
70
71#if defined(RT_OS_DARWIN) && ARCH_BITS == 32
72# error "32-bit darwin is no longer supported. Go back to 4.3 or earlier!"
73#endif
74
75
76
77/*********************************************************************************************************************************
78* Defined Constants And Macros *
79*********************************************************************************************************************************/
80/** @def VMM_CHECK_SMAP_SETUP
81 * SMAP check setup. */
82/** @def VMM_CHECK_SMAP_CHECK
83 * Checks that the AC flag is set if SMAP is enabled. If AC is not set,
84 * it will be logged and @a a_BadExpr is executed. */
85/** @def VMM_CHECK_SMAP_CHECK2
86 * Checks that the AC flag is set if SMAP is enabled. If AC is not set, it will
87 * be logged, written to the VMs assertion text buffer, and @a a_BadExpr is
88 * executed. */
89#if defined(VBOX_STRICT) || 1
90# define VMM_CHECK_SMAP_SETUP() uint32_t const fKernelFeatures = SUPR0GetKernelFeatures()
91# define VMM_CHECK_SMAP_CHECK(a_BadExpr) \
92 do { \
93 if (fKernelFeatures & SUPKERNELFEATURES_SMAP) \
94 { \
95 RTCCUINTREG fEflCheck = ASMGetFlags(); \
96 if (RT_LIKELY(fEflCheck & X86_EFL_AC)) \
97 { /* likely */ } \
98 else \
99 { \
100 SUPR0Printf("%s, line %d: EFLAGS.AC is clear! (%#x)\n", __FUNCTION__, __LINE__, (uint32_t)fEflCheck); \
101 a_BadExpr; \
102 } \
103 } \
104 } while (0)
105# define VMM_CHECK_SMAP_CHECK2(a_pVM, a_BadExpr) \
106 do { \
107 if (fKernelFeatures & SUPKERNELFEATURES_SMAP) \
108 { \
109 RTCCUINTREG fEflCheck = ASMGetFlags(); \
110 if (RT_LIKELY(fEflCheck & X86_EFL_AC)) \
111 { /* likely */ } \
112 else \
113 { \
114 SUPR0BadContext((a_pVM) ? (a_pVM)->pSession : NULL, __FILE__, __LINE__, "EFLAGS.AC is zero!"); \
115 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg1, sizeof(pVM->vmm.s.szRing0AssertMsg1), \
116 "%s, line %d: EFLAGS.AC is clear! (%#x)\n", __FUNCTION__, __LINE__, (uint32_t)fEflCheck); \
117 a_BadExpr; \
118 } \
119 } \
120 } while (0)
121#else
122# define VMM_CHECK_SMAP_SETUP() uint32_t const fKernelFeatures = 0
123# define VMM_CHECK_SMAP_CHECK(a_BadExpr) NOREF(fKernelFeatures)
124# define VMM_CHECK_SMAP_CHECK2(a_pVM, a_BadExpr) NOREF(fKernelFeatures)
125#endif
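/* Usage sketch for the SMAP check macros above, mirroring how ModuleInit() and
   vmmR0InitVM() below employ them: take the kernel feature snapshot once per
   function with VMM_CHECK_SMAP_SETUP(), then re-check EFLAGS.AC around calls
   that may clobber it.  The bad-expressions shown are the ones used later in
   this file.
   @code
       VMM_CHECK_SMAP_SETUP();
       VMM_CHECK_SMAP_CHECK(return VERR_VMM_SMAP_BUT_AC_CLEAR);
       // ... calls that may clobber EFLAGS.AC ...
       VMM_CHECK_SMAP_CHECK2(pVM, rc = VERR_VMM_RING0_ASSERTION);
   @endcode */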
126
127
128/*********************************************************************************************************************************
129* Internal Functions *
130*********************************************************************************************************************************/
131RT_C_DECLS_BEGIN
132#if defined(RT_ARCH_X86) && (defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD))
133extern uint64_t __udivdi3(uint64_t, uint64_t);
134extern uint64_t __umoddi3(uint64_t, uint64_t);
135#endif
136RT_C_DECLS_END
137
138
139/*********************************************************************************************************************************
140* Global Variables *
141*********************************************************************************************************************************/
142/** Drag in necessary library bits.
143 * The runtime lives here (in VMMR0.r0) and VBoxDD*R0.r0 links against us. */
144PFNRT g_VMMR0Deps[] =
145{
146 (PFNRT)RTCrc32,
147 (PFNRT)RTOnce,
148#if defined(RT_ARCH_X86) && (defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD))
149 (PFNRT)__udivdi3,
150 (PFNRT)__umoddi3,
151#endif
152 NULL
153};
154
155#ifdef RT_OS_SOLARIS
156/* Dependency information for the native Solaris loader. */
157extern "C" { char _depends_on[] = "vboxdrv"; }
158#endif
159
160/** The result of SUPR0GetRawModeUsability(), set by ModuleInit(). */
161int g_rcRawModeUsability = VINF_SUCCESS;
162
163
164/**
165 * Initialize the module.
166 * This is called when we're first loaded.
167 *
168 * @returns 0 on success.
169 * @returns VBox status on failure.
170 * @param hMod Image handle for use in APIs.
171 */
172DECLEXPORT(int) ModuleInit(void *hMod)
173{
174 VMM_CHECK_SMAP_SETUP();
175 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
176
177#ifdef VBOX_WITH_DTRACE_R0
178 /*
179 * The first thing to do is register the static tracepoints.
180 * (Deregistration is automatic.)
181 */
182 int rc2 = SUPR0TracerRegisterModule(hMod, &g_VTGObjHeader);
183 if (RT_FAILURE(rc2))
184 return rc2;
185#endif
186 LogFlow(("ModuleInit:\n"));
187
188#ifdef VBOX_WITH_64ON32_CMOS_DEBUG
189 /*
190 * Display the CMOS debug code.
191 */
192 ASMOutU8(0x72, 0x03);
193 uint8_t bDebugCode = ASMInU8(0x73);
194 LogRel(("CMOS Debug Code: %#x (%d)\n", bDebugCode, bDebugCode));
195 RTLogComPrintf("CMOS Debug Code: %#x (%d)\n", bDebugCode, bDebugCode);
196#endif
197
198 /*
199 * Initialize the VMM, GVMM, GMM, HM, PGM (Darwin) and INTNET.
200 */
201 int rc = vmmInitFormatTypes();
202 if (RT_SUCCESS(rc))
203 {
204 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
205 rc = GVMMR0Init();
206 if (RT_SUCCESS(rc))
207 {
208 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
209 rc = GMMR0Init();
210 if (RT_SUCCESS(rc))
211 {
212 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
213 rc = HMR0Init();
214 if (RT_SUCCESS(rc))
215 {
216 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
217 rc = PGMRegisterStringFormatTypes();
218 if (RT_SUCCESS(rc))
219 {
220 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
221#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
222 rc = PGMR0DynMapInit();
223#endif
224 if (RT_SUCCESS(rc))
225 {
226 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
227 rc = IntNetR0Init();
228 if (RT_SUCCESS(rc))
229 {
230#ifdef VBOX_WITH_PCI_PASSTHROUGH
231 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
232 rc = PciRawR0Init();
233#endif
234 if (RT_SUCCESS(rc))
235 {
236 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
237 rc = CPUMR0ModuleInit();
238 if (RT_SUCCESS(rc))
239 {
240#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
241 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
242 rc = vmmR0TripleFaultHackInit();
243 if (RT_SUCCESS(rc))
244#endif
245 {
246 VMM_CHECK_SMAP_CHECK(rc = VERR_VMM_SMAP_BUT_AC_CLEAR);
247 if (RT_SUCCESS(rc))
248 {
249 g_rcRawModeUsability = SUPR0GetRawModeUsability();
250 if (g_rcRawModeUsability != VINF_SUCCESS)
251 SUPR0Printf("VMMR0!ModuleInit: SUPR0GetRawModeUsability -> %Rrc\n",
252 g_rcRawModeUsability);
253 LogFlow(("ModuleInit: returns success\n"));
254 return VINF_SUCCESS;
255 }
256 }
257
258 /*
259 * Bail out.
260 */
261#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
262 vmmR0TripleFaultHackTerm();
263#endif
264 }
265 else
266 LogRel(("ModuleInit: CPUMR0ModuleInit -> %Rrc\n", rc));
267#ifdef VBOX_WITH_PCI_PASSTHROUGH
268 PciRawR0Term();
269#endif
270 }
271 else
272 LogRel(("ModuleInit: PciRawR0Init -> %Rrc\n", rc));
273 IntNetR0Term();
274 }
275 else
276 LogRel(("ModuleInit: IntNetR0Init -> %Rrc\n", rc));
277#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
278 PGMR0DynMapTerm();
279#endif
280 }
281 else
282 LogRel(("ModuleInit: PGMR0DynMapInit -> %Rrc\n", rc));
283 PGMDeregisterStringFormatTypes();
284 }
285 else
286 LogRel(("ModuleInit: PGMRegisterStringFormatTypes -> %Rrc\n", rc));
287 HMR0Term();
288 }
289 else
290 LogRel(("ModuleInit: HMR0Init -> %Rrc\n", rc));
291 GMMR0Term();
292 }
293 else
294 LogRel(("ModuleInit: GMMR0Init -> %Rrc\n", rc));
295 GVMMR0Term();
296 }
297 else
298 LogRel(("ModuleInit: GVMMR0Init -> %Rrc\n", rc));
299 vmmTermFormatTypes();
300 }
301 else
302 LogRel(("ModuleInit: vmmInitFormatTypes -> %Rrc\n", rc));
303
304 LogFlow(("ModuleInit: failed %Rrc\n", rc));
305 return rc;
306}
307
308
309/**
310 * Terminate the module.
311 * This is called when we're finally unloaded.
312 *
313 * @param hMod Image handle for use in APIs.
314 */
315DECLEXPORT(void) ModuleTerm(void *hMod)
316{
317 NOREF(hMod);
318 LogFlow(("ModuleTerm:\n"));
319
320 /*
321 * Terminate the CPUM module (Local APIC cleanup).
322 */
323 CPUMR0ModuleTerm();
324
325 /*
326 * Terminate the internal network service.
327 */
328 IntNetR0Term();
329
330 /*
331 * PGM (Darwin), HM and PciRaw global cleanup.
332 */
333#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
334 PGMR0DynMapTerm();
335#endif
336#ifdef VBOX_WITH_PCI_PASSTHROUGH
337 PciRawR0Term();
338#endif
339 PGMDeregisterStringFormatTypes();
340 HMR0Term();
341#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
342 vmmR0TripleFaultHackTerm();
343#endif
344
345 /*
346 * Destroy the GMM and GVMM instances.
347 */
348 GMMR0Term();
349 GVMMR0Term();
350
351 vmmTermFormatTypes();
352
353 LogFlow(("ModuleTerm: returns\n"));
354}
355
356
357/**
358 * Initiates the R0 driver for a particular VM instance.
359 *
360 * @returns VBox status code.
361 *
362 * @param pGVM The global (ring-0) VM structure.
363 * @param pVM The cross context VM structure.
364 * @param uSvnRev The SVN revision of the ring-3 part.
365 * @param uBuildType Build type indicator.
366 * @thread EMT(0)
367 */
368static int vmmR0InitVM(PGVM pGVM, PVM pVM, uint32_t uSvnRev, uint32_t uBuildType)
369{
370 VMM_CHECK_SMAP_SETUP();
371 VMM_CHECK_SMAP_CHECK(return VERR_VMM_SMAP_BUT_AC_CLEAR);
372
373 /*
374 * Match the SVN revisions and build type.
375 */
376 if (uSvnRev != VMMGetSvnRev())
377 {
378 LogRel(("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev()));
379 SUPR0Printf("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev());
380 return VERR_VMM_R0_VERSION_MISMATCH;
381 }
382 if (uBuildType != vmmGetBuildType())
383 {
384 LogRel(("VMMR0InitVM: Build type mismatch, r3=%#x r0=%#x\n", uBuildType, vmmGetBuildType()));
385 SUPR0Printf("VMMR0InitVM: Build type mismatch, r3=%#x r0=%#x\n", uBuildType, vmmGetBuildType());
386 return VERR_VMM_R0_VERSION_MISMATCH;
387 }
388
389 int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, 0 /*idCpu*/);
390 if (RT_FAILURE(rc))
391 return rc;
392
393#ifdef LOG_ENABLED
394 /*
395 * Register the EMT R0 logger instance for VCPU 0.
396 */
397#ifdef VBOX_BUGREF_9217
398 PVMCPU pVCpu = &pGVM->aCpus[0];
399#else
400 PVMCPU pVCpu = &pVM->aCpus[0];
401#endif
402
403 PVMMR0LOGGER pR0Logger = pVCpu->vmm.s.pR0LoggerR0;
404 if (pR0Logger)
405 {
406# if 0 /* testing of the logger. */
407 LogCom(("vmmR0InitVM: before %p\n", RTLogDefaultInstance()));
408 LogCom(("vmmR0InitVM: pfnFlush=%p actual=%p\n", pR0Logger->Logger.pfnFlush, vmmR0LoggerFlush));
409 LogCom(("vmmR0InitVM: pfnLogger=%p actual=%p\n", pR0Logger->Logger.pfnLogger, vmmR0LoggerWrapper));
410 LogCom(("vmmR0InitVM: offScratch=%d fFlags=%#x fDestFlags=%#x\n", pR0Logger->Logger.offScratch, pR0Logger->Logger.fFlags, pR0Logger->Logger.fDestFlags));
411
412 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
413 LogCom(("vmmR0InitVM: after %p reg\n", RTLogDefaultInstance()));
414 RTLogSetDefaultInstanceThread(NULL, pVM->pSession);
415 LogCom(("vmmR0InitVM: after %p dereg\n", RTLogDefaultInstance()));
416
417 pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
418 LogCom(("vmmR0InitVM: returned successfully from direct logger call.\n"));
419 pR0Logger->Logger.pfnFlush(&pR0Logger->Logger);
420 LogCom(("vmmR0InitVM: returned successfully from direct flush call.\n"));
421
422 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
423 LogCom(("vmmR0InitVM: after %p reg2\n", RTLogDefaultInstance()));
424 pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
425 LogCom(("vmmR0InitVM: returned successfully from direct logger call (2). offScratch=%d\n", pR0Logger->Logger.offScratch));
426 RTLogSetDefaultInstanceThread(NULL, pVM->pSession);
427 LogCom(("vmmR0InitVM: after %p dereg2\n", RTLogDefaultInstance()));
428
429 RTLogLoggerEx(&pR0Logger->Logger, 0, ~0U, "hello ring-0 logger (RTLogLoggerEx)\n");
430 LogCom(("vmmR0InitVM: RTLogLoggerEx returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));
431
432 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
433 RTLogPrintf("hello ring-0 logger (RTLogPrintf)\n");
434 LogCom(("vmmR0InitVM: RTLogPrintf returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));
435# endif
436 Log(("Switching to per-thread logging instance %p (key=%p)\n", &pR0Logger->Logger, pVM->pSession));
437 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
438 pR0Logger->fRegistered = true;
439 }
440#endif /* LOG_ENABLED */
441
442 /*
443 * Check if the host supports high resolution timers or not.
444 */
445 if ( pVM->vmm.s.fUsePeriodicPreemptionTimers
446 && !RTTimerCanDoHighResolution())
447 pVM->vmm.s.fUsePeriodicPreemptionTimers = false;
448
449 /*
450 * Initialize the per VM data for GVMM and GMM.
451 */
452 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
453 rc = GVMMR0InitVM(pGVM);
454// if (RT_SUCCESS(rc))
455// rc = GMMR0InitPerVMData(pVM);
456 if (RT_SUCCESS(rc))
457 {
458 /*
459 * Init HM, CPUM and PGM (Darwin only).
460 */
461 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
462 rc = HMR0InitVM(pVM);
463 if (RT_SUCCESS(rc))
464 VMM_CHECK_SMAP_CHECK2(pVM, rc = VERR_VMM_RING0_ASSERTION); /* CPUMR0InitVM will otherwise panic the host */
465 if (RT_SUCCESS(rc))
466 {
467 rc = CPUMR0InitVM(pVM);
468 if (RT_SUCCESS(rc))
469 {
470 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
471#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
472 rc = PGMR0DynMapInitVM(pVM);
473#endif
474 if (RT_SUCCESS(rc))
475 {
476 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
477#ifdef VBOX_BUGREF_9217
478 rc = EMR0InitVM(pGVM);
479#else
480 rc = EMR0InitVM(pGVM, pVM);
481#endif
482 if (RT_SUCCESS(rc))
483 {
484 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
485#ifdef VBOX_WITH_PCI_PASSTHROUGH
486 rc = PciRawR0InitVM(pGVM, pVM);
487#endif
488 if (RT_SUCCESS(rc))
489 {
490 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
491 rc = GIMR0InitVM(pVM);
492 if (RT_SUCCESS(rc))
493 {
494 VMM_CHECK_SMAP_CHECK2(pVM, rc = VERR_VMM_RING0_ASSERTION);
495 if (RT_SUCCESS(rc))
496 {
497 GVMMR0DoneInitVM(pGVM);
498
499 /*
500 * Collect a bit of info for the VM release log.
501 */
502 pVM->vmm.s.fIsPreemptPendingApiTrusty = RTThreadPreemptIsPendingTrusty();
503 pVM->vmm.s.fIsPreemptPossible = RTThreadPreemptIsPossible();
504
505 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
506 return rc;
507 }
508
509 /* bail out*/
510 GIMR0TermVM(pVM);
511 }
512#ifdef VBOX_WITH_PCI_PASSTHROUGH
513 PciRawR0TermVM(pGVM, pVM);
514#endif
515 }
516 }
517 }
518 }
519 HMR0TermVM(pVM);
520 }
521 }
522
523 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pVM->pSession);
524 return rc;
525}
526
527
528/**
529 * Does EMT specific VM initialization.
530 *
531 * @returns VBox status code.
532 * @param pGVM The ring-0 VM structure.
533 * @param pVM The cross context VM structure.
534 * @param idCpu The EMT that's calling.
535 */
536static int vmmR0InitVMEmt(PGVM pGVM, PVM pVM, VMCPUID idCpu)
537{
538 /* Paranoia (caller checked these already). */
539 AssertReturn(idCpu < pGVM->cCpus, VERR_INVALID_CPU_ID);
540 AssertReturn(pGVM->aCpus[idCpu].hEMT == RTThreadNativeSelf(), VERR_INVALID_CPU_ID);
541
542#ifdef LOG_ENABLED
543 /*
544 * Registration of ring 0 loggers.
545 */
546#ifdef VBOX_BUGREF_9217
547 PVMCPU pVCpu = &pGVM->aCpus[idCpu];
548#else
549 PVMCPU pVCpu = &pVM->aCpus[idCpu];
550#endif
551 PVMMR0LOGGER pR0Logger = pVCpu->vmm.s.pR0LoggerR0;
552 if ( pR0Logger
553 && !pR0Logger->fRegistered)
554 {
555 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
556 pR0Logger->fRegistered = true;
557 }
558#endif
559 RT_NOREF(pVM);
560
561 return VINF_SUCCESS;
562}
563
564
565
566/**
567 * Terminates the R0 bits for a particular VM instance.
568 *
569 * This is normally called by ring-3 as part of the VM termination process, but
570 * may alternatively be called during the support driver session cleanup when
571 * the VM object is destroyed (see GVMM).
572 *
573 * @returns VBox status code.
574 *
575 * @param pGVM The global (ring-0) VM structure.
576 * @param pVM The cross context VM structure.
577 * @param idCpu Set to 0 if EMT(0) or NIL_VMCPUID if session cleanup
578 * thread.
579 * @thread EMT(0) or session clean up thread.
580 */
581VMMR0_INT_DECL(int) VMMR0TermVM(PGVM pGVM, PVM pVM, VMCPUID idCpu)
582{
583 /*
584 * Check EMT(0) claim if we're called from userland.
585 */
586 if (idCpu != NIL_VMCPUID)
587 {
588 AssertReturn(idCpu == 0, VERR_INVALID_CPU_ID);
589 int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
590 if (RT_FAILURE(rc))
591 return rc;
592 }
593
594#ifdef VBOX_WITH_PCI_PASSTHROUGH
595 PciRawR0TermVM(pGVM, pVM);
596#endif
597
598 /*
599 * Tell GVMM what we're up to and check that we only do this once.
600 */
601 if (GVMMR0DoingTermVM(pGVM))
602 {
603 GIMR0TermVM(pVM);
604
605 /** @todo I wish to call PGMR0PhysFlushHandyPages(pVM, &pVM->aCpus[idCpu])
606 * here to make sure we don't leak any shared pages if we crash... */
607#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
608 PGMR0DynMapTermVM(pVM);
609#endif
610 HMR0TermVM(pVM);
611 }
612
613 /*
614 * Deregister the logger.
615 */
616 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pVM->pSession);
617 return VINF_SUCCESS;
618}
619
620
621/**
622 * An interrupt or unhalt force flag is set, deal with it.
623 *
624 * @returns VINF_SUCCESS (or VINF_EM_HALT).
625 * @param pVCpu The cross context virtual CPU structure.
626 * @param uMWait Result from EMMonitorWaitIsActive().
627 * @param enmInterruptibility Guest CPU interruptibility level.
628 */
629static int vmmR0DoHaltInterrupt(PVMCPU pVCpu, unsigned uMWait, CPUMINTERRUPTIBILITY enmInterruptibility)
630{
631 Assert(!TRPMHasTrap(pVCpu));
632 Assert( enmInterruptibility > CPUMINTERRUPTIBILITY_INVALID
633 && enmInterruptibility < CPUMINTERRUPTIBILITY_END);
634
635 /*
636 * Pending interrupts w/o any SMIs or NMIs? That's the usual case.
637 */
638 if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
639 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_INTERRUPT_NMI))
640 {
641 if (enmInterruptibility <= CPUMINTERRUPTIBILITY_UNRESTRAINED)
642 {
643 uint8_t u8Interrupt = 0;
644 int rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
645 Log(("vmmR0DoHaltInterrupt: CPU%d u8Interrupt=%d (%#x) rc=%Rrc\n", pVCpu->idCpu, u8Interrupt, u8Interrupt, rc));
646 if (RT_SUCCESS(rc))
647 {
648 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_UNHALT);
649
650 rc = TRPMAssertTrap(pVCpu, u8Interrupt, TRPM_HARDWARE_INT);
651 AssertRCSuccess(rc);
652 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltExec);
653 return rc;
654 }
655 }
656 }
657 /*
658 * SMI is not implemented yet, at least not here.
659 */
660 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_SMI))
661 {
662 return VINF_EM_HALT;
663 }
664 /*
665 * NMI.
666 */
667 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
668 {
669 if (enmInterruptibility < CPUMINTERRUPTIBILITY_NMI_INHIBIT)
670 {
671 /** @todo later. */
672 return VINF_EM_HALT;
673 }
674 }
675 /*
676 * Nested-guest virtual interrupt.
677 */
678 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST))
679 {
680 if (enmInterruptibility < CPUMINTERRUPTIBILITY_VIRT_INT_DISABLED)
681 {
682 /** @todo NSTVMX: NSTSVM: Remember, we might have to check and perform VM-exits
683 * here before injecting the virtual interrupt. See emR3ForcedActions
684 * for details. */
685 return VINF_EM_HALT;
686 }
687 }
688
689 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UNHALT))
690 {
691 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltExec);
692 return VINF_SUCCESS;
693 }
694 if (uMWait > 1)
695 {
696 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltExec);
697 return VINF_SUCCESS;
698 }
699
700 return VINF_EM_HALT;
701}
702
703
704/**
705 * This does one round of vmR3HaltGlobal1Halt().
706 *
707 * The rationale here is that we'll reduce latency in interrupt situations if we
708 * don't go to ring-3 immediately on a VINF_EM_HALT (guest executed HLT or
709 * MWAIT), but do one round of blocking here instead and hope the interrupt is
710 * raised in the meanwhile.
711 *
712 * If we go to ring-3 we'll quit the inner HM/NEM loop in EM and end up in the
713 * outer loop, which will then call VMR3WaitHalted() and that in turn will do a
714 * ring-0 call (unless we're too close to a timer event). When the interrupt
715 * wakes us up, we'll return from ring-0 and EM will by instinct do a
716 * rescheduling (because of raw-mode) before it resumes the HM/NEM loop and gets
717 * back to VMMR0EntryFast().
718 *
719 * @returns VINF_SUCCESS or VINF_EM_HALT.
720 * @param pGVM The ring-0 VM structure.
721 * @param pVM The cross context VM structure.
722 * @param pGVCpu The ring-0 virtual CPU structure.
723 * @param pVCpu The cross context virtual CPU structure.
724 *
725 * @todo r=bird: All the blocking/waiting and EMT management should move out of
726 * the VM module, probably to VMM. Then this would be more weird wrt
727 * parameters and statistics.
728 */
729static int vmmR0DoHalt(PGVM pGVM, PVM pVM, PGVMCPU pGVCpu, PVMCPU pVCpu)
730{
731#ifdef VBOX_BUGREF_9217
732 Assert(pVCpu == pGVCpu);
733#else
734 Assert(pVCpu == pGVCpu->pVCpu);
735#endif
736
737 /*
738 * Do spin stat historization.
739 */
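 /* Every 256 halts the two counters are rescaled: whichever of "handled in
    ring-0" vs "had to go to ring-3" dominated the last period is seeded with 2
    and the other cleared, so the spin/block heuristics below follow recent
    behaviour rather than lifetime totals. */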
740 if (++pVCpu->vmm.s.cR0Halts & 0xff)
741 { /* likely */ }
742 else if (pVCpu->vmm.s.cR0HaltsSucceeded > pVCpu->vmm.s.cR0HaltsToRing3)
743 {
744 pVCpu->vmm.s.cR0HaltsSucceeded = 2;
745 pVCpu->vmm.s.cR0HaltsToRing3 = 0;
746 }
747 else
748 {
749 pVCpu->vmm.s.cR0HaltsSucceeded = 0;
750 pVCpu->vmm.s.cR0HaltsToRing3 = 2;
751 }
752
753 /*
754 * Flags that make us go to ring-3.
755 */
756 uint32_t const fVmFFs = VM_FF_TM_VIRTUAL_SYNC | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA
757 | VM_FF_DBGF | VM_FF_REQUEST | VM_FF_CHECK_VM_STATE
758 | VM_FF_RESET | VM_FF_EMT_RENDEZVOUS | VM_FF_PGM_NEED_HANDY_PAGES
759 | VM_FF_PGM_NO_MEMORY | VM_FF_REM_HANDLER_NOTIFY | VM_FF_DEBUG_SUSPEND;
760 uint64_t const fCpuFFs = VMCPU_FF_TIMER | VMCPU_FF_PDM_CRITSECT | VMCPU_FF_IEM
761 | VMCPU_FF_REQUEST | VMCPU_FF_DBGF | VMCPU_FF_HM_UPDATE_CR3
762 | VMCPU_FF_HM_UPDATE_PAE_PDPES | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
763 | VMCPU_FF_TO_R3 | VMCPU_FF_IOM;
764
765 /*
766 * Check preconditions.
767 */
768 unsigned const uMWait = EMMonitorWaitIsActive(pVCpu);
769 CPUMINTERRUPTIBILITY const enmInterruptibility = CPUMGetGuestInterruptibility(pVCpu);
770 if ( pVCpu->vmm.s.fMayHaltInRing0
771 && !TRPMHasTrap(pVCpu)
772 && ( enmInterruptibility == CPUMINTERRUPTIBILITY_UNRESTRAINED
773 || uMWait > 1))
774 {
775 if ( !VM_FF_IS_ANY_SET(pVM, fVmFFs)
776 && !VMCPU_FF_IS_ANY_SET(pVCpu, fCpuFFs))
777 {
778 /*
779 * Interrupts pending already?
780 */
781 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
782 APICUpdatePendingInterrupts(pVCpu);
783
784 /*
785 * Flags that wake up from the halted state.
786 */
787 uint64_t const fIntMask = VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_INTERRUPT_NESTED_GUEST
788 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT;
789
790 if (VMCPU_FF_IS_ANY_SET(pVCpu, fIntMask))
791 return vmmR0DoHaltInterrupt(pVCpu, uMWait, enmInterruptibility);
792 ASMNopPause();
793
794 /*
795 * Check out how long till the next timer event.
796 */
797 uint64_t u64Delta;
798 uint64_t u64GipTime = TMTimerPollGIP(pVM, pVCpu, &u64Delta);
799
800 if ( !VM_FF_IS_ANY_SET(pVM, fVmFFs)
801 && !VMCPU_FF_IS_ANY_SET(pVCpu, fCpuFFs))
802 {
803 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
804 APICUpdatePendingInterrupts(pVCpu);
805
806 if (VMCPU_FF_IS_ANY_SET(pVCpu, fIntMask))
807 return vmmR0DoHaltInterrupt(pVCpu, uMWait, enmInterruptibility);
808
809 /*
810 * Wait if there is enough time to the next timer event.
811 */
812 if (u64Delta >= pVCpu->vmm.s.cNsSpinBlockThreshold)
813 {
814 /* If there are few other CPU cores around, we will procrastinate a
815 little before going to sleep, hoping for some device raising an
816 interrupt or similar. Though, the best thing here would be to
817 dynamically adjust the spin count according to its usefulness or
818 something... */
819 if ( pVCpu->vmm.s.cR0HaltsSucceeded > pVCpu->vmm.s.cR0HaltsToRing3
820 && RTMpGetOnlineCount() >= 4)
821 {
822 /** @todo Figure out how we can skip this if it hasn't helped recently...
823 * @bugref{9172#c12} */
824 uint32_t cSpinLoops = 42;
825 while (cSpinLoops-- > 0)
826 {
827 ASMNopPause();
828 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
829 APICUpdatePendingInterrupts(pVCpu);
830 ASMNopPause();
831 if (VM_FF_IS_ANY_SET(pVM, fVmFFs))
832 {
833 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltToR3FromSpin);
834 return VINF_EM_HALT;
835 }
836 ASMNopPause();
837 if (VMCPU_FF_IS_ANY_SET(pVCpu, fCpuFFs))
838 {
839 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltToR3FromSpin);
840 return VINF_EM_HALT;
841 }
842 ASMNopPause();
843 if (VMCPU_FF_IS_ANY_SET(pVCpu, fIntMask))
844 {
845 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltExecFromSpin);
846 return vmmR0DoHaltInterrupt(pVCpu, uMWait, enmInterruptibility);
847 }
848 ASMNopPause();
849 }
850 }
851
852 /* Block. We have to set the state to VMCPUSTATE_STARTED_HALTED here so ring-3
853 knows when to notify us (cannot access VMINTUSERPERVMCPU::fWait from here). */
854 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HALTED, VMCPUSTATE_STARTED);
855 uint64_t const u64StartSchedHalt = RTTimeNanoTS();
856 int rc = GVMMR0SchedHalt(pGVM, pVM, pGVCpu, u64GipTime);
857 uint64_t const u64EndSchedHalt = RTTimeNanoTS();
858 uint64_t const cNsElapsedSchedHalt = u64EndSchedHalt - u64StartSchedHalt;
859 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_HALTED);
860 STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->vmm.s.StatR0HaltBlock, cNsElapsedSchedHalt);
861 if ( rc == VINF_SUCCESS
862 || rc == VERR_INTERRUPTED)
863
864 {
865 /* Keep some stats like ring-3 does. */
866 int64_t const cNsOverslept = u64EndSchedHalt - u64GipTime;
867 if (cNsOverslept > 50000)
868 STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->vmm.s.StatR0HaltBlockOverslept, cNsOverslept);
869 else if (cNsOverslept < -50000)
870 STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->vmm.s.StatR0HaltBlockInsomnia, cNsElapsedSchedHalt);
871 else
872 STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->vmm.s.StatR0HaltBlockOnTime, cNsElapsedSchedHalt);
873
874 /*
875 * Recheck whether we can resume execution or have to go to ring-3.
876 */
877 if ( !VM_FF_IS_ANY_SET(pVM, fVmFFs)
878 && !VMCPU_FF_IS_ANY_SET(pVCpu, fCpuFFs))
879 {
880 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
881 APICUpdatePendingInterrupts(pVCpu);
882 if (VMCPU_FF_IS_ANY_SET(pVCpu, fIntMask))
883 {
884 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltExecFromBlock);
885 return vmmR0DoHaltInterrupt(pVCpu, uMWait, enmInterruptibility);
886 }
887 }
888 }
889 }
890 }
891 }
892 }
893 return VINF_EM_HALT;
894}
895
896
897/**
898 * VMM ring-0 thread-context callback.
899 *
900 * This does common HM state updating and calls the HM-specific thread-context
901 * callback.
902 *
903 * @param enmEvent The thread-context event.
904 * @param pvUser Opaque pointer to the VMCPU.
905 *
906 * @thread EMT(pvUser)
907 */
908static DECLCALLBACK(void) vmmR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, void *pvUser)
909{
910 PVMCPU pVCpu = (PVMCPU)pvUser;
911
912 switch (enmEvent)
913 {
914 case RTTHREADCTXEVENT_IN:
915 {
916 /*
917 * Linux may call us with preemption enabled (really!) but technically we
918 * cannot get preempted here, otherwise we end up in an infinite recursion
919 * scenario (i.e. preempted in resume hook -> preempt hook -> resume hook...
920 * ad infinitum). Let's just disable preemption for now...
921 */
922 /** @todo r=bird: I don't believe the above. The linux code is clearly enabling
923 * preemption after doing the callout (one or two functions up the
924 * call chain). */
925 /** @todo r=ramshankar: See @bugref{5313#c30}. */
926 RTTHREADPREEMPTSTATE ParanoidPreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
927 RTThreadPreemptDisable(&ParanoidPreemptState);
928
929 /* We need to update the VCPU <-> host CPU mapping. */
930 RTCPUID idHostCpu;
931 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
932 pVCpu->iHostCpuSet = iHostCpuSet;
933 ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
934
935 /* In the very unlikely event that the GIP delta for the CPU we're
936 rescheduled onto needs calculating, try to force a return to ring-3.
937 We unfortunately cannot do the measurements right here. */
938 if (RT_UNLIKELY(!SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
939 VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
940
941 /* Invoke the HM-specific thread-context callback. */
942 HMR0ThreadCtxCallback(enmEvent, pvUser);
943
944 /* Restore preemption. */
945 RTThreadPreemptRestore(&ParanoidPreemptState);
946 break;
947 }
948
949 case RTTHREADCTXEVENT_OUT:
950 {
951 /* Invoke the HM-specific thread-context callback. */
952 HMR0ThreadCtxCallback(enmEvent, pvUser);
953
954 /*
955 * Sigh. See VMMGetCpu() used by VMCPU_ASSERT_EMT(). We cannot let several VCPUs
956 * have the same host CPU associated with it.
957 */
958 pVCpu->iHostCpuSet = UINT32_MAX;
959 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
960 break;
961 }
962
963 default:
964 /* Invoke the HM-specific thread-context callback. */
965 HMR0ThreadCtxCallback(enmEvent, pvUser);
966 break;
967 }
968}
969
970
971/**
972 * Creates thread switching hook for the current EMT thread.
973 *
974 * This is called by GVMMR0CreateVM and GVMMR0RegisterVCpu. If the host
975 * platform does not implement switcher hooks, no hooks will be created and the
976 * member set to NIL_RTTHREADCTXHOOK.
977 *
978 * @returns VBox status code.
979 * @param pVCpu The cross context virtual CPU structure.
980 * @thread EMT(pVCpu)
981 */
982VMMR0_INT_DECL(int) VMMR0ThreadCtxHookCreateForEmt(PVMCPU pVCpu)
983{
984 VMCPU_ASSERT_EMT(pVCpu);
985 Assert(pVCpu->vmm.s.hCtxHook == NIL_RTTHREADCTXHOOK);
986
987#if 1 /* To disable this stuff change to zero. */
988 int rc = RTThreadCtxHookCreate(&pVCpu->vmm.s.hCtxHook, 0, vmmR0ThreadCtxCallback, pVCpu);
989 if (RT_SUCCESS(rc))
990 return rc;
991#else
992 RT_NOREF(vmmR0ThreadCtxCallback);
993 int rc = VERR_NOT_SUPPORTED;
994#endif
995
996 pVCpu->vmm.s.hCtxHook = NIL_RTTHREADCTXHOOK;
997 if (rc == VERR_NOT_SUPPORTED)
998 return VINF_SUCCESS;
999
1000 LogRelMax(32, ("RTThreadCtxHookCreate failed! rc=%Rrc pVCpu=%p idCpu=%RU32\n", rc, pVCpu, pVCpu->idCpu));
1001 return VINF_SUCCESS; /* Just ignore it, we can live without context hooks. */
1002}
1003
1004
1005/**
1006 * Destroys the thread switching hook for the specified VCPU.
1007 *
1008 * @param pVCpu The cross context virtual CPU structure.
1009 * @remarks Can be called from any thread.
1010 */
1011VMMR0_INT_DECL(void) VMMR0ThreadCtxHookDestroyForEmt(PVMCPU pVCpu)
1012{
1013 int rc = RTThreadCtxHookDestroy(pVCpu->vmm.s.hCtxHook);
1014 AssertRC(rc);
1015 pVCpu->vmm.s.hCtxHook = NIL_RTTHREADCTXHOOK;
1016}
1017
1018
1019/**
1020 * Disables the thread switching hook for this VCPU (if we got one).
1021 *
1022 * @param pVCpu The cross context virtual CPU structure.
1023 * @thread EMT(pVCpu)
1024 *
1025 * @remarks This also clears VMCPU::idHostCpu, so the mapping is invalid after
1026 * this call. This means you have to be careful with what you do!
1027 */
1028VMMR0_INT_DECL(void) VMMR0ThreadCtxHookDisable(PVMCPU pVCpu)
1029{
1030 /*
1031 * Clear the VCPU <-> host CPU mapping as we've left HM context.
1032 * @bugref{7726#c19} explains the need for this trick:
1033 *
1034 * hmR0VmxCallRing3Callback/hmR0SvmCallRing3Callback &
1035 * hmR0VmxLeaveSession/hmR0SvmLeaveSession disables context hooks during
1036 * longjmp & normal return to ring-3, which opens a window where we may be
1037 * rescheduled without changing VMCPUID::idHostCpu and cause confusion if
1038 * the CPU starts executing a different EMT. Both functions first disable
1039 * preemption and then call HMR0LeaveCpu, which invalidates idHostCpu, leaving
1040 * an opening for getting preempted.
1041 */
1042 /** @todo Make HM not need this API! Then we could leave the hooks enabled
1043 * all the time. */
1044 /** @todo move this into the context hook disabling if(). */
1045 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1046
1047 /*
1048 * Disable the context hook, if we got one.
1049 */
1050 if (pVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1051 {
1052 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1053 int rc = RTThreadCtxHookDisable(pVCpu->vmm.s.hCtxHook);
1054 AssertRC(rc);
1055 }
1056}
1057
1058
1059/**
1060 * Internal version of VMMR0ThreadCtxHooksAreRegistered.
1061 *
1062 * @returns true if registered, false otherwise.
1063 * @param pVCpu The cross context virtual CPU structure.
1064 */
1065DECLINLINE(bool) vmmR0ThreadCtxHookIsEnabled(PVMCPU pVCpu)
1066{
1067 return RTThreadCtxHookIsEnabled(pVCpu->vmm.s.hCtxHook);
1068}
1069
1070
1071/**
1072 * Whether thread-context hooks are registered for this VCPU.
1073 *
1074 * @returns true if registered, false otherwise.
1075 * @param pVCpu The cross context virtual CPU structure.
1076 */
1077VMMR0_INT_DECL(bool) VMMR0ThreadCtxHookIsEnabled(PVMCPU pVCpu)
1078{
1079 return vmmR0ThreadCtxHookIsEnabled(pVCpu);
1080}
1081
1082
1083#ifdef VBOX_WITH_STATISTICS
1084/**
1085 * Record return code statistics
1086 * @param pVM The cross context VM structure.
1087 * @param pVCpu The cross context virtual CPU structure.
1088 * @param rc The status code.
1089 */
1090static void vmmR0RecordRC(PVM pVM, PVMCPU pVCpu, int rc)
1091{
1092 /*
1093 * Collect statistics.
1094 */
1095 switch (rc)
1096 {
1097 case VINF_SUCCESS:
1098 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetNormal);
1099 break;
1100 case VINF_EM_RAW_INTERRUPT:
1101 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterrupt);
1102 break;
1103 case VINF_EM_RAW_INTERRUPT_HYPER:
1104 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptHyper);
1105 break;
1106 case VINF_EM_RAW_GUEST_TRAP:
1107 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGuestTrap);
1108 break;
1109 case VINF_EM_RAW_RING_SWITCH:
1110 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitch);
1111 break;
1112 case VINF_EM_RAW_RING_SWITCH_INT:
1113 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitchInt);
1114 break;
1115 case VINF_EM_RAW_STALE_SELECTOR:
1116 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetStaleSelector);
1117 break;
1118 case VINF_EM_RAW_IRET_TRAP:
1119 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIRETTrap);
1120 break;
1121 case VINF_IOM_R3_IOPORT_READ:
1122 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIORead);
1123 break;
1124 case VINF_IOM_R3_IOPORT_WRITE:
1125 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOWrite);
1126 break;
1127 case VINF_IOM_R3_IOPORT_COMMIT_WRITE:
1128 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOCommitWrite);
1129 break;
1130 case VINF_IOM_R3_MMIO_READ:
1131 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIORead);
1132 break;
1133 case VINF_IOM_R3_MMIO_WRITE:
1134 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOWrite);
1135 break;
1136 case VINF_IOM_R3_MMIO_COMMIT_WRITE:
1137 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOCommitWrite);
1138 break;
1139 case VINF_IOM_R3_MMIO_READ_WRITE:
1140 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOReadWrite);
1141 break;
1142 case VINF_PATM_HC_MMIO_PATCH_READ:
1143 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchRead);
1144 break;
1145 case VINF_PATM_HC_MMIO_PATCH_WRITE:
1146 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchWrite);
1147 break;
1148 case VINF_CPUM_R3_MSR_READ:
1149 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMSRRead);
1150 break;
1151 case VINF_CPUM_R3_MSR_WRITE:
1152 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMSRWrite);
1153 break;
1154 case VINF_EM_RAW_EMULATE_INSTR:
1155 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetEmulate);
1156 break;
1157 case VINF_PATCH_EMULATE_INSTR:
1158 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchEmulate);
1159 break;
1160 case VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT:
1161 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetLDTFault);
1162 break;
1163 case VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT:
1164 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGDTFault);
1165 break;
1166 case VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT:
1167 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIDTFault);
1168 break;
1169 case VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT:
1170 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTSSFault);
1171 break;
1172 case VINF_CSAM_PENDING_ACTION:
1173 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCSAMTask);
1174 break;
1175 case VINF_PGM_SYNC_CR3:
1176 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetSyncCR3);
1177 break;
1178 case VINF_PATM_PATCH_INT3:
1179 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchInt3);
1180 break;
1181 case VINF_PATM_PATCH_TRAP_PF:
1182 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchPF);
1183 break;
1184 case VINF_PATM_PATCH_TRAP_GP:
1185 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchGP);
1186 break;
1187 case VINF_PATM_PENDING_IRQ_AFTER_IRET:
1188 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchIretIRQ);
1189 break;
1190 case VINF_EM_RESCHEDULE_REM:
1191 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRescheduleREM);
1192 break;
1193 case VINF_EM_RAW_TO_R3:
1194 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Total);
1195 if (VM_FF_IS_SET(pVM, VM_FF_TM_VIRTUAL_SYNC))
1196 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3TMVirt);
1197 else if (VM_FF_IS_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES))
1198 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3HandyPages);
1199 else if (VM_FF_IS_SET(pVM, VM_FF_PDM_QUEUES))
1200 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3PDMQueues);
1201 else if (VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
1202 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Rendezvous);
1203 else if (VM_FF_IS_SET(pVM, VM_FF_PDM_DMA))
1204 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3DMA);
1205 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TIMER))
1206 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Timer);
1207 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PDM_CRITSECT))
1208 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3CritSect);
1209 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TO_R3))
1210 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3FF);
1211 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
1212 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Iem);
1213 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IOM))
1214 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Iom);
1215 else
1216 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Unknown);
1217 break;
1218
1219 case VINF_EM_RAW_TIMER_PENDING:
1220 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTimerPending);
1221 break;
1222 case VINF_EM_RAW_INTERRUPT_PENDING:
1223 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptPending);
1224 break;
1225 case VINF_VMM_CALL_HOST:
1226 switch (pVCpu->vmm.s.enmCallRing3Operation)
1227 {
1228 case VMMCALLRING3_PDM_CRIT_SECT_ENTER:
1229 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPDMCritSectEnter);
1230 break;
1231 case VMMCALLRING3_PDM_LOCK:
1232 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPDMLock);
1233 break;
1234 case VMMCALLRING3_PGM_POOL_GROW:
1235 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMPoolGrow);
1236 break;
1237 case VMMCALLRING3_PGM_LOCK:
1238 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMLock);
1239 break;
1240 case VMMCALLRING3_PGM_MAP_CHUNK:
1241 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMMapChunk);
1242 break;
1243 case VMMCALLRING3_PGM_ALLOCATE_HANDY_PAGES:
1244 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMAllocHandy);
1245 break;
1246 case VMMCALLRING3_REM_REPLAY_HANDLER_NOTIFICATIONS:
1247 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallRemReplay);
1248 break;
1249 case VMMCALLRING3_VMM_LOGGER_FLUSH:
1250 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallLogFlush);
1251 break;
1252 case VMMCALLRING3_VM_SET_ERROR:
1253 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallVMSetError);
1254 break;
1255 case VMMCALLRING3_VM_SET_RUNTIME_ERROR:
1256 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallVMSetRuntimeError);
1257 break;
1258 case VMMCALLRING3_VM_R0_ASSERTION:
1259 default:
1260 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCallRing3);
1261 break;
1262 }
1263 break;
1264 case VINF_PATM_DUPLICATE_FUNCTION:
1265 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPATMDuplicateFn);
1266 break;
1267 case VINF_PGM_CHANGE_MODE:
1268 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMChangeMode);
1269 break;
1270 case VINF_PGM_POOL_FLUSH_PENDING:
1271 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMFlushPending);
1272 break;
1273 case VINF_EM_PENDING_REQUEST:
1274 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPendingRequest);
1275 break;
1276 case VINF_EM_HM_PATCH_TPR_INSTR:
1277 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchTPR);
1278 break;
1279 default:
1280 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMisc);
1281 break;
1282 }
1283}
1284#endif /* VBOX_WITH_STATISTICS */
1285
1286
1287/**
1288 * The Ring 0 entry point, called by the fast-ioctl path.
1289 *
1290 * @param pGVM The global (ring-0) VM structure.
1291 * @param pVM The cross context VM structure.
1292 * The return code is stored in pVM->vmm.s.iLastGZRc.
1293 * @param idCpu The Virtual CPU ID of the calling EMT.
1294 * @param enmOperation Which operation to execute.
1295 * @remarks Assume called with interrupts _enabled_.
1296 */
1297VMMR0DECL(void) VMMR0EntryFast(PGVM pGVM, PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation)
1298{
1299 /*
1300 * Validation.
1301 */
1302 if ( idCpu < pGVM->cCpus
1303 && pGVM->cCpus == pVM->cCpus)
1304 { /*likely*/ }
1305 else
1306 {
1307 SUPR0Printf("VMMR0EntryFast: Bad idCpu=%#x cCpus=%#x/%#x\n", idCpu, pGVM->cCpus, pVM->cCpus);
1308 return;
1309 }
1310
1311 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
1312#ifdef VBOX_BUGREF_9217
1313 PVMCPU pVCpu = pGVCpu;
1314#else
1315 PVMCPU pVCpu = &pVM->aCpus[idCpu];
1316#endif
1317 RTNATIVETHREAD const hNativeThread = RTThreadNativeSelf();
1318 if (RT_LIKELY( pGVCpu->hEMT == hNativeThread
1319 && pVCpu->hNativeThreadR0 == hNativeThread))
1320 { /* likely */ }
1321 else
1322 {
1323 SUPR0Printf("VMMR0EntryFast: Bad thread idCpu=%#x hNativeSelf=%p pGVCpu->hEmt=%p pVCpu->hNativeThreadR0=%p\n",
1324 idCpu, hNativeThread, pGVCpu->hEMT, pVCpu->hNativeThreadR0);
1325 return;
1326 }
1327
1328 /*
1329 * SMAP fun.
1330 */
1331 VMM_CHECK_SMAP_SETUP();
1332 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1333
1334 /*
1335 * Perform requested operation.
1336 */
1337 switch (enmOperation)
1338 {
1339 /*
1340 * Run guest code using the available hardware acceleration technology.
1341 */
1342 case VMMR0_DO_HM_RUN:
1343 {
1344 for (;;) /* hlt loop */
1345 {
1346 /*
1347 * Disable preemption.
1348 */
1349 Assert(!vmmR0ThreadCtxHookIsEnabled(pVCpu));
1350 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
1351 RTThreadPreemptDisable(&PreemptState);
1352
1353 /*
1354 * Get the host CPU identifiers, make sure they are valid and that
1355 * we've got a TSC delta for the CPU.
1356 */
1357 RTCPUID idHostCpu;
1358 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
1359 if (RT_LIKELY( iHostCpuSet < RTCPUSET_MAX_CPUS
1360 && SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
1361 {
1362 pVCpu->iHostCpuSet = iHostCpuSet;
1363 ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
1364
1365 /*
1366 * Update the periodic preemption timer if it's active.
1367 */
1368 if (pVM->vmm.s.fUsePeriodicPreemptionTimers)
1369 GVMMR0SchedUpdatePeriodicPreemptionTimer(pVM, pVCpu->idHostCpu, TMCalcHostTimerFrequency(pVM, pVCpu));
1370 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1371
1372#ifdef VMM_R0_TOUCH_FPU
1373 /*
1374 * Make sure we've got the FPU state loaded so we don't need to clear
1375 * CR0.TS and get out of sync with the host kernel when loading the guest
1376 * FPU state. @ref sec_cpum_fpu (CPUM.cpp) and @bugref{4053}.
1377 */
1378 CPUMR0TouchHostFpu();
1379#endif
1380 int rc;
1381 bool fPreemptRestored = false;
1382 if (!HMR0SuspendPending())
1383 {
1384 /*
1385 * Enable the context switching hook.
1386 */
1387 if (pVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1388 {
1389 Assert(!RTThreadCtxHookIsEnabled(pVCpu->vmm.s.hCtxHook));
1390 int rc2 = RTThreadCtxHookEnable(pVCpu->vmm.s.hCtxHook); AssertRC(rc2);
1391 }
1392
1393 /*
1394 * Enter HM context.
1395 */
1396 rc = HMR0Enter(pVCpu);
1397 if (RT_SUCCESS(rc))
1398 {
1399 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
1400
1401 /*
1402 * When preemption hooks are in place, enable preemption now that
1403 * we're in HM context.
1404 */
1405 if (vmmR0ThreadCtxHookIsEnabled(pVCpu))
1406 {
1407 fPreemptRestored = true;
1408 RTThreadPreemptRestore(&PreemptState);
1409 }
1410
1411 /*
1412 * Setup the longjmp machinery and execute guest code (calls HMR0RunGuestCode).
1413 */
1414 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1415 rc = vmmR0CallRing3SetJmp(&pVCpu->vmm.s.CallRing3JmpBufR0, HMR0RunGuestCode, pVM, pVCpu);
1416 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1417
1418 /*
1419 * Assert sanity on the way out. Using manual assertions code here as normal
1420 * assertions are going to panic the host since we're outside the setjmp/longjmp zone.
1421 */
1422 if (RT_UNLIKELY( VMCPU_GET_STATE(pVCpu) != VMCPUSTATE_STARTED_HM
1423 && RT_SUCCESS_NP(rc) && rc != VINF_VMM_CALL_HOST ))
1424 {
1425 pVM->vmm.s.szRing0AssertMsg1[0] = '\0';
1426 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2),
1427 "Got VMCPU state %d expected %d.\n", VMCPU_GET_STATE(pVCpu), VMCPUSTATE_STARTED_HM);
1428 rc = VERR_VMM_WRONG_HM_VMCPU_STATE;
1429 }
1430 /** @todo Get rid of this. HM shouldn't disable the context hook. */
1431 else if (RT_UNLIKELY(vmmR0ThreadCtxHookIsEnabled(pVCpu)))
1432 {
1433 pVM->vmm.s.szRing0AssertMsg1[0] = '\0';
1434 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2),
1435 "Thread-context hooks still enabled! VCPU=%p Id=%u rc=%d.\n", pVCpu, pVCpu->idCpu, rc);
1436 rc = VERR_INVALID_STATE;
1437 }
1438
1439 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
1440 }
1441 STAM_COUNTER_INC(&pVM->vmm.s.StatRunGC);
1442
1443 /*
1444 * Invalidate the host CPU identifiers before we disable the context
1445 * hook / restore preemption.
1446 */
1447 pVCpu->iHostCpuSet = UINT32_MAX;
1448 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1449
1450 /*
1451 * Disable context hooks. Due to unresolved cleanup issues, we
1452 * cannot leave the hooks enabled when we return to ring-3.
1453 *
1454 * Note! At the moment HM may also have disabled the hook
1455 * when we get here, but the IPRT API handles that.
1456 */
1457 if (pVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1458 {
1459 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1460 RTThreadCtxHookDisable(pVCpu->vmm.s.hCtxHook);
1461 }
1462 }
1463 /*
1464 * The system is about to go into suspend mode; go back to ring 3.
1465 */
1466 else
1467 {
1468 rc = VINF_EM_RAW_INTERRUPT;
1469 pVCpu->iHostCpuSet = UINT32_MAX;
1470 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1471 }
1472
1473 /** @todo When HM stops messing with the context hook state, we'll disable
1474 * preemption again before the RTThreadCtxHookDisable call. */
1475 if (!fPreemptRestored)
1476 RTThreadPreemptRestore(&PreemptState);
1477
1478 pVCpu->vmm.s.iLastGZRc = rc;
1479
1480 /* Fire dtrace probe and collect statistics. */
1481 VBOXVMM_R0_VMM_RETURN_TO_RING3_HM(pVCpu, CPUMQueryGuestCtxPtr(pVCpu), rc);
1482#ifdef VBOX_WITH_STATISTICS
1483 vmmR0RecordRC(pVM, pVCpu, rc);
1484#endif
1485#if 1
1486 /*
1487 * If this is a halt.
1488 */
1489 if (rc != VINF_EM_HALT)
1490 { /* we're not in a hurry for a HLT, so prefer this path */ }
1491 else
1492 {
1493 pVCpu->vmm.s.iLastGZRc = rc = vmmR0DoHalt(pGVM, pVM, pGVCpu, pVCpu);
1494 if (rc == VINF_SUCCESS)
1495 {
1496 pVCpu->vmm.s.cR0HaltsSucceeded++;
1497 continue;
1498 }
1499 pVCpu->vmm.s.cR0HaltsToRing3++;
1500 }
1501#endif
1502 }
1503 /*
1504 * Invalid CPU set index or TSC delta in need of measuring.
1505 */
1506 else
1507 {
1508 pVCpu->iHostCpuSet = UINT32_MAX;
1509 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1510 RTThreadPreemptRestore(&PreemptState);
1511 if (iHostCpuSet < RTCPUSET_MAX_CPUS)
1512 {
1513 int rc = SUPR0TscDeltaMeasureBySetIndex(pVM->pSession, iHostCpuSet, 0 /*fFlags*/,
1514 2 /*cMsWaitRetry*/, 5*RT_MS_1SEC /*cMsWaitThread*/,
1515 0 /*default cTries*/);
1516 if (RT_SUCCESS(rc) || rc == VERR_CPU_OFFLINE)
1517 pVCpu->vmm.s.iLastGZRc = VINF_EM_RAW_TO_R3;
1518 else
1519 pVCpu->vmm.s.iLastGZRc = rc;
1520 }
1521 else
1522 pVCpu->vmm.s.iLastGZRc = VERR_INVALID_CPU_INDEX;
1523 }
1524 break;
1525
1526 } /* halt loop. */
1527 break;
1528 }
1529
1530#ifdef VBOX_WITH_NEM_R0
1531# if defined(RT_ARCH_AMD64) && defined(RT_OS_WINDOWS)
1532 case VMMR0_DO_NEM_RUN:
1533 {
1534 /*
1535 * Setup the longjmp machinery and execute guest code (calls NEMR0RunGuestCode).
1536 */
1537 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1538 int rc = vmmR0CallRing3SetJmp2(&pVCpu->vmm.s.CallRing3JmpBufR0, NEMR0RunGuestCode, pGVM, idCpu);
1539 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1540 STAM_COUNTER_INC(&pVM->vmm.s.StatRunGC);
1541
1542 pVCpu->vmm.s.iLastGZRc = rc;
1543
1544 /*
1545 * Fire dtrace probe and collect statistics.
1546 */
1547 VBOXVMM_R0_VMM_RETURN_TO_RING3_NEM(pVCpu, CPUMQueryGuestCtxPtr(pVCpu), rc);
1548# ifdef VBOX_WITH_STATISTICS
1549 vmmR0RecordRC(pVM, pVCpu, rc);
1550# endif
1551 break;
1552 }
1553# endif
1554#endif
1555
1556 /*
1557 * For profiling.
1558 */
1559 case VMMR0_DO_NOP:
1560 pVCpu->vmm.s.iLastGZRc = VINF_SUCCESS;
1561 break;
1562
1563 /*
1564 * Shouldn't happen.
1565 */
1566 default:
1567 AssertMsgFailed(("%#x\n", enmOperation));
1568 pVCpu->vmm.s.iLastGZRc = VERR_NOT_SUPPORTED;
1569 break;
1570 }
1571 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1572}
1573
1574
1575/**
1576 * Validates a session or VM session argument.
1577 *
1578 * @returns true / false accordingly.
1579 * @param pVM The cross context VM structure.
1580 * @param pClaimedSession The session claim to validate.
1581 * @param pSession The session argument.
1582 */
1583DECLINLINE(bool) vmmR0IsValidSession(PVM pVM, PSUPDRVSESSION pClaimedSession, PSUPDRVSESSION pSession)
1584{
1585 /* This must be set! */
1586 if (!pSession)
1587 return false;
1588
1589 /* Only one out of the two. */
1590 if (pVM && pClaimedSession)
1591 return false;
1592 if (pVM)
1593 pClaimedSession = pVM->pSession;
1594 return pClaimedSession == pSession;
1595}
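/* The two accepted call shapes for vmmR0IsValidSession() above, following
   directly from its checks (illustrative only, hypothetical caller variables):
   @code
       vmmR0IsValidSession(pVM,  NULL,            pSession);  // VM request, session taken from pVM->pSession
       vmmR0IsValidSession(NULL, pClaimedSession, pSession);  // session-only request
   @endcode */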
1596
1597
1598/**
1599 * VMMR0EntryEx worker function, either called directly or whenever possible
1600 * called thru a longjmp so we can exit safely on failure.
1601 *
1602 * @returns VBox status code.
1603 * @param pGVM The global (ring-0) VM structure.
1604 * @param pVM The cross context VM structure.
1605 * @param idCpu Virtual CPU ID argument. Must be NIL_VMCPUID if pVM
1606 * is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't
1607 * @param enmOperation Which operation to execute.
1608 * @param pReqHdr This points to a SUPVMMR0REQHDR packet. Optional.
1609 * The support driver validates this if it's present.
1610 * @param u64Arg Some simple constant argument.
1611 * @param pSession The session of the caller.
1612 *
1613 * @remarks Assume called with interrupts _enabled_.
1614 */
1615static int vmmR0EntryExWorker(PGVM pGVM, PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation,
1616 PSUPVMMR0REQHDR pReqHdr, uint64_t u64Arg, PSUPDRVSESSION pSession)
1617{
1618 /*
1619 * Validate pGVM, pVM and idCpu for consistency and validity.
1620 */
1621 if ( pGVM != NULL
1622 || pVM != NULL)
1623 {
1624 if (RT_LIKELY( RT_VALID_PTR(pGVM)
1625 && RT_VALID_PTR(pVM)
1626 && ((uintptr_t)pVM & PAGE_OFFSET_MASK) == 0))
1627 { /* likely */ }
1628 else
1629 {
1630 SUPR0Printf("vmmR0EntryExWorker: Invalid pGVM=%p and/or pVM=%p! (op=%d)\n", pGVM, pVM, enmOperation);
1631 return VERR_INVALID_POINTER;
1632 }
1633
1634#ifdef VBOX_BUGREF_9217
1635 if (RT_LIKELY(pGVM == pVM))
1636#else
1637 if (RT_LIKELY(pGVM->pVM == pVM))
1638#endif
1639 { /* likely */ }
1640 else
1641 {
1642#ifdef VBOX_BUGREF_9217
1643 SUPR0Printf("vmmR0EntryExWorker: pVM mismatch: got %p, pGVM/pVM=%p\n", pVM, pGVM);
1644#else
1645 SUPR0Printf("vmmR0EntryExWorker: pVM mismatch: got %p, pGVM->pVM=%p\n", pVM, pGVM->pVM);
1646#endif
1647 return VERR_INVALID_PARAMETER;
1648 }
1649
1650 if (RT_LIKELY(idCpu == NIL_VMCPUID || idCpu < pGVM->cCpus))
1651 { /* likely */ }
1652 else
1653 {
1654 SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu %#x (cCpus=%#x)\n", idCpu, pGVM->cCpus);
1655 return VERR_INVALID_PARAMETER;
1656 }
1657
1658 if (RT_LIKELY( pVM->enmVMState >= VMSTATE_CREATING
1659 && pVM->enmVMState <= VMSTATE_TERMINATED
1660 && pVM->cCpus == pGVM->cCpus
1661 && pVM->pSession == pSession
1662 && pVM->pVMR0 == pVM))
1663 { /* likely */ }
1664 else
1665 {
1666 SUPR0Printf("vmmR0EntryExWorker: Invalid pVM=%p:{.enmVMState=%d, .cCpus=%#x(==%#x), .pSession=%p(==%p), .pVMR0=%p(==%p)}! (op=%d)\n",
1667 pVM, pVM->enmVMState, pVM->cCpus, pGVM->cCpus, pVM->pSession, pSession, pVM->pVMR0, pVM, enmOperation);
1668 return VERR_INVALID_POINTER;
1669 }
1670 }
1671 else if (RT_LIKELY(idCpu == NIL_VMCPUID))
1672 { /* likely */ }
1673 else
1674 {
1675 SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu=%u\n", idCpu);
1676 return VERR_INVALID_PARAMETER;
1677 }
1678
1679 /*
1680 * SMAP fun.
1681 */
1682 VMM_CHECK_SMAP_SETUP();
1683 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
1684
1685 /*
1686 * Process the request.
1687 */
1688 int rc;
1689 switch (enmOperation)
1690 {
1691 /*
1692 * GVM requests
1693 */
1694 case VMMR0_DO_GVMM_CREATE_VM:
1695 if (pGVM == NULL && pVM == NULL && u64Arg == 0 && idCpu == NIL_VMCPUID)
1696 rc = GVMMR0CreateVMReq((PGVMMCREATEVMREQ)pReqHdr, pSession);
1697 else
1698 rc = VERR_INVALID_PARAMETER;
1699 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
1700 break;
1701
1702 case VMMR0_DO_GVMM_DESTROY_VM:
1703 if (pReqHdr == NULL && u64Arg == 0)
1704 rc = GVMMR0DestroyVM(pGVM, pVM);
1705 else
1706 rc = VERR_INVALID_PARAMETER;
1707 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
1708 break;
1709
1710 case VMMR0_DO_GVMM_REGISTER_VMCPU:
1711 if (pGVM != NULL && pVM != NULL)
1712 rc = GVMMR0RegisterVCpu(pGVM, pVM, idCpu);
1713 else
1714 rc = VERR_INVALID_PARAMETER;
1715 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1716 break;
1717
1718 case VMMR0_DO_GVMM_DEREGISTER_VMCPU:
1719 if (pGVM != NULL && pVM != NULL)
1720 rc = GVMMR0DeregisterVCpu(pGVM, pVM, idCpu);
1721 else
1722 rc = VERR_INVALID_PARAMETER;
1723 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1724 break;
1725
1726 case VMMR0_DO_GVMM_SCHED_HALT:
1727 if (pReqHdr)
1728 return VERR_INVALID_PARAMETER;
1729 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1730 rc = GVMMR0SchedHaltReq(pGVM, pVM, idCpu, u64Arg);
1731 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1732 break;
1733
1734 case VMMR0_DO_GVMM_SCHED_WAKE_UP:
1735 if (pReqHdr || u64Arg)
1736 return VERR_INVALID_PARAMETER;
1737 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1738 rc = GVMMR0SchedWakeUp(pGVM, pVM, idCpu);
1739 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1740 break;
1741
1742 case VMMR0_DO_GVMM_SCHED_POKE:
1743 if (pReqHdr || u64Arg)
1744 return VERR_INVALID_PARAMETER;
1745 rc = GVMMR0SchedPoke(pGVM, pVM, idCpu);
1746 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1747 break;
1748
1749 case VMMR0_DO_GVMM_SCHED_WAKE_UP_AND_POKE_CPUS:
1750 if (u64Arg)
1751 return VERR_INVALID_PARAMETER;
1752 rc = GVMMR0SchedWakeUpAndPokeCpusReq(pGVM, pVM, (PGVMMSCHEDWAKEUPANDPOKECPUSREQ)pReqHdr);
1753 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1754 break;
1755
1756 case VMMR0_DO_GVMM_SCHED_POLL:
1757 if (pReqHdr || u64Arg > 1)
1758 return VERR_INVALID_PARAMETER;
1759 rc = GVMMR0SchedPoll(pGVM, pVM, idCpu, !!u64Arg);
1760 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1761 break;
1762
1763 case VMMR0_DO_GVMM_QUERY_STATISTICS:
1764 if (u64Arg)
1765 return VERR_INVALID_PARAMETER;
1766 rc = GVMMR0QueryStatisticsReq(pGVM, pVM, (PGVMMQUERYSTATISTICSSREQ)pReqHdr, pSession);
1767 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1768 break;
1769
1770 case VMMR0_DO_GVMM_RESET_STATISTICS:
1771 if (u64Arg)
1772 return VERR_INVALID_PARAMETER;
1773 rc = GVMMR0ResetStatisticsReq(pGVM, pVM, (PGVMMRESETSTATISTICSSREQ)pReqHdr, pSession);
1774 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1775 break;
1776
1777 /*
1778 * Initialize the R0 part of a VM instance.
1779 */
1780 case VMMR0_DO_VMMR0_INIT:
1781 rc = vmmR0InitVM(pGVM, pVM, RT_LODWORD(u64Arg), RT_HIDWORD(u64Arg));
1782 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1783 break;
1784
1785 /*
1786         * Does EMT-specific ring-0 init.
1787 */
1788 case VMMR0_DO_VMMR0_INIT_EMT:
1789 rc = vmmR0InitVMEmt(pGVM, pVM, idCpu);
1790 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1791 break;
1792
1793 /*
1794 * Terminate the R0 part of a VM instance.
1795 */
1796 case VMMR0_DO_VMMR0_TERM:
1797 rc = VMMR0TermVM(pGVM, pVM, 0 /*idCpu*/);
1798 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1799 break;
1800
1801 /*
1802         * Attempt to enable HM mode and check the current setting.
1803 */
1804 case VMMR0_DO_HM_ENABLE:
1805 rc = HMR0EnableAllCpus(pVM);
1806 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1807 break;
1808
1809 /*
1810         * Set up the hardware-accelerated session.
1811 */
1812 case VMMR0_DO_HM_SETUP_VM:
1813 rc = HMR0SetupVM(pVM);
1814 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1815 break;
1816
1817 /*
1818 * PGM wrappers.
1819 */
1820 case VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES:
1821 if (idCpu == NIL_VMCPUID)
1822 return VERR_INVALID_CPU_ID;
1823 rc = PGMR0PhysAllocateHandyPages(pGVM, pVM, idCpu);
1824 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1825 break;
1826
1827 case VMMR0_DO_PGM_FLUSH_HANDY_PAGES:
1828 if (idCpu == NIL_VMCPUID)
1829 return VERR_INVALID_CPU_ID;
1830 rc = PGMR0PhysFlushHandyPages(pGVM, pVM, idCpu);
1831 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1832 break;
1833
1834 case VMMR0_DO_PGM_ALLOCATE_LARGE_HANDY_PAGE:
1835 if (idCpu == NIL_VMCPUID)
1836 return VERR_INVALID_CPU_ID;
1837 rc = PGMR0PhysAllocateLargeHandyPage(pGVM, pVM, idCpu);
1838 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1839 break;
1840
1841 case VMMR0_DO_PGM_PHYS_SETUP_IOMMU:
1842 if (idCpu != 0)
1843 return VERR_INVALID_CPU_ID;
1844 rc = PGMR0PhysSetupIoMmu(pGVM, pVM);
1845 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1846 break;
1847
1848 /*
1849 * GMM wrappers.
1850 */
1851 case VMMR0_DO_GMM_INITIAL_RESERVATION:
1852 if (u64Arg)
1853 return VERR_INVALID_PARAMETER;
1854 rc = GMMR0InitialReservationReq(pGVM, pVM, idCpu, (PGMMINITIALRESERVATIONREQ)pReqHdr);
1855 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1856 break;
1857
1858 case VMMR0_DO_GMM_UPDATE_RESERVATION:
1859 if (u64Arg)
1860 return VERR_INVALID_PARAMETER;
1861 rc = GMMR0UpdateReservationReq(pGVM, pVM, idCpu, (PGMMUPDATERESERVATIONREQ)pReqHdr);
1862 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1863 break;
1864
1865 case VMMR0_DO_GMM_ALLOCATE_PAGES:
1866 if (u64Arg)
1867 return VERR_INVALID_PARAMETER;
1868 rc = GMMR0AllocatePagesReq(pGVM, pVM, idCpu, (PGMMALLOCATEPAGESREQ)pReqHdr);
1869 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1870 break;
1871
1872 case VMMR0_DO_GMM_FREE_PAGES:
1873 if (u64Arg)
1874 return VERR_INVALID_PARAMETER;
1875 rc = GMMR0FreePagesReq(pGVM, pVM, idCpu, (PGMMFREEPAGESREQ)pReqHdr);
1876 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1877 break;
1878
1879 case VMMR0_DO_GMM_FREE_LARGE_PAGE:
1880 if (u64Arg)
1881 return VERR_INVALID_PARAMETER;
1882 rc = GMMR0FreeLargePageReq(pGVM, pVM, idCpu, (PGMMFREELARGEPAGEREQ)pReqHdr);
1883 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1884 break;
1885
1886 case VMMR0_DO_GMM_QUERY_HYPERVISOR_MEM_STATS:
1887 if (u64Arg)
1888 return VERR_INVALID_PARAMETER;
1889 rc = GMMR0QueryHypervisorMemoryStatsReq((PGMMMEMSTATSREQ)pReqHdr);
1890 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1891 break;
1892
1893 case VMMR0_DO_GMM_QUERY_MEM_STATS:
1894 if (idCpu == NIL_VMCPUID)
1895 return VERR_INVALID_CPU_ID;
1896 if (u64Arg)
1897 return VERR_INVALID_PARAMETER;
1898 rc = GMMR0QueryMemoryStatsReq(pGVM, pVM, idCpu, (PGMMMEMSTATSREQ)pReqHdr);
1899 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1900 break;
1901
1902 case VMMR0_DO_GMM_BALLOONED_PAGES:
1903 if (u64Arg)
1904 return VERR_INVALID_PARAMETER;
1905 rc = GMMR0BalloonedPagesReq(pGVM, pVM, idCpu, (PGMMBALLOONEDPAGESREQ)pReqHdr);
1906 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1907 break;
1908
1909 case VMMR0_DO_GMM_MAP_UNMAP_CHUNK:
1910 if (u64Arg)
1911 return VERR_INVALID_PARAMETER;
1912 rc = GMMR0MapUnmapChunkReq(pGVM, pVM, (PGMMMAPUNMAPCHUNKREQ)pReqHdr);
1913 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1914 break;
1915
1916 case VMMR0_DO_GMM_SEED_CHUNK:
1917 if (pReqHdr)
1918 return VERR_INVALID_PARAMETER;
1919 rc = GMMR0SeedChunk(pGVM, pVM, idCpu, (RTR3PTR)u64Arg);
1920 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1921 break;
1922
1923 case VMMR0_DO_GMM_REGISTER_SHARED_MODULE:
1924 if (idCpu == NIL_VMCPUID)
1925 return VERR_INVALID_CPU_ID;
1926 if (u64Arg)
1927 return VERR_INVALID_PARAMETER;
1928 rc = GMMR0RegisterSharedModuleReq(pGVM, pVM, idCpu, (PGMMREGISTERSHAREDMODULEREQ)pReqHdr);
1929 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1930 break;
1931
1932 case VMMR0_DO_GMM_UNREGISTER_SHARED_MODULE:
1933 if (idCpu == NIL_VMCPUID)
1934 return VERR_INVALID_CPU_ID;
1935 if (u64Arg)
1936 return VERR_INVALID_PARAMETER;
1937 rc = GMMR0UnregisterSharedModuleReq(pGVM, pVM, idCpu, (PGMMUNREGISTERSHAREDMODULEREQ)pReqHdr);
1938 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1939 break;
1940
1941 case VMMR0_DO_GMM_RESET_SHARED_MODULES:
1942 if (idCpu == NIL_VMCPUID)
1943 return VERR_INVALID_CPU_ID;
1944 if ( u64Arg
1945 || pReqHdr)
1946 return VERR_INVALID_PARAMETER;
1947 rc = GMMR0ResetSharedModules(pGVM, pVM, idCpu);
1948 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1949 break;
1950
1951#ifdef VBOX_WITH_PAGE_SHARING
1952 case VMMR0_DO_GMM_CHECK_SHARED_MODULES:
1953 {
1954 if (idCpu == NIL_VMCPUID)
1955 return VERR_INVALID_CPU_ID;
1956 if ( u64Arg
1957 || pReqHdr)
1958 return VERR_INVALID_PARAMETER;
1959 rc = GMMR0CheckSharedModules(pGVM, pVM, idCpu);
1960 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1961 break;
1962 }
1963#endif
1964
1965#if defined(VBOX_STRICT) && HC_ARCH_BITS == 64
1966 case VMMR0_DO_GMM_FIND_DUPLICATE_PAGE:
1967 if (u64Arg)
1968 return VERR_INVALID_PARAMETER;
1969 rc = GMMR0FindDuplicatePageReq(pGVM, pVM, (PGMMFINDDUPLICATEPAGEREQ)pReqHdr);
1970 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1971 break;
1972#endif
1973
1974 case VMMR0_DO_GMM_QUERY_STATISTICS:
1975 if (u64Arg)
1976 return VERR_INVALID_PARAMETER;
1977 rc = GMMR0QueryStatisticsReq(pGVM, pVM, (PGMMQUERYSTATISTICSSREQ)pReqHdr);
1978 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1979 break;
1980
1981 case VMMR0_DO_GMM_RESET_STATISTICS:
1982 if (u64Arg)
1983 return VERR_INVALID_PARAMETER;
1984 rc = GMMR0ResetStatisticsReq(pGVM, pVM, (PGMMRESETSTATISTICSSREQ)pReqHdr);
1985 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1986 break;
1987
1988 /*
1989 * A quick GCFGM mock-up.
1990 */
1991 /** @todo GCFGM with proper access control, ring-3 management interface and all that. */
1992 case VMMR0_DO_GCFGM_SET_VALUE:
1993 case VMMR0_DO_GCFGM_QUERY_VALUE:
1994 {
1995 if (pGVM || pVM || !pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
1996 return VERR_INVALID_PARAMETER;
1997 PGCFGMVALUEREQ pReq = (PGCFGMVALUEREQ)pReqHdr;
1998 if (pReq->Hdr.cbReq != sizeof(*pReq))
1999 return VERR_INVALID_PARAMETER;
2000 if (enmOperation == VMMR0_DO_GCFGM_SET_VALUE)
2001 {
2002 rc = GVMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
2003 //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
2004 // rc = GMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
2005 }
2006 else
2007 {
2008 rc = GVMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
2009 //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
2010 // rc = GMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
2011 }
2012 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2013 break;
2014 }
2015
2016 /*
2017 * PDM Wrappers.
2018 */
2019 case VMMR0_DO_PDM_DRIVER_CALL_REQ_HANDLER:
2020 {
2021 if (!pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
2022 return VERR_INVALID_PARAMETER;
2023 rc = PDMR0DriverCallReqHandler(pGVM, pVM, (PPDMDRIVERCALLREQHANDLERREQ)pReqHdr);
2024 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2025 break;
2026 }
2027
2028 case VMMR0_DO_PDM_DEVICE_CALL_REQ_HANDLER:
2029 {
2030 if (!pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
2031 return VERR_INVALID_PARAMETER;
2032 rc = PDMR0DeviceCallReqHandler(pGVM, pVM, (PPDMDEVICECALLREQHANDLERREQ)pReqHdr);
2033 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2034 break;
2035 }
2036
2037 /*
2038 * Requests to the internal networking service.
2039 */
2040 case VMMR0_DO_INTNET_OPEN:
2041 {
2042 PINTNETOPENREQ pReq = (PINTNETOPENREQ)pReqHdr;
2043 if (u64Arg || !pReq || !vmmR0IsValidSession(pVM, pReq->pSession, pSession) || idCpu != NIL_VMCPUID)
2044 return VERR_INVALID_PARAMETER;
2045 rc = IntNetR0OpenReq(pSession, pReq);
2046 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2047 break;
2048 }
2049
2050 case VMMR0_DO_INTNET_IF_CLOSE:
2051 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFCLOSEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2052 return VERR_INVALID_PARAMETER;
2053 rc = IntNetR0IfCloseReq(pSession, (PINTNETIFCLOSEREQ)pReqHdr);
2054 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2055 break;
2056
2057
2058 case VMMR0_DO_INTNET_IF_GET_BUFFER_PTRS:
2059 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFGETBUFFERPTRSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2060 return VERR_INVALID_PARAMETER;
2061 rc = IntNetR0IfGetBufferPtrsReq(pSession, (PINTNETIFGETBUFFERPTRSREQ)pReqHdr);
2062 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2063 break;
2064
2065 case VMMR0_DO_INTNET_IF_SET_PROMISCUOUS_MODE:
2066 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2067 return VERR_INVALID_PARAMETER;
2068 rc = IntNetR0IfSetPromiscuousModeReq(pSession, (PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr);
2069 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2070 break;
2071
2072 case VMMR0_DO_INTNET_IF_SET_MAC_ADDRESS:
2073 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETMACADDRESSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2074 return VERR_INVALID_PARAMETER;
2075 rc = IntNetR0IfSetMacAddressReq(pSession, (PINTNETIFSETMACADDRESSREQ)pReqHdr);
2076 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2077 break;
2078
2079 case VMMR0_DO_INTNET_IF_SET_ACTIVE:
2080 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETACTIVEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2081 return VERR_INVALID_PARAMETER;
2082 rc = IntNetR0IfSetActiveReq(pSession, (PINTNETIFSETACTIVEREQ)pReqHdr);
2083 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2084 break;
2085
2086 case VMMR0_DO_INTNET_IF_SEND:
2087 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2088 return VERR_INVALID_PARAMETER;
2089 rc = IntNetR0IfSendReq(pSession, (PINTNETIFSENDREQ)pReqHdr);
2090 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2091 break;
2092
2093 case VMMR0_DO_INTNET_IF_WAIT:
2094 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2095 return VERR_INVALID_PARAMETER;
2096 rc = IntNetR0IfWaitReq(pSession, (PINTNETIFWAITREQ)pReqHdr);
2097 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2098 break;
2099
2100 case VMMR0_DO_INTNET_IF_ABORT_WAIT:
2101 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2102 return VERR_INVALID_PARAMETER;
2103 rc = IntNetR0IfAbortWaitReq(pSession, (PINTNETIFABORTWAITREQ)pReqHdr);
2104 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2105 break;
2106
2107#ifdef VBOX_WITH_PCI_PASSTHROUGH
2108 /*
2109     * Requests to the host PCI driver service.
2110 */
2111 case VMMR0_DO_PCIRAW_REQ:
2112 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PPCIRAWSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2113 return VERR_INVALID_PARAMETER;
2114 rc = PciRawR0ProcessReq(pGVM, pVM, pSession, (PPCIRAWSENDREQ)pReqHdr);
2115 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2116 break;
2117#endif
2118
2119 /*
2120 * NEM requests.
2121 */
2122#ifdef VBOX_WITH_NEM_R0
2123# if defined(RT_ARCH_AMD64) && defined(RT_OS_WINDOWS)
2124 case VMMR0_DO_NEM_INIT_VM:
2125 if (u64Arg || pReqHdr || idCpu != 0)
2126 return VERR_INVALID_PARAMETER;
2127 rc = NEMR0InitVM(pGVM, pVM);
2128 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2129 break;
2130
2131 case VMMR0_DO_NEM_INIT_VM_PART_2:
2132 if (u64Arg || pReqHdr || idCpu != 0)
2133 return VERR_INVALID_PARAMETER;
2134 rc = NEMR0InitVMPart2(pGVM, pVM);
2135 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2136 break;
2137
2138 case VMMR0_DO_NEM_MAP_PAGES:
2139 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2140 return VERR_INVALID_PARAMETER;
2141 rc = NEMR0MapPages(pGVM, pVM, idCpu);
2142 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2143 break;
2144
2145 case VMMR0_DO_NEM_UNMAP_PAGES:
2146 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2147 return VERR_INVALID_PARAMETER;
2148 rc = NEMR0UnmapPages(pGVM, pVM, idCpu);
2149 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2150 break;
2151
2152 case VMMR0_DO_NEM_EXPORT_STATE:
2153 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2154 return VERR_INVALID_PARAMETER;
2155 rc = NEMR0ExportState(pGVM, pVM, idCpu);
2156 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2157 break;
2158
2159 case VMMR0_DO_NEM_IMPORT_STATE:
2160 if (pReqHdr || idCpu == NIL_VMCPUID)
2161 return VERR_INVALID_PARAMETER;
2162 rc = NEMR0ImportState(pGVM, pVM, idCpu, u64Arg);
2163 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2164 break;
2165
2166 case VMMR0_DO_NEM_QUERY_CPU_TICK:
2167 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2168 return VERR_INVALID_PARAMETER;
2169 rc = NEMR0QueryCpuTick(pGVM, pVM, idCpu);
2170 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2171 break;
2172
2173 case VMMR0_DO_NEM_RESUME_CPU_TICK_ON_ALL:
2174 if (pReqHdr || idCpu == NIL_VMCPUID)
2175 return VERR_INVALID_PARAMETER;
2176 rc = NEMR0ResumeCpuTickOnAll(pGVM, pVM, idCpu, u64Arg);
2177 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2178 break;
2179
2180 case VMMR0_DO_NEM_UPDATE_STATISTICS:
2181 if (u64Arg || pReqHdr)
2182 return VERR_INVALID_PARAMETER;
2183 rc = NEMR0UpdateStatistics(pGVM, pVM, idCpu);
2184 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2185 break;
2186
2187# if 1 && defined(DEBUG_bird)
2188 case VMMR0_DO_NEM_EXPERIMENT:
2189 if (pReqHdr)
2190 return VERR_INVALID_PARAMETER;
2191 rc = NEMR0DoExperiment(pGVM, pVM, idCpu, u64Arg);
2192 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2193 break;
2194# endif
2195# endif
2196#endif
2197
2198 /*
2199 * For profiling.
2200 */
2201 case VMMR0_DO_NOP:
2202 case VMMR0_DO_SLOW_NOP:
2203 return VINF_SUCCESS;
2204
2205 /*
2206 * For testing Ring-0 APIs invoked in this environment.
2207 */
2208 case VMMR0_DO_TESTS:
2209 /** @todo make new test */
2210 return VINF_SUCCESS;
2211
2212 default:
2213 /*
2214             * We're returning VERR_NOT_SUPPORTED here so we've got something other
2215             * than -1, which the interrupt gate glue code might return.
2216 */
2217 Log(("operation %#x is not supported\n", enmOperation));
2218 return VERR_NOT_SUPPORTED;
2219 }
2220 return rc;
2221}
2222
2223
2224/**
2225 * Argument for vmmR0EntryExWrapper containing the arguments for VMMR0EntryEx.
2226 */
2227typedef struct VMMR0ENTRYEXARGS
2228{
2229    PGVM pGVM;                      /**< The global (ring-0) VM structure. */
2230    PVM pVM;                        /**< The cross context VM structure. */
2231    VMCPUID idCpu;                  /**< The virtual CPU ID. */
2232    VMMR0OPERATION enmOperation;    /**< The operation to perform. */
2233    PSUPVMMR0REQHDR pReq;           /**< Pointer to the optional request packet. */
2234    uint64_t u64Arg;                /**< A simple constant argument. */
2235    PSUPDRVSESSION pSession;        /**< The session of the caller. */
2236} VMMR0ENTRYEXARGS;
2237/** Pointer to a vmmR0EntryExWrapper argument package. */
2238typedef VMMR0ENTRYEXARGS *PVMMR0ENTRYEXARGS;
2239
2240/**
2241 * This is just a longjmp wrapper function for VMMR0EntryEx calls.
2242 *
2243 * @returns VBox status code.
2244 * @param   pvArgs      The argument package.
2245 */
2246static DECLCALLBACK(int) vmmR0EntryExWrapper(void *pvArgs)
2247{
2248 return vmmR0EntryExWorker(((PVMMR0ENTRYEXARGS)pvArgs)->pGVM,
2249 ((PVMMR0ENTRYEXARGS)pvArgs)->pVM,
2250 ((PVMMR0ENTRYEXARGS)pvArgs)->idCpu,
2251 ((PVMMR0ENTRYEXARGS)pvArgs)->enmOperation,
2252 ((PVMMR0ENTRYEXARGS)pvArgs)->pReq,
2253 ((PVMMR0ENTRYEXARGS)pvArgs)->u64Arg,
2254 ((PVMMR0ENTRYEXARGS)pvArgs)->pSession);
2255}
2256
2257
2258/**
2259 * The Ring 0 entry point, called by the support library (SUP).
2260 *
2261 * @returns VBox status code.
2262 * @param pGVM The global (ring-0) VM structure.
2263 * @param pVM The cross context VM structure.
2264 * @param idCpu Virtual CPU ID argument. Must be NIL_VMCPUID if pVM
2265 *                      is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't.
2266 * @param enmOperation Which operation to execute.
2267 * @param pReq Pointer to the SUPVMMR0REQHDR packet. Optional.
2268 * @param u64Arg Some simple constant argument.
2269 * @param pSession The session of the caller.
2270 * @remarks Assume called with interrupts _enabled_.
2271 */
2272VMMR0DECL(int) VMMR0EntryEx(PGVM pGVM, PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation,
2273 PSUPVMMR0REQHDR pReq, uint64_t u64Arg, PSUPDRVSESSION pSession)
2274{
2275 /*
2276 * Requests that should only happen on the EMT thread will be
2277 * wrapped in a setjmp so we can assert without causing trouble.
2278 */
2279 if ( pVM != NULL
2280 && pGVM != NULL
2281 && idCpu < pGVM->cCpus
2282 && pVM->pVMR0 != NULL)
2283 {
2284 switch (enmOperation)
2285 {
2286 /* These might/will be called before VMMR3Init. */
2287 case VMMR0_DO_GMM_INITIAL_RESERVATION:
2288 case VMMR0_DO_GMM_UPDATE_RESERVATION:
2289 case VMMR0_DO_GMM_ALLOCATE_PAGES:
2290 case VMMR0_DO_GMM_FREE_PAGES:
2291 case VMMR0_DO_GMM_BALLOONED_PAGES:
2292            /* On the Mac we might not have a valid jmp buf, so check these as well. */
2293 case VMMR0_DO_VMMR0_INIT:
2294 case VMMR0_DO_VMMR0_TERM:
2295 {
2296 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2297#ifdef VBOX_BUGREF_9217
2298 PVMCPU pVCpu = pGVCpu;
2299#else
2300 PVMCPU pVCpu = &pVM->aCpus[idCpu];
2301#endif
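                /* Only the EMT that owns this virtual CPU may arm the ring-3 longjmp; everybody else is rejected. */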
2302 RTNATIVETHREAD hNativeThread = RTThreadNativeSelf();
2303 if (RT_LIKELY( pGVCpu->hEMT == hNativeThread
2304 && pVCpu->hNativeThreadR0 == hNativeThread))
2305 {
2306 if (!pVCpu->vmm.s.CallRing3JmpBufR0.pvSavedStack)
2307 break;
2308
2309 /** @todo validate this EMT claim... GVM knows. */
2310 VMMR0ENTRYEXARGS Args;
2311 Args.pGVM = pGVM;
2312 Args.pVM = pVM;
2313 Args.idCpu = idCpu;
2314 Args.enmOperation = enmOperation;
2315 Args.pReq = pReq;
2316 Args.u64Arg = u64Arg;
2317 Args.pSession = pSession;
2318 return vmmR0CallRing3SetJmpEx(&pVCpu->vmm.s.CallRing3JmpBufR0, vmmR0EntryExWrapper, &Args);
2319 }
2320 return VERR_VM_THREAD_NOT_EMT;
2321 }
2322
2323 default:
2324 break;
2325 }
2326 }
2327 return vmmR0EntryExWorker(pGVM, pVM, idCpu, enmOperation, pReq, u64Arg, pSession);
2328}
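
/*
 * Illustrative sketch only (kept as a comment, not compiled): how a ring-3
 * caller typically reaches VMMR0EntryEx through the support library.  The
 * SUPR3CallVMMR0Ex signature is assumed from include/VBox/sup.h, so treat
 * this as a hypothetical usage example rather than a reference:
 *
 *     // Simple operation with neither a request packet nor a u64Arg,
 *     // e.g. the profiling NOP handled near the end of vmmR0EntryExWorker:
 *     int rc = SUPR3CallVMMR0Ex(pVM->pVMR0, NIL_VMCPUID, VMMR0_DO_NOP, 0, NULL);
 *
 *     // The support driver validates the session and dispatches to
 *     // VMMR0EntryEx above; EMT-only operations are then routed through
 *     // vmmR0EntryExWrapper and the ring-3 longjmp machinery.
 */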
2329
2330
2331/**
2332 * Checks whether we've armed the ring-0 long jump machinery.
2333 *
2334 * @returns @c true / @c false
2335 * @param pVCpu The cross context virtual CPU structure.
2336 * @thread EMT
2337 * @sa VMMIsLongJumpArmed
2338 */
2339VMMR0_INT_DECL(bool) VMMR0IsLongJumpArmed(PVMCPU pVCpu)
2340{
2341#ifdef RT_ARCH_X86
2342 return pVCpu->vmm.s.CallRing3JmpBufR0.eip
2343 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
2344#else
2345 return pVCpu->vmm.s.CallRing3JmpBufR0.rip
2346 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
2347#endif
2348}
2349
2350
2351/**
2352 * Checks whether we've done a ring-3 long jump.
2353 *
2354 * @returns @c true / @c false
2355 * @param pVCpu The cross context virtual CPU structure.
2356 * @thread EMT
2357 */
2358VMMR0_INT_DECL(bool) VMMR0IsInRing3LongJump(PVMCPU pVCpu)
2359{
2360 return pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
2361}
2362
2363
2364/**
2365 * Internal R0 logger worker: Flush logger.
2366 *
2367 * @param pLogger The logger instance to flush.
2368 * @remark This function must be exported!
2369 */
2370VMMR0DECL(void) vmmR0LoggerFlush(PRTLOGGER pLogger)
2371{
2372#ifdef LOG_ENABLED
2373 /*
2374 * Convert the pLogger into a VM handle and 'call' back to Ring-3.
2375     * (This code is a bit paranoid.)
2376 */
2377 PVMMR0LOGGER pR0Logger = (PVMMR0LOGGER)((uintptr_t)pLogger - RT_UOFFSETOF(VMMR0LOGGER, Logger));
2378 if ( !VALID_PTR(pR0Logger)
2379 || !VALID_PTR(pR0Logger + 1)
2380 || pLogger->u32Magic != RTLOGGER_MAGIC)
2381 {
2382# ifdef DEBUG
2383 SUPR0Printf("vmmR0LoggerFlush: pLogger=%p!\n", pLogger);
2384# endif
2385 return;
2386 }
2387 if (pR0Logger->fFlushingDisabled)
2388 return; /* quietly */
2389
2390 PVM pVM = pR0Logger->pVM;
2391 if ( !VALID_PTR(pVM)
2392 || pVM->pVMR0 != pVM)
2393 {
2394# ifdef DEBUG
2395 SUPR0Printf("vmmR0LoggerFlush: pVM=%p! pVMR0=%p! pLogger=%p\n", pVM, pVM->pVMR0, pLogger);
2396# endif
2397 return;
2398 }
2399
2400 PVMCPU pVCpu = VMMGetCpu(pVM);
2401 if (pVCpu)
2402 {
2403 /*
2404 * Check that the jump buffer is armed.
2405 */
2406# ifdef RT_ARCH_X86
2407 if ( !pVCpu->vmm.s.CallRing3JmpBufR0.eip
2408 || pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2409# else
2410 if ( !pVCpu->vmm.s.CallRing3JmpBufR0.rip
2411 || pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2412# endif
2413 {
2414# ifdef DEBUG
2415 SUPR0Printf("vmmR0LoggerFlush: Jump buffer isn't armed!\n");
2416# endif
2417 return;
2418 }
2419 VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VMM_LOGGER_FLUSH, 0);
2420 }
2421# ifdef DEBUG
2422 else
2423 SUPR0Printf("vmmR0LoggerFlush: invalid VCPU context!\n");
2424# endif
2425#else
2426 NOREF(pLogger);
2427#endif /* LOG_ENABLED */
2428}
2429
2430#ifdef LOG_ENABLED
2431
2432/**
2433 * Disables flushing of the ring-0 debug log.
2434 *
2435 * @param pVCpu The cross context virtual CPU structure.
2436 */
2437VMMR0_INT_DECL(void) VMMR0LogFlushDisable(PVMCPU pVCpu)
2438{
2439 if (pVCpu->vmm.s.pR0LoggerR0)
2440 pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled = true;
2441 if (pVCpu->vmm.s.pR0RelLoggerR0)
2442 pVCpu->vmm.s.pR0RelLoggerR0->fFlushingDisabled = true;
2443}
2444
2445
2446/**
2447 * Enables flushing of the ring-0 debug log.
2448 *
2449 * @param pVCpu The cross context virtual CPU structure.
2450 */
2451VMMR0_INT_DECL(void) VMMR0LogFlushEnable(PVMCPU pVCpu)
2452{
2453 if (pVCpu->vmm.s.pR0LoggerR0)
2454 pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled = false;
2455 if (pVCpu->vmm.s.pR0RelLoggerR0)
2456 pVCpu->vmm.s.pR0RelLoggerR0->fFlushingDisabled = false;
2457}
2458
2459
2460/**
2461 * Checks if log flushing is disabled or not.
2462 *
2463 * @param pVCpu The cross context virtual CPU structure.
2464 */
2465VMMR0_INT_DECL(bool) VMMR0IsLogFlushDisabled(PVMCPU pVCpu)
2466{
2467 if (pVCpu->vmm.s.pR0LoggerR0)
2468 return pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled;
2469 if (pVCpu->vmm.s.pR0RelLoggerR0)
2470 return pVCpu->vmm.s.pR0RelLoggerR0->fFlushingDisabled;
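    /* No ring-0 logger instances are registered: report flushing as disabled. */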
2471 return true;
2472}
2473
2474#endif /* LOG_ENABLED */
2475
2476/**
2477 * Override RTLogRelGetDefaultInstanceEx so we can do LogRel to VBox.log from EMTs in ring-0.
2478 */
2479DECLEXPORT(PRTLOGGER) RTLogRelGetDefaultInstanceEx(uint32_t fFlagsAndGroup)
2480{
2481 PGVMCPU pGVCpu = GVMMR0GetGVCpuByEMT(NIL_RTNATIVETHREAD);
2482 if (pGVCpu)
2483 {
2484#ifdef VBOX_BUGREF_9217
2485 PVMCPU pVCpu = pGVCpu;
2486#else
2487 PVMCPU pVCpu = pGVCpu->pVCpu;
2488#endif
2489 if (RT_VALID_PTR(pVCpu))
2490 {
2491 PVMMR0LOGGER pVmmLogger = pVCpu->vmm.s.pR0RelLoggerR0;
2492 if (RT_VALID_PTR(pVmmLogger))
2493 {
2494 if ( pVmmLogger->fCreated
2495#ifdef VBOX_BUGREF_9217
2496 && pVmmLogger->pVM == pGVCpu->pGVM
2497#else
2498 && pVmmLogger->pVM == pGVCpu->pVM
2499#endif
2500 )
2501 {
2502 if (pVmmLogger->Logger.fFlags & RTLOGFLAGS_DISABLED)
2503 return NULL;
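                    /* Only hand out the logger if the requested group and flags are enabled for it. */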
2504 uint16_t const fFlags = RT_LO_U16(fFlagsAndGroup);
2505 uint16_t const iGroup = RT_HI_U16(fFlagsAndGroup);
2506 if ( iGroup != UINT16_MAX
2507 && ( ( pVmmLogger->Logger.afGroups[iGroup < pVmmLogger->Logger.cGroups ? iGroup : 0]
2508 & (fFlags | (uint32_t)RTLOGGRPFLAGS_ENABLED))
2509 != (fFlags | (uint32_t)RTLOGGRPFLAGS_ENABLED)))
2510 return NULL;
2511 return &pVmmLogger->Logger;
2512 }
2513 }
2514 }
2515 }
2516 return SUPR0GetDefaultLogRelInstanceEx(fFlagsAndGroup);
2517}
2518
2519
2520/**
2521 * Jump back to ring-3 if we're the EMT and the longjmp is armed.
2522 *
2523 * @returns true if the breakpoint should be hit, false if it should be ignored.
2524 */
2525DECLEXPORT(bool) RTCALL RTAssertShouldPanic(void)
2526{
2527#if 0
2528 return true;
2529#else
2530 PVM pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
2531 if (pVM)
2532 {
2533 PVMCPU pVCpu = VMMGetCpu(pVM);
2534
2535 if (pVCpu)
2536 {
2537#ifdef RT_ARCH_X86
2538 if ( pVCpu->vmm.s.CallRing3JmpBufR0.eip
2539 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2540#else
2541 if ( pVCpu->vmm.s.CallRing3JmpBufR0.rip
2542 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2543#endif
2544 {
2545 int rc = VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VM_R0_ASSERTION, 0);
2546 return RT_FAILURE_NP(rc);
2547 }
2548 }
2549 }
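    /* Not an EMT with an armed jump buffer: take the breakpoint on Linux, ignore it elsewhere. */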
2550#ifdef RT_OS_LINUX
2551 return true;
2552#else
2553 return false;
2554#endif
2555#endif
2556}
2557
2558
2559/**
2560 * Override this so we can push it up to ring-3.
2561 *
2562 * @param pszExpr Expression. Can be NULL.
2563 * @param uLine Location line number.
2564 * @param pszFile Location file name.
2565 * @param pszFunction Location function name.
2566 */
2567DECLEXPORT(void) RTCALL RTAssertMsg1Weak(const char *pszExpr, unsigned uLine, const char *pszFile, const char *pszFunction)
2568{
2569 /*
2570 * To the log.
2571 */
2572 LogAlways(("\n!!R0-Assertion Failed!!\n"
2573 "Expression: %s\n"
2574 "Location : %s(%d) %s\n",
2575 pszExpr, pszFile, uLine, pszFunction));
2576
2577 /*
2578 * To the global VMM buffer.
2579 */
2580 PVM pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
2581 if (pVM)
2582 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg1, sizeof(pVM->vmm.s.szRing0AssertMsg1),
2583 "\n!!R0-Assertion Failed!!\n"
2584 "Expression: %.*s\n"
2585 "Location : %s(%d) %s\n",
2586 sizeof(pVM->vmm.s.szRing0AssertMsg1) / 4 * 3, pszExpr,
2587 pszFile, uLine, pszFunction);
2588
2589 /*
2590 * Continue the normal way.
2591 */
2592 RTAssertMsg1(pszExpr, uLine, pszFile, pszFunction);
2593}
2594
2595
2596/**
2597 * Callback for RTLogFormatV which writes to the ring-3 log port.
2598 * See PFNLOGOUTPUT() for details.
2599 */
2600static DECLCALLBACK(size_t) rtLogOutput(void *pv, const char *pachChars, size_t cbChars)
2601{
2602 for (size_t i = 0; i < cbChars; i++)
2603 {
2604 LogAlways(("%c", pachChars[i])); NOREF(pachChars);
2605 }
2606
2607 NOREF(pv);
2608 return cbChars;
2609}
2610
2611
2612/**
2613 * Override this so we can push it up to ring-3.
2614 *
2615 * @param pszFormat The format string.
2616 * @param va Arguments.
2617 */
2618DECLEXPORT(void) RTCALL RTAssertMsg2WeakV(const char *pszFormat, va_list va)
2619{
2620 va_list vaCopy;
2621
2622 /*
2623 * Push the message to the loggers.
2624 */
2625 PRTLOGGER pLog = RTLogGetDefaultInstance(); /* Don't initialize it here... */
2626 if (pLog)
2627 {
2628 va_copy(vaCopy, va);
2629 RTLogFormatV(rtLogOutput, pLog, pszFormat, vaCopy);
2630 va_end(vaCopy);
2631 }
2632 pLog = RTLogRelGetDefaultInstance();
2633 if (pLog)
2634 {
2635 va_copy(vaCopy, va);
2636 RTLogFormatV(rtLogOutput, pLog, pszFormat, vaCopy);
2637 va_end(vaCopy);
2638 }
2639
2640 /*
2641 * Push it to the global VMM buffer.
2642 */
2643 PVM pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
2644 if (pVM)
2645 {
2646 va_copy(vaCopy, va);
2647 RTStrPrintfV(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2), pszFormat, vaCopy);
2648 va_end(vaCopy);
2649 }
2650
2651 /*
2652 * Continue the normal way.
2653 */
2654 RTAssertMsg2V(pszFormat, va);
2655}
2656