VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/VMMR0.cpp@80333

Last change on this file since 80333 was 80333, checked in by vboxsync, 5 years ago

VMM: Eliminating the VBOX_BUGREF_9217_PART_I preprocessor macro. bugref:9217

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 96.8 KB
1/* $Id: VMMR0.cpp 80333 2019-08-16 20:28:38Z vboxsync $ */
2/** @file
3 * VMM - Host Context Ring 0.
4 */
5
6/*
7 * Copyright (C) 2006-2019 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_VMM
23#include <VBox/vmm/vmm.h>
24#include <VBox/sup.h>
25#include <VBox/vmm/trpm.h>
26#include <VBox/vmm/cpum.h>
27#include <VBox/vmm/pdmapi.h>
28#include <VBox/vmm/pgm.h>
29#ifdef VBOX_WITH_NEM_R0
30# include <VBox/vmm/nem.h>
31#endif
32#include <VBox/vmm/em.h>
33#include <VBox/vmm/stam.h>
34#include <VBox/vmm/tm.h>
35#include "VMMInternal.h"
36#include <VBox/vmm/vmcc.h>
37#include <VBox/vmm/gvm.h>
38#ifdef VBOX_WITH_PCI_PASSTHROUGH
39# include <VBox/vmm/pdmpci.h>
40#endif
41#include <VBox/vmm/apic.h>
42
43#include <VBox/vmm/gvmm.h>
44#include <VBox/vmm/gmm.h>
45#include <VBox/vmm/gim.h>
46#include <VBox/intnet.h>
47#include <VBox/vmm/hm.h>
48#include <VBox/param.h>
49#include <VBox/err.h>
50#include <VBox/version.h>
51#include <VBox/log.h>
52
53#include <iprt/asm-amd64-x86.h>
54#include <iprt/assert.h>
55#include <iprt/crc.h>
56#include <iprt/mp.h>
57#include <iprt/once.h>
58#include <iprt/stdarg.h>
59#include <iprt/string.h>
60#include <iprt/thread.h>
61#include <iprt/timer.h>
62#include <iprt/time.h>
63
64#include "dtrace/VBoxVMM.h"
65
66
67#if defined(_MSC_VER) && defined(RT_ARCH_AMD64) /** @todo check this with VC7! */
68# pragma intrinsic(_AddressOfReturnAddress)
69#endif
70
71#if defined(RT_OS_DARWIN) && ARCH_BITS == 32
72# error "32-bit darwin is no longer supported. Go back to 4.3 or earlier!"
73#endif
74
75
76
77/*********************************************************************************************************************************
78* Defined Constants And Macros *
79*********************************************************************************************************************************/
80/** @def VMM_CHECK_SMAP_SETUP
81 * SMAP check setup. */
82/** @def VMM_CHECK_SMAP_CHECK
83 * Checks that the AC flag is set if SMAP is enabled. If AC is not set,
84 * it will be logged and @a a_BadExpr is executed. */
85/** @def VMM_CHECK_SMAP_CHECK2
86 * Checks that the AC flag is set if SMAP is enabled. If AC is not set, it will
87 * be logged, written to the VM's assertion text buffer, and @a a_BadExpr is
88 * executed. */
89#if defined(VBOX_STRICT) || 1
90# define VMM_CHECK_SMAP_SETUP() uint32_t const fKernelFeatures = SUPR0GetKernelFeatures()
91# define VMM_CHECK_SMAP_CHECK(a_BadExpr) \
92 do { \
93 if (fKernelFeatures & SUPKERNELFEATURES_SMAP) \
94 { \
95 RTCCUINTREG fEflCheck = ASMGetFlags(); \
96 if (RT_LIKELY(fEflCheck & X86_EFL_AC)) \
97 { /* likely */ } \
98 else \
99 { \
100 SUPR0Printf("%s, line %d: EFLAGS.AC is clear! (%#x)\n", __FUNCTION__, __LINE__, (uint32_t)fEflCheck); \
101 a_BadExpr; \
102 } \
103 } \
104 } while (0)
105# define VMM_CHECK_SMAP_CHECK2(a_pVM, a_BadExpr) \
106 do { \
107 if (fKernelFeatures & SUPKERNELFEATURES_SMAP) \
108 { \
109 RTCCUINTREG fEflCheck = ASMGetFlags(); \
110 if (RT_LIKELY(fEflCheck & X86_EFL_AC)) \
111 { /* likely */ } \
112 else \
113 { \
114 SUPR0BadContext((a_pVM) ? (a_pVM)->pSession : NULL, __FILE__, __LINE__, "EFLAGS.AC is zero!"); \
115 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg1, sizeof(pVM->vmm.s.szRing0AssertMsg1), \
116 "%s, line %d: EFLAGS.AC is clear! (%#x)\n", __FUNCTION__, __LINE__, (uint32_t)fEflCheck); \
117 a_BadExpr; \
118 } \
119 } \
120 } while (0)
121#else
122# define VMM_CHECK_SMAP_SETUP() uint32_t const fKernelFeatures = 0
123# define VMM_CHECK_SMAP_CHECK(a_BadExpr) NOREF(fKernelFeatures)
124# define VMM_CHECK_SMAP_CHECK2(a_pVM, a_BadExpr) NOREF(fKernelFeatures)
125#endif
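/*
 * A minimal usage sketch of the SMAP checking macros above (hypothetical
 * function, for illustration only): VMM_CHECK_SMAP_SETUP() caches the kernel
 * feature mask once per function, and each VMM_CHECK_SMAP_CHECK() verifies
 * that EFLAGS.AC is still set when SMAP is enabled, running the supplied
 * bad-path expression if it is not.
 */
#if 0 /* illustration only */
static int vmmR0SmapUsageSketch(void)
{
    VMM_CHECK_SMAP_SETUP();
    VMM_CHECK_SMAP_CHECK(return VERR_VMM_SMAP_BUT_AC_CLEAR);
    /* ... ring-0 work that must not run with EFLAGS.AC cleared ... */
    VMM_CHECK_SMAP_CHECK(return VERR_VMM_SMAP_BUT_AC_CLEAR);
    return VINF_SUCCESS;
}
#endif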
126
127
128/*********************************************************************************************************************************
129* Internal Functions *
130*********************************************************************************************************************************/
131RT_C_DECLS_BEGIN
132#if defined(RT_ARCH_X86) && (defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD))
133extern uint64_t __udivdi3(uint64_t, uint64_t);
134extern uint64_t __umoddi3(uint64_t, uint64_t);
135#endif
136RT_C_DECLS_END
137
138
139/*********************************************************************************************************************************
140* Global Variables *
141*********************************************************************************************************************************/
142/** Drag in necessary library bits.
143 * The runtime lives here (in VMMR0.r0) and VBoxDD*R0.r0 links against us. */
144PFNRT g_VMMR0Deps[] =
145{
146 (PFNRT)RTCrc32,
147 (PFNRT)RTOnce,
148#if defined(RT_ARCH_X86) && (defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD))
149 (PFNRT)__udivdi3,
150 (PFNRT)__umoddi3,
151#endif
152 NULL
153};
154
155#ifdef RT_OS_SOLARIS
156/* Dependency information for the native Solaris loader. */
157extern "C" { char _depends_on[] = "vboxdrv"; }
158#endif
159
160/** The result of SUPR0GetRawModeUsability(), set by ModuleInit(). */
161int g_rcRawModeUsability = VINF_SUCCESS;
162
163
164/**
165 * Initialize the module.
166 * This is called when we're first loaded.
167 *
168 * @returns 0 on success.
169 * @returns VBox status on failure.
170 * @param hMod Image handle for use in APIs.
171 */
172DECLEXPORT(int) ModuleInit(void *hMod)
173{
174 VMM_CHECK_SMAP_SETUP();
175 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
176
177#ifdef VBOX_WITH_DTRACE_R0
178 /*
179 * The first thing to do is register the static tracepoints.
180 * (Deregistration is automatic.)
181 */
182 int rc2 = SUPR0TracerRegisterModule(hMod, &g_VTGObjHeader);
183 if (RT_FAILURE(rc2))
184 return rc2;
185#endif
186 LogFlow(("ModuleInit:\n"));
187
188#ifdef VBOX_WITH_64ON32_CMOS_DEBUG
189 /*
190 * Display the CMOS debug code.
191 */
192 ASMOutU8(0x72, 0x03);
193 uint8_t bDebugCode = ASMInU8(0x73);
194 LogRel(("CMOS Debug Code: %#x (%d)\n", bDebugCode, bDebugCode));
195 RTLogComPrintf("CMOS Debug Code: %#x (%d)\n", bDebugCode, bDebugCode);
196#endif
197
198 /*
199 * Initialize the VMM, GVMM, GMM, HM, PGM (Darwin) and INTNET.
200 */
201 int rc = vmmInitFormatTypes();
202 if (RT_SUCCESS(rc))
203 {
204 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
205 rc = GVMMR0Init();
206 if (RT_SUCCESS(rc))
207 {
208 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
209 rc = GMMR0Init();
210 if (RT_SUCCESS(rc))
211 {
212 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
213 rc = HMR0Init();
214 if (RT_SUCCESS(rc))
215 {
216 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
217 rc = PGMRegisterStringFormatTypes();
218 if (RT_SUCCESS(rc))
219 {
220 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
221#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
222 rc = PGMR0DynMapInit();
223#endif
224 if (RT_SUCCESS(rc))
225 {
226 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
227 rc = IntNetR0Init();
228 if (RT_SUCCESS(rc))
229 {
230#ifdef VBOX_WITH_PCI_PASSTHROUGH
231 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
232 rc = PciRawR0Init();
233#endif
234 if (RT_SUCCESS(rc))
235 {
236 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
237 rc = CPUMR0ModuleInit();
238 if (RT_SUCCESS(rc))
239 {
240#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
241 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
242 rc = vmmR0TripleFaultHackInit();
243 if (RT_SUCCESS(rc))
244#endif
245 {
246 VMM_CHECK_SMAP_CHECK(rc = VERR_VMM_SMAP_BUT_AC_CLEAR);
247 if (RT_SUCCESS(rc))
248 {
249 g_rcRawModeUsability = SUPR0GetRawModeUsability();
250 if (g_rcRawModeUsability != VINF_SUCCESS)
251 SUPR0Printf("VMMR0!ModuleInit: SUPR0GetRawModeUsability -> %Rrc\n",
252 g_rcRawModeUsability);
253 LogFlow(("ModuleInit: returns success\n"));
254 return VINF_SUCCESS;
255 }
256 }
257
258 /*
259 * Bail out.
260 */
261#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
262 vmmR0TripleFaultHackTerm();
263#endif
264 }
265 else
266 LogRel(("ModuleInit: CPUMR0ModuleInit -> %Rrc\n", rc));
267#ifdef VBOX_WITH_PCI_PASSTHROUGH
268 PciRawR0Term();
269#endif
270 }
271 else
272 LogRel(("ModuleInit: PciRawR0Init -> %Rrc\n", rc));
273 IntNetR0Term();
274 }
275 else
276 LogRel(("ModuleInit: IntNetR0Init -> %Rrc\n", rc));
277#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
278 PGMR0DynMapTerm();
279#endif
280 }
281 else
282 LogRel(("ModuleInit: PGMR0DynMapInit -> %Rrc\n", rc));
283 PGMDeregisterStringFormatTypes();
284 }
285 else
286 LogRel(("ModuleInit: PGMRegisterStringFormatTypes -> %Rrc\n", rc));
287 HMR0Term();
288 }
289 else
290 LogRel(("ModuleInit: HMR0Init -> %Rrc\n", rc));
291 GMMR0Term();
292 }
293 else
294 LogRel(("ModuleInit: GMMR0Init -> %Rrc\n", rc));
295 GVMMR0Term();
296 }
297 else
298 LogRel(("ModuleInit: GVMMR0Init -> %Rrc\n", rc));
299 vmmTermFormatTypes();
300 }
301 else
302 LogRel(("ModuleInit: vmmInitFormatTypes -> %Rrc\n", rc));
303
304 LogFlow(("ModuleInit: failed %Rrc\n", rc));
305 return rc;
306}
307
308
309/**
310 * Terminate the module.
311 * This is called when we're finally unloaded.
312 *
313 * @param hMod Image handle for use in APIs.
314 */
315DECLEXPORT(void) ModuleTerm(void *hMod)
316{
317 NOREF(hMod);
318 LogFlow(("ModuleTerm:\n"));
319
320 /*
321 * Terminate the CPUM module (Local APIC cleanup).
322 */
323 CPUMR0ModuleTerm();
324
325 /*
326 * Terminate the internal network service.
327 */
328 IntNetR0Term();
329
330 /*
331 * PGM (Darwin), HM and PciRaw global cleanup.
332 */
333#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
334 PGMR0DynMapTerm();
335#endif
336#ifdef VBOX_WITH_PCI_PASSTHROUGH
337 PciRawR0Term();
338#endif
339 PGMDeregisterStringFormatTypes();
340 HMR0Term();
341#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
342 vmmR0TripleFaultHackTerm();
343#endif
344
345 /*
346 * Destroy the GMM and GVMM instances.
347 */
348 GMMR0Term();
349 GVMMR0Term();
350
351 vmmTermFormatTypes();
352
353 LogFlow(("ModuleTerm: returns\n"));
354}
355
356
357/**
358 * Initiates the R0 driver for a particular VM instance.
359 *
360 * @returns VBox status code.
361 *
362 * @param pGVM The global (ring-0) VM structure.
363 * @param pVM The cross context VM structure.
364 * @param uSvnRev The SVN revision of the ring-3 part.
365 * @param uBuildType Build type indicator.
366 * @thread EMT(0)
367 */
368static int vmmR0InitVM(PGVM pGVM, PVMCC pVM, uint32_t uSvnRev, uint32_t uBuildType)
369{
370 VMM_CHECK_SMAP_SETUP();
371 VMM_CHECK_SMAP_CHECK(return VERR_VMM_SMAP_BUT_AC_CLEAR);
372
373 /*
374 * Match the SVN revisions and build type.
375 */
376 if (uSvnRev != VMMGetSvnRev())
377 {
378 LogRel(("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev()));
379 SUPR0Printf("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev());
380 return VERR_VMM_R0_VERSION_MISMATCH;
381 }
382 if (uBuildType != vmmGetBuildType())
383 {
384 LogRel(("VMMR0InitVM: Build type mismatch, r3=%#x r0=%#x\n", uBuildType, vmmGetBuildType()));
385 SUPR0Printf("VMMR0InitVM: Build type mismatch, r3=%#x r0=%#x\n", uBuildType, vmmGetBuildType());
386 return VERR_VMM_R0_VERSION_MISMATCH;
387 }
388
389 int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, 0 /*idCpu*/);
390 if (RT_FAILURE(rc))
391 return rc;
392
393#ifdef LOG_ENABLED
394 /*
395 * Register the EMT R0 logger instance for VCPU 0.
396 */
397#ifdef VBOX_BUGREF_9217
398 PVMCPUCC pVCpu = &pGVM->aCpus[0];
399#else
400 PVMCPUCC pVCpu = VMCC_GET_CPU_0(pVM);
401#endif
402
403 PVMMR0LOGGER pR0Logger = pVCpu->vmm.s.pR0LoggerR0;
404 if (pR0Logger)
405 {
406# if 0 /* testing of the logger. */
407 LogCom(("vmmR0InitVM: before %p\n", RTLogDefaultInstance()));
408 LogCom(("vmmR0InitVM: pfnFlush=%p actual=%p\n", pR0Logger->Logger.pfnFlush, vmmR0LoggerFlush));
409 LogCom(("vmmR0InitVM: pfnLogger=%p actual=%p\n", pR0Logger->Logger.pfnLogger, vmmR0LoggerWrapper));
410 LogCom(("vmmR0InitVM: offScratch=%d fFlags=%#x fDestFlags=%#x\n", pR0Logger->Logger.offScratch, pR0Logger->Logger.fFlags, pR0Logger->Logger.fDestFlags));
411
412 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
413 LogCom(("vmmR0InitVM: after %p reg\n", RTLogDefaultInstance()));
414 RTLogSetDefaultInstanceThread(NULL, pVM->pSession);
415 LogCom(("vmmR0InitVM: after %p dereg\n", RTLogDefaultInstance()));
416
417 pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
418 LogCom(("vmmR0InitVM: returned successfully from direct logger call.\n"));
419 pR0Logger->Logger.pfnFlush(&pR0Logger->Logger);
420 LogCom(("vmmR0InitVM: returned successfully from direct flush call.\n"));
421
422 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
423 LogCom(("vmmR0InitVM: after %p reg2\n", RTLogDefaultInstance()));
424 pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
425 LogCom(("vmmR0InitVM: returned successfully from direct logger call (2). offScratch=%d\n", pR0Logger->Logger.offScratch));
426 RTLogSetDefaultInstanceThread(NULL, pVM->pSession);
427 LogCom(("vmmR0InitVM: after %p dereg2\n", RTLogDefaultInstance()));
428
429 RTLogLoggerEx(&pR0Logger->Logger, 0, ~0U, "hello ring-0 logger (RTLogLoggerEx)\n");
430 LogCom(("vmmR0InitVM: RTLogLoggerEx returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));
431
432 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
433 RTLogPrintf("hello ring-0 logger (RTLogPrintf)\n");
434 LogCom(("vmmR0InitVM: RTLogPrintf returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));
435# endif
436 Log(("Switching to per-thread logging instance %p (key=%p)\n", &pR0Logger->Logger, pVM->pSession));
437 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
438 pR0Logger->fRegistered = true;
439 }
440#endif /* LOG_ENABLED */
441
442 /*
443 * Check if the host supports high resolution timers or not.
444 */
445 if ( pVM->vmm.s.fUsePeriodicPreemptionTimers
446 && !RTTimerCanDoHighResolution())
447 pVM->vmm.s.fUsePeriodicPreemptionTimers = false;
448
449 /*
450 * Initialize the per VM data for GVMM and GMM.
451 */
452 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
453 rc = GVMMR0InitVM(pGVM);
454// if (RT_SUCCESS(rc))
455// rc = GMMR0InitPerVMData(pVM);
456 if (RT_SUCCESS(rc))
457 {
458 /*
459 * Init HM, CPUM and PGM (Darwin only).
460 */
461 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
462 rc = HMR0InitVM(pVM);
463 if (RT_SUCCESS(rc))
464 VMM_CHECK_SMAP_CHECK2(pVM, rc = VERR_VMM_RING0_ASSERTION); /* CPUR0InitVM will otherwise panic the host */
465 if (RT_SUCCESS(rc))
466 {
467 rc = CPUMR0InitVM(pVM);
468 if (RT_SUCCESS(rc))
469 {
470 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
471#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
472 rc = PGMR0DynMapInitVM(pVM);
473#endif
474 if (RT_SUCCESS(rc))
475 {
476 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
477#ifdef VBOX_BUGREF_9217
478 rc = EMR0InitVM(pGVM);
479#else
480 rc = EMR0InitVM(pGVM, pVM);
481#endif
482 if (RT_SUCCESS(rc))
483 {
484 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
485#ifdef VBOX_WITH_PCI_PASSTHROUGH
486 rc = PciRawR0InitVM(pGVM, pVM);
487#endif
488 if (RT_SUCCESS(rc))
489 {
490 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
491 rc = GIMR0InitVM(pVM);
492 if (RT_SUCCESS(rc))
493 {
494 VMM_CHECK_SMAP_CHECK2(pVM, rc = VERR_VMM_RING0_ASSERTION);
495 if (RT_SUCCESS(rc))
496 {
497 GVMMR0DoneInitVM(pGVM);
498
499 /*
500 * Collect a bit of info for the VM release log.
501 */
502 pVM->vmm.s.fIsPreemptPendingApiTrusty = RTThreadPreemptIsPendingTrusty();
503 pVM->vmm.s.fIsPreemptPossible = RTThreadPreemptIsPossible();
504
505 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
506 return rc;
507 }
508
509 /* bail out */
510 GIMR0TermVM(pVM);
511 }
512#ifdef VBOX_WITH_PCI_PASSTHROUGH
513 PciRawR0TermVM(pGVM, pVM);
514#endif
515 }
516 }
517 }
518 }
519 HMR0TermVM(pVM);
520 }
521 }
522
523 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pVM->pSession);
524 return rc;
525}
526
527
528/**
529 * Does EMT specific VM initialization.
530 *
531 * @returns VBox status code.
532 * @param pGVM The ring-0 VM structure.
533 * @param pVM The cross context VM structure.
534 * @param idCpu The EMT that's calling.
535 */
536static int vmmR0InitVMEmt(PGVM pGVM, PVMCC pVM, VMCPUID idCpu)
537{
538 /* Paranoia (caller checked these already). */
539 AssertReturn(idCpu < pGVM->cCpus, VERR_INVALID_CPU_ID);
540 AssertReturn(pGVM->aCpus[idCpu].hEMT == RTThreadNativeSelf(), VERR_INVALID_CPU_ID);
541
542#ifdef LOG_ENABLED
543 /*
544 * Registration of ring 0 loggers.
545 */
546#ifdef VBOX_BUGREF_9217
547 PVMCPUCC pVCpu = &pGVM->aCpus[idCpu];
548#else
549 PVMCPUCC pVCpu = VMCC_GET_CPU(pVM, idCpu);
550#endif
551 PVMMR0LOGGER pR0Logger = pVCpu->vmm.s.pR0LoggerR0;
552 if ( pR0Logger
553 && !pR0Logger->fRegistered)
554 {
555 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
556 pR0Logger->fRegistered = true;
557 }
558#endif
559 RT_NOREF(pVM);
560
561 return VINF_SUCCESS;
562}
563
564
565
566/**
567 * Terminates the R0 bits for a particular VM instance.
568 *
569 * This is normally called by ring-3 as part of the VM termination process, but
570 * may alternatively be called during the support driver session cleanup when
571 * the VM object is destroyed (see GVMM).
572 *
573 * @returns VBox status code.
574 *
575 * @param pGVM The global (ring-0) VM structure.
576 * @param pVM The cross context VM structure.
577 * @param idCpu Set to 0 if EMT(0) or NIL_VMCPUID if session cleanup
578 * thread.
579 * @thread EMT(0) or session clean up thread.
580 */
581VMMR0_INT_DECL(int) VMMR0TermVM(PGVM pGVM, PVMCC pVM, VMCPUID idCpu)
582{
583 /*
584 * Check EMT(0) claim if we're called from userland.
585 */
586 if (idCpu != NIL_VMCPUID)
587 {
588 AssertReturn(idCpu == 0, VERR_INVALID_CPU_ID);
589 int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
590 if (RT_FAILURE(rc))
591 return rc;
592 }
593
594#ifdef VBOX_WITH_PCI_PASSTHROUGH
595 PciRawR0TermVM(pGVM, pVM);
596#endif
597
598 /*
599 * Tell GVMM what we're up to and check that we only do this once.
600 */
601 if (GVMMR0DoingTermVM(pGVM))
602 {
603 GIMR0TermVM(pVM);
604
605 /** @todo I wish to call PGMR0PhysFlushHandyPages(pVM, &pVM->aCpus[idCpu])
606 * here to make sure we don't leak any shared pages if we crash... */
607#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
608 PGMR0DynMapTermVM(pVM);
609#endif
610 HMR0TermVM(pVM);
611 }
612
613 /*
614 * Deregister the logger.
615 */
616 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pVM->pSession);
617 return VINF_SUCCESS;
618}
619
620
621/**
622 * An interrupt or unhalt force flag is set, deal with it.
623 *
624 * @returns VINF_SUCCESS (or VINF_EM_HALT).
625 * @param pVCpu The cross context virtual CPU structure.
626 * @param uMWait Result from EMMonitorWaitIsActive().
627 * @param enmInterruptibility Guest CPU interruptibility level.
628 */
629static int vmmR0DoHaltInterrupt(PVMCPUCC pVCpu, unsigned uMWait, CPUMINTERRUPTIBILITY enmInterruptibility)
630{
631 Assert(!TRPMHasTrap(pVCpu));
632 Assert( enmInterruptibility > CPUMINTERRUPTIBILITY_INVALID
633 && enmInterruptibility < CPUMINTERRUPTIBILITY_END);
634
635 /*
636 * Pending interrupts w/o any SMIs or NMIs? That's the usual case.
637 */
638 if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
639 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_INTERRUPT_NMI))
640 {
641 if (enmInterruptibility <= CPUMINTERRUPTIBILITY_UNRESTRAINED)
642 {
643 uint8_t u8Interrupt = 0;
644 int rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
645 Log(("vmmR0DoHaltInterrupt: CPU%d u8Interrupt=%d (%#x) rc=%Rrc\n", pVCpu->idCpu, u8Interrupt, u8Interrupt, rc));
646 if (RT_SUCCESS(rc))
647 {
648 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_UNHALT);
649
650 rc = TRPMAssertTrap(pVCpu, u8Interrupt, TRPM_HARDWARE_INT);
651 AssertRCSuccess(rc);
652 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltExec);
653 return rc;
654 }
655 }
656 }
657 /*
658 * SMI is not implemented yet, at least not here.
659 */
660 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_SMI))
661 {
662 return VINF_EM_HALT;
663 }
664 /*
665 * NMI.
666 */
667 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
668 {
669 if (enmInterruptibility < CPUMINTERRUPTIBILITY_NMI_INHIBIT)
670 {
671 /** @todo later. */
672 return VINF_EM_HALT;
673 }
674 }
675 /*
676 * Nested-guest virtual interrupt.
677 */
678 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST))
679 {
680 if (enmInterruptibility < CPUMINTERRUPTIBILITY_VIRT_INT_DISABLED)
681 {
682 /** @todo NSTVMX: NSTSVM: Remember, we might have to check and perform VM-exits
683 * here before injecting the virtual interrupt. See emR3ForcedActions
684 * for details. */
685 return VINF_EM_HALT;
686 }
687 }
688
689 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UNHALT))
690 {
691 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltExec);
692 return VINF_SUCCESS;
693 }
694 if (uMWait > 1)
695 {
696 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltExec);
697 return VINF_SUCCESS;
698 }
699
700 return VINF_EM_HALT;
701}
702
703
704/**
705 * This does one round of vmR3HaltGlobal1Halt().
706 *
707 * The rationale here is that we'll reduce latency in interrupt situations if we
708 * don't go to ring-3 immediately on a VINF_EM_HALT (guest executed HLT or
709 * MWAIT), but do one round of blocking here instead and hope the interrupt is
710 * raised in the meanwhile.
711 *
712 * If we go to ring-3 we'll quit the inner HM/NEM loop in EM and end up in the
713 * outer loop, which will then call VMR3WaitHalted() and that in turn will do a
714 * ring-0 call (unless we're too close to a timer event). When the interrupt
715 * wakes us up, we'll return from ring-0 and EM will by instinct do a
716 * rescheduling (because of raw-mode) before it resumes the HM/NEM loop and gets
717 * back to VMMR0EntryFast().
718 *
719 * @returns VINF_SUCCESS or VINF_EM_HALT.
720 * @param pGVM The ring-0 VM structure.
721 * @param pVM The cross context VM structure.
722 * @param pGVCpu The ring-0 virtual CPU structure.
723 * @param pVCpu The cross context virtual CPU structure.
724 *
725 * @todo r=bird: All the blocking/waiting and EMT management should move out of
726 * the VM module, probably to VMM. Then this would be more weird wrt
727 * parameters and statistics.
728 */
729static int vmmR0DoHalt(PGVM pGVM, PVMCC pVM, PGVMCPU pGVCpu, PVMCPUCC pVCpu)
730{
731#ifdef VBOX_BUGREF_9217
732 Assert(pVCpu == pGVCpu);
733#else
734 Assert(pVCpu == pGVCpu->pVCpu);
735#endif
736
737 /*
738 * Do spin stat historization.
739 */
740 if (++pVCpu->vmm.s.cR0Halts & 0xff)
741 { /* likely */ }
742 else if (pVCpu->vmm.s.cR0HaltsSucceeded > pVCpu->vmm.s.cR0HaltsToRing3)
743 {
744 pVCpu->vmm.s.cR0HaltsSucceeded = 2;
745 pVCpu->vmm.s.cR0HaltsToRing3 = 0;
746 }
747 else
748 {
749 pVCpu->vmm.s.cR0HaltsSucceeded = 0;
750 pVCpu->vmm.s.cR0HaltsToRing3 = 2;
751 }
752
753 /*
754 * Flags that make us go to ring-3.
755 */
756 uint32_t const fVmFFs = VM_FF_TM_VIRTUAL_SYNC | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA
757 | VM_FF_DBGF | VM_FF_REQUEST | VM_FF_CHECK_VM_STATE
758 | VM_FF_RESET | VM_FF_EMT_RENDEZVOUS | VM_FF_PGM_NEED_HANDY_PAGES
759 | VM_FF_PGM_NO_MEMORY | VM_FF_REM_HANDLER_NOTIFY | VM_FF_DEBUG_SUSPEND;
760 uint64_t const fCpuFFs = VMCPU_FF_TIMER | VMCPU_FF_PDM_CRITSECT | VMCPU_FF_IEM
761 | VMCPU_FF_REQUEST | VMCPU_FF_DBGF | VMCPU_FF_HM_UPDATE_CR3
762 | VMCPU_FF_HM_UPDATE_PAE_PDPES | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
763 | VMCPU_FF_TO_R3 | VMCPU_FF_IOM;
764
765 /*
766 * Check preconditions.
767 */
768 unsigned const uMWait = EMMonitorWaitIsActive(pVCpu);
769 CPUMINTERRUPTIBILITY const enmInterruptibility = CPUMGetGuestInterruptibility(pVCpu);
770 if ( pVCpu->vmm.s.fMayHaltInRing0
771 && !TRPMHasTrap(pVCpu)
772 && ( enmInterruptibility == CPUMINTERRUPTIBILITY_UNRESTRAINED
773 || uMWait > 1))
774 {
775 if ( !VM_FF_IS_ANY_SET(pVM, fVmFFs)
776 && !VMCPU_FF_IS_ANY_SET(pVCpu, fCpuFFs))
777 {
778 /*
779 * Interrupts pending already?
780 */
781 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
782 APICUpdatePendingInterrupts(pVCpu);
783
784 /*
785 * Flags that wake us up from the halted state.
786 */
787 uint64_t const fIntMask = VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_INTERRUPT_NESTED_GUEST
788 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT;
789
790 if (VMCPU_FF_IS_ANY_SET(pVCpu, fIntMask))
791 return vmmR0DoHaltInterrupt(pVCpu, uMWait, enmInterruptibility);
792 ASMNopPause();
793
794 /*
795 * Check out how long till the next timer event.
796 */
797 uint64_t u64Delta;
798 uint64_t u64GipTime = TMTimerPollGIP(pVM, pVCpu, &u64Delta);
799
800 if ( !VM_FF_IS_ANY_SET(pVM, fVmFFs)
801 && !VMCPU_FF_IS_ANY_SET(pVCpu, fCpuFFs))
802 {
803 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
804 APICUpdatePendingInterrupts(pVCpu);
805
806 if (VMCPU_FF_IS_ANY_SET(pVCpu, fIntMask))
807 return vmmR0DoHaltInterrupt(pVCpu, uMWait, enmInterruptibility);
808
809 /*
810 * Wait if there is enough time to the next timer event.
811 */
812 if (u64Delta >= pVCpu->vmm.s.cNsSpinBlockThreshold)
813 {
814 /* If there are a few other CPU cores around, we will procrastinate a
815 little before going to sleep, hoping for some device raising an
816 interrupt or similar. Though, the best thing here would be to
817 dynamically adjust the spin count according to its usefulness or
818 something... */
819 if ( pVCpu->vmm.s.cR0HaltsSucceeded > pVCpu->vmm.s.cR0HaltsToRing3
820 && RTMpGetOnlineCount() >= 4)
821 {
822 /** @todo Figure out how we can skip this if it hasn't helped recently...
823 * @bugref{9172#c12} */
824 uint32_t cSpinLoops = 42;
825 while (cSpinLoops-- > 0)
826 {
827 ASMNopPause();
828 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
829 APICUpdatePendingInterrupts(pVCpu);
830 ASMNopPause();
831 if (VM_FF_IS_ANY_SET(pVM, fVmFFs))
832 {
833 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltToR3FromSpin);
834 return VINF_EM_HALT;
835 }
836 ASMNopPause();
837 if (VMCPU_FF_IS_ANY_SET(pVCpu, fCpuFFs))
838 {
839 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltToR3FromSpin);
840 return VINF_EM_HALT;
841 }
842 ASMNopPause();
843 if (VMCPU_FF_IS_ANY_SET(pVCpu, fIntMask))
844 {
845 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltExecFromSpin);
846 return vmmR0DoHaltInterrupt(pVCpu, uMWait, enmInterruptibility);
847 }
848 ASMNopPause();
849 }
850 }
851
852 /* Block. We have to set the state to VMCPUSTATE_STARTED_HALTED here so ring-3
853 knows when to notify us (cannot access VMINTUSERPERVMCPU::fWait from here). */
854 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HALTED, VMCPUSTATE_STARTED);
855 uint64_t const u64StartSchedHalt = RTTimeNanoTS();
856 int rc = GVMMR0SchedHalt(pGVM, pVM, pGVCpu, u64GipTime);
857 uint64_t const u64EndSchedHalt = RTTimeNanoTS();
858 uint64_t const cNsElapsedSchedHalt = u64EndSchedHalt - u64StartSchedHalt;
859 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_HALTED);
860 STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->vmm.s.StatR0HaltBlock, cNsElapsedSchedHalt);
861 if ( rc == VINF_SUCCESS
862 || rc == VERR_INTERRUPTED)
863
864 {
865 /* Keep some stats like ring-3 does. */
866 int64_t const cNsOverslept = u64EndSchedHalt - u64GipTime;
867 if (cNsOverslept > 50000)
868 STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->vmm.s.StatR0HaltBlockOverslept, cNsOverslept);
869 else if (cNsOverslept < -50000)
870 STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->vmm.s.StatR0HaltBlockInsomnia, cNsElapsedSchedHalt);
871 else
872 STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->vmm.s.StatR0HaltBlockOnTime, cNsElapsedSchedHalt);
873
874 /*
875 * Recheck whether we can resume execution or have to go to ring-3.
876 */
877 if ( !VM_FF_IS_ANY_SET(pVM, fVmFFs)
878 && !VMCPU_FF_IS_ANY_SET(pVCpu, fCpuFFs))
879 {
880 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
881 APICUpdatePendingInterrupts(pVCpu);
882 if (VMCPU_FF_IS_ANY_SET(pVCpu, fIntMask))
883 {
884 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltExecFromBlock);
885 return vmmR0DoHaltInterrupt(pVCpu, uMWait, enmInterruptibility);
886 }
887 }
888 }
889 }
890 }
891 }
892 }
893 return VINF_EM_HALT;
894}
895
896
897/**
898 * VMM ring-0 thread-context callback.
899 *
900 * This does common HM state updating and calls the HM-specific thread-context
901 * callback.
902 *
903 * @param enmEvent The thread-context event.
904 * @param pvUser Opaque pointer to the VMCPU.
905 *
906 * @thread EMT(pvUser)
907 */
908static DECLCALLBACK(void) vmmR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, void *pvUser)
909{
910 PVMCPUCC pVCpu = (PVMCPUCC)pvUser;
911
912 switch (enmEvent)
913 {
914 case RTTHREADCTXEVENT_IN:
915 {
916 /*
917 * Linux may call us with preemption enabled (really!) but technically we
918 * cannot get preempted here, otherwise we end up in an infinite recursion
919 * scenario (i.e. preempted in resume hook -> preempt hook -> resume hook...
920 * ad infinitum). Let's just disable preemption for now...
921 */
922 /** @todo r=bird: I don't believe the above. The Linux code is clearly enabling
923 * preemption after doing the callout (one or two functions up the
924 * call chain). */
925 /** @todo r=ramshankar: See @bugref{5313#c30}. */
926 RTTHREADPREEMPTSTATE ParanoidPreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
927 RTThreadPreemptDisable(&ParanoidPreemptState);
928
929 /* We need to update the VCPU <-> host CPU mapping. */
930 RTCPUID idHostCpu;
931 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
932 pVCpu->iHostCpuSet = iHostCpuSet;
933 ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
934
935 /* In the very unlikely event that the GIP delta for the CPU we're
936 rescheduled onto needs calculating, try to force a return to ring-3.
937 We unfortunately cannot do the measurements right here. */
938 if (RT_UNLIKELY(SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
939 VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
940
941 /* Invoke the HM-specific thread-context callback. */
942 HMR0ThreadCtxCallback(enmEvent, pvUser);
943
944 /* Restore preemption. */
945 RTThreadPreemptRestore(&ParanoidPreemptState);
946 break;
947 }
948
949 case RTTHREADCTXEVENT_OUT:
950 {
951 /* Invoke the HM-specific thread-context callback. */
952 HMR0ThreadCtxCallback(enmEvent, pvUser);
953
954 /*
955 * Sigh. See VMMGetCpu() used by VMCPU_ASSERT_EMT(). We cannot let several VCPUs
956 * have the same host CPU associated with them.
957 */
958 pVCpu->iHostCpuSet = UINT32_MAX;
959 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
960 break;
961 }
962
963 default:
964 /* Invoke the HM-specific thread-context callback. */
965 HMR0ThreadCtxCallback(enmEvent, pvUser);
966 break;
967 }
968}
969
970
971/**
972 * Creates thread switching hook for the current EMT thread.
973 *
974 * This is called by GVMMR0CreateVM and GVMMR0RegisterVCpu. If the host
975 * platform does not implement switcher hooks, no hooks will be created and the
976 * member is set to NIL_RTTHREADCTXHOOK.
977 *
978 * @returns VBox status code.
979 * @param pVCpu The cross context virtual CPU structure.
980 * @thread EMT(pVCpu)
981 */
982VMMR0_INT_DECL(int) VMMR0ThreadCtxHookCreateForEmt(PVMCPUCC pVCpu)
983{
984 VMCPU_ASSERT_EMT(pVCpu);
985 Assert(pVCpu->vmm.s.hCtxHook == NIL_RTTHREADCTXHOOK);
986
987#if 1 /* To disable this stuff change to zero. */
988 int rc = RTThreadCtxHookCreate(&pVCpu->vmm.s.hCtxHook, 0, vmmR0ThreadCtxCallback, pVCpu);
989 if (RT_SUCCESS(rc))
990 return rc;
991#else
992 RT_NOREF(vmmR0ThreadCtxCallback);
993 int rc = VERR_NOT_SUPPORTED;
994#endif
995
996 pVCpu->vmm.s.hCtxHook = NIL_RTTHREADCTXHOOK;
997 if (rc == VERR_NOT_SUPPORTED)
998 return VINF_SUCCESS;
999
1000 LogRelMax(32, ("RTThreadCtxHookCreate failed! rc=%Rrc pVCpu=%p idCpu=%RU32\n", rc, pVCpu, pVCpu->idCpu));
1001 return VINF_SUCCESS; /* Just ignore it, we can live without context hooks. */
1002}
1003
1004
1005/**
1006 * Destroys the thread switching hook for the specified VCPU.
1007 *
1008 * @param pVCpu The cross context virtual CPU structure.
1009 * @remarks Can be called from any thread.
1010 */
1011VMMR0_INT_DECL(void) VMMR0ThreadCtxHookDestroyForEmt(PVMCPUCC pVCpu)
1012{
1013 int rc = RTThreadCtxHookDestroy(pVCpu->vmm.s.hCtxHook);
1014 AssertRC(rc);
1015 pVCpu->vmm.s.hCtxHook = NIL_RTTHREADCTXHOOK;
1016}
1017
1018
1019/**
1020 * Disables the thread switching hook for this VCPU (if we got one).
1021 *
1022 * @param pVCpu The cross context virtual CPU structure.
1023 * @thread EMT(pVCpu)
1024 *
1025 * @remarks This also clears VMCPU::idHostCpu, so the mapping is invalid after
1026 * this call. This means you have to be careful with what you do!
1027 */
1028VMMR0_INT_DECL(void) VMMR0ThreadCtxHookDisable(PVMCPUCC pVCpu)
1029{
1030 /*
1031 * Clear the VCPU <-> host CPU mapping as we've left HM context.
1032 * @bugref{7726#c19} explains the need for this trick:
1033 *
1034 * hmR0VmxCallRing3Callback/hmR0SvmCallRing3Callback &
1035 * hmR0VmxLeaveSession/hmR0SvmLeaveSession disables context hooks during
1036 * longjmp & normal return to ring-3, which opens a window where we may be
1037 * rescheduled without changing VMCPU::idHostCpu, causing confusion if
1038 * the CPU starts executing a different EMT. Both functions first disable
1039 * preemption and then call HMR0LeaveCpu, which invalidates idHostCpu, leaving
1040 * an opening for getting preempted.
1041 */
1042 /** @todo Make HM not need this API! Then we could leave the hooks enabled
1043 * all the time. */
1044 /** @todo move this into the context hook disabling if(). */
1045 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1046
1047 /*
1048 * Disable the context hook, if we got one.
1049 */
1050 if (pVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1051 {
1052 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1053 int rc = RTThreadCtxHookDisable(pVCpu->vmm.s.hCtxHook);
1054 AssertRC(rc);
1055 }
1056}
1057
1058
1059/**
1060 * Internal version of VMMR0ThreadCtxHooksAreRegistered.
1061 *
1062 * @returns true if registered, false otherwise.
1063 * @param pVCpu The cross context virtual CPU structure.
1064 */
1065DECLINLINE(bool) vmmR0ThreadCtxHookIsEnabled(PVMCPUCC pVCpu)
1066{
1067 return RTThreadCtxHookIsEnabled(pVCpu->vmm.s.hCtxHook);
1068}
1069
1070
1071/**
1072 * Whether thread-context hooks are registered for this VCPU.
1073 *
1074 * @returns true if registered, false otherwise.
1075 * @param pVCpu The cross context virtual CPU structure.
1076 */
1077VMMR0_INT_DECL(bool) VMMR0ThreadCtxHookIsEnabled(PVMCPUCC pVCpu)
1078{
1079 return vmmR0ThreadCtxHookIsEnabled(pVCpu);
1080}
1081
1082
1083#ifdef VBOX_WITH_STATISTICS
1084/**
1085 * Record return code statistics.
1086 * @param pVM The cross context VM structure.
1087 * @param pVCpu The cross context virtual CPU structure.
1088 * @param rc The status code.
1089 */
1090static void vmmR0RecordRC(PVMCC pVM, PVMCPUCC pVCpu, int rc)
1091{
1092 /*
1093 * Collect statistics.
1094 */
1095 switch (rc)
1096 {
1097 case VINF_SUCCESS:
1098 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetNormal);
1099 break;
1100 case VINF_EM_RAW_INTERRUPT:
1101 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterrupt);
1102 break;
1103 case VINF_EM_RAW_INTERRUPT_HYPER:
1104 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptHyper);
1105 break;
1106 case VINF_EM_RAW_GUEST_TRAP:
1107 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGuestTrap);
1108 break;
1109 case VINF_EM_RAW_RING_SWITCH:
1110 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitch);
1111 break;
1112 case VINF_EM_RAW_RING_SWITCH_INT:
1113 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitchInt);
1114 break;
1115 case VINF_EM_RAW_STALE_SELECTOR:
1116 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetStaleSelector);
1117 break;
1118 case VINF_EM_RAW_IRET_TRAP:
1119 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIRETTrap);
1120 break;
1121 case VINF_IOM_R3_IOPORT_READ:
1122 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIORead);
1123 break;
1124 case VINF_IOM_R3_IOPORT_WRITE:
1125 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOWrite);
1126 break;
1127 case VINF_IOM_R3_IOPORT_COMMIT_WRITE:
1128 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOCommitWrite);
1129 break;
1130 case VINF_IOM_R3_MMIO_READ:
1131 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIORead);
1132 break;
1133 case VINF_IOM_R3_MMIO_WRITE:
1134 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOWrite);
1135 break;
1136 case VINF_IOM_R3_MMIO_COMMIT_WRITE:
1137 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOCommitWrite);
1138 break;
1139 case VINF_IOM_R3_MMIO_READ_WRITE:
1140 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOReadWrite);
1141 break;
1142 case VINF_PATM_HC_MMIO_PATCH_READ:
1143 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchRead);
1144 break;
1145 case VINF_PATM_HC_MMIO_PATCH_WRITE:
1146 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchWrite);
1147 break;
1148 case VINF_CPUM_R3_MSR_READ:
1149 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMSRRead);
1150 break;
1151 case VINF_CPUM_R3_MSR_WRITE:
1152 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMSRWrite);
1153 break;
1154 case VINF_EM_RAW_EMULATE_INSTR:
1155 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetEmulate);
1156 break;
1157 case VINF_PATCH_EMULATE_INSTR:
1158 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchEmulate);
1159 break;
1160 case VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT:
1161 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetLDTFault);
1162 break;
1163 case VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT:
1164 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGDTFault);
1165 break;
1166 case VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT:
1167 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIDTFault);
1168 break;
1169 case VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT:
1170 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTSSFault);
1171 break;
1172 case VINF_CSAM_PENDING_ACTION:
1173 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCSAMTask);
1174 break;
1175 case VINF_PGM_SYNC_CR3:
1176 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetSyncCR3);
1177 break;
1178 case VINF_PATM_PATCH_INT3:
1179 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchInt3);
1180 break;
1181 case VINF_PATM_PATCH_TRAP_PF:
1182 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchPF);
1183 break;
1184 case VINF_PATM_PATCH_TRAP_GP:
1185 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchGP);
1186 break;
1187 case VINF_PATM_PENDING_IRQ_AFTER_IRET:
1188 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchIretIRQ);
1189 break;
1190 case VINF_EM_RESCHEDULE_REM:
1191 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRescheduleREM);
1192 break;
1193 case VINF_EM_RAW_TO_R3:
1194 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Total);
1195 if (VM_FF_IS_SET(pVM, VM_FF_TM_VIRTUAL_SYNC))
1196 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3TMVirt);
1197 else if (VM_FF_IS_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES))
1198 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3HandyPages);
1199 else if (VM_FF_IS_SET(pVM, VM_FF_PDM_QUEUES))
1200 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3PDMQueues);
1201 else if (VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
1202 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Rendezvous);
1203 else if (VM_FF_IS_SET(pVM, VM_FF_PDM_DMA))
1204 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3DMA);
1205 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TIMER))
1206 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Timer);
1207 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PDM_CRITSECT))
1208 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3CritSect);
1209 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TO_R3))
1210 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3FF);
1211 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
1212 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Iem);
1213 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IOM))
1214 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Iom);
1215 else
1216 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Unknown);
1217 break;
1218
1219 case VINF_EM_RAW_TIMER_PENDING:
1220 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTimerPending);
1221 break;
1222 case VINF_EM_RAW_INTERRUPT_PENDING:
1223 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptPending);
1224 break;
1225 case VINF_VMM_CALL_HOST:
1226 switch (pVCpu->vmm.s.enmCallRing3Operation)
1227 {
1228 case VMMCALLRING3_PDM_CRIT_SECT_ENTER:
1229 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPDMCritSectEnter);
1230 break;
1231 case VMMCALLRING3_PDM_LOCK:
1232 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPDMLock);
1233 break;
1234 case VMMCALLRING3_PGM_POOL_GROW:
1235 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMPoolGrow);
1236 break;
1237 case VMMCALLRING3_PGM_LOCK:
1238 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMLock);
1239 break;
1240 case VMMCALLRING3_PGM_MAP_CHUNK:
1241 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMMapChunk);
1242 break;
1243 case VMMCALLRING3_PGM_ALLOCATE_HANDY_PAGES:
1244 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMAllocHandy);
1245 break;
1246 case VMMCALLRING3_REM_REPLAY_HANDLER_NOTIFICATIONS:
1247 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallRemReplay);
1248 break;
1249 case VMMCALLRING3_VMM_LOGGER_FLUSH:
1250 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallLogFlush);
1251 break;
1252 case VMMCALLRING3_VM_SET_ERROR:
1253 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallVMSetError);
1254 break;
1255 case VMMCALLRING3_VM_SET_RUNTIME_ERROR:
1256 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallVMSetRuntimeError);
1257 break;
1258 case VMMCALLRING3_VM_R0_ASSERTION:
1259 default:
1260 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCallRing3);
1261 break;
1262 }
1263 break;
1264 case VINF_PATM_DUPLICATE_FUNCTION:
1265 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPATMDuplicateFn);
1266 break;
1267 case VINF_PGM_CHANGE_MODE:
1268 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMChangeMode);
1269 break;
1270 case VINF_PGM_POOL_FLUSH_PENDING:
1271 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMFlushPending);
1272 break;
1273 case VINF_EM_PENDING_REQUEST:
1274 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPendingRequest);
1275 break;
1276 case VINF_EM_HM_PATCH_TPR_INSTR:
1277 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchTPR);
1278 break;
1279 default:
1280 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMisc);
1281 break;
1282 }
1283}
1284#endif /* VBOX_WITH_STATISTICS */
1285
1286
1287/**
1288 * The Ring 0 entry point, called by the fast-ioctl path.
1289 *
1290 * @param pGVM The global (ring-0) VM structure.
1291 * @param pVM The cross context VM structure.
1292 * The return code is stored in pVM->vmm.s.iLastGZRc.
1293 * @param idCpu The Virtual CPU ID of the calling EMT.
1294 * @param enmOperation Which operation to execute.
1295 * @remarks Assume called with interrupts _enabled_.
1296 */
1297VMMR0DECL(void) VMMR0EntryFast(PGVM pGVM, PVMCC pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation)
1298{
1299 /*
1300 * Validation.
1301 */
1302 if ( idCpu < pGVM->cCpus
1303 && pGVM->cCpus == pVM->cCpus)
1304 { /*likely*/ }
1305 else
1306 {
1307 SUPR0Printf("VMMR0EntryFast: Bad idCpu=%#x cCpus=%#x/%#x\n", idCpu, pGVM->cCpus, pVM->cCpus);
1308 return;
1309 }
1310
1311 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
1312#ifdef VBOX_BUGREF_9217
1313 PVMCPUCC pVCpu = pGVCpu;
1314#else
1315 PVMCPUCC pVCpu = VMCC_GET_CPU(pVM, idCpu);
1316#endif
1317 RTNATIVETHREAD const hNativeThread = RTThreadNativeSelf();
1318 if (RT_LIKELY( pGVCpu->hEMT == hNativeThread
1319 && pVCpu->hNativeThreadR0 == hNativeThread))
1320 { /* likely */ }
1321 else
1322 {
1323 SUPR0Printf("VMMR0EntryFast: Bad thread idCpu=%#x hNativeSelf=%p pGVCpu->hEmt=%p pVCpu->hNativeThreadR0=%p\n",
1324 idCpu, hNativeThread, pGVCpu->hEMT, pVCpu->hNativeThreadR0);
1325 return;
1326 }
1327
1328 /*
1329 * SMAP fun.
1330 */
1331 VMM_CHECK_SMAP_SETUP();
1332 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1333
1334 /*
1335 * Perform requested operation.
1336 */
1337 switch (enmOperation)
1338 {
1339 /*
1340 * Run guest code using the available hardware acceleration technology.
1341 */
1342 case VMMR0_DO_HM_RUN:
1343 {
1344 for (;;) /* hlt loop */
1345 {
1346 /*
1347 * Disable preemption.
1348 */
1349 Assert(!vmmR0ThreadCtxHookIsEnabled(pVCpu));
1350 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
1351 RTThreadPreemptDisable(&PreemptState);
1352
1353 /*
1354 * Get the host CPU identifiers, make sure they are valid and that
1355 * we've got a TSC delta for the CPU.
1356 */
1357 RTCPUID idHostCpu;
1358 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
1359 if (RT_LIKELY( iHostCpuSet < RTCPUSET_MAX_CPUS
1360 && SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
1361 {
1362 pVCpu->iHostCpuSet = iHostCpuSet;
1363 ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
1364
1365 /*
1366 * Update the periodic preemption timer if it's active.
1367 */
1368 if (pVM->vmm.s.fUsePeriodicPreemptionTimers)
1369 GVMMR0SchedUpdatePeriodicPreemptionTimer(pVM, pVCpu->idHostCpu, TMCalcHostTimerFrequency(pVM, pVCpu));
1370 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1371
1372#ifdef VMM_R0_TOUCH_FPU
1373 /*
1374 * Make sure we've got the FPU state loaded so we don't need to clear
1375 * CR0.TS and get out of sync with the host kernel when loading the guest
1376 * FPU state. @ref sec_cpum_fpu (CPUM.cpp) and @bugref{4053}.
1377 */
1378 CPUMR0TouchHostFpu();
1379#endif
1380 int rc;
1381 bool fPreemptRestored = false;
1382 if (!HMR0SuspendPending())
1383 {
1384 /*
1385 * Enable the context switching hook.
1386 */
1387 if (pVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1388 {
1389 Assert(!RTThreadCtxHookIsEnabled(pVCpu->vmm.s.hCtxHook));
1390 int rc2 = RTThreadCtxHookEnable(pVCpu->vmm.s.hCtxHook); AssertRC(rc2);
1391 }
1392
1393 /*
1394 * Enter HM context.
1395 */
1396 rc = HMR0Enter(pVCpu);
1397 if (RT_SUCCESS(rc))
1398 {
1399 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
1400
1401 /*
1402 * When preemption hooks are in place, enable preemption now that
1403 * we're in HM context.
1404 */
1405 if (vmmR0ThreadCtxHookIsEnabled(pVCpu))
1406 {
1407 fPreemptRestored = true;
1408 RTThreadPreemptRestore(&PreemptState);
1409 }
1410
1411 /*
1412 * Setup the longjmp machinery and execute guest code (calls HMR0RunGuestCode).
1413 */
1414 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1415 rc = vmmR0CallRing3SetJmp(&pVCpu->vmm.s.CallRing3JmpBufR0, HMR0RunGuestCode, pVM, pVCpu);
1416 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1417
1418 /*
1419 * Assert sanity on the way out. Using manual assertions code here as normal
1420 * assertions are going to panic the host since we're outside the setjmp/longjmp zone.
1421 */
1422 if (RT_UNLIKELY( VMCPU_GET_STATE(pVCpu) != VMCPUSTATE_STARTED_HM
1423 && RT_SUCCESS_NP(rc) && rc != VINF_VMM_CALL_HOST ))
1424 {
1425 pVM->vmm.s.szRing0AssertMsg1[0] = '\0';
1426 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2),
1427 "Got VMCPU state %d expected %d.\n", VMCPU_GET_STATE(pVCpu), VMCPUSTATE_STARTED_HM);
1428 rc = VERR_VMM_WRONG_HM_VMCPU_STATE;
1429 }
1430 /** @todo Get rid of this. HM shouldn't disable the context hook. */
1431 else if (RT_UNLIKELY(vmmR0ThreadCtxHookIsEnabled(pVCpu)))
1432 {
1433 pVM->vmm.s.szRing0AssertMsg1[0] = '\0';
1434 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2),
1435 "Thread-context hooks still enabled! VCPU=%p Id=%u rc=%d.\n", pVCpu, pVCpu->idCpu, rc);
1436 rc = VERR_INVALID_STATE;
1437 }
1438
1439 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
1440 }
1441 STAM_COUNTER_INC(&pVM->vmm.s.StatRunGC);
1442
1443 /*
1444 * Invalidate the host CPU identifiers before we disable the context
1445 * hook / restore preemption.
1446 */
1447 pVCpu->iHostCpuSet = UINT32_MAX;
1448 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1449
1450 /*
1451 * Disable context hooks. Due to unresolved cleanup issues, we
1452 * cannot leave the hooks enabled when we return to ring-3.
1453 *
1454 * Note! At the moment HM may also have disabled the hook
1455 * when we get here, but the IPRT API handles that.
1456 */
1457 if (pVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1458 {
1459 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1460 RTThreadCtxHookDisable(pVCpu->vmm.s.hCtxHook);
1461 }
1462 }
1463 /*
1464 * The system is about to go into suspend mode; go back to ring 3.
1465 */
1466 else
1467 {
1468 rc = VINF_EM_RAW_INTERRUPT;
1469 pVCpu->iHostCpuSet = UINT32_MAX;
1470 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1471 }
1472
1473 /** @todo When HM stops messing with the context hook state, we'll disable
1474 * preemption again before the RTThreadCtxHookDisable call. */
1475 if (!fPreemptRestored)
1476 RTThreadPreemptRestore(&PreemptState);
1477
1478 pVCpu->vmm.s.iLastGZRc = rc;
1479
1480 /* Fire dtrace probe and collect statistics. */
1481 VBOXVMM_R0_VMM_RETURN_TO_RING3_HM(pVCpu, CPUMQueryGuestCtxPtr(pVCpu), rc);
1482#ifdef VBOX_WITH_STATISTICS
1483 vmmR0RecordRC(pVM, pVCpu, rc);
1484#endif
1485#if 1
1486 /*
1487 * If this is a halt.
1488 */
1489 if (rc != VINF_EM_HALT)
1490 { /* we're not in a hurry for a HLT, so prefer this path */ }
1491 else
1492 {
1493 pVCpu->vmm.s.iLastGZRc = rc = vmmR0DoHalt(pGVM, pVM, pGVCpu, pVCpu);
1494 if (rc == VINF_SUCCESS)
1495 {
1496 pVCpu->vmm.s.cR0HaltsSucceeded++;
1497 continue;
1498 }
1499 pVCpu->vmm.s.cR0HaltsToRing3++;
1500 }
1501#endif
1502 }
1503 /*
1504 * Invalid CPU set index or TSC delta in need of measuring.
1505 */
1506 else
1507 {
1508 pVCpu->iHostCpuSet = UINT32_MAX;
1509 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1510 RTThreadPreemptRestore(&PreemptState);
1511 if (iHostCpuSet < RTCPUSET_MAX_CPUS)
1512 {
1513 int rc = SUPR0TscDeltaMeasureBySetIndex(pVM->pSession, iHostCpuSet, 0 /*fFlags*/,
1514 2 /*cMsWaitRetry*/, 5*RT_MS_1SEC /*cMsWaitThread*/,
1515 0 /*default cTries*/);
1516 if (RT_SUCCESS(rc) || rc == VERR_CPU_OFFLINE)
1517 pVCpu->vmm.s.iLastGZRc = VINF_EM_RAW_TO_R3;
1518 else
1519 pVCpu->vmm.s.iLastGZRc = rc;
1520 }
1521 else
1522 pVCpu->vmm.s.iLastGZRc = VERR_INVALID_CPU_INDEX;
1523 }
1524 break;
1525
1526 } /* halt loop. */
1527 break;
1528 }
1529
1530#ifdef VBOX_WITH_NEM_R0
1531# if defined(RT_ARCH_AMD64) && defined(RT_OS_WINDOWS)
1532 case VMMR0_DO_NEM_RUN:
1533 {
1534 /*
1535 * Setup the longjmp machinery and execute guest code (calls NEMR0RunGuestCode).
1536 */
1537 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1538 int rc = vmmR0CallRing3SetJmp2(&pVCpu->vmm.s.CallRing3JmpBufR0, NEMR0RunGuestCode, pGVM, idCpu);
1539 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1540 STAM_COUNTER_INC(&pVM->vmm.s.StatRunGC);
1541
1542 pVCpu->vmm.s.iLastGZRc = rc;
1543
1544 /*
1545 * Fire dtrace probe and collect statistics.
1546 */
1547 VBOXVMM_R0_VMM_RETURN_TO_RING3_NEM(pVCpu, CPUMQueryGuestCtxPtr(pVCpu), rc);
1548# ifdef VBOX_WITH_STATISTICS
1549 vmmR0RecordRC(pVM, pVCpu, rc);
1550# endif
1551 break;
1552 }
1553# endif
1554#endif
1555
1556 /*
1557 * For profiling.
1558 */
1559 case VMMR0_DO_NOP:
1560 pVCpu->vmm.s.iLastGZRc = VINF_SUCCESS;
1561 break;
1562
1563 /*
1564 * Shouldn't happen.
1565 */
1566 default:
1567 AssertMsgFailed(("%#x\n", enmOperation));
1568 pVCpu->vmm.s.iLastGZRc = VERR_NOT_SUPPORTED;
1569 break;
1570 }
1571 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1572}
1573
1574
1575/**
1576 * Validates a session or VM session argument.
1577 *
1578 * @returns true / false accordingly.
1579 * @param pVM The cross context VM structure.
1580 * @param pClaimedSession The session claim to validate.
1581 * @param pSession The session argument.
1582 */
1583DECLINLINE(bool) vmmR0IsValidSession(PVMCC pVM, PSUPDRVSESSION pClaimedSession, PSUPDRVSESSION pSession)
1584{
1585 /* This must be set! */
1586 if (!pSession)
1587 return false;
1588
1589 /* Only one out of the two. */
1590 if (pVM && pClaimedSession)
1591 return false;
1592 if (pVM)
1593 pClaimedSession = pVM->pSession;
1594 return pClaimedSession == pSession;
1595}
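/*
 * A minimal usage sketch (hypothetical request handler, for illustration
 * only): a request that is not VM-bound carries the session it claims to
 * belong to, and the handler rejects it unless that claim matches the
 * caller's session as checked by vmmR0IsValidSession above.
 */
#if 0 /* illustration only */
static int vmmR0SessionCheckSketch(PVMCC pVM, PSUPDRVSESSION pClaimedSession, PSUPDRVSESSION pSession)
{
    if (!vmmR0IsValidSession(pVM, pClaimedSession, pSession))
        return VERR_PERMISSION_DENIED;
    return VINF_SUCCESS;
}
#endif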
1596
1597
1598/**
1599 * VMMR0EntryEx worker function, either called directly or, whenever possible,
1600 * called through a longjmp so we can exit safely on failure.
1601 *
1602 * @returns VBox status code.
1603 * @param pGVM The global (ring-0) VM structure.
1604 * @param pVM The cross context VM structure.
1605 * @param idCpu Virtual CPU ID argument. Must be NIL_VMCPUID if pVM
1606 * is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't
1607 * @param enmOperation Which operation to execute.
1608 * @param pReqHdr This points to a SUPVMMR0REQHDR packet. Optional.
1609 * The support driver validates this if it's present.
1610 * @param u64Arg Some simple constant argument.
1611 * @param pSession The session of the caller.
1612 *
1613 * @remarks Assume called with interrupts _enabled_.
1614 */
1615static int vmmR0EntryExWorker(PGVM pGVM, PVMCC pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation,
1616 PSUPVMMR0REQHDR pReqHdr, uint64_t u64Arg, PSUPDRVSESSION pSession)
1617{
1618 /*
1619 * Validate pGVM, pVM and idCpu for consistency and validity.
1620 */
1621 if ( pGVM != NULL
1622 || pVM != NULL)
1623 {
1624 if (RT_LIKELY( RT_VALID_PTR(pGVM)
1625 && RT_VALID_PTR(pVM)
1626 && ((uintptr_t)pVM & PAGE_OFFSET_MASK) == 0))
1627 { /* likely */ }
1628 else
1629 {
1630 SUPR0Printf("vmmR0EntryExWorker: Invalid pGVM=%p and/or pVM=%p! (op=%d)\n", pGVM, pVM, enmOperation);
1631 return VERR_INVALID_POINTER;
1632 }
1633
1634#ifdef VBOX_BUGREF_9217
1635 if (RT_LIKELY(pGVM == pVM))
1636#else
1637 if (RT_LIKELY(pGVM->pVM == pVM))
1638#endif
1639 { /* likely */ }
1640 else
1641 {
1642#ifdef VBOX_BUGREF_9217
1643 SUPR0Printf("vmmR0EntryExWorker: pVM mismatch: got %p, pGVM/pVM=%p\n", pVM, pGVM);
1644#else
1645 SUPR0Printf("vmmR0EntryExWorker: pVM mismatch: got %p, pGVM->pVM=%p\n", pVM, pGVM->pVM);
1646#endif
1647 return VERR_INVALID_PARAMETER;
1648 }
1649
1650 if (RT_LIKELY(idCpu == NIL_VMCPUID || idCpu < pGVM->cCpus))
1651 { /* likely */ }
1652 else
1653 {
1654 SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu %#x (cCpus=%#x)\n", idCpu, pGVM->cCpus);
1655 return VERR_INVALID_PARAMETER;
1656 }
1657
1658#ifdef VBOX_BUGREF_9217
1659 if (RT_LIKELY( pVM->enmVMState >= VMSTATE_CREATING
1660 && pVM->enmVMState <= VMSTATE_TERMINATED
1661 && pVM->cCpus == pGVM->cCpus
1662 && pVM->pSession == pSession
1663 && pVM->pSelf == pVM))
1664#else
1665 if (RT_LIKELY( pVM->enmVMState >= VMSTATE_CREATING
1666 && pVM->enmVMState <= VMSTATE_TERMINATED
1667 && pVM->cCpus == pGVM->cCpus
1668 && pVM->pSession == pSession
1669 && pVM->pVMR0 == pVM))
1670#endif
1671 { /* likely */ }
1672 else
1673 {
1674#ifdef VBOX_BUGREF_9217
1675 SUPR0Printf("vmmR0EntryExWorker: Invalid pVM=%p:{.enmVMState=%d, .cCpus=%#x(==%#x), .pSession=%p(==%p), .pSelf=%p(==%p)}! (op=%d)\n",
1676 pVM, pVM->enmVMState, pVM->cCpus, pGVM->cCpus, pVM->pSession, pSession, pVM->pSelf, pVM, enmOperation);
1677#else
1678 SUPR0Printf("vmmR0EntryExWorker: Invalid pVM=%p:{.enmVMState=%d, .cCpus=%#x(==%#x), .pSession=%p(==%p), .pVMR0=%p(==%p)}! (op=%d)\n",
1679 pVM, pVM->enmVMState, pVM->cCpus, pGVM->cCpus, pVM->pSession, pSession, pVM->pVMR0, pVM, enmOperation);
1680#endif
1681 return VERR_INVALID_POINTER;
1682 }
1683 }
1684 else if (RT_LIKELY(idCpu == NIL_VMCPUID))
1685 { /* likely */ }
1686 else
1687 {
1688 SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu=%u\n", idCpu);
1689 return VERR_INVALID_PARAMETER;
1690 }
1691
1692 /*
1693 * SMAP fun.
1694 */
1695 VMM_CHECK_SMAP_SETUP();
1696 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
1697
1698 /*
1699 * Process the request.
1700 */
1701 int rc;
1702 switch (enmOperation)
1703 {
1704 /*
1705 * GVM requests
1706 */
1707 case VMMR0_DO_GVMM_CREATE_VM:
1708 if (pGVM == NULL && pVM == NULL && u64Arg == 0 && idCpu == NIL_VMCPUID)
1709 rc = GVMMR0CreateVMReq((PGVMMCREATEVMREQ)pReqHdr, pSession);
1710 else
1711 rc = VERR_INVALID_PARAMETER;
1712 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
1713 break;
1714
1715 case VMMR0_DO_GVMM_DESTROY_VM:
1716 if (pReqHdr == NULL && u64Arg == 0)
1717 rc = GVMMR0DestroyVM(pGVM, pVM);
1718 else
1719 rc = VERR_INVALID_PARAMETER;
1720 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
1721 break;
1722
1723 case VMMR0_DO_GVMM_REGISTER_VMCPU:
1724 if (pGVM != NULL && pVM != NULL)
1725 rc = GVMMR0RegisterVCpu(pGVM, pVM, idCpu);
1726 else
1727 rc = VERR_INVALID_PARAMETER;
1728 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1729 break;
1730
1731 case VMMR0_DO_GVMM_DEREGISTER_VMCPU:
1732 if (pGVM != NULL && pVM != NULL)
1733 rc = GVMMR0DeregisterVCpu(pGVM, pVM, idCpu);
1734 else
1735 rc = VERR_INVALID_PARAMETER;
1736 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1737 break;
1738
1739 case VMMR0_DO_GVMM_SCHED_HALT:
1740 if (pReqHdr)
1741 return VERR_INVALID_PARAMETER;
1742 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1743 rc = GVMMR0SchedHaltReq(pGVM, pVM, idCpu, u64Arg);
1744 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1745 break;
1746
1747 case VMMR0_DO_GVMM_SCHED_WAKE_UP:
1748 if (pReqHdr || u64Arg)
1749 return VERR_INVALID_PARAMETER;
1750 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1751 rc = GVMMR0SchedWakeUp(pGVM, pVM, idCpu);
1752 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1753 break;
1754
1755 case VMMR0_DO_GVMM_SCHED_POKE:
1756 if (pReqHdr || u64Arg)
1757 return VERR_INVALID_PARAMETER;
1758 rc = GVMMR0SchedPoke(pGVM, pVM, idCpu);
1759 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1760 break;
1761
1762 case VMMR0_DO_GVMM_SCHED_WAKE_UP_AND_POKE_CPUS:
1763 if (u64Arg)
1764 return VERR_INVALID_PARAMETER;
1765 rc = GVMMR0SchedWakeUpAndPokeCpusReq(pGVM, pVM, (PGVMMSCHEDWAKEUPANDPOKECPUSREQ)pReqHdr);
1766 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1767 break;
1768
1769 case VMMR0_DO_GVMM_SCHED_POLL:
1770 if (pReqHdr || u64Arg > 1)
1771 return VERR_INVALID_PARAMETER;
1772 rc = GVMMR0SchedPoll(pGVM, pVM, idCpu, !!u64Arg);
1773 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1774 break;
1775
1776 case VMMR0_DO_GVMM_QUERY_STATISTICS:
1777 if (u64Arg)
1778 return VERR_INVALID_PARAMETER;
1779 rc = GVMMR0QueryStatisticsReq(pGVM, pVM, (PGVMMQUERYSTATISTICSSREQ)pReqHdr, pSession);
1780 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1781 break;
1782
1783 case VMMR0_DO_GVMM_RESET_STATISTICS:
1784 if (u64Arg)
1785 return VERR_INVALID_PARAMETER;
1786 rc = GVMMR0ResetStatisticsReq(pGVM, pVM, (PGVMMRESETSTATISTICSSREQ)pReqHdr, pSession);
1787 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1788 break;
1789
1790 /*
1791 * Initialize the R0 part of a VM instance.
1792 */
1793 case VMMR0_DO_VMMR0_INIT:
1794 rc = vmmR0InitVM(pGVM, pVM, RT_LODWORD(u64Arg), RT_HIDWORD(u64Arg));
1795 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1796 break;
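            /*
             * Note: the two dwords unpacked above are the ring-3 caller's SVN revision
             * (low) and build type (high) as consumed by vmmR0InitVM; composing them as
             *      u64Arg = RT_MAKE_U64(uSvnRev, uBuildType);
             * on the ring-3 side is shown only as an assumption, since that code is not
             * part of this file.
             */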
1797
1798 /*
1799 * Does EMT specific ring-0 init.
1800 */
1801 case VMMR0_DO_VMMR0_INIT_EMT:
1802 rc = vmmR0InitVMEmt(pGVM, pVM, idCpu);
1803 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1804 break;
1805
1806 /*
1807 * Terminate the R0 part of a VM instance.
1808 */
1809 case VMMR0_DO_VMMR0_TERM:
1810 rc = VMMR0TermVM(pGVM, pVM, 0 /*idCpu*/);
1811 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1812 break;
1813
1814 /*
1815 * Attempt to enable hm mode and check the current setting.
1816 */
1817 case VMMR0_DO_HM_ENABLE:
1818 rc = HMR0EnableAllCpus(pVM);
1819 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1820 break;
1821
1822 /*
1823 * Setup the hardware accelerated session.
1824 */
1825 case VMMR0_DO_HM_SETUP_VM:
1826 rc = HMR0SetupVM(pVM);
1827 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1828 break;
1829
1830 /*
1831 * PGM wrappers.
1832 */
1833 case VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES:
1834 if (idCpu == NIL_VMCPUID)
1835 return VERR_INVALID_CPU_ID;
1836 rc = PGMR0PhysAllocateHandyPages(pGVM, pVM, idCpu);
1837 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1838 break;
1839
1840 case VMMR0_DO_PGM_FLUSH_HANDY_PAGES:
1841 if (idCpu == NIL_VMCPUID)
1842 return VERR_INVALID_CPU_ID;
1843 rc = PGMR0PhysFlushHandyPages(pGVM, pVM, idCpu);
1844 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1845 break;
1846
1847 case VMMR0_DO_PGM_ALLOCATE_LARGE_HANDY_PAGE:
1848 if (idCpu == NIL_VMCPUID)
1849 return VERR_INVALID_CPU_ID;
1850 rc = PGMR0PhysAllocateLargeHandyPage(pGVM, pVM, idCpu);
1851 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1852 break;
1853
1854 case VMMR0_DO_PGM_PHYS_SETUP_IOMMU:
1855 if (idCpu != 0)
1856 return VERR_INVALID_CPU_ID;
1857 rc = PGMR0PhysSetupIoMmu(pGVM, pVM);
1858 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1859 break;
1860
1861 /*
1862 * GMM wrappers.
1863 */
1864 case VMMR0_DO_GMM_INITIAL_RESERVATION:
1865 if (u64Arg)
1866 return VERR_INVALID_PARAMETER;
1867 rc = GMMR0InitialReservationReq(pGVM, pVM, idCpu, (PGMMINITIALRESERVATIONREQ)pReqHdr);
1868 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1869 break;
1870
1871 case VMMR0_DO_GMM_UPDATE_RESERVATION:
1872 if (u64Arg)
1873 return VERR_INVALID_PARAMETER;
1874 rc = GMMR0UpdateReservationReq(pGVM, pVM, idCpu, (PGMMUPDATERESERVATIONREQ)pReqHdr);
1875 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1876 break;
1877
1878 case VMMR0_DO_GMM_ALLOCATE_PAGES:
1879 if (u64Arg)
1880 return VERR_INVALID_PARAMETER;
1881 rc = GMMR0AllocatePagesReq(pGVM, pVM, idCpu, (PGMMALLOCATEPAGESREQ)pReqHdr);
1882 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1883 break;
1884
1885 case VMMR0_DO_GMM_FREE_PAGES:
1886 if (u64Arg)
1887 return VERR_INVALID_PARAMETER;
1888 rc = GMMR0FreePagesReq(pGVM, pVM, idCpu, (PGMMFREEPAGESREQ)pReqHdr);
1889 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1890 break;
1891
1892 case VMMR0_DO_GMM_FREE_LARGE_PAGE:
1893 if (u64Arg)
1894 return VERR_INVALID_PARAMETER;
1895 rc = GMMR0FreeLargePageReq(pGVM, pVM, idCpu, (PGMMFREELARGEPAGEREQ)pReqHdr);
1896 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1897 break;
1898
1899 case VMMR0_DO_GMM_QUERY_HYPERVISOR_MEM_STATS:
1900 if (u64Arg)
1901 return VERR_INVALID_PARAMETER;
1902 rc = GMMR0QueryHypervisorMemoryStatsReq((PGMMMEMSTATSREQ)pReqHdr);
1903 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1904 break;
1905
1906 case VMMR0_DO_GMM_QUERY_MEM_STATS:
1907 if (idCpu == NIL_VMCPUID)
1908 return VERR_INVALID_CPU_ID;
1909 if (u64Arg)
1910 return VERR_INVALID_PARAMETER;
1911 rc = GMMR0QueryMemoryStatsReq(pGVM, pVM, idCpu, (PGMMMEMSTATSREQ)pReqHdr);
1912 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1913 break;
1914
1915 case VMMR0_DO_GMM_BALLOONED_PAGES:
1916 if (u64Arg)
1917 return VERR_INVALID_PARAMETER;
1918 rc = GMMR0BalloonedPagesReq(pGVM, pVM, idCpu, (PGMMBALLOONEDPAGESREQ)pReqHdr);
1919 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1920 break;
1921
1922 case VMMR0_DO_GMM_MAP_UNMAP_CHUNK:
1923 if (u64Arg)
1924 return VERR_INVALID_PARAMETER;
1925 rc = GMMR0MapUnmapChunkReq(pGVM, pVM, (PGMMMAPUNMAPCHUNKREQ)pReqHdr);
1926 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1927 break;
1928
1929 case VMMR0_DO_GMM_SEED_CHUNK:
1930 if (pReqHdr)
1931 return VERR_INVALID_PARAMETER;
1932 rc = GMMR0SeedChunk(pGVM, pVM, idCpu, (RTR3PTR)u64Arg);
1933 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1934 break;
1935
1936 case VMMR0_DO_GMM_REGISTER_SHARED_MODULE:
1937 if (idCpu == NIL_VMCPUID)
1938 return VERR_INVALID_CPU_ID;
1939 if (u64Arg)
1940 return VERR_INVALID_PARAMETER;
1941 rc = GMMR0RegisterSharedModuleReq(pGVM, pVM, idCpu, (PGMMREGISTERSHAREDMODULEREQ)pReqHdr);
1942 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1943 break;
1944
1945 case VMMR0_DO_GMM_UNREGISTER_SHARED_MODULE:
1946 if (idCpu == NIL_VMCPUID)
1947 return VERR_INVALID_CPU_ID;
1948 if (u64Arg)
1949 return VERR_INVALID_PARAMETER;
1950 rc = GMMR0UnregisterSharedModuleReq(pGVM, pVM, idCpu, (PGMMUNREGISTERSHAREDMODULEREQ)pReqHdr);
1951 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1952 break;
1953
1954 case VMMR0_DO_GMM_RESET_SHARED_MODULES:
1955 if (idCpu == NIL_VMCPUID)
1956 return VERR_INVALID_CPU_ID;
1957 if ( u64Arg
1958 || pReqHdr)
1959 return VERR_INVALID_PARAMETER;
1960 rc = GMMR0ResetSharedModules(pGVM, pVM, idCpu);
1961 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1962 break;
1963
1964#ifdef VBOX_WITH_PAGE_SHARING
1965 case VMMR0_DO_GMM_CHECK_SHARED_MODULES:
1966 {
1967 if (idCpu == NIL_VMCPUID)
1968 return VERR_INVALID_CPU_ID;
1969 if ( u64Arg
1970 || pReqHdr)
1971 return VERR_INVALID_PARAMETER;
1972 rc = GMMR0CheckSharedModules(pGVM, pVM, idCpu);
1973 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1974 break;
1975 }
1976#endif
1977
1978#if defined(VBOX_STRICT) && HC_ARCH_BITS == 64
1979 case VMMR0_DO_GMM_FIND_DUPLICATE_PAGE:
1980 if (u64Arg)
1981 return VERR_INVALID_PARAMETER;
1982 rc = GMMR0FindDuplicatePageReq(pGVM, pVM, (PGMMFINDDUPLICATEPAGEREQ)pReqHdr);
1983 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1984 break;
1985#endif
1986
1987 case VMMR0_DO_GMM_QUERY_STATISTICS:
1988 if (u64Arg)
1989 return VERR_INVALID_PARAMETER;
1990 rc = GMMR0QueryStatisticsReq(pGVM, pVM, (PGMMQUERYSTATISTICSSREQ)pReqHdr);
1991 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1992 break;
1993
1994 case VMMR0_DO_GMM_RESET_STATISTICS:
1995 if (u64Arg)
1996 return VERR_INVALID_PARAMETER;
1997 rc = GMMR0ResetStatisticsReq(pGVM, pVM, (PGMMRESETSTATISTICSSREQ)pReqHdr);
1998 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1999 break;
2000
2001 /*
2002 * A quick GCFGM mock-up.
2003 */
2004 /** @todo GCFGM with proper access control, ring-3 management interface and all that. */
2005 case VMMR0_DO_GCFGM_SET_VALUE:
2006 case VMMR0_DO_GCFGM_QUERY_VALUE:
2007 {
2008 if (pGVM || pVM || !pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
2009 return VERR_INVALID_PARAMETER;
2010 PGCFGMVALUEREQ pReq = (PGCFGMVALUEREQ)pReqHdr;
2011 if (pReq->Hdr.cbReq != sizeof(*pReq))
2012 return VERR_INVALID_PARAMETER;
2013 if (enmOperation == VMMR0_DO_GCFGM_SET_VALUE)
2014 {
2015 rc = GVMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
2016 //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
2017 // rc = GMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
2018 }
2019 else
2020 {
2021 rc = GVMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
2022 //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
2023 // rc = GMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
2024 }
2025 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2026 break;
2027 }
2028
2029 /*
2030 * PDM Wrappers.
2031 */
2032 case VMMR0_DO_PDM_DRIVER_CALL_REQ_HANDLER:
2033 {
2034 if (!pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
2035 return VERR_INVALID_PARAMETER;
2036 rc = PDMR0DriverCallReqHandler(pGVM, pVM, (PPDMDRIVERCALLREQHANDLERREQ)pReqHdr);
2037 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2038 break;
2039 }
2040
2041 case VMMR0_DO_PDM_DEVICE_CALL_REQ_HANDLER:
2042 {
2043 if (!pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
2044 return VERR_INVALID_PARAMETER;
2045 rc = PDMR0DeviceCallReqHandler(pGVM, pVM, (PPDMDEVICECALLREQHANDLERREQ)pReqHdr);
2046 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2047 break;
2048 }
2049
2050 /*
2051 * Requests to the internal networking service.
2052 */
2053 case VMMR0_DO_INTNET_OPEN:
2054 {
2055 PINTNETOPENREQ pReq = (PINTNETOPENREQ)pReqHdr;
2056 if (u64Arg || !pReq || !vmmR0IsValidSession(pVM, pReq->pSession, pSession) || idCpu != NIL_VMCPUID)
2057 return VERR_INVALID_PARAMETER;
2058 rc = IntNetR0OpenReq(pSession, pReq);
2059 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2060 break;
2061 }
2062
2063 case VMMR0_DO_INTNET_IF_CLOSE:
2064 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFCLOSEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2065 return VERR_INVALID_PARAMETER;
2066 rc = IntNetR0IfCloseReq(pSession, (PINTNETIFCLOSEREQ)pReqHdr);
2067 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2068 break;
2069
2070
2071 case VMMR0_DO_INTNET_IF_GET_BUFFER_PTRS:
2072 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFGETBUFFERPTRSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2073 return VERR_INVALID_PARAMETER;
2074 rc = IntNetR0IfGetBufferPtrsReq(pSession, (PINTNETIFGETBUFFERPTRSREQ)pReqHdr);
2075 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2076 break;
2077
2078 case VMMR0_DO_INTNET_IF_SET_PROMISCUOUS_MODE:
2079 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2080 return VERR_INVALID_PARAMETER;
2081 rc = IntNetR0IfSetPromiscuousModeReq(pSession, (PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr);
2082 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2083 break;
2084
2085 case VMMR0_DO_INTNET_IF_SET_MAC_ADDRESS:
2086 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETMACADDRESSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2087 return VERR_INVALID_PARAMETER;
2088 rc = IntNetR0IfSetMacAddressReq(pSession, (PINTNETIFSETMACADDRESSREQ)pReqHdr);
2089 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2090 break;
2091
2092 case VMMR0_DO_INTNET_IF_SET_ACTIVE:
2093 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETACTIVEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2094 return VERR_INVALID_PARAMETER;
2095 rc = IntNetR0IfSetActiveReq(pSession, (PINTNETIFSETACTIVEREQ)pReqHdr);
2096 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2097 break;
2098
2099 case VMMR0_DO_INTNET_IF_SEND:
2100 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2101 return VERR_INVALID_PARAMETER;
2102 rc = IntNetR0IfSendReq(pSession, (PINTNETIFSENDREQ)pReqHdr);
2103 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2104 break;
2105
2106 case VMMR0_DO_INTNET_IF_WAIT:
2107 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2108 return VERR_INVALID_PARAMETER;
2109 rc = IntNetR0IfWaitReq(pSession, (PINTNETIFWAITREQ)pReqHdr);
2110 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2111 break;
2112
2113 case VMMR0_DO_INTNET_IF_ABORT_WAIT:
2114 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2115 return VERR_INVALID_PARAMETER;
2116 rc = IntNetR0IfAbortWaitReq(pSession, (PINTNETIFABORTWAITREQ)pReqHdr);
2117 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2118 break;
2119
2120#ifdef VBOX_WITH_PCI_PASSTHROUGH
2121 /*
2122 * Requests to host PCI driver service.
2123 */
2124 case VMMR0_DO_PCIRAW_REQ:
2125 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PPCIRAWSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2126 return VERR_INVALID_PARAMETER;
2127 rc = PciRawR0ProcessReq(pGVM, pVM, pSession, (PPCIRAWSENDREQ)pReqHdr);
2128 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2129 break;
2130#endif
2131
2132 /*
2133 * NEM requests.
2134 */
2135#ifdef VBOX_WITH_NEM_R0
2136# if defined(RT_ARCH_AMD64) && defined(RT_OS_WINDOWS)
2137 case VMMR0_DO_NEM_INIT_VM:
2138 if (u64Arg || pReqHdr || idCpu != 0)
2139 return VERR_INVALID_PARAMETER;
2140 rc = NEMR0InitVM(pGVM, pVM);
2141 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2142 break;
2143
2144 case VMMR0_DO_NEM_INIT_VM_PART_2:
2145 if (u64Arg || pReqHdr || idCpu != 0)
2146 return VERR_INVALID_PARAMETER;
2147 rc = NEMR0InitVMPart2(pGVM, pVM);
2148 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2149 break;
2150
2151 case VMMR0_DO_NEM_MAP_PAGES:
2152 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2153 return VERR_INVALID_PARAMETER;
2154 rc = NEMR0MapPages(pGVM, pVM, idCpu);
2155 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2156 break;
2157
2158 case VMMR0_DO_NEM_UNMAP_PAGES:
2159 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2160 return VERR_INVALID_PARAMETER;
2161 rc = NEMR0UnmapPages(pGVM, pVM, idCpu);
2162 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2163 break;
2164
2165 case VMMR0_DO_NEM_EXPORT_STATE:
2166 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2167 return VERR_INVALID_PARAMETER;
2168 rc = NEMR0ExportState(pGVM, pVM, idCpu);
2169 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2170 break;
2171
2172 case VMMR0_DO_NEM_IMPORT_STATE:
2173 if (pReqHdr || idCpu == NIL_VMCPUID)
2174 return VERR_INVALID_PARAMETER;
2175 rc = NEMR0ImportState(pGVM, pVM, idCpu, u64Arg);
2176 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2177 break;
2178
2179 case VMMR0_DO_NEM_QUERY_CPU_TICK:
2180 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2181 return VERR_INVALID_PARAMETER;
2182 rc = NEMR0QueryCpuTick(pGVM, pVM, idCpu);
2183 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2184 break;
2185
2186 case VMMR0_DO_NEM_RESUME_CPU_TICK_ON_ALL:
2187 if (pReqHdr || idCpu == NIL_VMCPUID)
2188 return VERR_INVALID_PARAMETER;
2189 rc = NEMR0ResumeCpuTickOnAll(pGVM, pVM, idCpu, u64Arg);
2190 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2191 break;
2192
2193 case VMMR0_DO_NEM_UPDATE_STATISTICS:
2194 if (u64Arg || pReqHdr)
2195 return VERR_INVALID_PARAMETER;
2196 rc = NEMR0UpdateStatistics(pGVM, pVM, idCpu);
2197 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2198 break;
2199
2200# if 1 && defined(DEBUG_bird)
2201 case VMMR0_DO_NEM_EXPERIMENT:
2202 if (pReqHdr)
2203 return VERR_INVALID_PARAMETER;
2204 rc = NEMR0DoExperiment(pGVM, pVM, idCpu, u64Arg);
2205 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2206 break;
2207# endif
2208# endif
2209#endif
2210
2211 /*
2212 * For profiling.
2213 */
2214 case VMMR0_DO_NOP:
2215 case VMMR0_DO_SLOW_NOP:
2216 return VINF_SUCCESS;
2217
2218 /*
2219 * For testing Ring-0 APIs invoked in this environment.
2220 */
2221 case VMMR0_DO_TESTS:
2222 /** @todo make new test */
2223 return VINF_SUCCESS;
2224
2225 default:
2226 /*
2227 * We're returning VERR_NOT_SUPPORT here so we've got something else
2228             * We're returning VERR_NOT_SUPPORTED here so we've got something other
2229             * than -1, which the interrupt gate glue code might return.
2230 Log(("operation %#x is not supported\n", enmOperation));
2231 return VERR_NOT_SUPPORTED;
2232 }
2233 return rc;
2234}
2235
2236
2237/**
2238 * Argument for vmmR0EntryExWrapper containing the arguments for VMMR0EntryEx.
2239 */
2240typedef struct VMMR0ENTRYEXARGS
2241{
2242 PGVM pGVM;
2243 PVMCC pVM;
2244 VMCPUID idCpu;
2245 VMMR0OPERATION enmOperation;
2246 PSUPVMMR0REQHDR pReq;
2247 uint64_t u64Arg;
2248 PSUPDRVSESSION pSession;
2249} VMMR0ENTRYEXARGS;
2250/** Pointer to a vmmR0EntryExWrapper argument package. */
2251typedef VMMR0ENTRYEXARGS *PVMMR0ENTRYEXARGS;
2252
2253/**
2254 * This is just a longjmp wrapper function for VMMR0EntryEx calls.
2255 *
2256 * @returns VBox status code.
2257 * @param pvArgs The argument package.
2258 */
2259static DECLCALLBACK(int) vmmR0EntryExWrapper(void *pvArgs)
2260{
2261 return vmmR0EntryExWorker(((PVMMR0ENTRYEXARGS)pvArgs)->pGVM,
2262 ((PVMMR0ENTRYEXARGS)pvArgs)->pVM,
2263 ((PVMMR0ENTRYEXARGS)pvArgs)->idCpu,
2264 ((PVMMR0ENTRYEXARGS)pvArgs)->enmOperation,
2265 ((PVMMR0ENTRYEXARGS)pvArgs)->pReq,
2266 ((PVMMR0ENTRYEXARGS)pvArgs)->u64Arg,
2267 ((PVMMR0ENTRYEXARGS)pvArgs)->pSession);
2268}
2269
2270
2271/**
2272 * The Ring 0 entry point, called by the support library (SUP).
2273 *
2274 * @returns VBox status code.
2275 * @param pGVM The global (ring-0) VM structure.
2276 * @param pVM The cross context VM structure.
2277 * @param idCpu Virtual CPU ID argument. Must be NIL_VMCPUID if pVM
2278 * is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't
2279 * is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't.
2280 * @param pReq Pointer to the SUPVMMR0REQHDR packet. Optional.
2281 * @param u64Arg Some simple constant argument.
2282 * @param pSession The session of the caller.
2283 * @remarks Assume called with interrupts _enabled_.
2284 */
2285VMMR0DECL(int) VMMR0EntryEx(PGVM pGVM, PVMCC pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation,
2286 PSUPVMMR0REQHDR pReq, uint64_t u64Arg, PSUPDRVSESSION pSession)
2287{
2288 /*
2289 * Requests that should only happen on the EMT thread will be
2290 * wrapped in a setjmp so we can assert without causing trouble.
2291 */
2292 if ( pVM != NULL
2293 && pGVM != NULL
2294 && idCpu < pGVM->cCpus
2295 && pVM->pSession == pSession
2296#ifdef VBOX_BUGREF_9217
2297 && pVM->pSelf != NULL
2298#else
2299 && pVM->pVMR0 != NULL
2300#endif
2301 )
2302 {
2303 switch (enmOperation)
2304 {
2305 /* These might/will be called before VMMR3Init. */
2306 case VMMR0_DO_GMM_INITIAL_RESERVATION:
2307 case VMMR0_DO_GMM_UPDATE_RESERVATION:
2308 case VMMR0_DO_GMM_ALLOCATE_PAGES:
2309 case VMMR0_DO_GMM_FREE_PAGES:
2310 case VMMR0_DO_GMM_BALLOONED_PAGES:
2311            /* On the Mac we might not have a valid jmp buf, so check these as well. */
2312 case VMMR0_DO_VMMR0_INIT:
2313 case VMMR0_DO_VMMR0_TERM:
2314 {
2315 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2316#ifdef VBOX_BUGREF_9217
2317 PVMCPUCC pVCpu = pGVCpu;
2318#else
2319 PVMCPUCC pVCpu = VMCC_GET_CPU(pVM, idCpu);
2320#endif
2321 RTNATIVETHREAD hNativeThread = RTThreadNativeSelf();
2322 if (RT_LIKELY( pGVCpu->hEMT == hNativeThread
2323 && pVCpu->hNativeThreadR0 == hNativeThread))
2324 {
2325 if (!pVCpu->vmm.s.CallRing3JmpBufR0.pvSavedStack)
2326 break;
2327
2328 /** @todo validate this EMT claim... GVM knows. */
2329 VMMR0ENTRYEXARGS Args;
2330 Args.pGVM = pGVM;
2331 Args.pVM = pVM;
2332 Args.idCpu = idCpu;
2333 Args.enmOperation = enmOperation;
2334 Args.pReq = pReq;
2335 Args.u64Arg = u64Arg;
2336 Args.pSession = pSession;
2337 return vmmR0CallRing3SetJmpEx(&pVCpu->vmm.s.CallRing3JmpBufR0, vmmR0EntryExWrapper, &Args);
2338 }
2339 return VERR_VM_THREAD_NOT_EMT;
2340 }
2341
2342 default:
2343 break;
2344 }
2345 }
2346 return vmmR0EntryExWorker(pGVM, pVM, idCpu, enmOperation, pReq, u64Arg, pSession);
2347}
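#if 0 /* Illustrative sketch only; not part of the original file. */
/**
 * Example (assumption-laden) of how a GCFGM query could be packaged for the
 * entry point above.  The request layout mirrors the fields validated by the
 * GCFGM case in vmmR0EntryExWorker (Hdr.cbReq, pSession, szName, u64Value);
 * the function itself, the direct call and the header magic constant are
 * assumptions, as real requests arrive from ring-3 through the support driver
 * rather than by calling VMMR0EntryEx directly.
 */
static int exampleQueryGVMMConfig(PSUPDRVSESSION pSession, const char *pszName, uint64_t *pu64Value)
{
    GCFGMVALUEREQ Req;
    RT_ZERO(Req);
    Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;    /* standard VMMR0 request header magic */
    Req.Hdr.cbReq    = sizeof(Req);             /* size is checked by the GCFGM case */
    Req.pSession     = pSession;
    RTStrCopy(Req.szName, sizeof(Req.szName), pszName);

    /* GCFGM requests take no VM context: pGVM/pVM are NULL and idCpu is NIL_VMCPUID. */
    int rc = VMMR0EntryEx(NULL /*pGVM*/, NULL /*pVM*/, NIL_VMCPUID, VMMR0_DO_GCFGM_QUERY_VALUE,
                          &Req.Hdr, 0 /*u64Arg*/, pSession);
    if (RT_SUCCESS(rc))
        *pu64Value = Req.u64Value;
    return rc;
}
#endif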
2348
2349
2350/**
2351 * Checks whether we've armed the ring-0 long jump machinery.
2352 *
2353 * @returns @c true / @c false
2354 * @param pVCpu The cross context virtual CPU structure.
2355 * @thread EMT
2356 * @sa VMMIsLongJumpArmed
2357 */
2358VMMR0_INT_DECL(bool) VMMR0IsLongJumpArmed(PVMCPUCC pVCpu)
2359{
2360#ifdef RT_ARCH_X86
2361 return pVCpu->vmm.s.CallRing3JmpBufR0.eip
2362 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
2363#else
2364 return pVCpu->vmm.s.CallRing3JmpBufR0.rip
2365 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
2366#endif
2367}
2368
2369
2370/**
2371 * Checks whether we've done a ring-3 long jump.
2372 *
2373 * @returns @c true / @c false
2374 * @param pVCpu The cross context virtual CPU structure.
2375 * @thread EMT
2376 */
2377VMMR0_INT_DECL(bool) VMMR0IsInRing3LongJump(PVMCPUCC pVCpu)
2378{
2379 return pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
2380}
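#if 0 /* Illustrative sketch only; not part of the original file. */
/**
 * Example use of the two predicates above when a ring-0 path wants to defer
 * work to ring-3.  The function and the choice of status code are
 * hypothetical; the VMMRZCallRing3 call mirrors the pattern used by the
 * logger flushing code further down.
 */
static int exampleDeferToRing3(PVMCC pVM, PVMCPUCC pVCpu)
{
    /* Only take the ring-3 detour when the longjmp machinery is armed and we
       aren't already servicing a ring-3 call. */
    if (   !VMMR0IsLongJumpArmed(pVCpu)
        || VMMR0IsInRing3LongJump(pVCpu))
        return VERR_VM_THREAD_NOT_EMT;
    return VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VMM_LOGGER_FLUSH, 0);
}
#endif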
2381
2382
2383/**
2384 * Internal R0 logger worker: Flush logger.
2385 *
2386 * @param pLogger The logger instance to flush.
2387 * @remark This function must be exported!
2388 */
2389VMMR0DECL(void) vmmR0LoggerFlush(PRTLOGGER pLogger)
2390{
2391#ifdef LOG_ENABLED
2392 /*
2393 * Convert the pLogger into a VM handle and 'call' back to Ring-3.
2394     * (This code is a bit paranoid.)
2395 */
2396 PVMMR0LOGGER pR0Logger = (PVMMR0LOGGER)((uintptr_t)pLogger - RT_UOFFSETOF(VMMR0LOGGER, Logger));
2397 if ( !VALID_PTR(pR0Logger)
2398 || !VALID_PTR(pR0Logger + 1)
2399 || pLogger->u32Magic != RTLOGGER_MAGIC)
2400 {
2401# ifdef DEBUG
2402 SUPR0Printf("vmmR0LoggerFlush: pLogger=%p!\n", pLogger);
2403# endif
2404 return;
2405 }
2406 if (pR0Logger->fFlushingDisabled)
2407 return; /* quietly */
2408
2409 PVMCC pVM = pR0Logger->pVM;
2410 if ( !VALID_PTR(pVM)
2411# ifdef VBOX_BUGREF_9217
2412 || pVM->pSelf != pVM
2413# else
2414 || pVM->pVMR0 != pVM
2415# endif
2416 )
2417 {
2418# ifdef DEBUG
2419# ifdef VBOX_BUGREF_9217
2420 SUPR0Printf("vmmR0LoggerFlush: pVM=%p! pSelf=%p! pLogger=%p\n", pVM, pVM->pSelf, pLogger);
2421# else
2422 SUPR0Printf("vmmR0LoggerFlush: pVM=%p! pVMR0=%p! pLogger=%p\n", pVM, pVM->pVMR0, pLogger);
2423# endif
2424# endif
2425 return;
2426 }
2427
2428 PVMCPUCC pVCpu = VMMGetCpu(pVM);
2429 if (pVCpu)
2430 {
2431 /*
2432 * Check that the jump buffer is armed.
2433 */
2434# ifdef RT_ARCH_X86
2435 if ( !pVCpu->vmm.s.CallRing3JmpBufR0.eip
2436 || pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2437# else
2438 if ( !pVCpu->vmm.s.CallRing3JmpBufR0.rip
2439 || pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2440# endif
2441 {
2442# ifdef DEBUG
2443 SUPR0Printf("vmmR0LoggerFlush: Jump buffer isn't armed!\n");
2444# endif
2445 return;
2446 }
2447 VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VMM_LOGGER_FLUSH, 0);
2448 }
2449# ifdef DEBUG
2450 else
2451 SUPR0Printf("vmmR0LoggerFlush: invalid VCPU context!\n");
2452# endif
2453#else
2454 NOREF(pLogger);
2455#endif /* LOG_ENABLED */
2456}
2457
2458#ifdef LOG_ENABLED
2459
2460/**
2461 * Disables flushing of the ring-0 debug log.
2462 *
2463 * @param pVCpu The cross context virtual CPU structure.
2464 */
2465VMMR0_INT_DECL(void) VMMR0LogFlushDisable(PVMCPUCC pVCpu)
2466{
2467 if (pVCpu->vmm.s.pR0LoggerR0)
2468 pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled = true;
2469 if (pVCpu->vmm.s.pR0RelLoggerR0)
2470 pVCpu->vmm.s.pR0RelLoggerR0->fFlushingDisabled = true;
2471}
2472
2473
2474/**
2475 * Enables flushing of the ring-0 debug log.
2476 *
2477 * @param pVCpu The cross context virtual CPU structure.
2478 */
2479VMMR0_INT_DECL(void) VMMR0LogFlushEnable(PVMCPUCC pVCpu)
2480{
2481 if (pVCpu->vmm.s.pR0LoggerR0)
2482 pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled = false;
2483 if (pVCpu->vmm.s.pR0RelLoggerR0)
2484 pVCpu->vmm.s.pR0RelLoggerR0->fFlushingDisabled = false;
2485}
2486
2487
2488/**
2489 * Checks if log flushing is disabled or not.
2490 *
2491 * @param pVCpu The cross context virtual CPU structure.
2492 */
2493VMMR0_INT_DECL(bool) VMMR0IsLogFlushDisabled(PVMCPUCC pVCpu)
2494{
2495 if (pVCpu->vmm.s.pR0LoggerR0)
2496 return pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled;
2497 if (pVCpu->vmm.s.pR0RelLoggerR0)
2498 return pVCpu->vmm.s.pR0RelLoggerR0->fFlushingDisabled;
2499 return true;
2500}
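# if 0 /* Illustrative sketch only; not part of the original file. */
/**
 * Example pairing of the enable/disable helpers above around a region that
 * must not trigger a ring-3 logger flush (and thus a longjmp).  The function
 * and the work inside the bracket are hypothetical.
 */
static void exampleNoFlushRegion(PVMCPUCC pVCpu)
{
    VMMR0LogFlushDisable(pVCpu);
    Log(("exampleNoFlushRegion: work that must stay in ring-0 goes here\n"));
    VMMR0LogFlushEnable(pVCpu);
}
# endif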
2501
2502#endif /* LOG_ENABLED */
2503
2504/**
2505 * Override RTLogRelGetDefaultInstanceEx so we can do LogRel to VBox.log from EMTs in ring-0.
2506 */
2507DECLEXPORT(PRTLOGGER) RTLogRelGetDefaultInstanceEx(uint32_t fFlagsAndGroup)
2508{
2509 PGVMCPU pGVCpu = GVMMR0GetGVCpuByEMT(NIL_RTNATIVETHREAD);
2510 if (pGVCpu)
2511 {
2512#ifdef VBOX_BUGREF_9217
2513 PVMCPUCC pVCpu = pGVCpu;
2514#else
2515 PVMCPUCC pVCpu = pGVCpu->pVCpu;
2516#endif
2517 if (RT_VALID_PTR(pVCpu))
2518 {
2519 PVMMR0LOGGER pVmmLogger = pVCpu->vmm.s.pR0RelLoggerR0;
2520 if (RT_VALID_PTR(pVmmLogger))
2521 {
2522 if ( pVmmLogger->fCreated
2523#ifdef VBOX_BUGREF_9217
2524 && pVmmLogger->pVM == pGVCpu->pGVM
2525#else
2526 && pVmmLogger->pVM == pGVCpu->pVM
2527#endif
2528 )
2529 {
2530 if (pVmmLogger->Logger.fFlags & RTLOGFLAGS_DISABLED)
2531 return NULL;
2532 uint16_t const fFlags = RT_LO_U16(fFlagsAndGroup);
2533 uint16_t const iGroup = RT_HI_U16(fFlagsAndGroup);
2534 if ( iGroup != UINT16_MAX
2535 && ( ( pVmmLogger->Logger.afGroups[iGroup < pVmmLogger->Logger.cGroups ? iGroup : 0]
2536 & (fFlags | (uint32_t)RTLOGGRPFLAGS_ENABLED))
2537 != (fFlags | (uint32_t)RTLOGGRPFLAGS_ENABLED)))
2538 return NULL;
2539 return &pVmmLogger->Logger;
2540 }
2541 }
2542 }
2543 }
2544 return SUPR0GetDefaultLogRelInstanceEx(fFlagsAndGroup);
2545}
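#if 0 /* Illustrative sketch only; not part of the original file. */
/**
 * Example of the fFlagsAndGroup packing the override above takes apart with
 * RT_LO_U16/RT_HI_U16: the low 16 bits carry the group flags (level) and the
 * high 16 bits the log group index.  Composing the value with RT_MAKE_U32 and
 * the RTLOGGRPFLAGS_LEVEL_1/LOG_GROUP_VMM pair is just one plausible caller,
 * shown here as an assumption.
 */
static PRTLOGGER exampleGetRelLoggerForVmmLevel1(void)
{
    return RTLogRelGetDefaultInstanceEx(RT_MAKE_U32(RTLOGGRPFLAGS_LEVEL_1, LOG_GROUP_VMM));
}
#endif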
2546
2547
2548/**
2549 * Jump back to ring-3 if we're the EMT and the longjmp is armed.
2550 *
2551 * @returns true if the breakpoint should be hit, false if it should be ignored.
2552 */
2553DECLEXPORT(bool) RTCALL RTAssertShouldPanic(void)
2554{
2555#if 0
2556 return true;
2557#else
2558 PVMCC pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
2559 if (pVM)
2560 {
2561 PVMCPUCC pVCpu = VMMGetCpu(pVM);
2562
2563 if (pVCpu)
2564 {
2565#ifdef RT_ARCH_X86
2566 if ( pVCpu->vmm.s.CallRing3JmpBufR0.eip
2567 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2568#else
2569 if ( pVCpu->vmm.s.CallRing3JmpBufR0.rip
2570 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2571#endif
2572 {
2573 int rc = VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VM_R0_ASSERTION, 0);
2574 return RT_FAILURE_NP(rc);
2575 }
2576 }
2577 }
2578#ifdef RT_OS_LINUX
2579 return true;
2580#else
2581 return false;
2582#endif
2583#endif
2584}
2585
2586
2587/**
2588 * Override this so we can push it up to ring-3.
2589 *
2590 * @param pszExpr Expression. Can be NULL.
2591 * @param uLine Location line number.
2592 * @param pszFile Location file name.
2593 * @param pszFunction Location function name.
2594 */
2595DECLEXPORT(void) RTCALL RTAssertMsg1Weak(const char *pszExpr, unsigned uLine, const char *pszFile, const char *pszFunction)
2596{
2597 /*
2598 * To the log.
2599 */
2600 LogAlways(("\n!!R0-Assertion Failed!!\n"
2601 "Expression: %s\n"
2602 "Location : %s(%d) %s\n",
2603 pszExpr, pszFile, uLine, pszFunction));
2604
2605 /*
2606 * To the global VMM buffer.
2607 */
2608 PVMCC pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
2609 if (pVM)
2610 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg1, sizeof(pVM->vmm.s.szRing0AssertMsg1),
2611 "\n!!R0-Assertion Failed!!\n"
2612 "Expression: %.*s\n"
2613 "Location : %s(%d) %s\n",
2614 sizeof(pVM->vmm.s.szRing0AssertMsg1) / 4 * 3, pszExpr,
2615 pszFile, uLine, pszFunction);
2616
2617 /*
2618 * Continue the normal way.
2619 */
2620 RTAssertMsg1(pszExpr, uLine, pszFile, pszFunction);
2621}
2622
2623
2624/**
2625 * Callback for RTLogFormatV which writes to the ring-3 log port.
2626 * See PFNLOGOUTPUT() for details.
2627 */
2628static DECLCALLBACK(size_t) rtLogOutput(void *pv, const char *pachChars, size_t cbChars)
2629{
2630 for (size_t i = 0; i < cbChars; i++)
2631 {
2632 LogAlways(("%c", pachChars[i])); NOREF(pachChars);
2633 }
2634
2635 NOREF(pv);
2636 return cbChars;
2637}
2638
2639
2640/**
2641 * Override this so we can push it up to ring-3.
2642 *
2643 * @param pszFormat The format string.
2644 * @param va Arguments.
2645 */
2646DECLEXPORT(void) RTCALL RTAssertMsg2WeakV(const char *pszFormat, va_list va)
2647{
2648 va_list vaCopy;
2649
2650 /*
2651 * Push the message to the loggers.
2652 */
2653 PRTLOGGER pLog = RTLogGetDefaultInstance(); /* Don't initialize it here... */
2654 if (pLog)
2655 {
2656 va_copy(vaCopy, va);
2657 RTLogFormatV(rtLogOutput, pLog, pszFormat, vaCopy);
2658 va_end(vaCopy);
2659 }
2660 pLog = RTLogRelGetDefaultInstance();
2661 if (pLog)
2662 {
2663 va_copy(vaCopy, va);
2664 RTLogFormatV(rtLogOutput, pLog, pszFormat, vaCopy);
2665 va_end(vaCopy);
2666 }
2667
2668 /*
2669 * Push it to the global VMM buffer.
2670 */
2671 PVMCC pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
2672 if (pVM)
2673 {
2674 va_copy(vaCopy, va);
2675 RTStrPrintfV(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2), pszFormat, vaCopy);
2676 va_end(vaCopy);
2677 }
2678
2679 /*
2680 * Continue the normal way.
2681 */
2682 RTAssertMsg2V(pszFormat, va);
2683}
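#if 0 /* Illustrative sketch only; not part of the original file. */
/**
 * Example of how the assertion overrides in this file cooperate: a failing
 * AssertMsg in ring-0 routes the location through RTAssertMsg1Weak and the
 * message through RTAssertMsg2WeakV (both copied into the VM's ring-0
 * assertion buffers above), after which RTAssertShouldPanic decides whether
 * to hit the breakpoint or long-jump back to ring-3.  The function below is
 * hypothetical.
 */
static void exampleRing0Assertion(PVMCPUCC pVCpu)
{
    AssertMsg(!pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call,
              ("unexpected ring-3 call state on VCPU %u\n", pVCpu->idCpu));
}
#endif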
2684