VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/VMMR0.cpp@ 87758

Last change on this file since 87758 was 87666, checked in by vboxsync, 4 years ago

AMD IOMMU: bugref:9654 IOTLB cache bits. The IOTLB is currently only enabled in ring-3.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 98.9 KB
1/* $Id: VMMR0.cpp 87666 2021-02-09 17:08:04Z vboxsync $ */
2/** @file
3 * VMM - Host Context Ring 0.
4 */
5
6/*
7 * Copyright (C) 2006-2020 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_VMM
23#include <VBox/vmm/vmm.h>
24#include <VBox/sup.h>
25#include <VBox/vmm/iom.h>
26#include <VBox/vmm/trpm.h>
27#include <VBox/vmm/cpum.h>
28#include <VBox/vmm/pdmapi.h>
29#include <VBox/vmm/pgm.h>
30#ifdef VBOX_WITH_NEM_R0
31# include <VBox/vmm/nem.h>
32#endif
33#include <VBox/vmm/em.h>
34#include <VBox/vmm/stam.h>
35#include <VBox/vmm/tm.h>
36#include "VMMInternal.h"
37#include <VBox/vmm/vmcc.h>
38#include <VBox/vmm/gvm.h>
39#ifdef VBOX_WITH_PCI_PASSTHROUGH
40# include <VBox/vmm/pdmpci.h>
41#endif
42#include <VBox/vmm/apic.h>
43
44#include <VBox/vmm/gvmm.h>
45#include <VBox/vmm/gmm.h>
46#include <VBox/vmm/gim.h>
47#include <VBox/intnet.h>
48#include <VBox/vmm/hm.h>
49#include <VBox/param.h>
50#include <VBox/err.h>
51#include <VBox/version.h>
52#include <VBox/log.h>
53
54#include <iprt/asm-amd64-x86.h>
55#include <iprt/assert.h>
56#include <iprt/avl.h>
57#include <iprt/crc.h>
58#include <iprt/mp.h>
59#include <iprt/once.h>
60#include <iprt/stdarg.h>
61#include <iprt/string.h>
62#include <iprt/thread.h>
63#include <iprt/timer.h>
64#include <iprt/time.h>
65
66#include "dtrace/VBoxVMM.h"
67
68
69#if defined(_MSC_VER) && defined(RT_ARCH_AMD64) /** @todo check this with VC7! */
70# pragma intrinsic(_AddressOfReturnAddress)
71#endif
72
73#if defined(RT_OS_DARWIN) && ARCH_BITS == 32
74# error "32-bit darwin is no longer supported. Go back to 4.3 or earlier!"
75#endif
76
77
78
79/*********************************************************************************************************************************
80* Defined Constants And Macros *
81*********************************************************************************************************************************/
82/** @def VMM_CHECK_SMAP_SETUP
83 * SMAP check setup. */
84/** @def VMM_CHECK_SMAP_CHECK
85 * Checks that the AC flag is set if SMAP is enabled. If AC is not set,
86 * it will be logged and @a a_BadExpr is executed. */
87/** @def VMM_CHECK_SMAP_CHECK2
88 * Checks that the AC flag is set if SMAP is enabled. If AC is not set, it will
89 * be logged, written to the VM's assertion text buffer, and @a a_BadExpr is
90 * executed. */
91#if (defined(VBOX_STRICT) || 1) && !defined(VBOX_WITH_RAM_IN_KERNEL)
92# define VMM_CHECK_SMAP_SETUP() uint32_t const fKernelFeatures = SUPR0GetKernelFeatures()
93# define VMM_CHECK_SMAP_CHECK(a_BadExpr) \
94 do { \
95 if (fKernelFeatures & SUPKERNELFEATURES_SMAP) \
96 { \
97 RTCCUINTREG fEflCheck = ASMGetFlags(); \
98 if (RT_LIKELY(fEflCheck & X86_EFL_AC)) \
99 { /* likely */ } \
100 else \
101 { \
102 SUPR0Printf("%s, line %d: EFLAGS.AC is clear! (%#x)\n", __FUNCTION__, __LINE__, (uint32_t)fEflCheck); \
103 a_BadExpr; \
104 } \
105 } \
106 } while (0)
107# define VMM_CHECK_SMAP_CHECK2(a_pGVM, a_BadExpr) \
108 do { \
109 if (fKernelFeatures & SUPKERNELFEATURES_SMAP) \
110 { \
111 RTCCUINTREG fEflCheck = ASMGetFlags(); \
112 if (RT_LIKELY(fEflCheck & X86_EFL_AC)) \
113 { /* likely */ } \
114 else if (a_pGVM) \
115 { \
116 SUPR0BadContext((a_pGVM)->pSession, __FILE__, __LINE__, "EFLAGS.AC is zero!"); \
117 RTStrPrintf((a_pGVM)->vmm.s.szRing0AssertMsg1, sizeof((a_pGVM)->vmm.s.szRing0AssertMsg1), \
118 "%s, line %d: EFLAGS.AC is clear! (%#x)\n", __FUNCTION__, __LINE__, (uint32_t)fEflCheck); \
119 a_BadExpr; \
120 } \
121 else \
122 { \
123 SUPR0Printf("%s, line %d: EFLAGS.AC is clear! (%#x)\n", __FUNCTION__, __LINE__, (uint32_t)fEflCheck); \
124 a_BadExpr; \
125 } \
126 } \
127 } while (0)
128#else
129# define VMM_CHECK_SMAP_SETUP() uint32_t const fKernelFeatures = 0
130# define VMM_CHECK_SMAP_CHECK(a_BadExpr) NOREF(fKernelFeatures)
131# define VMM_CHECK_SMAP_CHECK2(a_pGVM, a_BadExpr) NOREF(fKernelFeatures)
132#endif
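/*
 * A minimal usage sketch of the SMAP macros above, mirroring the pattern used by
 * ModuleInit() and the VMMR0 entry points further down: take the kernel feature
 * snapshot once per function, then re-check EFLAGS.AC after every call that could
 * have clobbered it. The worker name vmmR0SomeWorker is purely illustrative.
 */
#if 0 /* illustrative sketch, not part of the build */
static int vmmR0SomeWorker(PGVM pGVM)
{
    VMM_CHECK_SMAP_SETUP();                                      /* snapshot SUPR0GetKernelFeatures() */
    VMM_CHECK_SMAP_CHECK(return VERR_VMM_SMAP_BUT_AC_CLEAR);     /* bail out if AC was cleared */

    int rc = GVMMR0InitVM(pGVM);                                 /* any call that may touch EFLAGS.AC */
    VMM_CHECK_SMAP_CHECK2(pGVM, rc = VERR_VMM_RING0_ASSERTION);  /* also records it in the VM structure */
    return rc;
}
#endif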
133
134
135/*********************************************************************************************************************************
136* Internal Functions *
137*********************************************************************************************************************************/
138RT_C_DECLS_BEGIN
139#if defined(RT_ARCH_X86) && (defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD))
140extern uint64_t __udivdi3(uint64_t, uint64_t);
141extern uint64_t __umoddi3(uint64_t, uint64_t);
142#endif
143RT_C_DECLS_END
144
145
146/*********************************************************************************************************************************
147* Global Variables *
148*********************************************************************************************************************************/
149/** Drag in necessary library bits.
150 * The runtime lives here (in VMMR0.r0) and VBoxDD*R0.r0 links against us. */
151struct CLANG11WEIRDNOTHROW { PFNRT pfn; } g_VMMR0Deps[] =
152{
153 { (PFNRT)RTAvlU64Destroy },
154 { (PFNRT)RTAvlU64DoWithAll },
155 { (PFNRT)RTAvlU64Get },
156 { (PFNRT)RTAvlU64Insert },
157 { (PFNRT)RTAvlU64Remove },
158 { (PFNRT)RTCrc32 },
159 { (PFNRT)RTOnce },
160#if defined(RT_ARCH_X86) && (defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD))
161 { (PFNRT)__udivdi3 },
162 { (PFNRT)__umoddi3 },
163#endif
164 { NULL }
165};
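/* Referencing the functions through this table keeps the linker from dropping them,
   so VBoxDD*R0.r0 can resolve them against VMMR0.r0 at load time. The odd
   CLANG11WEIRDNOTHROW wrapper struct presumably works around a Clang 11 complaint
   about mixing nothrow and plain function pointers; the name is the only hint here. */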
166
167#ifdef RT_OS_SOLARIS
168/* Dependency information for the native Solaris loader. */
169extern "C" { char _depends_on[] = "vboxdrv"; }
170#endif
171
172
173/**
174 * Initialize the module.
175 * This is called when we're first loaded.
176 *
177 * @returns 0 on success.
178 * @returns VBox status on failure.
179 * @param hMod Image handle for use in APIs.
180 */
181DECLEXPORT(int) ModuleInit(void *hMod)
182{
183 VMM_CHECK_SMAP_SETUP();
184 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
185
186#ifdef VBOX_WITH_DTRACE_R0
187 /*
188 * The first thing to do is register the static tracepoints.
189 * (Deregistration is automatic.)
190 */
191 int rc2 = SUPR0TracerRegisterModule(hMod, &g_VTGObjHeader);
192 if (RT_FAILURE(rc2))
193 return rc2;
194#endif
195 LogFlow(("ModuleInit:\n"));
196
197#ifdef VBOX_WITH_64ON32_CMOS_DEBUG
198 /*
199 * Display the CMOS debug code.
200 */
201 ASMOutU8(0x72, 0x03);
202 uint8_t bDebugCode = ASMInU8(0x73);
203 LogRel(("CMOS Debug Code: %#x (%d)\n", bDebugCode, bDebugCode));
204 RTLogComPrintf("CMOS Debug Code: %#x (%d)\n", bDebugCode, bDebugCode);
205#endif
206
207 /*
208 * Initialize the VMM, GVMM, GMM, HM, PGM (Darwin) and INTNET.
209 */
210 int rc = vmmInitFormatTypes();
211 if (RT_SUCCESS(rc))
212 {
213 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
214 rc = GVMMR0Init();
215 if (RT_SUCCESS(rc))
216 {
217 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
218 rc = GMMR0Init();
219 if (RT_SUCCESS(rc))
220 {
221 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
222 rc = HMR0Init();
223 if (RT_SUCCESS(rc))
224 {
225 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
226
227 PDMR0Init(hMod);
228 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
229
230 rc = PGMRegisterStringFormatTypes();
231 if (RT_SUCCESS(rc))
232 {
233 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
234#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
235 rc = PGMR0DynMapInit();
236#endif
237 if (RT_SUCCESS(rc))
238 {
239 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
240 rc = IntNetR0Init();
241 if (RT_SUCCESS(rc))
242 {
243#ifdef VBOX_WITH_PCI_PASSTHROUGH
244 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
245 rc = PciRawR0Init();
246#endif
247 if (RT_SUCCESS(rc))
248 {
249 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
250 rc = CPUMR0ModuleInit();
251 if (RT_SUCCESS(rc))
252 {
253#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
254 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
255 rc = vmmR0TripleFaultHackInit();
256 if (RT_SUCCESS(rc))
257#endif
258 {
259 VMM_CHECK_SMAP_CHECK(rc = VERR_VMM_SMAP_BUT_AC_CLEAR);
260 if (RT_SUCCESS(rc))
261 {
262 LogFlow(("ModuleInit: returns success\n"));
263 return VINF_SUCCESS;
264 }
265 }
266
267 /*
268 * Bail out.
269 */
270#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
271 vmmR0TripleFaultHackTerm();
272#endif
273 }
274 else
275 LogRel(("ModuleInit: CPUMR0ModuleInit -> %Rrc\n", rc));
276#ifdef VBOX_WITH_PCI_PASSTHROUGH
277 PciRawR0Term();
278#endif
279 }
280 else
281 LogRel(("ModuleInit: PciRawR0Init -> %Rrc\n", rc));
282 IntNetR0Term();
283 }
284 else
285 LogRel(("ModuleInit: IntNetR0Init -> %Rrc\n", rc));
286#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
287 PGMR0DynMapTerm();
288#endif
289 }
290 else
291 LogRel(("ModuleInit: PGMR0DynMapInit -> %Rrc\n", rc));
292 PGMDeregisterStringFormatTypes();
293 }
294 else
295 LogRel(("ModuleInit: PGMRegisterStringFormatTypes -> %Rrc\n", rc));
296 HMR0Term();
297 }
298 else
299 LogRel(("ModuleInit: HMR0Init -> %Rrc\n", rc));
300 GMMR0Term();
301 }
302 else
303 LogRel(("ModuleInit: GMMR0Init -> %Rrc\n", rc));
304 GVMMR0Term();
305 }
306 else
307 LogRel(("ModuleInit: GVMMR0Init -> %Rrc\n", rc));
308 vmmTermFormatTypes();
309 }
310 else
311 LogRel(("ModuleInit: vmmInitFormatTypes -> %Rrc\n", rc));
312
313 LogFlow(("ModuleInit: failed %Rrc\n", rc));
314 return rc;
315}
316
317
318/**
319 * Terminate the module.
320 * This is called when we're finally unloaded.
321 *
322 * @param hMod Image handle for use in APIs.
323 */
324DECLEXPORT(void) ModuleTerm(void *hMod)
325{
326 NOREF(hMod);
327 LogFlow(("ModuleTerm:\n"));
328
329 /*
330 * Terminate the CPUM module (Local APIC cleanup).
331 */
332 CPUMR0ModuleTerm();
333
334 /*
335 * Terminate the internal network service.
336 */
337 IntNetR0Term();
338
339 /*
340 * PGM (Darwin), HM and PciRaw global cleanup.
341 */
342#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
343 PGMR0DynMapTerm();
344#endif
345#ifdef VBOX_WITH_PCI_PASSTHROUGH
346 PciRawR0Term();
347#endif
348 PGMDeregisterStringFormatTypes();
349 HMR0Term();
350#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
351 vmmR0TripleFaultHackTerm();
352#endif
353
354 /*
355 * Destroy the GMM and GVMM instances.
356 */
357 GMMR0Term();
358 GVMMR0Term();
359
360 vmmTermFormatTypes();
361
362 LogFlow(("ModuleTerm: returns\n"));
363}
364
365
366/**
367 * Initiates the R0 driver for a particular VM instance.
368 *
369 * @returns VBox status code.
370 *
371 * @param pGVM The global (ring-0) VM structure.
372 * @param uSvnRev The SVN revision of the ring-3 part.
373 * @param uBuildType Build type indicator.
374 * @thread EMT(0)
375 */
376static int vmmR0InitVM(PGVM pGVM, uint32_t uSvnRev, uint32_t uBuildType)
377{
378 VMM_CHECK_SMAP_SETUP();
379 VMM_CHECK_SMAP_CHECK(return VERR_VMM_SMAP_BUT_AC_CLEAR);
380
381 /*
382 * Match the SVN revisions and build type.
383 */
384 if (uSvnRev != VMMGetSvnRev())
385 {
386 LogRel(("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev()));
387 SUPR0Printf("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev());
388 return VERR_VMM_R0_VERSION_MISMATCH;
389 }
390 if (uBuildType != vmmGetBuildType())
391 {
392 LogRel(("VMMR0InitVM: Build type mismatch, r3=%#x r0=%#x\n", uBuildType, vmmGetBuildType()));
393 SUPR0Printf("VMMR0InitVM: Build type mismatch, r3=%#x r0=%#x\n", uBuildType, vmmGetBuildType());
394 return VERR_VMM_R0_VERSION_MISMATCH;
395 }
396
397 int rc = GVMMR0ValidateGVMandEMT(pGVM, 0 /*idCpu*/);
398 if (RT_FAILURE(rc))
399 return rc;
400
401#ifdef LOG_ENABLED
402 /*
403 * Register the EMT R0 logger instance for VCPU 0.
404 */
405 PVMCPUCC pVCpu = VMCC_GET_CPU_0(pGVM);
406
407 PVMMR0LOGGER pR0Logger = pVCpu->vmm.s.pR0LoggerR0;
408 if (pR0Logger)
409 {
410# if 0 /* testing of the logger. */
411 LogCom(("vmmR0InitVM: before %p\n", RTLogDefaultInstance()));
412 LogCom(("vmmR0InitVM: pfnFlush=%p actual=%p\n", pR0Logger->Logger.pfnFlush, vmmR0LoggerFlush));
413 LogCom(("vmmR0InitVM: pfnLogger=%p actual=%p\n", pR0Logger->Logger.pfnLogger, vmmR0LoggerWrapper));
414 LogCom(("vmmR0InitVM: offScratch=%d fFlags=%#x fDestFlags=%#x\n", pR0Logger->Logger.offScratch, pR0Logger->Logger.fFlags, pR0Logger->Logger.fDestFlags));
415
416 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pGVM->pSession);
417 LogCom(("vmmR0InitVM: after %p reg\n", RTLogDefaultInstance()));
418 RTLogSetDefaultInstanceThread(NULL, pGVM->pSession);
419 LogCom(("vmmR0InitVM: after %p dereg\n", RTLogDefaultInstance()));
420
421 pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
422 LogCom(("vmmR0InitVM: returned successfully from direct logger call.\n"));
423 pR0Logger->Logger.pfnFlush(&pR0Logger->Logger);
424 LogCom(("vmmR0InitVM: returned successfully from direct flush call.\n"));
425
426 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pGVM->pSession);
427 LogCom(("vmmR0InitVM: after %p reg2\n", RTLogDefaultInstance()));
428 pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
429 LogCom(("vmmR0InitVM: returned successfully from direct logger call (2). offScratch=%d\n", pR0Logger->Logger.offScratch));
430 RTLogSetDefaultInstanceThread(NULL, pGVM->pSession);
431 LogCom(("vmmR0InitVM: after %p dereg2\n", RTLogDefaultInstance()));
432
433 RTLogLoggerEx(&pR0Logger->Logger, 0, ~0U, "hello ring-0 logger (RTLogLoggerEx)\n");
434 LogCom(("vmmR0InitVM: RTLogLoggerEx returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));
435
436 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pGVM->pSession);
437 RTLogPrintf("hello ring-0 logger (RTLogPrintf)\n");
438 LogCom(("vmmR0InitVM: RTLogPrintf returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));
439# endif
440 Log(("Switching to per-thread logging instance %p (key=%p)\n", &pR0Logger->Logger, pGVM->pSession));
441 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pGVM->pSession);
442 pR0Logger->fRegistered = true;
443 }
444#endif /* LOG_ENABLED */
445
446 /*
447 * Check if the host supports high resolution timers or not.
448 */
449 if ( pGVM->vmm.s.fUsePeriodicPreemptionTimers
450 && !RTTimerCanDoHighResolution())
451 pGVM->vmm.s.fUsePeriodicPreemptionTimers = false;
452
453 /*
454 * Initialize the per VM data for GVMM and GMM.
455 */
456 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
457 rc = GVMMR0InitVM(pGVM);
458 if (RT_SUCCESS(rc))
459 {
460 /*
461 * Init HM, CPUM and PGM (Darwin only).
462 */
463 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
464 rc = HMR0InitVM(pGVM);
465 if (RT_SUCCESS(rc))
466 VMM_CHECK_SMAP_CHECK2(pGVM, rc = VERR_VMM_RING0_ASSERTION); /* CPUR0InitVM will otherwise panic the host */
467 if (RT_SUCCESS(rc))
468 {
469 rc = CPUMR0InitVM(pGVM);
470 if (RT_SUCCESS(rc))
471 {
472 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
473 rc = PGMR0InitVM(pGVM);
474 if (RT_SUCCESS(rc))
475 {
476 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
477 rc = EMR0InitVM(pGVM);
478 if (RT_SUCCESS(rc))
479 {
480 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
481#ifdef VBOX_WITH_PCI_PASSTHROUGH
482 rc = PciRawR0InitVM(pGVM);
483#endif
484 if (RT_SUCCESS(rc))
485 {
486 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
487 rc = GIMR0InitVM(pGVM);
488 if (RT_SUCCESS(rc))
489 {
490 VMM_CHECK_SMAP_CHECK2(pGVM, rc = VERR_VMM_RING0_ASSERTION);
491 if (RT_SUCCESS(rc))
492 {
493 GVMMR0DoneInitVM(pGVM);
494
495 /*
496 * Collect a bit of info for the VM release log.
497 */
498 pGVM->vmm.s.fIsPreemptPendingApiTrusty = RTThreadPreemptIsPendingTrusty();
499 pGVM->vmm.s.fIsPreemptPossible = RTThreadPreemptIsPossible();;
500
501 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
502 return rc;
503 }
504
505 /* bail out */
506 GIMR0TermVM(pGVM);
507 }
508#ifdef VBOX_WITH_PCI_PASSTHROUGH
509 PciRawR0TermVM(pGVM);
510#endif
511 }
512 }
513 }
514 }
515 HMR0TermVM(pGVM);
516 }
517 }
518
519 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pGVM->pSession);
520 return rc;
521}
522
523
524/**
525 * Does EMT specific VM initialization.
526 *
527 * @returns VBox status code.
528 * @param pGVM The ring-0 VM structure.
529 * @param idCpu The EMT that's calling.
530 */
531static int vmmR0InitVMEmt(PGVM pGVM, VMCPUID idCpu)
532{
533 /* Paranoia (caller checked these already). */
534 AssertReturn(idCpu < pGVM->cCpus, VERR_INVALID_CPU_ID);
535 AssertReturn(pGVM->aCpus[idCpu].hEMT == RTThreadNativeSelf(), VERR_INVALID_CPU_ID);
536
537#ifdef LOG_ENABLED
538 /*
539 * Registration of ring 0 loggers.
540 */
541 PVMCPUCC pVCpu = &pGVM->aCpus[idCpu];
542 PVMMR0LOGGER pR0Logger = pVCpu->vmm.s.pR0LoggerR0;
543 if ( pR0Logger
544 && !pR0Logger->fRegistered)
545 {
546 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pGVM->pSession);
547 pR0Logger->fRegistered = true;
548 }
549#endif
550
551 return VINF_SUCCESS;
552}
553
554
555
556/**
557 * Terminates the R0 bits for a particular VM instance.
558 *
559 * This is normally called by ring-3 as part of the VM termination process, but
560 * may alternatively be called during the support driver session cleanup when
561 * the VM object is destroyed (see GVMM).
562 *
563 * @returns VBox status code.
564 *
565 * @param pGVM The global (ring-0) VM structure.
566 * @param idCpu Set to 0 if EMT(0) or NIL_VMCPUID if session cleanup
567 * thread.
568 * @thread EMT(0) or session clean up thread.
569 */
570VMMR0_INT_DECL(int) VMMR0TermVM(PGVM pGVM, VMCPUID idCpu)
571{
572 /*
573 * Check EMT(0) claim if we're called from userland.
574 */
575 if (idCpu != NIL_VMCPUID)
576 {
577 AssertReturn(idCpu == 0, VERR_INVALID_CPU_ID);
578 int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
579 if (RT_FAILURE(rc))
580 return rc;
581 }
582
583#ifdef VBOX_WITH_PCI_PASSTHROUGH
584 PciRawR0TermVM(pGVM);
585#endif
586
587 /*
588 * Tell GVMM what we're up to and check that we only do this once.
589 */
590 if (GVMMR0DoingTermVM(pGVM))
591 {
592 GIMR0TermVM(pGVM);
593
594 /** @todo I wish to call PGMR0PhysFlushHandyPages(pGVM, &pGVM->aCpus[idCpu])
595 * here to make sure we don't leak any shared pages if we crash... */
596#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
597 PGMR0DynMapTermVM(pGVM);
598#endif
599 HMR0TermVM(pGVM);
600 }
601
602 /*
603 * Deregister the logger.
604 */
605 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pGVM->pSession);
606 return VINF_SUCCESS;
607}
608
609
610/**
611 * An interrupt or unhalt force flag is set, deal with it.
612 *
613 * @returns VINF_SUCCESS (or VINF_EM_HALT).
614 * @param pVCpu The cross context virtual CPU structure.
615 * @param uMWait Result from EMMonitorWaitIsActive().
616 * @param enmInterruptibility Guest CPU interruptibility level.
617 */
618static int vmmR0DoHaltInterrupt(PVMCPUCC pVCpu, unsigned uMWait, CPUMINTERRUPTIBILITY enmInterruptibility)
619{
620 Assert(!TRPMHasTrap(pVCpu));
621 Assert( enmInterruptibility > CPUMINTERRUPTIBILITY_INVALID
622 && enmInterruptibility < CPUMINTERRUPTIBILITY_END);
623
624 /*
625 * Pending interrupts w/o any SMIs or NMIs? That's the usual case.
626 */
627 if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
628 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_INTERRUPT_NMI))
629 {
630 if (enmInterruptibility <= CPUMINTERRUPTIBILITY_UNRESTRAINED)
631 {
632 uint8_t u8Interrupt = 0;
633 int rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
634 Log(("vmmR0DoHaltInterrupt: CPU%d u8Interrupt=%d (%#x) rc=%Rrc\n", pVCpu->idCpu, u8Interrupt, u8Interrupt, rc));
635 if (RT_SUCCESS(rc))
636 {
637 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_UNHALT);
638
639 rc = TRPMAssertTrap(pVCpu, u8Interrupt, TRPM_HARDWARE_INT);
640 AssertRCSuccess(rc);
641 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltExec);
642 return rc;
643 }
644 }
645 }
646 /*
647 * SMI is not implemented yet, at least not here.
648 */
649 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_SMI))
650 {
651 return VINF_EM_HALT;
652 }
653 /*
654 * NMI.
655 */
656 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
657 {
658 if (enmInterruptibility < CPUMINTERRUPTIBILITY_NMI_INHIBIT)
659 {
660 /** @todo later. */
661 return VINF_EM_HALT;
662 }
663 }
664 /*
665 * Nested-guest virtual interrupt.
666 */
667 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST))
668 {
669 if (enmInterruptibility < CPUMINTERRUPTIBILITY_VIRT_INT_DISABLED)
670 {
671 /** @todo NSTVMX: NSTSVM: Remember, we might have to check and perform VM-exits
672 * here before injecting the virtual interrupt. See emR3ForcedActions
673 * for details. */
674 return VINF_EM_HALT;
675 }
676 }
677
678 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UNHALT))
679 {
680 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltExec);
681 return VINF_SUCCESS;
682 }
683 if (uMWait > 1)
684 {
685 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltExec);
686 return VINF_SUCCESS;
687 }
688
689 return VINF_EM_HALT;
690}
691
692
693/**
694 * This does one round of vmR3HaltGlobal1Halt().
695 *
696 * The rationale here is that we'll reduce latency in interrupt situations if we
697 * don't go to ring-3 immediately on a VINF_EM_HALT (guest executed HLT or
698 * MWAIT), but do one round of blocking here instead and hope the interrupt is
699 * raised in the meanwhile.
700 *
701 * If we go to ring-3 we'll quit the inner HM/NEM loop in EM and end up in the
702 * outer loop, which will then call VMR3WaitHalted() and that in turn will do a
703 * ring-0 call (unless we're too close to a timer event). When the interrupt
704 * wakes us up, we'll return from ring-0 and EM will by instinct do a
705 * rescheduling (because of raw-mode) before it resumes the HM/NEM loop and gets
706 * back to VMMR0EntryFast().
707 *
708 * @returns VINF_SUCCESS or VINF_EM_HALT.
709 * @param pGVM The ring-0 VM structure.
710 * @param pGVCpu The ring-0 virtual CPU structure.
711 *
712 * @todo r=bird: All the blocking/waiting and EMT management should move out of
713 * the VM module, probably to VMM. Then this would be more weird wrt
714 * parameters and statistics.
715 */
716static int vmmR0DoHalt(PGVM pGVM, PGVMCPU pGVCpu)
717{
718 /*
719 * Do spin stat historization.
720 */
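/* (Roughly every 256th halt the two counters are re-seeded so that only a small
   bias survives towards whichever outcome, staying in ring-0 or going to ring-3,
   won the previous window; the spin/block heuristic further down compares them.) */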
721 if (++pGVCpu->vmm.s.cR0Halts & 0xff)
722 { /* likely */ }
723 else if (pGVCpu->vmm.s.cR0HaltsSucceeded > pGVCpu->vmm.s.cR0HaltsToRing3)
724 {
725 pGVCpu->vmm.s.cR0HaltsSucceeded = 2;
726 pGVCpu->vmm.s.cR0HaltsToRing3 = 0;
727 }
728 else
729 {
730 pGVCpu->vmm.s.cR0HaltsSucceeded = 0;
731 pGVCpu->vmm.s.cR0HaltsToRing3 = 2;
732 }
733
734 /*
735 * Flags that make us go to ring-3.
736 */
737 uint32_t const fVmFFs = VM_FF_TM_VIRTUAL_SYNC | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA
738 | VM_FF_DBGF | VM_FF_REQUEST | VM_FF_CHECK_VM_STATE
739 | VM_FF_RESET | VM_FF_EMT_RENDEZVOUS | VM_FF_PGM_NEED_HANDY_PAGES
740 | VM_FF_PGM_NO_MEMORY | VM_FF_DEBUG_SUSPEND;
741 uint64_t const fCpuFFs = VMCPU_FF_TIMER | VMCPU_FF_PDM_CRITSECT | VMCPU_FF_IEM
742 | VMCPU_FF_REQUEST | VMCPU_FF_DBGF | VMCPU_FF_HM_UPDATE_CR3
743 | VMCPU_FF_HM_UPDATE_PAE_PDPES | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
744 | VMCPU_FF_TO_R3 | VMCPU_FF_IOM;
745
746 /*
747 * Check preconditions.
748 */
749 unsigned const uMWait = EMMonitorWaitIsActive(pGVCpu);
750 CPUMINTERRUPTIBILITY const enmInterruptibility = CPUMGetGuestInterruptibility(pGVCpu);
751 if ( pGVCpu->vmm.s.fMayHaltInRing0
752 && !TRPMHasTrap(pGVCpu)
753 && ( enmInterruptibility == CPUMINTERRUPTIBILITY_UNRESTRAINED
754 || uMWait > 1))
755 {
756 if ( !VM_FF_IS_ANY_SET(pGVM, fVmFFs)
757 && !VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs))
758 {
759 /*
760 * Interrupts pending already?
761 */
762 if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC))
763 APICUpdatePendingInterrupts(pGVCpu);
764
765 /*
766 * Flags that wake us up from the halted state.
767 */
768 uint64_t const fIntMask = VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_INTERRUPT_NESTED_GUEST
769 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT;
770
771 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask))
772 return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility);
773 ASMNopPause();
774
775 /*
776 * Check out how long till the next timer event.
777 */
778 uint64_t u64Delta;
779 uint64_t u64GipTime = TMTimerPollGIP(pGVM, pGVCpu, &u64Delta);
780
781 if ( !VM_FF_IS_ANY_SET(pGVM, fVmFFs)
782 && !VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs))
783 {
784 if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC))
785 APICUpdatePendingInterrupts(pGVCpu);
786
787 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask))
788 return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility);
789
790 /*
791 * Wait if there is enough time until the next timer event.
792 */
793 if (u64Delta >= pGVCpu->vmm.s.cNsSpinBlockThreshold)
794 {
795 /* If there are a few other CPU cores around, we will procrastinate a
796 little before going to sleep, hoping for some device to raise an
797 interrupt or similar. Though, the best thing here would be to
798 dynamically adjust the spin count according to its usefulness or
799 something... */
800 if ( pGVCpu->vmm.s.cR0HaltsSucceeded > pGVCpu->vmm.s.cR0HaltsToRing3
801 && RTMpGetOnlineCount() >= 4)
802 {
803 /** @todo Figure out how we can skip this if it hasn't helped recently...
804 * @bugref{9172#c12} */
805 uint32_t cSpinLoops = 42;
806 while (cSpinLoops-- > 0)
807 {
808 ASMNopPause();
809 if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC))
810 APICUpdatePendingInterrupts(pGVCpu);
811 ASMNopPause();
812 if (VM_FF_IS_ANY_SET(pGVM, fVmFFs))
813 {
814 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3FromSpin);
815 return VINF_EM_HALT;
816 }
817 ASMNopPause();
818 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs))
819 {
820 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3FromSpin);
821 return VINF_EM_HALT;
822 }
823 ASMNopPause();
824 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask))
825 {
826 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltExecFromSpin);
827 return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility);
828 }
829 ASMNopPause();
830 }
831 }
832
833 /* Block. We have to set the state to VMCPUSTATE_STARTED_HALTED here so ring-3
834 knows when to notify us (cannot access VMINTUSERPERVMCPU::fWait from here). */
835 VMCPU_CMPXCHG_STATE(pGVCpu, VMCPUSTATE_STARTED_HALTED, VMCPUSTATE_STARTED);
836 uint64_t const u64StartSchedHalt = RTTimeNanoTS();
837 int rc = GVMMR0SchedHalt(pGVM, pGVCpu, u64GipTime);
838 uint64_t const u64EndSchedHalt = RTTimeNanoTS();
839 uint64_t const cNsElapsedSchedHalt = u64EndSchedHalt - u64StartSchedHalt;
840 VMCPU_CMPXCHG_STATE(pGVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_HALTED);
841 STAM_REL_PROFILE_ADD_PERIOD(&pGVCpu->vmm.s.StatR0HaltBlock, cNsElapsedSchedHalt);
842 if ( rc == VINF_SUCCESS
843 || rc == VERR_INTERRUPTED)
844
845 {
846 /* Keep some stats like ring-3 does. */
847 int64_t const cNsOverslept = u64EndSchedHalt - u64GipTime;
848 if (cNsOverslept > 50000)
849 STAM_REL_PROFILE_ADD_PERIOD(&pGVCpu->vmm.s.StatR0HaltBlockOverslept, cNsOverslept);
850 else if (cNsOverslept < -50000)
851 STAM_REL_PROFILE_ADD_PERIOD(&pGVCpu->vmm.s.StatR0HaltBlockInsomnia, cNsElapsedSchedHalt);
852 else
853 STAM_REL_PROFILE_ADD_PERIOD(&pGVCpu->vmm.s.StatR0HaltBlockOnTime, cNsElapsedSchedHalt);
854
855 /*
856 * Recheck whether we can resume execution or have to go to ring-3.
857 */
858 if ( !VM_FF_IS_ANY_SET(pGVM, fVmFFs)
859 && !VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs))
860 {
861 if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC))
862 APICUpdatePendingInterrupts(pGVCpu);
863 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask))
864 {
865 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltExecFromBlock);
866 return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility);
867 }
868 }
869 }
870 }
871 }
872 }
873 }
874 return VINF_EM_HALT;
875}
876
877
878/**
879 * VMM ring-0 thread-context callback.
880 *
881 * This does common HM state updating and calls the HM-specific thread-context
882 * callback.
883 *
884 * @param enmEvent The thread-context event.
885 * @param pvUser Opaque pointer to the VMCPU.
886 *
887 * @thread EMT(pvUser)
888 */
889static DECLCALLBACK(void) vmmR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, void *pvUser)
890{
891 PVMCPUCC pVCpu = (PVMCPUCC)pvUser;
892
893 switch (enmEvent)
894 {
895 case RTTHREADCTXEVENT_IN:
896 {
897 /*
898 * Linux may call us with preemption enabled (really!) but technically we
899 * cannot get preempted here, otherwise we end up in an infinite recursion
900 * scenario (i.e. preempted in resume hook -> preempt hook -> resume hook...
901 * ad infinitum). Let's just disable preemption for now...
902 */
903 /** @todo r=bird: I don't believe the above. The linux code is clearly enabling
904 * preemption after doing the callout (one or two functions up the
905 * call chain). */
906 /** @todo r=ramshankar: See @bugref{5313#c30}. */
907 RTTHREADPREEMPTSTATE ParanoidPreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
908 RTThreadPreemptDisable(&ParanoidPreemptState);
909
910 /* We need to update the VCPU <-> host CPU mapping. */
911 RTCPUID idHostCpu;
912 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
913 pVCpu->iHostCpuSet = iHostCpuSet;
914 ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
915
916 /* In the very unlikely event that the GIP delta for the CPU we're
917 rescheduled needs calculating, try force a return to ring-3.
918 We unfortunately cannot do the measurements right here. */
919 if (RT_UNLIKELY(SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
920 VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
921
922 /* Invoke the HM-specific thread-context callback. */
923 HMR0ThreadCtxCallback(enmEvent, pvUser);
924
925 /* Restore preemption. */
926 RTThreadPreemptRestore(&ParanoidPreemptState);
927 break;
928 }
929
930 case RTTHREADCTXEVENT_OUT:
931 {
932 /* Invoke the HM-specific thread-context callback. */
933 HMR0ThreadCtxCallback(enmEvent, pvUser);
934
935 /*
936 * Sigh. See VMMGetCpu() used by VMCPU_ASSERT_EMT(). We cannot let several VCPUs
937 * have the same host CPU associated with it.
938 */
939 pVCpu->iHostCpuSet = UINT32_MAX;
940 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
941 break;
942 }
943
944 default:
945 /* Invoke the HM-specific thread-context callback. */
946 HMR0ThreadCtxCallback(enmEvent, pvUser);
947 break;
948 }
949}
950
951
952/**
953 * Creates thread switching hook for the current EMT thread.
954 *
955 * This is called by GVMMR0CreateVM and GVMMR0RegisterVCpu. If the host
956 * platform does not implement switcher hooks, no hook will be created and the
957 * member is set to NIL_RTTHREADCTXHOOK.
958 *
959 * @returns VBox status code.
960 * @param pVCpu The cross context virtual CPU structure.
961 * @thread EMT(pVCpu)
962 */
963VMMR0_INT_DECL(int) VMMR0ThreadCtxHookCreateForEmt(PVMCPUCC pVCpu)
964{
965 VMCPU_ASSERT_EMT(pVCpu);
966 Assert(pVCpu->vmm.s.hCtxHook == NIL_RTTHREADCTXHOOK);
967
968#if 1 /* To disable this stuff change to zero. */
969 int rc = RTThreadCtxHookCreate(&pVCpu->vmm.s.hCtxHook, 0, vmmR0ThreadCtxCallback, pVCpu);
970 if (RT_SUCCESS(rc))
971 return rc;
972#else
973 RT_NOREF(vmmR0ThreadCtxCallback);
974 int rc = VERR_NOT_SUPPORTED;
975#endif
976
977 pVCpu->vmm.s.hCtxHook = NIL_RTTHREADCTXHOOK;
978 if (rc == VERR_NOT_SUPPORTED)
979 return VINF_SUCCESS;
980
981 LogRelMax(32, ("RTThreadCtxHookCreate failed! rc=%Rrc pVCpu=%p idCpu=%RU32\n", rc, pVCpu, pVCpu->idCpu));
982 return VINF_SUCCESS; /* Just ignore it, we can live without context hooks. */
983}
984
985
986/**
987 * Destroys the thread switching hook for the specified VCPU.
988 *
989 * @param pVCpu The cross context virtual CPU structure.
990 * @remarks Can be called from any thread.
991 */
992VMMR0_INT_DECL(void) VMMR0ThreadCtxHookDestroyForEmt(PVMCPUCC pVCpu)
993{
994 int rc = RTThreadCtxHookDestroy(pVCpu->vmm.s.hCtxHook);
995 AssertRC(rc);
996 pVCpu->vmm.s.hCtxHook = NIL_RTTHREADCTXHOOK;
997}
998
999
1000/**
1001 * Disables the thread switching hook for this VCPU (if we got one).
1002 *
1003 * @param pVCpu The cross context virtual CPU structure.
1004 * @thread EMT(pVCpu)
1005 *
1006 * @remarks This also clears VMCPU::idHostCpu, so the mapping is invalid after
1007 * this call. This means you have to be careful with what you do!
1008 */
1009VMMR0_INT_DECL(void) VMMR0ThreadCtxHookDisable(PVMCPUCC pVCpu)
1010{
1011 /*
1012 * Clear the VCPU <-> host CPU mapping as we've left HM context.
1013 * @bugref{7726#c19} explains the need for this trick:
1014 *
1015 * VMXR0CallRing3Callback/SVMR0CallRing3Callback &
1016 * hmR0VmxLeaveSession/hmR0SvmLeaveSession disables context hooks during
1017 * longjmp & normal return to ring-3, which opens a window where we may be
1018 * rescheduled without changing VMCPUID::idHostCpu and cause confusion if
1019 * the CPU starts executing a different EMT. Both functions first disable
1020 * preemption and then call HMR0LeaveCpu, which invalidates idHostCpu, leaving
1021 * an opening for getting preempted.
1022 */
1023 /** @todo Make HM not need this API! Then we could leave the hooks enabled
1024 * all the time. */
1025 /** @todo move this into the context hook disabling if(). */
1026 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1027
1028 /*
1029 * Disable the context hook, if we got one.
1030 */
1031 if (pVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1032 {
1033 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1034 int rc = RTThreadCtxHookDisable(pVCpu->vmm.s.hCtxHook);
1035 AssertRC(rc);
1036 }
1037}
1038
1039
1040/**
1041 * Internal version of VMMR0ThreadCtxHookIsEnabled.
1042 *
1043 * @returns true if registered, false otherwise.
1044 * @param pVCpu The cross context virtual CPU structure.
1045 */
1046DECLINLINE(bool) vmmR0ThreadCtxHookIsEnabled(PVMCPUCC pVCpu)
1047{
1048 return RTThreadCtxHookIsEnabled(pVCpu->vmm.s.hCtxHook);
1049}
1050
1051
1052/**
1053 * Whether thread-context hooks are registered for this VCPU.
1054 *
1055 * @returns true if registered, false otherwise.
1056 * @param pVCpu The cross context virtual CPU structure.
1057 */
1058VMMR0_INT_DECL(bool) VMMR0ThreadCtxHookIsEnabled(PVMCPUCC pVCpu)
1059{
1060 return vmmR0ThreadCtxHookIsEnabled(pVCpu);
1061}
1062
1063
1064/**
1065 * Returns the ring-0 release logger instance.
1066 *
1067 * @returns Pointer to release logger, NULL if not configured.
1068 * @param pVCpu The cross context virtual CPU structure of the caller.
1069 * @thread EMT(pVCpu)
1070 */
1071VMMR0_INT_DECL(PRTLOGGER) VMMR0GetReleaseLogger(PVMCPUCC pVCpu)
1072{
1073 PVMMR0LOGGER pLogger = pVCpu->vmm.s.pR0RelLoggerR0;
1074 if (pLogger)
1075 return &pLogger->Logger;
1076 return NULL;
1077}
1078
1079
1080#ifdef VBOX_WITH_STATISTICS
1081/**
1082 * Record return code statistics
1083 * @param pVM The cross context VM structure.
1084 * @param pVCpu The cross context virtual CPU structure.
1085 * @param rc The status code.
1086 */
1087static void vmmR0RecordRC(PVMCC pVM, PVMCPUCC pVCpu, int rc)
1088{
1089 /*
1090 * Collect statistics.
1091 */
1092 switch (rc)
1093 {
1094 case VINF_SUCCESS:
1095 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetNormal);
1096 break;
1097 case VINF_EM_RAW_INTERRUPT:
1098 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterrupt);
1099 break;
1100 case VINF_EM_RAW_INTERRUPT_HYPER:
1101 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptHyper);
1102 break;
1103 case VINF_EM_RAW_GUEST_TRAP:
1104 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGuestTrap);
1105 break;
1106 case VINF_EM_RAW_RING_SWITCH:
1107 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitch);
1108 break;
1109 case VINF_EM_RAW_RING_SWITCH_INT:
1110 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitchInt);
1111 break;
1112 case VINF_EM_RAW_STALE_SELECTOR:
1113 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetStaleSelector);
1114 break;
1115 case VINF_EM_RAW_IRET_TRAP:
1116 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIRETTrap);
1117 break;
1118 case VINF_IOM_R3_IOPORT_READ:
1119 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIORead);
1120 break;
1121 case VINF_IOM_R3_IOPORT_WRITE:
1122 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOWrite);
1123 break;
1124 case VINF_IOM_R3_IOPORT_COMMIT_WRITE:
1125 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOCommitWrite);
1126 break;
1127 case VINF_IOM_R3_MMIO_READ:
1128 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIORead);
1129 break;
1130 case VINF_IOM_R3_MMIO_WRITE:
1131 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOWrite);
1132 break;
1133 case VINF_IOM_R3_MMIO_COMMIT_WRITE:
1134 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOCommitWrite);
1135 break;
1136 case VINF_IOM_R3_MMIO_READ_WRITE:
1137 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOReadWrite);
1138 break;
1139 case VINF_PATM_HC_MMIO_PATCH_READ:
1140 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchRead);
1141 break;
1142 case VINF_PATM_HC_MMIO_PATCH_WRITE:
1143 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchWrite);
1144 break;
1145 case VINF_CPUM_R3_MSR_READ:
1146 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMSRRead);
1147 break;
1148 case VINF_CPUM_R3_MSR_WRITE:
1149 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMSRWrite);
1150 break;
1151 case VINF_EM_RAW_EMULATE_INSTR:
1152 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetEmulate);
1153 break;
1154 case VINF_PATCH_EMULATE_INSTR:
1155 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchEmulate);
1156 break;
1157 case VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT:
1158 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetLDTFault);
1159 break;
1160 case VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT:
1161 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGDTFault);
1162 break;
1163 case VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT:
1164 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIDTFault);
1165 break;
1166 case VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT:
1167 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTSSFault);
1168 break;
1169 case VINF_CSAM_PENDING_ACTION:
1170 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCSAMTask);
1171 break;
1172 case VINF_PGM_SYNC_CR3:
1173 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetSyncCR3);
1174 break;
1175 case VINF_PATM_PATCH_INT3:
1176 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchInt3);
1177 break;
1178 case VINF_PATM_PATCH_TRAP_PF:
1179 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchPF);
1180 break;
1181 case VINF_PATM_PATCH_TRAP_GP:
1182 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchGP);
1183 break;
1184 case VINF_PATM_PENDING_IRQ_AFTER_IRET:
1185 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchIretIRQ);
1186 break;
1187 case VINF_EM_RESCHEDULE_REM:
1188 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRescheduleREM);
1189 break;
1190 case VINF_EM_RAW_TO_R3:
1191 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Total);
1192 if (VM_FF_IS_SET(pVM, VM_FF_TM_VIRTUAL_SYNC))
1193 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3TMVirt);
1194 else if (VM_FF_IS_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES))
1195 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3HandyPages);
1196 else if (VM_FF_IS_SET(pVM, VM_FF_PDM_QUEUES))
1197 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3PDMQueues);
1198 else if (VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
1199 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Rendezvous);
1200 else if (VM_FF_IS_SET(pVM, VM_FF_PDM_DMA))
1201 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3DMA);
1202 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TIMER))
1203 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Timer);
1204 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PDM_CRITSECT))
1205 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3CritSect);
1206 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TO_R3))
1207 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3FF);
1208 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
1209 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Iem);
1210 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IOM))
1211 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Iom);
1212 else
1213 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Unknown);
1214 break;
1215
1216 case VINF_EM_RAW_TIMER_PENDING:
1217 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTimerPending);
1218 break;
1219 case VINF_EM_RAW_INTERRUPT_PENDING:
1220 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptPending);
1221 break;
1222 case VINF_VMM_CALL_HOST:
1223 switch (pVCpu->vmm.s.enmCallRing3Operation)
1224 {
1225 case VMMCALLRING3_PDM_CRIT_SECT_ENTER:
1226 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPDMCritSectEnter);
1227 break;
1228 case VMMCALLRING3_PDM_LOCK:
1229 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPDMLock);
1230 break;
1231 case VMMCALLRING3_PGM_POOL_GROW:
1232 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMPoolGrow);
1233 break;
1234 case VMMCALLRING3_PGM_LOCK:
1235 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMLock);
1236 break;
1237 case VMMCALLRING3_PGM_MAP_CHUNK:
1238 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMMapChunk);
1239 break;
1240 case VMMCALLRING3_PGM_ALLOCATE_HANDY_PAGES:
1241 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMAllocHandy);
1242 break;
1243 case VMMCALLRING3_VMM_LOGGER_FLUSH:
1244 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallLogFlush);
1245 break;
1246 case VMMCALLRING3_VM_SET_ERROR:
1247 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallVMSetError);
1248 break;
1249 case VMMCALLRING3_VM_SET_RUNTIME_ERROR:
1250 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallVMSetRuntimeError);
1251 break;
1252 case VMMCALLRING3_VM_R0_ASSERTION:
1253 default:
1254 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCallRing3);
1255 break;
1256 }
1257 break;
1258 case VINF_PATM_DUPLICATE_FUNCTION:
1259 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPATMDuplicateFn);
1260 break;
1261 case VINF_PGM_CHANGE_MODE:
1262 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMChangeMode);
1263 break;
1264 case VINF_PGM_POOL_FLUSH_PENDING:
1265 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMFlushPending);
1266 break;
1267 case VINF_EM_PENDING_REQUEST:
1268 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPendingRequest);
1269 break;
1270 case VINF_EM_HM_PATCH_TPR_INSTR:
1271 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchTPR);
1272 break;
1273 default:
1274 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMisc);
1275 break;
1276 }
1277}
1278#endif /* VBOX_WITH_STATISTICS */
1279
1280
1281/**
1282 * The Ring 0 entry point, called by the fast-ioctl path.
1283 *
1284 * @param pGVM The global (ring-0) VM structure.
1285 * @param pVMIgnored The cross context VM structure. The return code is
1286 * stored in pGVCpu->vmm.s.iLastGZRc.
1287 * @param idCpu The Virtual CPU ID of the calling EMT.
1288 * @param enmOperation Which operation to execute.
1289 * @remarks Assume called with interrupts _enabled_.
1290 */
1291VMMR0DECL(void) VMMR0EntryFast(PGVM pGVM, PVMCC pVMIgnored, VMCPUID idCpu, VMMR0OPERATION enmOperation)
1292{
1293 RT_NOREF(pVMIgnored);
1294
1295 /*
1296 * Validation.
1297 */
1298 if ( idCpu < pGVM->cCpus
1299 && pGVM->cCpus == pGVM->cCpusUnsafe)
1300 { /*likely*/ }
1301 else
1302 {
1303 SUPR0Printf("VMMR0EntryFast: Bad idCpu=%#x cCpus=%#x cCpusUnsafe=%#x\n", idCpu, pGVM->cCpus, pGVM->cCpusUnsafe);
1304 return;
1305 }
1306
1307 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
1308 RTNATIVETHREAD const hNativeThread = RTThreadNativeSelf();
1309 if (RT_LIKELY( pGVCpu->hEMT == hNativeThread
1310 && pGVCpu->hNativeThreadR0 == hNativeThread))
1311 { /* likely */ }
1312 else
1313 {
1314 SUPR0Printf("VMMR0EntryFast: Bad thread idCpu=%#x hNativeSelf=%p pGVCpu->hEmt=%p pGVCpu->hNativeThreadR0=%p\n",
1315 idCpu, hNativeThread, pGVCpu->hEMT, pGVCpu->hNativeThreadR0);
1316 return;
1317 }
1318
1319 /*
1320 * SMAP fun.
1321 */
1322 VMM_CHECK_SMAP_SETUP();
1323 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1324
1325 /*
1326 * Perform requested operation.
1327 */
1328 switch (enmOperation)
1329 {
1330 /*
1331 * Run guest code using the available hardware acceleration technology.
1332 */
1333 case VMMR0_DO_HM_RUN:
1334 {
1335 for (;;) /* hlt loop */
1336 {
1337 /*
1338 * Disable preemption.
1339 */
1340 Assert(!vmmR0ThreadCtxHookIsEnabled(pGVCpu));
1341 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
1342 RTThreadPreemptDisable(&PreemptState);
1343
1344 /*
1345 * Get the host CPU identifiers, make sure they are valid and that
1346 * we've got a TSC delta for the CPU.
1347 */
1348 RTCPUID idHostCpu;
1349 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
1350 if (RT_LIKELY( iHostCpuSet < RTCPUSET_MAX_CPUS
1351 && SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
1352 {
1353 pGVCpu->iHostCpuSet = iHostCpuSet;
1354 ASMAtomicWriteU32(&pGVCpu->idHostCpu, idHostCpu);
1355
1356 /*
1357 * Update the periodic preemption timer if it's active.
1358 */
1359 if (pGVM->vmm.s.fUsePeriodicPreemptionTimers)
1360 GVMMR0SchedUpdatePeriodicPreemptionTimer(pGVM, pGVCpu->idHostCpu, TMCalcHostTimerFrequency(pGVM, pGVCpu));
1361 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1362
1363#ifdef VMM_R0_TOUCH_FPU
1364 /*
1365 * Make sure we've got the FPU state loaded so we don't need to clear
1366 * CR0.TS and get out of sync with the host kernel when loading the guest
1367 * FPU state. @ref sec_cpum_fpu (CPUM.cpp) and @bugref{4053}.
1368 */
1369 CPUMR0TouchHostFpu();
1370#endif
1371 int rc;
1372 bool fPreemptRestored = false;
1373 if (!HMR0SuspendPending())
1374 {
1375 /*
1376 * Enable the context switching hook.
1377 */
1378 if (pGVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1379 {
1380 Assert(!RTThreadCtxHookIsEnabled(pGVCpu->vmm.s.hCtxHook));
1381 int rc2 = RTThreadCtxHookEnable(pGVCpu->vmm.s.hCtxHook); AssertRC(rc2);
1382 }
1383
1384 /*
1385 * Enter HM context.
1386 */
1387 rc = HMR0Enter(pGVCpu);
1388 if (RT_SUCCESS(rc))
1389 {
1390 VMCPU_SET_STATE(pGVCpu, VMCPUSTATE_STARTED_HM);
1391
1392 /*
1393 * When preemption hooks are in place, enable preemption now that
1394 * we're in HM context.
1395 */
1396 if (vmmR0ThreadCtxHookIsEnabled(pGVCpu))
1397 {
1398 fPreemptRestored = true;
1399 RTThreadPreemptRestore(&PreemptState);
1400 }
1401
1402 /*
1403 * Setup the longjmp machinery and execute guest code (calls HMR0RunGuestCode).
1404 */
1405 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1406 rc = vmmR0CallRing3SetJmp(&pGVCpu->vmm.s.CallRing3JmpBufR0, HMR0RunGuestCode, pGVM, pGVCpu);
1407 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1408
1409 /*
1410 * Assert sanity on the way out. Using manual assertion code here as normal
1411 * assertions are going to panic the host since we're outside the setjmp/longjmp zone.
1412 */
1413 if (RT_UNLIKELY( VMCPU_GET_STATE(pGVCpu) != VMCPUSTATE_STARTED_HM
1414 && RT_SUCCESS_NP(rc) && rc != VINF_VMM_CALL_HOST ))
1415 {
1416 pGVM->vmm.s.szRing0AssertMsg1[0] = '\0';
1417 RTStrPrintf(pGVM->vmm.s.szRing0AssertMsg2, sizeof(pGVM->vmm.s.szRing0AssertMsg2),
1418 "Got VMCPU state %d expected %d.\n", VMCPU_GET_STATE(pGVCpu), VMCPUSTATE_STARTED_HM);
1419 rc = VERR_VMM_WRONG_HM_VMCPU_STATE;
1420 }
1421 /** @todo Get rid of this. HM shouldn't disable the context hook. */
1422 else if (RT_UNLIKELY(vmmR0ThreadCtxHookIsEnabled(pGVCpu)))
1423 {
1424 pGVM->vmm.s.szRing0AssertMsg1[0] = '\0';
1425 RTStrPrintf(pGVM->vmm.s.szRing0AssertMsg2, sizeof(pGVM->vmm.s.szRing0AssertMsg2),
1426 "Thread-context hooks still enabled! VCPU=%p Id=%u rc=%d.\n", pGVCpu, pGVCpu->idCpu, rc);
1427 rc = VERR_INVALID_STATE;
1428 }
1429
1430 VMCPU_SET_STATE(pGVCpu, VMCPUSTATE_STARTED);
1431 }
1432 STAM_COUNTER_INC(&pGVM->vmm.s.StatRunGC);
1433
1434 /*
1435 * Invalidate the host CPU identifiers before we disable the context
1436 * hook / restore preemption.
1437 */
1438 pGVCpu->iHostCpuSet = UINT32_MAX;
1439 ASMAtomicWriteU32(&pGVCpu->idHostCpu, NIL_RTCPUID);
1440
1441 /*
1442 * Disable context hooks. Due to unresolved cleanup issues, we
1443 * cannot leave the hooks enabled when we return to ring-3.
1444 *
1445 * Note! At the moment HM may also have disabled the hook
1446 * when we get here, but the IPRT API handles that.
1447 */
1448 if (pGVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1449 {
1450 ASMAtomicWriteU32(&pGVCpu->idHostCpu, NIL_RTCPUID);
1451 RTThreadCtxHookDisable(pGVCpu->vmm.s.hCtxHook);
1452 }
1453 }
1454 /*
1455 * The system is about to go into suspend mode; go back to ring 3.
1456 */
1457 else
1458 {
1459 rc = VINF_EM_RAW_INTERRUPT;
1460 pGVCpu->iHostCpuSet = UINT32_MAX;
1461 ASMAtomicWriteU32(&pGVCpu->idHostCpu, NIL_RTCPUID);
1462 }
1463
1464 /** @todo When HM stops messing with the context hook state, we'll disable
1465 * preemption again before the RTThreadCtxHookDisable call. */
1466 if (!fPreemptRestored)
1467 RTThreadPreemptRestore(&PreemptState);
1468
1469 pGVCpu->vmm.s.iLastGZRc = rc;
1470
1471 /* Fire dtrace probe and collect statistics. */
1472 VBOXVMM_R0_VMM_RETURN_TO_RING3_HM(pGVCpu, CPUMQueryGuestCtxPtr(pGVCpu), rc);
1473#ifdef VBOX_WITH_STATISTICS
1474 vmmR0RecordRC(pGVM, pGVCpu, rc);
1475#endif
1476#if 1
1477 /*
1478 * If this is a halt, try to handle it here instead of returning to ring-3.
1479 */
1480 if (rc != VINF_EM_HALT)
1481 { /* we're not in a hurry for a HLT, so prefer this path */ }
1482 else
1483 {
1484 pGVCpu->vmm.s.iLastGZRc = rc = vmmR0DoHalt(pGVM, pGVCpu);
1485 if (rc == VINF_SUCCESS)
1486 {
1487 pGVCpu->vmm.s.cR0HaltsSucceeded++;
1488 continue;
1489 }
1490 pGVCpu->vmm.s.cR0HaltsToRing3++;
1491 }
1492#endif
1493 }
1494 /*
1495 * Invalid CPU set index or TSC delta in need of measuring.
1496 */
1497 else
1498 {
1499 pGVCpu->iHostCpuSet = UINT32_MAX;
1500 ASMAtomicWriteU32(&pGVCpu->idHostCpu, NIL_RTCPUID);
1501 RTThreadPreemptRestore(&PreemptState);
1502 if (iHostCpuSet < RTCPUSET_MAX_CPUS)
1503 {
1504 int rc = SUPR0TscDeltaMeasureBySetIndex(pGVM->pSession, iHostCpuSet, 0 /*fFlags*/,
1505 2 /*cMsWaitRetry*/, 5*RT_MS_1SEC /*cMsWaitThread*/,
1506 0 /*default cTries*/);
1507 if (RT_SUCCESS(rc) || rc == VERR_CPU_OFFLINE)
1508 pGVCpu->vmm.s.iLastGZRc = VINF_EM_RAW_TO_R3;
1509 else
1510 pGVCpu->vmm.s.iLastGZRc = rc;
1511 }
1512 else
1513 pGVCpu->vmm.s.iLastGZRc = VERR_INVALID_CPU_INDEX;
1514 }
1515 break;
1516
1517 } /* halt loop. */
1518 break;
1519 }
1520
1521#ifdef VBOX_WITH_NEM_R0
1522# if defined(RT_ARCH_AMD64) && defined(RT_OS_WINDOWS)
1523 case VMMR0_DO_NEM_RUN:
1524 {
1525 /*
1526 * Setup the longjmp machinery and execute guest code (calls NEMR0RunGuestCode).
1527 */
1528 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1529# ifdef VBOXSTRICTRC_STRICT_ENABLED
1530 int rc = vmmR0CallRing3SetJmp2(&pGVCpu->vmm.s.CallRing3JmpBufR0, (PFNVMMR0SETJMP2)NEMR0RunGuestCode, pGVM, idCpu);
1531# else
1532 int rc = vmmR0CallRing3SetJmp2(&pGVCpu->vmm.s.CallRing3JmpBufR0, NEMR0RunGuestCode, pGVM, idCpu);
1533# endif
1534 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1535 STAM_COUNTER_INC(&pGVM->vmm.s.StatRunGC);
1536
1537 pGVCpu->vmm.s.iLastGZRc = rc;
1538
1539 /*
1540 * Fire dtrace probe and collect statistics.
1541 */
1542 VBOXVMM_R0_VMM_RETURN_TO_RING3_NEM(pGVCpu, CPUMQueryGuestCtxPtr(pGVCpu), rc);
1543# ifdef VBOX_WITH_STATISTICS
1544 vmmR0RecordRC(pGVM, pGVCpu, rc);
1545# endif
1546 break;
1547 }
1548# endif
1549#endif
1550
1551 /*
1552 * For profiling.
1553 */
1554 case VMMR0_DO_NOP:
1555 pGVCpu->vmm.s.iLastGZRc = VINF_SUCCESS;
1556 break;
1557
1558 /*
1559 * Shouldn't happen.
1560 */
1561 default:
1562 AssertMsgFailed(("%#x\n", enmOperation));
1563 pGVCpu->vmm.s.iLastGZRc = VERR_NOT_SUPPORTED;
1564 break;
1565 }
1566 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1567}
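/*
 * A rough sketch of the ring-3 side of this fast path, for orientation only: the
 * EM/VMM code in ring-3 issues the fast ioctl and then fetches the real status
 * from vmm.s.iLastGZRc, since VMMR0EntryFast() itself returns nothing. The exact
 * SUPR3CallVMMR0Fast() signature and how the ring-0 VM handle is obtained are
 * assumptions here; check VBox/sup.h and VMMR3 before relying on them.
 */
#if 0 /* illustrative sketch, not part of the build */
    int rc = SUPR3CallVMMR0Fast(pVMR0 /* ring-0 VM handle */, VMMR0_DO_HM_RUN, pVCpu->idCpu);
    if (RT_LIKELY(rc == VINF_SUCCESS))
        rc = pVCpu->vmm.s.iLastGZRc;   /* the value stored by VMMR0EntryFast above */
#endif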
1568
1569
1570/**
1571 * Validates a session or VM session argument.
1572 *
1573 * @returns true / false accordingly.
1574 * @param pGVM The global (ring-0) VM structure.
1575 * @param pClaimedSession The session claim to validate.
1576 * @param pSession The session argument.
1577 */
1578DECLINLINE(bool) vmmR0IsValidSession(PGVM pGVM, PSUPDRVSESSION pClaimedSession, PSUPDRVSESSION pSession)
1579{
1580 /* This must be set! */
1581 if (!pSession)
1582 return false;
1583
1584 /* Only one out of the two. */
1585 if (pGVM && pClaimedSession)
1586 return false;
1587 if (pGVM)
1588 pClaimedSession = pGVM->pSession;
1589 return pClaimedSession == pSession;
1590}
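/*
 * A minimal sketch of how a request case in vmmR0EntryExWorker() below might use
 * vmmR0IsValidSession() to guard a session-scoped request; the request type
 * SOMEREQ and its pSession member are hypothetical stand-ins.
 */
#if 0 /* illustrative sketch, not part of the build */
        case VMMR0_DO_SOME_REQUEST:
            if (!vmmR0IsValidSession(pGVM, ((PSOMEREQ)pReqHdr)->pSession, pSession))
                return VERR_PERMISSION_DENIED;
            rc = VINF_SUCCESS; /* a real handler would do the actual work here */
            break;
#endif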
1591
1592
1593/**
1594 * VMMR0EntryEx worker function, either called directly or whenever possible
1595 * called thru a longjmp so we can exit safely on failure.
1596 *
1597 * @returns VBox status code.
1598 * @param pGVM The global (ring-0) VM structure.
1599 * @param idCpu Virtual CPU ID argument. Must be NIL_VMCPUID if pVM
1600 * is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't
1601 * @param enmOperation Which operation to execute.
1602 * @param pReqHdr This points to a SUPVMMR0REQHDR packet. Optional.
1603 * The support driver validates this if it's present.
1604 * @param u64Arg Some simple constant argument.
1605 * @param pSession The session of the caller.
1606 *
1607 * @remarks Assume called with interrupts _enabled_.
1608 */
1609static int vmmR0EntryExWorker(PGVM pGVM, VMCPUID idCpu, VMMR0OPERATION enmOperation,
1610 PSUPVMMR0REQHDR pReqHdr, uint64_t u64Arg, PSUPDRVSESSION pSession)
1611{
1612 /*
1613 * Validate pGVM and idCpu for consistency and validity.
1614 */
1615 if (pGVM != NULL)
1616 {
1617 if (RT_LIKELY(((uintptr_t)pGVM & PAGE_OFFSET_MASK) == 0))
1618 { /* likely */ }
1619 else
1620 {
1621 SUPR0Printf("vmmR0EntryExWorker: Invalid pGVM=%p! (op=%d)\n", pGVM, enmOperation);
1622 return VERR_INVALID_POINTER;
1623 }
1624
1625 if (RT_LIKELY(idCpu == NIL_VMCPUID || idCpu < pGVM->cCpus))
1626 { /* likely */ }
1627 else
1628 {
1629 SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu %#x (cCpus=%#x)\n", idCpu, pGVM->cCpus);
1630 return VERR_INVALID_PARAMETER;
1631 }
1632
1633 if (RT_LIKELY( pGVM->enmVMState >= VMSTATE_CREATING
1634 && pGVM->enmVMState <= VMSTATE_TERMINATED
1635 && pGVM->pSession == pSession
1636 && pGVM->pSelf == pGVM))
1637 { /* likely */ }
1638 else
1639 {
1640 SUPR0Printf("vmmR0EntryExWorker: Invalid pGVM=%p:{.enmVMState=%d, .cCpus=%#x, .pSession=%p(==%p), .pSelf=%p(==%p)}! (op=%d)\n",
1641 pGVM, pGVM->enmVMState, pGVM->cCpus, pGVM->pSession, pSession, pGVM->pSelf, pGVM, enmOperation);
1642 return VERR_INVALID_POINTER;
1643 }
1644 }
1645 else if (RT_LIKELY(idCpu == NIL_VMCPUID))
1646 { /* likely */ }
1647 else
1648 {
1649 SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu=%u\n", idCpu);
1650 return VERR_INVALID_PARAMETER;
1651 }
1652
1653 /*
1654 * SMAP fun.
1655 */
1656 VMM_CHECK_SMAP_SETUP();
1657 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
1658
1659 /*
1660 * Process the request.
1661 */
1662 int rc;
1663 switch (enmOperation)
1664 {
1665 /*
1666 * GVM requests
1667 */
1668 case VMMR0_DO_GVMM_CREATE_VM:
1669 if (pGVM == NULL && u64Arg == 0 && idCpu == NIL_VMCPUID)
1670 rc = GVMMR0CreateVMReq((PGVMMCREATEVMREQ)pReqHdr, pSession);
1671 else
1672 rc = VERR_INVALID_PARAMETER;
1673 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
1674 break;
1675
1676 case VMMR0_DO_GVMM_DESTROY_VM:
1677 if (pReqHdr == NULL && u64Arg == 0)
1678 rc = GVMMR0DestroyVM(pGVM);
1679 else
1680 rc = VERR_INVALID_PARAMETER;
1681 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
1682 break;
1683
1684 case VMMR0_DO_GVMM_REGISTER_VMCPU:
1685 if (pGVM != NULL)
1686 rc = GVMMR0RegisterVCpu(pGVM, idCpu);
1687 else
1688 rc = VERR_INVALID_PARAMETER;
1689 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1690 break;
1691
1692 case VMMR0_DO_GVMM_DEREGISTER_VMCPU:
1693 if (pGVM != NULL)
1694 rc = GVMMR0DeregisterVCpu(pGVM, idCpu);
1695 else
1696 rc = VERR_INVALID_PARAMETER;
1697 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1698 break;
1699
1700 case VMMR0_DO_GVMM_SCHED_HALT:
1701 if (pReqHdr)
1702 return VERR_INVALID_PARAMETER;
1703 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1704 rc = GVMMR0SchedHaltReq(pGVM, idCpu, u64Arg);
1705 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1706 break;
1707
1708 case VMMR0_DO_GVMM_SCHED_WAKE_UP:
1709 if (pReqHdr || u64Arg)
1710 return VERR_INVALID_PARAMETER;
1711 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1712 rc = GVMMR0SchedWakeUp(pGVM, idCpu);
1713 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1714 break;
1715
1716 case VMMR0_DO_GVMM_SCHED_POKE:
1717 if (pReqHdr || u64Arg)
1718 return VERR_INVALID_PARAMETER;
1719 rc = GVMMR0SchedPoke(pGVM, idCpu);
1720 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1721 break;
1722
1723 case VMMR0_DO_GVMM_SCHED_WAKE_UP_AND_POKE_CPUS:
1724 if (u64Arg)
1725 return VERR_INVALID_PARAMETER;
1726 rc = GVMMR0SchedWakeUpAndPokeCpusReq(pGVM, (PGVMMSCHEDWAKEUPANDPOKECPUSREQ)pReqHdr);
1727 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1728 break;
1729
1730 case VMMR0_DO_GVMM_SCHED_POLL:
1731 if (pReqHdr || u64Arg > 1)
1732 return VERR_INVALID_PARAMETER;
1733 rc = GVMMR0SchedPoll(pGVM, idCpu, !!u64Arg);
1734 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1735 break;
1736
1737 case VMMR0_DO_GVMM_QUERY_STATISTICS:
1738 if (u64Arg)
1739 return VERR_INVALID_PARAMETER;
1740 rc = GVMMR0QueryStatisticsReq(pGVM, (PGVMMQUERYSTATISTICSSREQ)pReqHdr, pSession);
1741 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1742 break;
1743
1744 case VMMR0_DO_GVMM_RESET_STATISTICS:
1745 if (u64Arg)
1746 return VERR_INVALID_PARAMETER;
1747 rc = GVMMR0ResetStatisticsReq(pGVM, (PGVMMRESETSTATISTICSSREQ)pReqHdr, pSession);
1748 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1749 break;
1750
1751 /*
1752 * Initialize the R0 part of a VM instance.
1753 */
1754 case VMMR0_DO_VMMR0_INIT:
1755 rc = vmmR0InitVM(pGVM, RT_LODWORD(u64Arg), RT_HIDWORD(u64Arg));
1756 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1757 break;
1758
1759 /*
1760 * Does EMT specific ring-0 init.
1761 */
1762 case VMMR0_DO_VMMR0_INIT_EMT:
1763 rc = vmmR0InitVMEmt(pGVM, idCpu);
1764 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1765 break;
1766
1767 /*
1768 * Terminate the R0 part of a VM instance.
1769 */
1770 case VMMR0_DO_VMMR0_TERM:
1771 rc = VMMR0TermVM(pGVM, 0 /*idCpu*/);
1772 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1773 break;
1774
1775 /*
1776 * Attempt to enable hm mode and check the current setting.
1777 */
1778 case VMMR0_DO_HM_ENABLE:
1779 rc = HMR0EnableAllCpus(pGVM);
1780 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1781 break;
1782
1783 /*
1784 * Setup the hardware accelerated session.
1785 */
1786 case VMMR0_DO_HM_SETUP_VM:
1787 rc = HMR0SetupVM(pGVM);
1788 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1789 break;
1790
1791 /*
1792 * PGM wrappers.
1793 */
1794 case VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES:
1795 if (idCpu == NIL_VMCPUID)
1796 return VERR_INVALID_CPU_ID;
1797 rc = PGMR0PhysAllocateHandyPages(pGVM, idCpu);
1798 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1799 break;
1800
1801 case VMMR0_DO_PGM_FLUSH_HANDY_PAGES:
1802 if (idCpu == NIL_VMCPUID)
1803 return VERR_INVALID_CPU_ID;
1804 rc = PGMR0PhysFlushHandyPages(pGVM, idCpu);
1805 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1806 break;
1807
1808 case VMMR0_DO_PGM_ALLOCATE_LARGE_HANDY_PAGE:
1809 if (idCpu == NIL_VMCPUID)
1810 return VERR_INVALID_CPU_ID;
1811 rc = PGMR0PhysAllocateLargeHandyPage(pGVM, idCpu);
1812 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1813 break;
1814
1815 case VMMR0_DO_PGM_PHYS_SETUP_IOMMU:
1816 if (idCpu != 0)
1817 return VERR_INVALID_CPU_ID;
1818 rc = PGMR0PhysSetupIoMmu(pGVM);
1819 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1820 break;
1821
1822 case VMMR0_DO_PGM_POOL_GROW:
1823 if (idCpu == NIL_VMCPUID)
1824 return VERR_INVALID_CPU_ID;
1825 rc = PGMR0PoolGrow(pGVM);
1826 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1827 break;
1828
1829 /*
1830 * GMM wrappers.
1831 */
1832 case VMMR0_DO_GMM_INITIAL_RESERVATION:
1833 if (u64Arg)
1834 return VERR_INVALID_PARAMETER;
1835 rc = GMMR0InitialReservationReq(pGVM, idCpu, (PGMMINITIALRESERVATIONREQ)pReqHdr);
1836 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1837 break;
1838
1839 case VMMR0_DO_GMM_UPDATE_RESERVATION:
1840 if (u64Arg)
1841 return VERR_INVALID_PARAMETER;
1842 rc = GMMR0UpdateReservationReq(pGVM, idCpu, (PGMMUPDATERESERVATIONREQ)pReqHdr);
1843 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1844 break;
1845
1846 case VMMR0_DO_GMM_ALLOCATE_PAGES:
1847 if (u64Arg)
1848 return VERR_INVALID_PARAMETER;
1849 rc = GMMR0AllocatePagesReq(pGVM, idCpu, (PGMMALLOCATEPAGESREQ)pReqHdr);
1850 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1851 break;
1852
1853 case VMMR0_DO_GMM_FREE_PAGES:
1854 if (u64Arg)
1855 return VERR_INVALID_PARAMETER;
1856 rc = GMMR0FreePagesReq(pGVM, idCpu, (PGMMFREEPAGESREQ)pReqHdr);
1857 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1858 break;
1859
1860 case VMMR0_DO_GMM_FREE_LARGE_PAGE:
1861 if (u64Arg)
1862 return VERR_INVALID_PARAMETER;
1863 rc = GMMR0FreeLargePageReq(pGVM, idCpu, (PGMMFREELARGEPAGEREQ)pReqHdr);
1864 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1865 break;
1866
1867 case VMMR0_DO_GMM_QUERY_HYPERVISOR_MEM_STATS:
1868 if (u64Arg)
1869 return VERR_INVALID_PARAMETER;
1870 rc = GMMR0QueryHypervisorMemoryStatsReq((PGMMMEMSTATSREQ)pReqHdr);
1871 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1872 break;
1873
1874 case VMMR0_DO_GMM_QUERY_MEM_STATS:
1875 if (idCpu == NIL_VMCPUID)
1876 return VERR_INVALID_CPU_ID;
1877 if (u64Arg)
1878 return VERR_INVALID_PARAMETER;
1879 rc = GMMR0QueryMemoryStatsReq(pGVM, idCpu, (PGMMMEMSTATSREQ)pReqHdr);
1880 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1881 break;
1882
1883 case VMMR0_DO_GMM_BALLOONED_PAGES:
1884 if (u64Arg)
1885 return VERR_INVALID_PARAMETER;
1886 rc = GMMR0BalloonedPagesReq(pGVM, idCpu, (PGMMBALLOONEDPAGESREQ)pReqHdr);
1887 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1888 break;
1889
1890 case VMMR0_DO_GMM_MAP_UNMAP_CHUNK:
1891 if (u64Arg)
1892 return VERR_INVALID_PARAMETER;
1893 rc = GMMR0MapUnmapChunkReq(pGVM, (PGMMMAPUNMAPCHUNKREQ)pReqHdr);
1894 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1895 break;
1896
1897 case VMMR0_DO_GMM_SEED_CHUNK:
1898 if (pReqHdr)
1899 return VERR_INVALID_PARAMETER;
1900 rc = GMMR0SeedChunk(pGVM, idCpu, (RTR3PTR)u64Arg);
1901 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1902 break;
1903
1904 case VMMR0_DO_GMM_REGISTER_SHARED_MODULE:
1905 if (idCpu == NIL_VMCPUID)
1906 return VERR_INVALID_CPU_ID;
1907 if (u64Arg)
1908 return VERR_INVALID_PARAMETER;
1909 rc = GMMR0RegisterSharedModuleReq(pGVM, idCpu, (PGMMREGISTERSHAREDMODULEREQ)pReqHdr);
1910 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1911 break;
1912
1913 case VMMR0_DO_GMM_UNREGISTER_SHARED_MODULE:
1914 if (idCpu == NIL_VMCPUID)
1915 return VERR_INVALID_CPU_ID;
1916 if (u64Arg)
1917 return VERR_INVALID_PARAMETER;
1918 rc = GMMR0UnregisterSharedModuleReq(pGVM, idCpu, (PGMMUNREGISTERSHAREDMODULEREQ)pReqHdr);
1919 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1920 break;
1921
1922 case VMMR0_DO_GMM_RESET_SHARED_MODULES:
1923 if (idCpu == NIL_VMCPUID)
1924 return VERR_INVALID_CPU_ID;
1925 if ( u64Arg
1926 || pReqHdr)
1927 return VERR_INVALID_PARAMETER;
1928 rc = GMMR0ResetSharedModules(pGVM, idCpu);
1929 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1930 break;
1931
1932#ifdef VBOX_WITH_PAGE_SHARING
1933 case VMMR0_DO_GMM_CHECK_SHARED_MODULES:
1934 {
1935 if (idCpu == NIL_VMCPUID)
1936 return VERR_INVALID_CPU_ID;
1937 if ( u64Arg
1938 || pReqHdr)
1939 return VERR_INVALID_PARAMETER;
1940 rc = GMMR0CheckSharedModules(pGVM, idCpu);
1941 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1942 break;
1943 }
1944#endif
1945
1946#if defined(VBOX_STRICT) && HC_ARCH_BITS == 64
1947 case VMMR0_DO_GMM_FIND_DUPLICATE_PAGE:
1948 if (u64Arg)
1949 return VERR_INVALID_PARAMETER;
1950 rc = GMMR0FindDuplicatePageReq(pGVM, (PGMMFINDDUPLICATEPAGEREQ)pReqHdr);
1951 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1952 break;
1953#endif
1954
1955 case VMMR0_DO_GMM_QUERY_STATISTICS:
1956 if (u64Arg)
1957 return VERR_INVALID_PARAMETER;
1958 rc = GMMR0QueryStatisticsReq(pGVM, (PGMMQUERYSTATISTICSSREQ)pReqHdr);
1959 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1960 break;
1961
1962 case VMMR0_DO_GMM_RESET_STATISTICS:
1963 if (u64Arg)
1964 return VERR_INVALID_PARAMETER;
1965 rc = GMMR0ResetStatisticsReq(pGVM, (PGMMRESETSTATISTICSSREQ)pReqHdr);
1966 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1967 break;
1968
1969 /*
1970 * A quick GCFGM mock-up.
1971 */
1972 /** @todo GCFGM with proper access control, ring-3 management interface and all that. */
1973 case VMMR0_DO_GCFGM_SET_VALUE:
1974 case VMMR0_DO_GCFGM_QUERY_VALUE:
1975 {
1976 if (pGVM || !pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
1977 return VERR_INVALID_PARAMETER;
1978 PGCFGMVALUEREQ pReq = (PGCFGMVALUEREQ)pReqHdr;
1979 if (pReq->Hdr.cbReq != sizeof(*pReq))
1980 return VERR_INVALID_PARAMETER;
1981 if (enmOperation == VMMR0_DO_GCFGM_SET_VALUE)
1982 {
1983 rc = GVMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
1984 //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
1985 // rc = GMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
1986 }
1987 else
1988 {
1989 rc = GVMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
1990 //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
1991 // rc = GMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
1992 }
1993 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1994 break;
1995 }
1996
1997 /*
1998 * PDM Wrappers.
1999 */
2000 case VMMR0_DO_PDM_DRIVER_CALL_REQ_HANDLER:
2001 {
2002 if (!pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
2003 return VERR_INVALID_PARAMETER;
2004 rc = PDMR0DriverCallReqHandler(pGVM, (PPDMDRIVERCALLREQHANDLERREQ)pReqHdr);
2005 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2006 break;
2007 }
2008
2009 case VMMR0_DO_PDM_DEVICE_CREATE:
2010 {
2011 if (!pReqHdr || u64Arg || idCpu != 0)
2012 return VERR_INVALID_PARAMETER;
2013 rc = PDMR0DeviceCreateReqHandler(pGVM, (PPDMDEVICECREATEREQ)pReqHdr);
2014 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2015 break;
2016 }
2017
2018 case VMMR0_DO_PDM_DEVICE_GEN_CALL:
2019 {
2020 if (!pReqHdr || u64Arg)
2021 return VERR_INVALID_PARAMETER;
2022 rc = PDMR0DeviceGenCallReqHandler(pGVM, (PPDMDEVICEGENCALLREQ)pReqHdr, idCpu);
2023 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2024 break;
2025 }
2026
2027        /** @todo Remove this once all devices have been converted to the new style! @bugref{9218} */
2028 case VMMR0_DO_PDM_DEVICE_COMPAT_SET_CRITSECT:
2029 {
2030 if (!pReqHdr || u64Arg || idCpu != 0)
2031 return VERR_INVALID_PARAMETER;
2032 rc = PDMR0DeviceCompatSetCritSectReqHandler(pGVM, (PPDMDEVICECOMPATSETCRITSECTREQ)pReqHdr);
2033 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2034 break;
2035 }
2036
2037 /*
2038 * Requests to the internal networking service.
2039 */
2040 case VMMR0_DO_INTNET_OPEN:
2041 {
2042 PINTNETOPENREQ pReq = (PINTNETOPENREQ)pReqHdr;
2043 if (u64Arg || !pReq || !vmmR0IsValidSession(pGVM, pReq->pSession, pSession) || idCpu != NIL_VMCPUID)
2044 return VERR_INVALID_PARAMETER;
2045 rc = IntNetR0OpenReq(pSession, pReq);
2046 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2047 break;
2048 }
2049
2050 case VMMR0_DO_INTNET_IF_CLOSE:
2051 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFCLOSEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2052 return VERR_INVALID_PARAMETER;
2053 rc = IntNetR0IfCloseReq(pSession, (PINTNETIFCLOSEREQ)pReqHdr);
2054 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2055 break;
2056
2057
2058 case VMMR0_DO_INTNET_IF_GET_BUFFER_PTRS:
2059 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFGETBUFFERPTRSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2060 return VERR_INVALID_PARAMETER;
2061 rc = IntNetR0IfGetBufferPtrsReq(pSession, (PINTNETIFGETBUFFERPTRSREQ)pReqHdr);
2062 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2063 break;
2064
2065 case VMMR0_DO_INTNET_IF_SET_PROMISCUOUS_MODE:
2066 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2067 return VERR_INVALID_PARAMETER;
2068 rc = IntNetR0IfSetPromiscuousModeReq(pSession, (PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr);
2069 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2070 break;
2071
2072 case VMMR0_DO_INTNET_IF_SET_MAC_ADDRESS:
2073 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFSETMACADDRESSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2074 return VERR_INVALID_PARAMETER;
2075 rc = IntNetR0IfSetMacAddressReq(pSession, (PINTNETIFSETMACADDRESSREQ)pReqHdr);
2076 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2077 break;
2078
2079 case VMMR0_DO_INTNET_IF_SET_ACTIVE:
2080 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFSETACTIVEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2081 return VERR_INVALID_PARAMETER;
2082 rc = IntNetR0IfSetActiveReq(pSession, (PINTNETIFSETACTIVEREQ)pReqHdr);
2083 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2084 break;
2085
2086 case VMMR0_DO_INTNET_IF_SEND:
2087 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2088 return VERR_INVALID_PARAMETER;
2089 rc = IntNetR0IfSendReq(pSession, (PINTNETIFSENDREQ)pReqHdr);
2090 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2091 break;
2092
2093 case VMMR0_DO_INTNET_IF_WAIT:
2094 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2095 return VERR_INVALID_PARAMETER;
2096 rc = IntNetR0IfWaitReq(pSession, (PINTNETIFWAITREQ)pReqHdr);
2097 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2098 break;
2099
2100 case VMMR0_DO_INTNET_IF_ABORT_WAIT:
2101 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2102 return VERR_INVALID_PARAMETER;
2103 rc = IntNetR0IfAbortWaitReq(pSession, (PINTNETIFABORTWAITREQ)pReqHdr);
2104 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2105 break;
2106
2107#if 0 //def VBOX_WITH_PCI_PASSTHROUGH
2108 /*
2109 * Requests to host PCI driver service.
2110 */
2111 case VMMR0_DO_PCIRAW_REQ:
2112 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PPCIRAWSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2113 return VERR_INVALID_PARAMETER;
2114 rc = PciRawR0ProcessReq(pGVM, pSession, (PPCIRAWSENDREQ)pReqHdr);
2115 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2116 break;
2117#endif
2118
2119 /*
2120 * NEM requests.
2121 */
2122#ifdef VBOX_WITH_NEM_R0
2123# if defined(RT_ARCH_AMD64) && defined(RT_OS_WINDOWS)
2124 case VMMR0_DO_NEM_INIT_VM:
2125 if (u64Arg || pReqHdr || idCpu != 0)
2126 return VERR_INVALID_PARAMETER;
2127 rc = NEMR0InitVM(pGVM);
2128 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2129 break;
2130
2131 case VMMR0_DO_NEM_INIT_VM_PART_2:
2132 if (u64Arg || pReqHdr || idCpu != 0)
2133 return VERR_INVALID_PARAMETER;
2134 rc = NEMR0InitVMPart2(pGVM);
2135 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2136 break;
2137
2138 case VMMR0_DO_NEM_MAP_PAGES:
2139 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2140 return VERR_INVALID_PARAMETER;
2141 rc = NEMR0MapPages(pGVM, idCpu);
2142 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2143 break;
2144
2145 case VMMR0_DO_NEM_UNMAP_PAGES:
2146 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2147 return VERR_INVALID_PARAMETER;
2148 rc = NEMR0UnmapPages(pGVM, idCpu);
2149 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2150 break;
2151
2152 case VMMR0_DO_NEM_EXPORT_STATE:
2153 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2154 return VERR_INVALID_PARAMETER;
2155 rc = NEMR0ExportState(pGVM, idCpu);
2156 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2157 break;
2158
2159 case VMMR0_DO_NEM_IMPORT_STATE:
2160 if (pReqHdr || idCpu == NIL_VMCPUID)
2161 return VERR_INVALID_PARAMETER;
2162 rc = NEMR0ImportState(pGVM, idCpu, u64Arg);
2163 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2164 break;
2165
2166 case VMMR0_DO_NEM_QUERY_CPU_TICK:
2167 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2168 return VERR_INVALID_PARAMETER;
2169 rc = NEMR0QueryCpuTick(pGVM, idCpu);
2170 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2171 break;
2172
2173 case VMMR0_DO_NEM_RESUME_CPU_TICK_ON_ALL:
2174 if (pReqHdr || idCpu == NIL_VMCPUID)
2175 return VERR_INVALID_PARAMETER;
2176 rc = NEMR0ResumeCpuTickOnAll(pGVM, idCpu, u64Arg);
2177 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2178 break;
2179
2180 case VMMR0_DO_NEM_UPDATE_STATISTICS:
2181 if (u64Arg || pReqHdr)
2182 return VERR_INVALID_PARAMETER;
2183 rc = NEMR0UpdateStatistics(pGVM, idCpu);
2184 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2185 break;
2186
2187# if 1 && defined(DEBUG_bird)
2188 case VMMR0_DO_NEM_EXPERIMENT:
2189 if (pReqHdr)
2190 return VERR_INVALID_PARAMETER;
2191 rc = NEMR0DoExperiment(pGVM, idCpu, u64Arg);
2192 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2193 break;
2194# endif
2195# endif
2196#endif
2197
2198 /*
2199 * IOM requests.
2200 */
2201 case VMMR0_DO_IOM_GROW_IO_PORTS:
2202 {
2203 if (pReqHdr || idCpu != 0)
2204 return VERR_INVALID_PARAMETER;
2205 rc = IOMR0IoPortGrowRegistrationTables(pGVM, u64Arg);
2206 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2207 break;
2208 }
2209
2210 case VMMR0_DO_IOM_GROW_IO_PORT_STATS:
2211 {
2212 if (pReqHdr || idCpu != 0)
2213 return VERR_INVALID_PARAMETER;
2214 rc = IOMR0IoPortGrowStatisticsTable(pGVM, u64Arg);
2215 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2216 break;
2217 }
2218
2219 case VMMR0_DO_IOM_GROW_MMIO_REGS:
2220 {
2221 if (pReqHdr || idCpu != 0)
2222 return VERR_INVALID_PARAMETER;
2223 rc = IOMR0MmioGrowRegistrationTables(pGVM, u64Arg);
2224 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2225 break;
2226 }
2227
2228 case VMMR0_DO_IOM_GROW_MMIO_STATS:
2229 {
2230 if (pReqHdr || idCpu != 0)
2231 return VERR_INVALID_PARAMETER;
2232 rc = IOMR0MmioGrowStatisticsTable(pGVM, u64Arg);
2233 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2234 break;
2235 }
2236
2237 case VMMR0_DO_IOM_SYNC_STATS_INDICES:
2238 {
2239 if (pReqHdr || idCpu != 0)
2240 return VERR_INVALID_PARAMETER;
2241 rc = IOMR0IoPortSyncStatisticsIndices(pGVM);
2242 if (RT_SUCCESS(rc))
2243 rc = IOMR0MmioSyncStatisticsIndices(pGVM);
2244 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2245 break;
2246 }
2247
2248 /*
2249 * DBGF requests.
2250 */
2251#ifdef VBOX_WITH_DBGF_TRACING
2252 case VMMR0_DO_DBGF_TRACER_CREATE:
2253 {
2254 if (!pReqHdr || u64Arg || idCpu != 0)
2255 return VERR_INVALID_PARAMETER;
2256 rc = DBGFR0TracerCreateReqHandler(pGVM, (PDBGFTRACERCREATEREQ)pReqHdr);
2257 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2258 break;
2259 }
2260
2261 case VMMR0_DO_DBGF_TRACER_CALL_REQ_HANDLER:
2262 {
2263 if (!pReqHdr || u64Arg)
2264 return VERR_INVALID_PARAMETER;
2265# if 0 /** @todo */
2266 rc = DBGFR0TracerGenCallReqHandler(pGVM, (PDBGFTRACERGENCALLREQ)pReqHdr, idCpu);
2267# else
2268 rc = VERR_NOT_IMPLEMENTED;
2269# endif
2270 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2271 break;
2272 }
2273#endif
2274
2275 case VMMR0_DO_DBGF_BP_INIT:
2276 {
2277 if (!pReqHdr || u64Arg || idCpu != 0)
2278 return VERR_INVALID_PARAMETER;
2279 rc = DBGFR0BpInitReqHandler(pGVM, (PDBGFBPINITREQ)pReqHdr);
2280 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2281 break;
2282 }
2283
2284 case VMMR0_DO_DBGF_BP_CHUNK_ALLOC:
2285 {
2286 if (!pReqHdr || u64Arg || idCpu != 0)
2287 return VERR_INVALID_PARAMETER;
2288 rc = DBGFR0BpChunkAllocReqHandler(pGVM, (PDBGFBPCHUNKALLOCREQ)pReqHdr);
2289 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2290 break;
2291 }
2292
2293 case VMMR0_DO_DBGF_BP_L2_TBL_CHUNK_ALLOC:
2294 {
2295 if (!pReqHdr || u64Arg || idCpu != 0)
2296 return VERR_INVALID_PARAMETER;
2297 rc = DBGFR0BpL2TblChunkAllocReqHandler(pGVM, (PDBGFBPL2TBLCHUNKALLOCREQ)pReqHdr);
2298 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2299 break;
2300 }
2301
2302 case VMMR0_DO_DBGF_BP_OWNER_INIT:
2303 {
2304 if (!pReqHdr || u64Arg || idCpu != 0)
2305 return VERR_INVALID_PARAMETER;
2306 rc = DBGFR0BpOwnerInitReqHandler(pGVM, (PDBGFBPOWNERINITREQ)pReqHdr);
2307 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2308 break;
2309 }
2310
2311 /*
2312 * For profiling.
2313 */
2314 case VMMR0_DO_NOP:
2315 case VMMR0_DO_SLOW_NOP:
2316 return VINF_SUCCESS;
2317
2318 /*
2319 * For testing Ring-0 APIs invoked in this environment.
2320 */
2321 case VMMR0_DO_TESTS:
2322 /** @todo make new test */
2323 return VINF_SUCCESS;
2324
2325 default:
2326 /*
2327             * We're returning VERR_NOT_SUPPORTED here so we've got something other
2328             * than -1, which the interrupt gate glue code might return.
2329 */
2330 Log(("operation %#x is not supported\n", enmOperation));
2331 return VERR_NOT_SUPPORTED;
2332 }
2333 return rc;
2334}
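
/*
 * Illustrative sketch (not part of the original source): the cases above follow
 * a common convention -- request-packet operations reject a non-zero u64Arg,
 * plain-argument operations reject a request packet, and per-VCPU operations
 * insist on a valid idCpu.  A ring-3 caller reaching this dispatcher through
 * the support driver would look roughly like the snippet below; how the ring-0
 * VM handle (pVMR0) is obtained is assumed here and error handling is omitted.
 *
 * @code
 *      GMMMEMSTATSREQ Req;
 *      Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
 *      Req.Hdr.cbReq    = sizeof(Req);
 *      int rc = SUPR3CallVMMR0Ex(pVMR0, idCpu, VMMR0_DO_GMM_QUERY_MEM_STATS,
 *                                0, &Req.Hdr);
 * @endcode
 */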
2335
2336
2337/**
2338 * Argument for vmmR0EntryExWrapper containing the arguments for VMMR0EntryEx.
2339 */
2340typedef struct VMMR0ENTRYEXARGS
2341{
2342 PGVM pGVM;
2343 VMCPUID idCpu;
2344 VMMR0OPERATION enmOperation;
2345 PSUPVMMR0REQHDR pReq;
2346 uint64_t u64Arg;
2347 PSUPDRVSESSION pSession;
2348} VMMR0ENTRYEXARGS;
2349/** Pointer to a vmmR0EntryExWrapper argument package. */
2350typedef VMMR0ENTRYEXARGS *PVMMR0ENTRYEXARGS;
2351
2352/**
2353 * This is just a longjmp wrapper function for VMMR0EntryEx calls.
2354 *
2355 * @returns VBox status code.
2356 * @param pvArgs The argument package.
2357 */
2358static DECLCALLBACK(int) vmmR0EntryExWrapper(void *pvArgs)
2359{
2360 return vmmR0EntryExWorker(((PVMMR0ENTRYEXARGS)pvArgs)->pGVM,
2361 ((PVMMR0ENTRYEXARGS)pvArgs)->idCpu,
2362 ((PVMMR0ENTRYEXARGS)pvArgs)->enmOperation,
2363 ((PVMMR0ENTRYEXARGS)pvArgs)->pReq,
2364 ((PVMMR0ENTRYEXARGS)pvArgs)->u64Arg,
2365 ((PVMMR0ENTRYEXARGS)pvArgs)->pSession);
2366}
2367
2368
2369/**
2370 * The Ring 0 entry point, called by the support library (SUP).
2371 *
2372 * @returns VBox status code.
2373 * @param pGVM The global (ring-0) VM structure.
2374 * @param pVM The cross context VM structure.
2375 * @param idCpu Virtual CPU ID argument. Must be NIL_VMCPUID if pVM
2376 *                          is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't.
2377 * @param enmOperation Which operation to execute.
2378 * @param pReq Pointer to the SUPVMMR0REQHDR packet. Optional.
2379 * @param u64Arg Some simple constant argument.
2380 * @param pSession The session of the caller.
2381 * @remarks Assume called with interrupts _enabled_.
2382 */
2383VMMR0DECL(int) VMMR0EntryEx(PGVM pGVM, PVMCC pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation,
2384 PSUPVMMR0REQHDR pReq, uint64_t u64Arg, PSUPDRVSESSION pSession)
2385{
2386 /*
2387 * Requests that should only happen on the EMT thread will be
2388 * wrapped in a setjmp so we can assert without causing trouble.
2389 */
2390 if ( pVM != NULL
2391 && pGVM != NULL
2392 && pVM == pGVM /** @todo drop pGVM */
2393 && idCpu < pGVM->cCpus
2394 && pGVM->pSession == pSession
2395 && pGVM->pSelf == pVM)
2396 {
2397 switch (enmOperation)
2398 {
2399 /* These might/will be called before VMMR3Init. */
2400 case VMMR0_DO_GMM_INITIAL_RESERVATION:
2401 case VMMR0_DO_GMM_UPDATE_RESERVATION:
2402 case VMMR0_DO_GMM_ALLOCATE_PAGES:
2403 case VMMR0_DO_GMM_FREE_PAGES:
2404 case VMMR0_DO_GMM_BALLOONED_PAGES:
2405 /* On the mac we might not have a valid jmp buf, so check these as well. */
2406 case VMMR0_DO_VMMR0_INIT:
2407 case VMMR0_DO_VMMR0_TERM:
2408
2409 case VMMR0_DO_PDM_DEVICE_CREATE:
2410 case VMMR0_DO_PDM_DEVICE_GEN_CALL:
2411 case VMMR0_DO_IOM_GROW_IO_PORTS:
2412 case VMMR0_DO_IOM_GROW_IO_PORT_STATS:
2413 case VMMR0_DO_DBGF_BP_INIT:
2414 case VMMR0_DO_DBGF_BP_CHUNK_ALLOC:
2415 case VMMR0_DO_DBGF_BP_L2_TBL_CHUNK_ALLOC:
2416 {
2417 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2418 RTNATIVETHREAD hNativeThread = RTThreadNativeSelf();
2419 if (RT_LIKELY( pGVCpu->hEMT == hNativeThread
2420 && pGVCpu->hNativeThreadR0 == hNativeThread))
2421 {
2422 if (!pGVCpu->vmm.s.CallRing3JmpBufR0.pvSavedStack)
2423 break;
2424
2425 /** @todo validate this EMT claim... GVM knows. */
2426 VMMR0ENTRYEXARGS Args;
2427 Args.pGVM = pGVM;
2428 Args.idCpu = idCpu;
2429 Args.enmOperation = enmOperation;
2430 Args.pReq = pReq;
2431 Args.u64Arg = u64Arg;
2432 Args.pSession = pSession;
2433 return vmmR0CallRing3SetJmpEx(&pGVCpu->vmm.s.CallRing3JmpBufR0, vmmR0EntryExWrapper, &Args);
2434 }
2435 return VERR_VM_THREAD_NOT_EMT;
2436 }
2437
2438 default:
2439 case VMMR0_DO_PGM_POOL_GROW:
2440 break;
2441 }
2442 }
2443 return vmmR0EntryExWorker(pGVM, idCpu, enmOperation, pReq, u64Arg, pSession);
2444}
2445
2446
2447/**
2448 * Checks whether we've armed the ring-0 long jump machinery.
2449 *
2450 * @returns @c true / @c false
2451 * @param pVCpu The cross context virtual CPU structure.
2452 * @thread EMT
2453 * @sa VMMIsLongJumpArmed
2454 */
2455VMMR0_INT_DECL(bool) VMMR0IsLongJumpArmed(PVMCPUCC pVCpu)
2456{
2457#ifdef RT_ARCH_X86
2458 return pVCpu->vmm.s.CallRing3JmpBufR0.eip
2459 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
2460#else
2461 return pVCpu->vmm.s.CallRing3JmpBufR0.rip
2462 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
2463#endif
2464}
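
/*
 * Usage sketch (assumed typical caller pattern, not from the original source):
 * ring-0 code that may need to leave for ring-3 can assert that the longjmp
 * machinery is armed before attempting the transition, e.g.:
 *
 * @code
 *      Assert(VMMR0IsLongJumpArmed(pVCpu));
 *      rc = VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VMM_LOGGER_FLUSH, 0);
 * @endcode
 */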
2465
2466
2467/**
2468 * Checks whether we've done a ring-3 long jump.
2469 *
2470 * @returns @c true / @c false
2471 * @param pVCpu The cross context virtual CPU structure.
2472 * @thread EMT
2473 */
2474VMMR0_INT_DECL(bool) VMMR0IsInRing3LongJump(PVMCPUCC pVCpu)
2475{
2476 return pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
2477}
2478
2479
2480/**
2481 * Internal R0 logger worker: Flush logger.
2482 *
2483 * @param pLogger The logger instance to flush.
2484 * @remark This function must be exported!
2485 */
2486VMMR0DECL(void) vmmR0LoggerFlush(PRTLOGGER pLogger)
2487{
2488#ifdef LOG_ENABLED
2489 /*
2490 * Convert the pLogger into a VM handle and 'call' back to Ring-3.
2490      * (This code is a bit paranoid.)
2492 */
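    /* The RTLOGGER instance is embedded in VMMR0LOGGER at offset 'Logger', so
       subtracting that offset recovers the containing ring-0 logger structure
       (container-of pattern). */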
2493 PVMMR0LOGGER pR0Logger = (PVMMR0LOGGER)((uintptr_t)pLogger - RT_UOFFSETOF(VMMR0LOGGER, Logger));
2494 if ( !VALID_PTR(pR0Logger)
2495 || !VALID_PTR(pR0Logger + 1)
2496 || pLogger->u32Magic != RTLOGGER_MAGIC)
2497 {
2498# ifdef DEBUG
2499 SUPR0Printf("vmmR0LoggerFlush: pLogger=%p!\n", pLogger);
2500# endif
2501 return;
2502 }
2503 if (pR0Logger->fFlushingDisabled)
2504 return; /* quietly */
2505
2506 PVMCC pVM = pR0Logger->pVM;
2507 if ( !VALID_PTR(pVM)
2508 || pVM->pSelf != pVM)
2509 {
2510# ifdef DEBUG
2511 SUPR0Printf("vmmR0LoggerFlush: pVM=%p! pSelf=%p! pLogger=%p\n", pVM, pVM->pSelf, pLogger);
2512# endif
2513 return;
2514 }
2515
2516 PVMCPUCC pVCpu = VMMGetCpu(pVM);
2517 if (pVCpu)
2518 {
2519 /*
2520 * Check that the jump buffer is armed.
2521 */
2522# ifdef RT_ARCH_X86
2523 if ( !pVCpu->vmm.s.CallRing3JmpBufR0.eip
2524 || pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2525# else
2526 if ( !pVCpu->vmm.s.CallRing3JmpBufR0.rip
2527 || pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2528# endif
2529 {
2530# ifdef DEBUG
2531 SUPR0Printf("vmmR0LoggerFlush: Jump buffer isn't armed!\n");
2532# endif
2533 return;
2534 }
2535 VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VMM_LOGGER_FLUSH, 0);
2536 }
2537# ifdef DEBUG
2538 else
2539 SUPR0Printf("vmmR0LoggerFlush: invalid VCPU context!\n");
2540# endif
2541#else
2542 NOREF(pLogger);
2543#endif /* LOG_ENABLED */
2544}
2545
2546#ifdef LOG_ENABLED
2547
2548/**
2549 * Disables flushing of the ring-0 debug log.
2550 *
2551 * @param pVCpu The cross context virtual CPU structure.
2552 */
2553VMMR0_INT_DECL(void) VMMR0LogFlushDisable(PVMCPUCC pVCpu)
2554{
2555 if (pVCpu->vmm.s.pR0LoggerR0)
2556 pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled = true;
2557 if (pVCpu->vmm.s.pR0RelLoggerR0)
2558 pVCpu->vmm.s.pR0RelLoggerR0->fFlushingDisabled = true;
2559}
2560
2561
2562/**
2563 * Enables flushing of the ring-0 debug log.
2564 *
2565 * @param pVCpu The cross context virtual CPU structure.
2566 */
2567VMMR0_INT_DECL(void) VMMR0LogFlushEnable(PVMCPUCC pVCpu)
2568{
2569 if (pVCpu->vmm.s.pR0LoggerR0)
2570 pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled = false;
2571 if (pVCpu->vmm.s.pR0RelLoggerR0)
2572 pVCpu->vmm.s.pR0RelLoggerR0->fFlushingDisabled = false;
2573}
2574
2575
2576/**
2577 * Checks if log flushing is disabled or not.
2578 *
2579 * @param pVCpu The cross context virtual CPU structure.
2580 */
2581VMMR0_INT_DECL(bool) VMMR0IsLogFlushDisabled(PVMCPUCC pVCpu)
2582{
2583 if (pVCpu->vmm.s.pR0LoggerR0)
2584 return pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled;
2585 if (pVCpu->vmm.s.pR0RelLoggerR0)
2586 return pVCpu->vmm.s.pR0RelLoggerR0->fFlushingDisabled;
2587 return true;
2588}
2589
2590#endif /* LOG_ENABLED */
2591
2592/*
2593 * Override RTLogRelGetDefaultInstanceEx so we can do LogRel to VBox.log from EMTs in ring-0.
2594 */
2595DECLEXPORT(PRTLOGGER) RTLogRelGetDefaultInstanceEx(uint32_t fFlagsAndGroup)
2596{
2597 PGVMCPU pGVCpu = GVMMR0GetGVCpuByEMT(NIL_RTNATIVETHREAD);
2598 if (pGVCpu)
2599 {
2600 PVMCPUCC pVCpu = pGVCpu;
2601 if (RT_VALID_PTR(pVCpu))
2602 {
2603 PVMMR0LOGGER pVmmLogger = pVCpu->vmm.s.pR0RelLoggerR0;
2604 if (RT_VALID_PTR(pVmmLogger))
2605 {
2606 if ( pVmmLogger->fCreated
2607 && pVmmLogger->pVM == pGVCpu->pGVM)
2608 {
2609 if (pVmmLogger->Logger.fFlags & RTLOGFLAGS_DISABLED)
2610 return NULL;
2611 uint16_t const fFlags = RT_LO_U16(fFlagsAndGroup);
2612 uint16_t const iGroup = RT_HI_U16(fFlagsAndGroup);
2613 if ( iGroup != UINT16_MAX
2614 && ( ( pVmmLogger->Logger.afGroups[iGroup < pVmmLogger->Logger.cGroups ? iGroup : 0]
2615 & (fFlags | (uint32_t)RTLOGGRPFLAGS_ENABLED))
2616 != (fFlags | (uint32_t)RTLOGGRPFLAGS_ENABLED)))
2617 return NULL;
2618 return &pVmmLogger->Logger;
2619 }
2620 }
2621 }
2622 }
2623 return SUPR0GetDefaultLogRelInstanceEx(fFlagsAndGroup);
2624}
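
/*
 * Note (derived from the RT_LO_U16/RT_HI_U16 use above; sketch only): the
 * fFlagsAndGroup argument packs the statement's group flags into the low
 * 16 bits and the log group index into the high 16 bits.  A caller resolving
 * the logger for group iGroup with flags fFlags would pass something like:
 *
 * @code
 *      PRTLOGGER pLogger = RTLogRelGetDefaultInstanceEx(RT_MAKE_U32(fFlags, iGroup));
 * @endcode
 */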
2625
2626
2627/*
2628 * Jump back to ring-3 if we're the EMT and the longjmp is armed.
2629 *
2630 * @returns true if the breakpoint should be hit, false if it should be ignored.
2631 */
2632DECLEXPORT(bool) RTCALL RTAssertShouldPanic(void)
2633{
2634#if 0
2635 return true;
2636#else
2637 PVMCC pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
2638 if (pVM)
2639 {
2640 PVMCPUCC pVCpu = VMMGetCpu(pVM);
2641
2642 if (pVCpu)
2643 {
2644# ifdef RT_ARCH_X86
2645 if ( pVCpu->vmm.s.CallRing3JmpBufR0.eip
2646 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2647# else
2648 if ( pVCpu->vmm.s.CallRing3JmpBufR0.rip
2649 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2650# endif
2651 {
2652 int rc = VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VM_R0_ASSERTION, 0);
2653 return RT_FAILURE_NP(rc);
2654 }
2655 }
2656 }
2657# ifdef RT_OS_LINUX
2658 return true;
2659# else
2660 return false;
2661# endif
2662#endif
2663}
2664
2665
2666/*
2667 * Override this so we can push it up to ring-3.
2668 */
2669DECLEXPORT(void) RTCALL RTAssertMsg1Weak(const char *pszExpr, unsigned uLine, const char *pszFile, const char *pszFunction)
2670{
2671 /*
2672 * To the log.
2673 */
2674 LogAlways(("\n!!R0-Assertion Failed!!\n"
2675 "Expression: %s\n"
2676 "Location : %s(%d) %s\n",
2677 pszExpr, pszFile, uLine, pszFunction));
2678
2679 /*
2680 * To the global VMM buffer.
2681 */
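    /* Note: the %.*s width below caps the expression text at roughly three
       quarters of the buffer so the location line that follows still fits. */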
2682 PVMCC pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
2683 if (pVM)
2684 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg1, sizeof(pVM->vmm.s.szRing0AssertMsg1),
2685 "\n!!R0-Assertion Failed!!\n"
2686 "Expression: %.*s\n"
2687 "Location : %s(%d) %s\n",
2688 sizeof(pVM->vmm.s.szRing0AssertMsg1) / 4 * 3, pszExpr,
2689 pszFile, uLine, pszFunction);
2690
2691 /*
2692 * Continue the normal way.
2693 */
2694 RTAssertMsg1(pszExpr, uLine, pszFile, pszFunction);
2695}
2696
2697
2698/**
2699 * Callback for RTLogFormatV which writes to the ring-3 log port.
2700 * See PFNLOGOUTPUT() for details.
2701 */
2702static DECLCALLBACK(size_t) rtLogOutput(void *pv, const char *pachChars, size_t cbChars)
2703{
2704 for (size_t i = 0; i < cbChars; i++)
2705 {
2706 LogAlways(("%c", pachChars[i])); NOREF(pachChars);
2707 }
2708
2709 NOREF(pv);
2710 return cbChars;
2711}
2712
2713
2714/*
2715 * Override this so we can push it up to ring-3.
2716 */
2717DECLEXPORT(void) RTCALL RTAssertMsg2WeakV(const char *pszFormat, va_list va)
2718{
2719 va_list vaCopy;
2720
2721 /*
2722 * Push the message to the loggers.
2723 */
2724 PRTLOGGER pLog = RTLogGetDefaultInstance(); /* Don't initialize it here... */
2725 if (pLog)
2726 {
2727 va_copy(vaCopy, va);
2728 RTLogFormatV(rtLogOutput, pLog, pszFormat, vaCopy);
2729 va_end(vaCopy);
2730 }
2731 pLog = RTLogRelGetDefaultInstance();
2732 if (pLog)
2733 {
2734 va_copy(vaCopy, va);
2735 RTLogFormatV(rtLogOutput, pLog, pszFormat, vaCopy);
2736 va_end(vaCopy);
2737 }
2738
2739 /*
2740 * Push it to the global VMM buffer.
2741 */
2742 PVMCC pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
2743 if (pVM)
2744 {
2745 va_copy(vaCopy, va);
2746 RTStrPrintfV(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2), pszFormat, vaCopy);
2747 va_end(vaCopy);
2748 }
2749
2750 /*
2751 * Continue the normal way.
2752 */
2753 RTAssertMsg2V(pszFormat, va);
2754}
2755