VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/VMMR0.cpp@87466

Last change on this file since 87466 was 87107, checked in by vboxsync, 4 years ago

VMM/DBGF: Started implementing the breakpoint owner management, bugref:9837

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 99.2 KB
1/* $Id: VMMR0.cpp 87107 2020-12-19 15:10:18Z vboxsync $ */
2/** @file
3 * VMM - Host Context Ring 0.
4 */
5
6/*
7 * Copyright (C) 2006-2020 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_VMM
23#include <VBox/vmm/vmm.h>
24#include <VBox/sup.h>
25#include <VBox/vmm/iom.h>
26#include <VBox/vmm/trpm.h>
27#include <VBox/vmm/cpum.h>
28#include <VBox/vmm/pdmapi.h>
29#include <VBox/vmm/pgm.h>
30#ifdef VBOX_WITH_NEM_R0
31# include <VBox/vmm/nem.h>
32#endif
33#include <VBox/vmm/em.h>
34#include <VBox/vmm/stam.h>
35#include <VBox/vmm/tm.h>
36#include "VMMInternal.h"
37#include <VBox/vmm/vmcc.h>
38#include <VBox/vmm/gvm.h>
39#ifdef VBOX_WITH_PCI_PASSTHROUGH
40# include <VBox/vmm/pdmpci.h>
41#endif
42#include <VBox/vmm/apic.h>
43
44#include <VBox/vmm/gvmm.h>
45#include <VBox/vmm/gmm.h>
46#include <VBox/vmm/gim.h>
47#include <VBox/intnet.h>
48#include <VBox/vmm/hm.h>
49#include <VBox/param.h>
50#include <VBox/err.h>
51#include <VBox/version.h>
52#include <VBox/log.h>
53
54#include <iprt/asm-amd64-x86.h>
55#include <iprt/assert.h>
56#include <iprt/crc.h>
57#include <iprt/mp.h>
58#include <iprt/once.h>
59#include <iprt/stdarg.h>
60#include <iprt/string.h>
61#include <iprt/thread.h>
62#include <iprt/timer.h>
63#include <iprt/time.h>
64
65#include "dtrace/VBoxVMM.h"
66
67
68#if defined(_MSC_VER) && defined(RT_ARCH_AMD64) /** @todo check this with VC7! */
69# pragma intrinsic(_AddressOfReturnAddress)
70#endif
71
72#if defined(RT_OS_DARWIN) && ARCH_BITS == 32
73# error "32-bit darwin is no longer supported. Go back to 4.3 or earlier!"
74#endif
75
76
77
78/*********************************************************************************************************************************
79* Defined Constants And Macros *
80*********************************************************************************************************************************/
81/** @def VMM_CHECK_SMAP_SETUP
82 * SMAP check setup. */
83/** @def VMM_CHECK_SMAP_CHECK
84 * Checks that the AC flag is set if SMAP is enabled. If AC is not set,
85 * it will be logged and @a a_BadExpr is executed. */
86/** @def VMM_CHECK_SMAP_CHECK2
87 * Checks that the AC flag is set if SMAP is enabled. If AC is not set, it will
88 * be logged, written to the VM's assertion text buffer, and @a a_BadExpr is
89 * executed. */
90#if (defined(VBOX_STRICT) || 1) && !defined(VBOX_WITH_RAM_IN_KERNEL)
91# define VMM_CHECK_SMAP_SETUP() uint32_t const fKernelFeatures = SUPR0GetKernelFeatures()
92# define VMM_CHECK_SMAP_CHECK(a_BadExpr) \
93 do { \
94 if (fKernelFeatures & SUPKERNELFEATURES_SMAP) \
95 { \
96 RTCCUINTREG fEflCheck = ASMGetFlags(); \
97 if (RT_LIKELY(fEflCheck & X86_EFL_AC)) \
98 { /* likely */ } \
99 else \
100 { \
101 SUPR0Printf("%s, line %d: EFLAGS.AC is clear! (%#x)\n", __FUNCTION__, __LINE__, (uint32_t)fEflCheck); \
102 a_BadExpr; \
103 } \
104 } \
105 } while (0)
106# define VMM_CHECK_SMAP_CHECK2(a_pGVM, a_BadExpr) \
107 do { \
108 if (fKernelFeatures & SUPKERNELFEATURES_SMAP) \
109 { \
110 RTCCUINTREG fEflCheck = ASMGetFlags(); \
111 if (RT_LIKELY(fEflCheck & X86_EFL_AC)) \
112 { /* likely */ } \
113 else if (a_pGVM) \
114 { \
115 SUPR0BadContext((a_pGVM)->pSession, __FILE__, __LINE__, "EFLAGS.AC is zero!"); \
116 RTStrPrintf((a_pGVM)->vmm.s.szRing0AssertMsg1, sizeof((a_pGVM)->vmm.s.szRing0AssertMsg1), \
117 "%s, line %d: EFLAGS.AC is clear! (%#x)\n", __FUNCTION__, __LINE__, (uint32_t)fEflCheck); \
118 a_BadExpr; \
119 } \
120 else \
121 { \
122 SUPR0Printf("%s, line %d: EFLAGS.AC is clear! (%#x)\n", __FUNCTION__, __LINE__, (uint32_t)fEflCheck); \
123 a_BadExpr; \
124 } \
125 } \
126 } while (0)
127#else
128# define VMM_CHECK_SMAP_SETUP() uint32_t const fKernelFeatures = 0
129# define VMM_CHECK_SMAP_CHECK(a_BadExpr) NOREF(fKernelFeatures)
130# define VMM_CHECK_SMAP_CHECK2(a_pGVM, a_BadExpr) NOREF(fKernelFeatures)
131#endif
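/* Illustrative sketch, not part of the original file: a ring-0 worker would
   pair one VMM_CHECK_SMAP_SETUP() with a check after each call that could
   clobber EFLAGS.AC. GVMMR0SomeCall() is a hypothetical placeholder; the
   status codes are ones this file actually uses:

       int vmmR0SomeWorker(PGVM pGVM)
       {
           VMM_CHECK_SMAP_SETUP();
           VMM_CHECK_SMAP_CHECK(return VERR_VMM_SMAP_BUT_AC_CLEAR);
           int rc = GVMMR0SomeCall(pGVM);                  // hypothetical callee
           VMM_CHECK_SMAP_CHECK2(pGVM, rc = VERR_VMM_RING0_ASSERTION);
           return rc;
       }
*/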
132
133
134/*********************************************************************************************************************************
135* Internal Functions *
136*********************************************************************************************************************************/
137RT_C_DECLS_BEGIN
138#if defined(RT_ARCH_X86) && (defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD))
139extern uint64_t __udivdi3(uint64_t, uint64_t);
140extern uint64_t __umoddi3(uint64_t, uint64_t);
141#endif
142RT_C_DECLS_END
143
144
145/*********************************************************************************************************************************
146* Global Variables *
147*********************************************************************************************************************************/
148/** Drag in necessary library bits.
149 * The runtime lives here (in VMMR0.r0) and VBoxDD*R0.r0 links against us. */
150struct CLANG11WEIRDNOTHROW { PFNRT pfn; } g_VMMR0Deps[] =
151{
152 { (PFNRT)RTCrc32 },
153 { (PFNRT)RTOnce },
154#if defined(RT_ARCH_X86) && (defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD))
155 { (PFNRT)__udivdi3 },
156 { (PFNRT)__umoddi3 },
157#endif
158 { NULL }
159};
160
161#ifdef RT_OS_SOLARIS
162/* Dependency information for the native Solaris loader. */
163extern "C" { char _depends_on[] = "vboxdrv"; }
164#endif
165
166
167/**
168 * Initialize the module.
169 * This is called when we're first loaded.
170 *
171 * @returns 0 on success.
172 * @returns VBox status on failure.
173 * @param hMod Image handle for use in APIs.
174 */
175DECLEXPORT(int) ModuleInit(void *hMod)
176{
177 VMM_CHECK_SMAP_SETUP();
178 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
179
180#ifdef VBOX_WITH_DTRACE_R0
181 /*
182 * The first thing to do is register the static tracepoints.
183 * (Deregistration is automatic.)
184 */
185 int rc2 = SUPR0TracerRegisterModule(hMod, &g_VTGObjHeader);
186 if (RT_FAILURE(rc2))
187 return rc2;
188#endif
189 LogFlow(("ModuleInit:\n"));
190
191#ifdef VBOX_WITH_64ON32_CMOS_DEBUG
192 /*
193 * Display the CMOS debug code.
194 */
195 ASMOutU8(0x72, 0x03);
196 uint8_t bDebugCode = ASMInU8(0x73);
197 LogRel(("CMOS Debug Code: %#x (%d)\n", bDebugCode, bDebugCode));
198 RTLogComPrintf("CMOS Debug Code: %#x (%d)\n", bDebugCode, bDebugCode);
199#endif
200
201 /*
202 * Initialize the VMM, GVMM, GMM, HM, PGM (Darwin) and INTNET.
203 */
204 int rc = vmmInitFormatTypes();
205 if (RT_SUCCESS(rc))
206 {
207 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
208 rc = GVMMR0Init();
209 if (RT_SUCCESS(rc))
210 {
211 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
212 rc = GMMR0Init();
213 if (RT_SUCCESS(rc))
214 {
215 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
216 rc = HMR0Init();
217 if (RT_SUCCESS(rc))
218 {
219 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
220
221 PDMR0Init(hMod);
222 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
223
224 rc = PGMRegisterStringFormatTypes();
225 if (RT_SUCCESS(rc))
226 {
227 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
228#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
229 rc = PGMR0DynMapInit();
230#endif
231 if (RT_SUCCESS(rc))
232 {
233 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
234 rc = IntNetR0Init();
235 if (RT_SUCCESS(rc))
236 {
237#ifdef VBOX_WITH_PCI_PASSTHROUGH
238 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
239 rc = PciRawR0Init();
240#endif
241 if (RT_SUCCESS(rc))
242 {
243 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
244 rc = CPUMR0ModuleInit();
245 if (RT_SUCCESS(rc))
246 {
247#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
248 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
249 rc = vmmR0TripleFaultHackInit();
250 if (RT_SUCCESS(rc))
251#endif
252 {
253 VMM_CHECK_SMAP_CHECK(rc = VERR_VMM_SMAP_BUT_AC_CLEAR);
254 if (RT_SUCCESS(rc))
255 {
256 LogFlow(("ModuleInit: returns success\n"));
257 return VINF_SUCCESS;
258 }
259 }
260
261 /*
262 * Bail out.
263 */
264#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
265 vmmR0TripleFaultHackTerm();
266#endif
267 }
268 else
269 LogRel(("ModuleInit: CPUMR0ModuleInit -> %Rrc\n", rc));
270#ifdef VBOX_WITH_PCI_PASSTHROUGH
271 PciRawR0Term();
272#endif
273 }
274 else
275 LogRel(("ModuleInit: PciRawR0Init -> %Rrc\n", rc));
276 IntNetR0Term();
277 }
278 else
279 LogRel(("ModuleInit: IntNetR0Init -> %Rrc\n", rc));
280#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
281 PGMR0DynMapTerm();
282#endif
283 }
284 else
285 LogRel(("ModuleInit: PGMR0DynMapInit -> %Rrc\n", rc));
286 PGMDeregisterStringFormatTypes();
287 }
288 else
289 LogRel(("ModuleInit: PGMRegisterStringFormatTypes -> %Rrc\n", rc));
290 HMR0Term();
291 }
292 else
293 LogRel(("ModuleInit: HMR0Init -> %Rrc\n", rc));
294 GMMR0Term();
295 }
296 else
297 LogRel(("ModuleInit: GMMR0Init -> %Rrc\n", rc));
298 GVMMR0Term();
299 }
300 else
301 LogRel(("ModuleInit: GVMMR0Init -> %Rrc\n", rc));
302 vmmTermFormatTypes();
303 }
304 else
305 LogRel(("ModuleInit: vmmInitFormatTypes -> %Rrc\n", rc));
306
307 LogFlow(("ModuleInit: failed %Rrc\n", rc));
308 return rc;
309}
310
311
312/**
313 * Terminate the module.
314 * This is called when we're finally unloaded.
315 *
316 * @param hMod Image handle for use in APIs.
317 */
318DECLEXPORT(void) ModuleTerm(void *hMod)
319{
320 NOREF(hMod);
321 LogFlow(("ModuleTerm:\n"));
322
323 /*
324 * Terminate the CPUM module (Local APIC cleanup).
325 */
326 CPUMR0ModuleTerm();
327
328 /*
329 * Terminate the internal network service.
330 */
331 IntNetR0Term();
332
333 /*
334 * PGM (Darwin), HM and PciRaw global cleanup.
335 */
336#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
337 PGMR0DynMapTerm();
338#endif
339#ifdef VBOX_WITH_PCI_PASSTHROUGH
340 PciRawR0Term();
341#endif
342 PGMDeregisterStringFormatTypes();
343 HMR0Term();
344#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
345 vmmR0TripleFaultHackTerm();
346#endif
347
348 /*
349 * Destroy the GMM and GVMM instances.
350 */
351 GMMR0Term();
352 GVMMR0Term();
353
354 vmmTermFormatTypes();
355
356 LogFlow(("ModuleTerm: returns\n"));
357}
358
359
360/**
361 * Initiates the R0 driver for a particular VM instance.
362 *
363 * @returns VBox status code.
364 *
365 * @param pGVM The global (ring-0) VM structure.
366 * @param uSvnRev The SVN revision of the ring-3 part.
367 * @param uBuildType Build type indicator.
368 * @thread EMT(0)
369 */
370static int vmmR0InitVM(PGVM pGVM, uint32_t uSvnRev, uint32_t uBuildType)
371{
372 VMM_CHECK_SMAP_SETUP();
373 VMM_CHECK_SMAP_CHECK(return VERR_VMM_SMAP_BUT_AC_CLEAR);
374
375 /*
376 * Match the SVN revisions and build type.
377 */
378 if (uSvnRev != VMMGetSvnRev())
379 {
380 LogRel(("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev()));
381 SUPR0Printf("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev());
382 return VERR_VMM_R0_VERSION_MISMATCH;
383 }
384 if (uBuildType != vmmGetBuildType())
385 {
386 LogRel(("VMMR0InitVM: Build type mismatch, r3=%#x r0=%#x\n", uBuildType, vmmGetBuildType()));
387 SUPR0Printf("VMMR0InitVM: Build type mismatch, r3=%#x r0=%#x\n", uBuildType, vmmGetBuildType());
388 return VERR_VMM_R0_VERSION_MISMATCH;
389 }
390
391 int rc = GVMMR0ValidateGVMandEMT(pGVM, 0 /*idCpu*/);
392 if (RT_FAILURE(rc))
393 return rc;
394
395#ifdef LOG_ENABLED
396 /*
397 * Register the EMT R0 logger instance for VCPU 0.
398 */
399 PVMCPUCC pVCpu = VMCC_GET_CPU_0(pGVM);
400
401 PVMMR0LOGGER pR0Logger = pVCpu->vmm.s.pR0LoggerR0;
402 if (pR0Logger)
403 {
404# if 0 /* testing of the logger. */
405 LogCom(("vmmR0InitVM: before %p\n", RTLogDefaultInstance()));
406 LogCom(("vmmR0InitVM: pfnFlush=%p actual=%p\n", pR0Logger->Logger.pfnFlush, vmmR0LoggerFlush));
407 LogCom(("vmmR0InitVM: pfnLogger=%p actual=%p\n", pR0Logger->Logger.pfnLogger, vmmR0LoggerWrapper));
408 LogCom(("vmmR0InitVM: offScratch=%d fFlags=%#x fDestFlags=%#x\n", pR0Logger->Logger.offScratch, pR0Logger->Logger.fFlags, pR0Logger->Logger.fDestFlags));
409
410 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pGVM->pSession);
411 LogCom(("vmmR0InitVM: after %p reg\n", RTLogDefaultInstance()));
412 RTLogSetDefaultInstanceThread(NULL, pGVM->pSession);
413 LogCom(("vmmR0InitVM: after %p dereg\n", RTLogDefaultInstance()));
414
415 pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
416 LogCom(("vmmR0InitVM: returned successfully from direct logger call.\n"));
417 pR0Logger->Logger.pfnFlush(&pR0Logger->Logger);
418 LogCom(("vmmR0InitVM: returned successfully from direct flush call.\n"));
419
420 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pGVM->pSession);
421 LogCom(("vmmR0InitVM: after %p reg2\n", RTLogDefaultInstance()));
422 pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
423 LogCom(("vmmR0InitVM: returned successfully from direct logger call (2). offScratch=%d\n", pR0Logger->Logger.offScratch));
424 RTLogSetDefaultInstanceThread(NULL, pGVM->pSession);
425 LogCom(("vmmR0InitVM: after %p dereg2\n", RTLogDefaultInstance()));
426
427 RTLogLoggerEx(&pR0Logger->Logger, 0, ~0U, "hello ring-0 logger (RTLogLoggerEx)\n");
428 LogCom(("vmmR0InitVM: RTLogLoggerEx returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));
429
430 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pGVM->pSession);
431 RTLogPrintf("hello ring-0 logger (RTLogPrintf)\n");
432 LogCom(("vmmR0InitVM: RTLogPrintf returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));
433# endif
434 Log(("Switching to per-thread logging instance %p (key=%p)\n", &pR0Logger->Logger, pGVM->pSession));
435 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pGVM->pSession);
436 pR0Logger->fRegistered = true;
437 }
438#endif /* LOG_ENABLED */
439SUPR0Printf("VMMR0InitVM: eflags=%x fKernelFeatures=%#x (SUPKERNELFEATURES_SMAP=%d)\n",
440 ASMGetFlags(), fKernelFeatures, RT_BOOL(fKernelFeatures & SUPKERNELFEATURES_SMAP));
441
442 /*
443 * Check if the host supports high resolution timers or not.
444 */
445 if ( pGVM->vmm.s.fUsePeriodicPreemptionTimers
446 && !RTTimerCanDoHighResolution())
447 pGVM->vmm.s.fUsePeriodicPreemptionTimers = false;
448
449 /*
450 * Initialize the per VM data for GVMM and GMM.
451 */
452 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
453 rc = GVMMR0InitVM(pGVM);
454 if (RT_SUCCESS(rc))
455 {
456 /*
457 * Init HM, CPUM and PGM (Darwin only).
458 */
459 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
460 rc = HMR0InitVM(pGVM);
461 if (RT_SUCCESS(rc))
462 VMM_CHECK_SMAP_CHECK2(pGVM, rc = VERR_VMM_RING0_ASSERTION); /* CPUMR0InitVM will otherwise panic the host */
463 if (RT_SUCCESS(rc))
464 {
465 rc = CPUMR0InitVM(pGVM);
466 if (RT_SUCCESS(rc))
467 {
468 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
469 rc = PGMR0InitVM(pGVM);
470 if (RT_SUCCESS(rc))
471 {
472 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
473 rc = EMR0InitVM(pGVM);
474 if (RT_SUCCESS(rc))
475 {
476 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
477#ifdef VBOX_WITH_PCI_PASSTHROUGH
478 rc = PciRawR0InitVM(pGVM);
479#endif
480 if (RT_SUCCESS(rc))
481 {
482 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
483 rc = GIMR0InitVM(pGVM);
484 if (RT_SUCCESS(rc))
485 {
486 VMM_CHECK_SMAP_CHECK2(pGVM, rc = VERR_VMM_RING0_ASSERTION);
487 if (RT_SUCCESS(rc))
488 {
489 GVMMR0DoneInitVM(pGVM);
490
491 /*
492 * Collect a bit of info for the VM release log.
493 */
494 pGVM->vmm.s.fIsPreemptPendingApiTrusty = RTThreadPreemptIsPendingTrusty();
495 pGVM->vmm.s.fIsPreemptPossible = RTThreadPreemptIsPossible();
496
497 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
498 return rc;
499 }
500
501 /* bail out */
502 GIMR0TermVM(pGVM);
503 }
504#ifdef VBOX_WITH_PCI_PASSTHROUGH
505 PciRawR0TermVM(pGVM);
506#endif
507 }
508 }
509 }
510 }
511 HMR0TermVM(pGVM);
512 }
513 }
514
515 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pGVM->pSession);
516 return rc;
517}
518
519
520/**
521 * Does EMT specific VM initialization.
522 *
523 * @returns VBox status code.
524 * @param pGVM The ring-0 VM structure.
525 * @param idCpu The EMT that's calling.
526 */
527static int vmmR0InitVMEmt(PGVM pGVM, VMCPUID idCpu)
528{
529 /* Paranoia (caller checked these already). */
530 AssertReturn(idCpu < pGVM->cCpus, VERR_INVALID_CPU_ID);
531 AssertReturn(pGVM->aCpus[idCpu].hEMT == RTThreadNativeSelf(), VERR_INVALID_CPU_ID);
532
533#ifdef LOG_ENABLED
534 /*
535 * Registration of ring 0 loggers.
536 */
537 PVMCPUCC pVCpu = &pGVM->aCpus[idCpu];
538 PVMMR0LOGGER pR0Logger = pVCpu->vmm.s.pR0LoggerR0;
539 if ( pR0Logger
540 && !pR0Logger->fRegistered)
541 {
542 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pGVM->pSession);
543 pR0Logger->fRegistered = true;
544 }
545#endif
546
547 return VINF_SUCCESS;
548}
549
550
551
552/**
553 * Terminates the R0 bits for a particular VM instance.
554 *
555 * This is normally called by ring-3 as part of the VM termination process, but
556 * may alternatively be called during the support driver session cleanup when
557 * the VM object is destroyed (see GVMM).
558 *
559 * @returns VBox status code.
560 *
561 * @param pGVM The global (ring-0) VM structure.
562 * @param idCpu Set to 0 if EMT(0) or NIL_VMCPUID if session cleanup
563 * thread.
564 * @thread EMT(0) or session clean up thread.
565 */
566VMMR0_INT_DECL(int) VMMR0TermVM(PGVM pGVM, VMCPUID idCpu)
567{
568 /*
569 * Check EMT(0) claim if we're called from userland.
570 */
571 if (idCpu != NIL_VMCPUID)
572 {
573 AssertReturn(idCpu == 0, VERR_INVALID_CPU_ID);
574 int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
575 if (RT_FAILURE(rc))
576 return rc;
577 }
578
579#ifdef VBOX_WITH_PCI_PASSTHROUGH
580 PciRawR0TermVM(pGVM);
581#endif
582
583 /*
584 * Tell GVMM what we're up to and check that we only do this once.
585 */
586 if (GVMMR0DoingTermVM(pGVM))
587 {
588 GIMR0TermVM(pGVM);
589
590 /** @todo I wish to call PGMR0PhysFlushHandyPages(pGVM, &pGVM->aCpus[idCpu])
591 * here to make sure we don't leak any shared pages if we crash... */
592#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
593 PGMR0DynMapTermVM(pGVM);
594#endif
595 HMR0TermVM(pGVM);
596 }
597
598 /*
599 * Deregister the logger.
600 */
601 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pGVM->pSession);
602 return VINF_SUCCESS;
603}
604
605
606/**
607 * An interrupt or unhalt force flag is set, deal with it.
608 *
609 * @returns VINF_SUCCESS (or VINF_EM_HALT).
610 * @param pVCpu The cross context virtual CPU structure.
611 * @param uMWait Result from EMMonitorWaitIsActive().
612 * @param enmInterruptibility Guest CPU interruptibility level.
613 */
614static int vmmR0DoHaltInterrupt(PVMCPUCC pVCpu, unsigned uMWait, CPUMINTERRUPTIBILITY enmInterruptibility)
615{
616 Assert(!TRPMHasTrap(pVCpu));
617 Assert( enmInterruptibility > CPUMINTERRUPTIBILITY_INVALID
618 && enmInterruptibility < CPUMINTERRUPTIBILITY_END);
619
620 /*
621 * Pending interrupts w/o any SMIs or NMIs? That the usual case.
622 */
623 if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
624 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_INTERRUPT_NMI))
625 {
626 if (enmInterruptibility <= CPUMINTERRUPTIBILITY_UNRESTRAINED)
627 {
628 uint8_t u8Interrupt = 0;
629 int rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
630 Log(("vmmR0DoHaltInterrupt: CPU%d u8Interrupt=%d (%#x) rc=%Rrc\n", pVCpu->idCpu, u8Interrupt, u8Interrupt, rc));
631 if (RT_SUCCESS(rc))
632 {
633 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_UNHALT);
634
635 rc = TRPMAssertTrap(pVCpu, u8Interrupt, TRPM_HARDWARE_INT);
636 AssertRCSuccess(rc);
637 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltExec);
638 return rc;
639 }
640 }
641 }
642 /*
643 * SMI is not implemented yet, at least not here.
644 */
645 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_SMI))
646 {
647 return VINF_EM_HALT;
648 }
649 /*
650 * NMI.
651 */
652 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
653 {
654 if (enmInterruptibility < CPUMINTERRUPTIBILITY_NMI_INHIBIT)
655 {
656 /** @todo later. */
657 return VINF_EM_HALT;
658 }
659 }
660 /*
661 * Nested-guest virtual interrupt.
662 */
663 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST))
664 {
665 if (enmInterruptibility < CPUMINTERRUPTIBILITY_VIRT_INT_DISABLED)
666 {
667 /** @todo NSTVMX: NSTSVM: Remember, we might have to check and perform VM-exits
668 * here before injecting the virtual interrupt. See emR3ForcedActions
669 * for details. */
670 return VINF_EM_HALT;
671 }
672 }
673
674 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UNHALT))
675 {
676 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltExec);
677 return VINF_SUCCESS;
678 }
679 if (uMWait > 1)
680 {
681 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltExec);
682 return VINF_SUCCESS;
683 }
684
685 return VINF_EM_HALT;
686}
687
688
689/**
690 * This does one round of vmR3HaltGlobal1Halt().
691 *
692 * The rationale here is that we'll reduce latency in interrupt situations if we
693 * don't go to ring-3 immediately on a VINF_EM_HALT (guest executed HLT or
694 * MWAIT), but do one round of blocking here instead and hope the interrupt is
695 * raised in the meanwhile.
696 *
697 * If we go to ring-3 we'll quit the inner HM/NEM loop in EM and end up in the
698 * outer loop, which will then call VMR3WaitHalted() and that in turn will do a
699 * ring-0 call (unless we're too close to a timer event). When the interrupt
700 * wakes us up, we'll return from ring-0 and EM will by instinct do a
701 * rescheduling (because of raw-mode) before it resumes the HM/NEM loop and gets
702 * back to VMMR0EntryFast().
703 *
704 * @returns VINF_SUCCESS or VINF_EM_HALT.
705 * @param pGVM The ring-0 VM structure.
706 * @param pGVCpu The ring-0 virtual CPU structure.
707 *
708 * @todo r=bird: All the blocking/waiting and EMT management should move out of
709 * the VM module, probably to VMM. Then this would be more weird wrt
710 * parameters and statistics.
711 */
712static int vmmR0DoHalt(PGVM pGVM, PGVMCPU pGVCpu)
713{
714 /*
715 * Do spin stat historization.
716 */
717 if (++pGVCpu->vmm.s.cR0Halts & 0xff)
718 { /* likely */ }
719 else if (pGVCpu->vmm.s.cR0HaltsSucceeded > pGVCpu->vmm.s.cR0HaltsToRing3)
720 {
721 pGVCpu->vmm.s.cR0HaltsSucceeded = 2;
722 pGVCpu->vmm.s.cR0HaltsToRing3 = 0;
723 }
724 else
725 {
726 pGVCpu->vmm.s.cR0HaltsSucceeded = 0;
727 pGVCpu->vmm.s.cR0HaltsToRing3 = 2;
728 }
729
730 /*
731 * Flags that make us go to ring-3.
732 */
733 uint32_t const fVmFFs = VM_FF_TM_VIRTUAL_SYNC | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA
734 | VM_FF_DBGF | VM_FF_REQUEST | VM_FF_CHECK_VM_STATE
735 | VM_FF_RESET | VM_FF_EMT_RENDEZVOUS | VM_FF_PGM_NEED_HANDY_PAGES
736 | VM_FF_PGM_NO_MEMORY | VM_FF_DEBUG_SUSPEND;
737 uint64_t const fCpuFFs = VMCPU_FF_TIMER | VMCPU_FF_PDM_CRITSECT | VMCPU_FF_IEM
738 | VMCPU_FF_REQUEST | VMCPU_FF_DBGF | VMCPU_FF_HM_UPDATE_CR3
739 | VMCPU_FF_HM_UPDATE_PAE_PDPES | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
740 | VMCPU_FF_TO_R3 | VMCPU_FF_IOM;
741
742 /*
743 * Check preconditions.
744 */
745 unsigned const uMWait = EMMonitorWaitIsActive(pGVCpu);
746 CPUMINTERRUPTIBILITY const enmInterruptibility = CPUMGetGuestInterruptibility(pGVCpu);
747 if ( pGVCpu->vmm.s.fMayHaltInRing0
748 && !TRPMHasTrap(pGVCpu)
749 && ( enmInterruptibility == CPUMINTERRUPTIBILITY_UNRESTRAINED
750 || uMWait > 1))
751 {
752 if ( !VM_FF_IS_ANY_SET(pGVM, fVmFFs)
753 && !VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs))
754 {
755 /*
756 * Interrupts pending already?
757 */
758 if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC))
759 APICUpdatePendingInterrupts(pGVCpu);
760
761 /*
762 * Flags that wake us up from the halted state.
763 */
764 uint64_t const fIntMask = VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_INTERRUPT_NESTED_GUEST
765 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT;
766
767 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask))
768 return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility);
769 ASMNopPause();
770
771 /*
772 * Check out how long till the next timer event.
773 */
774 uint64_t u64Delta;
775 uint64_t u64GipTime = TMTimerPollGIP(pGVM, pGVCpu, &u64Delta);
776
777 if ( !VM_FF_IS_ANY_SET(pGVM, fVmFFs)
778 && !VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs))
779 {
780 if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC))
781 APICUpdatePendingInterrupts(pGVCpu);
782
783 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask))
784 return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility);
785
786 /*
787 * Wait if there is enough time until the next timer event.
788 */
789 if (u64Delta >= pGVCpu->vmm.s.cNsSpinBlockThreshold)
790 {
791 /* If there are a few other CPU cores around, we will procrastinate a
792 little before going to sleep, hoping for some device raising an
793 interrupt or similar. Though, the best thing here would be to
794 dynamically adjust the spin count according to its usefulness or
795 something... */
796 if ( pGVCpu->vmm.s.cR0HaltsSucceeded > pGVCpu->vmm.s.cR0HaltsToRing3
797 && RTMpGetOnlineCount() >= 4)
798 {
799 /** @todo Figure out how we can skip this if it hasn't helped recently...
800 * @bugref{9172#c12} */
801 uint32_t cSpinLoops = 42;
802 while (cSpinLoops-- > 0)
803 {
804 ASMNopPause();
805 if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC))
806 APICUpdatePendingInterrupts(pGVCpu);
807 ASMNopPause();
808 if (VM_FF_IS_ANY_SET(pGVM, fVmFFs))
809 {
810 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3FromSpin);
811 return VINF_EM_HALT;
812 }
813 ASMNopPause();
814 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs))
815 {
816 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3FromSpin);
817 return VINF_EM_HALT;
818 }
819 ASMNopPause();
820 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask))
821 {
822 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltExecFromSpin);
823 return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility);
824 }
825 ASMNopPause();
826 }
827 }
828
829 /* Block. We have to set the state to VMCPUSTATE_STARTED_HALTED here so ring-3
830 knows when to notify us (cannot access VMINTUSERPERVMCPU::fWait from here). */
831 VMCPU_CMPXCHG_STATE(pGVCpu, VMCPUSTATE_STARTED_HALTED, VMCPUSTATE_STARTED);
832 uint64_t const u64StartSchedHalt = RTTimeNanoTS();
833 int rc = GVMMR0SchedHalt(pGVM, pGVCpu, u64GipTime);
834 uint64_t const u64EndSchedHalt = RTTimeNanoTS();
835 uint64_t const cNsElapsedSchedHalt = u64EndSchedHalt - u64StartSchedHalt;
836 VMCPU_CMPXCHG_STATE(pGVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_HALTED);
837 STAM_REL_PROFILE_ADD_PERIOD(&pGVCpu->vmm.s.StatR0HaltBlock, cNsElapsedSchedHalt);
838 if ( rc == VINF_SUCCESS
839 || rc == VERR_INTERRUPTED)
840
841 {
842 /* Keep some stats like ring-3 does. */
843 int64_t const cNsOverslept = u64EndSchedHalt - u64GipTime;
844 if (cNsOverslept > 50000)
845 STAM_REL_PROFILE_ADD_PERIOD(&pGVCpu->vmm.s.StatR0HaltBlockOverslept, cNsOverslept);
846 else if (cNsOverslept < -50000)
847 STAM_REL_PROFILE_ADD_PERIOD(&pGVCpu->vmm.s.StatR0HaltBlockInsomnia, cNsElapsedSchedHalt);
848 else
849 STAM_REL_PROFILE_ADD_PERIOD(&pGVCpu->vmm.s.StatR0HaltBlockOnTime, cNsElapsedSchedHalt);
850
851 /*
852 * Recheck whether we can resume execution or have to go to ring-3.
853 */
854 if ( !VM_FF_IS_ANY_SET(pGVM, fVmFFs)
855 && !VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs))
856 {
857 if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC))
858 APICUpdatePendingInterrupts(pGVCpu);
859 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask))
860 {
861 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltExecFromBlock);
862 return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility);
863 }
864 }
865 }
866 }
867 }
868 }
869 }
870 return VINF_EM_HALT;
871}
872
873
874/**
875 * VMM ring-0 thread-context callback.
876 *
877 * This does common HM state updating and calls the HM-specific thread-context
878 * callback.
879 *
880 * @param enmEvent The thread-context event.
881 * @param pvUser Opaque pointer to the VMCPU.
882 *
883 * @thread EMT(pvUser)
884 */
885static DECLCALLBACK(void) vmmR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, void *pvUser)
886{
887 PVMCPUCC pVCpu = (PVMCPUCC)pvUser;
888
889 switch (enmEvent)
890 {
891 case RTTHREADCTXEVENT_IN:
892 {
893 /*
894 * Linux may call us with preemption enabled (really!) but technically we
895 * cannot get preempted here, otherwise we end up in an infinite recursion
896 * scenario (i.e. preempted in resume hook -> preempt hook -> resume hook...
897 * ad infinitum). Let's just disable preemption for now...
898 */
899 /** @todo r=bird: I don't believe the above. The Linux code is clearly enabling
900 * preemption after doing the callout (one or two functions up the
901 * call chain). */
902 /** @todo r=ramshankar: See @bugref{5313#c30}. */
903 RTTHREADPREEMPTSTATE ParanoidPreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
904 RTThreadPreemptDisable(&ParanoidPreemptState);
905
906 /* We need to update the VCPU <-> host CPU mapping. */
907 RTCPUID idHostCpu;
908 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
909 pVCpu->iHostCpuSet = iHostCpuSet;
910 ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
911
912 /* In the very unlikely event that the GIP delta for the CPU we're
913 rescheduled on needs calculating, try to force a return to ring-3.
914 We unfortunately cannot do the measurements right here. */
915 if (RT_UNLIKELY(SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
916 VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
917
918 /* Invoke the HM-specific thread-context callback. */
919 HMR0ThreadCtxCallback(enmEvent, pvUser);
920
921 /* Restore preemption. */
922 RTThreadPreemptRestore(&ParanoidPreemptState);
923 break;
924 }
925
926 case RTTHREADCTXEVENT_OUT:
927 {
928 /* Invoke the HM-specific thread-context callback. */
929 HMR0ThreadCtxCallback(enmEvent, pvUser);
930
931 /*
932 * Sigh. See VMMGetCpu() used by VMCPU_ASSERT_EMT(). We cannot let several VCPUs
933 * have the same host CPU associated with them.
934 */
935 pVCpu->iHostCpuSet = UINT32_MAX;
936 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
937 break;
938 }
939
940 default:
941 /* Invoke the HM-specific thread-context callback. */
942 HMR0ThreadCtxCallback(enmEvent, pvUser);
943 break;
944 }
945}
946
947
948/**
949 * Creates thread switching hook for the current EMT thread.
950 *
951 * This is called by GVMMR0CreateVM and GVMMR0RegisterVCpu. If the host
952 * platform does not implement switcher hooks, no hooks will be created and
953 * the member will be set to NIL_RTTHREADCTXHOOK.
954 *
955 * @returns VBox status code.
956 * @param pVCpu The cross context virtual CPU structure.
957 * @thread EMT(pVCpu)
958 */
959VMMR0_INT_DECL(int) VMMR0ThreadCtxHookCreateForEmt(PVMCPUCC pVCpu)
960{
961 VMCPU_ASSERT_EMT(pVCpu);
962 Assert(pVCpu->vmm.s.hCtxHook == NIL_RTTHREADCTXHOOK);
963
964#if 1 /* To disable this stuff change to zero. */
965 int rc = RTThreadCtxHookCreate(&pVCpu->vmm.s.hCtxHook, 0, vmmR0ThreadCtxCallback, pVCpu);
966 if (RT_SUCCESS(rc))
967 return rc;
968#else
969 RT_NOREF(vmmR0ThreadCtxCallback);
970 int rc = VERR_NOT_SUPPORTED;
971#endif
972
973 pVCpu->vmm.s.hCtxHook = NIL_RTTHREADCTXHOOK;
974 if (rc == VERR_NOT_SUPPORTED)
975 return VINF_SUCCESS;
976
977 LogRelMax(32, ("RTThreadCtxHookCreate failed! rc=%Rrc pVCpu=%p idCpu=%RU32\n", rc, pVCpu, pVCpu->idCpu));
978 return VINF_SUCCESS; /* Just ignore it, we can live without context hooks. */
979}
980
981
982/**
983 * Destroys the thread switching hook for the specified VCPU.
984 *
985 * @param pVCpu The cross context virtual CPU structure.
986 * @remarks Can be called from any thread.
987 */
988VMMR0_INT_DECL(void) VMMR0ThreadCtxHookDestroyForEmt(PVMCPUCC pVCpu)
989{
990 int rc = RTThreadCtxHookDestroy(pVCpu->vmm.s.hCtxHook);
991 AssertRC(rc);
992 pVCpu->vmm.s.hCtxHook = NIL_RTTHREADCTXHOOK;
993}
994
995
996/**
997 * Disables the thread switching hook for this VCPU (if we got one).
998 *
999 * @param pVCpu The cross context virtual CPU structure.
1000 * @thread EMT(pVCpu)
1001 *
1002 * @remarks This also clears VMCPU::idHostCpu, so the mapping is invalid after
1003 * this call. This means you have to be careful with what you do!
1004 */
1005VMMR0_INT_DECL(void) VMMR0ThreadCtxHookDisable(PVMCPUCC pVCpu)
1006{
1007 /*
1008 * Clear the VCPU <-> host CPU mapping as we've left HM context.
1009 * @bugref{7726#c19} explains the need for this trick:
1010 *
1011 * VMXR0CallRing3Callback/SVMR0CallRing3Callback &
1012 * hmR0VmxLeaveSession/hmR0SvmLeaveSession disable context hooks during
1013 * longjmp & normal return to ring-3, which opens a window where we may be
1014 * rescheduled without changing VMCPUID::idHostCpu and cause confusion if
1015 * the CPU starts executing a different EMT. Both functions first disable
1016 * preemption and then call HMR0LeaveCpu, which invalidates idHostCpu, leaving
1017 * an opening for getting preempted.
1018 */
1019 /** @todo Make HM not need this API! Then we could leave the hooks enabled
1020 * all the time. */
1021 /** @todo move this into the context hook disabling if(). */
1022 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1023
1024 /*
1025 * Disable the context hook, if we got one.
1026 */
1027 if (pVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1028 {
1029 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1030 int rc = RTThreadCtxHookDisable(pVCpu->vmm.s.hCtxHook);
1031 AssertRC(rc);
1032 }
1033}
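/* Illustrative sketch, not part of the original file: per the explanation
   above, the HM leave paths are expected to follow roughly this sequence
   (the exact call sites live in the HMR0/VMX/SVM code):

       RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
       RTThreadPreemptDisable(&PreemptState);  // no rescheduling from here on
       HMR0LeaveCpu(pVCpu);                    // invalidates idHostCpu
       VMMR0ThreadCtxHookDisable(pVCpu);       // clears idHostCpu again, disables the hook
       RTThreadPreemptRestore(&PreemptState);
*/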
1034
1035
1036/**
1037 * Internal version of VMMR0ThreadCtxHookIsEnabled.
1038 *
1039 * @returns true if enabled, false otherwise.
1040 * @param pVCpu The cross context virtual CPU structure.
1041 */
1042DECLINLINE(bool) vmmR0ThreadCtxHookIsEnabled(PVMCPUCC pVCpu)
1043{
1044 return RTThreadCtxHookIsEnabled(pVCpu->vmm.s.hCtxHook);
1045}
1046
1047
1048/**
1049 * Whether the thread-context hook is enabled for this VCPU.
1050 *
1051 * @returns true if enabled, false otherwise.
1052 * @param pVCpu The cross context virtual CPU structure.
1053 */
1054VMMR0_INT_DECL(bool) VMMR0ThreadCtxHookIsEnabled(PVMCPUCC pVCpu)
1055{
1056 return vmmR0ThreadCtxHookIsEnabled(pVCpu);
1057}
1058
1059
1060/**
1061 * Returns the ring-0 release logger instance.
1062 *
1063 * @returns Pointer to release logger, NULL if not configured.
1064 * @param pVCpu The cross context virtual CPU structure of the caller.
1065 * @thread EMT(pVCpu)
1066 */
1067VMMR0_INT_DECL(PRTLOGGER) VMMR0GetReleaseLogger(PVMCPUCC pVCpu)
1068{
1069 PVMMR0LOGGER pLogger = pVCpu->vmm.s.pR0RelLoggerR0;
1070 if (pLogger)
1071 return &pLogger->Logger;
1072 return NULL;
1073}
1074
1075
1076#ifdef VBOX_WITH_STATISTICS
1077/**
1078 * Record return code statistics.
1079 * @param pVM The cross context VM structure.
1080 * @param pVCpu The cross context virtual CPU structure.
1081 * @param rc The status code.
1082 */
1083static void vmmR0RecordRC(PVMCC pVM, PVMCPUCC pVCpu, int rc)
1084{
1085 /*
1086 * Collect statistics.
1087 */
1088 switch (rc)
1089 {
1090 case VINF_SUCCESS:
1091 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetNormal);
1092 break;
1093 case VINF_EM_RAW_INTERRUPT:
1094 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterrupt);
1095 break;
1096 case VINF_EM_RAW_INTERRUPT_HYPER:
1097 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptHyper);
1098 break;
1099 case VINF_EM_RAW_GUEST_TRAP:
1100 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGuestTrap);
1101 break;
1102 case VINF_EM_RAW_RING_SWITCH:
1103 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitch);
1104 break;
1105 case VINF_EM_RAW_RING_SWITCH_INT:
1106 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitchInt);
1107 break;
1108 case VINF_EM_RAW_STALE_SELECTOR:
1109 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetStaleSelector);
1110 break;
1111 case VINF_EM_RAW_IRET_TRAP:
1112 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIRETTrap);
1113 break;
1114 case VINF_IOM_R3_IOPORT_READ:
1115 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIORead);
1116 break;
1117 case VINF_IOM_R3_IOPORT_WRITE:
1118 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOWrite);
1119 break;
1120 case VINF_IOM_R3_IOPORT_COMMIT_WRITE:
1121 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOCommitWrite);
1122 break;
1123 case VINF_IOM_R3_MMIO_READ:
1124 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIORead);
1125 break;
1126 case VINF_IOM_R3_MMIO_WRITE:
1127 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOWrite);
1128 break;
1129 case VINF_IOM_R3_MMIO_COMMIT_WRITE:
1130 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOCommitWrite);
1131 break;
1132 case VINF_IOM_R3_MMIO_READ_WRITE:
1133 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOReadWrite);
1134 break;
1135 case VINF_PATM_HC_MMIO_PATCH_READ:
1136 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchRead);
1137 break;
1138 case VINF_PATM_HC_MMIO_PATCH_WRITE:
1139 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchWrite);
1140 break;
1141 case VINF_CPUM_R3_MSR_READ:
1142 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMSRRead);
1143 break;
1144 case VINF_CPUM_R3_MSR_WRITE:
1145 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMSRWrite);
1146 break;
1147 case VINF_EM_RAW_EMULATE_INSTR:
1148 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetEmulate);
1149 break;
1150 case VINF_PATCH_EMULATE_INSTR:
1151 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchEmulate);
1152 break;
1153 case VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT:
1154 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetLDTFault);
1155 break;
1156 case VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT:
1157 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGDTFault);
1158 break;
1159 case VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT:
1160 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIDTFault);
1161 break;
1162 case VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT:
1163 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTSSFault);
1164 break;
1165 case VINF_CSAM_PENDING_ACTION:
1166 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCSAMTask);
1167 break;
1168 case VINF_PGM_SYNC_CR3:
1169 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetSyncCR3);
1170 break;
1171 case VINF_PATM_PATCH_INT3:
1172 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchInt3);
1173 break;
1174 case VINF_PATM_PATCH_TRAP_PF:
1175 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchPF);
1176 break;
1177 case VINF_PATM_PATCH_TRAP_GP:
1178 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchGP);
1179 break;
1180 case VINF_PATM_PENDING_IRQ_AFTER_IRET:
1181 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchIretIRQ);
1182 break;
1183 case VINF_EM_RESCHEDULE_REM:
1184 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRescheduleREM);
1185 break;
1186 case VINF_EM_RAW_TO_R3:
1187 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Total);
1188 if (VM_FF_IS_SET(pVM, VM_FF_TM_VIRTUAL_SYNC))
1189 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3TMVirt);
1190 else if (VM_FF_IS_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES))
1191 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3HandyPages);
1192 else if (VM_FF_IS_SET(pVM, VM_FF_PDM_QUEUES))
1193 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3PDMQueues);
1194 else if (VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
1195 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Rendezvous);
1196 else if (VM_FF_IS_SET(pVM, VM_FF_PDM_DMA))
1197 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3DMA);
1198 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TIMER))
1199 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Timer);
1200 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PDM_CRITSECT))
1201 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3CritSect);
1202 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TO_R3))
1203 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3FF);
1204 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
1205 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Iem);
1206 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IOM))
1207 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Iom);
1208 else
1209 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Unknown);
1210 break;
1211
1212 case VINF_EM_RAW_TIMER_PENDING:
1213 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTimerPending);
1214 break;
1215 case VINF_EM_RAW_INTERRUPT_PENDING:
1216 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptPending);
1217 break;
1218 case VINF_VMM_CALL_HOST:
1219 switch (pVCpu->vmm.s.enmCallRing3Operation)
1220 {
1221 case VMMCALLRING3_PDM_CRIT_SECT_ENTER:
1222 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPDMCritSectEnter);
1223 break;
1224 case VMMCALLRING3_PDM_LOCK:
1225 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPDMLock);
1226 break;
1227 case VMMCALLRING3_PGM_POOL_GROW:
1228 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMPoolGrow);
1229 break;
1230 case VMMCALLRING3_PGM_LOCK:
1231 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMLock);
1232 break;
1233 case VMMCALLRING3_PGM_MAP_CHUNK:
1234 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMMapChunk);
1235 break;
1236 case VMMCALLRING3_PGM_ALLOCATE_HANDY_PAGES:
1237 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMAllocHandy);
1238 break;
1239 case VMMCALLRING3_VMM_LOGGER_FLUSH:
1240 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallLogFlush);
1241 break;
1242 case VMMCALLRING3_VM_SET_ERROR:
1243 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallVMSetError);
1244 break;
1245 case VMMCALLRING3_VM_SET_RUNTIME_ERROR:
1246 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallVMSetRuntimeError);
1247 break;
1248 case VMMCALLRING3_VM_R0_ASSERTION:
1249 default:
1250 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCallRing3);
1251 break;
1252 }
1253 break;
1254 case VINF_PATM_DUPLICATE_FUNCTION:
1255 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPATMDuplicateFn);
1256 break;
1257 case VINF_PGM_CHANGE_MODE:
1258 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMChangeMode);
1259 break;
1260 case VINF_PGM_POOL_FLUSH_PENDING:
1261 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMFlushPending);
1262 break;
1263 case VINF_EM_PENDING_REQUEST:
1264 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPendingRequest);
1265 break;
1266 case VINF_EM_HM_PATCH_TPR_INSTR:
1267 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchTPR);
1268 break;
1269 default:
1270 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMisc);
1271 break;
1272 }
1273}
1274#endif /* VBOX_WITH_STATISTICS */
1275
1276
1277/**
1278 * The Ring 0 entry point, called by the fast-ioctl path.
1279 *
1280 * @param pGVM The global (ring-0) VM structure.
1281 * @param pVMIgnored The cross context VM structure. The return code is
1282 * stored in pVM->vmm.s.iLastGZRc.
1283 * @param idCpu The Virtual CPU ID of the calling EMT.
1284 * @param enmOperation Which operation to execute.
1285 * @remarks Assume called with interrupts _enabled_.
1286 */
1287VMMR0DECL(void) VMMR0EntryFast(PGVM pGVM, PVMCC pVMIgnored, VMCPUID idCpu, VMMR0OPERATION enmOperation)
1288{
1289 RT_NOREF(pVMIgnored);
1290
1291 /*
1292 * Validation.
1293 */
1294 if ( idCpu < pGVM->cCpus
1295 && pGVM->cCpus == pGVM->cCpusUnsafe)
1296 { /*likely*/ }
1297 else
1298 {
1299 SUPR0Printf("VMMR0EntryFast: Bad idCpu=%#x cCpus=%#x cCpusUnsafe=%#x\n", idCpu, pGVM->cCpus, pGVM->cCpusUnsafe);
1300 return;
1301 }
1302
1303 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
1304 RTNATIVETHREAD const hNativeThread = RTThreadNativeSelf();
1305 if (RT_LIKELY( pGVCpu->hEMT == hNativeThread
1306 && pGVCpu->hNativeThreadR0 == hNativeThread))
1307 { /* likely */ }
1308 else
1309 {
1310 SUPR0Printf("VMMR0EntryFast: Bad thread idCpu=%#x hNativeSelf=%p pGVCpu->hEmt=%p pGVCpu->hNativeThreadR0=%p\n",
1311 idCpu, hNativeThread, pGVCpu->hEMT, pGVCpu->hNativeThreadR0);
1312 return;
1313 }
1314
1315 /*
1316 * SMAP fun.
1317 */
1318 VMM_CHECK_SMAP_SETUP();
1319 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1320
1321 /*
1322 * Perform requested operation.
1323 */
1324 switch (enmOperation)
1325 {
1326 /*
1327 * Run guest code using the available hardware acceleration technology.
1328 */
1329 case VMMR0_DO_HM_RUN:
1330 {
1331 for (;;) /* hlt loop */
1332 {
1333 /*
1334 * Disable preemption.
1335 */
1336 Assert(!vmmR0ThreadCtxHookIsEnabled(pGVCpu));
1337 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
1338 RTThreadPreemptDisable(&PreemptState);
1339
1340 /*
1341 * Get the host CPU identifiers, make sure they are valid and that
1342 * we've got a TSC delta for the CPU.
1343 */
1344 RTCPUID idHostCpu;
1345 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
1346 if (RT_LIKELY( iHostCpuSet < RTCPUSET_MAX_CPUS
1347 && SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
1348 {
1349 pGVCpu->iHostCpuSet = iHostCpuSet;
1350 ASMAtomicWriteU32(&pGVCpu->idHostCpu, idHostCpu);
1351
1352 /*
1353 * Update the periodic preemption timer if it's active.
1354 */
1355 if (pGVM->vmm.s.fUsePeriodicPreemptionTimers)
1356 GVMMR0SchedUpdatePeriodicPreemptionTimer(pGVM, pGVCpu->idHostCpu, TMCalcHostTimerFrequency(pGVM, pGVCpu));
1357 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1358
1359#ifdef VMM_R0_TOUCH_FPU
1360 /*
1361 * Make sure we've got the FPU state loaded so we don't need to clear
1362 * CR0.TS and get out of sync with the host kernel when loading the guest
1363 * FPU state. @ref sec_cpum_fpu (CPUM.cpp) and @bugref{4053}.
1364 */
1365 CPUMR0TouchHostFpu();
1366#endif
1367 int rc;
1368 bool fPreemptRestored = false;
1369 if (!HMR0SuspendPending())
1370 {
1371 /*
1372 * Enable the context switching hook.
1373 */
1374 if (pGVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1375 {
1376 Assert(!RTThreadCtxHookIsEnabled(pGVCpu->vmm.s.hCtxHook));
1377 int rc2 = RTThreadCtxHookEnable(pGVCpu->vmm.s.hCtxHook); AssertRC(rc2);
1378 }
1379
1380 /*
1381 * Enter HM context.
1382 */
1383 rc = HMR0Enter(pGVCpu);
1384 if (RT_SUCCESS(rc))
1385 {
1386 VMCPU_SET_STATE(pGVCpu, VMCPUSTATE_STARTED_HM);
1387
1388 /*
1389 * When preemption hooks are in place, enable preemption now that
1390 * we're in HM context.
1391 */
1392 if (vmmR0ThreadCtxHookIsEnabled(pGVCpu))
1393 {
1394 fPreemptRestored = true;
1395 RTThreadPreemptRestore(&PreemptState);
1396 }
1397
1398 /*
1399 * Setup the longjmp machinery and execute guest code (calls HMR0RunGuestCode).
1400 */
1401 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1402 rc = vmmR0CallRing3SetJmp(&pGVCpu->vmm.s.CallRing3JmpBufR0, HMR0RunGuestCode, pGVM, pGVCpu);
1403 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1404
1405 /*
1406 * Assert sanity on the way out. Using manual assertions code here as normal
1407 * assertions are going to panic the host since we're outside the setjmp/longjmp zone.
1408 */
1409 if (RT_UNLIKELY( VMCPU_GET_STATE(pGVCpu) != VMCPUSTATE_STARTED_HM
1410 && RT_SUCCESS_NP(rc) && rc != VINF_VMM_CALL_HOST ))
1411 {
1412 pGVM->vmm.s.szRing0AssertMsg1[0] = '\0';
1413 RTStrPrintf(pGVM->vmm.s.szRing0AssertMsg2, sizeof(pGVM->vmm.s.szRing0AssertMsg2),
1414 "Got VMCPU state %d expected %d.\n", VMCPU_GET_STATE(pGVCpu), VMCPUSTATE_STARTED_HM);
1415 rc = VERR_VMM_WRONG_HM_VMCPU_STATE;
1416 }
1417 /** @todo Get rid of this. HM shouldn't disable the context hook. */
1418 else if (RT_UNLIKELY(vmmR0ThreadCtxHookIsEnabled(pGVCpu)))
1419 {
1420 pGVM->vmm.s.szRing0AssertMsg1[0] = '\0';
1421 RTStrPrintf(pGVM->vmm.s.szRing0AssertMsg2, sizeof(pGVM->vmm.s.szRing0AssertMsg2),
1422 "Thread-context hooks still enabled! VCPU=%p Id=%u rc=%d.\n", pGVCpu, pGVCpu->idCpu, rc);
1423 rc = VERR_INVALID_STATE;
1424 }
1425
1426 VMCPU_SET_STATE(pGVCpu, VMCPUSTATE_STARTED);
1427 }
1428 STAM_COUNTER_INC(&pGVM->vmm.s.StatRunGC);
1429
1430 /*
1431 * Invalidate the host CPU identifiers before we disable the context
1432 * hook / restore preemption.
1433 */
1434 pGVCpu->iHostCpuSet = UINT32_MAX;
1435 ASMAtomicWriteU32(&pGVCpu->idHostCpu, NIL_RTCPUID);
1436
1437 /*
1438 * Disable context hooks. Due to unresolved cleanup issues, we
1439 * cannot leave the hooks enabled when we return to ring-3.
1440 *
1441 * Note! At the moment HM may also have disabled the hook
1442 * when we get here, but the IPRT API handles that.
1443 */
1444 if (pGVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1445 {
1446 ASMAtomicWriteU32(&pGVCpu->idHostCpu, NIL_RTCPUID);
1447 RTThreadCtxHookDisable(pGVCpu->vmm.s.hCtxHook);
1448 }
1449 }
1450 /*
1451 * The system is about to go into suspend mode; go back to ring 3.
1452 */
1453 else
1454 {
1455 rc = VINF_EM_RAW_INTERRUPT;
1456 pGVCpu->iHostCpuSet = UINT32_MAX;
1457 ASMAtomicWriteU32(&pGVCpu->idHostCpu, NIL_RTCPUID);
1458 }
1459
1460 /** @todo When HM stops messing with the context hook state, we'll disable
1461 * preemption again before the RTThreadCtxHookDisable call. */
1462 if (!fPreemptRestored)
1463 RTThreadPreemptRestore(&PreemptState);
1464
1465 pGVCpu->vmm.s.iLastGZRc = rc;
1466
1467 /* Fire dtrace probe and collect statistics. */
1468 VBOXVMM_R0_VMM_RETURN_TO_RING3_HM(pGVCpu, CPUMQueryGuestCtxPtr(pGVCpu), rc);
1469#ifdef VBOX_WITH_STATISTICS
1470 vmmR0RecordRC(pGVM, pGVCpu, rc);
1471#endif
1472#if 1
1473 /*
1474 * If this is a halt, try to handle it here to avoid a ring-3 round trip.
1475 */
1476 if (rc != VINF_EM_HALT)
1477 { /* we're not in a hurry for a HLT, so prefer this path */ }
1478 else
1479 {
1480 pGVCpu->vmm.s.iLastGZRc = rc = vmmR0DoHalt(pGVM, pGVCpu);
1481 if (rc == VINF_SUCCESS)
1482 {
1483 pGVCpu->vmm.s.cR0HaltsSucceeded++;
1484 continue;
1485 }
1486 pGVCpu->vmm.s.cR0HaltsToRing3++;
1487 }
1488#endif
1489 }
1490 /*
1491 * Invalid CPU set index or TSC delta in need of measuring.
1492 */
1493 else
1494 {
1495 pGVCpu->iHostCpuSet = UINT32_MAX;
1496 ASMAtomicWriteU32(&pGVCpu->idHostCpu, NIL_RTCPUID);
1497 RTThreadPreemptRestore(&PreemptState);
1498 if (iHostCpuSet < RTCPUSET_MAX_CPUS)
1499 {
1500 int rc = SUPR0TscDeltaMeasureBySetIndex(pGVM->pSession, iHostCpuSet, 0 /*fFlags*/,
1501 2 /*cMsWaitRetry*/, 5*RT_MS_1SEC /*cMsWaitThread*/,
1502 0 /*default cTries*/);
1503 if (RT_SUCCESS(rc) || rc == VERR_CPU_OFFLINE)
1504 pGVCpu->vmm.s.iLastGZRc = VINF_EM_RAW_TO_R3;
1505 else
1506 pGVCpu->vmm.s.iLastGZRc = rc;
1507 }
1508 else
1509 pGVCpu->vmm.s.iLastGZRc = VERR_INVALID_CPU_INDEX;
1510 }
1511 break;
1512
1513 } /* halt loop. */
1514 break;
1515 }
1516
1517#ifdef VBOX_WITH_NEM_R0
1518# if defined(RT_ARCH_AMD64) && defined(RT_OS_WINDOWS)
1519 case VMMR0_DO_NEM_RUN:
1520 {
1521 /*
1522 * Setup the longjmp machinery and execute guest code (calls NEMR0RunGuestCode).
1523 */
1524 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1525# ifdef VBOXSTRICTRC_STRICT_ENABLED
1526 int rc = vmmR0CallRing3SetJmp2(&pGVCpu->vmm.s.CallRing3JmpBufR0, (PFNVMMR0SETJMP2)NEMR0RunGuestCode, pGVM, idCpu);
1527# else
1528 int rc = vmmR0CallRing3SetJmp2(&pGVCpu->vmm.s.CallRing3JmpBufR0, NEMR0RunGuestCode, pGVM, idCpu);
1529# endif
1530 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1531 STAM_COUNTER_INC(&pGVM->vmm.s.StatRunGC);
1532
1533 pGVCpu->vmm.s.iLastGZRc = rc;
1534
1535 /*
1536 * Fire dtrace probe and collect statistics.
1537 */
1538 VBOXVMM_R0_VMM_RETURN_TO_RING3_NEM(pGVCpu, CPUMQueryGuestCtxPtr(pGVCpu), rc);
1539# ifdef VBOX_WITH_STATISTICS
1540 vmmR0RecordRC(pGVM, pGVCpu, rc);
1541# endif
1542 break;
1543 }
1544# endif
1545#endif
1546
1547 /*
1548 * For profiling.
1549 */
1550 case VMMR0_DO_NOP:
1551 pGVCpu->vmm.s.iLastGZRc = VINF_SUCCESS;
1552 break;
1553
1554 /*
1555 * Shouldn't happen.
1556 */
1557 default:
1558 AssertMsgFailed(("%#x\n", enmOperation));
1559 pGVCpu->vmm.s.iLastGZRc = VERR_NOT_SUPPORTED;
1560 break;
1561 }
1562 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1563}
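/* Illustrative sketch, not part of the original file: ring-3 reaches this
   entry point through the support library's fast ioctl path, roughly:

       SUPR3CallVMMR0Fast(pVMR0, VMMR0_DO_HM_RUN, pVCpu->idCpu);
       int rcStatus = pVCpu->vmm.s.iLastGZRc;  // the real status is picked up here
*/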
1564
1565
1566/**
1567 * Validates a session or VM session argument.
1568 *
1569 * @returns true / false accordingly.
1570 * @param pGVM The global (ring-0) VM structure.
1571 * @param pClaimedSession The session claim to validate.
1572 * @param pSession The session argument.
1573 */
1574DECLINLINE(bool) vmmR0IsValidSession(PGVM pGVM, PSUPDRVSESSION pClaimedSession, PSUPDRVSESSION pSession)
1575{
1576 /* This must be set! */
1577 if (!pSession)
1578 return false;
1579
1580 /* Only one out of the two. */
1581 if (pGVM && pClaimedSession)
1582 return false;
1583 if (pGVM)
1584 pClaimedSession = pGVM->pSession;
1585 return pClaimedSession == pSession;
1586}
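/* Illustrative sketch, not part of the original file: exactly one of the two
   session claims may be supplied, never both at once:

       vmmR0IsValidSession(pGVM, NULL, pSession);            // VM-bound request
       vmmR0IsValidSession(NULL, pClaimedSession, pSession); // session-only request
*/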
1587
1588
1589/**
1590 * VMMR0EntryEx worker function, either called directly or, whenever possible,
1591 * called through a longjmp so we can exit safely on failure.
1592 *
1593 * @returns VBox status code.
1594 * @param pGVM The global (ring-0) VM structure.
1595 * @param idCpu Virtual CPU ID argument. Must be NIL_VMCPUID if pVM
1596 * is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't.
1597 * @param enmOperation Which operation to execute.
1598 * @param pReqHdr This points to a SUPVMMR0REQHDR packet. Optional.
1599 * The support driver validates this if it's present.
1600 * @param u64Arg Some simple constant argument.
1601 * @param pSession The session of the caller.
1602 *
1603 * @remarks Assume called with interrupts _enabled_.
1604 */
1605static int vmmR0EntryExWorker(PGVM pGVM, VMCPUID idCpu, VMMR0OPERATION enmOperation,
1606 PSUPVMMR0REQHDR pReqHdr, uint64_t u64Arg, PSUPDRVSESSION pSession)
1607{
1608 /*
1609 * Validate pGVM and idCpu for consistency and validity.
1610 */
1611 if (pGVM != NULL)
1612 {
1613 if (RT_LIKELY(((uintptr_t)pGVM & PAGE_OFFSET_MASK) == 0))
1614 { /* likely */ }
1615 else
1616 {
1617 SUPR0Printf("vmmR0EntryExWorker: Invalid pGVM=%p! (op=%d)\n", pGVM, enmOperation);
1618 return VERR_INVALID_POINTER;
1619 }
1620
1621 if (RT_LIKELY(idCpu == NIL_VMCPUID || idCpu < pGVM->cCpus))
1622 { /* likely */ }
1623 else
1624 {
1625 SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu %#x (cCpus=%#x)\n", idCpu, pGVM->cCpus);
1626 return VERR_INVALID_PARAMETER;
1627 }
1628
1629 if (RT_LIKELY( pGVM->enmVMState >= VMSTATE_CREATING
1630 && pGVM->enmVMState <= VMSTATE_TERMINATED
1631 && pGVM->pSession == pSession
1632 && pGVM->pSelf == pGVM))
1633 { /* likely */ }
1634 else
1635 {
1636 SUPR0Printf("vmmR0EntryExWorker: Invalid pGVM=%p:{.enmVMState=%d, .cCpus=%#x, .pSession=%p(==%p), .pSelf=%p(==%p)}! (op=%d)\n",
1637 pGVM, pGVM->enmVMState, pGVM->cCpus, pGVM->pSession, pSession, pGVM->pSelf, pGVM, enmOperation);
1638 return VERR_INVALID_POINTER;
1639 }
1640 }
1641 else if (RT_LIKELY(idCpu == NIL_VMCPUID))
1642 { /* likely */ }
1643 else
1644 {
1645 SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu=%u\n", idCpu);
1646 return VERR_INVALID_PARAMETER;
1647 }
1648
1649 /*
1650 * SMAP fun.
1651 */
1652 VMM_CHECK_SMAP_SETUP();
1653 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
1654
1655 /*
1656 * Process the request.
1657 */
1658 int rc;
1659 switch (enmOperation)
1660 {
1661 /*
1662 * GVM requests
1663 */
1664 case VMMR0_DO_GVMM_CREATE_VM:
1665 if (pGVM == NULL && u64Arg == 0 && idCpu == NIL_VMCPUID)
1666 rc = GVMMR0CreateVMReq((PGVMMCREATEVMREQ)pReqHdr, pSession);
1667 else
1668 rc = VERR_INVALID_PARAMETER;
1669 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
1670 break;
1671
1672 case VMMR0_DO_GVMM_DESTROY_VM:
1673 if (pReqHdr == NULL && u64Arg == 0)
1674 rc = GVMMR0DestroyVM(pGVM);
1675 else
1676 rc = VERR_INVALID_PARAMETER;
1677 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
1678 break;
1679
1680 case VMMR0_DO_GVMM_REGISTER_VMCPU:
1681 if (pGVM != NULL)
1682 rc = GVMMR0RegisterVCpu(pGVM, idCpu);
1683 else
1684 rc = VERR_INVALID_PARAMETER;
1685 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1686 break;
1687
1688 case VMMR0_DO_GVMM_DEREGISTER_VMCPU:
1689 if (pGVM != NULL)
1690 rc = GVMMR0DeregisterVCpu(pGVM, idCpu);
1691 else
1692 rc = VERR_INVALID_PARAMETER;
1693 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1694 break;
1695
1696 case VMMR0_DO_GVMM_SCHED_HALT:
1697 if (pReqHdr)
1698 return VERR_INVALID_PARAMETER;
1699 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1700 rc = GVMMR0SchedHaltReq(pGVM, idCpu, u64Arg);
1701 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1702 break;
1703
1704 case VMMR0_DO_GVMM_SCHED_WAKE_UP:
1705 if (pReqHdr || u64Arg)
1706 return VERR_INVALID_PARAMETER;
1707 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1708 rc = GVMMR0SchedWakeUp(pGVM, idCpu);
1709 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1710 break;
1711
1712 case VMMR0_DO_GVMM_SCHED_POKE:
1713 if (pReqHdr || u64Arg)
1714 return VERR_INVALID_PARAMETER;
1715 rc = GVMMR0SchedPoke(pGVM, idCpu);
1716 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1717 break;
1718
1719 case VMMR0_DO_GVMM_SCHED_WAKE_UP_AND_POKE_CPUS:
1720 if (u64Arg)
1721 return VERR_INVALID_PARAMETER;
1722 rc = GVMMR0SchedWakeUpAndPokeCpusReq(pGVM, (PGVMMSCHEDWAKEUPANDPOKECPUSREQ)pReqHdr);
1723 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1724 break;
1725
1726 case VMMR0_DO_GVMM_SCHED_POLL:
1727 if (pReqHdr || u64Arg > 1)
1728 return VERR_INVALID_PARAMETER;
1729 rc = GVMMR0SchedPoll(pGVM, idCpu, !!u64Arg);
1730 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1731 break;
1732
1733 case VMMR0_DO_GVMM_QUERY_STATISTICS:
1734 if (u64Arg)
1735 return VERR_INVALID_PARAMETER;
1736 rc = GVMMR0QueryStatisticsReq(pGVM, (PGVMMQUERYSTATISTICSSREQ)pReqHdr, pSession);
1737 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1738 break;
1739
1740 case VMMR0_DO_GVMM_RESET_STATISTICS:
1741 if (u64Arg)
1742 return VERR_INVALID_PARAMETER;
1743 rc = GVMMR0ResetStatisticsReq(pGVM, (PGVMMRESETSTATISTICSSREQ)pReqHdr, pSession);
1744 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1745 break;
1746
1747 /*
1748 * Initialize the R0 part of a VM instance.
1749 */
1750 case VMMR0_DO_VMMR0_INIT:
1751 rc = vmmR0InitVM(pGVM, RT_LODWORD(u64Arg), RT_HIDWORD(u64Arg));
1752 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1753 break;
1754
1755 /*
1756 * Does EMT specific ring-0 init.
1757 */
1758 case VMMR0_DO_VMMR0_INIT_EMT:
1759 rc = vmmR0InitVMEmt(pGVM, idCpu);
1760 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1761 break;
1762
1763 /*
1764 * Terminate the R0 part of a VM instance.
1765 */
1766 case VMMR0_DO_VMMR0_TERM:
1767 rc = VMMR0TermVM(pGVM, 0 /*idCpu*/);
1768 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1769 break;
1770
1771 /*
1772      * Attempt to enable HM mode and check the current setting.
1773 */
1774 case VMMR0_DO_HM_ENABLE:
1775 rc = HMR0EnableAllCpus(pGVM);
1776 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1777 break;
1778
1779 /*
1780      * Set up the hardware-accelerated session.
1781 */
1782 case VMMR0_DO_HM_SETUP_VM:
1783 rc = HMR0SetupVM(pGVM);
1784 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1785 break;
1786
1787 /*
1788 * PGM wrappers.
1789 */
1790 case VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES:
1791 if (idCpu == NIL_VMCPUID)
1792 return VERR_INVALID_CPU_ID;
1793 rc = PGMR0PhysAllocateHandyPages(pGVM, idCpu);
1794 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1795 break;
1796
1797 case VMMR0_DO_PGM_FLUSH_HANDY_PAGES:
1798 if (idCpu == NIL_VMCPUID)
1799 return VERR_INVALID_CPU_ID;
1800 rc = PGMR0PhysFlushHandyPages(pGVM, idCpu);
1801 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1802 break;
1803
1804 case VMMR0_DO_PGM_ALLOCATE_LARGE_HANDY_PAGE:
1805 if (idCpu == NIL_VMCPUID)
1806 return VERR_INVALID_CPU_ID;
1807 rc = PGMR0PhysAllocateLargeHandyPage(pGVM, idCpu);
1808 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1809 break;
1810
1811 case VMMR0_DO_PGM_PHYS_SETUP_IOMMU:
1812 if (idCpu != 0)
1813 return VERR_INVALID_CPU_ID;
1814 rc = PGMR0PhysSetupIoMmu(pGVM);
1815 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1816 break;
1817
1818 case VMMR0_DO_PGM_POOL_GROW:
1819 if (idCpu == NIL_VMCPUID)
1820 return VERR_INVALID_CPU_ID;
1821 rc = PGMR0PoolGrow(pGVM);
1822 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1823 break;
1824
1825 /*
1826 * GMM wrappers.
1827 */
1828 case VMMR0_DO_GMM_INITIAL_RESERVATION:
1829 if (u64Arg)
1830 return VERR_INVALID_PARAMETER;
1831 rc = GMMR0InitialReservationReq(pGVM, idCpu, (PGMMINITIALRESERVATIONREQ)pReqHdr);
1832 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1833 break;
1834
1835 case VMMR0_DO_GMM_UPDATE_RESERVATION:
1836 if (u64Arg)
1837 return VERR_INVALID_PARAMETER;
1838 rc = GMMR0UpdateReservationReq(pGVM, idCpu, (PGMMUPDATERESERVATIONREQ)pReqHdr);
1839 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1840 break;
1841
1842 case VMMR0_DO_GMM_ALLOCATE_PAGES:
1843 if (u64Arg)
1844 return VERR_INVALID_PARAMETER;
1845 rc = GMMR0AllocatePagesReq(pGVM, idCpu, (PGMMALLOCATEPAGESREQ)pReqHdr);
1846 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1847 break;
1848
1849 case VMMR0_DO_GMM_FREE_PAGES:
1850 if (u64Arg)
1851 return VERR_INVALID_PARAMETER;
1852 rc = GMMR0FreePagesReq(pGVM, idCpu, (PGMMFREEPAGESREQ)pReqHdr);
1853 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1854 break;
1855
1856 case VMMR0_DO_GMM_FREE_LARGE_PAGE:
1857 if (u64Arg)
1858 return VERR_INVALID_PARAMETER;
1859 rc = GMMR0FreeLargePageReq(pGVM, idCpu, (PGMMFREELARGEPAGEREQ)pReqHdr);
1860 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1861 break;
1862
1863 case VMMR0_DO_GMM_QUERY_HYPERVISOR_MEM_STATS:
1864 if (u64Arg)
1865 return VERR_INVALID_PARAMETER;
1866 rc = GMMR0QueryHypervisorMemoryStatsReq((PGMMMEMSTATSREQ)pReqHdr);
1867 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1868 break;
1869
1870 case VMMR0_DO_GMM_QUERY_MEM_STATS:
1871 if (idCpu == NIL_VMCPUID)
1872 return VERR_INVALID_CPU_ID;
1873 if (u64Arg)
1874 return VERR_INVALID_PARAMETER;
1875 rc = GMMR0QueryMemoryStatsReq(pGVM, idCpu, (PGMMMEMSTATSREQ)pReqHdr);
1876 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1877 break;
1878
1879 case VMMR0_DO_GMM_BALLOONED_PAGES:
1880 if (u64Arg)
1881 return VERR_INVALID_PARAMETER;
1882 rc = GMMR0BalloonedPagesReq(pGVM, idCpu, (PGMMBALLOONEDPAGESREQ)pReqHdr);
1883 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1884 break;
1885
1886 case VMMR0_DO_GMM_MAP_UNMAP_CHUNK:
1887 if (u64Arg)
1888 return VERR_INVALID_PARAMETER;
1889 rc = GMMR0MapUnmapChunkReq(pGVM, (PGMMMAPUNMAPCHUNKREQ)pReqHdr);
1890 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1891 break;
1892
1893 case VMMR0_DO_GMM_SEED_CHUNK:
1894 if (pReqHdr)
1895 return VERR_INVALID_PARAMETER;
1896 rc = GMMR0SeedChunk(pGVM, idCpu, (RTR3PTR)u64Arg);
1897 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1898 break;
1899
1900 case VMMR0_DO_GMM_REGISTER_SHARED_MODULE:
1901 if (idCpu == NIL_VMCPUID)
1902 return VERR_INVALID_CPU_ID;
1903 if (u64Arg)
1904 return VERR_INVALID_PARAMETER;
1905 rc = GMMR0RegisterSharedModuleReq(pGVM, idCpu, (PGMMREGISTERSHAREDMODULEREQ)pReqHdr);
1906 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1907 break;
1908
1909 case VMMR0_DO_GMM_UNREGISTER_SHARED_MODULE:
1910 if (idCpu == NIL_VMCPUID)
1911 return VERR_INVALID_CPU_ID;
1912 if (u64Arg)
1913 return VERR_INVALID_PARAMETER;
1914 rc = GMMR0UnregisterSharedModuleReq(pGVM, idCpu, (PGMMUNREGISTERSHAREDMODULEREQ)pReqHdr);
1915 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1916 break;
1917
1918 case VMMR0_DO_GMM_RESET_SHARED_MODULES:
1919 if (idCpu == NIL_VMCPUID)
1920 return VERR_INVALID_CPU_ID;
1921 if ( u64Arg
1922 || pReqHdr)
1923 return VERR_INVALID_PARAMETER;
1924 rc = GMMR0ResetSharedModules(pGVM, idCpu);
1925 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1926 break;
1927
1928#ifdef VBOX_WITH_PAGE_SHARING
1929 case VMMR0_DO_GMM_CHECK_SHARED_MODULES:
1930 {
1931 if (idCpu == NIL_VMCPUID)
1932 return VERR_INVALID_CPU_ID;
1933 if ( u64Arg
1934 || pReqHdr)
1935 return VERR_INVALID_PARAMETER;
1936 rc = GMMR0CheckSharedModules(pGVM, idCpu);
1937 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1938 break;
1939 }
1940#endif
1941
1942#if defined(VBOX_STRICT) && HC_ARCH_BITS == 64
1943 case VMMR0_DO_GMM_FIND_DUPLICATE_PAGE:
1944 if (u64Arg)
1945 return VERR_INVALID_PARAMETER;
1946 rc = GMMR0FindDuplicatePageReq(pGVM, (PGMMFINDDUPLICATEPAGEREQ)pReqHdr);
1947 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1948 break;
1949#endif
1950
1951 case VMMR0_DO_GMM_QUERY_STATISTICS:
1952 if (u64Arg)
1953 return VERR_INVALID_PARAMETER;
1954 rc = GMMR0QueryStatisticsReq(pGVM, (PGMMQUERYSTATISTICSSREQ)pReqHdr);
1955 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1956 break;
1957
1958 case VMMR0_DO_GMM_RESET_STATISTICS:
1959 if (u64Arg)
1960 return VERR_INVALID_PARAMETER;
1961 rc = GMMR0ResetStatisticsReq(pGVM, (PGMMRESETSTATISTICSSREQ)pReqHdr);
1962 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1963 break;
1964
1965 /*
1966 * A quick GCFGM mock-up.
1967 */
1968 /** @todo GCFGM with proper access control, ring-3 management interface and all that. */
1969 case VMMR0_DO_GCFGM_SET_VALUE:
1970 case VMMR0_DO_GCFGM_QUERY_VALUE:
1971 {
1972 if (pGVM || !pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
1973 return VERR_INVALID_PARAMETER;
1974 PGCFGMVALUEREQ pReq = (PGCFGMVALUEREQ)pReqHdr;
1975 if (pReq->Hdr.cbReq != sizeof(*pReq))
1976 return VERR_INVALID_PARAMETER;
1977 if (enmOperation == VMMR0_DO_GCFGM_SET_VALUE)
1978 {
1979 rc = GVMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
1980 //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
1981 // rc = GMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
1982 }
1983 else
1984 {
1985 rc = GVMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
1986 //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
1987 // rc = GMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
1988 }
1989 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1990 break;
1991 }
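        /*
         * Illustrative only (not part of the dispatcher): judging from the
         * field accesses above, a ring-3 caller would fill a GCFGMVALUEREQ
         * roughly like this before submitting VMMR0_DO_GCFGM_QUERY_VALUE;
         * the value name below is a hypothetical example.
         *
         *     GCFGMVALUEREQ Req;
         *     Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
         *     Req.Hdr.cbReq    = sizeof(Req);         <- must match exactly, see the size check above
         *     Req.pSession     = pSession;
         *     Req.u64Value     = 0;
         *     RTStrCopy(Req.szName, sizeof(Req.szName), "/GVMM/MinSleep");
         */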
1992
1993 /*
1994 * PDM Wrappers.
1995 */
1996 case VMMR0_DO_PDM_DRIVER_CALL_REQ_HANDLER:
1997 {
1998 if (!pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
1999 return VERR_INVALID_PARAMETER;
2000 rc = PDMR0DriverCallReqHandler(pGVM, (PPDMDRIVERCALLREQHANDLERREQ)pReqHdr);
2001 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2002 break;
2003 }
2004
2005 case VMMR0_DO_PDM_DEVICE_CREATE:
2006 {
2007 if (!pReqHdr || u64Arg || idCpu != 0)
2008 return VERR_INVALID_PARAMETER;
2009 rc = PDMR0DeviceCreateReqHandler(pGVM, (PPDMDEVICECREATEREQ)pReqHdr);
2010 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2011 break;
2012 }
2013
2014 case VMMR0_DO_PDM_DEVICE_GEN_CALL:
2015 {
2016 if (!pReqHdr || u64Arg)
2017 return VERR_INVALID_PARAMETER;
2018 rc = PDMR0DeviceGenCallReqHandler(pGVM, (PPDMDEVICEGENCALLREQ)pReqHdr, idCpu);
2019 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2020 break;
2021 }
2022
2023     /** @todo Remove this once all devices have been converted to the new style! @bugref{9218} */
2024 case VMMR0_DO_PDM_DEVICE_COMPAT_SET_CRITSECT:
2025 {
2026 if (!pReqHdr || u64Arg || idCpu != 0)
2027 return VERR_INVALID_PARAMETER;
2028 rc = PDMR0DeviceCompatSetCritSectReqHandler(pGVM, (PPDMDEVICECOMPATSETCRITSECTREQ)pReqHdr);
2029 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2030 break;
2031 }
2032
2033 /*
2034 * Requests to the internal networking service.
2035 */
2036 case VMMR0_DO_INTNET_OPEN:
2037 {
2038 PINTNETOPENREQ pReq = (PINTNETOPENREQ)pReqHdr;
2039 if (u64Arg || !pReq || !vmmR0IsValidSession(pGVM, pReq->pSession, pSession) || idCpu != NIL_VMCPUID)
2040 return VERR_INVALID_PARAMETER;
2041 rc = IntNetR0OpenReq(pSession, pReq);
2042 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2043 break;
2044 }
2045
2046 case VMMR0_DO_INTNET_IF_CLOSE:
2047 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFCLOSEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2048 return VERR_INVALID_PARAMETER;
2049 rc = IntNetR0IfCloseReq(pSession, (PINTNETIFCLOSEREQ)pReqHdr);
2050 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2051 break;
2052
2053
2054 case VMMR0_DO_INTNET_IF_GET_BUFFER_PTRS:
2055 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFGETBUFFERPTRSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2056 return VERR_INVALID_PARAMETER;
2057 rc = IntNetR0IfGetBufferPtrsReq(pSession, (PINTNETIFGETBUFFERPTRSREQ)pReqHdr);
2058 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2059 break;
2060
2061 case VMMR0_DO_INTNET_IF_SET_PROMISCUOUS_MODE:
2062 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2063 return VERR_INVALID_PARAMETER;
2064 rc = IntNetR0IfSetPromiscuousModeReq(pSession, (PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr);
2065 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2066 break;
2067
2068 case VMMR0_DO_INTNET_IF_SET_MAC_ADDRESS:
2069 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFSETMACADDRESSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2070 return VERR_INVALID_PARAMETER;
2071 rc = IntNetR0IfSetMacAddressReq(pSession, (PINTNETIFSETMACADDRESSREQ)pReqHdr);
2072 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2073 break;
2074
2075 case VMMR0_DO_INTNET_IF_SET_ACTIVE:
2076 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFSETACTIVEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2077 return VERR_INVALID_PARAMETER;
2078 rc = IntNetR0IfSetActiveReq(pSession, (PINTNETIFSETACTIVEREQ)pReqHdr);
2079 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2080 break;
2081
2082 case VMMR0_DO_INTNET_IF_SEND:
2083 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2084 return VERR_INVALID_PARAMETER;
2085 rc = IntNetR0IfSendReq(pSession, (PINTNETIFSENDREQ)pReqHdr);
2086 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2087 break;
2088
2089 case VMMR0_DO_INTNET_IF_WAIT:
2090 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2091 return VERR_INVALID_PARAMETER;
2092 rc = IntNetR0IfWaitReq(pSession, (PINTNETIFWAITREQ)pReqHdr);
2093 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2094 break;
2095
2096 case VMMR0_DO_INTNET_IF_ABORT_WAIT:
2097 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2098 return VERR_INVALID_PARAMETER;
2099 rc = IntNetR0IfAbortWaitReq(pSession, (PINTNETIFABORTWAITREQ)pReqHdr);
2100 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2101 break;
2102
2103#if 0 //def VBOX_WITH_PCI_PASSTHROUGH
2104 /*
2105 * Requests to host PCI driver service.
2106 */
2107 case VMMR0_DO_PCIRAW_REQ:
2108 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PPCIRAWSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2109 return VERR_INVALID_PARAMETER;
2110 rc = PciRawR0ProcessReq(pGVM, pSession, (PPCIRAWSENDREQ)pReqHdr);
2111 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2112 break;
2113#endif
2114
2115 /*
2116 * NEM requests.
2117 */
2118#ifdef VBOX_WITH_NEM_R0
2119# if defined(RT_ARCH_AMD64) && defined(RT_OS_WINDOWS)
2120 case VMMR0_DO_NEM_INIT_VM:
2121 if (u64Arg || pReqHdr || idCpu != 0)
2122 return VERR_INVALID_PARAMETER;
2123 rc = NEMR0InitVM(pGVM);
2124 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2125 break;
2126
2127 case VMMR0_DO_NEM_INIT_VM_PART_2:
2128 if (u64Arg || pReqHdr || idCpu != 0)
2129 return VERR_INVALID_PARAMETER;
2130 rc = NEMR0InitVMPart2(pGVM);
2131 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2132 break;
2133
2134 case VMMR0_DO_NEM_MAP_PAGES:
2135 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2136 return VERR_INVALID_PARAMETER;
2137 rc = NEMR0MapPages(pGVM, idCpu);
2138 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2139 break;
2140
2141 case VMMR0_DO_NEM_UNMAP_PAGES:
2142 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2143 return VERR_INVALID_PARAMETER;
2144 rc = NEMR0UnmapPages(pGVM, idCpu);
2145 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2146 break;
2147
2148 case VMMR0_DO_NEM_EXPORT_STATE:
2149 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2150 return VERR_INVALID_PARAMETER;
2151 rc = NEMR0ExportState(pGVM, idCpu);
2152 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2153 break;
2154
2155 case VMMR0_DO_NEM_IMPORT_STATE:
2156 if (pReqHdr || idCpu == NIL_VMCPUID)
2157 return VERR_INVALID_PARAMETER;
2158 rc = NEMR0ImportState(pGVM, idCpu, u64Arg);
2159 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2160 break;
2161
2162 case VMMR0_DO_NEM_QUERY_CPU_TICK:
2163 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2164 return VERR_INVALID_PARAMETER;
2165 rc = NEMR0QueryCpuTick(pGVM, idCpu);
2166 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2167 break;
2168
2169 case VMMR0_DO_NEM_RESUME_CPU_TICK_ON_ALL:
2170 if (pReqHdr || idCpu == NIL_VMCPUID)
2171 return VERR_INVALID_PARAMETER;
2172 rc = NEMR0ResumeCpuTickOnAll(pGVM, idCpu, u64Arg);
2173 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2174 break;
2175
2176 case VMMR0_DO_NEM_UPDATE_STATISTICS:
2177 if (u64Arg || pReqHdr)
2178 return VERR_INVALID_PARAMETER;
2179 rc = NEMR0UpdateStatistics(pGVM, idCpu);
2180 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2181 break;
2182
2183# if 1 && defined(DEBUG_bird)
2184 case VMMR0_DO_NEM_EXPERIMENT:
2185 if (pReqHdr)
2186 return VERR_INVALID_PARAMETER;
2187 rc = NEMR0DoExperiment(pGVM, idCpu, u64Arg);
2188 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2189 break;
2190# endif
2191# endif
2192#endif
2193
2194 /*
2195 * IOM requests.
2196 */
2197 case VMMR0_DO_IOM_GROW_IO_PORTS:
2198 {
2199 if (pReqHdr || idCpu != 0)
2200 return VERR_INVALID_PARAMETER;
2201 rc = IOMR0IoPortGrowRegistrationTables(pGVM, u64Arg);
2202 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2203 break;
2204 }
2205
2206 case VMMR0_DO_IOM_GROW_IO_PORT_STATS:
2207 {
2208 if (pReqHdr || idCpu != 0)
2209 return VERR_INVALID_PARAMETER;
2210 rc = IOMR0IoPortGrowStatisticsTable(pGVM, u64Arg);
2211 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2212 break;
2213 }
2214
2215 case VMMR0_DO_IOM_GROW_MMIO_REGS:
2216 {
2217 if (pReqHdr || idCpu != 0)
2218 return VERR_INVALID_PARAMETER;
2219 rc = IOMR0MmioGrowRegistrationTables(pGVM, u64Arg);
2220 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2221 break;
2222 }
2223
2224 case VMMR0_DO_IOM_GROW_MMIO_STATS:
2225 {
2226 if (pReqHdr || idCpu != 0)
2227 return VERR_INVALID_PARAMETER;
2228 rc = IOMR0MmioGrowStatisticsTable(pGVM, u64Arg);
2229 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2230 break;
2231 }
2232
2233 case VMMR0_DO_IOM_SYNC_STATS_INDICES:
2234 {
2235 if (pReqHdr || idCpu != 0)
2236 return VERR_INVALID_PARAMETER;
2237 rc = IOMR0IoPortSyncStatisticsIndices(pGVM);
2238 if (RT_SUCCESS(rc))
2239 rc = IOMR0MmioSyncStatisticsIndices(pGVM);
2240 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2241 break;
2242 }
2243
2244 /*
2245 * DBGF requests.
2246 */
2247#ifdef VBOX_WITH_DBGF_TRACING
2248 case VMMR0_DO_DBGF_TRACER_CREATE:
2249 {
2250 if (!pReqHdr || u64Arg || idCpu != 0)
2251 return VERR_INVALID_PARAMETER;
2252 rc = DBGFR0TracerCreateReqHandler(pGVM, (PDBGFTRACERCREATEREQ)pReqHdr);
2253 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2254 break;
2255 }
2256
2257 case VMMR0_DO_DBGF_TRACER_CALL_REQ_HANDLER:
2258 {
2259 if (!pReqHdr || u64Arg)
2260 return VERR_INVALID_PARAMETER;
2261#if 0 /** @todo */
2262 rc = DBGFR0TracerGenCallReqHandler(pGVM, (PDBGFTRACERGENCALLREQ)pReqHdr, idCpu);
2263#else
2264 rc = VERR_NOT_IMPLEMENTED;
2265#endif
2266 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2267 break;
2268 }
2269#endif
2270
2271#ifdef VBOX_WITH_LOTS_OF_DBGF_BPS
2272 case VMMR0_DO_DBGF_BP_INIT:
2273 {
2274 if (!pReqHdr || u64Arg || idCpu != 0)
2275 return VERR_INVALID_PARAMETER;
2276 rc = DBGFR0BpInitReqHandler(pGVM, (PDBGFBPINITREQ)pReqHdr);
2277 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2278 break;
2279 }
2280
2281 case VMMR0_DO_DBGF_BP_CHUNK_ALLOC:
2282 {
2283 if (!pReqHdr || u64Arg || idCpu != 0)
2284 return VERR_INVALID_PARAMETER;
2285 rc = DBGFR0BpChunkAllocReqHandler(pGVM, (PDBGFBPCHUNKALLOCREQ)pReqHdr);
2286 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2287 break;
2288 }
2289
2290 case VMMR0_DO_DBGF_BP_L2_TBL_CHUNK_ALLOC:
2291 {
2292 if (!pReqHdr || u64Arg || idCpu != 0)
2293 return VERR_INVALID_PARAMETER;
2294 rc = DBGFR0BpL2TblChunkAllocReqHandler(pGVM, (PDBGFBPL2TBLCHUNKALLOCREQ)pReqHdr);
2295 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2296 break;
2297 }
2298
2299 case VMMR0_DO_DBGF_BP_OWNER_INIT:
2300 {
2301 if (!pReqHdr || u64Arg || idCpu != 0)
2302 return VERR_INVALID_PARAMETER;
2303 rc = DBGFR0BpOwnerInitReqHandler(pGVM, (PDBGFBPOWNERINITREQ)pReqHdr);
2304 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2305 break;
2306 }
2307#endif
2308
2309 /*
2310 * For profiling.
2311 */
2312 case VMMR0_DO_NOP:
2313 case VMMR0_DO_SLOW_NOP:
2314 return VINF_SUCCESS;
2315
2316 /*
2317 * For testing Ring-0 APIs invoked in this environment.
2318 */
2319 case VMMR0_DO_TESTS:
2320 /** @todo make new test */
2321 return VINF_SUCCESS;
2322
2323 default:
2324 /*
2325          * We're returning VERR_NOT_SUPPORTED here so we've got something other
2326          * than -1, which the interrupt gate glue code might return.
2327 */
2328 Log(("operation %#x is not supported\n", enmOperation));
2329 return VERR_NOT_SUPPORTED;
2330 }
2331 return rc;
2332}
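
/*
 * A minimal sketch of the ring-3 side of this protocol, assuming the usual
 * SUPR3CallVMMR0Ex() support-library entry point (hypothetical example, not
 * code from this file).  VMMR0_DO_NOP needs neither a request packet nor a
 * u64Arg, which makes it the simplest round trip through the worker above:
 *
 *     int rc = SUPR3CallVMMR0Ex(pVMR0, NIL_VMCPUID, VMMR0_DO_NOP, 0, NULL);
 *     AssertRC(rc);
 *
 * Operations taking a request packet instead pass a structure whose first
 * member is a SUPVMMR0REQHDR with u32Magic and cbReq filled in, which the
 * per-operation validation above relies on.
 */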
2333
2334
2335/**
2336 * Argument for vmmR0EntryExWrapper containing the arguments for VMMR0EntryEx.
2337 */
2338typedef struct VMMR0ENTRYEXARGS
2339{
2340 PGVM pGVM;
2341 VMCPUID idCpu;
2342 VMMR0OPERATION enmOperation;
2343 PSUPVMMR0REQHDR pReq;
2344 uint64_t u64Arg;
2345 PSUPDRVSESSION pSession;
2346} VMMR0ENTRYEXARGS;
2347/** Pointer to a vmmR0EntryExWrapper argument package. */
2348typedef VMMR0ENTRYEXARGS *PVMMR0ENTRYEXARGS;
2349
2350/**
2351 * This is just a longjmp wrapper function for VMMR0EntryEx calls.
2352 *
2353 * @returns VBox status code.
2354 * @param pvArgs The argument package
2355 */
2356static DECLCALLBACK(int) vmmR0EntryExWrapper(void *pvArgs)
2357{
2358 return vmmR0EntryExWorker(((PVMMR0ENTRYEXARGS)pvArgs)->pGVM,
2359 ((PVMMR0ENTRYEXARGS)pvArgs)->idCpu,
2360 ((PVMMR0ENTRYEXARGS)pvArgs)->enmOperation,
2361 ((PVMMR0ENTRYEXARGS)pvArgs)->pReq,
2362 ((PVMMR0ENTRYEXARGS)pvArgs)->u64Arg,
2363 ((PVMMR0ENTRYEXARGS)pvArgs)->pSession);
2364}
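
/*
 * Conceptually, vmmR0CallRing3SetJmpEx() gives the wrapper above classic
 * setjmp/longjmp semantics.  A rough standard-C analogue (illustration only;
 * the real implementation uses a custom jump buffer with stack switching):
 *
 *     jmp_buf JmpBuf;
 *     if (!setjmp(JmpBuf))
 *         rc = vmmR0EntryExWrapper(&Args);     <- normal, direct return path
 *     else
 *         rc = rcRing3;                        <- resumed here after a longjmp
 */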
2365
2366
2367/**
2368 * The Ring 0 entry point, called by the support library (SUP).
2369 *
2370 * @returns VBox status code.
2371 * @param pGVM The global (ring-0) VM structure.
2372 * @param pVM The cross context VM structure.
2373 * @param idCpu Virtual CPU ID argument. Must be NIL_VMCPUID if pVM
2374  *                          is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't.
2375 * @param enmOperation Which operation to execute.
2376 * @param pReq Pointer to the SUPVMMR0REQHDR packet. Optional.
2377 * @param u64Arg Some simple constant argument.
2378 * @param pSession The session of the caller.
2379 * @remarks Assume called with interrupts _enabled_.
2380 */
2381VMMR0DECL(int) VMMR0EntryEx(PGVM pGVM, PVMCC pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation,
2382 PSUPVMMR0REQHDR pReq, uint64_t u64Arg, PSUPDRVSESSION pSession)
2383{
2384 /*
2385 * Requests that should only happen on the EMT thread will be
2386 * wrapped in a setjmp so we can assert without causing trouble.
2387 */
2388 if ( pVM != NULL
2389 && pGVM != NULL
2390 && pVM == pGVM /** @todo drop pGVM */
2391 && idCpu < pGVM->cCpus
2392 && pGVM->pSession == pSession
2393 && pGVM->pSelf == pVM)
2394 {
2395 switch (enmOperation)
2396 {
2397 /* These might/will be called before VMMR3Init. */
2398 case VMMR0_DO_GMM_INITIAL_RESERVATION:
2399 case VMMR0_DO_GMM_UPDATE_RESERVATION:
2400 case VMMR0_DO_GMM_ALLOCATE_PAGES:
2401 case VMMR0_DO_GMM_FREE_PAGES:
2402 case VMMR0_DO_GMM_BALLOONED_PAGES:
2403             /* On the Mac we might not have a valid jmp buf, so check these as well. */
2404 case VMMR0_DO_VMMR0_INIT:
2405 case VMMR0_DO_VMMR0_TERM:
2406
2407 case VMMR0_DO_PDM_DEVICE_CREATE:
2408 case VMMR0_DO_PDM_DEVICE_GEN_CALL:
2409 case VMMR0_DO_IOM_GROW_IO_PORTS:
2410 case VMMR0_DO_IOM_GROW_IO_PORT_STATS:
2411
2412#ifdef VBOX_WITH_LOTS_OF_DBGF_BPS
2413 case VMMR0_DO_DBGF_BP_INIT:
2414 case VMMR0_DO_DBGF_BP_CHUNK_ALLOC:
2415 case VMMR0_DO_DBGF_BP_L2_TBL_CHUNK_ALLOC:
2416#endif
2417 {
2418 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2419 RTNATIVETHREAD hNativeThread = RTThreadNativeSelf();
2420 if (RT_LIKELY( pGVCpu->hEMT == hNativeThread
2421 && pGVCpu->hNativeThreadR0 == hNativeThread))
2422 {
2423 if (!pGVCpu->vmm.s.CallRing3JmpBufR0.pvSavedStack)
2424 break;
2425
2426 /** @todo validate this EMT claim... GVM knows. */
2427 VMMR0ENTRYEXARGS Args;
2428 Args.pGVM = pGVM;
2429 Args.idCpu = idCpu;
2430 Args.enmOperation = enmOperation;
2431 Args.pReq = pReq;
2432 Args.u64Arg = u64Arg;
2433 Args.pSession = pSession;
2434 return vmmR0CallRing3SetJmpEx(&pGVCpu->vmm.s.CallRing3JmpBufR0, vmmR0EntryExWrapper, &Args);
2435 }
2436 return VERR_VM_THREAD_NOT_EMT;
2437 }
2438
2439 default:
2440 case VMMR0_DO_PGM_POOL_GROW:
2441 break;
2442 }
2443 }
2444 return vmmR0EntryExWorker(pGVM, idCpu, enmOperation, pReq, u64Arg, pSession);
2445}
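
/*
 * Note on the EMT gate above: for the listed operations the calling native
 * thread must be the EMT bound to aCpus[idCpu], i.e. both hEMT and
 * hNativeThreadR0 must equal RTThreadNativeSelf(); anything else gets
 * VERR_VM_THREAD_NOT_EMT.  All other operations fall through to
 * vmmR0EntryExWorker() without the setjmp protection.
 */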
2446
2447
2448/**
2449 * Checks whether we've armed the ring-0 long jump machinery.
2450 *
2451 * @returns @c true / @c false
2452 * @param pVCpu The cross context virtual CPU structure.
2453 * @thread EMT
2454 * @sa VMMIsLongJumpArmed
2455 */
2456VMMR0_INT_DECL(bool) VMMR0IsLongJumpArmed(PVMCPUCC pVCpu)
2457{
2458#ifdef RT_ARCH_X86
2459 return pVCpu->vmm.s.CallRing3JmpBufR0.eip
2460 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
2461#else
2462 return pVCpu->vmm.s.CallRing3JmpBufR0.rip
2463 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
2464#endif
2465}
2466
2467
2468/**
2469 * Checks whether we've done a ring-3 long jump.
2470 *
2471 * @returns @c true / @c false
2472 * @param pVCpu The cross context virtual CPU structure.
2473 * @thread EMT
2474 */
2475VMMR0_INT_DECL(bool) VMMR0IsInRing3LongJump(PVMCPUCC pVCpu)
2476{
2477 return pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
2478}
2479
2480
2481/**
2482 * Internal R0 logger worker: Flush logger.
2483 *
2484 * @param pLogger The logger instance to flush.
2485 * @remark This function must be exported!
2486 */
2487VMMR0DECL(void) vmmR0LoggerFlush(PRTLOGGER pLogger)
2488{
2489#ifdef LOG_ENABLED
2490 /*
2491 * Convert the pLogger into a VM handle and 'call' back to Ring-3.
2492      * (This code is a bit paranoid.)
2493 */
2494 PVMMR0LOGGER pR0Logger = (PVMMR0LOGGER)((uintptr_t)pLogger - RT_UOFFSETOF(VMMR0LOGGER, Logger));
2495 if ( !VALID_PTR(pR0Logger)
2496 || !VALID_PTR(pR0Logger + 1)
2497 || pLogger->u32Magic != RTLOGGER_MAGIC)
2498 {
2499# ifdef DEBUG
2500 SUPR0Printf("vmmR0LoggerFlush: pLogger=%p!\n", pLogger);
2501# endif
2502 return;
2503 }
2504 if (pR0Logger->fFlushingDisabled)
2505 return; /* quietly */
2506
2507 PVMCC pVM = pR0Logger->pVM;
2508 if ( !VALID_PTR(pVM)
2509 || pVM->pSelf != pVM)
2510 {
2511# ifdef DEBUG
2512 SUPR0Printf("vmmR0LoggerFlush: pVM=%p! pSelf=%p! pLogger=%p\n", pVM, pVM->pSelf, pLogger);
2513# endif
2514 return;
2515 }
2516
2517 PVMCPUCC pVCpu = VMMGetCpu(pVM);
2518 if (pVCpu)
2519 {
2520 /*
2521 * Check that the jump buffer is armed.
2522 */
2523# ifdef RT_ARCH_X86
2524 if ( !pVCpu->vmm.s.CallRing3JmpBufR0.eip
2525 || pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2526# else
2527 if ( !pVCpu->vmm.s.CallRing3JmpBufR0.rip
2528 || pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2529# endif
2530 {
2531# ifdef DEBUG
2532 SUPR0Printf("vmmR0LoggerFlush: Jump buffer isn't armed!\n");
2533# endif
2534 return;
2535 }
2536 VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VMM_LOGGER_FLUSH, 0);
2537 }
2538# ifdef DEBUG
2539 else
2540 SUPR0Printf("vmmR0LoggerFlush: invalid VCPU context!\n");
2541# endif
2542#else
2543 NOREF(pLogger);
2544#endif /* LOG_ENABLED */
2545}
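
/*
 * The RT_UOFFSETOF() arithmetic above is the usual container-of pattern:
 * given a pointer to the embedded RTLOGGER member, it recovers the owning
 * VMMR0LOGGER.  Minimal generic sketch (illustration only, hypothetical
 * structure):
 *
 *     struct OUTER { uint32_t x; RTLOGGER Logger; };
 *     struct OUTER *pOuter = (struct OUTER *)((uintptr_t)pInner - RT_UOFFSETOF(struct OUTER, Logger));
 */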
2546
2547#ifdef LOG_ENABLED
2548
2549/**
2550 * Disables flushing of the ring-0 debug log.
2551 *
2552 * @param pVCpu The cross context virtual CPU structure.
2553 */
2554VMMR0_INT_DECL(void) VMMR0LogFlushDisable(PVMCPUCC pVCpu)
2555{
2556 if (pVCpu->vmm.s.pR0LoggerR0)
2557 pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled = true;
2558 if (pVCpu->vmm.s.pR0RelLoggerR0)
2559 pVCpu->vmm.s.pR0RelLoggerR0->fFlushingDisabled = true;
2560}
2561
2562
2563/**
2564 * Enables flushing of the ring-0 debug log.
2565 *
2566 * @param pVCpu The cross context virtual CPU structure.
2567 */
2568VMMR0_INT_DECL(void) VMMR0LogFlushEnable(PVMCPUCC pVCpu)
2569{
2570 if (pVCpu->vmm.s.pR0LoggerR0)
2571 pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled = false;
2572 if (pVCpu->vmm.s.pR0RelLoggerR0)
2573 pVCpu->vmm.s.pR0RelLoggerR0->fFlushingDisabled = false;
2574}
2575
2576
2577/**
2578 * Checks if log flushing is disabled or not.
2579 *
2580 * @param pVCpu The cross context virtual CPU structure.
2581 */
2582VMMR0_INT_DECL(bool) VMMR0IsLogFlushDisabled(PVMCPUCC pVCpu)
2583{
2584 if (pVCpu->vmm.s.pR0LoggerR0)
2585 return pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled;
2586 if (pVCpu->vmm.s.pR0RelLoggerR0)
2587 return pVCpu->vmm.s.pR0RelLoggerR0->fFlushingDisabled;
2588 return true;
2589}
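
/*
 * Typical usage of the enable/disable pair above (sketch): bracket ring-0
 * code that may log but must not trigger a flush call back to ring-3, e.g.
 * while the jump buffer is in an inconsistent state:
 *
 *     VMMR0LogFlushDisable(pVCpu);
 *     ... work that may log ...
 *     VMMR0LogFlushEnable(pVCpu);
 */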
2590
2591#endif /* LOG_ENABLED */
2592
2593/**
2594 * Override RTLogRelGetDefaultInstanceEx so we can do LogRel to VBox.log from EMTs in ring-0.
2595 */
2596DECLEXPORT(PRTLOGGER) RTLogRelGetDefaultInstanceEx(uint32_t fFlagsAndGroup)
2597{
2598 PGVMCPU pGVCpu = GVMMR0GetGVCpuByEMT(NIL_RTNATIVETHREAD);
2599 if (pGVCpu)
2600 {
2601 PVMCPUCC pVCpu = pGVCpu;
2602 if (RT_VALID_PTR(pVCpu))
2603 {
2604 PVMMR0LOGGER pVmmLogger = pVCpu->vmm.s.pR0RelLoggerR0;
2605 if (RT_VALID_PTR(pVmmLogger))
2606 {
2607 if ( pVmmLogger->fCreated
2608 && pVmmLogger->pVM == pGVCpu->pGVM)
2609 {
2610 if (pVmmLogger->Logger.fFlags & RTLOGFLAGS_DISABLED)
2611 return NULL;
2612 uint16_t const fFlags = RT_LO_U16(fFlagsAndGroup);
2613 uint16_t const iGroup = RT_HI_U16(fFlagsAndGroup);
2614 if ( iGroup != UINT16_MAX
2615 && ( ( pVmmLogger->Logger.afGroups[iGroup < pVmmLogger->Logger.cGroups ? iGroup : 0]
2616 & (fFlags | (uint32_t)RTLOGGRPFLAGS_ENABLED))
2617 != (fFlags | (uint32_t)RTLOGGRPFLAGS_ENABLED)))
2618 return NULL;
2619 return &pVmmLogger->Logger;
2620 }
2621 }
2622 }
2623 }
2624 return SUPR0GetDefaultLogRelInstanceEx(fFlagsAndGroup);
2625}
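
/*
 * Worked example of the fFlagsAndGroup encoding checked above: the low
 * 16 bits carry the RTLOGGRPFLAGS_* mask and the high 16 bits the group
 * index, so for group 5 at level 2 a caller would pass
 * RT_MAKE_U32(RTLOGGRPFLAGS_LEVEL_2, 5); the logger is only returned if
 * afGroups[5] has both RTLOGGRPFLAGS_ENABLED and RTLOGGRPFLAGS_LEVEL_2 set.
 */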
2626
2627
2628/**
2629 * Jump back to ring-3 if we're the EMT and the longjmp is armed.
2630 *
2631 * @returns true if the breakpoint should be hit, false if it should be ignored.
2632 */
2633DECLEXPORT(bool) RTCALL RTAssertShouldPanic(void)
2634{
2635#if 0
2636 return true;
2637#else
2638 PVMCC pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
2639 if (pVM)
2640 {
2641 PVMCPUCC pVCpu = VMMGetCpu(pVM);
2642
2643 if (pVCpu)
2644 {
2645#ifdef RT_ARCH_X86
2646 if ( pVCpu->vmm.s.CallRing3JmpBufR0.eip
2647 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2648#else
2649 if ( pVCpu->vmm.s.CallRing3JmpBufR0.rip
2650 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2651#endif
2652 {
2653 int rc = VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VM_R0_ASSERTION, 0);
2654 return RT_FAILURE_NP(rc);
2655 }
2656 }
2657 }
2658#ifdef RT_OS_LINUX
2659 return true;
2660#else
2661 return false;
2662#endif
2663#endif
2664}
2665
2666
2667/**
2668 * Override this so we can push it up to ring-3.
2669 *
2670 * @param pszExpr Expression. Can be NULL.
2671 * @param uLine Location line number.
2672 * @param pszFile Location file name.
2673 * @param pszFunction Location function name.
2674 */
2675DECLEXPORT(void) RTCALL RTAssertMsg1Weak(const char *pszExpr, unsigned uLine, const char *pszFile, const char *pszFunction)
2676{
2677 /*
2678 * To the log.
2679 */
2680 LogAlways(("\n!!R0-Assertion Failed!!\n"
2681 "Expression: %s\n"
2682 "Location : %s(%d) %s\n",
2683 pszExpr, pszFile, uLine, pszFunction));
2684
2685 /*
2686 * To the global VMM buffer.
2687 */
2688 PVMCC pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
2689 if (pVM)
2690 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg1, sizeof(pVM->vmm.s.szRing0AssertMsg1),
2691 "\n!!R0-Assertion Failed!!\n"
2692 "Expression: %.*s\n"
2693 "Location : %s(%d) %s\n",
2694 sizeof(pVM->vmm.s.szRing0AssertMsg1) / 4 * 3, pszExpr,
2695 pszFile, uLine, pszFunction);
2696
2697 /*
2698 * Continue the normal way.
2699 */
2700 RTAssertMsg1(pszExpr, uLine, pszFile, pszFunction);
2701}
2702
2703
2704/**
2705 * Callback for RTLogFormatV which writes to the ring-3 log port.
2706 * See PFNLOGOUTPUT() for details.
2707 */
2708static DECLCALLBACK(size_t) rtLogOutput(void *pv, const char *pachChars, size_t cbChars)
2709{
2710 for (size_t i = 0; i < cbChars; i++)
2711 {
2712 LogAlways(("%c", pachChars[i])); NOREF(pachChars);
2713 }
2714
2715 NOREF(pv);
2716 return cbChars;
2717}
2718
2719
2720/**
2721 * Override this so we can push it up to ring-3.
2722 *
2723 * @param pszFormat The format string.
2724 * @param va Arguments.
2725 */
2726DECLEXPORT(void) RTCALL RTAssertMsg2WeakV(const char *pszFormat, va_list va)
2727{
2728 va_list vaCopy;
2729
2730 /*
2731 * Push the message to the loggers.
2732 */
2733 PRTLOGGER pLog = RTLogGetDefaultInstance(); /* Don't initialize it here... */
2734 if (pLog)
2735 {
2736 va_copy(vaCopy, va);
2737 RTLogFormatV(rtLogOutput, pLog, pszFormat, vaCopy);
2738 va_end(vaCopy);
2739 }
2740 pLog = RTLogRelGetDefaultInstance();
2741 if (pLog)
2742 {
2743 va_copy(vaCopy, va);
2744 RTLogFormatV(rtLogOutput, pLog, pszFormat, vaCopy);
2745 va_end(vaCopy);
2746 }
2747
2748 /*
2749 * Push it to the global VMM buffer.
2750 */
2751 PVMCC pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
2752 if (pVM)
2753 {
2754 va_copy(vaCopy, va);
2755 RTStrPrintfV(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2), pszFormat, vaCopy);
2756 va_end(vaCopy);
2757 }
2758
2759 /*
2760 * Continue the normal way.
2761 */
2762 RTAssertMsg2V(pszFormat, va);
2763}
2764