VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/VMMR0.cpp@ 80274

Last change on this file since 80274 was 80274, checked in by vboxsync, 5 years ago

VMM: Refactoring VMMR0/* and VMMRZ/* to use VMCC & VMMCPUCC. bugref:9217

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 96.8 KB
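
The bugref:9217 refactoring introduces the VMCC and VMCPUCC cross-context aliases. Where this file still carries both variants, the transitional pattern is guarded by VBOX_BUGREF_9217 and takes the ring-0 per-CPU structure straight from the GVM instead of going through the VMCC accessor, as in this excerpt from vmmR0InitVM below:

    #ifdef VBOX_BUGREF_9217
        PVMCPUCC pVCpu = &pGVM->aCpus[0];
    #else
        PVMCPUCC pVCpu = VMCC_GET_CPU_0(pVM);
    #endif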
1/* $Id: VMMR0.cpp 80274 2019-08-14 14:34:38Z vboxsync $ */
2/** @file
3 * VMM - Host Context Ring 0.
4 */
5
6/*
7 * Copyright (C) 2006-2019 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define VBOX_BUGREF_9217_PART_I
23#define LOG_GROUP LOG_GROUP_VMM
24#include <VBox/vmm/vmm.h>
25#include <VBox/sup.h>
26#include <VBox/vmm/trpm.h>
27#include <VBox/vmm/cpum.h>
28#include <VBox/vmm/pdmapi.h>
29#include <VBox/vmm/pgm.h>
30#ifdef VBOX_WITH_NEM_R0
31# include <VBox/vmm/nem.h>
32#endif
33#include <VBox/vmm/em.h>
34#include <VBox/vmm/stam.h>
35#include <VBox/vmm/tm.h>
36#include "VMMInternal.h"
37#include <VBox/vmm/vmcc.h>
38#include <VBox/vmm/gvm.h>
39#ifdef VBOX_WITH_PCI_PASSTHROUGH
40# include <VBox/vmm/pdmpci.h>
41#endif
42#include <VBox/vmm/apic.h>
43
44#include <VBox/vmm/gvmm.h>
45#include <VBox/vmm/gmm.h>
46#include <VBox/vmm/gim.h>
47#include <VBox/intnet.h>
48#include <VBox/vmm/hm.h>
49#include <VBox/param.h>
50#include <VBox/err.h>
51#include <VBox/version.h>
52#include <VBox/log.h>
53
54#include <iprt/asm-amd64-x86.h>
55#include <iprt/assert.h>
56#include <iprt/crc.h>
57#include <iprt/mp.h>
58#include <iprt/once.h>
59#include <iprt/stdarg.h>
60#include <iprt/string.h>
61#include <iprt/thread.h>
62#include <iprt/timer.h>
63#include <iprt/time.h>
64
65#include "dtrace/VBoxVMM.h"
66
67
68#if defined(_MSC_VER) && defined(RT_ARCH_AMD64) /** @todo check this with VC7! */
69# pragma intrinsic(_AddressOfReturnAddress)
70#endif
71
72#if defined(RT_OS_DARWIN) && ARCH_BITS == 32
73# error "32-bit darwin is no longer supported. Go back to 4.3 or earlier!"
74#endif
75
76
77
78/*********************************************************************************************************************************
79* Defined Constants And Macros *
80*********************************************************************************************************************************/
81/** @def VMM_CHECK_SMAP_SETUP
82 * SMAP check setup. */
83/** @def VMM_CHECK_SMAP_CHECK
84 * Checks that the AC flag is set if SMAP is enabled. If AC is not set,
85 * it will be logged and @a a_BadExpr is executed. */
86/** @def VMM_CHECK_SMAP_CHECK2
87 * Checks that the AC flag is set if SMAP is enabled. If AC is not set, it will
88 * be logged, written to the VM's assertion text buffer, and @a a_BadExpr is
89 * executed. */
90#if defined(VBOX_STRICT) || 1
91# define VMM_CHECK_SMAP_SETUP() uint32_t const fKernelFeatures = SUPR0GetKernelFeatures()
92# define VMM_CHECK_SMAP_CHECK(a_BadExpr) \
93 do { \
94 if (fKernelFeatures & SUPKERNELFEATURES_SMAP) \
95 { \
96 RTCCUINTREG fEflCheck = ASMGetFlags(); \
97 if (RT_LIKELY(fEflCheck & X86_EFL_AC)) \
98 { /* likely */ } \
99 else \
100 { \
101 SUPR0Printf("%s, line %d: EFLAGS.AC is clear! (%#x)\n", __FUNCTION__, __LINE__, (uint32_t)fEflCheck); \
102 a_BadExpr; \
103 } \
104 } \
105 } while (0)
106# define VMM_CHECK_SMAP_CHECK2(a_pVM, a_BadExpr) \
107 do { \
108 if (fKernelFeatures & SUPKERNELFEATURES_SMAP) \
109 { \
110 RTCCUINTREG fEflCheck = ASMGetFlags(); \
111 if (RT_LIKELY(fEflCheck & X86_EFL_AC)) \
112 { /* likely */ } \
113 else \
114 { \
115 SUPR0BadContext((a_pVM) ? (a_pVM)->pSession : NULL, __FILE__, __LINE__, "EFLAGS.AC is zero!"); \
116 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg1, sizeof(pVM->vmm.s.szRing0AssertMsg1), \
117 "%s, line %d: EFLAGS.AC is clear! (%#x)\n", __FUNCTION__, __LINE__, (uint32_t)fEflCheck); \
118 a_BadExpr; \
119 } \
120 } \
121 } while (0)
122#else
123# define VMM_CHECK_SMAP_SETUP() uint32_t const fKernelFeatures = 0
124# define VMM_CHECK_SMAP_CHECK(a_BadExpr) NOREF(fKernelFeatures)
125# define VMM_CHECK_SMAP_CHECK2(a_pVM, a_BadExpr) NOREF(fKernelFeatures)
126#endif
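/* Usage sketch: ring-0 entry code in this file pairs the macros roughly like
 * this (see ModuleInit() and vmmR0InitVM() below for the real uses):
 *
 *      VMM_CHECK_SMAP_SETUP();
 *      VMM_CHECK_SMAP_CHECK(return VERR_VMM_SMAP_BUT_AC_CLEAR);
 *      ...
 *      VMM_CHECK_SMAP_CHECK2(pVM, rc = VERR_VMM_RING0_ASSERTION);
 */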
127
128
129/*********************************************************************************************************************************
130* Internal Functions *
131*********************************************************************************************************************************/
132RT_C_DECLS_BEGIN
133#if defined(RT_ARCH_X86) && (defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD))
134extern uint64_t __udivdi3(uint64_t, uint64_t);
135extern uint64_t __umoddi3(uint64_t, uint64_t);
136#endif
137RT_C_DECLS_END
138
139
140/*********************************************************************************************************************************
141* Global Variables *
142*********************************************************************************************************************************/
143/** Drag in necessary library bits.
144 * The runtime lives here (in VMMR0.r0) and VBoxDD*R0.r0 links against us. */
145PFNRT g_VMMR0Deps[] =
146{
147 (PFNRT)RTCrc32,
148 (PFNRT)RTOnce,
149#if defined(RT_ARCH_X86) && (defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD))
150 (PFNRT)__udivdi3,
151 (PFNRT)__umoddi3,
152#endif
153 NULL
154};
155
156#ifdef RT_OS_SOLARIS
157/* Dependency information for the native Solaris loader. */
158extern "C" { char _depends_on[] = "vboxdrv"; }
159#endif
160
161/** The result of SUPR0GetRawModeUsability(), set by ModuleInit(). */
162int g_rcRawModeUsability = VINF_SUCCESS;
163
164
165/**
166 * Initialize the module.
167 * This is called when we're first loaded.
168 *
169 * @returns 0 on success.
170 * @returns VBox status on failure.
171 * @param hMod Image handle for use in APIs.
172 */
173DECLEXPORT(int) ModuleInit(void *hMod)
174{
175 VMM_CHECK_SMAP_SETUP();
176 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
177
178#ifdef VBOX_WITH_DTRACE_R0
179 /*
180 * The first thing to do is register the static tracepoints.
181 * (Deregistration is automatic.)
182 */
183 int rc2 = SUPR0TracerRegisterModule(hMod, &g_VTGObjHeader);
184 if (RT_FAILURE(rc2))
185 return rc2;
186#endif
187 LogFlow(("ModuleInit:\n"));
188
189#ifdef VBOX_WITH_64ON32_CMOS_DEBUG
190 /*
191 * Display the CMOS debug code.
192 */
193 ASMOutU8(0x72, 0x03);
194 uint8_t bDebugCode = ASMInU8(0x73);
195 LogRel(("CMOS Debug Code: %#x (%d)\n", bDebugCode, bDebugCode));
196 RTLogComPrintf("CMOS Debug Code: %#x (%d)\n", bDebugCode, bDebugCode);
197#endif
198
199 /*
200 * Initialize the VMM, GVMM, GMM, HM, PGM (Darwin) and INTNET.
201 */
202 int rc = vmmInitFormatTypes();
203 if (RT_SUCCESS(rc))
204 {
205 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
206 rc = GVMMR0Init();
207 if (RT_SUCCESS(rc))
208 {
209 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
210 rc = GMMR0Init();
211 if (RT_SUCCESS(rc))
212 {
213 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
214 rc = HMR0Init();
215 if (RT_SUCCESS(rc))
216 {
217 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
218 rc = PGMRegisterStringFormatTypes();
219 if (RT_SUCCESS(rc))
220 {
221 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
222#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
223 rc = PGMR0DynMapInit();
224#endif
225 if (RT_SUCCESS(rc))
226 {
227 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
228 rc = IntNetR0Init();
229 if (RT_SUCCESS(rc))
230 {
231#ifdef VBOX_WITH_PCI_PASSTHROUGH
232 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
233 rc = PciRawR0Init();
234#endif
235 if (RT_SUCCESS(rc))
236 {
237 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
238 rc = CPUMR0ModuleInit();
239 if (RT_SUCCESS(rc))
240 {
241#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
242 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
243 rc = vmmR0TripleFaultHackInit();
244 if (RT_SUCCESS(rc))
245#endif
246 {
247 VMM_CHECK_SMAP_CHECK(rc = VERR_VMM_SMAP_BUT_AC_CLEAR);
248 if (RT_SUCCESS(rc))
249 {
250 g_rcRawModeUsability = SUPR0GetRawModeUsability();
251 if (g_rcRawModeUsability != VINF_SUCCESS)
252 SUPR0Printf("VMMR0!ModuleInit: SUPR0GetRawModeUsability -> %Rrc\n",
253 g_rcRawModeUsability);
254 LogFlow(("ModuleInit: returns success\n"));
255 return VINF_SUCCESS;
256 }
257 }
258
259 /*
260 * Bail out.
261 */
262#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
263 vmmR0TripleFaultHackTerm();
264#endif
265 }
266 else
267 LogRel(("ModuleInit: CPUMR0ModuleInit -> %Rrc\n", rc));
268#ifdef VBOX_WITH_PCI_PASSTHROUGH
269 PciRawR0Term();
270#endif
271 }
272 else
273 LogRel(("ModuleInit: PciRawR0Init -> %Rrc\n", rc));
274 IntNetR0Term();
275 }
276 else
277 LogRel(("ModuleInit: IntNetR0Init -> %Rrc\n", rc));
278#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
279 PGMR0DynMapTerm();
280#endif
281 }
282 else
283 LogRel(("ModuleInit: PGMR0DynMapInit -> %Rrc\n", rc));
284 PGMDeregisterStringFormatTypes();
285 }
286 else
287 LogRel(("ModuleInit: PGMRegisterStringFormatTypes -> %Rrc\n", rc));
288 HMR0Term();
289 }
290 else
291 LogRel(("ModuleInit: HMR0Init -> %Rrc\n", rc));
292 GMMR0Term();
293 }
294 else
295 LogRel(("ModuleInit: GMMR0Init -> %Rrc\n", rc));
296 GVMMR0Term();
297 }
298 else
299 LogRel(("ModuleInit: GVMMR0Init -> %Rrc\n", rc));
300 vmmTermFormatTypes();
301 }
302 else
303 LogRel(("ModuleInit: vmmInitFormatTypes -> %Rrc\n", rc));
304
305 LogFlow(("ModuleInit: failed %Rrc\n", rc));
306 return rc;
307}
308
309
310/**
311 * Terminate the module.
312 * This is called when we're finally unloaded.
313 *
314 * @param hMod Image handle for use in APIs.
315 */
316DECLEXPORT(void) ModuleTerm(void *hMod)
317{
318 NOREF(hMod);
319 LogFlow(("ModuleTerm:\n"));
320
321 /*
322 * Terminate the CPUM module (Local APIC cleanup).
323 */
324 CPUMR0ModuleTerm();
325
326 /*
327 * Terminate the internal network service.
328 */
329 IntNetR0Term();
330
331 /*
332 * PGM (Darwin), HM and PciRaw global cleanup.
333 */
334#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
335 PGMR0DynMapTerm();
336#endif
337#ifdef VBOX_WITH_PCI_PASSTHROUGH
338 PciRawR0Term();
339#endif
340 PGMDeregisterStringFormatTypes();
341 HMR0Term();
342#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
343 vmmR0TripleFaultHackTerm();
344#endif
345
346 /*
347 * Destroy the GMM and GVMM instances.
348 */
349 GMMR0Term();
350 GVMMR0Term();
351
352 vmmTermFormatTypes();
353
354 LogFlow(("ModuleTerm: returns\n"));
355}
356
357
358/**
359 * Initiates the R0 driver for a particular VM instance.
360 *
361 * @returns VBox status code.
362 *
363 * @param pGVM The global (ring-0) VM structure.
364 * @param pVM The cross context VM structure.
365 * @param uSvnRev The SVN revision of the ring-3 part.
366 * @param uBuildType Build type indicator.
367 * @thread EMT(0)
368 */
369static int vmmR0InitVM(PGVM pGVM, PVMCC pVM, uint32_t uSvnRev, uint32_t uBuildType)
370{
371 VMM_CHECK_SMAP_SETUP();
372 VMM_CHECK_SMAP_CHECK(return VERR_VMM_SMAP_BUT_AC_CLEAR);
373
374 /*
375 * Match the SVN revisions and build type.
376 */
377 if (uSvnRev != VMMGetSvnRev())
378 {
379 LogRel(("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev()));
380 SUPR0Printf("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev());
381 return VERR_VMM_R0_VERSION_MISMATCH;
382 }
383 if (uBuildType != vmmGetBuildType())
384 {
385 LogRel(("VMMR0InitVM: Build type mismatch, r3=%#x r0=%#x\n", uBuildType, vmmGetBuildType()));
386 SUPR0Printf("VMMR0InitVM: Build type mismatch, r3=%#x r0=%#x\n", uBuildType, vmmGetBuildType());
387 return VERR_VMM_R0_VERSION_MISMATCH;
388 }
389
390 int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, 0 /*idCpu*/);
391 if (RT_FAILURE(rc))
392 return rc;
393
394#ifdef LOG_ENABLED
395 /*
396 * Register the EMT R0 logger instance for VCPU 0.
397 */
398#ifdef VBOX_BUGREF_9217
399 PVMCPUCC pVCpu = &pGVM->aCpus[0];
400#else
401 PVMCPUCC pVCpu = VMCC_GET_CPU_0(pVM);
402#endif
403
404 PVMMR0LOGGER pR0Logger = pVCpu->vmm.s.pR0LoggerR0;
405 if (pR0Logger)
406 {
407# if 0 /* testing of the logger. */
408 LogCom(("vmmR0InitVM: before %p\n", RTLogDefaultInstance()));
409 LogCom(("vmmR0InitVM: pfnFlush=%p actual=%p\n", pR0Logger->Logger.pfnFlush, vmmR0LoggerFlush));
410 LogCom(("vmmR0InitVM: pfnLogger=%p actual=%p\n", pR0Logger->Logger.pfnLogger, vmmR0LoggerWrapper));
411 LogCom(("vmmR0InitVM: offScratch=%d fFlags=%#x fDestFlags=%#x\n", pR0Logger->Logger.offScratch, pR0Logger->Logger.fFlags, pR0Logger->Logger.fDestFlags));
412
413 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
414 LogCom(("vmmR0InitVM: after %p reg\n", RTLogDefaultInstance()));
415 RTLogSetDefaultInstanceThread(NULL, pVM->pSession);
416 LogCom(("vmmR0InitVM: after %p dereg\n", RTLogDefaultInstance()));
417
418 pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
419 LogCom(("vmmR0InitVM: returned successfully from direct logger call.\n"));
420 pR0Logger->Logger.pfnFlush(&pR0Logger->Logger);
421 LogCom(("vmmR0InitVM: returned successfully from direct flush call.\n"));
422
423 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
424 LogCom(("vmmR0InitVM: after %p reg2\n", RTLogDefaultInstance()));
425 pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
426 LogCom(("vmmR0InitVM: returned successfully from direct logger call (2). offScratch=%d\n", pR0Logger->Logger.offScratch));
427 RTLogSetDefaultInstanceThread(NULL, pVM->pSession);
428 LogCom(("vmmR0InitVM: after %p dereg2\n", RTLogDefaultInstance()));
429
430 RTLogLoggerEx(&pR0Logger->Logger, 0, ~0U, "hello ring-0 logger (RTLogLoggerEx)\n");
431 LogCom(("vmmR0InitVM: RTLogLoggerEx returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));
432
433 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
434 RTLogPrintf("hello ring-0 logger (RTLogPrintf)\n");
435 LogCom(("vmmR0InitVM: RTLogPrintf returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));
436# endif
437 Log(("Switching to per-thread logging instance %p (key=%p)\n", &pR0Logger->Logger, pVM->pSession));
438 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
439 pR0Logger->fRegistered = true;
440 }
441#endif /* LOG_ENABLED */
442
443 /*
444 * Check if the host supports high resolution timers or not.
445 */
446 if ( pVM->vmm.s.fUsePeriodicPreemptionTimers
447 && !RTTimerCanDoHighResolution())
448 pVM->vmm.s.fUsePeriodicPreemptionTimers = false;
449
450 /*
451 * Initialize the per VM data for GVMM and GMM.
452 */
453 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
454 rc = GVMMR0InitVM(pGVM);
455// if (RT_SUCCESS(rc))
456// rc = GMMR0InitPerVMData(pVM);
457 if (RT_SUCCESS(rc))
458 {
459 /*
460 * Init HM, CPUM and PGM (Darwin only).
461 */
462 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
463 rc = HMR0InitVM(pVM);
464 if (RT_SUCCESS(rc))
465 VMM_CHECK_SMAP_CHECK2(pVM, rc = VERR_VMM_RING0_ASSERTION); /* CPUR0InitVM will otherwise panic the host */
466 if (RT_SUCCESS(rc))
467 {
468 rc = CPUMR0InitVM(pVM);
469 if (RT_SUCCESS(rc))
470 {
471 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
472#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
473 rc = PGMR0DynMapInitVM(pVM);
474#endif
475 if (RT_SUCCESS(rc))
476 {
477 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
478#ifdef VBOX_BUGREF_9217
479 rc = EMR0InitVM(pGVM);
480#else
481 rc = EMR0InitVM(pGVM, pVM);
482#endif
483 if (RT_SUCCESS(rc))
484 {
485 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
486#ifdef VBOX_WITH_PCI_PASSTHROUGH
487 rc = PciRawR0InitVM(pGVM, pVM);
488#endif
489 if (RT_SUCCESS(rc))
490 {
491 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
492 rc = GIMR0InitVM(pVM);
493 if (RT_SUCCESS(rc))
494 {
495 VMM_CHECK_SMAP_CHECK2(pVM, rc = VERR_VMM_RING0_ASSERTION);
496 if (RT_SUCCESS(rc))
497 {
498 GVMMR0DoneInitVM(pGVM);
499
500 /*
501 * Collect a bit of info for the VM release log.
502 */
503 pVM->vmm.s.fIsPreemptPendingApiTrusty = RTThreadPreemptIsPendingTrusty();
504 pVM->vmm.s.fIsPreemptPossible = RTThreadPreemptIsPossible();
505
506 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
507 return rc;
508 }
509
510 /* bail out*/
511 GIMR0TermVM(pVM);
512 }
513#ifdef VBOX_WITH_PCI_PASSTHROUGH
514 PciRawR0TermVM(pGVM, pVM);
515#endif
516 }
517 }
518 }
519 }
520 HMR0TermVM(pVM);
521 }
522 }
523
524 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pVM->pSession);
525 return rc;
526}
527
528
529/**
530 * Does EMT specific VM initialization.
531 *
532 * @returns VBox status code.
533 * @param pGVM The ring-0 VM structure.
534 * @param pVM The cross context VM structure.
535 * @param idCpu The EMT that's calling.
536 */
537static int vmmR0InitVMEmt(PGVM pGVM, PVMCC pVM, VMCPUID idCpu)
538{
539 /* Paranoia (caller checked these already). */
540 AssertReturn(idCpu < pGVM->cCpus, VERR_INVALID_CPU_ID);
541 AssertReturn(pGVM->aCpus[idCpu].hEMT == RTThreadNativeSelf(), VERR_INVALID_CPU_ID);
542
543#ifdef LOG_ENABLED
544 /*
545 * Registration of ring 0 loggers.
546 */
547#ifdef VBOX_BUGREF_9217
548 PVMCPUCC pVCpu = &pGVM->aCpus[idCpu];
549#else
550 PVMCPUCC pVCpu = VMCC_GET_CPU(pVM, idCpu);
551#endif
552 PVMMR0LOGGER pR0Logger = pVCpu->vmm.s.pR0LoggerR0;
553 if ( pR0Logger
554 && !pR0Logger->fRegistered)
555 {
556 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
557 pR0Logger->fRegistered = true;
558 }
559#endif
560 RT_NOREF(pVM);
561
562 return VINF_SUCCESS;
563}
564
565
566
567/**
568 * Terminates the R0 bits for a particular VM instance.
569 *
570 * This is normally called by ring-3 as part of the VM termination process, but
571 * may alternatively be called during the support driver session cleanup when
572 * the VM object is destroyed (see GVMM).
573 *
574 * @returns VBox status code.
575 *
576 * @param pGVM The global (ring-0) VM structure.
577 * @param pVM The cross context VM structure.
578 * @param idCpu Set to 0 if EMT(0) or NIL_VMCPUID if session cleanup
579 * thread.
580 * @thread EMT(0) or session clean up thread.
581 */
582VMMR0_INT_DECL(int) VMMR0TermVM(PGVM pGVM, PVMCC pVM, VMCPUID idCpu)
583{
584 /*
585 * Check EMT(0) claim if we're called from userland.
586 */
587 if (idCpu != NIL_VMCPUID)
588 {
589 AssertReturn(idCpu == 0, VERR_INVALID_CPU_ID);
590 int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
591 if (RT_FAILURE(rc))
592 return rc;
593 }
594
595#ifdef VBOX_WITH_PCI_PASSTHROUGH
596 PciRawR0TermVM(pGVM, pVM);
597#endif
598
599 /*
600 * Tell GVMM what we're up to and check that we only do this once.
601 */
602 if (GVMMR0DoingTermVM(pGVM))
603 {
604 GIMR0TermVM(pVM);
605
606 /** @todo I wish to call PGMR0PhysFlushHandyPages(pVM, &pVM->aCpus[idCpu])
607 * here to make sure we don't leak any shared pages if we crash... */
608#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
609 PGMR0DynMapTermVM(pVM);
610#endif
611 HMR0TermVM(pVM);
612 }
613
614 /*
615 * Deregister the logger.
616 */
617 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pVM->pSession);
618 return VINF_SUCCESS;
619}
620
621
622/**
623 * An interrupt or unhalt force flag is set, deal with it.
624 *
625 * @returns VINF_SUCCESS (or VINF_EM_HALT).
626 * @param pVCpu The cross context virtual CPU structure.
627 * @param uMWait Result from EMMonitorWaitIsActive().
628 * @param enmInterruptibility Guest CPU interruptibility level.
629 */
630static int vmmR0DoHaltInterrupt(PVMCPUCC pVCpu, unsigned uMWait, CPUMINTERRUPTIBILITY enmInterruptibility)
631{
632 Assert(!TRPMHasTrap(pVCpu));
633 Assert( enmInterruptibility > CPUMINTERRUPTIBILITY_INVALID
634 && enmInterruptibility < CPUMINTERRUPTIBILITY_END);
635
636 /*
637 * Pending interrupts w/o any SMIs or NMIs? That's the usual case.
638 */
639 if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
640 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_INTERRUPT_NMI))
641 {
642 if (enmInterruptibility <= CPUMINTERRUPTIBILITY_UNRESTRAINED)
643 {
644 uint8_t u8Interrupt = 0;
645 int rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
646 Log(("vmmR0DoHaltInterrupt: CPU%d u8Interrupt=%d (%#x) rc=%Rrc\n", pVCpu->idCpu, u8Interrupt, u8Interrupt, rc));
647 if (RT_SUCCESS(rc))
648 {
649 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_UNHALT);
650
651 rc = TRPMAssertTrap(pVCpu, u8Interrupt, TRPM_HARDWARE_INT);
652 AssertRCSuccess(rc);
653 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltExec);
654 return rc;
655 }
656 }
657 }
658 /*
659 * SMI is not implemented yet, at least not here.
660 */
661 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_SMI))
662 {
663 return VINF_EM_HALT;
664 }
665 /*
666 * NMI.
667 */
668 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
669 {
670 if (enmInterruptibility < CPUMINTERRUPTIBILITY_NMI_INHIBIT)
671 {
672 /** @todo later. */
673 return VINF_EM_HALT;
674 }
675 }
676 /*
677 * Nested-guest virtual interrupt.
678 */
679 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST))
680 {
681 if (enmInterruptibility < CPUMINTERRUPTIBILITY_VIRT_INT_DISABLED)
682 {
683 /** @todo NSTVMX: NSTSVM: Remember, we might have to check and perform VM-exits
684 * here before injecting the virtual interrupt. See emR3ForcedActions
685 * for details. */
686 return VINF_EM_HALT;
687 }
688 }
689
690 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UNHALT))
691 {
692 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltExec);
693 return VINF_SUCCESS;
694 }
695 if (uMWait > 1)
696 {
697 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltExec);
698 return VINF_SUCCESS;
699 }
700
701 return VINF_EM_HALT;
702}
703
704
705/**
706 * This does one round of vmR3HaltGlobal1Halt().
707 *
708 * The rationale here is that we'll reduce latency in interrupt situations if we
709 * don't go to ring-3 immediately on a VINF_EM_HALT (guest executed HLT or
710 * MWAIT), but do one round of blocking here instead and hope the interrupt is
711 * raised in the meanwhile.
712 *
713 * If we go to ring-3 we'll quit the inner HM/NEM loop in EM and end up in the
714 * outer loop, which will then call VMR3WaitHalted() and that in turn will do a
715 * ring-0 call (unless we're too close to a timer event). When the interrupt
716 * wakes us up, we'll return from ring-0 and EM will by instinct do a
717 * rescheduling (because of raw-mode) before it resumes the HM/NEM loop and gets
718 * back to VMMR0EntryFast().
719 *
720 * @returns VINF_SUCCESS or VINF_EM_HALT.
721 * @param pGVM The ring-0 VM structure.
722 * @param pVM The cross context VM structure.
723 * @param pGVCpu The ring-0 virtual CPU structure.
724 * @param pVCpu The cross context virtual CPU structure.
725 *
726 * @todo r=bird: All the blocking/waiting and EMT management should move out of
727 * the VM module, probably to VMM. Then this would be more weird wrt
728 * parameters and statistics.
729 */
730static int vmmR0DoHalt(PGVM pGVM, PVMCC pVM, PGVMCPU pGVCpu, PVMCPUCC pVCpu)
731{
732#ifdef VBOX_BUGREF_9217
733 Assert(pVCpu == pGVCpu);
734#else
735 Assert(pVCpu == pGVCpu->pVCpu);
736#endif
737
738 /*
739 * Do spin stat historization.
740 */
741 if (++pVCpu->vmm.s.cR0Halts & 0xff)
742 { /* likely */ }
743 else if (pVCpu->vmm.s.cR0HaltsSucceeded > pVCpu->vmm.s.cR0HaltsToRing3)
744 {
745 pVCpu->vmm.s.cR0HaltsSucceeded = 2;
746 pVCpu->vmm.s.cR0HaltsToRing3 = 0;
747 }
748 else
749 {
750 pVCpu->vmm.s.cR0HaltsSucceeded = 0;
751 pVCpu->vmm.s.cR0HaltsToRing3 = 2;
752 }
753
754 /*
755 * Flags that make us go to ring-3.
756 */
757 uint32_t const fVmFFs = VM_FF_TM_VIRTUAL_SYNC | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA
758 | VM_FF_DBGF | VM_FF_REQUEST | VM_FF_CHECK_VM_STATE
759 | VM_FF_RESET | VM_FF_EMT_RENDEZVOUS | VM_FF_PGM_NEED_HANDY_PAGES
760 | VM_FF_PGM_NO_MEMORY | VM_FF_REM_HANDLER_NOTIFY | VM_FF_DEBUG_SUSPEND;
761 uint64_t const fCpuFFs = VMCPU_FF_TIMER | VMCPU_FF_PDM_CRITSECT | VMCPU_FF_IEM
762 | VMCPU_FF_REQUEST | VMCPU_FF_DBGF | VMCPU_FF_HM_UPDATE_CR3
763 | VMCPU_FF_HM_UPDATE_PAE_PDPES | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
764 | VMCPU_FF_TO_R3 | VMCPU_FF_IOM;
765
766 /*
767 * Check preconditions.
768 */
769 unsigned const uMWait = EMMonitorWaitIsActive(pVCpu);
770 CPUMINTERRUPTIBILITY const enmInterruptibility = CPUMGetGuestInterruptibility(pVCpu);
771 if ( pVCpu->vmm.s.fMayHaltInRing0
772 && !TRPMHasTrap(pVCpu)
773 && ( enmInterruptibility == CPUMINTERRUPTIBILITY_UNRESTRAINED
774 || uMWait > 1))
775 {
776 if ( !VM_FF_IS_ANY_SET(pVM, fVmFFs)
777 && !VMCPU_FF_IS_ANY_SET(pVCpu, fCpuFFs))
778 {
779 /*
780 * Interrupts pending already?
781 */
782 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
783 APICUpdatePendingInterrupts(pVCpu);
784
785 /*
786 * Flags that wake us up from the halted state.
787 */
788 uint64_t const fIntMask = VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_INTERRUPT_NESTED_GUEST
789 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT;
790
791 if (VMCPU_FF_IS_ANY_SET(pVCpu, fIntMask))
792 return vmmR0DoHaltInterrupt(pVCpu, uMWait, enmInterruptibility);
793 ASMNopPause();
794
795 /*
796 * Check out how long till the next timer event.
797 */
798 uint64_t u64Delta;
799 uint64_t u64GipTime = TMTimerPollGIP(pVM, pVCpu, &u64Delta);
800
801 if ( !VM_FF_IS_ANY_SET(pVM, fVmFFs)
802 && !VMCPU_FF_IS_ANY_SET(pVCpu, fCpuFFs))
803 {
804 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
805 APICUpdatePendingInterrupts(pVCpu);
806
807 if (VMCPU_FF_IS_ANY_SET(pVCpu, fIntMask))
808 return vmmR0DoHaltInterrupt(pVCpu, uMWait, enmInterruptibility);
809
810 /*
811 * Wait if there is enough time to the next timer event.
812 */
813 if (u64Delta >= pVCpu->vmm.s.cNsSpinBlockThreshold)
814 {
815 /* If there are a few other CPU cores around, we will procrastinate a
816 little before going to sleep, hoping for some device raising an
817 interrupt or similar. Though, the best thing here would be to
818 dynamically adjust the spin count according to its usefulness or
819 something... */
820 if ( pVCpu->vmm.s.cR0HaltsSucceeded > pVCpu->vmm.s.cR0HaltsToRing3
821 && RTMpGetOnlineCount() >= 4)
822 {
823 /** @todo Figure out how we can skip this if it hasn't helped recently...
824 * @bugref{9172#c12} */
825 uint32_t cSpinLoops = 42;
826 while (cSpinLoops-- > 0)
827 {
828 ASMNopPause();
829 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
830 APICUpdatePendingInterrupts(pVCpu);
831 ASMNopPause();
832 if (VM_FF_IS_ANY_SET(pVM, fVmFFs))
833 {
834 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltToR3FromSpin);
835 return VINF_EM_HALT;
836 }
837 ASMNopPause();
838 if (VMCPU_FF_IS_ANY_SET(pVCpu, fCpuFFs))
839 {
840 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltToR3FromSpin);
841 return VINF_EM_HALT;
842 }
843 ASMNopPause();
844 if (VMCPU_FF_IS_ANY_SET(pVCpu, fIntMask))
845 {
846 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltExecFromSpin);
847 return vmmR0DoHaltInterrupt(pVCpu, uMWait, enmInterruptibility);
848 }
849 ASMNopPause();
850 }
851 }
852
853 /* Block. We have to set the state to VMCPUSTATE_STARTED_HALTED here so ring-3
854 knows when to notify us (cannot access VMINTUSERPERVMCPU::fWait from here). */
855 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HALTED, VMCPUSTATE_STARTED);
856 uint64_t const u64StartSchedHalt = RTTimeNanoTS();
857 int rc = GVMMR0SchedHalt(pGVM, pVM, pGVCpu, u64GipTime);
858 uint64_t const u64EndSchedHalt = RTTimeNanoTS();
859 uint64_t const cNsElapsedSchedHalt = u64EndSchedHalt - u64StartSchedHalt;
860 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_HALTED);
861 STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->vmm.s.StatR0HaltBlock, cNsElapsedSchedHalt);
862 if ( rc == VINF_SUCCESS
863 || rc == VERR_INTERRUPTED)
864
865 {
866 /* Keep some stats like ring-3 does. */
867 int64_t const cNsOverslept = u64EndSchedHalt - u64GipTime;
868 if (cNsOverslept > 50000)
869 STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->vmm.s.StatR0HaltBlockOverslept, cNsOverslept);
870 else if (cNsOverslept < -50000)
871 STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->vmm.s.StatR0HaltBlockInsomnia, cNsElapsedSchedHalt);
872 else
873 STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->vmm.s.StatR0HaltBlockOnTime, cNsElapsedSchedHalt);
874
875 /*
876 * Recheck whether we can resume execution or have to go to ring-3.
877 */
878 if ( !VM_FF_IS_ANY_SET(pVM, fVmFFs)
879 && !VMCPU_FF_IS_ANY_SET(pVCpu, fCpuFFs))
880 {
881 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
882 APICUpdatePendingInterrupts(pVCpu);
883 if (VMCPU_FF_IS_ANY_SET(pVCpu, fIntMask))
884 {
885 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltExecFromBlock);
886 return vmmR0DoHaltInterrupt(pVCpu, uMWait, enmInterruptibility);
887 }
888 }
889 }
890 }
891 }
892 }
893 }
894 return VINF_EM_HALT;
895}
896
897
898/**
899 * VMM ring-0 thread-context callback.
900 *
901 * This does common HM state updating and calls the HM-specific thread-context
902 * callback.
903 *
904 * @param enmEvent The thread-context event.
905 * @param pvUser Opaque pointer to the VMCPU.
906 *
907 * @thread EMT(pvUser)
908 */
909static DECLCALLBACK(void) vmmR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, void *pvUser)
910{
911 PVMCPUCC pVCpu = (PVMCPUCC)pvUser;
912
913 switch (enmEvent)
914 {
915 case RTTHREADCTXEVENT_IN:
916 {
917 /*
918 * Linux may call us with preemption enabled (really!) but technically we
919 * cannot get preempted here, otherwise we end up in an infinite recursion
920 * scenario (i.e. preempted in resume hook -> preempt hook -> resume hook...
921 * ad infinitum). Let's just disable preemption for now...
922 */
923 /** @todo r=bird: I don't believe the above. The linux code is clearly enabling
924 * preemption after doing the callout (one or two functions up the
925 * call chain). */
926 /** @todo r=ramshankar: See @bugref{5313#c30}. */
927 RTTHREADPREEMPTSTATE ParanoidPreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
928 RTThreadPreemptDisable(&ParanoidPreemptState);
929
930 /* We need to update the VCPU <-> host CPU mapping. */
931 RTCPUID idHostCpu;
932 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
933 pVCpu->iHostCpuSet = iHostCpuSet;
934 ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
935
936 /* In the very unlikely event that the GIP delta for the CPU we're
937 rescheduled on needs calculating, try to force a return to ring-3.
938 We unfortunately cannot do the measurements right here. */
939 if (RT_UNLIKELY(SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
940 VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
941
942 /* Invoke the HM-specific thread-context callback. */
943 HMR0ThreadCtxCallback(enmEvent, pvUser);
944
945 /* Restore preemption. */
946 RTThreadPreemptRestore(&ParanoidPreemptState);
947 break;
948 }
949
950 case RTTHREADCTXEVENT_OUT:
951 {
952 /* Invoke the HM-specific thread-context callback. */
953 HMR0ThreadCtxCallback(enmEvent, pvUser);
954
955 /*
956 * Sigh. See VMMGetCpu() used by VMCPU_ASSERT_EMT(). We cannot let several VCPUs
957 * have the same host CPU associated with them.
958 */
959 pVCpu->iHostCpuSet = UINT32_MAX;
960 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
961 break;
962 }
963
964 default:
965 /* Invoke the HM-specific thread-context callback. */
966 HMR0ThreadCtxCallback(enmEvent, pvUser);
967 break;
968 }
969}
970
971
972/**
973 * Creates the thread switching hook for the current EMT thread.
974 *
975 * This is called by GVMMR0CreateVM and GVMMR0RegisterVCpu. If the host
976 * platform does not implement switcher hooks, no hooks will be created and the
977 * member will be set to NIL_RTTHREADCTXHOOK.
978 *
979 * @returns VBox status code.
980 * @param pVCpu The cross context virtual CPU structure.
981 * @thread EMT(pVCpu)
982 */
983VMMR0_INT_DECL(int) VMMR0ThreadCtxHookCreateForEmt(PVMCPUCC pVCpu)
984{
985 VMCPU_ASSERT_EMT(pVCpu);
986 Assert(pVCpu->vmm.s.hCtxHook == NIL_RTTHREADCTXHOOK);
987
988#if 1 /* To disable this stuff change to zero. */
989 int rc = RTThreadCtxHookCreate(&pVCpu->vmm.s.hCtxHook, 0, vmmR0ThreadCtxCallback, pVCpu);
990 if (RT_SUCCESS(rc))
991 return rc;
992#else
993 RT_NOREF(vmmR0ThreadCtxCallback);
994 int rc = VERR_NOT_SUPPORTED;
995#endif
996
997 pVCpu->vmm.s.hCtxHook = NIL_RTTHREADCTXHOOK;
998 if (rc == VERR_NOT_SUPPORTED)
999 return VINF_SUCCESS;
1000
1001 LogRelMax(32, ("RTThreadCtxHookCreate failed! rc=%Rrc pVCpu=%p idCpu=%RU32\n", rc, pVCpu, pVCpu->idCpu));
1002 return VINF_SUCCESS; /* Just ignore it, we can live without context hooks. */
1003}
1004
1005
1006/**
1007 * Destroys the thread switching hook for the specified VCPU.
1008 *
1009 * @param pVCpu The cross context virtual CPU structure.
1010 * @remarks Can be called from any thread.
1011 */
1012VMMR0_INT_DECL(void) VMMR0ThreadCtxHookDestroyForEmt(PVMCPUCC pVCpu)
1013{
1014 int rc = RTThreadCtxHookDestroy(pVCpu->vmm.s.hCtxHook);
1015 AssertRC(rc);
1016 pVCpu->vmm.s.hCtxHook = NIL_RTTHREADCTXHOOK;
1017}
1018
1019
1020/**
1021 * Disables the thread switching hook for this VCPU (if we got one).
1022 *
1023 * @param pVCpu The cross context virtual CPU structure.
1024 * @thread EMT(pVCpu)
1025 *
1026 * @remarks This also clears VMCPU::idHostCpu, so the mapping is invalid after
1027 * this call. This means you have to be careful with what you do!
1028 */
1029VMMR0_INT_DECL(void) VMMR0ThreadCtxHookDisable(PVMCPUCC pVCpu)
1030{
1031 /*
1032 * Clear the VCPU <-> host CPU mapping as we've left HM context.
1033 * @bugref{7726#c19} explains the need for this trick:
1034 *
1035 * hmR0VmxCallRing3Callback/hmR0SvmCallRing3Callback &
1036 * hmR0VmxLeaveSession/hmR0SvmLeaveSession disables context hooks during
1037 * longjmp & normal return to ring-3, which opens a window where we may be
1038 * rescheduled without changing VMCPU::idHostCpu and cause confusion if
1039 * the CPU starts executing a different EMT. Both functions first disable
1040 * preemption and then call HMR0LeaveCpu, which invalidates idHostCpu, leaving
1041 * an opening for getting preempted.
1042 */
1043 /** @todo Make HM not need this API! Then we could leave the hooks enabled
1044 * all the time. */
1045 /** @todo move this into the context hook disabling if(). */
1046 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1047
1048 /*
1049 * Disable the context hook, if we got one.
1050 */
1051 if (pVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1052 {
1053 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1054 int rc = RTThreadCtxHookDisable(pVCpu->vmm.s.hCtxHook);
1055 AssertRC(rc);
1056 }
1057}
1058
1059
1060/**
1061 * Internal version of VMMR0ThreadCtxHooksAreRegistered.
1062 *
1063 * @returns true if registered, false otherwise.
1064 * @param pVCpu The cross context virtual CPU structure.
1065 */
1066DECLINLINE(bool) vmmR0ThreadCtxHookIsEnabled(PVMCPUCC pVCpu)
1067{
1068 return RTThreadCtxHookIsEnabled(pVCpu->vmm.s.hCtxHook);
1069}
1070
1071
1072/**
1073 * Whether thread-context hooks are registered for this VCPU.
1074 *
1075 * @returns true if registered, false otherwise.
1076 * @param pVCpu The cross context virtual CPU structure.
1077 */
1078VMMR0_INT_DECL(bool) VMMR0ThreadCtxHookIsEnabled(PVMCPUCC pVCpu)
1079{
1080 return vmmR0ThreadCtxHookIsEnabled(pVCpu);
1081}
1082
1083
1084#ifdef VBOX_WITH_STATISTICS
1085/**
1086 * Record return code statistics
1087 * @param pVM The cross context VM structure.
1088 * @param pVCpu The cross context virtual CPU structure.
1089 * @param rc The status code.
1090 */
1091static void vmmR0RecordRC(PVMCC pVM, PVMCPUCC pVCpu, int rc)
1092{
1093 /*
1094 * Collect statistics.
1095 */
1096 switch (rc)
1097 {
1098 case VINF_SUCCESS:
1099 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetNormal);
1100 break;
1101 case VINF_EM_RAW_INTERRUPT:
1102 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterrupt);
1103 break;
1104 case VINF_EM_RAW_INTERRUPT_HYPER:
1105 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptHyper);
1106 break;
1107 case VINF_EM_RAW_GUEST_TRAP:
1108 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGuestTrap);
1109 break;
1110 case VINF_EM_RAW_RING_SWITCH:
1111 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitch);
1112 break;
1113 case VINF_EM_RAW_RING_SWITCH_INT:
1114 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitchInt);
1115 break;
1116 case VINF_EM_RAW_STALE_SELECTOR:
1117 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetStaleSelector);
1118 break;
1119 case VINF_EM_RAW_IRET_TRAP:
1120 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIRETTrap);
1121 break;
1122 case VINF_IOM_R3_IOPORT_READ:
1123 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIORead);
1124 break;
1125 case VINF_IOM_R3_IOPORT_WRITE:
1126 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOWrite);
1127 break;
1128 case VINF_IOM_R3_IOPORT_COMMIT_WRITE:
1129 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOCommitWrite);
1130 break;
1131 case VINF_IOM_R3_MMIO_READ:
1132 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIORead);
1133 break;
1134 case VINF_IOM_R3_MMIO_WRITE:
1135 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOWrite);
1136 break;
1137 case VINF_IOM_R3_MMIO_COMMIT_WRITE:
1138 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOCommitWrite);
1139 break;
1140 case VINF_IOM_R3_MMIO_READ_WRITE:
1141 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOReadWrite);
1142 break;
1143 case VINF_PATM_HC_MMIO_PATCH_READ:
1144 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchRead);
1145 break;
1146 case VINF_PATM_HC_MMIO_PATCH_WRITE:
1147 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchWrite);
1148 break;
1149 case VINF_CPUM_R3_MSR_READ:
1150 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMSRRead);
1151 break;
1152 case VINF_CPUM_R3_MSR_WRITE:
1153 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMSRWrite);
1154 break;
1155 case VINF_EM_RAW_EMULATE_INSTR:
1156 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetEmulate);
1157 break;
1158 case VINF_PATCH_EMULATE_INSTR:
1159 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchEmulate);
1160 break;
1161 case VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT:
1162 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetLDTFault);
1163 break;
1164 case VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT:
1165 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGDTFault);
1166 break;
1167 case VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT:
1168 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIDTFault);
1169 break;
1170 case VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT:
1171 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTSSFault);
1172 break;
1173 case VINF_CSAM_PENDING_ACTION:
1174 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCSAMTask);
1175 break;
1176 case VINF_PGM_SYNC_CR3:
1177 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetSyncCR3);
1178 break;
1179 case VINF_PATM_PATCH_INT3:
1180 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchInt3);
1181 break;
1182 case VINF_PATM_PATCH_TRAP_PF:
1183 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchPF);
1184 break;
1185 case VINF_PATM_PATCH_TRAP_GP:
1186 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchGP);
1187 break;
1188 case VINF_PATM_PENDING_IRQ_AFTER_IRET:
1189 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchIretIRQ);
1190 break;
1191 case VINF_EM_RESCHEDULE_REM:
1192 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRescheduleREM);
1193 break;
1194 case VINF_EM_RAW_TO_R3:
1195 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Total);
1196 if (VM_FF_IS_SET(pVM, VM_FF_TM_VIRTUAL_SYNC))
1197 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3TMVirt);
1198 else if (VM_FF_IS_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES))
1199 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3HandyPages);
1200 else if (VM_FF_IS_SET(pVM, VM_FF_PDM_QUEUES))
1201 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3PDMQueues);
1202 else if (VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
1203 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Rendezvous);
1204 else if (VM_FF_IS_SET(pVM, VM_FF_PDM_DMA))
1205 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3DMA);
1206 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TIMER))
1207 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Timer);
1208 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PDM_CRITSECT))
1209 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3CritSect);
1210 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TO_R3))
1211 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3FF);
1212 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
1213 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Iem);
1214 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IOM))
1215 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Iom);
1216 else
1217 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Unknown);
1218 break;
1219
1220 case VINF_EM_RAW_TIMER_PENDING:
1221 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTimerPending);
1222 break;
1223 case VINF_EM_RAW_INTERRUPT_PENDING:
1224 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptPending);
1225 break;
1226 case VINF_VMM_CALL_HOST:
1227 switch (pVCpu->vmm.s.enmCallRing3Operation)
1228 {
1229 case VMMCALLRING3_PDM_CRIT_SECT_ENTER:
1230 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPDMCritSectEnter);
1231 break;
1232 case VMMCALLRING3_PDM_LOCK:
1233 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPDMLock);
1234 break;
1235 case VMMCALLRING3_PGM_POOL_GROW:
1236 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMPoolGrow);
1237 break;
1238 case VMMCALLRING3_PGM_LOCK:
1239 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMLock);
1240 break;
1241 case VMMCALLRING3_PGM_MAP_CHUNK:
1242 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMMapChunk);
1243 break;
1244 case VMMCALLRING3_PGM_ALLOCATE_HANDY_PAGES:
1245 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMAllocHandy);
1246 break;
1247 case VMMCALLRING3_REM_REPLAY_HANDLER_NOTIFICATIONS:
1248 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallRemReplay);
1249 break;
1250 case VMMCALLRING3_VMM_LOGGER_FLUSH:
1251 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallLogFlush);
1252 break;
1253 case VMMCALLRING3_VM_SET_ERROR:
1254 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallVMSetError);
1255 break;
1256 case VMMCALLRING3_VM_SET_RUNTIME_ERROR:
1257 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallVMSetRuntimeError);
1258 break;
1259 case VMMCALLRING3_VM_R0_ASSERTION:
1260 default:
1261 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCallRing3);
1262 break;
1263 }
1264 break;
1265 case VINF_PATM_DUPLICATE_FUNCTION:
1266 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPATMDuplicateFn);
1267 break;
1268 case VINF_PGM_CHANGE_MODE:
1269 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMChangeMode);
1270 break;
1271 case VINF_PGM_POOL_FLUSH_PENDING:
1272 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMFlushPending);
1273 break;
1274 case VINF_EM_PENDING_REQUEST:
1275 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPendingRequest);
1276 break;
1277 case VINF_EM_HM_PATCH_TPR_INSTR:
1278 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchTPR);
1279 break;
1280 default:
1281 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMisc);
1282 break;
1283 }
1284}
1285#endif /* VBOX_WITH_STATISTICS */
1286
1287
1288/**
1289 * The Ring 0 entry point, called by the fast-ioctl path.
1290 *
1291 * @param pGVM The global (ring-0) VM structure.
1292 * @param pVM The cross context VM structure.
1293 * The return code is stored in pVM->vmm.s.iLastGZRc.
1294 * @param idCpu The Virtual CPU ID of the calling EMT.
1295 * @param enmOperation Which operation to execute.
1296 * @remarks Assume called with interrupts _enabled_.
1297 */
1298VMMR0DECL(void) VMMR0EntryFast(PGVM pGVM, PVMCC pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation)
1299{
1300 /*
1301 * Validation.
1302 */
1303 if ( idCpu < pGVM->cCpus
1304 && pGVM->cCpus == pVM->cCpus)
1305 { /*likely*/ }
1306 else
1307 {
1308 SUPR0Printf("VMMR0EntryFast: Bad idCpu=%#x cCpus=%#x/%#x\n", idCpu, pGVM->cCpus, pVM->cCpus);
1309 return;
1310 }
1311
1312 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
1313#ifdef VBOX_BUGREF_9217
1314 PVMCPUCC pVCpu = pGVCpu;
1315#else
1316 PVMCPUCC pVCpu = VMCC_GET_CPU(pVM, idCpu);
1317#endif
1318 RTNATIVETHREAD const hNativeThread = RTThreadNativeSelf();
1319 if (RT_LIKELY( pGVCpu->hEMT == hNativeThread
1320 && pVCpu->hNativeThreadR0 == hNativeThread))
1321 { /* likely */ }
1322 else
1323 {
1324 SUPR0Printf("VMMR0EntryFast: Bad thread idCpu=%#x hNativeSelf=%p pGVCpu->hEmt=%p pVCpu->hNativeThreadR0=%p\n",
1325 idCpu, hNativeThread, pGVCpu->hEMT, pVCpu->hNativeThreadR0);
1326 return;
1327 }
1328
1329 /*
1330 * SMAP fun.
1331 */
1332 VMM_CHECK_SMAP_SETUP();
1333 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1334
1335 /*
1336 * Perform requested operation.
1337 */
1338 switch (enmOperation)
1339 {
1340 /*
1341 * Run guest code using the available hardware acceleration technology.
1342 */
1343 case VMMR0_DO_HM_RUN:
1344 {
1345 for (;;) /* hlt loop */
1346 {
1347 /*
1348 * Disable preemption.
1349 */
1350 Assert(!vmmR0ThreadCtxHookIsEnabled(pVCpu));
1351 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
1352 RTThreadPreemptDisable(&PreemptState);
1353
1354 /*
1355 * Get the host CPU identifiers, make sure they are valid and that
1356 * we've got a TSC delta for the CPU.
1357 */
1358 RTCPUID idHostCpu;
1359 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
1360 if (RT_LIKELY( iHostCpuSet < RTCPUSET_MAX_CPUS
1361 && SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
1362 {
1363 pVCpu->iHostCpuSet = iHostCpuSet;
1364 ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
1365
1366 /*
1367 * Update the periodic preemption timer if it's active.
1368 */
1369 if (pVM->vmm.s.fUsePeriodicPreemptionTimers)
1370 GVMMR0SchedUpdatePeriodicPreemptionTimer(pVM, pVCpu->idHostCpu, TMCalcHostTimerFrequency(pVM, pVCpu));
1371 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1372
1373#ifdef VMM_R0_TOUCH_FPU
1374 /*
1375 * Make sure we've got the FPU state loaded so we don't need to clear
1376 * CR0.TS and get out of sync with the host kernel when loading the guest
1377 * FPU state. @ref sec_cpum_fpu (CPUM.cpp) and @bugref{4053}.
1378 */
1379 CPUMR0TouchHostFpu();
1380#endif
1381 int rc;
1382 bool fPreemptRestored = false;
1383 if (!HMR0SuspendPending())
1384 {
1385 /*
1386 * Enable the context switching hook.
1387 */
1388 if (pVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1389 {
1390 Assert(!RTThreadCtxHookIsEnabled(pVCpu->vmm.s.hCtxHook));
1391 int rc2 = RTThreadCtxHookEnable(pVCpu->vmm.s.hCtxHook); AssertRC(rc2);
1392 }
1393
1394 /*
1395 * Enter HM context.
1396 */
1397 rc = HMR0Enter(pVCpu);
1398 if (RT_SUCCESS(rc))
1399 {
1400 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
1401
1402 /*
1403 * When preemption hooks are in place, enable preemption now that
1404 * we're in HM context.
1405 */
1406 if (vmmR0ThreadCtxHookIsEnabled(pVCpu))
1407 {
1408 fPreemptRestored = true;
1409 RTThreadPreemptRestore(&PreemptState);
1410 }
1411
1412 /*
1413 * Setup the longjmp machinery and execute guest code (calls HMR0RunGuestCode).
1414 */
1415 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1416 rc = vmmR0CallRing3SetJmp(&pVCpu->vmm.s.CallRing3JmpBufR0, HMR0RunGuestCode, pVM, pVCpu);
1417 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1418
1419 /*
1420 * Assert sanity on the way out. Using manual assertion code here as normal
1421 * assertions are going to panic the host since we're outside the setjmp/longjmp zone.
1422 */
1423 if (RT_UNLIKELY( VMCPU_GET_STATE(pVCpu) != VMCPUSTATE_STARTED_HM
1424 && RT_SUCCESS_NP(rc) && rc != VINF_VMM_CALL_HOST ))
1425 {
1426 pVM->vmm.s.szRing0AssertMsg1[0] = '\0';
1427 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2),
1428 "Got VMCPU state %d expected %d.\n", VMCPU_GET_STATE(pVCpu), VMCPUSTATE_STARTED_HM);
1429 rc = VERR_VMM_WRONG_HM_VMCPU_STATE;
1430 }
1431 /** @todo Get rid of this. HM shouldn't disable the context hook. */
1432 else if (RT_UNLIKELY(vmmR0ThreadCtxHookIsEnabled(pVCpu)))
1433 {
1434 pVM->vmm.s.szRing0AssertMsg1[0] = '\0';
1435 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2),
1436 "Thread-context hooks still enabled! VCPU=%p Id=%u rc=%d.\n", pVCpu, pVCpu->idCpu, rc);
1437 rc = VERR_INVALID_STATE;
1438 }
1439
1440 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
1441 }
1442 STAM_COUNTER_INC(&pVM->vmm.s.StatRunGC);
1443
1444 /*
1445 * Invalidate the host CPU identifiers before we disable the context
1446 * hook / restore preemption.
1447 */
1448 pVCpu->iHostCpuSet = UINT32_MAX;
1449 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1450
1451 /*
1452 * Disable context hooks. Due to unresolved cleanup issues, we
1453 * cannot leave the hooks enabled when we return to ring-3.
1454 *
1455 * Note! At the moment HM may also have disabled the hook
1456 * when we get here, but the IPRT API handles that.
1457 */
1458 if (pVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1459 {
1460 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1461 RTThreadCtxHookDisable(pVCpu->vmm.s.hCtxHook);
1462 }
1463 }
1464 /*
1465 * The system is about to go into suspend mode; go back to ring 3.
1466 */
1467 else
1468 {
1469 rc = VINF_EM_RAW_INTERRUPT;
1470 pVCpu->iHostCpuSet = UINT32_MAX;
1471 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1472 }
1473
1474 /** @todo When HM stops messing with the context hook state, we'll disable
1475 * preemption again before the RTThreadCtxHookDisable call. */
1476 if (!fPreemptRestored)
1477 RTThreadPreemptRestore(&PreemptState);
1478
1479 pVCpu->vmm.s.iLastGZRc = rc;
1480
1481 /* Fire dtrace probe and collect statistics. */
1482 VBOXVMM_R0_VMM_RETURN_TO_RING3_HM(pVCpu, CPUMQueryGuestCtxPtr(pVCpu), rc);
1483#ifdef VBOX_WITH_STATISTICS
1484 vmmR0RecordRC(pVM, pVCpu, rc);
1485#endif
1486#if 1
1487 /*
1488 * If this is a halt, try to handle it in ring-0 before going back to ring-3.
1489 */
1490 if (rc != VINF_EM_HALT)
1491 { /* we're not in a hurry for a HLT, so prefer this path */ }
1492 else
1493 {
1494 pVCpu->vmm.s.iLastGZRc = rc = vmmR0DoHalt(pGVM, pVM, pGVCpu, pVCpu);
1495 if (rc == VINF_SUCCESS)
1496 {
1497 pVCpu->vmm.s.cR0HaltsSucceeded++;
1498 continue;
1499 }
1500 pVCpu->vmm.s.cR0HaltsToRing3++;
1501 }
1502#endif
1503 }
1504 /*
1505 * Invalid CPU set index or TSC delta in need of measuring.
1506 */
1507 else
1508 {
1509 pVCpu->iHostCpuSet = UINT32_MAX;
1510 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1511 RTThreadPreemptRestore(&PreemptState);
1512 if (iHostCpuSet < RTCPUSET_MAX_CPUS)
1513 {
1514 int rc = SUPR0TscDeltaMeasureBySetIndex(pVM->pSession, iHostCpuSet, 0 /*fFlags*/,
1515 2 /*cMsWaitRetry*/, 5*RT_MS_1SEC /*cMsWaitThread*/,
1516 0 /*default cTries*/);
1517 if (RT_SUCCESS(rc) || rc == VERR_CPU_OFFLINE)
1518 pVCpu->vmm.s.iLastGZRc = VINF_EM_RAW_TO_R3;
1519 else
1520 pVCpu->vmm.s.iLastGZRc = rc;
1521 }
1522 else
1523 pVCpu->vmm.s.iLastGZRc = VERR_INVALID_CPU_INDEX;
1524 }
1525 break;
1526
1527 } /* halt loop. */
1528 break;
1529 }
1530
1531#ifdef VBOX_WITH_NEM_R0
1532# if defined(RT_ARCH_AMD64) && defined(RT_OS_WINDOWS)
1533 case VMMR0_DO_NEM_RUN:
1534 {
1535 /*
1536 * Setup the longjmp machinery and execute guest code (calls NEMR0RunGuestCode).
1537 */
1538 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1539 int rc = vmmR0CallRing3SetJmp2(&pVCpu->vmm.s.CallRing3JmpBufR0, NEMR0RunGuestCode, pGVM, idCpu);
1540 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1541 STAM_COUNTER_INC(&pVM->vmm.s.StatRunGC);
1542
1543 pVCpu->vmm.s.iLastGZRc = rc;
1544
1545 /*
1546 * Fire dtrace probe and collect statistics.
1547 */
1548 VBOXVMM_R0_VMM_RETURN_TO_RING3_NEM(pVCpu, CPUMQueryGuestCtxPtr(pVCpu), rc);
1549# ifdef VBOX_WITH_STATISTICS
1550 vmmR0RecordRC(pVM, pVCpu, rc);
1551# endif
1552 break;
1553 }
1554# endif
1555#endif
1556
1557 /*
1558 * For profiling.
1559 */
1560 case VMMR0_DO_NOP:
1561 pVCpu->vmm.s.iLastGZRc = VINF_SUCCESS;
1562 break;
1563
1564 /*
1565 * Shouldn't happen.
1566 */
1567 default:
1568 AssertMsgFailed(("%#x\n", enmOperation));
1569 pVCpu->vmm.s.iLastGZRc = VERR_NOT_SUPPORTED;
1570 break;
1571 }
1572 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1573}
1574
1575
1576/**
1577 * Validates a session or VM session argument.
1578 *
1579 * @returns true / false accordingly.
1580 * @param pVM The cross context VM structure.
1581 * @param pClaimedSession The session claim to validate.
1582 * @param pSession The session argument.
1583 */
1584DECLINLINE(bool) vmmR0IsValidSession(PVMCC pVM, PSUPDRVSESSION pClaimedSession, PSUPDRVSESSION pSession)
1585{
1586 /* This must be set! */
1587 if (!pSession)
1588 return false;
1589
1590 /* Only one out of the two. */
1591 if (pVM && pClaimedSession)
1592 return false;
1593 if (pVM)
1594 pClaimedSession = pVM->pSession;
1595 return pClaimedSession == pSession;
1596}
1597
1598
1599/**
1600 * VMMR0EntryEx worker function, either called directly or whenever possible
1601 * called thru a longjmp so we can exit safely on failure.
1602 *
1603 * @returns VBox status code.
1604 * @param pGVM The global (ring-0) VM structure.
1605 * @param pVM The cross context VM structure.
1606 * @param idCpu Virtual CPU ID argument. Must be NIL_VMCPUID if pVM
1607 * is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't.
1608 * @param enmOperation Which operation to execute.
1609 * @param pReqHdr This points to a SUPVMMR0REQHDR packet. Optional.
1610 * The support driver validates this if it's present.
1611 * @param u64Arg Some simple constant argument.
1612 * @param pSession The session of the caller.
1613 *
1614 * @remarks Assume called with interrupts _enabled_.
1615 */
1616static int vmmR0EntryExWorker(PGVM pGVM, PVMCC pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation,
1617 PSUPVMMR0REQHDR pReqHdr, uint64_t u64Arg, PSUPDRVSESSION pSession)
1618{
1619 /*
1620 * Validate pGVM, pVM and idCpu for consistency and validity.
1621 */
1622 if ( pGVM != NULL
1623 || pVM != NULL)
1624 {
1625 if (RT_LIKELY( RT_VALID_PTR(pGVM)
1626 && RT_VALID_PTR(pVM)
1627 && ((uintptr_t)pVM & PAGE_OFFSET_MASK) == 0))
1628 { /* likely */ }
1629 else
1630 {
1631 SUPR0Printf("vmmR0EntryExWorker: Invalid pGVM=%p and/or pVM=%p! (op=%d)\n", pGVM, pVM, enmOperation);
1632 return VERR_INVALID_POINTER;
1633 }
1634
1635#ifdef VBOX_BUGREF_9217
1636 if (RT_LIKELY(pGVM == pVM))
1637#else
1638 if (RT_LIKELY(pGVM->pVM == pVM))
1639#endif
1640 { /* likely */ }
1641 else
1642 {
1643#ifdef VBOX_BUGREF_9217
1644 SUPR0Printf("vmmR0EntryExWorker: pVM mismatch: got %p, pGVM/pVM=%p\n", pVM, pGVM);
1645#else
1646 SUPR0Printf("vmmR0EntryExWorker: pVM mismatch: got %p, pGVM->pVM=%p\n", pVM, pGVM->pVM);
1647#endif
1648 return VERR_INVALID_PARAMETER;
1649 }
1650
1651 if (RT_LIKELY(idCpu == NIL_VMCPUID || idCpu < pGVM->cCpus))
1652 { /* likely */ }
1653 else
1654 {
1655 SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu %#x (cCpus=%#x)\n", idCpu, pGVM->cCpus);
1656 return VERR_INVALID_PARAMETER;
1657 }
1658
1659#ifdef VBOX_BUGREF_9217
1660 if (RT_LIKELY( pVM->enmVMState >= VMSTATE_CREATING
1661 && pVM->enmVMState <= VMSTATE_TERMINATED
1662 && pVM->cCpus == pGVM->cCpus
1663 && pVM->pSession == pSession
1664 && pVM->pSelf == pVM))
1665#else
1666 if (RT_LIKELY( pVM->enmVMState >= VMSTATE_CREATING
1667 && pVM->enmVMState <= VMSTATE_TERMINATED
1668 && pVM->cCpus == pGVM->cCpus
1669 && pVM->pSession == pSession
1670 && pVM->pVMR0 == pVM))
1671#endif
1672 { /* likely */ }
1673 else
1674 {
1675#ifdef VBOX_BUGREF_9217
1676 SUPR0Printf("vmmR0EntryExWorker: Invalid pVM=%p:{.enmVMState=%d, .cCpus=%#x(==%#x), .pSession=%p(==%p), .pSelf=%p(==%p)}! (op=%d)\n",
1677 pVM, pVM->enmVMState, pVM->cCpus, pGVM->cCpus, pVM->pSession, pSession, pVM->pSelf, pVM, enmOperation);
1678#else
1679 SUPR0Printf("vmmR0EntryExWorker: Invalid pVM=%p:{.enmVMState=%d, .cCpus=%#x(==%#x), .pSession=%p(==%p), .pVMR0=%p(==%p)}! (op=%d)\n",
1680 pVM, pVM->enmVMState, pVM->cCpus, pGVM->cCpus, pVM->pSession, pSession, pVM->pVMR0, pVM, enmOperation);
1681#endif
1682 return VERR_INVALID_POINTER;
1683 }
1684 }
1685 else if (RT_LIKELY(idCpu == NIL_VMCPUID))
1686 { /* likely */ }
1687 else
1688 {
1689 SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu=%u\n", idCpu);
1690 return VERR_INVALID_PARAMETER;
1691 }
1692
1693 /*
1694 * SMAP fun.
1695 */
1696 VMM_CHECK_SMAP_SETUP();
1697 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
1698
1699 /*
1700 * Process the request.
1701 */
1702 int rc;
1703 switch (enmOperation)
1704 {
1705 /*
1706 * GVM requests
1707 */
1708 case VMMR0_DO_GVMM_CREATE_VM:
1709 if (pGVM == NULL && pVM == NULL && u64Arg == 0 && idCpu == NIL_VMCPUID)
1710 rc = GVMMR0CreateVMReq((PGVMMCREATEVMREQ)pReqHdr, pSession);
1711 else
1712 rc = VERR_INVALID_PARAMETER;
1713 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
1714 break;
1715
1716 case VMMR0_DO_GVMM_DESTROY_VM:
1717 if (pReqHdr == NULL && u64Arg == 0)
1718 rc = GVMMR0DestroyVM(pGVM, pVM);
1719 else
1720 rc = VERR_INVALID_PARAMETER;
1721 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
1722 break;
1723
1724 case VMMR0_DO_GVMM_REGISTER_VMCPU:
1725 if (pGVM != NULL && pVM != NULL)
1726 rc = GVMMR0RegisterVCpu(pGVM, pVM, idCpu);
1727 else
1728 rc = VERR_INVALID_PARAMETER;
1729 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1730 break;
1731
1732 case VMMR0_DO_GVMM_DEREGISTER_VMCPU:
1733 if (pGVM != NULL && pVM != NULL)
1734 rc = GVMMR0DeregisterVCpu(pGVM, pVM, idCpu);
1735 else
1736 rc = VERR_INVALID_PARAMETER;
1737 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1738 break;
1739
1740 case VMMR0_DO_GVMM_SCHED_HALT:
1741 if (pReqHdr)
1742 return VERR_INVALID_PARAMETER;
1743 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1744 rc = GVMMR0SchedHaltReq(pGVM, pVM, idCpu, u64Arg);
1745 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1746 break;
1747
1748 case VMMR0_DO_GVMM_SCHED_WAKE_UP:
1749 if (pReqHdr || u64Arg)
1750 return VERR_INVALID_PARAMETER;
1751 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1752 rc = GVMMR0SchedWakeUp(pGVM, pVM, idCpu);
1753 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1754 break;
1755
1756 case VMMR0_DO_GVMM_SCHED_POKE:
1757 if (pReqHdr || u64Arg)
1758 return VERR_INVALID_PARAMETER;
1759 rc = GVMMR0SchedPoke(pGVM, pVM, idCpu);
1760 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1761 break;
1762
1763 case VMMR0_DO_GVMM_SCHED_WAKE_UP_AND_POKE_CPUS:
1764 if (u64Arg)
1765 return VERR_INVALID_PARAMETER;
1766 rc = GVMMR0SchedWakeUpAndPokeCpusReq(pGVM, pVM, (PGVMMSCHEDWAKEUPANDPOKECPUSREQ)pReqHdr);
1767 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1768 break;
1769
1770 case VMMR0_DO_GVMM_SCHED_POLL:
1771 if (pReqHdr || u64Arg > 1)
1772 return VERR_INVALID_PARAMETER;
1773 rc = GVMMR0SchedPoll(pGVM, pVM, idCpu, !!u64Arg);
1774 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1775 break;
1776
1777 case VMMR0_DO_GVMM_QUERY_STATISTICS:
1778 if (u64Arg)
1779 return VERR_INVALID_PARAMETER;
1780 rc = GVMMR0QueryStatisticsReq(pGVM, pVM, (PGVMMQUERYSTATISTICSSREQ)pReqHdr, pSession);
1781 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1782 break;
1783
1784 case VMMR0_DO_GVMM_RESET_STATISTICS:
1785 if (u64Arg)
1786 return VERR_INVALID_PARAMETER;
1787 rc = GVMMR0ResetStatisticsReq(pGVM, pVM, (PGVMMRESETSTATISTICSSREQ)pReqHdr, pSession);
1788 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1789 break;
1790
1791 /*
1792 * Initialize the R0 part of a VM instance.
1793 */
1794 case VMMR0_DO_VMMR0_INIT:
1795 rc = vmmR0InitVM(pGVM, pVM, RT_LODWORD(u64Arg), RT_HIDWORD(u64Arg));
1796 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1797 break;
1798
1799 /*
1800     * Does EMT-specific ring-0 init.
1801 */
1802 case VMMR0_DO_VMMR0_INIT_EMT:
1803 rc = vmmR0InitVMEmt(pGVM, pVM, idCpu);
1804 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1805 break;
1806
1807 /*
1808 * Terminate the R0 part of a VM instance.
1809 */
1810 case VMMR0_DO_VMMR0_TERM:
1811 rc = VMMR0TermVM(pGVM, pVM, 0 /*idCpu*/);
1812 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1813 break;
1814
1815 /*
1816     * Attempt to enable HM mode and check the current setting.
1817 */
1818 case VMMR0_DO_HM_ENABLE:
1819 rc = HMR0EnableAllCpus(pVM);
1820 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1821 break;
1822
1823 /*
1824     * Set up the hardware-accelerated session.
1825 */
1826 case VMMR0_DO_HM_SETUP_VM:
1827 rc = HMR0SetupVM(pVM);
1828 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1829 break;
1830
1831 /*
1832 * PGM wrappers.
1833 */
1834 case VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES:
1835 if (idCpu == NIL_VMCPUID)
1836 return VERR_INVALID_CPU_ID;
1837 rc = PGMR0PhysAllocateHandyPages(pGVM, pVM, idCpu);
1838 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1839 break;
1840
1841 case VMMR0_DO_PGM_FLUSH_HANDY_PAGES:
1842 if (idCpu == NIL_VMCPUID)
1843 return VERR_INVALID_CPU_ID;
1844 rc = PGMR0PhysFlushHandyPages(pGVM, pVM, idCpu);
1845 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1846 break;
1847
1848 case VMMR0_DO_PGM_ALLOCATE_LARGE_HANDY_PAGE:
1849 if (idCpu == NIL_VMCPUID)
1850 return VERR_INVALID_CPU_ID;
1851 rc = PGMR0PhysAllocateLargeHandyPage(pGVM, pVM, idCpu);
1852 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1853 break;
1854
1855 case VMMR0_DO_PGM_PHYS_SETUP_IOMMU:
1856 if (idCpu != 0)
1857 return VERR_INVALID_CPU_ID;
1858 rc = PGMR0PhysSetupIoMmu(pGVM, pVM);
1859 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1860 break;
1861
1862 /*
1863 * GMM wrappers.
1864 */
1865 case VMMR0_DO_GMM_INITIAL_RESERVATION:
1866 if (u64Arg)
1867 return VERR_INVALID_PARAMETER;
1868 rc = GMMR0InitialReservationReq(pGVM, pVM, idCpu, (PGMMINITIALRESERVATIONREQ)pReqHdr);
1869 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1870 break;
1871
1872 case VMMR0_DO_GMM_UPDATE_RESERVATION:
1873 if (u64Arg)
1874 return VERR_INVALID_PARAMETER;
1875 rc = GMMR0UpdateReservationReq(pGVM, pVM, idCpu, (PGMMUPDATERESERVATIONREQ)pReqHdr);
1876 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1877 break;
1878
1879 case VMMR0_DO_GMM_ALLOCATE_PAGES:
1880 if (u64Arg)
1881 return VERR_INVALID_PARAMETER;
1882 rc = GMMR0AllocatePagesReq(pGVM, pVM, idCpu, (PGMMALLOCATEPAGESREQ)pReqHdr);
1883 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1884 break;
1885
1886 case VMMR0_DO_GMM_FREE_PAGES:
1887 if (u64Arg)
1888 return VERR_INVALID_PARAMETER;
1889 rc = GMMR0FreePagesReq(pGVM, pVM, idCpu, (PGMMFREEPAGESREQ)pReqHdr);
1890 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1891 break;
1892
1893 case VMMR0_DO_GMM_FREE_LARGE_PAGE:
1894 if (u64Arg)
1895 return VERR_INVALID_PARAMETER;
1896 rc = GMMR0FreeLargePageReq(pGVM, pVM, idCpu, (PGMMFREELARGEPAGEREQ)pReqHdr);
1897 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1898 break;
1899
1900 case VMMR0_DO_GMM_QUERY_HYPERVISOR_MEM_STATS:
1901 if (u64Arg)
1902 return VERR_INVALID_PARAMETER;
1903 rc = GMMR0QueryHypervisorMemoryStatsReq((PGMMMEMSTATSREQ)pReqHdr);
1904 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1905 break;
1906
1907 case VMMR0_DO_GMM_QUERY_MEM_STATS:
1908 if (idCpu == NIL_VMCPUID)
1909 return VERR_INVALID_CPU_ID;
1910 if (u64Arg)
1911 return VERR_INVALID_PARAMETER;
1912 rc = GMMR0QueryMemoryStatsReq(pGVM, pVM, idCpu, (PGMMMEMSTATSREQ)pReqHdr);
1913 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1914 break;
1915
1916 case VMMR0_DO_GMM_BALLOONED_PAGES:
1917 if (u64Arg)
1918 return VERR_INVALID_PARAMETER;
1919 rc = GMMR0BalloonedPagesReq(pGVM, pVM, idCpu, (PGMMBALLOONEDPAGESREQ)pReqHdr);
1920 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1921 break;
1922
1923 case VMMR0_DO_GMM_MAP_UNMAP_CHUNK:
1924 if (u64Arg)
1925 return VERR_INVALID_PARAMETER;
1926 rc = GMMR0MapUnmapChunkReq(pGVM, pVM, (PGMMMAPUNMAPCHUNKREQ)pReqHdr);
1927 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1928 break;
1929
1930 case VMMR0_DO_GMM_SEED_CHUNK:
1931 if (pReqHdr)
1932 return VERR_INVALID_PARAMETER;
1933 rc = GMMR0SeedChunk(pGVM, pVM, idCpu, (RTR3PTR)u64Arg);
1934 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1935 break;
1936
1937 case VMMR0_DO_GMM_REGISTER_SHARED_MODULE:
1938 if (idCpu == NIL_VMCPUID)
1939 return VERR_INVALID_CPU_ID;
1940 if (u64Arg)
1941 return VERR_INVALID_PARAMETER;
1942 rc = GMMR0RegisterSharedModuleReq(pGVM, pVM, idCpu, (PGMMREGISTERSHAREDMODULEREQ)pReqHdr);
1943 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1944 break;
1945
1946 case VMMR0_DO_GMM_UNREGISTER_SHARED_MODULE:
1947 if (idCpu == NIL_VMCPUID)
1948 return VERR_INVALID_CPU_ID;
1949 if (u64Arg)
1950 return VERR_INVALID_PARAMETER;
1951 rc = GMMR0UnregisterSharedModuleReq(pGVM, pVM, idCpu, (PGMMUNREGISTERSHAREDMODULEREQ)pReqHdr);
1952 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1953 break;
1954
1955 case VMMR0_DO_GMM_RESET_SHARED_MODULES:
1956 if (idCpu == NIL_VMCPUID)
1957 return VERR_INVALID_CPU_ID;
1958 if ( u64Arg
1959 || pReqHdr)
1960 return VERR_INVALID_PARAMETER;
1961 rc = GMMR0ResetSharedModules(pGVM, pVM, idCpu);
1962 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1963 break;
1964
1965#ifdef VBOX_WITH_PAGE_SHARING
1966 case VMMR0_DO_GMM_CHECK_SHARED_MODULES:
1967 {
1968 if (idCpu == NIL_VMCPUID)
1969 return VERR_INVALID_CPU_ID;
1970 if ( u64Arg
1971 || pReqHdr)
1972 return VERR_INVALID_PARAMETER;
1973 rc = GMMR0CheckSharedModules(pGVM, pVM, idCpu);
1974 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1975 break;
1976 }
1977#endif
1978
1979#if defined(VBOX_STRICT) && HC_ARCH_BITS == 64
1980 case VMMR0_DO_GMM_FIND_DUPLICATE_PAGE:
1981 if (u64Arg)
1982 return VERR_INVALID_PARAMETER;
1983 rc = GMMR0FindDuplicatePageReq(pGVM, pVM, (PGMMFINDDUPLICATEPAGEREQ)pReqHdr);
1984 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1985 break;
1986#endif
1987
1988 case VMMR0_DO_GMM_QUERY_STATISTICS:
1989 if (u64Arg)
1990 return VERR_INVALID_PARAMETER;
1991 rc = GMMR0QueryStatisticsReq(pGVM, pVM, (PGMMQUERYSTATISTICSSREQ)pReqHdr);
1992 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1993 break;
1994
1995 case VMMR0_DO_GMM_RESET_STATISTICS:
1996 if (u64Arg)
1997 return VERR_INVALID_PARAMETER;
1998 rc = GMMR0ResetStatisticsReq(pGVM, pVM, (PGMMRESETSTATISTICSSREQ)pReqHdr);
1999 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2000 break;
2001
2002 /*
2003 * A quick GCFGM mock-up.
2004 */
2005 /** @todo GCFGM with proper access control, ring-3 management interface and all that. */
2006 case VMMR0_DO_GCFGM_SET_VALUE:
2007 case VMMR0_DO_GCFGM_QUERY_VALUE:
2008 {
2009 if (pGVM || pVM || !pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
2010 return VERR_INVALID_PARAMETER;
2011 PGCFGMVALUEREQ pReq = (PGCFGMVALUEREQ)pReqHdr;
2012 if (pReq->Hdr.cbReq != sizeof(*pReq))
2013 return VERR_INVALID_PARAMETER;
2014 if (enmOperation == VMMR0_DO_GCFGM_SET_VALUE)
2015 {
2016 rc = GVMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
2017 //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
2018 // rc = GMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
2019 }
2020 else
2021 {
2022 rc = GVMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
2023 //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
2024 // rc = GMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
2025 }
2026 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2027 break;
2028 }
2029
2030 /*
2031 * PDM Wrappers.
2032 */
2033 case VMMR0_DO_PDM_DRIVER_CALL_REQ_HANDLER:
2034 {
2035 if (!pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
2036 return VERR_INVALID_PARAMETER;
2037 rc = PDMR0DriverCallReqHandler(pGVM, pVM, (PPDMDRIVERCALLREQHANDLERREQ)pReqHdr);
2038 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2039 break;
2040 }
2041
2042 case VMMR0_DO_PDM_DEVICE_CALL_REQ_HANDLER:
2043 {
2044 if (!pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
2045 return VERR_INVALID_PARAMETER;
2046 rc = PDMR0DeviceCallReqHandler(pGVM, pVM, (PPDMDEVICECALLREQHANDLERREQ)pReqHdr);
2047 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2048 break;
2049 }
2050
2051 /*
2052 * Requests to the internal networking service.
2053 */
2054 case VMMR0_DO_INTNET_OPEN:
2055 {
2056 PINTNETOPENREQ pReq = (PINTNETOPENREQ)pReqHdr;
2057 if (u64Arg || !pReq || !vmmR0IsValidSession(pVM, pReq->pSession, pSession) || idCpu != NIL_VMCPUID)
2058 return VERR_INVALID_PARAMETER;
2059 rc = IntNetR0OpenReq(pSession, pReq);
2060 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2061 break;
2062 }
2063
2064 case VMMR0_DO_INTNET_IF_CLOSE:
2065 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFCLOSEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2066 return VERR_INVALID_PARAMETER;
2067 rc = IntNetR0IfCloseReq(pSession, (PINTNETIFCLOSEREQ)pReqHdr);
2068 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2069 break;
2070
2071
2072 case VMMR0_DO_INTNET_IF_GET_BUFFER_PTRS:
2073 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFGETBUFFERPTRSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2074 return VERR_INVALID_PARAMETER;
2075 rc = IntNetR0IfGetBufferPtrsReq(pSession, (PINTNETIFGETBUFFERPTRSREQ)pReqHdr);
2076 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2077 break;
2078
2079 case VMMR0_DO_INTNET_IF_SET_PROMISCUOUS_MODE:
2080 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2081 return VERR_INVALID_PARAMETER;
2082 rc = IntNetR0IfSetPromiscuousModeReq(pSession, (PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr);
2083 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2084 break;
2085
2086 case VMMR0_DO_INTNET_IF_SET_MAC_ADDRESS:
2087 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETMACADDRESSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2088 return VERR_INVALID_PARAMETER;
2089 rc = IntNetR0IfSetMacAddressReq(pSession, (PINTNETIFSETMACADDRESSREQ)pReqHdr);
2090 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2091 break;
2092
2093 case VMMR0_DO_INTNET_IF_SET_ACTIVE:
2094 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETACTIVEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2095 return VERR_INVALID_PARAMETER;
2096 rc = IntNetR0IfSetActiveReq(pSession, (PINTNETIFSETACTIVEREQ)pReqHdr);
2097 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2098 break;
2099
2100 case VMMR0_DO_INTNET_IF_SEND:
2101 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2102 return VERR_INVALID_PARAMETER;
2103 rc = IntNetR0IfSendReq(pSession, (PINTNETIFSENDREQ)pReqHdr);
2104 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2105 break;
2106
2107 case VMMR0_DO_INTNET_IF_WAIT:
2108 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2109 return VERR_INVALID_PARAMETER;
2110 rc = IntNetR0IfWaitReq(pSession, (PINTNETIFWAITREQ)pReqHdr);
2111 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2112 break;
2113
2114 case VMMR0_DO_INTNET_IF_ABORT_WAIT:
2115 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2116 return VERR_INVALID_PARAMETER;
2117 rc = IntNetR0IfAbortWaitReq(pSession, (PINTNETIFABORTWAITREQ)pReqHdr);
2118 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2119 break;
2120
2121#ifdef VBOX_WITH_PCI_PASSTHROUGH
2122 /*
2123 * Requests to host PCI driver service.
2124 */
2125 case VMMR0_DO_PCIRAW_REQ:
2126 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PPCIRAWSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2127 return VERR_INVALID_PARAMETER;
2128 rc = PciRawR0ProcessReq(pGVM, pVM, pSession, (PPCIRAWSENDREQ)pReqHdr);
2129 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2130 break;
2131#endif
2132
2133 /*
2134 * NEM requests.
2135 */
2136#ifdef VBOX_WITH_NEM_R0
2137# if defined(RT_ARCH_AMD64) && defined(RT_OS_WINDOWS)
2138 case VMMR0_DO_NEM_INIT_VM:
2139 if (u64Arg || pReqHdr || idCpu != 0)
2140 return VERR_INVALID_PARAMETER;
2141 rc = NEMR0InitVM(pGVM, pVM);
2142 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2143 break;
2144
2145 case VMMR0_DO_NEM_INIT_VM_PART_2:
2146 if (u64Arg || pReqHdr || idCpu != 0)
2147 return VERR_INVALID_PARAMETER;
2148 rc = NEMR0InitVMPart2(pGVM, pVM);
2149 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2150 break;
2151
2152 case VMMR0_DO_NEM_MAP_PAGES:
2153 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2154 return VERR_INVALID_PARAMETER;
2155 rc = NEMR0MapPages(pGVM, pVM, idCpu);
2156 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2157 break;
2158
2159 case VMMR0_DO_NEM_UNMAP_PAGES:
2160 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2161 return VERR_INVALID_PARAMETER;
2162 rc = NEMR0UnmapPages(pGVM, pVM, idCpu);
2163 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2164 break;
2165
2166 case VMMR0_DO_NEM_EXPORT_STATE:
2167 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2168 return VERR_INVALID_PARAMETER;
2169 rc = NEMR0ExportState(pGVM, pVM, idCpu);
2170 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2171 break;
2172
2173 case VMMR0_DO_NEM_IMPORT_STATE:
2174 if (pReqHdr || idCpu == NIL_VMCPUID)
2175 return VERR_INVALID_PARAMETER;
2176 rc = NEMR0ImportState(pGVM, pVM, idCpu, u64Arg);
2177 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2178 break;
2179
2180 case VMMR0_DO_NEM_QUERY_CPU_TICK:
2181 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2182 return VERR_INVALID_PARAMETER;
2183 rc = NEMR0QueryCpuTick(pGVM, pVM, idCpu);
2184 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2185 break;
2186
2187 case VMMR0_DO_NEM_RESUME_CPU_TICK_ON_ALL:
2188 if (pReqHdr || idCpu == NIL_VMCPUID)
2189 return VERR_INVALID_PARAMETER;
2190 rc = NEMR0ResumeCpuTickOnAll(pGVM, pVM, idCpu, u64Arg);
2191 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2192 break;
2193
2194 case VMMR0_DO_NEM_UPDATE_STATISTICS:
2195 if (u64Arg || pReqHdr)
2196 return VERR_INVALID_PARAMETER;
2197 rc = NEMR0UpdateStatistics(pGVM, pVM, idCpu);
2198 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2199 break;
2200
2201# if 1 && defined(DEBUG_bird)
2202 case VMMR0_DO_NEM_EXPERIMENT:
2203 if (pReqHdr)
2204 return VERR_INVALID_PARAMETER;
2205 rc = NEMR0DoExperiment(pGVM, pVM, idCpu, u64Arg);
2206 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2207 break;
2208# endif
2209# endif
2210#endif
2211
2212 /*
2213 * For profiling.
2214 */
2215 case VMMR0_DO_NOP:
2216 case VMMR0_DO_SLOW_NOP:
2217 return VINF_SUCCESS;
2218
2219 /*
2220 * For testing Ring-0 APIs invoked in this environment.
2221 */
2222 case VMMR0_DO_TESTS:
2223 /** @todo make new test */
2224 return VINF_SUCCESS;
2225
2226 default:
2227 /*
2228             * We're returning VERR_NOT_SUPPORTED here so we've got something other
2229             * than -1, which the interrupt gate glue code might return.
2230 */
2231 Log(("operation %#x is not supported\n", enmOperation));
2232 return VERR_NOT_SUPPORTED;
2233 }
2234 return rc;
2235}
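/*
 * Editorial note (not part of the original file): every VMMR0_DO_* case in the
 * dispatcher above follows the same shape -- reject unexpected simple arguments
 * (pReqHdr, u64Arg, idCpu), forward the call to the owning subsystem's ring-0
 * worker, then run the SMAP sanity check.  A sketch of what a new, purely
 * hypothetical operation would look like (the names below are invented for
 * illustration only):
 *
 *      case VMMR0_DO_EXAMPLE_OP:                                   // hypothetical
 *          if (u64Arg || !pReqHdr || idCpu != NIL_VMCPUID)
 *              return VERR_INVALID_PARAMETER;
 *          rc = ExampleR0ReqHandler(pGVM, pVM, (PEXAMPLEREQ)pReqHdr); // hypothetical handler
 *          VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
 *          break;
 */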
2236
2237
2238/**
2239 * Argument for vmmR0EntryExWrapper containing the arguments for VMMR0EntryEx.
2240 */
2241typedef struct VMMR0ENTRYEXARGS
2242{
2243    PGVM            pGVM;           /**< The global (ring-0) VM structure. */
2244    PVMCC           pVM;            /**< The cross context VM structure. */
2245    VMCPUID         idCpu;          /**< The virtual CPU ID. */
2246    VMMR0OPERATION  enmOperation;   /**< The operation to execute. */
2247    PSUPVMMR0REQHDR pReq;           /**< Pointer to the SUPVMMR0REQHDR packet. Optional. */
2248    uint64_t        u64Arg;         /**< A simple constant argument. */
2249    PSUPDRVSESSION  pSession;       /**< The session of the caller. */
2250} VMMR0ENTRYEXARGS;
2251/** Pointer to a vmmR0EntryExWrapper argument package. */
2252typedef VMMR0ENTRYEXARGS *PVMMR0ENTRYEXARGS;
2253
2254/**
2255 * This is just a longjmp wrapper function for VMMR0EntryEx calls.
2256 *
2257 * @returns VBox status code.
2258 * @param   pvArgs      The argument package.
2259 */
2260static DECLCALLBACK(int) vmmR0EntryExWrapper(void *pvArgs)
2261{
2262 return vmmR0EntryExWorker(((PVMMR0ENTRYEXARGS)pvArgs)->pGVM,
2263 ((PVMMR0ENTRYEXARGS)pvArgs)->pVM,
2264 ((PVMMR0ENTRYEXARGS)pvArgs)->idCpu,
2265 ((PVMMR0ENTRYEXARGS)pvArgs)->enmOperation,
2266 ((PVMMR0ENTRYEXARGS)pvArgs)->pReq,
2267 ((PVMMR0ENTRYEXARGS)pvArgs)->u64Arg,
2268 ((PVMMR0ENTRYEXARGS)pvArgs)->pSession);
2269}
2270
2271
2272/**
2273 * The Ring 0 entry point, called by the support library (SUP).
2274 *
2275 * @returns VBox status code.
2276 * @param pGVM The global (ring-0) VM structure.
2277 * @param pVM The cross context VM structure.
2278 * @param idCpu Virtual CPU ID argument. Must be NIL_VMCPUID if pVM
2279 *                          is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't.
2280 * @param enmOperation Which operation to execute.
2281 * @param pReq Pointer to the SUPVMMR0REQHDR packet. Optional.
2282 * @param u64Arg Some simple constant argument.
2283 * @param pSession The session of the caller.
2284 * @remarks Assumed to be called with interrupts _enabled_.
2285 */
2286VMMR0DECL(int) VMMR0EntryEx(PGVM pGVM, PVMCC pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation,
2287 PSUPVMMR0REQHDR pReq, uint64_t u64Arg, PSUPDRVSESSION pSession)
2288{
2289 /*
2290 * Requests that should only happen on the EMT thread will be
2291 * wrapped in a setjmp so we can assert without causing trouble.
2292 */
2293 if ( pVM != NULL
2294 && pGVM != NULL
2295 && idCpu < pGVM->cCpus
2296 && pVM->pSession == pSession
2297#ifdef VBOX_BUGREF_9217
2298 && pVM->pSelf != NULL
2299#else
2300 && pVM->pVMR0 != NULL
2301#endif
2302 )
2303 {
2304 switch (enmOperation)
2305 {
2306 /* These might/will be called before VMMR3Init. */
2307 case VMMR0_DO_GMM_INITIAL_RESERVATION:
2308 case VMMR0_DO_GMM_UPDATE_RESERVATION:
2309 case VMMR0_DO_GMM_ALLOCATE_PAGES:
2310 case VMMR0_DO_GMM_FREE_PAGES:
2311 case VMMR0_DO_GMM_BALLOONED_PAGES:
2312 /* On the mac we might not have a valid jmp buf, so check these as well. */
2313 case VMMR0_DO_VMMR0_INIT:
2314 case VMMR0_DO_VMMR0_TERM:
2315 {
2316 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2317#ifdef VBOX_BUGREF_9217
2318 PVMCPUCC pVCpu = pGVCpu;
2319#else
2320 PVMCPUCC pVCpu = VMCC_GET_CPU(pVM, idCpu);
2321#endif
2322 RTNATIVETHREAD hNativeThread = RTThreadNativeSelf();
2323 if (RT_LIKELY( pGVCpu->hEMT == hNativeThread
2324 && pVCpu->hNativeThreadR0 == hNativeThread))
2325 {
2326 if (!pVCpu->vmm.s.CallRing3JmpBufR0.pvSavedStack)
2327 break;
2328
2329 /** @todo validate this EMT claim... GVM knows. */
2330 VMMR0ENTRYEXARGS Args;
2331 Args.pGVM = pGVM;
2332 Args.pVM = pVM;
2333 Args.idCpu = idCpu;
2334 Args.enmOperation = enmOperation;
2335 Args.pReq = pReq;
2336 Args.u64Arg = u64Arg;
2337 Args.pSession = pSession;
2338 return vmmR0CallRing3SetJmpEx(&pVCpu->vmm.s.CallRing3JmpBufR0, vmmR0EntryExWrapper, &Args);
2339 }
2340 return VERR_VM_THREAD_NOT_EMT;
2341 }
2342
2343 default:
2344 break;
2345 }
2346 }
2347 return vmmR0EntryExWorker(pGVM, pVM, idCpu, enmOperation, pReq, u64Arg, pSession);
2348}
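/*
 * Editorial note (illustrative only): for operations that take neither a VM nor
 * a request packet, the validation above lets the entry point be called with
 * all-NULL handles.  A minimal sketch using the profiling no-op:
 *
 *      int rc = VMMR0EntryEx(NULL, NULL, NIL_VMCPUID, VMMR0_DO_NOP,
 *                            NULL, 0, pSession);
 *      // pGVM/pVM/pReq are NULL and u64Arg is 0; the worker returns
 *      // VINF_SUCCESS before touching any VM state (see VMMR0_DO_NOP above).
 *
 * Whether real callers ever reach the entry point this way is up to the
 * support driver; this only illustrates the parameter contract.
 */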
2349
2350
2351/**
2352 * Checks whether we've armed the ring-0 long jump machinery.
2353 *
2354 * @returns @c true / @c false
2355 * @param pVCpu The cross context virtual CPU structure.
2356 * @thread EMT
2357 * @sa VMMIsLongJumpArmed
2358 */
2359VMMR0_INT_DECL(bool) VMMR0IsLongJumpArmed(PVMCPUCC pVCpu)
2360{
2361#ifdef RT_ARCH_X86
2362 return pVCpu->vmm.s.CallRing3JmpBufR0.eip
2363 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
2364#else
2365 return pVCpu->vmm.s.CallRing3JmpBufR0.rip
2366 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
2367#endif
2368}
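/*
 * Editorial note (illustrative only): callers typically use this as a sanity
 * check before doing work that may have to bail out to ring-3, e.g.:
 *
 *      Assert(VMMR0IsLongJumpArmed(pVCpu));  // jump buffer must be armed before
 *                                            // a ring-3 call can be attempted
 */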
2369
2370
2371/**
2372 * Checks whether we've done a ring-3 long jump.
2373 *
2374 * @returns @c true / @c false
2375 * @param pVCpu The cross context virtual CPU structure.
2376 * @thread EMT
2377 */
2378VMMR0_INT_DECL(bool) VMMR0IsInRing3LongJump(PVMCPUCC pVCpu)
2379{
2380 return pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
2381}
2382
2383
2384/**
2385 * Internal R0 logger worker: Flush logger.
2386 *
2387 * @param pLogger The logger instance to flush.
2388 * @remark This function must be exported!
2389 */
2390VMMR0DECL(void) vmmR0LoggerFlush(PRTLOGGER pLogger)
2391{
2392#ifdef LOG_ENABLED
2393 /*
2394 * Convert the pLogger into a VM handle and 'call' back to Ring-3.
2395     * (This code is a bit paranoid.)
2396 */
2397 PVMMR0LOGGER pR0Logger = (PVMMR0LOGGER)((uintptr_t)pLogger - RT_UOFFSETOF(VMMR0LOGGER, Logger));
2398 if ( !VALID_PTR(pR0Logger)
2399 || !VALID_PTR(pR0Logger + 1)
2400 || pLogger->u32Magic != RTLOGGER_MAGIC)
2401 {
2402# ifdef DEBUG
2403 SUPR0Printf("vmmR0LoggerFlush: pLogger=%p!\n", pLogger);
2404# endif
2405 return;
2406 }
2407 if (pR0Logger->fFlushingDisabled)
2408 return; /* quietly */
2409
2410 PVMCC pVM = pR0Logger->pVM;
2411 if ( !VALID_PTR(pVM)
2412# ifdef VBOX_BUGREF_9217
2413 || pVM->pSelf != pVM
2414# else
2415 || pVM->pVMR0 != pVM
2416# endif
2417 )
2418 {
2419# ifdef DEBUG
2420# ifdef VBOX_BUGREF_9217
2421 SUPR0Printf("vmmR0LoggerFlush: pVM=%p! pSelf=%p! pLogger=%p\n", pVM, pVM->pSelf, pLogger);
2422# else
2423 SUPR0Printf("vmmR0LoggerFlush: pVM=%p! pVMR0=%p! pLogger=%p\n", pVM, pVM->pVMR0, pLogger);
2424# endif
2425# endif
2426 return;
2427 }
2428
2429 PVMCPUCC pVCpu = VMMGetCpu(pVM);
2430 if (pVCpu)
2431 {
2432 /*
2433 * Check that the jump buffer is armed.
2434 */
2435# ifdef RT_ARCH_X86
2436 if ( !pVCpu->vmm.s.CallRing3JmpBufR0.eip
2437 || pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2438# else
2439 if ( !pVCpu->vmm.s.CallRing3JmpBufR0.rip
2440 || pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2441# endif
2442 {
2443# ifdef DEBUG
2444 SUPR0Printf("vmmR0LoggerFlush: Jump buffer isn't armed!\n");
2445# endif
2446 return;
2447 }
2448 VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VMM_LOGGER_FLUSH, 0);
2449 }
2450# ifdef DEBUG
2451 else
2452 SUPR0Printf("vmmR0LoggerFlush: invalid VCPU context!\n");
2453# endif
2454#else
2455 NOREF(pLogger);
2456#endif /* LOG_ENABLED */
2457}
2458
2459#ifdef LOG_ENABLED
2460
2461/**
2462 * Disables flushing of the ring-0 debug log.
2463 *
2464 * @param pVCpu The cross context virtual CPU structure.
2465 */
2466VMMR0_INT_DECL(void) VMMR0LogFlushDisable(PVMCPUCC pVCpu)
2467{
2468 if (pVCpu->vmm.s.pR0LoggerR0)
2469 pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled = true;
2470 if (pVCpu->vmm.s.pR0RelLoggerR0)
2471 pVCpu->vmm.s.pR0RelLoggerR0->fFlushingDisabled = true;
2472}
2473
2474
2475/**
2476 * Enables flushing of the ring-0 debug log.
2477 *
2478 * @param pVCpu The cross context virtual CPU structure.
2479 */
2480VMMR0_INT_DECL(void) VMMR0LogFlushEnable(PVMCPUCC pVCpu)
2481{
2482 if (pVCpu->vmm.s.pR0LoggerR0)
2483 pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled = false;
2484 if (pVCpu->vmm.s.pR0RelLoggerR0)
2485 pVCpu->vmm.s.pR0RelLoggerR0->fFlushingDisabled = false;
2486}
2487
2488
2489/**
2490 * Checks whether log flushing is disabled.
2491 *
2492 * @param pVCpu The cross context virtual CPU structure.
2493 */
2494VMMR0_INT_DECL(bool) VMMR0IsLogFlushDisabled(PVMCPUCC pVCpu)
2495{
2496 if (pVCpu->vmm.s.pR0LoggerR0)
2497 return pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled;
2498 if (pVCpu->vmm.s.pR0RelLoggerR0)
2499 return pVCpu->vmm.s.pR0RelLoggerR0->fFlushingDisabled;
2500 return true;
2501}
2502
2503#endif /* LOG_ENABLED */
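/*
 * Editorial note (illustrative only): the disable/enable pair above is meant to
 * bracket code that may log but must not trigger a ring-3 logger flush (for
 * instance while the jump buffer is unusable):
 *
 *      VMMR0LogFlushDisable(pVCpu);
 *      ...                                   // code that must not flush to ring-3
 *      VMMR0LogFlushEnable(pVCpu);
 */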
2504
2505/**
2506 * Override RTLogRelGetDefaultInstanceEx so we can do LogRel to VBox.log from EMTs in ring-0.
2507 */
2508DECLEXPORT(PRTLOGGER) RTLogRelGetDefaultInstanceEx(uint32_t fFlagsAndGroup)
2509{
2510 PGVMCPU pGVCpu = GVMMR0GetGVCpuByEMT(NIL_RTNATIVETHREAD);
2511 if (pGVCpu)
2512 {
2513#ifdef VBOX_BUGREF_9217
2514 PVMCPUCC pVCpu = pGVCpu;
2515#else
2516 PVMCPUCC pVCpu = pGVCpu->pVCpu;
2517#endif
2518 if (RT_VALID_PTR(pVCpu))
2519 {
2520 PVMMR0LOGGER pVmmLogger = pVCpu->vmm.s.pR0RelLoggerR0;
2521 if (RT_VALID_PTR(pVmmLogger))
2522 {
2523 if ( pVmmLogger->fCreated
2524#ifdef VBOX_BUGREF_9217
2525 && pVmmLogger->pVM == pGVCpu->pGVM
2526#else
2527 && pVmmLogger->pVM == pGVCpu->pVM
2528#endif
2529 )
2530 {
2531 if (pVmmLogger->Logger.fFlags & RTLOGFLAGS_DISABLED)
2532 return NULL;
2533 uint16_t const fFlags = RT_LO_U16(fFlagsAndGroup);
2534 uint16_t const iGroup = RT_HI_U16(fFlagsAndGroup);
2535 if ( iGroup != UINT16_MAX
2536 && ( ( pVmmLogger->Logger.afGroups[iGroup < pVmmLogger->Logger.cGroups ? iGroup : 0]
2537 & (fFlags | (uint32_t)RTLOGGRPFLAGS_ENABLED))
2538 != (fFlags | (uint32_t)RTLOGGRPFLAGS_ENABLED)))
2539 return NULL;
2540 return &pVmmLogger->Logger;
2541 }
2542 }
2543 }
2544 }
2545 return SUPR0GetDefaultLogRelInstanceEx(fFlagsAndGroup);
2546}
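/*
 * Editorial note (illustrative only): fFlagsAndGroup packs the group flags into
 * the low 16 bits and the group index into the high 16 bits, matching the
 * RT_LO_U16/RT_HI_U16 split above.  A caller would typically form it like this
 * (assuming the usual IPRT helpers):
 *
 *      uint32_t fFlagsAndGroup = RT_MAKE_U32(RTLOGGRPFLAGS_LEVEL_1, LOG_GROUP_VMM);
 *      PRTLOGGER pLogger = RTLogRelGetDefaultInstanceEx(fFlagsAndGroup);
 *      // pLogger is NULL when that group/level is disabled for this EMT's
 *      // ring-0 release logger, so the LogRel statement is skipped cheaply.
 */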
2547
2548
2549/**
2550 * Jump back to ring-3 if we're the EMT and the longjmp is armed.
2551 *
2552 * @returns true if the breakpoint should be hit, false if it should be ignored.
2553 */
2554DECLEXPORT(bool) RTCALL RTAssertShouldPanic(void)
2555{
2556#if 0
2557 return true;
2558#else
2559 PVMCC pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
2560 if (pVM)
2561 {
2562 PVMCPUCC pVCpu = VMMGetCpu(pVM);
2563
2564 if (pVCpu)
2565 {
2566#ifdef RT_ARCH_X86
2567 if ( pVCpu->vmm.s.CallRing3JmpBufR0.eip
2568 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2569#else
2570 if ( pVCpu->vmm.s.CallRing3JmpBufR0.rip
2571 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2572#endif
2573 {
2574 int rc = VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VM_R0_ASSERTION, 0);
2575 return RT_FAILURE_NP(rc);
2576 }
2577 }
2578 }
2579#ifdef RT_OS_LINUX
2580 return true;
2581#else
2582 return false;
2583#endif
2584#endif
2585}
2586
2587
2588/**
2589 * Override this so we can push it up to ring-3.
2590 *
2591 * @param pszExpr Expression. Can be NULL.
2592 * @param uLine Location line number.
2593 * @param pszFile Location file name.
2594 * @param pszFunction Location function name.
2595 */
2596DECLEXPORT(void) RTCALL RTAssertMsg1Weak(const char *pszExpr, unsigned uLine, const char *pszFile, const char *pszFunction)
2597{
2598 /*
2599 * To the log.
2600 */
2601 LogAlways(("\n!!R0-Assertion Failed!!\n"
2602 "Expression: %s\n"
2603 "Location : %s(%d) %s\n",
2604 pszExpr, pszFile, uLine, pszFunction));
2605
2606 /*
2607 * To the global VMM buffer.
2608 */
2609 PVMCC pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
2610 if (pVM)
2611 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg1, sizeof(pVM->vmm.s.szRing0AssertMsg1),
2612 "\n!!R0-Assertion Failed!!\n"
2613 "Expression: %.*s\n"
2614 "Location : %s(%d) %s\n",
2615 sizeof(pVM->vmm.s.szRing0AssertMsg1) / 4 * 3, pszExpr,
2616 pszFile, uLine, pszFunction);
2617
2618 /*
2619 * Continue the normal way.
2620 */
2621 RTAssertMsg1(pszExpr, uLine, pszFile, pszFunction);
2622}
2623
2624
2625/**
2626 * Callback for RTLogFormatV which writes to the ring-3 log port.
2627 * See PFNLOGOUTPUT() for details.
2628 */
2629static DECLCALLBACK(size_t) rtLogOutput(void *pv, const char *pachChars, size_t cbChars)
2630{
2631 for (size_t i = 0; i < cbChars; i++)
2632 {
2633 LogAlways(("%c", pachChars[i])); NOREF(pachChars);
2634 }
2635
2636 NOREF(pv);
2637 return cbChars;
2638}
2639
2640
2641/**
2642 * Override this so we can push it up to ring-3.
2643 *
2644 * @param pszFormat The format string.
2645 * @param va Arguments.
2646 */
2647DECLEXPORT(void) RTCALL RTAssertMsg2WeakV(const char *pszFormat, va_list va)
2648{
2649 va_list vaCopy;
2650
2651 /*
2652 * Push the message to the loggers.
2653 */
2654 PRTLOGGER pLog = RTLogGetDefaultInstance(); /* Don't initialize it here... */
2655 if (pLog)
2656 {
2657 va_copy(vaCopy, va);
2658 RTLogFormatV(rtLogOutput, pLog, pszFormat, vaCopy);
2659 va_end(vaCopy);
2660 }
2661 pLog = RTLogRelGetDefaultInstance();
2662 if (pLog)
2663 {
2664 va_copy(vaCopy, va);
2665 RTLogFormatV(rtLogOutput, pLog, pszFormat, vaCopy);
2666 va_end(vaCopy);
2667 }
2668
2669 /*
2670 * Push it to the global VMM buffer.
2671 */
2672 PVMCC pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
2673 if (pVM)
2674 {
2675 va_copy(vaCopy, va);
2676 RTStrPrintfV(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2), pszFormat, vaCopy);
2677 va_end(vaCopy);
2678 }
2679
2680 /*
2681 * Continue the normal way.
2682 */
2683 RTAssertMsg2V(pszFormat, va);
2684}
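/*
 * Editorial note: the va_copy/va_end pairs above are needed because a va_list
 * becomes indeterminate once it has been consumed; each consumer (debug logger,
 * release logger, VMM assertion buffer) therefore gets its own copy, leaving
 * the original intact for the final RTAssertMsg2V call.  The general pattern:
 *
 *      va_list vaCopy;
 *      va_copy(vaCopy, va);      // clone before consuming
 *      ConsumeArgs(vaCopy);      // hypothetical consumer that walks the list
 *      va_end(vaCopy);           // always pair va_copy with va_end
 */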
2685