VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/VMMR0.cpp@ 60441

Last change on this file since 60441 was 60398, checked in by vboxsync, 9 years ago

VMM/APIC: Get rid of specialized R0 code and clean up ordering issues.
It's still not nice that CPUMR3Reset() momentarily gets an un-initialized APIC base MSR until
it's re-cached again using CPUMR3InitCompleted() but I hope to eventually get rid of this
caching business entirely once the old APIC infrastructure can be kicked out.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 80.9 KB
Line 
1/* $Id: VMMR0.cpp 60398 2016-04-08 16:29:01Z vboxsync $ */
2/** @file
3 * VMM - Host Context Ring 0.
4 */
5
6/*
7 * Copyright (C) 2006-2015 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_VMM
23#include <VBox/vmm/vmm.h>
24#include <VBox/sup.h>
25#include <VBox/vmm/trpm.h>
26#include <VBox/vmm/cpum.h>
27#include <VBox/vmm/pdmapi.h>
28#include <VBox/vmm/pgm.h>
29#include <VBox/vmm/stam.h>
30#include <VBox/vmm/tm.h>
31#include "VMMInternal.h"
32#include <VBox/vmm/vm.h>
33#ifdef VBOX_WITH_PCI_PASSTHROUGH
34# include <VBox/vmm/pdmpci.h>
35#endif
36#ifdef VBOX_WITH_NEW_APIC
37# include <VBox/vmm/apic.h>
38#endif
39
40#include <VBox/vmm/gvmm.h>
41#include <VBox/vmm/gmm.h>
42#include <VBox/vmm/gim.h>
43#include <VBox/intnet.h>
44#include <VBox/vmm/hm.h>
45#include <VBox/param.h>
46#include <VBox/err.h>
47#include <VBox/version.h>
48#include <VBox/log.h>
49
50#include <iprt/asm-amd64-x86.h>
51#include <iprt/assert.h>
52#include <iprt/crc.h>
53#include <iprt/mp.h>
54#include <iprt/once.h>
55#include <iprt/stdarg.h>
56#include <iprt/string.h>
57#include <iprt/thread.h>
58#include <iprt/timer.h>
59
60#include "dtrace/VBoxVMM.h"
61
62
63#if defined(_MSC_VER) && defined(RT_ARCH_AMD64) /** @todo check this with with VC7! */
64# pragma intrinsic(_AddressOfReturnAddress)
65#endif
66
67#if defined(RT_OS_DARWIN) && ARCH_BITS == 32
68# error "32-bit darwin is no longer supported. Go back to 4.3 or earlier!"
69#endif
70
71
72
73/*********************************************************************************************************************************
74* Defined Constants And Macros *
75*********************************************************************************************************************************/
76/** @def VMM_CHECK_SMAP_SETUP
77 * SMAP check setup. */
78/** @def VMM_CHECK_SMAP_CHECK
79 * Checks that the AC flag is set if SMAP is enabled. If AC is not set,
80 * it will be logged and @a a_BadExpr is executed. */
81/** @def VMM_CHECK_SMAP_CHECK2
82 * Checks that the AC flag is set if SMAP is enabled. If AC is not set, it will
83 * be logged, written to the VMs assertion text buffer, and @a a_BadExpr is
84 * executed. */
85#if defined(VBOX_STRICT) || 1
86# define VMM_CHECK_SMAP_SETUP() uint32_t const fKernelFeatures = SUPR0GetKernelFeatures()
87# define VMM_CHECK_SMAP_CHECK(a_BadExpr) \
88 do { \
89 if (fKernelFeatures & SUPKERNELFEATURES_SMAP) \
90 { \
91 RTCCUINTREG fEflCheck = ASMGetFlags(); \
92 if (RT_LIKELY(fEflCheck & X86_EFL_AC)) \
93 { /* likely */ } \
94 else \
95 { \
96 SUPR0Printf("%s, line %d: EFLAGS.AC is clear! (%#x)\n", __FUNCTION__, __LINE__, (uint32_t)fEflCheck); \
97 a_BadExpr; \
98 } \
99 } \
100 } while (0)
101# define VMM_CHECK_SMAP_CHECK2(a_pVM, a_BadExpr) \
102 do { \
103 if (fKernelFeatures & SUPKERNELFEATURES_SMAP) \
104 { \
105 RTCCUINTREG fEflCheck = ASMGetFlags(); \
106 if (RT_LIKELY(fEflCheck & X86_EFL_AC)) \
107 { /* likely */ } \
108 else \
109 { \
110 SUPR0BadContext((a_pVM) ? (a_pVM)->pSession : NULL, __FILE__, __LINE__, "EFLAGS.AC is zero!"); \
111 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg1, sizeof(pVM->vmm.s.szRing0AssertMsg1), \
112 "%s, line %d: EFLAGS.AC is clear! (%#x)\n", __FUNCTION__, __LINE__, (uint32_t)fEflCheck); \
113 a_BadExpr; \
114 } \
115 } \
116 } while (0)
117#else
118# define VMM_CHECK_SMAP_SETUP() uint32_t const fKernelFeatures = 0
119# define VMM_CHECK_SMAP_CHECK(a_BadExpr) NOREF(fKernelFeatures)
120# define VMM_CHECK_SMAP_CHECK2(a_pVM, a_BadExpr) NOREF(fKernelFeatures)
121#endif
122
123
124/*********************************************************************************************************************************
125* Internal Functions *
126*********************************************************************************************************************************/
127RT_C_DECLS_BEGIN
128#if defined(RT_ARCH_X86) && (defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD))
129extern uint64_t __udivdi3(uint64_t, uint64_t);
130extern uint64_t __umoddi3(uint64_t, uint64_t);
131#endif
132RT_C_DECLS_END
133
134
135/*********************************************************************************************************************************
136* Global Variables *
137*********************************************************************************************************************************/
138/** Drag in necessary library bits.
139 * The runtime lives here (in VMMR0.r0) and VBoxDD*R0.r0 links against us. */
140PFNRT g_VMMR0Deps[] =
141{
142 (PFNRT)RTCrc32,
143 (PFNRT)RTOnce,
144#if defined(RT_ARCH_X86) && (defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD))
145 (PFNRT)__udivdi3,
146 (PFNRT)__umoddi3,
147#endif
148 NULL
149};
150
151#ifdef RT_OS_SOLARIS
152/* Dependency information for the native solaris loader. */
153extern "C" { char _depends_on[] = "vboxdrv"; }
154#endif
155
156
157
158/**
159 * Initialize the module.
160 * This is called when we're first loaded.
161 *
162 * @returns 0 on success.
163 * @returns VBox status on failure.
164 * @param hMod Image handle for use in APIs.
165 */
166DECLEXPORT(int) ModuleInit(void *hMod)
167{
168 VMM_CHECK_SMAP_SETUP();
169 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
170
171#ifdef VBOX_WITH_DTRACE_R0
172 /*
173 * The first thing to do is register the static tracepoints.
174 * (Deregistration is automatic.)
175 */
176 int rc2 = SUPR0TracerRegisterModule(hMod, &g_VTGObjHeader);
177 if (RT_FAILURE(rc2))
178 return rc2;
179#endif
180 LogFlow(("ModuleInit:\n"));
181
182#ifdef VBOX_WITH_64ON32_CMOS_DEBUG
183 /*
184 * Display the CMOS debug code.
185 */
186 ASMOutU8(0x72, 0x03);
187 uint8_t bDebugCode = ASMInU8(0x73);
188 LogRel(("CMOS Debug Code: %#x (%d)\n", bDebugCode, bDebugCode));
189 RTLogComPrintf("CMOS Debug Code: %#x (%d)\n", bDebugCode, bDebugCode);
190#endif
191
192 /*
193 * Initialize the VMM, GVMM, GMM, HM, PGM (Darwin) and INTNET.
194 */
195 int rc = vmmInitFormatTypes();
196 if (RT_SUCCESS(rc))
197 {
198 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
199 rc = GVMMR0Init();
200 if (RT_SUCCESS(rc))
201 {
202 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
203 rc = GMMR0Init();
204 if (RT_SUCCESS(rc))
205 {
206 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
207 rc = HMR0Init();
208 if (RT_SUCCESS(rc))
209 {
210 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
211 rc = PGMRegisterStringFormatTypes();
212 if (RT_SUCCESS(rc))
213 {
214 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
215#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
216 rc = PGMR0DynMapInit();
217#endif
218 if (RT_SUCCESS(rc))
219 {
220 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
221 rc = IntNetR0Init();
222 if (RT_SUCCESS(rc))
223 {
224#ifdef VBOX_WITH_PCI_PASSTHROUGH
225 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
226 rc = PciRawR0Init();
227#endif
228 if (RT_SUCCESS(rc))
229 {
230 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
231 rc = CPUMR0ModuleInit();
232 if (RT_SUCCESS(rc))
233 {
234#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
235 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
236 rc = vmmR0TripleFaultHackInit();
237 if (RT_SUCCESS(rc))
238#endif
239 {
240 VMM_CHECK_SMAP_CHECK(rc = VERR_VMM_SMAP_BUT_AC_CLEAR);
241 if (RT_SUCCESS(rc))
242 {
243 LogFlow(("ModuleInit: returns success.\n"));
244 return VINF_SUCCESS;
245 }
246 }
247
248 /*
249 * Bail out.
250 */
251#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
252 vmmR0TripleFaultHackTerm();
253#endif
254 }
255 else
256 LogRel(("ModuleInit: CPUMR0ModuleInit -> %Rrc\n", rc));
257#ifdef VBOX_WITH_PCI_PASSTHROUGH
258 PciRawR0Term();
259#endif
260 }
261 else
262 LogRel(("ModuleInit: PciRawR0Init -> %Rrc\n", rc));
263 IntNetR0Term();
264 }
265 else
266 LogRel(("ModuleInit: IntNetR0Init -> %Rrc\n", rc));
267#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
268 PGMR0DynMapTerm();
269#endif
270 }
271 else
272 LogRel(("ModuleInit: PGMR0DynMapInit -> %Rrc\n", rc));
273 PGMDeregisterStringFormatTypes();
274 }
275 else
276 LogRel(("ModuleInit: PGMRegisterStringFormatTypes -> %Rrc\n", rc));
277 HMR0Term();
278 }
279 else
280 LogRel(("ModuleInit: HMR0Init -> %Rrc\n", rc));
281 GMMR0Term();
282 }
283 else
284 LogRel(("ModuleInit: GMMR0Init -> %Rrc\n", rc));
285 GVMMR0Term();
286 }
287 else
288 LogRel(("ModuleInit: GVMMR0Init -> %Rrc\n", rc));
289 vmmTermFormatTypes();
290 }
291 else
292 LogRel(("ModuleInit: vmmInitFormatTypes -> %Rrc\n", rc));
293
294 LogFlow(("ModuleInit: failed %Rrc\n", rc));
295 return rc;
296}
297
298
299/**
300 * Terminate the module.
301 * This is called when we're finally unloaded.
302 *
303 * @param hMod Image handle for use in APIs.
304 */
305DECLEXPORT(void) ModuleTerm(void *hMod)
306{
307 NOREF(hMod);
308 LogFlow(("ModuleTerm:\n"));
309
310 /*
311 * Terminate the CPUM module (Local APIC cleanup).
312 */
313 CPUMR0ModuleTerm();
314
315 /*
316 * Terminate the internal network service.
317 */
318 IntNetR0Term();
319
320 /*
321 * PGM (Darwin), HM and PciRaw global cleanup.
322 */
323#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
324 PGMR0DynMapTerm();
325#endif
326#ifdef VBOX_WITH_PCI_PASSTHROUGH
327 PciRawR0Term();
328#endif
329 PGMDeregisterStringFormatTypes();
330 HMR0Term();
331#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
332 vmmR0TripleFaultHackTerm();
333#endif
334
335 /*
336 * Destroy the GMM and GVMM instances.
337 */
338 GMMR0Term();
339 GVMMR0Term();
340
341 vmmTermFormatTypes();
342
343 LogFlow(("ModuleTerm: returns\n"));
344}
345
346
347/**
348 * Initiates the R0 driver for a particular VM instance.
349 *
350 * @returns VBox status code.
351 *
352 * @param pVM The cross context VM structure.
353 * @param uSvnRev The SVN revision of the ring-3 part.
354 * @param uBuildType Build type indicator.
355 * @thread EMT.
356 */
357static int vmmR0InitVM(PVM pVM, uint32_t uSvnRev, uint32_t uBuildType)
358{
359 VMM_CHECK_SMAP_SETUP();
360 VMM_CHECK_SMAP_CHECK(return VERR_VMM_SMAP_BUT_AC_CLEAR);
361
362 /*
363 * Match the SVN revisions and build type.
364 */
365 if (uSvnRev != VMMGetSvnRev())
366 {
367 LogRel(("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev()));
368 SUPR0Printf("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev());
369 return VERR_VMM_R0_VERSION_MISMATCH;
370 }
371 if (uBuildType != vmmGetBuildType())
372 {
373 LogRel(("VMMR0InitVM: Build type mismatch, r3=%#x r0=%#x\n", uBuildType, vmmGetBuildType()));
374 SUPR0Printf("VMMR0InitVM: Build type mismatch, r3=%#x r0=%#x\n", uBuildType, vmmGetBuildType());
375 return VERR_VMM_R0_VERSION_MISMATCH;
376 }
377 if ( !VALID_PTR(pVM)
378 || pVM->pVMR0 != pVM)
379 return VERR_INVALID_PARAMETER;
380
381
382#ifdef LOG_ENABLED
383 /*
384 * Register the EMT R0 logger instance for VCPU 0.
385 */
386 PVMCPU pVCpu = &pVM->aCpus[0];
387
388 PVMMR0LOGGER pR0Logger = pVCpu->vmm.s.pR0LoggerR0;
389 if (pR0Logger)
390 {
391# if 0 /* testing of the logger. */
392 LogCom(("vmmR0InitVM: before %p\n", RTLogDefaultInstance()));
393 LogCom(("vmmR0InitVM: pfnFlush=%p actual=%p\n", pR0Logger->Logger.pfnFlush, vmmR0LoggerFlush));
394 LogCom(("vmmR0InitVM: pfnLogger=%p actual=%p\n", pR0Logger->Logger.pfnLogger, vmmR0LoggerWrapper));
395 LogCom(("vmmR0InitVM: offScratch=%d fFlags=%#x fDestFlags=%#x\n", pR0Logger->Logger.offScratch, pR0Logger->Logger.fFlags, pR0Logger->Logger.fDestFlags));
396
397 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
398 LogCom(("vmmR0InitVM: after %p reg\n", RTLogDefaultInstance()));
399 RTLogSetDefaultInstanceThread(NULL, pVM->pSession);
400 LogCom(("vmmR0InitVM: after %p dereg\n", RTLogDefaultInstance()));
401
402 pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
403 LogCom(("vmmR0InitVM: returned successfully from direct logger call.\n"));
404 pR0Logger->Logger.pfnFlush(&pR0Logger->Logger);
405 LogCom(("vmmR0InitVM: returned successfully from direct flush call.\n"));
406
407 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
408 LogCom(("vmmR0InitVM: after %p reg2\n", RTLogDefaultInstance()));
409 pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
410 LogCom(("vmmR0InitVM: returned successfully from direct logger call (2). offScratch=%d\n", pR0Logger->Logger.offScratch));
411 RTLogSetDefaultInstanceThread(NULL, pVM->pSession);
412 LogCom(("vmmR0InitVM: after %p dereg2\n", RTLogDefaultInstance()));
413
414 RTLogLoggerEx(&pR0Logger->Logger, 0, ~0U, "hello ring-0 logger (RTLogLoggerEx)\n");
415 LogCom(("vmmR0InitVM: RTLogLoggerEx returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));
416
417 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
418 RTLogPrintf("hello ring-0 logger (RTLogPrintf)\n");
419 LogCom(("vmmR0InitVM: RTLogPrintf returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));
420# endif
421 Log(("Switching to per-thread logging instance %p (key=%p)\n", &pR0Logger->Logger, pVM->pSession));
422 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
423 pR0Logger->fRegistered = true;
424 }
425#endif /* LOG_ENABLED */
426
427 /*
428 * Check if the host supports high resolution timers or not.
429 */
430 if ( pVM->vmm.s.fUsePeriodicPreemptionTimers
431 && !RTTimerCanDoHighResolution())
432 pVM->vmm.s.fUsePeriodicPreemptionTimers = false;
433
434 /*
435 * Initialize the per VM data for GVMM and GMM.
436 */
437 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
438 int rc = GVMMR0InitVM(pVM);
439// if (RT_SUCCESS(rc))
440// rc = GMMR0InitPerVMData(pVM);
441 if (RT_SUCCESS(rc))
442 {
443 /*
444 * Init HM, CPUM and PGM (Darwin only).
445 */
446 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
447 rc = HMR0InitVM(pVM);
448 if (RT_SUCCESS(rc))
449 VMM_CHECK_SMAP_CHECK2(pVM, rc = VERR_VMM_RING0_ASSERTION); /* CPUR0InitVM will otherwise panic the host */
450 if (RT_SUCCESS(rc))
451 {
452 rc = CPUMR0InitVM(pVM);
453 if (RT_SUCCESS(rc))
454 {
455 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
456#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
457 rc = PGMR0DynMapInitVM(pVM);
458#endif
459 if (RT_SUCCESS(rc))
460 {
461 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
462#ifdef VBOX_WITH_PCI_PASSTHROUGH
463 rc = PciRawR0InitVM(pVM);
464#endif
465 if (RT_SUCCESS(rc))
466 {
467 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
468 rc = GIMR0InitVM(pVM);
469 if (RT_SUCCESS(rc))
470 {
471 VMM_CHECK_SMAP_CHECK2(pVM, rc = VERR_VMM_RING0_ASSERTION);
472 if (RT_SUCCESS(rc))
473 {
474 GVMMR0DoneInitVM(pVM);
475 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
476 return rc;
477 }
478
479 /* bail out*/
480 GIMR0TermVM(pVM);
481 }
482#ifdef VBOX_WITH_PCI_PASSTHROUGH
483 PciRawR0TermVM(pVM);
484#endif
485 }
486 }
487 }
488 HMR0TermVM(pVM);
489 }
490 }
491
492 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pVM->pSession);
493 return rc;
494}
495
496
497/**
498 * Terminates the R0 bits for a particular VM instance.
499 *
500 * This is normally called by ring-3 as part of the VM termination process, but
501 * may alternatively be called during the support driver session cleanup when
502 * the VM object is destroyed (see GVMM).
503 *
504 * @returns VBox status code.
505 *
506 * @param pVM The cross context VM structure.
507 * @param pGVM Pointer to the global VM structure. Optional.
508 * @thread EMT or session clean up thread.
509 */
510VMMR0_INT_DECL(int) VMMR0TermVM(PVM pVM, PGVM pGVM)
511{
512#ifdef VBOX_WITH_PCI_PASSTHROUGH
513 PciRawR0TermVM(pVM);
514#endif
515
516 /*
517 * Tell GVMM what we're up to and check that we only do this once.
518 */
519 if (GVMMR0DoingTermVM(pVM, pGVM))
520 {
521 GIMR0TermVM(pVM);
522
523 /** @todo I wish to call PGMR0PhysFlushHandyPages(pVM, &pVM->aCpus[idCpu])
524 * here to make sure we don't leak any shared pages if we crash... */
525#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
526 PGMR0DynMapTermVM(pVM);
527#endif
528 HMR0TermVM(pVM);
529 }
530
531 /*
532 * Deregister the logger.
533 */
534 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pVM->pSession);
535 return VINF_SUCCESS;
536}
537
538
539/**
540 * VMM ring-0 thread-context callback.
541 *
542 * This does common HM state updating and calls the HM-specific thread-context
543 * callback.
544 *
545 * @param enmEvent The thread-context event.
546 * @param pvUser Opaque pointer to the VMCPU.
547 *
548 * @thread EMT(pvUser)
549 */
550static DECLCALLBACK(void) vmmR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, void *pvUser)
551{
552 PVMCPU pVCpu = (PVMCPU)pvUser;
553
554 switch (enmEvent)
555 {
556 case RTTHREADCTXEVENT_IN:
557 {
558 /*
559 * Linux may call us with preemption enabled (really!) but technically we
560 * cannot get preempted here, otherwise we end up in an infinite recursion
561 * scenario (i.e. preempted in resume hook -> preempt hook -> resume hook...
562 * ad infinitum). Let's just disable preemption for now...
563 */
564 /** @todo r=bird: I don't believe the above. The linux code is clearly enabling
565 * preemption after doing the callout (one or two functions up the
566 * call chain). */
567 /** @todo r=ramshankar: See @bugref{5313#c30}. */
568 RTTHREADPREEMPTSTATE ParanoidPreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
569 RTThreadPreemptDisable(&ParanoidPreemptState);
570
571 /* We need to update the VCPU <-> host CPU mapping. */
572 RTCPUID idHostCpu;
573 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
574 pVCpu->iHostCpuSet = iHostCpuSet;
575 ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
576
577 /* In the very unlikely event that the GIP delta for the CPU we're
578 rescheduled needs calculating, try force a return to ring-3.
579 We unfortunately cannot do the measurements right here. */
580 if (RT_UNLIKELY(SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
581 VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
582
583 /* Invoke the HM-specific thread-context callback. */
584 HMR0ThreadCtxCallback(enmEvent, pvUser);
585
586 /* Restore preemption. */
587 RTThreadPreemptRestore(&ParanoidPreemptState);
588 break;
589 }
590
591 case RTTHREADCTXEVENT_OUT:
592 {
593 /* Invoke the HM-specific thread-context callback. */
594 HMR0ThreadCtxCallback(enmEvent, pvUser);
595
596 /*
597 * Sigh. See VMMGetCpu() used by VMCPU_ASSERT_EMT(). We cannot let several VCPUs
598 * have the same host CPU associated with it.
599 */
600 pVCpu->iHostCpuSet = UINT32_MAX;
601 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
602 break;
603 }
604
605 default:
606 /* Invoke the HM-specific thread-context callback. */
607 HMR0ThreadCtxCallback(enmEvent, pvUser);
608 break;
609 }
610}
611
612
613/**
614 * Creates thread switching hook for the current EMT thread.
615 *
616 * This is called by GVMMR0CreateVM and GVMMR0RegisterVCpu. If the host
617 * platform does not implement switcher hooks, no hooks will be create and the
618 * member set to NIL_RTTHREADCTXHOOK.
619 *
620 * @returns VBox status code.
621 * @param pVCpu The cross context virtual CPU structure.
622 * @thread EMT(pVCpu)
623 */
624VMMR0_INT_DECL(int) VMMR0ThreadCtxHookCreateForEmt(PVMCPU pVCpu)
625{
626 VMCPU_ASSERT_EMT(pVCpu);
627 Assert(pVCpu->vmm.s.hCtxHook == NIL_RTTHREADCTXHOOK);
628
629 int rc = RTThreadCtxHookCreate(&pVCpu->vmm.s.hCtxHook, 0, vmmR0ThreadCtxCallback, pVCpu);
630 if (RT_SUCCESS(rc))
631 return rc;
632
633 pVCpu->vmm.s.hCtxHook = NIL_RTTHREADCTXHOOK;
634 if (rc == VERR_NOT_SUPPORTED)
635 return VINF_SUCCESS;
636
637 LogRelMax(32, ("RTThreadCtxHookCreate failed! rc=%Rrc pVCpu=%p idCpu=%RU32\n", rc, pVCpu, pVCpu->idCpu));
638 return VINF_SUCCESS; /* Just ignore it, we can live without context hooks. */
639}
640
641
642/**
643 * Destroys the thread switching hook for the specified VCPU.
644 *
645 * @param pVCpu The cross context virtual CPU structure.
646 * @remarks Can be called from any thread.
647 */
648VMMR0_INT_DECL(void) VMMR0ThreadCtxHookDestroyForEmt(PVMCPU pVCpu)
649{
650 int rc = RTThreadCtxHookDestroy(pVCpu->vmm.s.hCtxHook);
651 AssertRC(rc);
652}
653
654
655/**
656 * Disables the thread switching hook for this VCPU (if we got one).
657 *
658 * @param pVCpu The cross context virtual CPU structure.
659 * @thread EMT(pVCpu)
660 *
661 * @remarks This also clears VMCPU::idHostCpu, so the mapping is invalid after
662 * this call. This means you have to be careful with what you do!
663 */
664VMMR0_INT_DECL(void) VMMR0ThreadCtxHookDisable(PVMCPU pVCpu)
665{
666 /*
667 * Clear the VCPU <-> host CPU mapping as we've left HM context.
668 * @bugref{7726#c19} explains the need for this trick:
669 *
670 * hmR0VmxCallRing3Callback/hmR0SvmCallRing3Callback &
671 * hmR0VmxLeaveSession/hmR0SvmLeaveSession disables context hooks during
672 * longjmp & normal return to ring-3, which opens a window where we may be
673 * rescheduled without changing VMCPUID::idHostCpu and cause confusion if
674 * the CPU starts executing a different EMT. Both functions first disables
675 * preemption and then calls HMR0LeaveCpu which invalids idHostCpu, leaving
676 * an opening for getting preempted.
677 */
678 /** @todo Make HM not need this API! Then we could leave the hooks enabled
679 * all the time. */
680 /** @todo move this into the context hook disabling if(). */
681 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
682
683 /*
684 * Disable the context hook, if we got one.
685 */
686 if (pVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
687 {
688 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
689 int rc = RTThreadCtxHookDisable(pVCpu->vmm.s.hCtxHook);
690 AssertRC(rc);
691 }
692}
693
694
695/**
696 * Internal version of VMMR0ThreadCtxHooksAreRegistered.
697 *
698 * @returns true if registered, false otherwise.
699 * @param pVCpu The cross context virtual CPU structure.
700 */
701DECLINLINE(bool) vmmR0ThreadCtxHookIsEnabled(PVMCPU pVCpu)
702{
703 return RTThreadCtxHookIsEnabled(pVCpu->vmm.s.hCtxHook);
704}
705
706
707/**
708 * Whether thread-context hooks are registered for this VCPU.
709 *
710 * @returns true if registered, false otherwise.
711 * @param pVCpu The cross context virtual CPU structure.
712 */
713VMMR0_INT_DECL(bool) VMMR0ThreadCtxHookIsEnabled(PVMCPU pVCpu)
714{
715 return vmmR0ThreadCtxHookIsEnabled(pVCpu);
716}
717
718
719#ifdef VBOX_WITH_STATISTICS
720/**
721 * Record return code statistics
722 * @param pVM The cross context VM structure.
723 * @param pVCpu The cross context virtual CPU structure.
724 * @param rc The status code.
725 */
726static void vmmR0RecordRC(PVM pVM, PVMCPU pVCpu, int rc)
727{
728 /*
729 * Collect statistics.
730 */
731 switch (rc)
732 {
733 case VINF_SUCCESS:
734 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetNormal);
735 break;
736 case VINF_EM_RAW_INTERRUPT:
737 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterrupt);
738 break;
739 case VINF_EM_RAW_INTERRUPT_HYPER:
740 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptHyper);
741 break;
742 case VINF_EM_RAW_GUEST_TRAP:
743 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGuestTrap);
744 break;
745 case VINF_EM_RAW_RING_SWITCH:
746 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitch);
747 break;
748 case VINF_EM_RAW_RING_SWITCH_INT:
749 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitchInt);
750 break;
751 case VINF_EM_RAW_STALE_SELECTOR:
752 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetStaleSelector);
753 break;
754 case VINF_EM_RAW_IRET_TRAP:
755 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIRETTrap);
756 break;
757 case VINF_IOM_R3_IOPORT_READ:
758 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIORead);
759 break;
760 case VINF_IOM_R3_IOPORT_WRITE:
761 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOWrite);
762 break;
763 case VINF_IOM_R3_MMIO_READ:
764 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIORead);
765 break;
766 case VINF_IOM_R3_MMIO_WRITE:
767 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOWrite);
768 break;
769 case VINF_IOM_R3_MMIO_READ_WRITE:
770 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOReadWrite);
771 break;
772 case VINF_PATM_HC_MMIO_PATCH_READ:
773 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchRead);
774 break;
775 case VINF_PATM_HC_MMIO_PATCH_WRITE:
776 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchWrite);
777 break;
778 case VINF_CPUM_R3_MSR_READ:
779 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMSRRead);
780 break;
781 case VINF_CPUM_R3_MSR_WRITE:
782 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMSRWrite);
783 break;
784 case VINF_EM_RAW_EMULATE_INSTR:
785 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetEmulate);
786 break;
787 case VINF_EM_RAW_EMULATE_IO_BLOCK:
788 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOBlockEmulate);
789 break;
790 case VINF_PATCH_EMULATE_INSTR:
791 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchEmulate);
792 break;
793 case VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT:
794 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetLDTFault);
795 break;
796 case VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT:
797 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGDTFault);
798 break;
799 case VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT:
800 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIDTFault);
801 break;
802 case VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT:
803 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTSSFault);
804 break;
805 case VINF_CSAM_PENDING_ACTION:
806 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCSAMTask);
807 break;
808 case VINF_PGM_SYNC_CR3:
809 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetSyncCR3);
810 break;
811 case VINF_PATM_PATCH_INT3:
812 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchInt3);
813 break;
814 case VINF_PATM_PATCH_TRAP_PF:
815 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchPF);
816 break;
817 case VINF_PATM_PATCH_TRAP_GP:
818 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchGP);
819 break;
820 case VINF_PATM_PENDING_IRQ_AFTER_IRET:
821 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchIretIRQ);
822 break;
823 case VINF_EM_RESCHEDULE_REM:
824 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRescheduleREM);
825 break;
826 case VINF_EM_RAW_TO_R3:
827 if (VM_FF_IS_PENDING(pVM, VM_FF_TM_VIRTUAL_SYNC))
828 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3TMVirt);
829 else if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_NEED_HANDY_PAGES))
830 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3HandyPages);
831 else if (VM_FF_IS_PENDING(pVM, VM_FF_PDM_QUEUES))
832 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3PDMQueues);
833 else if (VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS))
834 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Rendezvous);
835 else if (VM_FF_IS_PENDING(pVM, VM_FF_PDM_DMA))
836 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3DMA);
837 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TIMER))
838 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Timer);
839 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PDM_CRITSECT))
840 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3CritSect);
841 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TO_R3))
842 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3);
843 else
844 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Unknown);
845 break;
846
847 case VINF_EM_RAW_TIMER_PENDING:
848 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTimerPending);
849 break;
850 case VINF_EM_RAW_INTERRUPT_PENDING:
851 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptPending);
852 break;
853 case VINF_VMM_CALL_HOST:
854 switch (pVCpu->vmm.s.enmCallRing3Operation)
855 {
856 case VMMCALLRING3_PDM_CRIT_SECT_ENTER:
857 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPDMCritSectEnter);
858 break;
859 case VMMCALLRING3_PDM_LOCK:
860 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPDMLock);
861 break;
862 case VMMCALLRING3_PGM_POOL_GROW:
863 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMPoolGrow);
864 break;
865 case VMMCALLRING3_PGM_LOCK:
866 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMLock);
867 break;
868 case VMMCALLRING3_PGM_MAP_CHUNK:
869 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMMapChunk);
870 break;
871 case VMMCALLRING3_PGM_ALLOCATE_HANDY_PAGES:
872 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMAllocHandy);
873 break;
874 case VMMCALLRING3_REM_REPLAY_HANDLER_NOTIFICATIONS:
875 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallRemReplay);
876 break;
877 case VMMCALLRING3_VMM_LOGGER_FLUSH:
878 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallLogFlush);
879 break;
880 case VMMCALLRING3_VM_SET_ERROR:
881 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallVMSetError);
882 break;
883 case VMMCALLRING3_VM_SET_RUNTIME_ERROR:
884 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallVMSetRuntimeError);
885 break;
886 case VMMCALLRING3_VM_R0_ASSERTION:
887 default:
888 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCallRing3);
889 break;
890 }
891 break;
892 case VINF_PATM_DUPLICATE_FUNCTION:
893 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPATMDuplicateFn);
894 break;
895 case VINF_PGM_CHANGE_MODE:
896 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMChangeMode);
897 break;
898 case VINF_PGM_POOL_FLUSH_PENDING:
899 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMFlushPending);
900 break;
901 case VINF_EM_PENDING_REQUEST:
902 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPendingRequest);
903 break;
904 case VINF_EM_HM_PATCH_TPR_INSTR:
905 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchTPR);
906 break;
907 default:
908 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMisc);
909 break;
910 }
911}
912#endif /* VBOX_WITH_STATISTICS */
913
914
915/**
916 * The Ring 0 entry point, called by the fast-ioctl path.
917 *
918 * @param pVM The cross context VM structure.
919 * The return code is stored in pVM->vmm.s.iLastGZRc.
920 * @param idCpu The Virtual CPU ID of the calling EMT.
921 * @param enmOperation Which operation to execute.
922 * @remarks Assume called with interrupts _enabled_.
923 */
924VMMR0DECL(void) VMMR0EntryFast(PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation)
925{
926 /*
927 * Validation.
928 */
929 if (RT_UNLIKELY(idCpu >= pVM->cCpus))
930 return;
931 PVMCPU pVCpu = &pVM->aCpus[idCpu];
932 if (RT_UNLIKELY(pVCpu->hNativeThreadR0 != RTThreadNativeSelf()))
933 return;
934 VMM_CHECK_SMAP_SETUP();
935 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
936
937 /*
938 * Perform requested operation.
939 */
940 switch (enmOperation)
941 {
942 /*
943 * Switch to GC and run guest raw mode code.
944 * Disable interrupts before doing the world switch.
945 */
946 case VMMR0_DO_RAW_RUN:
947 {
948#ifdef VBOX_WITH_RAW_MODE
949# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
950 /* Some safety precautions first. */
951 if (RT_UNLIKELY(!PGMGetHyperCR3(pVCpu)))
952 {
953 pVCpu->vmm.s.iLastGZRc = VERR_PGM_NO_CR3_SHADOW_ROOT;
954 break;
955 }
956# endif
957
958 /*
959 * Disable preemption.
960 */
961 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
962 RTThreadPreemptDisable(&PreemptState);
963
964 /*
965 * Get the host CPU identifiers, make sure they are valid and that
966 * we've got a TSC delta for the CPU.
967 */
968 RTCPUID idHostCpu;
969 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
970 if (RT_LIKELY( iHostCpuSet < RTCPUSET_MAX_CPUS
971 && SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
972 {
973 /*
974 * Commit the CPU identifiers and update the periodict preemption timer if it's active.
975 */
976# ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
977 CPUMR0SetLApic(pVCpu, iHostCpuSet);
978# endif
979 pVCpu->iHostCpuSet = iHostCpuSet;
980 ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
981
982 if (pVM->vmm.s.fUsePeriodicPreemptionTimers)
983 GVMMR0SchedUpdatePeriodicPreemptionTimer(pVM, pVCpu->idHostCpu, TMCalcHostTimerFrequency(pVM, pVCpu));
984
985 /*
986 * We might need to disable VT-x if the active switcher turns off paging.
987 */
988 bool fVTxDisabled;
989 int rc = HMR0EnterSwitcher(pVM, pVM->vmm.s.enmSwitcher, &fVTxDisabled);
990 if (RT_SUCCESS(rc))
991 {
992 /*
993 * Disable interrupts and run raw-mode code. The loop is for efficiently
994 * dispatching tracepoints that fired in raw-mode context.
995 */
996 RTCCUINTREG uFlags = ASMIntDisableFlags();
997
998 for (;;)
999 {
1000 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);
1001 TMNotifyStartOfExecution(pVCpu);
1002
1003 rc = pVM->vmm.s.pfnR0ToRawMode(pVM);
1004 pVCpu->vmm.s.iLastGZRc = rc;
1005
1006 TMNotifyEndOfExecution(pVCpu);
1007 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
1008
1009 if (rc != VINF_VMM_CALL_TRACER)
1010 break;
1011 SUPR0TracerUmodProbeFire(pVM->pSession, &pVCpu->vmm.s.TracerCtx);
1012 }
1013
1014 /*
1015 * Re-enable VT-x before we dispatch any pending host interrupts and
1016 * re-enables interrupts.
1017 */
1018 HMR0LeaveSwitcher(pVM, fVTxDisabled);
1019
1020 if ( rc == VINF_EM_RAW_INTERRUPT
1021 || rc == VINF_EM_RAW_INTERRUPT_HYPER)
1022 TRPMR0DispatchHostInterrupt(pVM);
1023
1024 ASMSetFlags(uFlags);
1025
1026 /* Fire dtrace probe and collect statistics. */
1027 VBOXVMM_R0_VMM_RETURN_TO_RING3_RC(pVCpu, CPUMQueryGuestCtxPtr(pVCpu), rc);
1028# ifdef VBOX_WITH_STATISTICS
1029 STAM_COUNTER_INC(&pVM->vmm.s.StatRunRC);
1030 vmmR0RecordRC(pVM, pVCpu, rc);
1031# endif
1032 }
1033 else
1034 pVCpu->vmm.s.iLastGZRc = rc;
1035
1036 /*
1037 * Invalidate the host CPU identifiers as we restore preemption.
1038 */
1039 pVCpu->iHostCpuSet = UINT32_MAX;
1040 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1041
1042 RTThreadPreemptRestore(&PreemptState);
1043 }
1044 /*
1045 * Invalid CPU set index or TSC delta in need of measuring.
1046 */
1047 else
1048 {
1049 RTThreadPreemptRestore(&PreemptState);
1050 if (iHostCpuSet < RTCPUSET_MAX_CPUS)
1051 {
1052 int rc = SUPR0TscDeltaMeasureBySetIndex(pVM->pSession, iHostCpuSet, 0 /*fFlags*/,
1053 2 /*cMsWaitRetry*/, 5*RT_MS_1SEC /*cMsWaitThread*/,
1054 0 /*default cTries*/);
1055 if (RT_SUCCESS(rc) || rc == VERR_CPU_OFFLINE)
1056 pVCpu->vmm.s.iLastGZRc = VINF_EM_RAW_TO_R3;
1057 else
1058 pVCpu->vmm.s.iLastGZRc = rc;
1059 }
1060 else
1061 pVCpu->vmm.s.iLastGZRc = VERR_INVALID_CPU_INDEX;
1062 }
1063
1064#else /* !VBOX_WITH_RAW_MODE */
1065 pVCpu->vmm.s.iLastGZRc = VERR_RAW_MODE_NOT_SUPPORTED;
1066#endif
1067 break;
1068 }
1069
1070 /*
1071 * Run guest code using the available hardware acceleration technology.
1072 */
1073 case VMMR0_DO_HM_RUN:
1074 {
1075 /*
1076 * Disable preemption.
1077 */
1078 Assert(!vmmR0ThreadCtxHookIsEnabled(pVCpu));
1079 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
1080 RTThreadPreemptDisable(&PreemptState);
1081
1082 /*
1083 * Get the host CPU identifiers, make sure they are valid and that
1084 * we've got a TSC delta for the CPU.
1085 */
1086 RTCPUID idHostCpu;
1087 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
1088 if (RT_LIKELY( iHostCpuSet < RTCPUSET_MAX_CPUS
1089 && SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
1090 {
1091 pVCpu->iHostCpuSet = iHostCpuSet;
1092 ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
1093
1094 /*
1095 * Update the periodic preemption timer if it's active.
1096 */
1097 if (pVM->vmm.s.fUsePeriodicPreemptionTimers)
1098 GVMMR0SchedUpdatePeriodicPreemptionTimer(pVM, pVCpu->idHostCpu, TMCalcHostTimerFrequency(pVM, pVCpu));
1099 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1100
1101#ifdef LOG_ENABLED
1102 /*
1103 * Ugly: Lazy registration of ring 0 loggers.
1104 */
1105 if (pVCpu->idCpu > 0)
1106 {
1107 PVMMR0LOGGER pR0Logger = pVCpu->vmm.s.pR0LoggerR0;
1108 if ( pR0Logger
1109 && RT_UNLIKELY(!pR0Logger->fRegistered))
1110 {
1111 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
1112 pR0Logger->fRegistered = true;
1113 }
1114 }
1115#endif
1116
1117 int rc;
1118 bool fPreemptRestored = false;
1119 if (!HMR0SuspendPending())
1120 {
1121 /*
1122 * Enable the context switching hook.
1123 */
1124 if (pVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1125 {
1126 Assert(!RTThreadCtxHookIsEnabled(pVCpu->vmm.s.hCtxHook));
1127 int rc2 = RTThreadCtxHookEnable(pVCpu->vmm.s.hCtxHook); AssertRC(rc2);
1128 }
1129
1130 /*
1131 * Enter HM context.
1132 */
1133 rc = HMR0Enter(pVM, pVCpu);
1134 if (RT_SUCCESS(rc))
1135 {
1136 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
1137
1138 /*
1139 * When preemption hooks are in place, enable preemption now that
1140 * we're in HM context.
1141 */
1142 if (vmmR0ThreadCtxHookIsEnabled(pVCpu))
1143 {
1144 fPreemptRestored = true;
1145 RTThreadPreemptRestore(&PreemptState);
1146 }
1147
1148 /*
1149 * Setup the longjmp machinery and execute guest code (calls HMR0RunGuestCode).
1150 */
1151 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1152 rc = vmmR0CallRing3SetJmp(&pVCpu->vmm.s.CallRing3JmpBufR0, HMR0RunGuestCode, pVM, pVCpu);
1153 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1154
1155 /*
1156 * Assert sanity on the way out. Using manual assertions code here as normal
1157 * assertions are going to panic the host since we're outside the setjmp/longjmp zone.
1158 */
1159 if (RT_UNLIKELY( VMCPU_GET_STATE(pVCpu) != VMCPUSTATE_STARTED_HM
1160 && RT_SUCCESS_NP(rc) && rc != VINF_VMM_CALL_HOST ))
1161 {
1162 pVM->vmm.s.szRing0AssertMsg1[0] = '\0';
1163 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2),
1164 "Got VMCPU state %d expected %d.\n", VMCPU_GET_STATE(pVCpu), VMCPUSTATE_STARTED_HM);
1165 rc = VERR_VMM_WRONG_HM_VMCPU_STATE;
1166 }
1167 /** @todo Get rid of this. HM shouldn't disable the context hook. */
1168 else if (RT_UNLIKELY(vmmR0ThreadCtxHookIsEnabled(pVCpu)))
1169 {
1170 pVM->vmm.s.szRing0AssertMsg1[0] = '\0';
1171 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2),
1172 "Thread-context hooks still enabled! VCPU=%p Id=%u rc=%d.\n", pVCpu, pVCpu->idCpu, rc);
1173 rc = VERR_INVALID_STATE;
1174 }
1175
1176 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
1177 }
1178 STAM_COUNTER_INC(&pVM->vmm.s.StatRunRC);
1179
1180 /*
1181 * Invalidate the host CPU identifiers before we disable the context
1182 * hook / restore preemption.
1183 */
1184 pVCpu->iHostCpuSet = UINT32_MAX;
1185 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1186
1187 /*
1188 * Disable context hooks. Due to unresolved cleanup issues, we
1189 * cannot leave the hooks enabled when we return to ring-3.
1190 *
1191 * Note! At the moment HM may also have disabled the hook
1192 * when we get here, but the IPRT API handles that.
1193 */
1194 if (pVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1195 {
1196 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1197 RTThreadCtxHookDisable(pVCpu->vmm.s.hCtxHook);
1198 }
1199 }
1200 /*
1201 * The system is about to go into suspend mode; go back to ring 3.
1202 */
1203 else
1204 {
1205 rc = VINF_EM_RAW_INTERRUPT;
1206 pVCpu->iHostCpuSet = UINT32_MAX;
1207 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1208 }
1209
1210 /** @todo When HM stops messing with the context hook state, we'll disable
1211 * preemption again before the RTThreadCtxHookDisable call. */
1212 if (!fPreemptRestored)
1213 RTThreadPreemptRestore(&PreemptState);
1214
1215 pVCpu->vmm.s.iLastGZRc = rc;
1216
1217 /* Fire dtrace probe and collect statistics. */
1218 VBOXVMM_R0_VMM_RETURN_TO_RING3_HM(pVCpu, CPUMQueryGuestCtxPtr(pVCpu), rc);
1219#ifdef VBOX_WITH_STATISTICS
1220 vmmR0RecordRC(pVM, pVCpu, rc);
1221#endif
1222 }
1223 /*
1224 * Invalid CPU set index or TSC delta in need of measuring.
1225 */
1226 else
1227 {
1228 pVCpu->iHostCpuSet = UINT32_MAX;
1229 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1230 RTThreadPreemptRestore(&PreemptState);
1231 if (iHostCpuSet < RTCPUSET_MAX_CPUS)
1232 {
1233 int rc = SUPR0TscDeltaMeasureBySetIndex(pVM->pSession, iHostCpuSet, 0 /*fFlags*/,
1234 2 /*cMsWaitRetry*/, 5*RT_MS_1SEC /*cMsWaitThread*/,
1235 0 /*default cTries*/);
1236 if (RT_SUCCESS(rc) || rc == VERR_CPU_OFFLINE)
1237 pVCpu->vmm.s.iLastGZRc = VINF_EM_RAW_TO_R3;
1238 else
1239 pVCpu->vmm.s.iLastGZRc = rc;
1240 }
1241 else
1242 pVCpu->vmm.s.iLastGZRc = VERR_INVALID_CPU_INDEX;
1243 }
1244 break;
1245 }
1246
1247 /*
1248 * For profiling.
1249 */
1250 case VMMR0_DO_NOP:
1251 pVCpu->vmm.s.iLastGZRc = VINF_SUCCESS;
1252 break;
1253
1254 /*
1255 * Impossible.
1256 */
1257 default:
1258 AssertMsgFailed(("%#x\n", enmOperation));
1259 pVCpu->vmm.s.iLastGZRc = VERR_NOT_SUPPORTED;
1260 break;
1261 }
1262 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1263}
1264
1265
1266/**
1267 * Validates a session or VM session argument.
1268 *
1269 * @returns true / false accordingly.
1270 * @param pVM The cross context VM structure.
1271 * @param pClaimedSession The session claim to validate.
1272 * @param pSession The session argument.
1273 */
1274DECLINLINE(bool) vmmR0IsValidSession(PVM pVM, PSUPDRVSESSION pClaimedSession, PSUPDRVSESSION pSession)
1275{
1276 /* This must be set! */
1277 if (!pSession)
1278 return false;
1279
1280 /* Only one out of the two. */
1281 if (pVM && pClaimedSession)
1282 return false;
1283 if (pVM)
1284 pClaimedSession = pVM->pSession;
1285 return pClaimedSession == pSession;
1286}
1287
1288
1289/**
1290 * VMMR0EntryEx worker function, either called directly or when ever possible
1291 * called thru a longjmp so we can exit safely on failure.
1292 *
1293 * @returns VBox status code.
1294 * @param pVM The cross context VM structure.
1295 * @param idCpu Virtual CPU ID argument. Must be NIL_VMCPUID if pVM
1296 * is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't
1297 * @param enmOperation Which operation to execute.
1298 * @param pReqHdr This points to a SUPVMMR0REQHDR packet. Optional.
1299 * The support driver validates this if it's present.
1300 * @param u64Arg Some simple constant argument.
1301 * @param pSession The session of the caller.
1302 * @remarks Assume called with interrupts _enabled_.
1303 */
1304static int vmmR0EntryExWorker(PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation, PSUPVMMR0REQHDR pReqHdr, uint64_t u64Arg, PSUPDRVSESSION pSession)
1305{
1306 /*
1307 * Common VM pointer validation.
1308 */
1309 if (pVM)
1310 {
1311 if (RT_UNLIKELY( !VALID_PTR(pVM)
1312 || ((uintptr_t)pVM & PAGE_OFFSET_MASK)))
1313 {
1314 SUPR0Printf("vmmR0EntryExWorker: Invalid pVM=%p! (op=%d)\n", pVM, enmOperation);
1315 return VERR_INVALID_POINTER;
1316 }
1317 if (RT_UNLIKELY( pVM->enmVMState < VMSTATE_CREATING
1318 || pVM->enmVMState > VMSTATE_TERMINATED
1319 || pVM->pVMR0 != pVM))
1320 {
1321 SUPR0Printf("vmmR0EntryExWorker: Invalid pVM=%p:{enmVMState=%d, .pVMR0=%p}! (op=%d)\n",
1322 pVM, pVM->enmVMState, pVM->pVMR0, enmOperation);
1323 return VERR_INVALID_POINTER;
1324 }
1325
1326 if (RT_UNLIKELY(idCpu >= pVM->cCpus && idCpu != NIL_VMCPUID))
1327 {
1328 SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu (%u vs cCpus=%u)\n", idCpu, pVM->cCpus);
1329 return VERR_INVALID_PARAMETER;
1330 }
1331 }
1332 else if (RT_UNLIKELY(idCpu != NIL_VMCPUID))
1333 {
1334 SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu=%u\n", idCpu);
1335 return VERR_INVALID_PARAMETER;
1336 }
1337 VMM_CHECK_SMAP_SETUP();
1338 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
1339 int rc;
1340
1341 switch (enmOperation)
1342 {
1343 /*
1344 * GVM requests
1345 */
1346 case VMMR0_DO_GVMM_CREATE_VM:
1347 if (pVM || u64Arg || idCpu != NIL_VMCPUID)
1348 return VERR_INVALID_PARAMETER;
1349 rc = GVMMR0CreateVMReq((PGVMMCREATEVMREQ)pReqHdr);
1350 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
1351 break;
1352
1353 case VMMR0_DO_GVMM_DESTROY_VM:
1354 if (pReqHdr || u64Arg)
1355 return VERR_INVALID_PARAMETER;
1356 rc = GVMMR0DestroyVM(pVM);
1357 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
1358 break;
1359
1360 case VMMR0_DO_GVMM_REGISTER_VMCPU:
1361 {
1362 if (!pVM)
1363 return VERR_INVALID_PARAMETER;
1364 rc = GVMMR0RegisterVCpu(pVM, idCpu);
1365 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1366 break;
1367 }
1368
1369 case VMMR0_DO_GVMM_SCHED_HALT:
1370 if (pReqHdr)
1371 return VERR_INVALID_PARAMETER;
1372 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1373 rc = GVMMR0SchedHalt(pVM, idCpu, u64Arg);
1374 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1375 break;
1376
1377 case VMMR0_DO_GVMM_SCHED_WAKE_UP:
1378 if (pReqHdr || u64Arg)
1379 return VERR_INVALID_PARAMETER;
1380 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1381 rc = GVMMR0SchedWakeUp(pVM, idCpu);
1382 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1383 break;
1384
1385 case VMMR0_DO_GVMM_SCHED_POKE:
1386 if (pReqHdr || u64Arg)
1387 return VERR_INVALID_PARAMETER;
1388 rc = GVMMR0SchedPoke(pVM, idCpu);
1389 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1390 break;
1391
1392 case VMMR0_DO_GVMM_SCHED_WAKE_UP_AND_POKE_CPUS:
1393 if (u64Arg)
1394 return VERR_INVALID_PARAMETER;
1395 rc = GVMMR0SchedWakeUpAndPokeCpusReq(pVM, (PGVMMSCHEDWAKEUPANDPOKECPUSREQ)pReqHdr);
1396 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1397 break;
1398
1399 case VMMR0_DO_GVMM_SCHED_POLL:
1400 if (pReqHdr || u64Arg > 1)
1401 return VERR_INVALID_PARAMETER;
1402 rc = GVMMR0SchedPoll(pVM, idCpu, !!u64Arg);
1403 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1404 break;
1405
1406 case VMMR0_DO_GVMM_QUERY_STATISTICS:
1407 if (u64Arg)
1408 return VERR_INVALID_PARAMETER;
1409 rc = GVMMR0QueryStatisticsReq(pVM, (PGVMMQUERYSTATISTICSSREQ)pReqHdr);
1410 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1411 break;
1412
1413 case VMMR0_DO_GVMM_RESET_STATISTICS:
1414 if (u64Arg)
1415 return VERR_INVALID_PARAMETER;
1416 rc = GVMMR0ResetStatisticsReq(pVM, (PGVMMRESETSTATISTICSSREQ)pReqHdr);
1417 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1418 break;
1419
1420 /*
1421 * Initialize the R0 part of a VM instance.
1422 */
1423 case VMMR0_DO_VMMR0_INIT:
1424 rc = vmmR0InitVM(pVM, RT_LODWORD(u64Arg), RT_HIDWORD(u64Arg));
1425 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1426 break;
1427
1428 /*
1429 * Terminate the R0 part of a VM instance.
1430 */
1431 case VMMR0_DO_VMMR0_TERM:
1432 rc = VMMR0TermVM(pVM, NULL);
1433 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1434 break;
1435
1436 /*
1437 * Attempt to enable hm mode and check the current setting.
1438 */
1439 case VMMR0_DO_HM_ENABLE:
1440 rc = HMR0EnableAllCpus(pVM);
1441 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1442 break;
1443
1444 /*
1445 * Setup the hardware accelerated session.
1446 */
1447 case VMMR0_DO_HM_SETUP_VM:
1448 rc = HMR0SetupVM(pVM);
1449 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1450 break;
1451
1452 /*
1453 * Switch to RC to execute Hypervisor function.
1454 */
1455 case VMMR0_DO_CALL_HYPERVISOR:
1456 {
1457#ifdef VBOX_WITH_RAW_MODE
1458 /*
1459 * Validate input / context.
1460 */
1461 if (RT_UNLIKELY(idCpu != 0))
1462 return VERR_INVALID_CPU_ID;
1463 if (RT_UNLIKELY(pVM->cCpus != 1))
1464 return VERR_INVALID_PARAMETER;
1465 PVMCPU pVCpu = &pVM->aCpus[idCpu];
1466# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
1467 if (RT_UNLIKELY(!PGMGetHyperCR3(pVCpu)))
1468 return VERR_PGM_NO_CR3_SHADOW_ROOT;
1469# endif
1470
1471 /*
1472 * Disable interrupts.
1473 */
1474 RTCCUINTREG fFlags = ASMIntDisableFlags();
1475
1476 /*
1477 * Get the host CPU identifiers, make sure they are valid and that
1478 * we've got a TSC delta for the CPU.
1479 */
1480 RTCPUID idHostCpu;
1481 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
1482 if (RT_UNLIKELY(iHostCpuSet >= RTCPUSET_MAX_CPUS))
1483 {
1484 ASMSetFlags(fFlags);
1485 return VERR_INVALID_CPU_INDEX;
1486 }
1487 if (RT_UNLIKELY(!SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
1488 {
1489 ASMSetFlags(fFlags);
1490 rc = SUPR0TscDeltaMeasureBySetIndex(pVM->pSession, iHostCpuSet, 0 /*fFlags*/,
1491 2 /*cMsWaitRetry*/, 5*RT_MS_1SEC /*cMsWaitThread*/,
1492 0 /*default cTries*/);
1493 if (RT_FAILURE(rc) && rc != VERR_CPU_OFFLINE)
1494 {
1495 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1496 return rc;
1497 }
1498 }
1499
1500 /*
1501 * Commit the CPU identifiers.
1502 */
1503# ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
1504 CPUMR0SetLApic(pVCpu, iHostCpuSet);
1505# endif
1506 pVCpu->iHostCpuSet = iHostCpuSet;
1507 ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
1508
1509 /*
1510 * We might need to disable VT-x if the active switcher turns off paging.
1511 */
1512 bool fVTxDisabled;
1513 rc = HMR0EnterSwitcher(pVM, pVM->vmm.s.enmSwitcher, &fVTxDisabled);
1514 if (RT_SUCCESS(rc))
1515 {
1516 /*
1517 * Go through the wormhole...
1518 */
1519 rc = pVM->vmm.s.pfnR0ToRawMode(pVM);
1520
1521 /*
1522 * Re-enable VT-x before we dispatch any pending host interrupts.
1523 */
1524 HMR0LeaveSwitcher(pVM, fVTxDisabled);
1525
1526 if ( rc == VINF_EM_RAW_INTERRUPT
1527 || rc == VINF_EM_RAW_INTERRUPT_HYPER)
1528 TRPMR0DispatchHostInterrupt(pVM);
1529 }
1530
1531 /*
1532 * Invalidate the host CPU identifiers as we restore interrupts.
1533 */
1534 pVCpu->iHostCpuSet = UINT32_MAX;
1535 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1536 ASMSetFlags(fFlags);
1537
1538#else /* !VBOX_WITH_RAW_MODE */
1539 rc = VERR_RAW_MODE_NOT_SUPPORTED;
1540#endif
1541 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1542 break;
1543 }
1544
1545 /*
1546 * PGM wrappers.
1547 */
1548 case VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES:
1549 if (idCpu == NIL_VMCPUID)
1550 return VERR_INVALID_CPU_ID;
1551 rc = PGMR0PhysAllocateHandyPages(pVM, &pVM->aCpus[idCpu]);
1552 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1553 break;
1554
1555 case VMMR0_DO_PGM_FLUSH_HANDY_PAGES:
1556 if (idCpu == NIL_VMCPUID)
1557 return VERR_INVALID_CPU_ID;
1558 rc = PGMR0PhysFlushHandyPages(pVM, &pVM->aCpus[idCpu]);
1559 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1560 break;
1561
1562 case VMMR0_DO_PGM_ALLOCATE_LARGE_HANDY_PAGE:
1563 if (idCpu == NIL_VMCPUID)
1564 return VERR_INVALID_CPU_ID;
1565 rc = PGMR0PhysAllocateLargeHandyPage(pVM, &pVM->aCpus[idCpu]);
1566 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1567 break;
1568
1569 case VMMR0_DO_PGM_PHYS_SETUP_IOMMU:
1570 if (idCpu != 0)
1571 return VERR_INVALID_CPU_ID;
1572 rc = PGMR0PhysSetupIommu(pVM);
1573 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1574 break;
1575
1576 /*
1577 * GMM wrappers.
1578 */
1579 case VMMR0_DO_GMM_INITIAL_RESERVATION:
1580 if (u64Arg)
1581 return VERR_INVALID_PARAMETER;
1582 rc = GMMR0InitialReservationReq(pVM, idCpu, (PGMMINITIALRESERVATIONREQ)pReqHdr);
1583 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1584 break;
1585
1586 case VMMR0_DO_GMM_UPDATE_RESERVATION:
1587 if (u64Arg)
1588 return VERR_INVALID_PARAMETER;
1589 rc = GMMR0UpdateReservationReq(pVM, idCpu, (PGMMUPDATERESERVATIONREQ)pReqHdr);
1590 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1591 break;
1592
1593 case VMMR0_DO_GMM_ALLOCATE_PAGES:
1594 if (u64Arg)
1595 return VERR_INVALID_PARAMETER;
1596 rc = GMMR0AllocatePagesReq(pVM, idCpu, (PGMMALLOCATEPAGESREQ)pReqHdr);
1597 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1598 break;
1599
1600 case VMMR0_DO_GMM_FREE_PAGES:
1601 if (u64Arg)
1602 return VERR_INVALID_PARAMETER;
1603 rc = GMMR0FreePagesReq(pVM, idCpu, (PGMMFREEPAGESREQ)pReqHdr);
1604 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1605 break;
1606
1607 case VMMR0_DO_GMM_FREE_LARGE_PAGE:
1608 if (u64Arg)
1609 return VERR_INVALID_PARAMETER;
1610 rc = GMMR0FreeLargePageReq(pVM, idCpu, (PGMMFREELARGEPAGEREQ)pReqHdr);
1611 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1612 break;
1613
1614 case VMMR0_DO_GMM_QUERY_HYPERVISOR_MEM_STATS:
1615 if (u64Arg)
1616 return VERR_INVALID_PARAMETER;
1617 rc = GMMR0QueryHypervisorMemoryStatsReq(pVM, (PGMMMEMSTATSREQ)pReqHdr);
1618 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1619 break;
1620
1621 case VMMR0_DO_GMM_QUERY_MEM_STATS:
1622 if (idCpu == NIL_VMCPUID)
1623 return VERR_INVALID_CPU_ID;
1624 if (u64Arg)
1625 return VERR_INVALID_PARAMETER;
1626 rc = GMMR0QueryMemoryStatsReq(pVM, idCpu, (PGMMMEMSTATSREQ)pReqHdr);
1627 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1628 break;
1629
1630 case VMMR0_DO_GMM_BALLOONED_PAGES:
1631 if (u64Arg)
1632 return VERR_INVALID_PARAMETER;
1633 rc = GMMR0BalloonedPagesReq(pVM, idCpu, (PGMMBALLOONEDPAGESREQ)pReqHdr);
1634 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1635 break;
1636
1637 case VMMR0_DO_GMM_MAP_UNMAP_CHUNK:
1638 if (u64Arg)
1639 return VERR_INVALID_PARAMETER;
1640 rc = GMMR0MapUnmapChunkReq(pVM, (PGMMMAPUNMAPCHUNKREQ)pReqHdr);
1641 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1642 break;
1643
1644 case VMMR0_DO_GMM_SEED_CHUNK:
1645 if (pReqHdr)
1646 return VERR_INVALID_PARAMETER;
1647 rc = GMMR0SeedChunk(pVM, idCpu, (RTR3PTR)u64Arg);
1648 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1649 break;
1650
1651 case VMMR0_DO_GMM_REGISTER_SHARED_MODULE:
1652 if (idCpu == NIL_VMCPUID)
1653 return VERR_INVALID_CPU_ID;
1654 if (u64Arg)
1655 return VERR_INVALID_PARAMETER;
1656 rc = GMMR0RegisterSharedModuleReq(pVM, idCpu, (PGMMREGISTERSHAREDMODULEREQ)pReqHdr);
1657 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1658 break;
1659
1660 case VMMR0_DO_GMM_UNREGISTER_SHARED_MODULE:
1661 if (idCpu == NIL_VMCPUID)
1662 return VERR_INVALID_CPU_ID;
1663 if (u64Arg)
1664 return VERR_INVALID_PARAMETER;
1665 rc = GMMR0UnregisterSharedModuleReq(pVM, idCpu, (PGMMUNREGISTERSHAREDMODULEREQ)pReqHdr);
1666 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1667 break;
1668
1669 case VMMR0_DO_GMM_RESET_SHARED_MODULES:
1670 if (idCpu == NIL_VMCPUID)
1671 return VERR_INVALID_CPU_ID;
1672 if ( u64Arg
1673 || pReqHdr)
1674 return VERR_INVALID_PARAMETER;
1675 rc = GMMR0ResetSharedModules(pVM, idCpu);
1676 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1677 break;
1678
1679#ifdef VBOX_WITH_PAGE_SHARING
1680 case VMMR0_DO_GMM_CHECK_SHARED_MODULES:
1681 {
1682 if (idCpu == NIL_VMCPUID)
1683 return VERR_INVALID_CPU_ID;
1684 if ( u64Arg
1685 || pReqHdr)
1686 return VERR_INVALID_PARAMETER;
1687
1688 PVMCPU pVCpu = &pVM->aCpus[idCpu];
1689 Assert(pVCpu->hNativeThreadR0 == RTThreadNativeSelf());
1690
1691# ifdef DEBUG_sandervl
1692 /* Make sure that log flushes can jump back to ring-3; annoying to get an incomplete log (this is risky though as the code doesn't take this into account). */
1693 /* Todo: this can have bad side effects for unexpected jumps back to r3. */
1694 rc = GMMR0CheckSharedModulesStart(pVM);
1695 if (rc == VINF_SUCCESS)
1696 {
1697 rc = vmmR0CallRing3SetJmp(&pVCpu->vmm.s.CallRing3JmpBufR0, GMMR0CheckSharedModules, pVM, pVCpu); /* this may resume code. */
1698 Assert( rc == VINF_SUCCESS
1699 || (rc == VINF_VMM_CALL_HOST && pVCpu->vmm.s.enmCallRing3Operation == VMMCALLRING3_VMM_LOGGER_FLUSH));
1700 GMMR0CheckSharedModulesEnd(pVM);
1701 }
1702# else
1703 rc = GMMR0CheckSharedModules(pVM, pVCpu);
1704# endif
1705 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1706 break;
1707 }
1708#endif
1709
1710#if defined(VBOX_STRICT) && HC_ARCH_BITS == 64
1711 case VMMR0_DO_GMM_FIND_DUPLICATE_PAGE:
1712 if (u64Arg)
1713 return VERR_INVALID_PARAMETER;
1714 rc = GMMR0FindDuplicatePageReq(pVM, (PGMMFINDDUPLICATEPAGEREQ)pReqHdr);
1715 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1716 break;
1717#endif
1718
1719 case VMMR0_DO_GMM_QUERY_STATISTICS:
1720 if (u64Arg)
1721 return VERR_INVALID_PARAMETER;
1722 rc = GMMR0QueryStatisticsReq(pVM, (PGMMQUERYSTATISTICSSREQ)pReqHdr);
1723 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1724 break;
1725
1726 case VMMR0_DO_GMM_RESET_STATISTICS:
1727 if (u64Arg)
1728 return VERR_INVALID_PARAMETER;
1729 rc = GMMR0ResetStatisticsReq(pVM, (PGMMRESETSTATISTICSSREQ)pReqHdr);
1730 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1731 break;
1732
1733 /*
1734 * A quick GCFGM mock-up.
1735 */
1736 /** @todo GCFGM with proper access control, ring-3 management interface and all that. */
1737 case VMMR0_DO_GCFGM_SET_VALUE:
1738 case VMMR0_DO_GCFGM_QUERY_VALUE:
1739 {
1740 if (pVM || !pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
1741 return VERR_INVALID_PARAMETER;
1742 PGCFGMVALUEREQ pReq = (PGCFGMVALUEREQ)pReqHdr;
1743 if (pReq->Hdr.cbReq != sizeof(*pReq))
1744 return VERR_INVALID_PARAMETER;
1745 if (enmOperation == VMMR0_DO_GCFGM_SET_VALUE)
1746 {
1747 rc = GVMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
1748 //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
1749 // rc = GMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
1750 }
1751 else
1752 {
1753 rc = GVMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
1754 //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
1755 // rc = GMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
1756 }
1757 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1758 break;
1759 }
1760
1761 /*
1762 * PDM Wrappers.
1763 */
1764 case VMMR0_DO_PDM_DRIVER_CALL_REQ_HANDLER:
1765 {
1766 if (!pVM || !pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
1767 return VERR_INVALID_PARAMETER;
1768 rc = PDMR0DriverCallReqHandler(pVM, (PPDMDRIVERCALLREQHANDLERREQ)pReqHdr);
1769 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1770 break;
1771 }
1772
1773 case VMMR0_DO_PDM_DEVICE_CALL_REQ_HANDLER:
1774 {
1775 if (!pVM || !pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
1776 return VERR_INVALID_PARAMETER;
1777 rc = PDMR0DeviceCallReqHandler(pVM, (PPDMDEVICECALLREQHANDLERREQ)pReqHdr);
1778 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1779 break;
1780 }
1781
1782 /*
1783 * Requests to the internal networking service.
1784 */
1785 case VMMR0_DO_INTNET_OPEN:
1786 {
1787 PINTNETOPENREQ pReq = (PINTNETOPENREQ)pReqHdr;
1788 if (u64Arg || !pReq || !vmmR0IsValidSession(pVM, pReq->pSession, pSession) || idCpu != NIL_VMCPUID)
1789 return VERR_INVALID_PARAMETER;
1790 rc = IntNetR0OpenReq(pSession, pReq);
1791 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1792 break;
1793 }
1794
1795 case VMMR0_DO_INTNET_IF_CLOSE:
1796 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFCLOSEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1797 return VERR_INVALID_PARAMETER;
1798 rc = IntNetR0IfCloseReq(pSession, (PINTNETIFCLOSEREQ)pReqHdr);
1799 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1800 break;
1801
1802
1803 case VMMR0_DO_INTNET_IF_GET_BUFFER_PTRS:
1804 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFGETBUFFERPTRSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1805 return VERR_INVALID_PARAMETER;
1806 rc = IntNetR0IfGetBufferPtrsReq(pSession, (PINTNETIFGETBUFFERPTRSREQ)pReqHdr);
1807 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1808 break;
1809
1810 case VMMR0_DO_INTNET_IF_SET_PROMISCUOUS_MODE:
1811 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1812 return VERR_INVALID_PARAMETER;
1813 rc = IntNetR0IfSetPromiscuousModeReq(pSession, (PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr);
1814 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1815 break;
1816
1817 case VMMR0_DO_INTNET_IF_SET_MAC_ADDRESS:
1818 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETMACADDRESSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1819 return VERR_INVALID_PARAMETER;
1820 rc = IntNetR0IfSetMacAddressReq(pSession, (PINTNETIFSETMACADDRESSREQ)pReqHdr);
1821 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1822 break;
1823
1824 case VMMR0_DO_INTNET_IF_SET_ACTIVE:
1825 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETACTIVEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1826 return VERR_INVALID_PARAMETER;
1827 rc = IntNetR0IfSetActiveReq(pSession, (PINTNETIFSETACTIVEREQ)pReqHdr);
1828 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1829 break;
1830
1831 case VMMR0_DO_INTNET_IF_SEND:
1832 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1833 return VERR_INVALID_PARAMETER;
1834 rc = IntNetR0IfSendReq(pSession, (PINTNETIFSENDREQ)pReqHdr);
1835 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1836 break;
1837
1838 case VMMR0_DO_INTNET_IF_WAIT:
1839 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1840 return VERR_INVALID_PARAMETER;
1841 rc = IntNetR0IfWaitReq(pSession, (PINTNETIFWAITREQ)pReqHdr);
1842 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1843 break;
1844
1845 case VMMR0_DO_INTNET_IF_ABORT_WAIT:
1846 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1847 return VERR_INVALID_PARAMETER;
1848 rc = IntNetR0IfAbortWaitReq(pSession, (PINTNETIFABORTWAITREQ)pReqHdr);
1849 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1850 break;
1851
1852#ifdef VBOX_WITH_PCI_PASSTHROUGH
1853 /*
1854 * Requests to host PCI driver service.
1855 */
1856 case VMMR0_DO_PCIRAW_REQ:
1857 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PPCIRAWSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1858 return VERR_INVALID_PARAMETER;
1859 rc = PciRawR0ProcessReq(pSession, pVM, (PPCIRAWSENDREQ)pReqHdr);
1860 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1861 break;
1862#endif
1863 /*
1864 * For profiling.
1865 */
1866 case VMMR0_DO_NOP:
1867 case VMMR0_DO_SLOW_NOP:
1868 return VINF_SUCCESS;
1869
1870 /*
1871 * For testing Ring-0 APIs invoked in this environment.
1872 */
1873 case VMMR0_DO_TESTS:
1874 /** @todo make new test */
1875 return VINF_SUCCESS;
1876
1877
1878#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
1879 case VMMR0_DO_TEST_SWITCHER3264:
1880 if (idCpu == NIL_VMCPUID)
1881 return VERR_INVALID_CPU_ID;
1882 rc = HMR0TestSwitcher3264(pVM);
1883 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1884 break;
1885#endif
1886 default:
1887 /*
1888 * We're returning VERR_NOT_SUPPORT here so we've got something else
1889 * than -1 which the interrupt gate glue code might return.
1890 */
1891 Log(("operation %#x is not supported\n", enmOperation));
1892 return VERR_NOT_SUPPORTED;
1893 }
1894 return rc;
1895}
1896
1897
1898/**
1899 * Argument for vmmR0EntryExWrapper containing the arguments for VMMR0EntryEx.
1900 */
1901typedef struct VMMR0ENTRYEXARGS
1902{
1903 PVM pVM;
1904 VMCPUID idCpu;
1905 VMMR0OPERATION enmOperation;
1906 PSUPVMMR0REQHDR pReq;
1907 uint64_t u64Arg;
1908 PSUPDRVSESSION pSession;
1909} VMMR0ENTRYEXARGS;
1910/** Pointer to a vmmR0EntryExWrapper argument package. */
1911typedef VMMR0ENTRYEXARGS *PVMMR0ENTRYEXARGS;
1912
1913/**
1914 * This is just a longjmp wrapper function for VMMR0EntryEx calls.
1915 *
1916 * @returns VBox status code.
1917 * @param pvArgs The argument package
1918 */
1919static DECLCALLBACK(int) vmmR0EntryExWrapper(void *pvArgs)
1920{
1921 return vmmR0EntryExWorker(((PVMMR0ENTRYEXARGS)pvArgs)->pVM,
1922 ((PVMMR0ENTRYEXARGS)pvArgs)->idCpu,
1923 ((PVMMR0ENTRYEXARGS)pvArgs)->enmOperation,
1924 ((PVMMR0ENTRYEXARGS)pvArgs)->pReq,
1925 ((PVMMR0ENTRYEXARGS)pvArgs)->u64Arg,
1926 ((PVMMR0ENTRYEXARGS)pvArgs)->pSession);
1927}
1928
1929
1930/**
1931 * The Ring 0 entry point, called by the support library (SUP).
1932 *
1933 * @returns VBox status code.
1934 * @param pVM The cross context VM structure.
1935 * @param idCpu Virtual CPU ID argument. Must be NIL_VMCPUID if pVM
1936 * is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't
1937 * @param enmOperation Which operation to execute.
1938 * @param pReq Pointer to the SUPVMMR0REQHDR packet. Optional.
1939 * @param u64Arg Some simple constant argument.
1940 * @param pSession The session of the caller.
1941 * @remarks Assume called with interrupts _enabled_.
1942 */
1943VMMR0DECL(int) VMMR0EntryEx(PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation, PSUPVMMR0REQHDR pReq, uint64_t u64Arg, PSUPDRVSESSION pSession)
1944{
1945 /*
1946 * Requests that should only happen on the EMT thread will be
1947 * wrapped in a setjmp so we can assert without causing trouble.
1948 */
1949 if ( VALID_PTR(pVM)
1950 && pVM->pVMR0
1951 && idCpu < pVM->cCpus)
1952 {
1953 switch (enmOperation)
1954 {
1955 /* These might/will be called before VMMR3Init. */
1956 case VMMR0_DO_GMM_INITIAL_RESERVATION:
1957 case VMMR0_DO_GMM_UPDATE_RESERVATION:
1958 case VMMR0_DO_GMM_ALLOCATE_PAGES:
1959 case VMMR0_DO_GMM_FREE_PAGES:
1960 case VMMR0_DO_GMM_BALLOONED_PAGES:
1961 /* On the mac we might not have a valid jmp buf, so check these as well. */
1962 case VMMR0_DO_VMMR0_INIT:
1963 case VMMR0_DO_VMMR0_TERM:
1964 {
1965 PVMCPU pVCpu = &pVM->aCpus[idCpu];
1966
1967 if (!pVCpu->vmm.s.CallRing3JmpBufR0.pvSavedStack)
1968 break;
1969
1970 /** @todo validate this EMT claim... GVM knows. */
1971 VMMR0ENTRYEXARGS Args;
1972 Args.pVM = pVM;
1973 Args.idCpu = idCpu;
1974 Args.enmOperation = enmOperation;
1975 Args.pReq = pReq;
1976 Args.u64Arg = u64Arg;
1977 Args.pSession = pSession;
1978 return vmmR0CallRing3SetJmpEx(&pVCpu->vmm.s.CallRing3JmpBufR0, vmmR0EntryExWrapper, &Args);
1979 }
1980
1981 default:
1982 break;
1983 }
1984 }
1985 return vmmR0EntryExWorker(pVM, idCpu, enmOperation, pReq, u64Arg, pSession);
1986}
1987
1988
1989/**
1990 * Checks whether we've armed the ring-0 long jump machinery.
1991 *
1992 * @returns @c true / @c false
1993 * @param pVCpu The cross context virtual CPU structure.
1994 * @thread EMT
1995 * @sa VMMIsLongJumpArmed
1996 */
1997VMMR0_INT_DECL(bool) VMMR0IsLongJumpArmed(PVMCPU pVCpu)
1998{
1999#ifdef RT_ARCH_X86
2000 return pVCpu->vmm.s.CallRing3JmpBufR0.eip
2001 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
2002#else
2003 return pVCpu->vmm.s.CallRing3JmpBufR0.rip
2004 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
2005#endif
2006}
2007
2008
2009/**
2010 * Checks whether we've done a ring-3 long jump.
2011 *
2012 * @returns @c true / @c false
2013 * @param pVCpu The cross context virtual CPU structure.
2014 * @thread EMT
2015 */
2016VMMR0_INT_DECL(bool) VMMR0IsInRing3LongJump(PVMCPU pVCpu)
2017{
2018 return pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
2019}
2020
2021
2022/**
2023 * Internal R0 logger worker: Flush logger.
2024 *
2025 * @param pLogger The logger instance to flush.
2026 * @remark This function must be exported!
2027 */
2028VMMR0DECL(void) vmmR0LoggerFlush(PRTLOGGER pLogger)
2029{
2030#ifdef LOG_ENABLED
2031 /*
2032 * Convert the pLogger into a VM handle and 'call' back to Ring-3.
2033 * (This is a bit paranoid code.)
2034 */
2035 PVMMR0LOGGER pR0Logger = (PVMMR0LOGGER)((uintptr_t)pLogger - RT_OFFSETOF(VMMR0LOGGER, Logger));
2036 if ( !VALID_PTR(pR0Logger)
2037 || !VALID_PTR(pR0Logger + 1)
2038 || pLogger->u32Magic != RTLOGGER_MAGIC)
2039 {
2040# ifdef DEBUG
2041 SUPR0Printf("vmmR0LoggerFlush: pLogger=%p!\n", pLogger);
2042# endif
2043 return;
2044 }
2045 if (pR0Logger->fFlushingDisabled)
2046 return; /* quietly */
2047
2048 PVM pVM = pR0Logger->pVM;
2049 if ( !VALID_PTR(pVM)
2050 || pVM->pVMR0 != pVM)
2051 {
2052# ifdef DEBUG
2053 SUPR0Printf("vmmR0LoggerFlush: pVM=%p! pVMR0=%p! pLogger=%p\n", pVM, pVM->pVMR0, pLogger);
2054# endif
2055 return;
2056 }
2057
2058 PVMCPU pVCpu = VMMGetCpu(pVM);
2059 if (pVCpu)
2060 {
2061 /*
2062 * Check that the jump buffer is armed.
2063 */
2064# ifdef RT_ARCH_X86
2065 if ( !pVCpu->vmm.s.CallRing3JmpBufR0.eip
2066 || pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2067# else
2068 if ( !pVCpu->vmm.s.CallRing3JmpBufR0.rip
2069 || pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2070# endif
2071 {
2072# ifdef DEBUG
2073 SUPR0Printf("vmmR0LoggerFlush: Jump buffer isn't armed!\n");
2074# endif
2075 return;
2076 }
2077 VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VMM_LOGGER_FLUSH, 0);
2078 }
2079# ifdef DEBUG
2080 else
2081 SUPR0Printf("vmmR0LoggerFlush: invalid VCPU context!\n");
2082# endif
2083#else
2084 NOREF(pLogger);
2085#endif /* LOG_ENABLED */
2086}
2087
2088/**
2089 * Internal R0 logger worker: Custom prefix.
2090 *
2091 * @returns Number of chars written.
2092 *
2093 * @param pLogger The logger instance.
2094 * @param pchBuf The output buffer.
2095 * @param cchBuf The size of the buffer.
2096 * @param pvUser User argument (ignored).
2097 */
2098VMMR0DECL(size_t) vmmR0LoggerPrefix(PRTLOGGER pLogger, char *pchBuf, size_t cchBuf, void *pvUser)
2099{
2100 NOREF(pvUser);
2101#ifdef LOG_ENABLED
2102 PVMMR0LOGGER pR0Logger = (PVMMR0LOGGER)((uintptr_t)pLogger - RT_OFFSETOF(VMMR0LOGGER, Logger));
2103 if ( !VALID_PTR(pR0Logger)
2104 || !VALID_PTR(pR0Logger + 1)
2105 || pLogger->u32Magic != RTLOGGER_MAGIC
2106 || cchBuf < 2)
2107 return 0;
2108
2109 static const char s_szHex[17] = "0123456789abcdef";
2110 VMCPUID const idCpu = pR0Logger->idCpu;
2111 pchBuf[1] = s_szHex[ idCpu & 15];
2112 pchBuf[0] = s_szHex[(idCpu >> 4) & 15];
2113
2114 return 2;
2115#else
2116 NOREF(pLogger); NOREF(pchBuf); NOREF(cchBuf);
2117 return 0;
2118#endif
2119}
2120
2121#ifdef LOG_ENABLED
2122
2123/**
2124 * Disables flushing of the ring-0 debug log.
2125 *
2126 * @param pVCpu The cross context virtual CPU structure.
2127 */
2128VMMR0_INT_DECL(void) VMMR0LogFlushDisable(PVMCPU pVCpu)
2129{
2130 if (pVCpu->vmm.s.pR0LoggerR0)
2131 pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled = true;
2132}
2133
2134
2135/**
2136 * Enables flushing of the ring-0 debug log.
2137 *
2138 * @param pVCpu The cross context virtual CPU structure.
2139 */
2140VMMR0_INT_DECL(void) VMMR0LogFlushEnable(PVMCPU pVCpu)
2141{
2142 if (pVCpu->vmm.s.pR0LoggerR0)
2143 pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled = false;
2144}
2145
2146
2147/**
2148 * Checks if log flushing is disabled or not.
2149 *
2150 * @param pVCpu The cross context virtual CPU structure.
2151 */
2152VMMR0_INT_DECL(bool) VMMR0IsLogFlushDisabled(PVMCPU pVCpu)
2153{
2154 if (pVCpu->vmm.s.pR0LoggerR0)
2155 return pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled;
2156 return true;
2157}
2158#endif /* LOG_ENABLED */
2159
2160/**
2161 * Jump back to ring-3 if we're the EMT and the longjmp is armed.
2162 *
2163 * @returns true if the breakpoint should be hit, false if it should be ignored.
2164 */
2165DECLEXPORT(bool) RTCALL RTAssertShouldPanic(void)
2166{
2167#if 0
2168 return true;
2169#else
2170 PVM pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
2171 if (pVM)
2172 {
2173 PVMCPU pVCpu = VMMGetCpu(pVM);
2174
2175 if (pVCpu)
2176 {
2177#ifdef RT_ARCH_X86
2178 if ( pVCpu->vmm.s.CallRing3JmpBufR0.eip
2179 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2180#else
2181 if ( pVCpu->vmm.s.CallRing3JmpBufR0.rip
2182 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2183#endif
2184 {
2185 int rc = VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VM_R0_ASSERTION, 0);
2186 return RT_FAILURE_NP(rc);
2187 }
2188 }
2189 }
2190#ifdef RT_OS_LINUX
2191 return true;
2192#else
2193 return false;
2194#endif
2195#endif
2196}
2197
2198
2199/**
2200 * Override this so we can push it up to ring-3.
2201 *
2202 * @param pszExpr Expression. Can be NULL.
2203 * @param uLine Location line number.
2204 * @param pszFile Location file name.
2205 * @param pszFunction Location function name.
2206 */
2207DECLEXPORT(void) RTCALL RTAssertMsg1Weak(const char *pszExpr, unsigned uLine, const char *pszFile, const char *pszFunction)
2208{
2209 /*
2210 * To the log.
2211 */
2212 LogAlways(("\n!!R0-Assertion Failed!!\n"
2213 "Expression: %s\n"
2214 "Location : %s(%d) %s\n",
2215 pszExpr, pszFile, uLine, pszFunction));
2216
2217 /*
2218 * To the global VMM buffer.
2219 */
2220 PVM pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
2221 if (pVM)
2222 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg1, sizeof(pVM->vmm.s.szRing0AssertMsg1),
2223 "\n!!R0-Assertion Failed!!\n"
2224 "Expression: %s\n"
2225 "Location : %s(%d) %s\n",
2226 pszExpr, pszFile, uLine, pszFunction);
2227
2228 /*
2229 * Continue the normal way.
2230 */
2231 RTAssertMsg1(pszExpr, uLine, pszFile, pszFunction);
2232}
2233
2234
2235/**
2236 * Callback for RTLogFormatV which writes to the ring-3 log port.
2237 * See PFNLOGOUTPUT() for details.
2238 */
2239static DECLCALLBACK(size_t) rtLogOutput(void *pv, const char *pachChars, size_t cbChars)
2240{
2241 for (size_t i = 0; i < cbChars; i++)
2242 {
2243 LogAlways(("%c", pachChars[i])); NOREF(pachChars);
2244 }
2245
2246 NOREF(pv);
2247 return cbChars;
2248}
2249
2250
2251/**
2252 * Override this so we can push it up to ring-3.
2253 *
2254 * @param pszFormat The format string.
2255 * @param va Arguments.
2256 */
2257DECLEXPORT(void) RTCALL RTAssertMsg2WeakV(const char *pszFormat, va_list va)
2258{
2259 va_list vaCopy;
2260
2261 /*
2262 * Push the message to the loggers.
2263 */
2264 PRTLOGGER pLog = RTLogGetDefaultInstance(); /* Don't initialize it here... */
2265 if (pLog)
2266 {
2267 va_copy(vaCopy, va);
2268 RTLogFormatV(rtLogOutput, pLog, pszFormat, vaCopy);
2269 va_end(vaCopy);
2270 }
2271 pLog = RTLogRelGetDefaultInstance();
2272 if (pLog)
2273 {
2274 va_copy(vaCopy, va);
2275 RTLogFormatV(rtLogOutput, pLog, pszFormat, vaCopy);
2276 va_end(vaCopy);
2277 }
2278
2279 /*
2280 * Push it to the global VMM buffer.
2281 */
2282 PVM pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
2283 if (pVM)
2284 {
2285 va_copy(vaCopy, va);
2286 RTStrPrintfV(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2), pszFormat, vaCopy);
2287 va_end(vaCopy);
2288 }
2289
2290 /*
2291 * Continue the normal way.
2292 */
2293 RTAssertMsg2V(pszFormat, va);
2294}
2295
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette