VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/VMMR0.cpp@105898

Last change on this file since 105898 was 104840, checked in by vboxsync, 5 months ago

VMM/PGM: Refactored RAM ranges, MMIO2 ranges and ROM ranges and added MMIO ranges (to PGM) so we can safely access RAM ranges at runtime w/o fear of them ever being freed up. It is now only possible to create these during VM creation and loading, and they will live till VM destruction (except for MMIO2 which could be destroyed during loading (PCNet fun)). The lookup handling is by table instead of pointer tree. No more ring-0 pointers in shared data. bugref:10687 bugref:10093

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 137.2 KB
1/* $Id: VMMR0.cpp 104840 2024-06-05 00:59:51Z vboxsync $ */
2/** @file
3 * VMM - Host Context Ring 0.
4 */
5
6/*
7 * Copyright (C) 2006-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_VMM
33#include <VBox/vmm/vmm.h>
34#include <VBox/sup.h>
35#include <VBox/vmm/iem.h>
36#include <VBox/vmm/iom.h>
37#include <VBox/vmm/trpm.h>
38#include <VBox/vmm/cpum.h>
39#include <VBox/vmm/pdmapi.h>
40#include <VBox/vmm/pgm.h>
41#ifdef VBOX_WITH_NEM_R0
42# include <VBox/vmm/nem.h>
43#endif
44#include <VBox/vmm/em.h>
45#include <VBox/vmm/stam.h>
46#include <VBox/vmm/tm.h>
47#include "VMMInternal.h"
48#include <VBox/vmm/vmcc.h>
49#include <VBox/vmm/gvm.h>
50#ifdef VBOX_WITH_PCI_PASSTHROUGH
51# include <VBox/vmm/pdmpci.h>
52#endif
53#include <VBox/vmm/apic.h>
54
55#include <VBox/vmm/gvmm.h>
56#include <VBox/vmm/gmm.h>
57#include <VBox/vmm/gim.h>
58#include <VBox/intnet.h>
59#include <VBox/vmm/hm.h>
60#include <VBox/param.h>
61#include <VBox/err.h>
62#include <VBox/version.h>
63#include <VBox/log.h>
64
65#include <iprt/asm-amd64-x86.h>
66#include <iprt/assert.h>
67#include <iprt/crc.h>
68#include <iprt/initterm.h>
69#include <iprt/mem.h>
70#include <iprt/memobj.h>
71#include <iprt/mp.h>
72#include <iprt/once.h>
73#include <iprt/semaphore.h>
74#include <iprt/spinlock.h>
75#include <iprt/stdarg.h>
76#include <iprt/string.h>
77#include <iprt/thread.h>
78#include <iprt/timer.h>
79#include <iprt/time.h>
80
81#include "dtrace/VBoxVMM.h"
82
83
84#if defined(_MSC_VER) && defined(RT_ARCH_AMD64) /** @todo check this with VC7! */
85# pragma intrinsic(_AddressOfReturnAddress)
86#endif
87
88#if defined(RT_OS_DARWIN) && ARCH_BITS == 32
89# error "32-bit darwin is no longer supported. Go back to 4.3 or earlier!"
90#endif
91
92
93/*********************************************************************************************************************************
94* Internal Functions *
95*********************************************************************************************************************************/
96RT_C_DECLS_BEGIN
97#if defined(RT_ARCH_X86) && (defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD))
98extern uint64_t __udivdi3(uint64_t, uint64_t);
99extern uint64_t __umoddi3(uint64_t, uint64_t);
100#endif
101RT_C_DECLS_END
102static int vmmR0UpdateLoggers(PGVM pGVM, VMCPUID idCpu, PVMMR0UPDATELOGGERSREQ pReq, uint64_t fFlags);
103static int vmmR0LogFlusher(PGVM pGVM);
104static int vmmR0LogWaitFlushed(PGVM pGVM, VMCPUID idCpu, size_t idxLogger);
105static int vmmR0InitLoggers(PGVM pGVM);
106static void vmmR0CleanupLoggers(PGVM pGVM);
107
108
109/*********************************************************************************************************************************
110* Global Variables *
111*********************************************************************************************************************************/
112/** Drag in necessary library bits.
113 * The runtime lives here (in VMMR0.r0) and VBoxDD*R0.r0 links against us. */
114struct CLANG11WEIRDNOTHROW { PFNRT pfn; } g_VMMR0Deps[] =
115{
116 { (PFNRT)RTCrc32 },
117 { (PFNRT)RTOnce },
118#if defined(RT_ARCH_X86) && (defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD))
119 { (PFNRT)__udivdi3 },
120 { (PFNRT)__umoddi3 },
121#endif
122 { NULL }
123};
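/* Taking the address of each function above presumably keeps the linker from
   dropping those IPRT symbols, so the VBoxDD*R0 modules that link against
   VMMR0.r0 can still resolve them; the struct wrapper appears to work around
   a clang 11 quirk (hence the name). */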
124
125#ifdef RT_OS_SOLARIS
126/* Dependency information for the native Solaris loader. */
127extern "C" { char _depends_on[] = "vboxdrv"; }
128#endif
129
130
131/**
132 * Initialize the module.
133 * This is called when we're first loaded.
134 *
135 * @returns 0 on success.
136 * @returns VBox status code on failure.
137 * @param hMod Image handle for use in APIs.
138 */
139DECLEXPORT(int) ModuleInit(void *hMod)
140{
141#ifdef VBOX_WITH_DTRACE_R0
142 /*
143 * The first thing to do is register the static tracepoints.
144 * (Deregistration is automatic.)
145 */
146 int rc2 = SUPR0TracerRegisterModule(hMod, &g_VTGObjHeader);
147 if (RT_FAILURE(rc2))
148 return rc2;
149#endif
150 LogFlow(("ModuleInit:\n"));
151
152#ifdef VBOX_WITH_64ON32_CMOS_DEBUG
153 /*
154 * Display the CMOS debug code.
155 */
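 /* Ports 0x72/0x73 are, on most PC chipsets, the index/data pair for the
    upper CMOS/RTC bank; offset 0x03 presumably holds a value stashed there
    by earlier boot/debug code, which is read back and logged below. */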
156 ASMOutU8(0x72, 0x03);
157 uint8_t bDebugCode = ASMInU8(0x73);
158 LogRel(("CMOS Debug Code: %#x (%d)\n", bDebugCode, bDebugCode));
159 RTLogComPrintf("CMOS Debug Code: %#x (%d)\n", bDebugCode, bDebugCode);
160#endif
161
162 /*
163 * Initialize the VMM, GVMM, GMM, HM, PGM (Darwin) and INTNET.
164 */
165 int rc = vmmInitFormatTypes();
166 if (RT_SUCCESS(rc))
167 {
168 rc = GVMMR0Init();
169 if (RT_SUCCESS(rc))
170 {
171 rc = GMMR0Init();
172 if (RT_SUCCESS(rc))
173 {
174 rc = HMR0Init();
175 if (RT_SUCCESS(rc))
176 {
177 PDMR0Init(hMod);
178
179 rc = PGMRegisterStringFormatTypes();
180 if (RT_SUCCESS(rc))
181 {
182 rc = IntNetR0Init();
183 if (RT_SUCCESS(rc))
184 {
185#ifdef VBOX_WITH_PCI_PASSTHROUGH
186 rc = PciRawR0Init();
187#endif
188 if (RT_SUCCESS(rc))
189 {
190 rc = CPUMR0ModuleInit();
191 if (RT_SUCCESS(rc))
192 {
193#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
194 rc = vmmR0TripleFaultHackInit();
195 if (RT_SUCCESS(rc))
196#endif
197 {
198#ifdef VBOX_WITH_NEM_R0
199 rc = NEMR0Init();
200 if (RT_SUCCESS(rc))
201#endif
202 {
203 LogFlow(("ModuleInit: returns success\n"));
204 return VINF_SUCCESS;
205 }
206 }
207
208 /*
209 * Bail out.
210 */
211#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
212 vmmR0TripleFaultHackTerm();
213#endif
214 }
215 else
216 LogRel(("ModuleInit: CPUMR0ModuleInit -> %Rrc\n", rc));
217#ifdef VBOX_WITH_PCI_PASSTHROUGH
218 PciRawR0Term();
219#endif
220 }
221 else
222 LogRel(("ModuleInit: PciRawR0Init -> %Rrc\n", rc));
223 IntNetR0Term();
224 }
225 else
226 LogRel(("ModuleInit: IntNetR0Init -> %Rrc\n", rc));
227 PGMDeregisterStringFormatTypes();
228 }
229 else
230 LogRel(("ModuleInit: PGMRegisterStringFormatTypes -> %Rrc\n", rc));
231 HMR0Term();
232 }
233 else
234 LogRel(("ModuleInit: HMR0Init -> %Rrc\n", rc));
235 GMMR0Term();
236 }
237 else
238 LogRel(("ModuleInit: GMMR0Init -> %Rrc\n", rc));
239 GVMMR0Term();
240 }
241 else
242 LogRel(("ModuleInit: GVMMR0Init -> %Rrc\n", rc));
243 vmmTermFormatTypes();
244 }
245 else
246 LogRel(("ModuleInit: vmmInitFormatTypes -> %Rrc\n", rc));
247
248 LogFlow(("ModuleInit: failed %Rrc\n", rc));
249 return rc;
250}
251
252
253/**
254 * Terminate the module.
255 * This is called when we're finally unloaded.
256 *
257 * @param hMod Image handle for use in APIs.
258 */
259DECLEXPORT(void) ModuleTerm(void *hMod)
260{
261 NOREF(hMod);
262 LogFlow(("ModuleTerm:\n"));
263
264 /*
265 * Terminate the CPUM module (Local APIC cleanup).
266 */
267 CPUMR0ModuleTerm();
268
269 /*
270 * Terminate the internal network service.
271 */
272 IntNetR0Term();
273
274 /*
275 * PGM (Darwin), HM and PciRaw global cleanup.
276 */
277#ifdef VBOX_WITH_PCI_PASSTHROUGH
278 PciRawR0Term();
279#endif
280 PGMDeregisterStringFormatTypes();
281 HMR0Term();
282#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
283 vmmR0TripleFaultHackTerm();
284#endif
285#ifdef VBOX_WITH_NEM_R0
286 NEMR0Term();
287#endif
288
289 /*
290 * Destroy the GMM and GVMM instances.
291 */
292 GMMR0Term();
293 GVMMR0Term();
294
295 vmmTermFormatTypes();
296 RTTermRunCallbacks(RTTERMREASON_UNLOAD, 0);
297
298 LogFlow(("ModuleTerm: returns\n"));
299}
300
301
302/**
303 * Initializes VMM specific members when the GVM structure is created,
304 * allocating loggers and stuff.
305 *
306 * The loggers are allocated here so that we can update their settings before
307 * doing VMMR0_DO_VMMR0_INIT and have correct logging at that time.
308 *
309 * @returns VBox status code.
310 * @param pGVM The global (ring-0) VM structure.
311 */
312VMMR0_INT_DECL(int) VMMR0InitPerVMData(PGVM pGVM)
313{
314 AssertCompile(sizeof(pGVM->vmmr0.s) <= sizeof(pGVM->vmmr0.padding));
315
316 /*
317 * Initialize all members first.
318 */
319 pGVM->vmmr0.s.fCalledInitVm = false;
320 pGVM->vmmr0.s.hMemObjLogger = NIL_RTR0MEMOBJ;
321 pGVM->vmmr0.s.hMapObjLogger = NIL_RTR0MEMOBJ;
322 pGVM->vmmr0.s.hMemObjReleaseLogger = NIL_RTR0MEMOBJ;
323 pGVM->vmmr0.s.hMapObjReleaseLogger = NIL_RTR0MEMOBJ;
324 pGVM->vmmr0.s.LogFlusher.hSpinlock = NIL_RTSPINLOCK;
325 pGVM->vmmr0.s.LogFlusher.hThread = NIL_RTNATIVETHREAD;
326 pGVM->vmmr0.s.LogFlusher.hEvent = NIL_RTSEMEVENT;
327 pGVM->vmmr0.s.LogFlusher.idxRingHead = 0;
328 pGVM->vmmr0.s.LogFlusher.idxRingTail = 0;
329 pGVM->vmmr0.s.LogFlusher.fThreadWaiting = false;
330
331 for (VMCPUID idCpu = 0; idCpu < pGVM->cCpus; idCpu++)
332 {
333 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
334 Assert(pGVCpu->idHostCpu == NIL_RTCPUID);
335 Assert(pGVCpu->iHostCpuSet == UINT32_MAX);
336 pGVCpu->vmmr0.s.pPreemptState = NULL;
337 pGVCpu->vmmr0.s.hCtxHook = NIL_RTTHREADCTXHOOK;
338 pGVCpu->vmmr0.s.AssertJmpBuf.pMirrorBuf = &pGVCpu->vmm.s.AssertJmpBuf;
339 pGVCpu->vmmr0.s.AssertJmpBuf.pvStackBuf = &pGVCpu->vmm.s.abAssertStack[0];
340 pGVCpu->vmmr0.s.AssertJmpBuf.cbStackBuf = sizeof(pGVCpu->vmm.s.abAssertStack);
341
342 for (size_t iLogger = 0; iLogger < RT_ELEMENTS(pGVCpu->vmmr0.s.u.aLoggers); iLogger++)
343 pGVCpu->vmmr0.s.u.aLoggers[iLogger].hEventFlushWait = NIL_RTSEMEVENT;
344 }
345
346 /*
347 * Create the loggers.
348 */
349 return vmmR0InitLoggers(pGVM);
350}
351
352
353/**
354 * Initializes the R0 driver for a particular VM instance.
355 *
356 * @returns VBox status code.
357 *
358 * @param pGVM The global (ring-0) VM structure.
359 * @param uSvnRev The SVN revision of the ring-3 part.
360 * @param uBuildType Build type indicator.
361 * @thread EMT(0)
362 */
363static int vmmR0InitVM(PGVM pGVM, uint32_t uSvnRev, uint32_t uBuildType)
364{
365 /*
366 * Match the SVN revisions and build type.
367 */
368 if (uSvnRev != VMMGetSvnRev())
369 {
370 LogRel(("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev()));
371 SUPR0Printf("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev());
372 return VERR_VMM_R0_VERSION_MISMATCH;
373 }
374 if (uBuildType != vmmGetBuildType())
375 {
376 LogRel(("VMMR0InitVM: Build type mismatch, r3=%#x r0=%#x\n", uBuildType, vmmGetBuildType()));
377 SUPR0Printf("VMMR0InitVM: Build type mismatch, r3=%#x r0=%#x\n", uBuildType, vmmGetBuildType());
378 return VERR_VMM_R0_VERSION_MISMATCH;
379 }
380
381 int rc = GVMMR0ValidateGVMandEMT(pGVM, 0 /*idCpu*/);
382 if (RT_FAILURE(rc))
383 return rc;
384
385 /* Don't allow this to be called more than once. */
386 if (!pGVM->vmmr0.s.fCalledInitVm)
387 pGVM->vmmr0.s.fCalledInitVm = true;
388 else
389 return VERR_ALREADY_INITIALIZED;
390
391#ifdef LOG_ENABLED
392
393 /*
394 * Register the EMT R0 logger instance for VCPU 0.
395 */
396 PVMCPUCC pVCpu = VMCC_GET_CPU_0(pGVM);
397 if (pVCpu->vmmr0.s.u.s.Logger.pLogger)
398 {
399# if 0 /* testing of the logger. */
400 LogCom(("vmmR0InitVM: before %p\n", RTLogDefaultInstance()));
401 LogCom(("vmmR0InitVM: pfnFlush=%p actual=%p\n", pR0Logger->Logger.pfnFlush, vmmR0LoggerFlush));
402 LogCom(("vmmR0InitVM: pfnLogger=%p actual=%p\n", pR0Logger->Logger.pfnLogger, vmmR0LoggerWrapper));
403 LogCom(("vmmR0InitVM: offScratch=%d fFlags=%#x fDestFlags=%#x\n", pR0Logger->Logger.offScratch, pR0Logger->Logger.fFlags, pR0Logger->Logger.fDestFlags));
404
405 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pGVM->pSession);
406 LogCom(("vmmR0InitVM: after %p reg\n", RTLogDefaultInstance()));
407 RTLogSetDefaultInstanceThread(NULL, pGVM->pSession);
408 LogCom(("vmmR0InitVM: after %p dereg\n", RTLogDefaultInstance()));
409
410 pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
411 LogCom(("vmmR0InitVM: returned successfully from direct logger call.\n"));
412 pR0Logger->Logger.pfnFlush(&pR0Logger->Logger);
413 LogCom(("vmmR0InitVM: returned successfully from direct flush call.\n"));
414
415 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pGVM->pSession);
416 LogCom(("vmmR0InitVM: after %p reg2\n", RTLogDefaultInstance()));
417 pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
418 LogCom(("vmmR0InitVM: returned successfully from direct logger call (2). offScratch=%d\n", pR0Logger->Logger.offScratch));
419 RTLogSetDefaultInstanceThread(NULL, pGVM->pSession);
420 LogCom(("vmmR0InitVM: after %p dereg2\n", RTLogDefaultInstance()));
421
422 RTLogLoggerEx(&pR0Logger->Logger, 0, ~0U, "hello ring-0 logger (RTLogLoggerEx)\n");
423 LogCom(("vmmR0InitVM: RTLogLoggerEx returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));
424
425 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pGVM->pSession);
426 RTLogPrintf("hello ring-0 logger (RTLogPrintf)\n");
427 LogCom(("vmmR0InitVM: RTLogPrintf returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));
428# endif
429# ifdef VBOX_WITH_R0_LOGGING
430 Log(("Switching to per-thread logging instance %p (key=%p)\n", pVCpu->vmmr0.s.u.s.Logger.pLogger, pGVM->pSession));
431 RTLogSetDefaultInstanceThread(pVCpu->vmmr0.s.u.s.Logger.pLogger, (uintptr_t)pGVM->pSession);
432 pVCpu->vmmr0.s.u.s.Logger.fRegistered = true;
433# endif
434 }
435#endif /* LOG_ENABLED */
436
437 /*
438 * Check if the host supports high resolution timers or not.
439 */
440 if ( pGVM->vmm.s.fUsePeriodicPreemptionTimers
441 && !RTTimerCanDoHighResolution())
442 pGVM->vmm.s.fUsePeriodicPreemptionTimers = false;
443
444 /*
445 * Initialize the per VM data for GVMM and GMM.
446 */
447 rc = GVMMR0InitVM(pGVM);
448 if (RT_SUCCESS(rc))
449 {
450 /*
451 * Init HM, CPUM and PGM.
452 */
453 rc = HMR0InitVM(pGVM);
454 if (RT_SUCCESS(rc))
455 {
456 rc = CPUMR0InitVM(pGVM);
457 if (RT_SUCCESS(rc))
458 {
459 rc = PGMR0InitVM(pGVM);
460 if (RT_SUCCESS(rc))
461 {
462 rc = EMR0InitVM(pGVM);
463 if (RT_SUCCESS(rc))
464 {
465 rc = IEMR0InitVM(pGVM);
466 if (RT_SUCCESS(rc))
467 {
468 rc = IOMR0InitVM(pGVM);
469 if (RT_SUCCESS(rc))
470 {
471#ifdef VBOX_WITH_PCI_PASSTHROUGH
472 rc = PciRawR0InitVM(pGVM);
473#endif
474 if (RT_SUCCESS(rc))
475 {
476 rc = GIMR0InitVM(pGVM);
477 if (RT_SUCCESS(rc))
478 {
479 GVMMR0DoneInitVM(pGVM);
480 PGMR0DoneInitVM(pGVM);
481
482 /*
483 * Collect a bit of info for the VM release log.
484 */
485 pGVM->vmm.s.fIsPreemptPendingApiTrusty = RTThreadPreemptIsPendingTrusty();
486 pGVM->vmm.s.fIsPreemptPossible = RTThreadPreemptIsPossible();
487 return rc;
488
489 /* bail out */
490 //GIMR0TermVM(pGVM);
491 }
492#ifdef VBOX_WITH_PCI_PASSTHROUGH
493 PciRawR0TermVM(pGVM);
494#endif
495 }
496 }
497 }
498 }
499 }
500 }
501 HMR0TermVM(pGVM);
502 }
503 }
504
505 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pGVM->pSession);
506 return rc;
507}
508
509
510/**
511 * Does EMT specific VM initialization.
512 *
513 * @returns VBox status code.
514 * @param pGVM The ring-0 VM structure.
515 * @param idCpu The EMT that's calling.
516 */
517static int vmmR0InitVMEmt(PGVM pGVM, VMCPUID idCpu)
518{
519 /* Paranoia (caller checked these already). */
520 AssertReturn(idCpu < pGVM->cCpus, VERR_INVALID_CPU_ID);
521 AssertReturn(pGVM->aCpus[idCpu].hEMT == RTThreadNativeSelf(), VERR_INVALID_CPU_ID);
522
523#if defined(LOG_ENABLED) && defined(VBOX_WITH_R0_LOGGING)
524 /*
525 * Registration of ring 0 loggers.
526 */
527 PVMCPUCC pVCpu = &pGVM->aCpus[idCpu];
528 if ( pVCpu->vmmr0.s.u.s.Logger.pLogger
529 && !pVCpu->vmmr0.s.u.s.Logger.fRegistered)
530 {
531 RTLogSetDefaultInstanceThread(pVCpu->vmmr0.s.u.s.Logger.pLogger, (uintptr_t)pGVM->pSession);
532 pVCpu->vmmr0.s.u.s.Logger.fRegistered = true;
533 }
534#endif
535
536 return VINF_SUCCESS;
537}
538
539
540
541/**
542 * Terminates the R0 bits for a particular VM instance.
543 *
544 * This is normally called by ring-3 as part of the VM termination process, but
545 * may alternatively be called during the support driver session cleanup when
546 * the VM object is destroyed (see GVMM).
547 *
548 * @returns VBox status code.
549 *
550 * @param pGVM The global (ring-0) VM structure.
551 * @param idCpu Set to 0 if EMT(0) or NIL_VMCPUID if session cleanup
552 * thread.
553 * @thread EMT(0) or session clean up thread.
554 */
555VMMR0_INT_DECL(int) VMMR0TermVM(PGVM pGVM, VMCPUID idCpu)
556{
557 /*
558 * Check EMT(0) claim if we're called from userland.
559 */
560 if (idCpu != NIL_VMCPUID)
561 {
562 AssertReturn(idCpu == 0, VERR_INVALID_CPU_ID);
563 int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
564 if (RT_FAILURE(rc))
565 return rc;
566 }
567
568#ifdef VBOX_WITH_PCI_PASSTHROUGH
569 PciRawR0TermVM(pGVM);
570#endif
571
572 /*
573 * Tell GVMM what we're up to and check that we only do this once.
574 */
575 if (GVMMR0DoingTermVM(pGVM))
576 {
577 GIMR0TermVM(pGVM);
578
579 /** @todo I wish to call PGMR0PhysFlushHandyPages(pGVM, &pGVM->aCpus[idCpu])
580 * here to make sure we don't leak any shared pages if we crash... */
581 HMR0TermVM(pGVM);
582 }
583
584 /*
585 * Deregister the logger for this EMT.
586 */
587 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pGVM->pSession);
588
589 /*
590 * Start log flusher thread termination.
591 */
592 ASMAtomicWriteBool(&pGVM->vmmr0.s.LogFlusher.fThreadShutdown, true);
593 if (pGVM->vmmr0.s.LogFlusher.hEvent != NIL_RTSEMEVENT)
594 RTSemEventSignal(pGVM->vmmr0.s.LogFlusher.hEvent);
595
596 return VINF_SUCCESS;
597}
598
599
600/**
601 * This is called at the end of gvmmR0CleanupVM().
602 *
603 * @param pGVM The global (ring-0) VM structure.
604 */
605VMMR0_INT_DECL(void) VMMR0CleanupVM(PGVM pGVM)
606{
607 AssertCompile(NIL_RTTHREADCTXHOOK == (RTTHREADCTXHOOK)0); /* Depends on zero initialized memory working for NIL at the moment. */
608 for (VMCPUID idCpu = 0; idCpu < pGVM->cCpus; idCpu++)
609 {
610 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
611
612 /** @todo Can we busy wait here for all thread-context hooks to be
613 * deregistered before releasing (destroying) it? Only until we find a
614 * solution for not deregistering hooks every time we're leaving HMR0
615 * context. */
616 VMMR0ThreadCtxHookDestroyForEmt(pGVCpu);
617 }
618
619 vmmR0CleanupLoggers(pGVM);
620}
621
622
623/**
624 * An interrupt or unhalt force flag is set, deal with it.
625 *
626 * @returns VINF_SUCCESS (or VINF_EM_HALT).
627 * @param pVCpu The cross context virtual CPU structure.
628 * @param uMWait Result from EMMonitorWaitIsActive().
629 * @param enmInterruptibility Guest CPU interruptibility level.
630 */
631static int vmmR0DoHaltInterrupt(PVMCPUCC pVCpu, unsigned uMWait, CPUMINTERRUPTIBILITY enmInterruptibility)
632{
633 Assert(!TRPMHasTrap(pVCpu));
634 Assert( enmInterruptibility > CPUMINTERRUPTIBILITY_INVALID
635 && enmInterruptibility < CPUMINTERRUPTIBILITY_END);
636
637 /*
638 * Pending interrupts w/o any SMIs or NMIs? That's the usual case.
639 */
640 if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
641 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_INTERRUPT_NMI))
642 {
643 if (enmInterruptibility <= CPUMINTERRUPTIBILITY_UNRESTRAINED)
644 {
645 uint8_t u8Interrupt = 0;
646 int rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
647 Log(("vmmR0DoHaltInterrupt: CPU%d u8Interrupt=%d (%#x) rc=%Rrc\n", pVCpu->idCpu, u8Interrupt, u8Interrupt, rc));
648 if (RT_SUCCESS(rc))
649 {
650 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_UNHALT);
651
652 rc = TRPMAssertTrap(pVCpu, u8Interrupt, TRPM_HARDWARE_INT);
653 AssertRCSuccess(rc);
654 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltExec);
655 return rc;
656 }
657 }
658 }
659 /*
660 * SMI is not implemented yet, at least not here.
661 */
662 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_SMI))
663 {
664 Log12(("vmmR0DoHaltInterrupt: CPU%d failed #3\n", pVCpu->idCpu));
665 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltToR3);
666 return VINF_EM_HALT;
667 }
668 /*
669 * NMI.
670 */
671 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
672 {
673 if (enmInterruptibility < CPUMINTERRUPTIBILITY_NMI_INHIBIT)
674 {
675 /** @todo later. */
676 Log12(("vmmR0DoHaltInterrupt: CPU%d failed #2 (uMWait=%u enmInt=%d)\n", pVCpu->idCpu, uMWait, enmInterruptibility));
677 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltToR3);
678 return VINF_EM_HALT;
679 }
680 }
681 /*
682 * Nested-guest virtual interrupt.
683 */
684 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST))
685 {
686 if (enmInterruptibility < CPUMINTERRUPTIBILITY_VIRT_INT_DISABLED)
687 {
688 /** @todo NSTVMX: NSTSVM: Remember, we might have to check and perform VM-exits
689 * here before injecting the virtual interrupt. See emR3ForcedActions
690 * for details. */
691 Log12(("vmmR0DoHaltInterrupt: CPU%d failed #1 (uMWait=%u enmInt=%d)\n", pVCpu->idCpu, uMWait, enmInterruptibility));
692 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltToR3);
693 return VINF_EM_HALT;
694 }
695 }
696
697 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UNHALT))
698 {
699 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltExec);
700 Log11(("vmmR0DoHaltInterrupt: CPU%d success VINF_SUCCESS (UNHALT)\n", pVCpu->idCpu));
701 return VINF_SUCCESS;
702 }
703 if (uMWait > 1)
704 {
705 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltExec);
706 Log11(("vmmR0DoHaltInterrupt: CPU%d success VINF_SUCCESS (uMWait=%u > 1)\n", pVCpu->idCpu, uMWait));
707 return VINF_SUCCESS;
708 }
709
710 Log12(("vmmR0DoHaltInterrupt: CPU%d failed #0 (uMWait=%u enmInt=%d)\n", pVCpu->idCpu, uMWait, enmInterruptibility));
711 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltToR3);
712 return VINF_EM_HALT;
713}
714
715
716/**
717 * This does one round of vmR3HaltGlobal1Halt().
718 *
719 * The rationale here is that we'll reduce latency in interrupt situations if we
720 * don't go to ring-3 immediately on a VINF_EM_HALT (guest executed HLT or
721 * MWAIT), but do one round of blocking here instead and hope the interrupt is
722 * raised in the meanwhile.
723 *
724 * If we go to ring-3 we'll quit the inner HM/NEM loop in EM and end up in the
725 * outer loop, which will then call VMR3WaitHalted() and that in turn will do a
726 * ring-0 call (unless we're too close to a timer event). When the interrupt
727 * wakes us up, we'll return from ring-0 and EM will by instinct do a
728 * rescheduling (because of raw-mode) before it resumes the HM/NEM loop and gets
729 * back to VMMR0EntryFast().
730 *
731 * @returns VINF_SUCCESS or VINF_EM_HALT.
732 * @param pGVM The ring-0 VM structure.
733 * @param pGVCpu The ring-0 virtual CPU structure.
734 *
735 * @todo r=bird: All the blocking/waiting and EMT management should move out of
736 * the VM module, probably to VMM. Then this would be more weird wrt
737 * parameters and statistics.
738 */
739static int vmmR0DoHalt(PGVM pGVM, PGVMCPU pGVCpu)
740{
741 /*
742 * Do spin stat historization.
743 */
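 /* (Roughly: every 256th halt the success/to-ring-3 counters are rebiased
     toward whichever dominated, so the spin heuristics further down react to
     recent behaviour rather than the whole VM lifetime.) */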
744 if (++pGVCpu->vmm.s.cR0Halts & 0xff)
745 { /* likely */ }
746 else if (pGVCpu->vmm.s.cR0HaltsSucceeded > pGVCpu->vmm.s.cR0HaltsToRing3)
747 {
748 pGVCpu->vmm.s.cR0HaltsSucceeded = 2;
749 pGVCpu->vmm.s.cR0HaltsToRing3 = 0;
750 }
751 else
752 {
753 pGVCpu->vmm.s.cR0HaltsSucceeded = 0;
754 pGVCpu->vmm.s.cR0HaltsToRing3 = 2;
755 }
756
757 /*
758 * Flags that make us go to ring-3.
759 */
760 uint32_t const fVmFFs = VM_FF_TM_VIRTUAL_SYNC | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA
761 | VM_FF_DBGF | VM_FF_REQUEST | VM_FF_CHECK_VM_STATE
762 | VM_FF_RESET | VM_FF_EMT_RENDEZVOUS | VM_FF_PGM_NEED_HANDY_PAGES
763 | VM_FF_PGM_NO_MEMORY | VM_FF_DEBUG_SUSPEND;
764 uint64_t const fCpuFFs = VMCPU_FF_TIMER | VMCPU_FF_PDM_CRITSECT | VMCPU_FF_IEM
765 | VMCPU_FF_REQUEST | VMCPU_FF_DBGF | VMCPU_FF_HM_UPDATE_CR3
766 | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
767 | VMCPU_FF_TO_R3 | VMCPU_FF_IOM;
768
769 /*
770 * Check preconditions.
771 */
772 unsigned const uMWait = EMMonitorWaitIsActive(pGVCpu);
773 CPUMINTERRUPTIBILITY const enmInterruptibility = CPUMGetGuestInterruptibility(pGVCpu);
774 if ( pGVCpu->vmm.s.fMayHaltInRing0
775 && !TRPMHasTrap(pGVCpu)
776 && ( enmInterruptibility == CPUMINTERRUPTIBILITY_UNRESTRAINED
777 || uMWait > 1))
778 {
779 if ( !VM_FF_IS_ANY_SET(pGVM, fVmFFs)
780 && !VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs))
781 {
782 /*
783 * Interrupts pending already?
784 */
785 if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC))
786 APICUpdatePendingInterrupts(pGVCpu);
787
788 /*
789 * Flags that wake up from the halted state.
790 */
791 uint64_t const fIntMask = VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_INTERRUPT_NESTED_GUEST
792 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT;
793
794 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask))
795 return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility);
796 ASMNopPause();
797
798 /*
799 * Check out how long till the next timer event.
800 */
801 uint64_t u64Delta;
802 uint64_t u64GipTime = TMTimerPollGIP(pGVM, pGVCpu, &u64Delta);
803
804 if ( !VM_FF_IS_ANY_SET(pGVM, fVmFFs)
805 && !VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs))
806 {
807 if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC))
808 APICUpdatePendingInterrupts(pGVCpu);
809
810 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask))
811 return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility);
812
813 /*
814 * Wait if there is enough time to the next timer event.
815 */
816 if (u64Delta >= pGVCpu->vmm.s.cNsSpinBlockThreshold)
817 {
818 /* If there are few other CPU cores around, we will procrastinate a
819 little before going to sleep, hoping for some device raising an
820 interrupt or similar. Though, the best thing here would be to
821 dynamically adjust the spin count according to its usefulness or
822 something... */
823 if ( pGVCpu->vmm.s.cR0HaltsSucceeded > pGVCpu->vmm.s.cR0HaltsToRing3
824 && RTMpGetOnlineCount() >= 4)
825 {
826 /** @todo Figure out how we can skip this if it hasn't helped recently...
827 * @bugref{9172#c12} */
828 uint32_t cSpinLoops = 42;
829 while (cSpinLoops-- > 0)
830 {
831 ASMNopPause();
832 if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC))
833 APICUpdatePendingInterrupts(pGVCpu);
834 ASMNopPause();
835 if (VM_FF_IS_ANY_SET(pGVM, fVmFFs))
836 {
837 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3FromSpin);
838 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3);
839 return VINF_EM_HALT;
840 }
841 ASMNopPause();
842 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs))
843 {
844 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3FromSpin);
845 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3);
846 return VINF_EM_HALT;
847 }
848 ASMNopPause();
849 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask))
850 {
851 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltExecFromSpin);
852 return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility);
853 }
854 ASMNopPause();
855 }
856 }
857
858 /*
859 * We have to set the state to VMCPUSTATE_STARTED_HALTED here so ring-3
860 * knows when to notify us (cannot access VMINTUSERPERVMCPU::fWait from here).
861 * After changing the state we must recheck the force flags of course.
862 */
863 if (VMCPU_CMPXCHG_STATE(pGVCpu, VMCPUSTATE_STARTED_HALTED, VMCPUSTATE_STARTED))
864 {
865 if ( !VM_FF_IS_ANY_SET(pGVM, fVmFFs)
866 && !VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs))
867 {
868 if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC))
869 APICUpdatePendingInterrupts(pGVCpu);
870
871 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask))
872 {
873 VMCPU_CMPXCHG_STATE(pGVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_HALTED);
874 return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility);
875 }
876
877 /* Okay, block! */
878 uint64_t const u64StartSchedHalt = RTTimeNanoTS();
879 int rc = GVMMR0SchedHalt(pGVM, pGVCpu, u64GipTime);
880 uint64_t const u64EndSchedHalt = RTTimeNanoTS();
881 uint64_t const cNsElapsedSchedHalt = u64EndSchedHalt - u64StartSchedHalt;
882 Log10(("vmmR0DoHalt: CPU%d: halted %llu ns\n", pGVCpu->idCpu, cNsElapsedSchedHalt));
883
884 VMCPU_CMPXCHG_STATE(pGVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_HALTED);
885 STAM_REL_PROFILE_ADD_PERIOD(&pGVCpu->vmm.s.StatR0HaltBlock, cNsElapsedSchedHalt);
886 if ( rc == VINF_SUCCESS
887 || rc == VERR_INTERRUPTED)
888 {
889 /* Keep some stats like ring-3 does. */
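 /* (The +/-50000 ns bounds below classify wakeups more than ~50 microseconds
     past or before the requested GIP time as overslept resp. insomnia.) */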
890 int64_t const cNsOverslept = u64EndSchedHalt - u64GipTime;
891 if (cNsOverslept > 50000)
892 STAM_REL_PROFILE_ADD_PERIOD(&pGVCpu->vmm.s.StatR0HaltBlockOverslept, cNsOverslept);
893 else if (cNsOverslept < -50000)
894 STAM_REL_PROFILE_ADD_PERIOD(&pGVCpu->vmm.s.StatR0HaltBlockInsomnia, cNsElapsedSchedHalt);
895 else
896 STAM_REL_PROFILE_ADD_PERIOD(&pGVCpu->vmm.s.StatR0HaltBlockOnTime, cNsElapsedSchedHalt);
897
898 /*
899 * Recheck whether we can resume execution or have to go to ring-3.
900 */
901 if ( !VM_FF_IS_ANY_SET(pGVM, fVmFFs)
902 && !VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs))
903 {
904 if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC))
905 APICUpdatePendingInterrupts(pGVCpu);
906 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask))
907 {
908 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltExecFromBlock);
909 return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility);
910 }
911 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3PostNoInt);
912 Log12(("vmmR0DoHalt: CPU%d post #2 - No pending interrupt\n", pGVCpu->idCpu));
913 }
914 else
915 {
916 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3PostPendingFF);
917 Log12(("vmmR0DoHalt: CPU%d post #1 - Pending FF\n", pGVCpu->idCpu));
918 }
919 }
920 else
921 {
922 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3Other);
923 Log12(("vmmR0DoHalt: CPU%d GVMMR0SchedHalt failed: %Rrc\n", pGVCpu->idCpu, rc));
924 }
925 }
926 else
927 {
928 VMCPU_CMPXCHG_STATE(pGVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_HALTED);
929 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3PendingFF);
930 Log12(("vmmR0DoHalt: CPU%d failed #5 - Pending FF\n", pGVCpu->idCpu));
931 }
932 }
933 else
934 {
935 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3Other);
936 Log12(("vmmR0DoHalt: CPU%d failed #4 - enmState=%d\n", pGVCpu->idCpu, VMCPU_GET_STATE(pGVCpu)));
937 }
938 }
939 else
940 {
941 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3SmallDelta);
942 Log12(("vmmR0DoHalt: CPU%d failed #3 - delta too small: %RU64\n", pGVCpu->idCpu, u64Delta));
943 }
944 }
945 else
946 {
947 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3PendingFF);
948 Log12(("vmmR0DoHalt: CPU%d failed #2 - Pending FF\n", pGVCpu->idCpu));
949 }
950 }
951 else
952 {
953 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3PendingFF);
954 Log12(("vmmR0DoHalt: CPU%d failed #1 - Pending FF\n", pGVCpu->idCpu));
955 }
956 }
957 else
958 {
959 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3Other);
960 Log12(("vmmR0DoHalt: CPU%d failed #0 - fMayHaltInRing0=%d TRPMHasTrap=%d enmInt=%d uMWait=%u\n",
961 pGVCpu->idCpu, pGVCpu->vmm.s.fMayHaltInRing0, TRPMHasTrap(pGVCpu), enmInterruptibility, uMWait));
962 }
963
964 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3);
965 return VINF_EM_HALT;
966}
967
968
969/**
970 * VMM ring-0 thread-context callback.
971 *
972 * This does common HM state updating and calls the HM-specific thread-context
973 * callback.
974 *
975 * This is used together with RTThreadCtxHookCreate() on platforms which
976 * support it, and directly from VMMR0EmtPrepareForBlocking() and
977 * VMMR0EmtResumeAfterBlocking() on platforms which don't.
978 *
979 * @param enmEvent The thread-context event.
980 * @param pvUser Opaque pointer to the VMCPU.
981 *
982 * @thread EMT(pvUser)
983 */
984static DECLCALLBACK(void) vmmR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, void *pvUser)
985{
986 PVMCPUCC pVCpu = (PVMCPUCC)pvUser;
987
988 switch (enmEvent)
989 {
990 case RTTHREADCTXEVENT_IN:
991 {
992 /*
993 * Linux may call us with preemption enabled (really!) but technically we
994 * cannot get preempted here, otherwise we end up in an infinite recursion
995 * scenario (i.e. preempted in resume hook -> preempt hook -> resume hook...
996 * ad infinitum). Let's just disable preemption for now...
997 */
998 /** @todo r=bird: I don't believe the above. The linux code is clearly enabling
999 * preemption after doing the callout (one or two functions up the
1000 * call chain). */
1001 /** @todo r=ramshankar: See @bugref{5313#c30}. */
1002 RTTHREADPREEMPTSTATE ParanoidPreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
1003 RTThreadPreemptDisable(&ParanoidPreemptState);
1004
1005 /* We need to update the VCPU <-> host CPU mapping. */
1006 RTCPUID idHostCpu;
1007 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
1008 pVCpu->iHostCpuSet = iHostCpuSet;
1009 ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
1010
1011 /* In the very unlikely event that the GIP delta for the CPU we're
1012 rescheduled onto needs calculating, try to force a return to ring-3.
1013 We unfortunately cannot do the measurements right here. */
1014 if (RT_LIKELY(!SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
1015 { /* likely */ }
1016 else
1017 VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
1018
1019 /* Invoke the HM-specific thread-context callback. */
1020 HMR0ThreadCtxCallback(enmEvent, pvUser);
1021
1022 /* Restore preemption. */
1023 RTThreadPreemptRestore(&ParanoidPreemptState);
1024 break;
1025 }
1026
1027 case RTTHREADCTXEVENT_OUT:
1028 {
1029 /* Invoke the HM-specific thread-context callback. */
1030 HMR0ThreadCtxCallback(enmEvent, pvUser);
1031
1032 /*
1033 * Sigh. See VMMGetCpu() used by VMCPU_ASSERT_EMT(). We cannot let several VCPUs
1034 * have the same host CPU associated with it.
1035 */
1036 pVCpu->iHostCpuSet = UINT32_MAX;
1037 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1038 break;
1039 }
1040
1041 default:
1042 /* Invoke the HM-specific thread-context callback. */
1043 HMR0ThreadCtxCallback(enmEvent, pvUser);
1044 break;
1045 }
1046}
1047
1048
1049/**
1050 * Creates thread switching hook for the current EMT thread.
1051 *
1052 * This is called by GVMMR0CreateVM and GVMMR0RegisterVCpu. If the host
1053 * platform does not implement switcher hooks, no hook will be created and the
1054 * member is set to NIL_RTTHREADCTXHOOK.
1055 *
1056 * @returns VBox status code.
1057 * @param pVCpu The cross context virtual CPU structure.
1058 * @thread EMT(pVCpu)
1059 */
1060VMMR0_INT_DECL(int) VMMR0ThreadCtxHookCreateForEmt(PVMCPUCC pVCpu)
1061{
1062 VMCPU_ASSERT_EMT(pVCpu);
1063 Assert(pVCpu->vmmr0.s.hCtxHook == NIL_RTTHREADCTXHOOK);
1064
1065#if 1 /* To disable this stuff change to zero. */
1066 int rc = RTThreadCtxHookCreate(&pVCpu->vmmr0.s.hCtxHook, 0, vmmR0ThreadCtxCallback, pVCpu);
1067 if (RT_SUCCESS(rc))
1068 {
1069 pVCpu->pGVM->vmm.s.fIsUsingContextHooks = true;
1070 return rc;
1071 }
1072#else
1073 RT_NOREF(vmmR0ThreadCtxCallback);
1074 int rc = VERR_NOT_SUPPORTED;
1075#endif
1076
1077 pVCpu->vmmr0.s.hCtxHook = NIL_RTTHREADCTXHOOK;
1078 pVCpu->pGVM->vmm.s.fIsUsingContextHooks = false;
1079 if (rc == VERR_NOT_SUPPORTED)
1080 return VINF_SUCCESS;
1081
1082 LogRelMax(32, ("RTThreadCtxHookCreate failed! rc=%Rrc pVCpu=%p idCpu=%RU32\n", rc, pVCpu, pVCpu->idCpu));
1083 return VINF_SUCCESS; /* Just ignore it, we can live without context hooks. */
1084}
1085
1086
1087/**
1088 * Destroys the thread switching hook for the specified VCPU.
1089 *
1090 * @param pVCpu The cross context virtual CPU structure.
1091 * @remarks Can be called from any thread.
1092 */
1093VMMR0_INT_DECL(void) VMMR0ThreadCtxHookDestroyForEmt(PVMCPUCC pVCpu)
1094{
1095 int rc = RTThreadCtxHookDestroy(pVCpu->vmmr0.s.hCtxHook);
1096 AssertRC(rc);
1097 pVCpu->vmmr0.s.hCtxHook = NIL_RTTHREADCTXHOOK;
1098}
1099
1100
1101/**
1102 * Disables the thread switching hook for this VCPU (if we got one).
1103 *
1104 * @param pVCpu The cross context virtual CPU structure.
1105 * @thread EMT(pVCpu)
1106 *
1107 * @remarks This also clears GVMCPU::idHostCpu, so the mapping is invalid after
1108 * this call. This means you have to be careful with what you do!
1109 */
1110VMMR0_INT_DECL(void) VMMR0ThreadCtxHookDisable(PVMCPUCC pVCpu)
1111{
1112 /*
1113 * Clear the VCPU <-> host CPU mapping as we've left HM context.
1114 * @bugref{7726#c19} explains the need for this trick:
1115 *
1116 * VMXR0CallRing3Callback/SVMR0CallRing3Callback &
1117 * hmR0VmxLeaveSession/hmR0SvmLeaveSession disables context hooks during
1118 * longjmp & normal return to ring-3, which opens a window where we may be
1119 * rescheduled without changing GVMCPUID::idHostCpu and cause confusion if
1120 * the CPU starts executing a different EMT. Both functions first disable
1121 * preemption and then call HMR0LeaveCpu, which invalidates idHostCpu, leaving
1122 * an opening for getting preempted.
1123 */
1124 /** @todo Make HM not need this API! Then we could leave the hooks enabled
1125 * all the time. */
1126
1127 /*
1128 * Disable the context hook, if we got one.
1129 */
1130 if (pVCpu->vmmr0.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1131 {
1132 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1133 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1134 int rc = RTThreadCtxHookDisable(pVCpu->vmmr0.s.hCtxHook);
1135 AssertRC(rc);
1136 }
1137}
1138
1139
1140/**
1141 * Internal version of VMMR0ThreadCtxHooksAreRegistered.
1142 *
1143 * @returns true if registered, false otherwise.
1144 * @param pVCpu The cross context virtual CPU structure.
1145 */
1146DECLINLINE(bool) vmmR0ThreadCtxHookIsEnabled(PVMCPUCC pVCpu)
1147{
1148 return RTThreadCtxHookIsEnabled(pVCpu->vmmr0.s.hCtxHook);
1149}
1150
1151
1152/**
1153 * Whether thread-context hooks are registered for this VCPU.
1154 *
1155 * @returns true if registered, false otherwise.
1156 * @param pVCpu The cross context virtual CPU structure.
1157 */
1158VMMR0_INT_DECL(bool) VMMR0ThreadCtxHookIsEnabled(PVMCPUCC pVCpu)
1159{
1160 return vmmR0ThreadCtxHookIsEnabled(pVCpu);
1161}
1162
1163
1164/**
1165 * Returns the ring-0 release logger instance.
1166 *
1167 * @returns Pointer to release logger, NULL if not configured.
1168 * @param pVCpu The cross context virtual CPU structure of the caller.
1169 * @thread EMT(pVCpu)
1170 */
1171VMMR0_INT_DECL(PRTLOGGER) VMMR0GetReleaseLogger(PVMCPUCC pVCpu)
1172{
1173 return pVCpu->vmmr0.s.u.s.RelLogger.pLogger;
1174}
1175
1176
1177#ifdef VBOX_WITH_STATISTICS
1178/**
1179 * Record return code statistics
1180 * @param pVM The cross context VM structure.
1181 * @param pVCpu The cross context virtual CPU structure.
1182 * @param rc The status code.
1183 */
1184static void vmmR0RecordRC(PVMCC pVM, PVMCPUCC pVCpu, int rc)
1185{
1186 /*
1187 * Collect statistics.
1188 */
1189 switch (rc)
1190 {
1191 case VINF_SUCCESS:
1192 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetNormal);
1193 break;
1194 case VINF_EM_RAW_INTERRUPT:
1195 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterrupt);
1196 break;
1197 case VINF_EM_RAW_INTERRUPT_HYPER:
1198 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptHyper);
1199 break;
1200 case VINF_EM_RAW_GUEST_TRAP:
1201 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGuestTrap);
1202 break;
1203 case VINF_EM_RAW_RING_SWITCH:
1204 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitch);
1205 break;
1206 case VINF_EM_RAW_RING_SWITCH_INT:
1207 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitchInt);
1208 break;
1209 case VINF_EM_RAW_STALE_SELECTOR:
1210 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetStaleSelector);
1211 break;
1212 case VINF_EM_RAW_IRET_TRAP:
1213 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIRETTrap);
1214 break;
1215 case VINF_IOM_R3_IOPORT_READ:
1216 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIORead);
1217 break;
1218 case VINF_IOM_R3_IOPORT_WRITE:
1219 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOWrite);
1220 break;
1221 case VINF_IOM_R3_IOPORT_COMMIT_WRITE:
1222 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOCommitWrite);
1223 break;
1224 case VINF_IOM_R3_MMIO_READ:
1225 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIORead);
1226 break;
1227 case VINF_IOM_R3_MMIO_WRITE:
1228 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOWrite);
1229 break;
1230 case VINF_IOM_R3_MMIO_COMMIT_WRITE:
1231 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOCommitWrite);
1232 break;
1233 case VINF_IOM_R3_MMIO_READ_WRITE:
1234 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOReadWrite);
1235 break;
1236 case VINF_PATM_HC_MMIO_PATCH_READ:
1237 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchRead);
1238 break;
1239 case VINF_PATM_HC_MMIO_PATCH_WRITE:
1240 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchWrite);
1241 break;
1242 case VINF_CPUM_R3_MSR_READ:
1243 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMSRRead);
1244 break;
1245 case VINF_CPUM_R3_MSR_WRITE:
1246 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMSRWrite);
1247 break;
1248 case VINF_EM_RAW_EMULATE_INSTR:
1249 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetEmulate);
1250 break;
1251 case VINF_PATCH_EMULATE_INSTR:
1252 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchEmulate);
1253 break;
1254 case VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT:
1255 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetLDTFault);
1256 break;
1257 case VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT:
1258 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGDTFault);
1259 break;
1260 case VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT:
1261 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIDTFault);
1262 break;
1263 case VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT:
1264 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTSSFault);
1265 break;
1266 case VINF_CSAM_PENDING_ACTION:
1267 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCSAMTask);
1268 break;
1269 case VINF_PGM_SYNC_CR3:
1270 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetSyncCR3);
1271 break;
1272 case VINF_PATM_PATCH_INT3:
1273 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchInt3);
1274 break;
1275 case VINF_PATM_PATCH_TRAP_PF:
1276 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchPF);
1277 break;
1278 case VINF_PATM_PATCH_TRAP_GP:
1279 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchGP);
1280 break;
1281 case VINF_PATM_PENDING_IRQ_AFTER_IRET:
1282 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchIretIRQ);
1283 break;
1284 case VINF_EM_RESCHEDULE_REM:
1285 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRescheduleREM);
1286 break;
1287 case VINF_EM_RAW_TO_R3:
1288 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Total);
1289 if (VM_FF_IS_SET(pVM, VM_FF_TM_VIRTUAL_SYNC))
1290 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3TMVirt);
1291 else if (VM_FF_IS_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES))
1292 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3HandyPages);
1293 else if (VM_FF_IS_SET(pVM, VM_FF_PDM_QUEUES))
1294 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3PDMQueues);
1295 else if (VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
1296 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Rendezvous);
1297 else if (VM_FF_IS_SET(pVM, VM_FF_PDM_DMA))
1298 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3DMA);
1299 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TIMER))
1300 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Timer);
1301 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PDM_CRITSECT))
1302 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3CritSect);
1303 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TO_R3))
1304 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3FF);
1305 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
1306 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Iem);
1307 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IOM))
1308 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Iom);
1309 else
1310 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Unknown);
1311 break;
1312
1313 case VINF_EM_RAW_TIMER_PENDING:
1314 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTimerPending);
1315 break;
1316 case VINF_EM_RAW_INTERRUPT_PENDING:
1317 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptPending);
1318 break;
1319 case VINF_PATM_DUPLICATE_FUNCTION:
1320 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPATMDuplicateFn);
1321 break;
1322 case VINF_PGM_POOL_FLUSH_PENDING:
1323 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMFlushPending);
1324 break;
1325 case VINF_EM_PENDING_REQUEST:
1326 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPendingRequest);
1327 break;
1328 case VINF_EM_HM_PATCH_TPR_INSTR:
1329 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchTPR);
1330 break;
1331 default:
1332 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMisc);
1333 break;
1334 }
1335}
1336#endif /* VBOX_WITH_STATISTICS */
1337
1338
1339/**
1340 * The Ring 0 entry point, called by the fast-ioctl path.
1341 *
1342 * @param pGVM The global (ring-0) VM structure.
1343 * @param pVMIgnored The cross context VM structure. The return code is
1344 * stored in pVM->vmm.s.iLastGZRc.
1345 * @param idCpu The Virtual CPU ID of the calling EMT.
1346 * @param enmOperation Which operation to execute.
1347 * @remarks Assume called with interrupts _enabled_.
1348 */
1349VMMR0DECL(void) VMMR0EntryFast(PGVM pGVM, PVMCC pVMIgnored, VMCPUID idCpu, VMMR0OPERATION enmOperation)
1350{
1351 RT_NOREF(pVMIgnored);
1352
1353 /*
1354 * Validation.
1355 */
1356 if ( idCpu < pGVM->cCpus
1357 && pGVM->cCpus == pGVM->cCpusUnsafe)
1358 { /*likely*/ }
1359 else
1360 {
1361 SUPR0Printf("VMMR0EntryFast: Bad idCpu=%#x cCpus=%#x cCpusUnsafe=%#x\n", idCpu, pGVM->cCpus, pGVM->cCpusUnsafe);
1362 return;
1363 }
1364
1365 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
1366 RTNATIVETHREAD const hNativeThread = RTThreadNativeSelf();
1367 if (RT_LIKELY( pGVCpu->hEMT == hNativeThread
1368 && pGVCpu->hNativeThreadR0 == hNativeThread))
1369 { /* likely */ }
1370 else
1371 {
1372 SUPR0Printf("VMMR0EntryFast: Bad thread idCpu=%#x hNativeSelf=%p pGVCpu->hEmt=%p pGVCpu->hNativeThreadR0=%p\n",
1373 idCpu, hNativeThread, pGVCpu->hEMT, pGVCpu->hNativeThreadR0);
1374 return;
1375 }
1376
1377 /*
1378 * Perform requested operation.
1379 */
1380 switch (enmOperation)
1381 {
1382 /*
1383 * Run guest code using the available hardware acceleration technology.
1384 */
1385 case VMMR0_DO_HM_RUN:
1386 {
1387 for (;;) /* hlt loop */
1388 {
1389 /*
1390 * Disable ring-3 calls & blocking till we've successfully entered HM.
1391 * Otherwise we sometimes end up blocking at the final Log4 statement
1392 * in VMXR0Enter, while still in a somewhat in-between state.
1393 */
1394 VMMRZCallRing3Disable(pGVCpu);
1395
1396 /*
1397 * Disable preemption.
1398 */
1399 Assert(!vmmR0ThreadCtxHookIsEnabled(pGVCpu));
1400 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
1401 RTThreadPreemptDisable(&PreemptState);
1402 pGVCpu->vmmr0.s.pPreemptState = &PreemptState;
1403
1404 /*
1405 * Get the host CPU identifiers, make sure they are valid and that
1406 * we've got a TSC delta for the CPU.
1407 */
1408 RTCPUID idHostCpu;
1409 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
1410 if (RT_LIKELY( iHostCpuSet < RTCPUSET_MAX_CPUS
1411 && SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
1412 {
1413 pGVCpu->iHostCpuSet = iHostCpuSet;
1414 ASMAtomicWriteU32(&pGVCpu->idHostCpu, idHostCpu);
1415
1416 /*
1417 * Update the periodic preemption timer if it's active.
1418 */
1419 if (pGVM->vmm.s.fUsePeriodicPreemptionTimers)
1420 GVMMR0SchedUpdatePeriodicPreemptionTimer(pGVM, pGVCpu->idHostCpu, TMCalcHostTimerFrequency(pGVM, pGVCpu));
1421
1422#ifdef VMM_R0_TOUCH_FPU
1423 /*
1424 * Make sure we've got the FPU state loaded so we don't need to clear
1425 * CR0.TS and get out of sync with the host kernel when loading the guest
1426 * FPU state. @ref sec_cpum_fpu (CPUM.cpp) and @bugref{4053}.
1427 */
1428 CPUMR0TouchHostFpu();
1429#endif
1430 int rc;
1431 bool fPreemptRestored = false;
1432 if (!HMR0SuspendPending())
1433 {
1434 /*
1435 * Enable the context switching hook.
1436 */
1437 if (pGVCpu->vmmr0.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1438 {
1439 Assert(!RTThreadCtxHookIsEnabled(pGVCpu->vmmr0.s.hCtxHook));
1440 int rc2 = RTThreadCtxHookEnable(pGVCpu->vmmr0.s.hCtxHook); AssertRC(rc2);
1441 }
1442
1443 /*
1444 * Enter HM context.
1445 */
1446 rc = HMR0Enter(pGVCpu);
1447 if (RT_SUCCESS(rc))
1448 {
1449 VMCPU_SET_STATE(pGVCpu, VMCPUSTATE_STARTED_HM);
1450
1451 /*
1452 * When preemption hooks are in place, enable preemption now that
1453 * we're in HM context.
1454 */
1455 if (vmmR0ThreadCtxHookIsEnabled(pGVCpu))
1456 {
1457 fPreemptRestored = true;
1458 pGVCpu->vmmr0.s.pPreemptState = NULL;
1459 RTThreadPreemptRestore(&PreemptState);
1460 }
1461 VMMRZCallRing3Enable(pGVCpu);
1462
1463 /*
1464 * Setup the longjmp machinery and execute guest code (calls HMR0RunGuestCode).
1465 */
1466 rc = vmmR0CallRing3SetJmp(&pGVCpu->vmmr0.s.AssertJmpBuf, HMR0RunGuestCode, pGVM, pGVCpu);
1467
1468 /*
1469 * Assert sanity on the way out. Using manual assertion code here as normal
1470 * assertions are going to panic the host since we're outside the setjmp/longjmp zone.
1471 */
1472 if (RT_UNLIKELY( VMCPU_GET_STATE(pGVCpu) != VMCPUSTATE_STARTED_HM
1473 && RT_SUCCESS_NP(rc)
1474 && rc != VERR_VMM_RING0_ASSERTION ))
1475 {
1476 pGVM->vmm.s.szRing0AssertMsg1[0] = '\0';
1477 RTStrPrintf(pGVM->vmm.s.szRing0AssertMsg2, sizeof(pGVM->vmm.s.szRing0AssertMsg2),
1478 "Got VMCPU state %d expected %d.\n", VMCPU_GET_STATE(pGVCpu), VMCPUSTATE_STARTED_HM);
1479 rc = VERR_VMM_WRONG_HM_VMCPU_STATE;
1480 }
1481#if 0
1482 /** @todo Get rid of this. HM shouldn't disable the context hook. */
1483 else if (RT_UNLIKELY(vmmR0ThreadCtxHookIsEnabled(pGVCpu)))
1484 {
1485 pGVM->vmm.s.szRing0AssertMsg1[0] = '\0';
1486 RTStrPrintf(pGVM->vmm.s.szRing0AssertMsg2, sizeof(pGVM->vmm.s.szRing0AssertMsg2),
1487 "Thread-context hooks still enabled! VCPU=%p Id=%u rc=%d.\n", pGVCpu, pGVCpu->idCpu, rc);
1488 rc = VERR_VMM_CONTEXT_HOOK_STILL_ENABLED;
1489 }
1490#endif
1491
1492 VMMRZCallRing3Disable(pGVCpu); /* Lazy bird: Simpler just disabling it again... */
1493 VMCPU_SET_STATE(pGVCpu, VMCPUSTATE_STARTED);
1494 }
1495 STAM_COUNTER_INC(&pGVM->vmm.s.StatRunGC);
1496
1497 /*
1498 * Invalidate the host CPU identifiers before we disable the context
1499 * hook / restore preemption.
1500 */
1501 pGVCpu->iHostCpuSet = UINT32_MAX;
1502 ASMAtomicWriteU32(&pGVCpu->idHostCpu, NIL_RTCPUID);
1503
1504 /*
1505 * Disable context hooks. Due to unresolved cleanup issues, we
1506 * cannot leave the hooks enabled when we return to ring-3.
1507 *
1508 * Note! At the moment HM may also have disabled the hook
1509 * when we get here, but the IPRT API handles that.
1510 */
1511 if (pGVCpu->vmmr0.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1512 RTThreadCtxHookDisable(pGVCpu->vmmr0.s.hCtxHook);
1513 }
1514 /*
1515 * The system is about to go into suspend mode; go back to ring 3.
1516 */
1517 else
1518 {
1519 pGVCpu->iHostCpuSet = UINT32_MAX;
1520 ASMAtomicWriteU32(&pGVCpu->idHostCpu, NIL_RTCPUID);
1521 rc = VINF_EM_RAW_INTERRUPT;
1522 }
1523
1524 /** @todo When HM stops messing with the context hook state, we'll disable
1525 * preemption again before the RTThreadCtxHookDisable call. */
1526 if (!fPreemptRestored)
1527 {
1528 pGVCpu->vmmr0.s.pPreemptState = NULL;
1529 RTThreadPreemptRestore(&PreemptState);
1530 }
1531
1532 pGVCpu->vmm.s.iLastGZRc = rc;
1533
1534 /* Fire dtrace probe and collect statistics. */
1535 VBOXVMM_R0_VMM_RETURN_TO_RING3_HM(pGVCpu, CPUMQueryGuestCtxPtr(pGVCpu), rc);
1536#ifdef VBOX_WITH_STATISTICS
1537 vmmR0RecordRC(pGVM, pGVCpu, rc);
1538#endif
1539 VMMRZCallRing3Enable(pGVCpu);
1540
1541 /*
1542 * If this is a halt.
1543 */
1544 if (rc != VINF_EM_HALT)
1545 { /* we're not in a hurry for a HLT, so prefer this path */ }
1546 else
1547 {
1548 pGVCpu->vmm.s.iLastGZRc = rc = vmmR0DoHalt(pGVM, pGVCpu);
1549 if (rc == VINF_SUCCESS)
1550 {
1551 pGVCpu->vmm.s.cR0HaltsSucceeded++;
1552 continue;
1553 }
1554 pGVCpu->vmm.s.cR0HaltsToRing3++;
1555 }
1556 }
1557 /*
1558 * Invalid CPU set index or TSC delta in need of measuring.
1559 */
1560 else
1561 {
1562 pGVCpu->vmmr0.s.pPreemptState = NULL;
1563 pGVCpu->iHostCpuSet = UINT32_MAX;
1564 ASMAtomicWriteU32(&pGVCpu->idHostCpu, NIL_RTCPUID);
1565 RTThreadPreemptRestore(&PreemptState);
1566
1567 VMMRZCallRing3Enable(pGVCpu);
1568
1569 if (iHostCpuSet < RTCPUSET_MAX_CPUS)
1570 {
1571 int rc = SUPR0TscDeltaMeasureBySetIndex(pGVM->pSession, iHostCpuSet, 0 /*fFlags*/,
1572 2 /*cMsWaitRetry*/, 5*RT_MS_1SEC /*cMsWaitThread*/,
1573 0 /*default cTries*/);
1574 if (RT_SUCCESS(rc) || rc == VERR_CPU_OFFLINE)
1575 pGVCpu->vmm.s.iLastGZRc = VINF_EM_RAW_TO_R3;
1576 else
1577 pGVCpu->vmm.s.iLastGZRc = rc;
1578 }
1579 else
1580 pGVCpu->vmm.s.iLastGZRc = VERR_INVALID_CPU_INDEX;
1581 }
1582 break;
1583 } /* halt loop. */
1584 break;
1585 }
1586
1587#ifdef VBOX_WITH_NEM_R0
1588# if defined(RT_ARCH_AMD64) && defined(RT_OS_WINDOWS)
1589 case VMMR0_DO_NEM_RUN:
1590 {
1591 /*
1592 * Setup the longjmp machinery and execute guest code (calls NEMR0RunGuestCode).
1593 */
1594# ifdef VBOXSTRICTRC_STRICT_ENABLED
1595 int rc = vmmR0CallRing3SetJmp2(&pGVCpu->vmmr0.s.AssertJmpBuf, (PFNVMMR0SETJMP2)NEMR0RunGuestCode, pGVM, idCpu);
1596# else
1597 int rc = vmmR0CallRing3SetJmp2(&pGVCpu->vmmr0.s.AssertJmpBuf, NEMR0RunGuestCode, pGVM, idCpu);
1598# endif
1599 STAM_COUNTER_INC(&pGVM->vmm.s.StatRunGC);
1600
1601 pGVCpu->vmm.s.iLastGZRc = rc;
1602
1603 /*
1604 * Fire dtrace probe and collect statistics.
1605 */
1606 VBOXVMM_R0_VMM_RETURN_TO_RING3_NEM(pGVCpu, CPUMQueryGuestCtxPtr(pGVCpu), rc);
1607# ifdef VBOX_WITH_STATISTICS
1608 vmmR0RecordRC(pGVM, pGVCpu, rc);
1609# endif
1610 break;
1611 }
1612# endif
1613#endif
1614
1615 /*
1616 * For profiling.
1617 */
1618 case VMMR0_DO_NOP:
1619 pGVCpu->vmm.s.iLastGZRc = VINF_SUCCESS;
1620 break;
1621
1622 /*
1623 * Shouldn't happen.
1624 */
1625 default:
1626 AssertMsgFailed(("%#x\n", enmOperation));
1627 pGVCpu->vmm.s.iLastGZRc = VERR_NOT_SUPPORTED;
1628 break;
1629 }
1630}
1631
1632
1633/**
1634 * Validates a session or VM session argument.
1635 *
1636 * @returns true / false accordingly.
1637 * @param pGVM The global (ring-0) VM structure.
1638 * @param pClaimedSession The session claim to validate.
1639 * @param pSession The session argument.
1640 */
1641DECLINLINE(bool) vmmR0IsValidSession(PGVM pGVM, PSUPDRVSESSION pClaimedSession, PSUPDRVSESSION pSession)
1642{
1643 /* This must be set! */
1644 if (!pSession)
1645 return false;
1646
1647 /* Only one out of the two. */
1648 if (pGVM && pClaimedSession)
1649 return false;
1650 if (pGVM)
1651 pClaimedSession = pGVM->pSession;
1652 return pClaimedSession == pSession;
1653}
1654
1655
1656/**
1657 * VMMR0EntryEx worker function, either called directly or whenever possible
1658 * called thru a longjmp so we can exit safely on failure.
1659 *
1660 * @returns VBox status code.
1661 * @param pGVM The global (ring-0) VM structure.
1662 * @param idCpu Virtual CPU ID argument. Must be NIL_VMCPUID if pVM
1663 * is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't
1664 * @param enmOperation Which operation to execute.
1665 * @param pReqHdr This points to a SUPVMMR0REQHDR packet. Optional.
1666 * The support driver validates this if it's present.
1667 * @param u64Arg Some simple constant argument.
1668 * @param pSession The session of the caller.
1669 *
1670 * @remarks Assume called with interrupts _enabled_.
1671 */
1672DECL_NO_INLINE(static, int) vmmR0EntryExWorker(PGVM pGVM, VMCPUID idCpu, VMMR0OPERATION enmOperation,
1673 PSUPVMMR0REQHDR pReqHdr, uint64_t u64Arg, PSUPDRVSESSION pSession)
1674{
1675 /*
1676 * Validate pGVM and idCpu for consistency and validity.
1677 */
1678 if (pGVM != NULL)
1679 {
1680 if (RT_LIKELY(((uintptr_t)pGVM & HOST_PAGE_OFFSET_MASK) == 0))
1681 { /* likely */ }
1682 else
1683 {
1684 SUPR0Printf("vmmR0EntryExWorker: Invalid pGVM=%p! (op=%d)\n", pGVM, enmOperation);
1685 return VERR_INVALID_POINTER;
1686 }
1687
1688 if (RT_LIKELY(idCpu == NIL_VMCPUID || idCpu < pGVM->cCpus))
1689 { /* likely */ }
1690 else
1691 {
1692 SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu %#x (cCpus=%#x)\n", idCpu, pGVM->cCpus);
1693 return VERR_INVALID_PARAMETER;
1694 }
1695
1696 if (RT_LIKELY( pGVM->enmVMState >= VMSTATE_CREATING
1697 && pGVM->enmVMState <= VMSTATE_TERMINATED
1698 && pGVM->pSession == pSession
1699 && pGVM->pSelf == pGVM))
1700 { /* likely */ }
1701 else
1702 {
1703 SUPR0Printf("vmmR0EntryExWorker: Invalid pGVM=%p:{.enmVMState=%d, .cCpus=%#x, .pSession=%p(==%p), .pSelf=%p(==%p)}! (op=%d)\n",
1704 pGVM, pGVM->enmVMState, pGVM->cCpus, pGVM->pSession, pSession, pGVM->pSelf, pGVM, enmOperation);
1705 return VERR_INVALID_POINTER;
1706 }
1707 }
1708 else if (RT_LIKELY(idCpu == NIL_VMCPUID))
1709 { /* likely */ }
1710 else
1711 {
1712 SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu=%u\n", idCpu);
1713 return VERR_INVALID_PARAMETER;
1714 }
1715
1716 /*
1717 * Process the request.
1718 */
1719 int rc;
1720 switch (enmOperation)
1721 {
1722 /*
1723 * GVM requests
1724 */
1725 case VMMR0_DO_GVMM_CREATE_VM:
1726 if (pGVM == NULL && u64Arg == 0 && idCpu == NIL_VMCPUID)
1727 rc = GVMMR0CreateVMReq((PGVMMCREATEVMREQ)pReqHdr, pSession);
1728 else
1729 rc = VERR_INVALID_PARAMETER;
1730 break;
1731
1732 case VMMR0_DO_GVMM_DESTROY_VM:
1733 if (pReqHdr == NULL && u64Arg == 0)
1734 rc = GVMMR0DestroyVM(pGVM);
1735 else
1736 rc = VERR_INVALID_PARAMETER;
1737 break;
1738
1739 case VMMR0_DO_GVMM_REGISTER_VMCPU:
1740 if (pGVM != NULL)
1741 rc = GVMMR0RegisterVCpu(pGVM, idCpu);
1742 else
1743 rc = VERR_INVALID_PARAMETER;
1744 break;
1745
1746 case VMMR0_DO_GVMM_DEREGISTER_VMCPU:
1747 if (pGVM != NULL)
1748 rc = GVMMR0DeregisterVCpu(pGVM, idCpu);
1749 else
1750 rc = VERR_INVALID_PARAMETER;
1751 break;
1752
1753 case VMMR0_DO_GVMM_REGISTER_WORKER_THREAD:
1754 if (pGVM != NULL && pReqHdr && pReqHdr->cbReq == sizeof(GVMMREGISTERWORKERTHREADREQ))
1755 rc = GVMMR0RegisterWorkerThread(pGVM, (GVMMWORKERTHREAD)(unsigned)u64Arg,
1756 ((PGVMMREGISTERWORKERTHREADREQ)(pReqHdr))->hNativeThreadR3);
1757 else
1758 rc = VERR_INVALID_PARAMETER;
1759 break;
1760
1761 case VMMR0_DO_GVMM_DEREGISTER_WORKER_THREAD:
1762 if (pGVM != NULL)
1763 rc = GVMMR0DeregisterWorkerThread(pGVM, (GVMMWORKERTHREAD)(unsigned)u64Arg);
1764 else
1765 rc = VERR_INVALID_PARAMETER;
1766 break;
1767
1768 case VMMR0_DO_GVMM_SCHED_HALT:
1769 if (pReqHdr)
1770 return VERR_INVALID_PARAMETER;
1771 rc = GVMMR0SchedHaltReq(pGVM, idCpu, u64Arg);
1772 break;
1773
1774 case VMMR0_DO_GVMM_SCHED_WAKE_UP:
1775 if (pReqHdr || u64Arg)
1776 return VERR_INVALID_PARAMETER;
1777 rc = GVMMR0SchedWakeUp(pGVM, idCpu);
1778 break;
1779
1780 case VMMR0_DO_GVMM_SCHED_POKE:
1781 if (pReqHdr || u64Arg)
1782 return VERR_INVALID_PARAMETER;
1783 rc = GVMMR0SchedPoke(pGVM, idCpu);
1784 break;
1785
1786 case VMMR0_DO_GVMM_SCHED_WAKE_UP_AND_POKE_CPUS:
1787 if (u64Arg)
1788 return VERR_INVALID_PARAMETER;
1789 rc = GVMMR0SchedWakeUpAndPokeCpusReq(pGVM, (PGVMMSCHEDWAKEUPANDPOKECPUSREQ)pReqHdr);
1790 break;
1791
1792 case VMMR0_DO_GVMM_SCHED_POLL:
1793 if (pReqHdr || u64Arg > 1)
1794 return VERR_INVALID_PARAMETER;
1795 rc = GVMMR0SchedPoll(pGVM, idCpu, !!u64Arg);
1796 break;
1797
1798 case VMMR0_DO_GVMM_QUERY_STATISTICS:
1799 if (u64Arg)
1800 return VERR_INVALID_PARAMETER;
1801 rc = GVMMR0QueryStatisticsReq(pGVM, (PGVMMQUERYSTATISTICSSREQ)pReqHdr, pSession);
1802 break;
1803
1804 case VMMR0_DO_GVMM_RESET_STATISTICS:
1805 if (u64Arg)
1806 return VERR_INVALID_PARAMETER;
1807 rc = GVMMR0ResetStatisticsReq(pGVM, (PGVMMRESETSTATISTICSSREQ)pReqHdr, pSession);
1808 break;
1809
1810 /*
1811 * Initialize the R0 part of a VM instance.
1812 */
1813 case VMMR0_DO_VMMR0_INIT:
1814 rc = vmmR0InitVM(pGVM, RT_LODWORD(u64Arg), RT_HIDWORD(u64Arg));
1815 break;
1816
1817 /*
1818 * Does EMT specific ring-0 init.
1819 */
1820 case VMMR0_DO_VMMR0_INIT_EMT:
1821 if (idCpu == NIL_VMCPUID)
1822 return VERR_INVALID_CPU_ID;
1823 rc = vmmR0InitVMEmt(pGVM, idCpu);
1824 break;
1825
1826 /*
1827 * Terminate the R0 part of a VM instance.
1828 */
1829 case VMMR0_DO_VMMR0_TERM:
1830 rc = VMMR0TermVM(pGVM, 0 /*idCpu*/);
1831 break;
1832
1833 /*
1834 * Update release or debug logger instances.
1835 */
1836 case VMMR0_DO_VMMR0_UPDATE_LOGGERS:
1837 if (idCpu == NIL_VMCPUID)
1838 return VERR_INVALID_CPU_ID;
1839 if (!(u64Arg & ~VMMR0UPDATELOGGER_F_VALID_MASK) && pReqHdr != NULL)
1840 rc = vmmR0UpdateLoggers(pGVM, idCpu /*idCpu*/, (PVMMR0UPDATELOGGERSREQ)pReqHdr, u64Arg);
1841 else
1842 return VERR_INVALID_PARAMETER;
1843 break;
1844
1845 /*
1846 * Log flusher thread.
1847 */
1848 case VMMR0_DO_VMMR0_LOG_FLUSHER:
1849 if (idCpu != NIL_VMCPUID)
1850 return VERR_INVALID_CPU_ID;
1851 if (pReqHdr == NULL && pGVM != NULL)
1852 rc = vmmR0LogFlusher(pGVM);
1853 else
1854 return VERR_INVALID_PARAMETER;
1855 break;
1856
1857 /*
1858 * Wait for the flush to finish with all the buffers for the given logger.
1859 */
1860 case VMMR0_DO_VMMR0_LOG_WAIT_FLUSHED:
1861 if (idCpu == NIL_VMCPUID)
1862 return VERR_INVALID_CPU_ID;
1863 if (u64Arg < VMMLOGGER_IDX_MAX && pReqHdr == NULL)
1864 rc = vmmR0LogWaitFlushed(pGVM, idCpu /*idCpu*/, (size_t)u64Arg);
1865 else
1866 return VERR_INVALID_PARAMETER;
1867 break;
1868
1869 /*
1870 * Attempt to enable HM mode and check the current setting.
1871 */
1872 case VMMR0_DO_HM_ENABLE:
1873 rc = HMR0EnableAllCpus(pGVM);
1874 break;
1875
1876 /*
1877 * Set up the hardware-accelerated session.
1878 */
1879 case VMMR0_DO_HM_SETUP_VM:
1880 rc = HMR0SetupVM(pGVM);
1881 break;
1882
1883 /*
1884 * PGM wrappers.
1885 */
1886 case VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES:
1887 if (idCpu == NIL_VMCPUID)
1888 return VERR_INVALID_CPU_ID;
1889 rc = PGMR0PhysAllocateHandyPages(pGVM, idCpu);
1890 break;
1891
1892 case VMMR0_DO_PGM_FLUSH_HANDY_PAGES:
1893 if (idCpu == NIL_VMCPUID)
1894 return VERR_INVALID_CPU_ID;
1895 rc = PGMR0PhysFlushHandyPages(pGVM, idCpu);
1896 break;
1897
1898 case VMMR0_DO_PGM_ALLOCATE_LARGE_PAGE:
1899 if (idCpu == NIL_VMCPUID)
1900 return VERR_INVALID_CPU_ID;
1901 rc = PGMR0PhysAllocateLargePage(pGVM, idCpu, u64Arg);
1902 break;
1903
1904 case VMMR0_DO_PGM_PHYS_SETUP_IOMMU:
1905 if (idCpu != 0)
1906 return VERR_INVALID_CPU_ID;
1907 rc = PGMR0PhysSetupIoMmu(pGVM);
1908 break;
1909
1910 case VMMR0_DO_PGM_POOL_GROW:
1911 if (idCpu == NIL_VMCPUID)
1912 return VERR_INVALID_CPU_ID;
1913 rc = PGMR0PoolGrow(pGVM, idCpu);
1914 break;
1915
1916 case VMMR0_DO_PGM_PHYS_HANDLER_INIT:
1917 if (idCpu != 0 || pReqHdr != NULL || u64Arg > UINT32_MAX)
1918 return VERR_INVALID_PARAMETER;
1919 rc = PGMR0PhysHandlerInitReqHandler(pGVM, (uint32_t)u64Arg);
1920 break;
1921
1922 case VMMR0_DO_PGM_PHYS_ALLOCATE_RAM_RANGE:
1923 if (idCpu != 0 || u64Arg)
1924 return VERR_INVALID_PARAMETER;
1925 rc = PGMR0PhysAllocateRamRangeReq(pGVM, (PPGMPHYSALLOCATERAMRANGEREQ)pReqHdr);
1926 break;
1927
1928 case VMMR0_DO_PGM_PHYS_MMIO2_REGISTER:
1929 if (idCpu != 0 || u64Arg)
1930 return VERR_INVALID_PARAMETER;
1931 rc = PGMR0PhysMmio2RegisterReq(pGVM, (PPGMPHYSMMIO2REGISTERREQ)pReqHdr);
1932 break;
1933
1934 case VMMR0_DO_PGM_PHYS_MMIO2_DEREGISTER:
1935 if (idCpu != 0 || u64Arg)
1936 return VERR_INVALID_PARAMETER;
1937 rc = PGMR0PhysMmio2DeregisterReq(pGVM, (PPGMPHYSMMIO2DEREGISTERREQ)pReqHdr);
1938 break;
1939
1940 case VMMR0_DO_PGM_PHYS_ROM_ALLOCATE_RANGE:
1941 if (idCpu != 0 || u64Arg)
1942 return VERR_INVALID_PARAMETER;
1943 rc = PGMR0PhysRomAllocateRangeReq(pGVM, (PPGMPHYSROMALLOCATERANGEREQ)pReqHdr);
1944 break;
1945
1946 /*
1947 * GMM wrappers.
1948 */
1949 case VMMR0_DO_GMM_INITIAL_RESERVATION:
1950 if (u64Arg)
1951 return VERR_INVALID_PARAMETER;
1952 rc = GMMR0InitialReservationReq(pGVM, idCpu, (PGMMINITIALRESERVATIONREQ)pReqHdr);
1953 break;
1954
1955 case VMMR0_DO_GMM_UPDATE_RESERVATION:
1956 if (u64Arg)
1957 return VERR_INVALID_PARAMETER;
1958 rc = GMMR0UpdateReservationReq(pGVM, idCpu, (PGMMUPDATERESERVATIONREQ)pReqHdr);
1959 break;
1960
1961 case VMMR0_DO_GMM_ALLOCATE_PAGES:
1962 if (u64Arg)
1963 return VERR_INVALID_PARAMETER;
1964 rc = GMMR0AllocatePagesReq(pGVM, idCpu, (PGMMALLOCATEPAGESREQ)pReqHdr);
1965 break;
1966
1967 case VMMR0_DO_GMM_FREE_PAGES:
1968 if (u64Arg)
1969 return VERR_INVALID_PARAMETER;
1970 rc = GMMR0FreePagesReq(pGVM, idCpu, (PGMMFREEPAGESREQ)pReqHdr);
1971 break;
1972
1973 case VMMR0_DO_GMM_FREE_LARGE_PAGE:
1974 if (u64Arg)
1975 return VERR_INVALID_PARAMETER;
1976 rc = GMMR0FreeLargePageReq(pGVM, idCpu, (PGMMFREELARGEPAGEREQ)pReqHdr);
1977 break;
1978
1979 case VMMR0_DO_GMM_QUERY_HYPERVISOR_MEM_STATS:
1980 if (u64Arg)
1981 return VERR_INVALID_PARAMETER;
1982 rc = GMMR0QueryHypervisorMemoryStatsReq((PGMMMEMSTATSREQ)pReqHdr);
1983 break;
1984
1985 case VMMR0_DO_GMM_QUERY_MEM_STATS:
1986 if (idCpu == NIL_VMCPUID)
1987 return VERR_INVALID_CPU_ID;
1988 if (u64Arg)
1989 return VERR_INVALID_PARAMETER;
1990 rc = GMMR0QueryMemoryStatsReq(pGVM, idCpu, (PGMMMEMSTATSREQ)pReqHdr);
1991 break;
1992
1993 case VMMR0_DO_GMM_BALLOONED_PAGES:
1994 if (u64Arg)
1995 return VERR_INVALID_PARAMETER;
1996 rc = GMMR0BalloonedPagesReq(pGVM, idCpu, (PGMMBALLOONEDPAGESREQ)pReqHdr);
1997 break;
1998
1999 case VMMR0_DO_GMM_MAP_UNMAP_CHUNK:
2000 if (u64Arg)
2001 return VERR_INVALID_PARAMETER;
2002 rc = GMMR0MapUnmapChunkReq(pGVM, (PGMMMAPUNMAPCHUNKREQ)pReqHdr);
2003 break;
2004
2005 case VMMR0_DO_GMM_REGISTER_SHARED_MODULE:
2006 if (idCpu == NIL_VMCPUID)
2007 return VERR_INVALID_CPU_ID;
2008 if (u64Arg)
2009 return VERR_INVALID_PARAMETER;
2010 rc = GMMR0RegisterSharedModuleReq(pGVM, idCpu, (PGMMREGISTERSHAREDMODULEREQ)pReqHdr);
2011 break;
2012
2013 case VMMR0_DO_GMM_UNREGISTER_SHARED_MODULE:
2014 if (idCpu == NIL_VMCPUID)
2015 return VERR_INVALID_CPU_ID;
2016 if (u64Arg)
2017 return VERR_INVALID_PARAMETER;
2018 rc = GMMR0UnregisterSharedModuleReq(pGVM, idCpu, (PGMMUNREGISTERSHAREDMODULEREQ)pReqHdr);
2019 break;
2020
2021 case VMMR0_DO_GMM_RESET_SHARED_MODULES:
2022 if (idCpu == NIL_VMCPUID)
2023 return VERR_INVALID_CPU_ID;
2024 if ( u64Arg
2025 || pReqHdr)
2026 return VERR_INVALID_PARAMETER;
2027 rc = GMMR0ResetSharedModules(pGVM, idCpu);
2028 break;
2029
2030#ifdef VBOX_WITH_PAGE_SHARING
2031 case VMMR0_DO_GMM_CHECK_SHARED_MODULES:
2032 {
2033 if (idCpu == NIL_VMCPUID)
2034 return VERR_INVALID_CPU_ID;
2035 if ( u64Arg
2036 || pReqHdr)
2037 return VERR_INVALID_PARAMETER;
2038 rc = GMMR0CheckSharedModules(pGVM, idCpu);
2039 break;
2040 }
2041#endif
2042
2043#if defined(VBOX_STRICT) && HC_ARCH_BITS == 64
2044 case VMMR0_DO_GMM_FIND_DUPLICATE_PAGE:
2045 if (u64Arg)
2046 return VERR_INVALID_PARAMETER;
2047 rc = GMMR0FindDuplicatePageReq(pGVM, (PGMMFINDDUPLICATEPAGEREQ)pReqHdr);
2048 break;
2049#endif
2050
2051 case VMMR0_DO_GMM_QUERY_STATISTICS:
2052 if (u64Arg)
2053 return VERR_INVALID_PARAMETER;
2054 rc = GMMR0QueryStatisticsReq(pGVM, (PGMMQUERYSTATISTICSSREQ)pReqHdr);
2055 break;
2056
2057 case VMMR0_DO_GMM_RESET_STATISTICS:
2058 if (u64Arg)
2059 return VERR_INVALID_PARAMETER;
2060 rc = GMMR0ResetStatisticsReq(pGVM, (PGMMRESETSTATISTICSSREQ)pReqHdr);
2061 break;
2062
2063 /*
2064 * A quick GCFGM mock-up.
2065 */
2066 /** @todo GCFGM with proper access control, ring-3 management interface and all that. */
2067 case VMMR0_DO_GCFGM_SET_VALUE:
2068 case VMMR0_DO_GCFGM_QUERY_VALUE:
2069 {
2070 if (pGVM || !pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
2071 return VERR_INVALID_PARAMETER;
2072 PGCFGMVALUEREQ pReq = (PGCFGMVALUEREQ)pReqHdr;
2073 if (pReq->Hdr.cbReq != sizeof(*pReq))
2074 return VERR_INVALID_PARAMETER;
2075 if (enmOperation == VMMR0_DO_GCFGM_SET_VALUE)
2076 {
2077 rc = GVMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
2078 //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
2079 // rc = GMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
2080 }
2081 else
2082 {
2083 rc = GVMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
2084 //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
2085 // rc = GMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
2086 }
2087 break;
2088 }
2089
2090 /*
2091 * PDM Wrappers.
2092 */
2093 case VMMR0_DO_PDM_DRIVER_CALL_REQ_HANDLER:
2094 {
2095 if (!pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
2096 return VERR_INVALID_PARAMETER;
2097 rc = PDMR0DriverCallReqHandler(pGVM, (PPDMDRIVERCALLREQHANDLERREQ)pReqHdr);
2098 break;
2099 }
2100
2101 case VMMR0_DO_PDM_DEVICE_CREATE:
2102 {
2103 if (!pReqHdr || u64Arg || idCpu != 0)
2104 return VERR_INVALID_PARAMETER;
2105 rc = PDMR0DeviceCreateReqHandler(pGVM, (PPDMDEVICECREATEREQ)pReqHdr);
2106 break;
2107 }
2108
2109 case VMMR0_DO_PDM_DEVICE_GEN_CALL:
2110 {
2111 if (!pReqHdr || u64Arg)
2112 return VERR_INVALID_PARAMETER;
2113 rc = PDMR0DeviceGenCallReqHandler(pGVM, (PPDMDEVICEGENCALLREQ)pReqHdr, idCpu);
2114 break;
2115 }
2116
2117 /** @todo Remove this once all devices have been converted to the new style! @bugref{9218} */
2118 case VMMR0_DO_PDM_DEVICE_COMPAT_SET_CRITSECT:
2119 {
2120 if (!pReqHdr || u64Arg || idCpu != 0)
2121 return VERR_INVALID_PARAMETER;
2122 rc = PDMR0DeviceCompatSetCritSectReqHandler(pGVM, (PPDMDEVICECOMPATSETCRITSECTREQ)pReqHdr);
2123 break;
2124 }
2125
2126 case VMMR0_DO_PDM_QUEUE_CREATE:
2127 {
2128 if (!pReqHdr || u64Arg || idCpu != 0)
2129 return VERR_INVALID_PARAMETER;
2130 rc = PDMR0QueueCreateReqHandler(pGVM, (PPDMQUEUECREATEREQ)pReqHdr);
2131 break;
2132 }
2133
2134 /*
2135 * Requests to the internal networking service.
2136 */
2137 case VMMR0_DO_INTNET_OPEN:
2138 {
2139 PINTNETOPENREQ pReq = (PINTNETOPENREQ)pReqHdr;
2140 if (u64Arg || !pReq || !vmmR0IsValidSession(pGVM, pReq->pSession, pSession) || idCpu != NIL_VMCPUID)
2141 return VERR_INVALID_PARAMETER;
2142 rc = IntNetR0OpenReq(pSession, pReq);
2143 break;
2144 }
2145
2146 case VMMR0_DO_INTNET_IF_CLOSE:
2147 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFCLOSEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2148 return VERR_INVALID_PARAMETER;
2149 rc = IntNetR0IfCloseReq(pSession, (PINTNETIFCLOSEREQ)pReqHdr);
2150 break;
2151
2152
2153 case VMMR0_DO_INTNET_IF_GET_BUFFER_PTRS:
2154 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFGETBUFFERPTRSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2155 return VERR_INVALID_PARAMETER;
2156 rc = IntNetR0IfGetBufferPtrsReq(pSession, (PINTNETIFGETBUFFERPTRSREQ)pReqHdr);
2157 break;
2158
2159 case VMMR0_DO_INTNET_IF_SET_PROMISCUOUS_MODE:
2160 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2161 return VERR_INVALID_PARAMETER;
2162 rc = IntNetR0IfSetPromiscuousModeReq(pSession, (PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr);
2163 break;
2164
2165 case VMMR0_DO_INTNET_IF_SET_MAC_ADDRESS:
2166 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFSETMACADDRESSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2167 return VERR_INVALID_PARAMETER;
2168 rc = IntNetR0IfSetMacAddressReq(pSession, (PINTNETIFSETMACADDRESSREQ)pReqHdr);
2169 break;
2170
2171 case VMMR0_DO_INTNET_IF_SET_ACTIVE:
2172 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFSETACTIVEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2173 return VERR_INVALID_PARAMETER;
2174 rc = IntNetR0IfSetActiveReq(pSession, (PINTNETIFSETACTIVEREQ)pReqHdr);
2175 break;
2176
2177 case VMMR0_DO_INTNET_IF_SEND:
2178 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2179 return VERR_INVALID_PARAMETER;
2180 rc = IntNetR0IfSendReq(pSession, (PINTNETIFSENDREQ)pReqHdr);
2181 break;
2182
2183 case VMMR0_DO_INTNET_IF_WAIT:
2184 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2185 return VERR_INVALID_PARAMETER;
2186 rc = IntNetR0IfWaitReq(pSession, (PINTNETIFWAITREQ)pReqHdr);
2187 break;
2188
2189 case VMMR0_DO_INTNET_IF_ABORT_WAIT:
2190 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2191 return VERR_INVALID_PARAMETER;
2192 rc = IntNetR0IfAbortWaitReq(pSession, (PINTNETIFABORTWAITREQ)pReqHdr);
2193 break;
2194
2195#if 0 //def VBOX_WITH_PCI_PASSTHROUGH
2196 /*
2197 * Requests to host PCI driver service.
2198 */
2199 case VMMR0_DO_PCIRAW_REQ:
2200 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PPCIRAWSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2201 return VERR_INVALID_PARAMETER;
2202 rc = PciRawR0ProcessReq(pGVM, pSession, (PPCIRAWSENDREQ)pReqHdr);
2203 break;
2204#endif
2205
2206 /*
2207 * NEM requests.
2208 */
2209#ifdef VBOX_WITH_NEM_R0
2210# if defined(RT_ARCH_AMD64) && defined(RT_OS_WINDOWS)
2211 case VMMR0_DO_NEM_INIT_VM:
2212 if (u64Arg || pReqHdr || idCpu != 0)
2213 return VERR_INVALID_PARAMETER;
2214 rc = NEMR0InitVM(pGVM);
2215 break;
2216
2217 case VMMR0_DO_NEM_INIT_VM_PART_2:
2218 if (u64Arg || pReqHdr || idCpu != 0)
2219 return VERR_INVALID_PARAMETER;
2220 rc = NEMR0InitVMPart2(pGVM);
2221 break;
2222
2223 case VMMR0_DO_NEM_MAP_PAGES:
2224 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2225 return VERR_INVALID_PARAMETER;
2226 rc = NEMR0MapPages(pGVM, idCpu);
2227 break;
2228
2229 case VMMR0_DO_NEM_UNMAP_PAGES:
2230 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2231 return VERR_INVALID_PARAMETER;
2232 rc = NEMR0UnmapPages(pGVM, idCpu);
2233 break;
2234
2235 case VMMR0_DO_NEM_EXPORT_STATE:
2236 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2237 return VERR_INVALID_PARAMETER;
2238 rc = NEMR0ExportState(pGVM, idCpu);
2239 break;
2240
2241 case VMMR0_DO_NEM_IMPORT_STATE:
2242 if (pReqHdr || idCpu == NIL_VMCPUID)
2243 return VERR_INVALID_PARAMETER;
2244 rc = NEMR0ImportState(pGVM, idCpu, u64Arg);
2245 break;
2246
2247 case VMMR0_DO_NEM_QUERY_CPU_TICK:
2248 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2249 return VERR_INVALID_PARAMETER;
2250 rc = NEMR0QueryCpuTick(pGVM, idCpu);
2251 break;
2252
2253 case VMMR0_DO_NEM_RESUME_CPU_TICK_ON_ALL:
2254 if (pReqHdr || idCpu == NIL_VMCPUID)
2255 return VERR_INVALID_PARAMETER;
2256 rc = NEMR0ResumeCpuTickOnAll(pGVM, idCpu, u64Arg);
2257 break;
2258
2259 case VMMR0_DO_NEM_UPDATE_STATISTICS:
2260 if (u64Arg || pReqHdr)
2261 return VERR_INVALID_PARAMETER;
2262 rc = NEMR0UpdateStatistics(pGVM, idCpu);
2263 break;
2264
2265# if 1 && defined(DEBUG_bird)
2266 case VMMR0_DO_NEM_EXPERIMENT:
2267 if (pReqHdr)
2268 return VERR_INVALID_PARAMETER;
2269 rc = NEMR0DoExperiment(pGVM, idCpu, u64Arg);
2270 break;
2271# endif
2272# endif
2273#endif
2274
2275 /*
2276 * IOM requests.
2277 */
2278 case VMMR0_DO_IOM_GROW_IO_PORTS:
2279 {
2280 if (pReqHdr || idCpu != 0)
2281 return VERR_INVALID_PARAMETER;
2282 rc = IOMR0IoPortGrowRegistrationTables(pGVM, u64Arg);
2283 break;
2284 }
2285
2286 case VMMR0_DO_IOM_GROW_IO_PORT_STATS:
2287 {
2288 if (pReqHdr || idCpu != 0)
2289 return VERR_INVALID_PARAMETER;
2290 rc = IOMR0IoPortGrowStatisticsTable(pGVM, u64Arg);
2291 break;
2292 }
2293
2294 case VMMR0_DO_IOM_GROW_MMIO_REGS:
2295 {
2296 if (pReqHdr || idCpu != 0)
2297 return VERR_INVALID_PARAMETER;
2298 rc = IOMR0MmioGrowRegistrationTables(pGVM, u64Arg);
2299 break;
2300 }
2301
2302 case VMMR0_DO_IOM_GROW_MMIO_STATS:
2303 {
2304 if (pReqHdr || idCpu != 0)
2305 return VERR_INVALID_PARAMETER;
2306 rc = IOMR0MmioGrowStatisticsTable(pGVM, u64Arg);
2307 break;
2308 }
2309
2310 case VMMR0_DO_IOM_SYNC_STATS_INDICES:
2311 {
2312 if (pReqHdr || idCpu != 0)
2313 return VERR_INVALID_PARAMETER;
2314 rc = IOMR0IoPortSyncStatisticsIndices(pGVM);
2315 if (RT_SUCCESS(rc))
2316 rc = IOMR0MmioSyncStatisticsIndices(pGVM);
2317 break;
2318 }
2319
2320 /*
2321 * DBGF requests.
2322 */
2323#ifdef VBOX_WITH_DBGF_TRACING
2324 case VMMR0_DO_DBGF_TRACER_CREATE:
2325 {
2326 if (!pReqHdr || u64Arg || idCpu != 0)
2327 return VERR_INVALID_PARAMETER;
2328 rc = DBGFR0TracerCreateReqHandler(pGVM, (PDBGFTRACERCREATEREQ)pReqHdr);
2329 break;
2330 }
2331
2332 case VMMR0_DO_DBGF_TRACER_CALL_REQ_HANDLER:
2333 {
2334 if (!pReqHdr || u64Arg)
2335 return VERR_INVALID_PARAMETER;
2336# if 0 /** @todo */
2337 rc = DBGFR0TracerGenCallReqHandler(pGVM, (PDBGFTRACERGENCALLREQ)pReqHdr, idCpu);
2338# else
2339 rc = VERR_NOT_IMPLEMENTED;
2340# endif
2341 break;
2342 }
2343#endif
2344
2345 case VMMR0_DO_DBGF_BP_INIT:
2346 {
2347 if (!pReqHdr || u64Arg || idCpu != 0)
2348 return VERR_INVALID_PARAMETER;
2349 rc = DBGFR0BpInitReqHandler(pGVM, (PDBGFBPINITREQ)pReqHdr);
2350 break;
2351 }
2352
2353 case VMMR0_DO_DBGF_BP_CHUNK_ALLOC:
2354 {
2355 if (!pReqHdr || u64Arg || idCpu != 0)
2356 return VERR_INVALID_PARAMETER;
2357 rc = DBGFR0BpChunkAllocReqHandler(pGVM, (PDBGFBPCHUNKALLOCREQ)pReqHdr);
2358 break;
2359 }
2360
2361 case VMMR0_DO_DBGF_BP_L2_TBL_CHUNK_ALLOC:
2362 {
2363 if (!pReqHdr || u64Arg || idCpu != 0)
2364 return VERR_INVALID_PARAMETER;
2365 rc = DBGFR0BpL2TblChunkAllocReqHandler(pGVM, (PDBGFBPL2TBLCHUNKALLOCREQ)pReqHdr);
2366 break;
2367 }
2368
2369 case VMMR0_DO_DBGF_BP_OWNER_INIT:
2370 {
2371 if (!pReqHdr || u64Arg || idCpu != 0)
2372 return VERR_INVALID_PARAMETER;
2373 rc = DBGFR0BpOwnerInitReqHandler(pGVM, (PDBGFBPOWNERINITREQ)pReqHdr);
2374 break;
2375 }
2376
2377 case VMMR0_DO_DBGF_BP_PORTIO_INIT:
2378 {
2379 if (!pReqHdr || u64Arg || idCpu != 0)
2380 return VERR_INVALID_PARAMETER;
2381 rc = DBGFR0BpPortIoInitReqHandler(pGVM, (PDBGFBPINITREQ)pReqHdr);
2382 break;
2383 }
2384
2385
2386 /*
2387 * TM requests.
2388 */
2389 case VMMR0_DO_TM_GROW_TIMER_QUEUE:
2390 {
2391 if (pReqHdr || idCpu == NIL_VMCPUID)
2392 return VERR_INVALID_PARAMETER;
2393 rc = TMR0TimerQueueGrow(pGVM, RT_HI_U32(u64Arg), RT_LO_U32(u64Arg));
2394 break;
2395 }
2396
2397 /*
2398 * For profiling.
2399 */
2400 case VMMR0_DO_NOP:
2401 case VMMR0_DO_SLOW_NOP:
2402 return VINF_SUCCESS;
2403
2404 /*
2405 * For testing Ring-0 APIs invoked in this environment.
2406 */
2407 case VMMR0_DO_TESTS:
2408 /** @todo make new test */
2409 return VINF_SUCCESS;
2410
2411 default:
2412 /*
2413 * We're returning VERR_NOT_SUPPORTED here so we've got something other
2414 * than -1, which the interrupt gate glue code might return.
2415 */
2416 Log(("operation %#x is not supported\n", enmOperation));
2417 return VERR_NOT_SUPPORTED;
2418 }
2419 return rc;
2420}
2421
2422
2423/**
2424 * This is just a longjmp wrapper function for VMMR0EntryEx calls.
2425 *
2426 * @returns VBox status code.
2427 * @param pvArgs The argument package (pointer to the GVMCPU).
2428 */
2429static DECLCALLBACK(int) vmmR0EntryExWrapper(void *pvArgs)
2430{
2431 PGVMCPU pGVCpu = (PGVMCPU)pvArgs;
2432 return vmmR0EntryExWorker(pGVCpu->vmmr0.s.pGVM,
2433 pGVCpu->vmmr0.s.idCpu,
2434 pGVCpu->vmmr0.s.enmOperation,
2435 pGVCpu->vmmr0.s.pReq,
2436 pGVCpu->vmmr0.s.u64Arg,
2437 pGVCpu->vmmr0.s.pSession);
2438}
2439
2440
2441/**
2442 * The Ring 0 entry point, called by the support library (SUP).
2443 *
2444 * @returns VBox status code.
2445 * @param pGVM The global (ring-0) VM structure.
2446 * @param pVM The cross context VM structure.
2447 * @param idCpu Virtual CPU ID argument. Must be NIL_VMCPUID if pVM
2448 * is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't.
2449 * @param enmOperation Which operation to execute.
2450 * @param pReq Pointer to the SUPVMMR0REQHDR packet. Optional.
2451 * @param u64Arg Some simple constant argument.
2452 * @param pSession The session of the caller.
2453 * @remarks Assume called with interrupts _enabled_.
2454 */
2455VMMR0DECL(int) VMMR0EntryEx(PGVM pGVM, PVMCC pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation,
2456 PSUPVMMR0REQHDR pReq, uint64_t u64Arg, PSUPDRVSESSION pSession)
2457{
2458 /*
2459 * Requests that should only happen on the EMT thread will be
2460 * wrapped in a setjmp so we can assert without causing too much trouble.
2461 */
2462 if ( pVM != NULL
2463 && pGVM != NULL
2464 && pVM == pGVM /** @todo drop pVM or pGVM */
2465 && idCpu < pGVM->cCpus
2466 && pGVM->pSession == pSession
2467 && pGVM->pSelf == pGVM
2468 && enmOperation != VMMR0_DO_GVMM_DESTROY_VM
2469 && enmOperation != VMMR0_DO_GVMM_REGISTER_VMCPU
2470 && enmOperation != VMMR0_DO_GVMM_SCHED_WAKE_UP /* idCpu is not caller but target. Sigh. */ /** @todo fix*/
2471 && enmOperation != VMMR0_DO_GVMM_SCHED_POKE /* idCpu is not caller but target. Sigh. */ /** @todo fix*/
2472 )
2473 {
2474 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2475 RTNATIVETHREAD hNativeThread = RTThreadNativeSelf();
2476 if (RT_LIKELY( pGVCpu->hEMT == hNativeThread
2477 && pGVCpu->hNativeThreadR0 == hNativeThread))
2478 {
2479 pGVCpu->vmmr0.s.pGVM = pGVM;
2480 pGVCpu->vmmr0.s.idCpu = idCpu;
2481 pGVCpu->vmmr0.s.enmOperation = enmOperation;
2482 pGVCpu->vmmr0.s.pReq = pReq;
2483 pGVCpu->vmmr0.s.u64Arg = u64Arg;
2484 pGVCpu->vmmr0.s.pSession = pSession;
2485 return vmmR0CallRing3SetJmpEx(&pGVCpu->vmmr0.s.AssertJmpBuf, vmmR0EntryExWrapper, pGVCpu,
2486 ((uintptr_t)u64Arg << 16) | (uintptr_t)enmOperation);
2487 }
2488 return VERR_VM_THREAD_NOT_EMT;
2489 }
2490 return vmmR0EntryExWorker(pGVM, idCpu, enmOperation, pReq, u64Arg, pSession);
2491}
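/*
 * Illustrative parameter-contract sketch (hypothetical direct call; in practice
 * ring-3 reaches this entry point thru the support driver, which supplies
 * pSession and validates pReq).  A per-vCPU request such as
 * VMMR0_DO_GVMM_SCHED_HALT is made on the EMT of that vCPU, passes pVM == pGVM
 * and uses only u64Arg, no request packet:
 *
 *      rc = VMMR0EntryEx(pGVM, pGVM, idCpu, VMMR0_DO_GVMM_SCHED_HALT,
 *                        NULL, u64ExpireGipTime, pGVM->pSession);
 *
 * Session-only requests like VMMR0_DO_GVMM_CREATE_VM instead pass pGVM = NULL,
 * idCpu = NIL_VMCPUID and a SUPVMMR0REQHDR based packet (see the dispatcher
 * above).  u64ExpireGipTime here is a placeholder name.
 */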
2492
2493
2494/*********************************************************************************************************************************
2495* EMT Blocking *
2496*********************************************************************************************************************************/
2497
2498/**
2499 * Checks whether we've armed the ring-0 long jump machinery.
2500 *
2501 * @returns @c true / @c false
2502 * @param pVCpu The cross context virtual CPU structure.
2503 * @thread EMT
2504 * @sa VMMIsLongJumpArmed
2505 */
2506VMMR0_INT_DECL(bool) VMMR0IsLongJumpArmed(PVMCPUCC pVCpu)
2507{
2508#ifdef RT_ARCH_X86
2509 return pVCpu->vmmr0.s.AssertJmpBuf.eip != 0;
2510#else
2511 return pVCpu->vmmr0.s.AssertJmpBuf.rip != 0;
2512#endif
2513}
2514
2515
2516/**
2517 * Locking helper that deals with HM context and checks if the thread can block.
2518 *
2519 * @returns VINF_SUCCESS if we can block. Returns @a rcBusy or
2520 * VERR_VMM_CANNOT_BLOCK if not able to block.
2521 * @param pVCpu The cross context virtual CPU structure of the calling
2522 * thread.
2523 * @param rcBusy What to return in case of a blocking problem. Will IPE
2524 * if VINF_SUCCESS and we cannot block.
2525 * @param pszCaller The caller (for logging problems).
2526 * @param pvLock The lock address (for logging problems).
2527 * @param pCtx Where to return context info for the resume call.
2528 * @thread EMT(pVCpu)
2529 */
2530VMMR0_INT_DECL(int) VMMR0EmtPrepareToBlock(PVMCPUCC pVCpu, int rcBusy, const char *pszCaller, void *pvLock,
2531 PVMMR0EMTBLOCKCTX pCtx)
2532{
2533 const char *pszMsg;
2534
2535 /*
2536 * Check that we are allowed to block.
2537 */
2538 if (RT_LIKELY(VMMRZCallRing3IsEnabled(pVCpu)))
2539 {
2540 /*
2541 * Are we in HM context and w/o a context hook? If so work the context hook.
2542 */
2543 if (pVCpu->idHostCpu != NIL_RTCPUID)
2544 {
2545 Assert(pVCpu->iHostCpuSet != UINT32_MAX);
2546
2547 if (pVCpu->vmmr0.s.hCtxHook == NIL_RTTHREADCTXHOOK)
2548 {
2549 vmmR0ThreadCtxCallback(RTTHREADCTXEVENT_OUT, pVCpu);
2550 if (pVCpu->vmmr0.s.pPreemptState)
2551 RTThreadPreemptRestore(pVCpu->vmmr0.s.pPreemptState);
2552
2553 pCtx->uMagic = VMMR0EMTBLOCKCTX_MAGIC;
2554 pCtx->fWasInHmContext = true;
2555 return VINF_SUCCESS;
2556 }
2557 }
2558
2559 if (RT_LIKELY(!pVCpu->vmmr0.s.pPreemptState))
2560 {
2561 /*
2562 * Not in HM context or we've got hooks, so just check that preemption
2563 * is enabled.
2564 */
2565 if (RT_LIKELY(RTThreadPreemptIsEnabled(NIL_RTTHREAD)))
2566 {
2567 pCtx->uMagic = VMMR0EMTBLOCKCTX_MAGIC;
2568 pCtx->fWasInHmContext = false;
2569 return VINF_SUCCESS;
2570 }
2571 pszMsg = "Preemption is disabled!";
2572 }
2573 else
2574 pszMsg = "Preemption state w/o HM state!";
2575 }
2576 else
2577 pszMsg = "Ring-3 calls are disabled!";
2578
2579 static uint32_t volatile s_cWarnings = 0;
2580 if (++s_cWarnings < 50)
2581 SUPR0Printf("VMMR0EmtPrepareToBlock: %s pvLock=%p pszCaller=%s rcBusy=%p\n", pszMsg, pvLock, pszCaller, rcBusy);
2582 pCtx->uMagic = VMMR0EMTBLOCKCTX_MAGIC_DEAD;
2583 pCtx->fWasInHmContext = false;
2584 return rcBusy != VINF_SUCCESS ? rcBusy : VERR_VMM_CANNOT_BLOCK;
2585}
2586
2587
2588/**
2589 * Counterpart to VMMR0EmtPrepareToBlock.
2590 *
2591 * @param pVCpu The cross context virtual CPU structure of the calling
2592 * thread.
2593 * @param pCtx The context structure used with VMMR0EmtPrepareToBlock.
2594 * @thread EMT(pVCpu)
2595 */
2596VMMR0_INT_DECL(void) VMMR0EmtResumeAfterBlocking(PVMCPUCC pVCpu, PVMMR0EMTBLOCKCTX pCtx)
2597{
2598 AssertReturnVoid(pCtx->uMagic == VMMR0EMTBLOCKCTX_MAGIC);
2599 if (pCtx->fWasInHmContext)
2600 {
2601 if (pVCpu->vmmr0.s.pPreemptState)
2602 RTThreadPreemptDisable(pVCpu->vmmr0.s.pPreemptState);
2603
2604 pCtx->fWasInHmContext = false;
2605 vmmR0ThreadCtxCallback(RTTHREADCTXEVENT_IN, pVCpu);
2606 }
2607 pCtx->uMagic = VMMR0EMTBLOCKCTX_MAGIC_DEAD;
2608}
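/*
 * Usage sketch for the two helpers above (hypothetical caller; hEventR0,
 * rcBusy and cMsTimeout are placeholders):
 *
 *      VMMR0EMTBLOCKCTX Ctx;
 *      int rc = VMMR0EmtPrepareToBlock(pVCpu, rcBusy, __FUNCTION__, (void *)hEventR0, &Ctx);
 *      if (RT_SUCCESS(rc))
 *      {
 *          rc = RTSemEventWait(hEventR0, cMsTimeout);
 *          VMMR0EmtResumeAfterBlocking(pVCpu, &Ctx);
 *      }
 *      else
 *          return rc;  // rcBusy or VERR_VMM_CANNOT_BLOCK - we must not block here.
 *
 * VMMR0EmtSignalSupEvent() further down is an in-tree instance of this pattern.
 */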
2609
2610
2611/**
2612 * Helper for waiting on an RTSEMEVENT, caller did VMMR0EmtPrepareToBlock.
2613 *
2614 * @returns VBox status code.
2615 * @retval VERR_THREAD_IS_TERMINATING
2616 * @retval VERR_TIMEOUT if we ended up waiting too long, either according to
2617 * @a cMsTimeout or to maximum wait values.
2618 *
2619 * @param pGVCpu The ring-0 virtual CPU structure.
2620 * @param fFlags VMMR0EMTWAIT_F_XXX.
2621 * @param hEvent The event to wait on.
2622 * @param cMsTimeout The timeout or RT_INDEFINITE_WAIT.
2623 */
2624VMMR0_INT_DECL(int) VMMR0EmtWaitEventInner(PGVMCPU pGVCpu, uint32_t fFlags, RTSEMEVENT hEvent, RTMSINTERVAL cMsTimeout)
2625{
2626 AssertReturn(pGVCpu->hEMT == RTThreadNativeSelf(), VERR_VM_THREAD_NOT_EMT);
2627
2628 /*
2629 * Note! Similar code is found in the PDM critical sections too.
2630 */
2631 uint64_t const nsStart = RTTimeNanoTS();
2632 uint64_t cNsMaxTotal = cMsTimeout == RT_INDEFINITE_WAIT
2633 ? RT_NS_5MIN : RT_MIN(RT_NS_5MIN, RT_NS_1MS_64 * cMsTimeout);
2634 uint32_t cMsMaxOne = RT_MS_5SEC;
2635 bool fNonInterruptible = false;
2636 for (;;)
2637 {
2638 /* Wait. */
2639 int rcWait = !fNonInterruptible
2640 ? RTSemEventWaitNoResume(hEvent, cMsMaxOne)
2641 : RTSemEventWait(hEvent, cMsMaxOne);
2642 if (RT_SUCCESS(rcWait))
2643 return rcWait;
2644
2645 if (rcWait == VERR_TIMEOUT || rcWait == VERR_INTERRUPTED)
2646 {
2647 uint64_t const cNsElapsed = RTTimeNanoTS() - nsStart;
2648
2649 /*
2650 * Check the thread termination status.
2651 */
2652 int const rcTerm = RTThreadQueryTerminationStatus(NIL_RTTHREAD);
2653 AssertMsg(rcTerm == VINF_SUCCESS || rcTerm == VERR_NOT_SUPPORTED || rcTerm == VINF_THREAD_IS_TERMINATING,
2654 ("rcTerm=%Rrc\n", rcTerm));
2655 if ( rcTerm == VERR_NOT_SUPPORTED
2656 && !fNonInterruptible
2657 && cNsMaxTotal > RT_NS_1MIN)
2658 cNsMaxTotal = RT_NS_1MIN;
2659
2660 /* We return immediately if it looks like the thread is terminating. */
2661 if (rcTerm == VINF_THREAD_IS_TERMINATING)
2662 return VERR_THREAD_IS_TERMINATING;
2663
2664 /* We may suppress VERR_INTERRUPTED if VMMR0EMTWAIT_F_TRY_SUPPRESS_INTERRUPTED was
2665 specified, otherwise we'll just return it. */
2666 if (rcWait == VERR_INTERRUPTED)
2667 {
2668 if (!(fFlags & VMMR0EMTWAIT_F_TRY_SUPPRESS_INTERRUPTED))
2669 return VERR_INTERRUPTED;
2670 if (!fNonInterruptible)
2671 {
2672 /* First time: Adjust down the wait parameters and make sure we get at least
2673 one non-interruptible wait before timing out. */
2674 fNonInterruptible = true;
2675 cMsMaxOne = 32;
2676 uint64_t const cNsLeft = cNsMaxTotal - cNsElapsed;
2677 if (cNsLeft > RT_NS_10SEC)
2678 cNsMaxTotal = cNsElapsed + RT_NS_10SEC;
2679 continue;
2680 }
2681 }
2682
2683 /* Check for timeout. */
2684 if (cNsElapsed > cNsMaxTotal)
2685 return VERR_TIMEOUT;
2686 }
2687 else
2688 return rcWait;
2689 }
2690 /* not reached */
2691}
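/*
 * Behaviour summary for VMMR0EmtWaitEventInner (descriptive only): the total
 * wait is capped at 5 minutes (less if cMsTimeout is shorter), carved into
 * interruptible 5 second waits.  A pending thread termination returns
 * VERR_THREAD_IS_TERMINATING immediately.  On VERR_INTERRUPTED with
 * VMMR0EMTWAIT_F_TRY_SUPPRESS_INTERRUPTED set, the loop switches to
 * non-interruptible 32 ms waits and limits the remaining time to 10 seconds,
 * guaranteeing at least one uninterrupted wait before giving up with
 * VERR_TIMEOUT.
 */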
2692
2693
2694/**
2695 * Helper for signalling an SUPSEMEVENT.
2696 *
2697 * This may temporarily leave the HM context if the host requires that for
2698 * signalling SUPSEMEVENT objects.
2699 *
2700 * @returns VBox status code (see VMMR0EmtPrepareToBlock)
2701 * @param pGVM The ring-0 VM structure.
2702 * @param pGVCpu The ring-0 virtual CPU structure.
2703 * @param hEvent The event to signal.
2704 */
2705VMMR0_INT_DECL(int) VMMR0EmtSignalSupEvent(PGVM pGVM, PGVMCPU pGVCpu, SUPSEMEVENT hEvent)
2706{
2707 AssertReturn(pGVCpu->hEMT == RTThreadNativeSelf(), VERR_VM_THREAD_NOT_EMT);
2708 if (RTSemEventIsSignalSafe())
2709 return SUPSemEventSignal(pGVM->pSession, hEvent);
2710
2711 VMMR0EMTBLOCKCTX Ctx;
2712 int rc = VMMR0EmtPrepareToBlock(pGVCpu, VINF_SUCCESS, __FUNCTION__, (void *)(uintptr_t)hEvent, &Ctx);
2713 if (RT_SUCCESS(rc))
2714 {
2715 rc = SUPSemEventSignal(pGVM->pSession, hEvent);
2716 VMMR0EmtResumeAfterBlocking(pGVCpu, &Ctx);
2717 }
2718 return rc;
2719}
2720
2721
2722/**
2723 * Helper for signalling an SUPSEMEVENT, variant supporting non-EMTs.
2724 *
2725 * This may temporarily leave the HM context if the host requires that for
2726 * signalling SUPSEMEVENT objects.
2727 *
2728 * @returns VBox status code (see VMMR0EmtPrepareToBlock)
2729 * @param pGVM The ring-0 VM structure.
2730 * @param hEvent The event to signal.
2731 */
2732VMMR0_INT_DECL(int) VMMR0EmtSignalSupEventByGVM(PGVM pGVM, SUPSEMEVENT hEvent)
2733{
2734 if (!RTSemEventIsSignalSafe())
2735 {
2736 PGVMCPU pGVCpu = GVMMR0GetGVCpuByGVMandEMT(pGVM, NIL_RTNATIVETHREAD);
2737 if (pGVCpu)
2738 {
2739 VMMR0EMTBLOCKCTX Ctx;
2740 int rc = VMMR0EmtPrepareToBlock(pGVCpu, VINF_SUCCESS, __FUNCTION__, (void *)(uintptr_t)hEvent, &Ctx);
2741 if (RT_SUCCESS(rc))
2742 {
2743 rc = SUPSemEventSignal(pGVM->pSession, hEvent);
2744 VMMR0EmtResumeAfterBlocking(pGVCpu, &Ctx);
2745 }
2746 return rc;
2747 }
2748 }
2749 return SUPSemEventSignal(pGVM->pSession, hEvent);
2750}
2751
2752
2753/*********************************************************************************************************************************
2754* Logging. *
2755*********************************************************************************************************************************/
2756
2757/**
2758 * VMMR0_DO_VMMR0_UPDATE_LOGGERS: Updates the EMT loggers for the VM.
2759 *
2760 * @returns VBox status code.
2761 * @param pGVM The global (ring-0) VM structure.
2762 * @param idCpu The ID of the calling EMT.
2763 * @param pReq The request data.
2764 * @param fFlags Flags, see VMMR0UPDATELOGGER_F_XXX.
2765 * @thread EMT(idCpu)
2766 */
2767static int vmmR0UpdateLoggers(PGVM pGVM, VMCPUID idCpu, PVMMR0UPDATELOGGERSREQ pReq, uint64_t fFlags)
2768{
2769 /*
2770 * Check sanity. First we require EMT to be calling us.
2771 */
2772 AssertReturn(idCpu < pGVM->cCpus, VERR_INVALID_CPU_ID);
2773 AssertReturn(pGVM->aCpus[idCpu].hEMT == RTThreadNativeSelf(), VERR_INVALID_CPU_ID);
2774
2775 AssertReturn(pReq->Hdr.cbReq >= RT_UOFFSETOF_DYN(VMMR0UPDATELOGGERSREQ, afGroups[0]), VERR_INVALID_PARAMETER);
2776 AssertReturn(pReq->cGroups < _8K, VERR_INVALID_PARAMETER);
2777 AssertReturn(pReq->Hdr.cbReq == RT_UOFFSETOF_DYN(VMMR0UPDATELOGGERSREQ, afGroups[pReq->cGroups]), VERR_INVALID_PARAMETER);
2778
2779 size_t const idxLogger = (size_t)(fFlags & VMMR0UPDATELOGGER_F_LOGGER_MASK);
2780 AssertReturn(idxLogger < VMMLOGGER_IDX_MAX, VERR_OUT_OF_RANGE);
2781
2782 /*
2783 * Adjust flags.
2784 */
2785 /* Always buffered, unless logging directly to parent VMM: */
2786 if (!(fFlags & (VMMR0UPDATELOGGER_F_TO_PARENT_VMM_DBG | VMMR0UPDATELOGGER_F_TO_PARENT_VMM_REL)))
2787 pReq->fFlags |= RTLOGFLAGS_BUFFERED;
2788 /* These don't make sense at present: */
2789 pReq->fFlags &= ~(RTLOGFLAGS_FLUSH | RTLOGFLAGS_WRITE_THROUGH);
2790 /* We've traditionally skipped the group restrictions. */
2791 pReq->fFlags &= ~RTLOGFLAGS_RESTRICT_GROUPS;
2792
2793 /*
2794 * Do the updating.
2795 */
2796 int rc = VINF_SUCCESS;
2797 for (idCpu = 0; idCpu < pGVM->cCpus; idCpu++)
2798 {
2799 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2800 PRTLOGGER pLogger = pGVCpu->vmmr0.s.u.aLoggers[idxLogger].pLogger;
2801 if (pLogger)
2802 {
2803 pGVCpu->vmmr0.s.u.aLoggers[idxLogger].fFlushToParentVmmDbg = RT_BOOL(fFlags & VMMR0UPDATELOGGER_F_TO_PARENT_VMM_DBG);
2804 pGVCpu->vmmr0.s.u.aLoggers[idxLogger].fFlushToParentVmmRel = RT_BOOL(fFlags & VMMR0UPDATELOGGER_F_TO_PARENT_VMM_REL);
2805
2806 RTLogSetR0ProgramStart(pLogger, pGVM->vmm.s.nsProgramStart);
2807 rc = RTLogBulkUpdate(pLogger, pReq->fFlags, pReq->uGroupCrc32, pReq->cGroups, pReq->afGroups);
2808 }
2809 }
2810
2811 return rc;
2812}
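/*
 * Request layout sketch implied by the validation above (illustrative; the
 * actual ring-3 wrapper is not part of this file, and cGroups, fLogFlags and
 * uGroupNameCrc32 are placeholder names):
 *
 *      uint32_t const cbReq = RT_UOFFSETOF_DYN(VMMR0UPDATELOGGERSREQ, afGroups[cGroups]);
 *      PVMMR0UPDATELOGGERSREQ pReq = (PVMMR0UPDATELOGGERSREQ)RTMemAllocZ(cbReq);
 *      pReq->Hdr.cbReq   = cbReq;              // must match the dynamic size exactly
 *      pReq->fFlags      = fLogFlags;          // RTLOGFLAGS_XXX, massaged above
 *      pReq->uGroupCrc32 = uGroupNameCrc32;    // presumably a CRC over the group name table
 *      pReq->cGroups     = cGroups;            // must be < _8K
 *      // pReq->afGroups[0..cGroups-1] = per-group enable/flag words
 *
 * The packet is then sent with u64Arg holding the logger index (masked by
 * VMMR0UPDATELOGGER_F_LOGGER_MASK) plus the optional
 * VMMR0UPDATELOGGER_F_TO_PARENT_VMM_DBG/REL flags.
 */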
2813
2814
2815/**
2816 * VMMR0_DO_VMMR0_LOG_FLUSHER: Get the next log flushing job.
2817 *
2818 * The job info is copied into VMM::LogFlusherItem.
2819 *
2820 * @returns VBox status code.
2821 * @retval VERR_OBJECT_DESTROYED if we're shutting down.
2822 * @retval VERR_NOT_OWNER if the calling thread is not the flusher thread.
2823 * @param pGVM The global (ring-0) VM structure.
2824 * @thread The log flusher thread (first caller automatically becomes the log
2825 * flusher).
2826 */
2827static int vmmR0LogFlusher(PGVM pGVM)
2828{
2829 /*
2830 * Check that this really is the flusher thread.
2831 */
2832 RTNATIVETHREAD const hNativeSelf = RTThreadNativeSelf();
2833 AssertReturn(hNativeSelf != NIL_RTNATIVETHREAD, VERR_INTERNAL_ERROR_3);
2834 if (RT_LIKELY(pGVM->vmmr0.s.LogFlusher.hThread == hNativeSelf))
2835 { /* likely */ }
2836 else
2837 {
2838 /* The first caller becomes the flusher thread. */
2839 bool fOk;
2840 ASMAtomicCmpXchgHandle(&pGVM->vmmr0.s.LogFlusher.hThread, hNativeSelf, NIL_RTNATIVETHREAD, fOk);
2841 if (!fOk)
2842 return VERR_NOT_OWNER;
2843 pGVM->vmmr0.s.LogFlusher.fThreadRunning = true;
2844 }
2845
2846 /*
2847 * Acknowledge flush, waking up waiting EMT.
2848 */
2849 RTSpinlockAcquire(pGVM->vmmr0.s.LogFlusher.hSpinlock);
2850
2851 uint32_t idxTail = pGVM->vmmr0.s.LogFlusher.idxRingTail % RT_ELEMENTS(pGVM->vmmr0.s.LogFlusher.aRing);
2852 uint32_t idxHead = pGVM->vmmr0.s.LogFlusher.idxRingHead % RT_ELEMENTS(pGVM->vmmr0.s.LogFlusher.aRing);
2853 if ( idxTail != idxHead
2854 && pGVM->vmmr0.s.LogFlusher.aRing[idxHead].s.fProcessing)
2855 {
2856 /* Pop the head off the ring buffer. */
2857 uint32_t const idCpu = pGVM->vmmr0.s.LogFlusher.aRing[idxHead].s.idCpu;
2858 uint32_t const idxLogger = pGVM->vmmr0.s.LogFlusher.aRing[idxHead].s.idxLogger;
2859 uint32_t const idxBuffer = pGVM->vmmr0.s.LogFlusher.aRing[idxHead].s.idxBuffer;
2860
2861 pGVM->vmmr0.s.LogFlusher.aRing[idxHead].u32 = UINT32_MAX >> 1; /* invalidate the entry */
2862 pGVM->vmmr0.s.LogFlusher.idxRingHead = (idxHead + 1) % RT_ELEMENTS(pGVM->vmmr0.s.LogFlusher.aRing);
2863
2864 /* Validate content. */
2865 if ( idCpu < pGVM->cCpus
2866 && idxLogger < VMMLOGGER_IDX_MAX
2867 && idxBuffer < VMMLOGGER_BUFFER_COUNT)
2868 {
2869 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2870 PVMMR0PERVCPULOGGER pR0Log = &pGVCpu->vmmr0.s.u.aLoggers[idxLogger];
2871 PVMMR3CPULOGGER pShared = &pGVCpu->vmm.s.u.aLoggers[idxLogger];
2872
2873 /*
2874 * Accounting.
2875 */
2876 uint32_t cFlushing = pR0Log->cFlushing - 1;
2877 if (RT_LIKELY(cFlushing < VMMLOGGER_BUFFER_COUNT))
2878 { /*likely*/ }
2879 else
2880 cFlushing = 0;
2881 pR0Log->cFlushing = cFlushing;
2882 ASMAtomicWriteU32(&pShared->cFlushing, cFlushing);
2883
2884 /*
2885 * Wake up the EMT if it's waiting.
2886 */
2887 if (!pR0Log->fEmtWaiting)
2888 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock);
2889 else
2890 {
2891 pR0Log->fEmtWaiting = false;
2892 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock);
2893
2894 int rc = RTSemEventSignal(pR0Log->hEventFlushWait);
2895 if (RT_FAILURE(rc))
2896 LogRelMax(64, ("vmmR0LogFlusher: RTSemEventSignal failed ACKing entry #%u (%u/%u/%u): %Rrc!\n",
2897 idxHead, idCpu, idxLogger, idxBuffer, rc));
2898 }
2899 }
2900 else
2901 {
2902 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock);
2903 LogRelMax(64, ("vmmR0LogFlusher: Bad ACK entry #%u: %u/%u/%u!\n", idxHead, idCpu, idxLogger, idxBuffer));
2904 }
2905
2906 RTSpinlockAcquire(pGVM->vmmr0.s.LogFlusher.hSpinlock);
2907 }
2908
2909 /*
2910 * The wait loop.
2911 */
2912 int rc;
2913 for (;;)
2914 {
2915 /*
2916 * Work pending?
2917 */
2918 idxTail = pGVM->vmmr0.s.LogFlusher.idxRingTail % RT_ELEMENTS(pGVM->vmmr0.s.LogFlusher.aRing);
2919 idxHead = pGVM->vmmr0.s.LogFlusher.idxRingHead % RT_ELEMENTS(pGVM->vmmr0.s.LogFlusher.aRing);
2920 if (idxTail != idxHead)
2921 {
2922 pGVM->vmmr0.s.LogFlusher.aRing[idxHead].s.fProcessing = true;
2923 pGVM->vmm.s.LogFlusherItem.u32 = pGVM->vmmr0.s.LogFlusher.aRing[idxHead].u32;
2924
2925 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock);
2926 return VINF_SUCCESS;
2927 }
2928
2929 /*
2930 * Nothing to do, so, check for termination and go to sleep.
2931 */
2932 if (!pGVM->vmmr0.s.LogFlusher.fThreadShutdown)
2933 { /* likely */ }
2934 else
2935 {
2936 rc = VERR_OBJECT_DESTROYED;
2937 break;
2938 }
2939
2940 pGVM->vmmr0.s.LogFlusher.fThreadWaiting = true;
2941 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock);
2942
2943 rc = RTSemEventWaitNoResume(pGVM->vmmr0.s.LogFlusher.hEvent, RT_MS_5MIN);
2944
2945 RTSpinlockAcquire(pGVM->vmmr0.s.LogFlusher.hSpinlock);
2946 pGVM->vmmr0.s.LogFlusher.fThreadWaiting = false;
2947
2948 if (RT_SUCCESS(rc) || rc == VERR_TIMEOUT)
2949 { /* likely */ }
2950 else if (rc == VERR_INTERRUPTED)
2951 {
2952 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock);
2953 return rc;
2954 }
2955 else if (rc == VERR_SEM_DESTROYED || rc == VERR_INVALID_HANDLE)
2956 break;
2957 else
2958 {
2959 LogRel(("vmmR0LogFlusher: RTSemEventWaitNoResume returned unexpected status %Rrc\n", rc));
2960 break;
2961 }
2962 }
2963
2964 /*
2965 * Terminating - prevent further calls and indicate to the EMTs that we're no longer around.
2966 */
2967 pGVM->vmmr0.s.LogFlusher.hThread = ~pGVM->vmmr0.s.LogFlusher.hThread; /* (should be reasonably safe) */
2968 pGVM->vmmr0.s.LogFlusher.fThreadRunning = false;
2969
2970 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock);
2971 return rc;
2972}
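/*
 * Flusher thread flow sketch (illustrative pseudo code; the ring-3 side of
 * this lives outside this file):
 *
 *      for (;;)
 *      {
 *          rc = <issue VMMR0_DO_VMMR0_LOG_FLUSHER>;        // ends up in vmmR0LogFlusher above
 *          if (rc == VERR_OBJECT_DESTROYED)
 *              break;                                      // VM is going down
 *          // VMM::LogFlusherItem now identifies idCpu, idxLogger and idxBuffer;
 *          // ring-3 writes out that shared log buffer.  The next call ACKs the
 *          // entry (see the "Acknowledge flush" block above) and wakes any EMT
 *          // waiting in vmmR0LogWaitFlushed.
 *      }
 */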
2973
2974
2975/**
2976 * VMMR0_DO_VMMR0_LOG_WAIT_FLUSHED: Waits for the flusher thread to finish all
2977 * buffers for logger @a idxLogger.
2978 *
2979 * @returns VBox status code.
2980 * @param pGVM The global (ring-0) VM structure.
2981 * @param idCpu The ID of the calling EMT.
2982 * @param idxLogger Which logger to wait on.
2983 * @thread EMT(idCpu)
2984 */
2985static int vmmR0LogWaitFlushed(PGVM pGVM, VMCPUID idCpu, size_t idxLogger)
2986{
2987 /*
2988 * Check sanity. First we require EMT to be calling us.
2989 */
2990 AssertReturn(idCpu < pGVM->cCpus, VERR_INVALID_CPU_ID);
2991 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2992 AssertReturn(pGVCpu->hEMT == RTThreadNativeSelf(), VERR_INVALID_CPU_ID);
2993 AssertReturn(idxLogger < VMMLOGGER_IDX_MAX, VERR_OUT_OF_RANGE);
2994 PVMMR0PERVCPULOGGER const pR0Log = &pGVCpu->vmmr0.s.u.aLoggers[idxLogger];
2995
2996 /*
2997 * Do the waiting.
2998 */
2999 int rc = VINF_SUCCESS;
3000 RTSpinlockAcquire(pGVM->vmmr0.s.LogFlusher.hSpinlock);
3001 uint32_t cFlushing = pR0Log->cFlushing;
3002 while (cFlushing > 0)
3003 {
3004 pR0Log->fEmtWaiting = true;
3005 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock);
3006
3007 rc = RTSemEventWaitNoResume(pR0Log->hEventFlushWait, RT_MS_5MIN);
3008
3009 RTSpinlockAcquire(pGVM->vmmr0.s.LogFlusher.hSpinlock);
3010 pR0Log->fEmtWaiting = false;
3011 if (RT_SUCCESS(rc))
3012 {
3013 /* Read the new count and make sure it decreased before looping. That
3014 way we can guarantee that we won't wait more than 5 min * VMMLOGGER_BUFFER_COUNT. */
3015 uint32_t const cPrevFlushing = cFlushing;
3016 cFlushing = pR0Log->cFlushing;
3017 if (cFlushing < cPrevFlushing)
3018 continue;
3019 rc = VERR_INTERNAL_ERROR_3;
3020 }
3021 break;
3022 }
3023 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock);
3024 return rc;
3025}
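/*
 * Descriptive note: the loop above only continues while cFlushing actually
 * decreases between wake-ups, so the EMT waits at most roughly 5 minutes per
 * outstanding buffer; a wake-up without progress bails out with
 * VERR_INTERNAL_ERROR_3 and a timed-out wait with VERR_TIMEOUT.
 */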
3026
3027
3028/**
3029 * Inner worker for vmmR0LoggerFlushCommon for flushing to ring-3.
3030 */
3031static bool vmmR0LoggerFlushInnerToRing3(PGVM pGVM, PGVMCPU pGVCpu, uint32_t idxLogger, size_t idxBuffer, uint32_t cbToFlush)
3032{
3033 PVMMR0PERVCPULOGGER const pR0Log = &pGVCpu->vmmr0.s.u.aLoggers[idxLogger];
3034 PVMMR3CPULOGGER const pShared = &pGVCpu->vmm.s.u.aLoggers[idxLogger];
3035
3036 /*
3037 * Figure out what we need to do and whether we can.
3038 */
3039 enum { kJustSignal, kPrepAndSignal, kPrepSignalAndWait } enmAction;
3040#if VMMLOGGER_BUFFER_COUNT >= 2
3041 if (pR0Log->cFlushing < VMMLOGGER_BUFFER_COUNT - 1)
3042 {
3043 if (RTSemEventIsSignalSafe())
3044 enmAction = kJustSignal;
3045 else if (VMMRZCallRing3IsEnabled(pGVCpu))
3046 enmAction = kPrepAndSignal;
3047 else
3048 {
3049 /** @todo This is a bit simplistic. We could introduce a FF to signal the
3050 * thread or similar. */
3051 STAM_REL_COUNTER_INC(&pShared->StatCannotBlock);
3052# if defined(RT_OS_LINUX)
3053 SUP_DPRINTF(("vmmR0LoggerFlush: Signalling not safe and EMT blocking disabled! (%u bytes)\n", cbToFlush));
3054# endif
3055 pShared->cbDropped += cbToFlush;
3056 return true;
3057 }
3058 }
3059 else
3060#endif
3061 if (VMMRZCallRing3IsEnabled(pGVCpu))
3062 enmAction = kPrepSignalAndWait;
3063 else
3064 {
3065 STAM_REL_COUNTER_INC(&pShared->StatCannotBlock);
3066# if defined(RT_OS_LINUX)
3067 SUP_DPRINTF(("vmmR0LoggerFlush: EMT blocking disabled! (%u bytes)\n", cbToFlush));
3068# endif
3069 pShared->cbDropped += cbToFlush;
3070 return true;
3071 }
3072
3073 /*
3074 * Prepare for blocking if necessary.
3075 */
3076 VMMR0EMTBLOCKCTX Ctx;
3077 if (enmAction != kJustSignal)
3078 {
3079 int rc = VMMR0EmtPrepareToBlock(pGVCpu, VINF_SUCCESS, "vmmR0LoggerFlushInnerToRing3", pR0Log->hEventFlushWait, &Ctx);
3080 if (RT_SUCCESS(rc))
3081 { /* likely */ }
3082 else
3083 {
3084 STAM_REL_COUNTER_INC(&pShared->StatCannotBlock);
3085 SUP_DPRINTF(("vmmR0LoggerFlush: VMMR0EmtPrepareToBlock failed! rc=%d\n", rc));
3086 return false;
3087 }
3088 }
3089
3090 /*
3091 * Queue the flush job.
3092 */
3093 bool fFlushedBuffer;
3094 RTSpinlockAcquire(pGVM->vmmr0.s.LogFlusher.hSpinlock);
3095 if (pGVM->vmmr0.s.LogFlusher.fThreadRunning)
3096 {
3097 uint32_t const idxHead = pGVM->vmmr0.s.LogFlusher.idxRingHead % RT_ELEMENTS(pGVM->vmmr0.s.LogFlusher.aRing);
3098 uint32_t const idxTail = pGVM->vmmr0.s.LogFlusher.idxRingTail % RT_ELEMENTS(pGVM->vmmr0.s.LogFlusher.aRing);
3099 uint32_t const idxNewTail = (idxTail + 1) % RT_ELEMENTS(pGVM->vmmr0.s.LogFlusher.aRing);
3100 if (idxNewTail != idxHead)
3101 {
3102 /* Queue it. */
3103 pGVM->vmmr0.s.LogFlusher.aRing[idxTail].s.idCpu = pGVCpu->idCpu;
3104 pGVM->vmmr0.s.LogFlusher.aRing[idxTail].s.idxLogger = idxLogger;
3105 pGVM->vmmr0.s.LogFlusher.aRing[idxTail].s.idxBuffer = (uint32_t)idxBuffer;
3106 pGVM->vmmr0.s.LogFlusher.aRing[idxTail].s.fProcessing = 0;
3107 pGVM->vmmr0.s.LogFlusher.idxRingTail = idxNewTail;
3108
3109 /* Update the number of buffers currently being flushed. */
3110 uint32_t cFlushing = pR0Log->cFlushing;
3111 cFlushing = RT_MIN(cFlushing + 1, VMMLOGGER_BUFFER_COUNT);
3112 pShared->cFlushing = pR0Log->cFlushing = cFlushing;
3113
3114 /* We must wait if all buffers are currently being flushed. */
3115 bool const fEmtWaiting = cFlushing >= VMMLOGGER_BUFFER_COUNT && enmAction != kJustSignal /* paranoia */;
3116 pR0Log->fEmtWaiting = fEmtWaiting;
3117
3118 /* Stats. */
3119 STAM_REL_COUNTER_INC(&pShared->StatFlushes);
3120 STAM_REL_COUNTER_INC(&pGVM->vmm.s.StatLogFlusherFlushes);
3121
3122 /* Signal the worker thread. */
3123 if (pGVM->vmmr0.s.LogFlusher.fThreadWaiting)
3124 {
3125 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock);
3126 RTSemEventSignal(pGVM->vmmr0.s.LogFlusher.hEvent);
3127 }
3128 else
3129 {
3130 STAM_REL_COUNTER_INC(&pGVM->vmm.s.StatLogFlusherNoWakeUp);
3131 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock);
3132 }
3133
3134 /*
3135 * Wait for a buffer to finish flushing.
3136 *
3137 * Note! Lazy bird is ignoring the status code here. The result is
3138 * that we might end up with an extra event signalling, so the
3139 * next time we need to wait we won't and may end up with some log
3140 * corruption. However, it's too much hassle right now for
3141 * a scenario which would most likely end the process rather
3142 * than cause log corruption.
3143 */
3144 if (fEmtWaiting)
3145 {
3146 STAM_REL_PROFILE_START(&pShared->StatWait, a);
3147 VMMR0EmtWaitEventInner(pGVCpu, VMMR0EMTWAIT_F_TRY_SUPPRESS_INTERRUPTED,
3148 pR0Log->hEventFlushWait, RT_INDEFINITE_WAIT);
3149 STAM_REL_PROFILE_STOP(&pShared->StatWait, a);
3150 }
3151
3152 /*
3153 * We always switch buffer if we have more than one.
3154 */
3155#if VMMLOGGER_BUFFER_COUNT == 1
3156 fFlushedBuffer = true;
3157#else
3158 AssertCompile(VMMLOGGER_BUFFER_COUNT >= 1);
3159 pShared->idxBuf = (idxBuffer + 1) % VMMLOGGER_BUFFER_COUNT;
3160 fFlushedBuffer = false;
3161#endif
3162 }
3163 else
3164 {
3165 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock);
3166 SUP_DPRINTF(("vmmR0LoggerFlush: ring buffer is full!\n"));
3167 fFlushedBuffer = true;
3168 }
3169 }
3170 else
3171 {
3172 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock);
3173 SUP_DPRINTF(("vmmR0LoggerFlush: flusher not active - dropping %u bytes\n", cbToFlush));
3174 fFlushedBuffer = true;
3175 }
3176
3177 /*
3178 * Restore the HM context.
3179 */
3180 if (enmAction != kJustSignal)
3181 VMMR0EmtResumeAfterBlocking(pGVCpu, &Ctx);
3182
3183 return fFlushedBuffer;
3184}
3185
3186
3187/**
3188 * Inner worker for vmmR0LoggerFlushCommon when only flushing to the parent
3189 * VMM's logs.
3190 */
3191static bool vmmR0LoggerFlushInnerToParent(PVMMR0PERVCPULOGGER pR0Log, PRTLOGBUFFERDESC pBufDesc)
3192{
3193 uint32_t const cbToFlush = pBufDesc->offBuf;
3194 if (pR0Log->fFlushToParentVmmDbg)
3195 RTLogWriteVmm(pBufDesc->pchBuf, cbToFlush, false /*fRelease*/);
3196 if (pR0Log->fFlushToParentVmmRel)
3197 RTLogWriteVmm(pBufDesc->pchBuf, cbToFlush, true /*fRelease*/);
3198 return true;
3199}
3200
3201
3202
3203/**
3204 * Common worker for vmmR0LogFlush and vmmR0LogRelFlush.
3205 */
3206static bool vmmR0LoggerFlushCommon(PRTLOGGER pLogger, PRTLOGBUFFERDESC pBufDesc, uint32_t idxLogger)
3207{
3208 /*
3209 * Convert the pLogger into a GVMCPU handle and 'call' back to Ring-3.
3210 * (This is a bit paranoid code.)
3211 */
3212 if (RT_VALID_PTR(pLogger))
3213 {
3214 if ( pLogger->u32Magic == RTLOGGER_MAGIC
3215 && (pLogger->u32UserValue1 & VMMR0_LOGGER_FLAGS_MAGIC_MASK) == VMMR0_LOGGER_FLAGS_MAGIC_VALUE
3216 && pLogger->u64UserValue2 == pLogger->u64UserValue3)
3217 {
3218 PGVMCPU const pGVCpu = (PGVMCPU)(uintptr_t)pLogger->u64UserValue2;
3219 if ( RT_VALID_PTR(pGVCpu)
3220 && ((uintptr_t)pGVCpu & HOST_PAGE_OFFSET_MASK) == 0)
3221 {
3222 RTNATIVETHREAD const hNativeSelf = RTThreadNativeSelf();
3223 PGVM const pGVM = pGVCpu->pGVM;
3224 if ( hNativeSelf == pGVCpu->hEMT
3225 && RT_VALID_PTR(pGVM))
3226 {
3227 PVMMR0PERVCPULOGGER const pR0Log = &pGVCpu->vmmr0.s.u.aLoggers[idxLogger];
3228 size_t const idxBuffer = pBufDesc - &pR0Log->aBufDescs[0];
3229 if (idxBuffer < VMMLOGGER_BUFFER_COUNT)
3230 {
3231 /*
3232 * Make sure we don't recurse forever here should something in the
3233 * following code trigger logging or an assertion. Do the rest in
3234 * an inner worker to avoid hitting the right margin too hard.
3235 */
3236 if (!pR0Log->fFlushing)
3237 {
3238 pR0Log->fFlushing = true;
3239 bool fFlushed;
3240 if ( !pR0Log->fFlushToParentVmmDbg
3241 && !pR0Log->fFlushToParentVmmRel)
3242 fFlushed = vmmR0LoggerFlushInnerToRing3(pGVM, pGVCpu, idxLogger, idxBuffer, pBufDesc->offBuf);
3243 else
3244 fFlushed = vmmR0LoggerFlushInnerToParent(pR0Log, pBufDesc);
3245 pR0Log->fFlushing = false;
3246 return fFlushed;
3247 }
3248
3249 SUP_DPRINTF(("vmmR0LoggerFlush: Recursive flushing!\n"));
3250 }
3251 else
3252 SUP_DPRINTF(("vmmR0LoggerFlush: pLogger=%p pGVCpu=%p: idxBuffer=%#zx\n", pLogger, pGVCpu, idxBuffer));
3253 }
3254 else
3255 SUP_DPRINTF(("vmmR0LoggerFlush: pLogger=%p pGVCpu=%p hEMT=%p hNativeSelf=%p!\n",
3256 pLogger, pGVCpu, pGVCpu->hEMT, hNativeSelf));
3257 }
3258 else
3259 SUP_DPRINTF(("vmmR0LoggerFlush: pLogger=%p pGVCpu=%p!\n", pLogger, pGVCpu));
3260 }
3261 else
3262 SUP_DPRINTF(("vmmR0LoggerFlush: pLogger=%p u32Magic=%#x u32UserValue1=%#x u64UserValue2=%#RX64 u64UserValue3=%#RX64!\n",
3263 pLogger, pLogger->u32Magic, pLogger->u32UserValue1, pLogger->u64UserValue2, pLogger->u64UserValue3));
3264 }
3265 else
3266 SUP_DPRINTF(("vmmR0LoggerFlush: pLogger=%p!\n", pLogger));
3267 return true;
3268}
3269
3270
3271/**
3272 * @callback_method_impl{FNRTLOGFLUSH, Release logger buffer flush callback.}
3273 */
3274static DECLCALLBACK(bool) vmmR0LogRelFlush(PRTLOGGER pLogger, PRTLOGBUFFERDESC pBufDesc)
3275{
3276 return vmmR0LoggerFlushCommon(pLogger, pBufDesc, VMMLOGGER_IDX_RELEASE);
3277}
3278
3279
3280/**
3281 * @callback_method_impl{FNRTLOGFLUSH, Logger (debug) buffer flush callback.}
3282 */
3283static DECLCALLBACK(bool) vmmR0LogFlush(PRTLOGGER pLogger, PRTLOGBUFFERDESC pBufDesc)
3284{
3285#ifdef LOG_ENABLED
3286 return vmmR0LoggerFlushCommon(pLogger, pBufDesc, VMMLOGGER_IDX_REGULAR);
3287#else
3288 RT_NOREF(pLogger, pBufDesc);
3289 return true;
3290#endif
3291}
3292
3293
3294/*
3295 * Override RTLogDefaultInstanceEx so we can do logging from EMTs in ring-0.
3296 */
3297DECLEXPORT(PRTLOGGER) RTLogDefaultInstanceEx(uint32_t fFlagsAndGroup)
3298{
3299#ifdef LOG_ENABLED
3300 PGVMCPU pGVCpu = GVMMR0GetGVCpuByEMT(NIL_RTNATIVETHREAD);
3301 if (pGVCpu)
3302 {
3303 PRTLOGGER pLogger = pGVCpu->vmmr0.s.u.s.Logger.pLogger;
3304 if (RT_VALID_PTR(pLogger))
3305 {
3306 if ( pLogger->u64UserValue2 == (uintptr_t)pGVCpu
3307 && pLogger->u64UserValue3 == (uintptr_t)pGVCpu)
3308 {
3309 if (!pGVCpu->vmmr0.s.u.s.Logger.fFlushing)
3310 return RTLogCheckGroupFlags(pLogger, fFlagsAndGroup);
3311
3312 /*
3313 * When we're flushing we _must_ return NULL here to suppress any
3314 * attempts at using the logger while in vmmR0LoggerFlushCommon.
3315 * The VMMR0EmtPrepareToBlock code may trigger logging in HM,
3316 * which will reset the buffer content before we even get to queue
3317 * the flush request. (Only an issue when VBOX_WITH_R0_LOGGING
3318 * is enabled.)
3319 */
3320 return NULL;
3321 }
3322 }
3323 }
3324#endif
3325 return SUPR0DefaultLogInstanceEx(fFlagsAndGroup);
3326}
3327
3328
3329/*
3330 * Override RTLogRelGetDefaultInstanceEx so we can do LogRel to VBox.log from EMTs in ring-0.
3331 */
3332DECLEXPORT(PRTLOGGER) RTLogRelGetDefaultInstanceEx(uint32_t fFlagsAndGroup)
3333{
3334 PGVMCPU pGVCpu = GVMMR0GetGVCpuByEMT(NIL_RTNATIVETHREAD);
3335 if (pGVCpu)
3336 {
3337 PRTLOGGER pLogger = pGVCpu->vmmr0.s.u.s.RelLogger.pLogger;
3338 if (RT_VALID_PTR(pLogger))
3339 {
3340 if ( pLogger->u64UserValue2 == (uintptr_t)pGVCpu
3341 && pLogger->u64UserValue3 == (uintptr_t)pGVCpu)
3342 {
3343 if (!pGVCpu->vmmr0.s.u.s.RelLogger.fFlushing)
3344 return RTLogCheckGroupFlags(pLogger, fFlagsAndGroup);
3345
3346 /* ASSUMES no LogRels hidden within the VMMR0EmtPrepareToBlock code
3347 path, so we don't return NULL here like for the debug logger... */
3348 }
3349 }
3350 }
3351 return SUPR0GetDefaultLogRelInstanceEx(fFlagsAndGroup);
3352}
3353
3354
3355/**
3356 * Helper for vmmR0InitLoggerSet
3357 */
3358static int vmmR0InitLoggerOne(PGVMCPU pGVCpu, bool fRelease, PVMMR0PERVCPULOGGER pR0Log, PVMMR3CPULOGGER pShared,
3359 uint32_t cbBuf, char *pchBuf, RTR3PTR pchBufR3)
3360{
3361 /*
3362 * Create and configure the logger.
3363 */
3364 for (size_t i = 0; i < VMMLOGGER_BUFFER_COUNT; i++)
3365 {
3366 pR0Log->aBufDescs[i].u32Magic = RTLOGBUFFERDESC_MAGIC;
3367 pR0Log->aBufDescs[i].uReserved = 0;
3368 pR0Log->aBufDescs[i].cbBuf = cbBuf;
3369 pR0Log->aBufDescs[i].offBuf = 0;
3370 pR0Log->aBufDescs[i].pchBuf = pchBuf + i * cbBuf;
3371 pR0Log->aBufDescs[i].pAux = &pShared->aBufs[i].AuxDesc;
3372
3373 pShared->aBufs[i].AuxDesc.fFlushedIndicator = false;
3374 pShared->aBufs[i].AuxDesc.afPadding[0] = 0;
3375 pShared->aBufs[i].AuxDesc.afPadding[1] = 0;
3376 pShared->aBufs[i].AuxDesc.afPadding[2] = 0;
3377 pShared->aBufs[i].AuxDesc.offBuf = 0;
3378 pShared->aBufs[i].pchBufR3 = pchBufR3 + i * cbBuf;
3379 }
3380 pShared->cbBuf = cbBuf;
3381
3382 static const char * const s_apszGroups[] = VBOX_LOGGROUP_NAMES;
3383 int rc = RTLogCreateEx(&pR0Log->pLogger, fRelease ? "VBOX_RELEASE_LOG" : "VBOX_LOG", RTLOG_F_NO_LOCKING | RTLOGFLAGS_BUFFERED,
3384 "all", RT_ELEMENTS(s_apszGroups), s_apszGroups, UINT32_MAX,
3385 VMMLOGGER_BUFFER_COUNT, pR0Log->aBufDescs, RTLOGDEST_DUMMY,
3386 NULL /*pfnPhase*/, 0 /*cHistory*/, 0 /*cbHistoryFileMax*/, 0 /*cSecsHistoryTimeSlot*/,
3387 NULL /*pOutputIf*/, NULL /*pvOutputIfUser*/,
3388 NULL /*pErrInfo*/, NULL /*pszFilenameFmt*/);
3389 if (RT_SUCCESS(rc))
3390 {
3391 PRTLOGGER pLogger = pR0Log->pLogger;
3392 pLogger->u32UserValue1 = VMMR0_LOGGER_FLAGS_MAGIC_VALUE;
3393 pLogger->u64UserValue2 = (uintptr_t)pGVCpu;
3394 pLogger->u64UserValue3 = (uintptr_t)pGVCpu;
3395
3396 rc = RTLogSetFlushCallback(pLogger, fRelease ? vmmR0LogRelFlush : vmmR0LogFlush);
3397 if (RT_SUCCESS(rc))
3398 {
3399 RTLogSetR0ThreadNameF(pLogger, "EMT-%u-R0", pGVCpu->idCpu);
3400
3401 /*
3402 * Create the event sem the EMT waits on while flushing is happening.
3403 */
3404 rc = RTSemEventCreate(&pR0Log->hEventFlushWait);
3405 if (RT_SUCCESS(rc))
3406 return VINF_SUCCESS;
3407 pR0Log->hEventFlushWait = NIL_RTSEMEVENT;
3408 }
3409 RTLogDestroy(pLogger);
3410 }
3411 pR0Log->pLogger = NULL;
3412 return rc;
3413}
3414
3415
3416/**
3417 * Worker for VMMR0CleanupVM and vmmR0InitLoggerSet that destroys one logger.
3418 */
3419static void vmmR0TermLoggerOne(PVMMR0PERVCPULOGGER pR0Log, PVMMR3CPULOGGER pShared)
3420{
3421 RTLogDestroy(pR0Log->pLogger);
3422 pR0Log->pLogger = NULL;
3423
3424 for (size_t i = 0; i < VMMLOGGER_BUFFER_COUNT; i++)
3425 pShared->aBufs[i].pchBufR3 = NIL_RTR3PTR;
3426
3427 RTSemEventDestroy(pR0Log->hEventFlushWait);
3428 pR0Log->hEventFlushWait = NIL_RTSEMEVENT;
3429}
3430
3431
3432/**
3433 * Initializes one type of loggers for each EMT.
3434 */
3435static int vmmR0InitLoggerSet(PGVM pGVM, uint8_t idxLogger, uint32_t cbBuf, PRTR0MEMOBJ phMemObj, PRTR0MEMOBJ phMapObj)
3436{
3437 /* Allocate buffers first. */
3438 int rc = RTR0MemObjAllocPage(phMemObj, cbBuf * pGVM->cCpus * VMMLOGGER_BUFFER_COUNT, false /*fExecutable*/);
3439 if (RT_SUCCESS(rc))
3440 {
3441 rc = RTR0MemObjMapUser(phMapObj, *phMemObj, (RTR3PTR)-1, 0 /*uAlignment*/, RTMEM_PROT_READ, NIL_RTR0PROCESS);
3442 if (RT_SUCCESS(rc))
3443 {
3444 char * const pchBuf = (char *)RTR0MemObjAddress(*phMemObj);
3445 AssertPtrReturn(pchBuf, VERR_INTERNAL_ERROR_2);
3446
3447 RTR3PTR const pchBufR3 = RTR0MemObjAddressR3(*phMapObj);
3448 AssertReturn(pchBufR3 != NIL_RTR3PTR, VERR_INTERNAL_ERROR_3);
3449
3450 /* Initialize the per-CPU loggers. */
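            /* Layout note: the single allocation holds cCpus * VMMLOGGER_BUFFER_COUNT
               buffers of cbBuf bytes each.  EMT i gets the contiguous slice starting at
               pchBuf + i * cbBuf * VMMLOGGER_BUFFER_COUNT, and vmmR0InitLoggerOne then
               steps through that slice in cbBuf increments. */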
3451 for (uint32_t i = 0; i < pGVM->cCpus; i++)
3452 {
3453 PGVMCPU pGVCpu = &pGVM->aCpus[i];
3454 PVMMR0PERVCPULOGGER pR0Log = &pGVCpu->vmmr0.s.u.aLoggers[idxLogger];
3455 PVMMR3CPULOGGER pShared = &pGVCpu->vmm.s.u.aLoggers[idxLogger];
3456 rc = vmmR0InitLoggerOne(pGVCpu, idxLogger == VMMLOGGER_IDX_RELEASE, pR0Log, pShared, cbBuf,
3457 pchBuf + i * cbBuf * VMMLOGGER_BUFFER_COUNT,
3458 pchBufR3 + i * cbBuf * VMMLOGGER_BUFFER_COUNT);
3459 if (RT_FAILURE(rc))
3460 {
3461 vmmR0TermLoggerOne(pR0Log, pShared);
3462 while (i-- > 0)
3463 {
3464 pGVCpu = &pGVM->aCpus[i];
3465 vmmR0TermLoggerOne(&pGVCpu->vmmr0.s.u.aLoggers[idxLogger], &pGVCpu->vmm.s.u.aLoggers[idxLogger]);
3466 }
3467 break;
3468 }
3469 }
3470 if (RT_SUCCESS(rc))
3471 return VINF_SUCCESS;
3472
3473 /* Bail out. */
3474 RTR0MemObjFree(*phMapObj, false /*fFreeMappings*/);
3475 *phMapObj = NIL_RTR0MEMOBJ;
3476 }
3477 RTR0MemObjFree(*phMemObj, true /*fFreeMappings*/);
3478 *phMemObj = NIL_RTR0MEMOBJ;
3479 }
3480 return rc;
3481}
3482
3483
3484/**
3485 * Worker for VMMR0InitPerVMData that initializes all the logging related stuff.
3486 *
3487 * @returns VBox status code.
3488 * @param pGVM The global (ring-0) VM structure.
3489 */
3490static int vmmR0InitLoggers(PGVM pGVM)
3491{
3492 /*
3493 * Invalidate the ring buffer (not really necessary).
3494 */
3495 for (size_t idx = 0; idx < RT_ELEMENTS(pGVM->vmmr0.s.LogFlusher.aRing); idx++)
3496 pGVM->vmmr0.s.LogFlusher.aRing[idx].u32 = UINT32_MAX >> 1; /* (all bits except fProcessing set) */
3497
3498 /*
3499 * Create the spinlock and flusher event semaphore.
3500 */
3501 int rc = RTSpinlockCreate(&pGVM->vmmr0.s.LogFlusher.hSpinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "VM-Log-Flusher");
3502 if (RT_SUCCESS(rc))
3503 {
3504 rc = RTSemEventCreate(&pGVM->vmmr0.s.LogFlusher.hEvent);
3505 if (RT_SUCCESS(rc))
3506 {
3507 /*
3508 * Create the ring-0 release loggers.
3509 */
3510 rc = vmmR0InitLoggerSet(pGVM, VMMLOGGER_IDX_RELEASE, _4K,
3511 &pGVM->vmmr0.s.hMemObjReleaseLogger, &pGVM->vmmr0.s.hMapObjReleaseLogger);
3512#ifdef LOG_ENABLED
3513 if (RT_SUCCESS(rc))
3514 {
3515 /*
3516 * Create debug loggers.
3517 */
3518 rc = vmmR0InitLoggerSet(pGVM, VMMLOGGER_IDX_REGULAR, _64K,
3519 &pGVM->vmmr0.s.hMemObjLogger, &pGVM->vmmr0.s.hMapObjLogger);
3520 }
3521#endif
3522 }
3523 }
3524 return rc;
3525}
3526
3527
3528/**
3529 * Worker that cleans up all the logging related stuff (counterpart to vmmR0InitLoggers).
3530 *
3531 * @param pGVM The global (ring-0) VM structure.
3532 */
3533static void vmmR0CleanupLoggers(PGVM pGVM)
3534{
3535 for (VMCPUID idCpu = 0; idCpu < pGVM->cCpus; idCpu++)
3536 {
3537 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
3538 for (size_t iLogger = 0; iLogger < RT_ELEMENTS(pGVCpu->vmmr0.s.u.aLoggers); iLogger++)
3539 vmmR0TermLoggerOne(&pGVCpu->vmmr0.s.u.aLoggers[iLogger], &pGVCpu->vmm.s.u.aLoggers[iLogger]);
3540 }
3541
3542 /*
3543 * Free logger buffer memory.
3544 */
3545 RTR0MemObjFree(pGVM->vmmr0.s.hMapObjReleaseLogger, false /*fFreeMappings*/);
3546 pGVM->vmmr0.s.hMapObjReleaseLogger = NIL_RTR0MEMOBJ;
3547 RTR0MemObjFree(pGVM->vmmr0.s.hMemObjReleaseLogger, true /*fFreeMappings*/);
3548 pGVM->vmmr0.s.hMemObjReleaseLogger = NIL_RTR0MEMOBJ;
3549
3550 RTR0MemObjFree(pGVM->vmmr0.s.hMapObjLogger, false /*fFreeMappings*/);
3551 pGVM->vmmr0.s.hMapObjLogger = NIL_RTR0MEMOBJ;
3552 RTR0MemObjFree(pGVM->vmmr0.s.hMemObjLogger, true /*fFreeMappings*/);
3553 pGVM->vmmr0.s.hMemObjLogger = NIL_RTR0MEMOBJ;
3554
3555 /*
3556 * Free log flusher related stuff.
3557 */
3558 RTSpinlockDestroy(pGVM->vmmr0.s.LogFlusher.hSpinlock);
3559 pGVM->vmmr0.s.LogFlusher.hSpinlock = NIL_RTSPINLOCK;
3560 RTSemEventDestroy(pGVM->vmmr0.s.LogFlusher.hEvent);
3561 pGVM->vmmr0.s.LogFlusher.hEvent = NIL_RTSEMEVENT;
3562}
3563
3564
3565/*********************************************************************************************************************************
3566* Assertions *
3567*********************************************************************************************************************************/
3568
3569/**
3570 * Installs a notification callback for ring-0 assertions.
3571 *
3572 * @param pVCpu The cross context virtual CPU structure.
3573 * @param pfnCallback Pointer to the callback.
3574 * @param pvUser The user argument.
3575 *
3576 * @returns VBox status code.
3577 */
3578VMMR0_INT_DECL(int) VMMR0AssertionSetNotification(PVMCPUCC pVCpu, PFNVMMR0ASSERTIONNOTIFICATION pfnCallback, RTR0PTR pvUser)
3579{
3580 AssertPtrReturn(pVCpu, VERR_INVALID_POINTER);
3581 AssertPtrReturn(pfnCallback, VERR_INVALID_POINTER);
3582
3583 if (!pVCpu->vmmr0.s.pfnAssertCallback)
3584 {
3585 pVCpu->vmmr0.s.pfnAssertCallback = pfnCallback;
3586 pVCpu->vmmr0.s.pvAssertCallbackUser = pvUser;
3587 return VINF_SUCCESS;
3588 }
3589 return VERR_ALREADY_EXISTS;
3590}
3591
3592
3593/**
3594 * Removes the ring-0 assertion notification callback.
3595 *
3596 * @param pVCpu The cross context virtual CPU structure.
3597 */
3598VMMR0_INT_DECL(void) VMMR0AssertionRemoveNotification(PVMCPUCC pVCpu)
3599{
3600 pVCpu->vmmr0.s.pfnAssertCallback = NULL;
3601 pVCpu->vmmr0.s.pvAssertCallbackUser = NULL;
3602}
3603
3604
3605/**
3606 * Checks whether a ring-0 assertion notification callback is active.
3607 *
3608 * @param pVCpu The cross context virtual CPU structure.
3609 * @returns true if the notification is active, false otherwise.
3610 */
3611VMMR0_INT_DECL(bool) VMMR0AssertionIsNotificationSet(PVMCPUCC pVCpu)
3612{
3613 return pVCpu->vmmr0.s.pfnAssertCallback != NULL;
3614}
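
/*
 * Usage sketch (illustrative only, not part of the original source): a ring-0
 * component that must undo something if the EMT asserts while it is busy could
 * pair the APIs above around the risky region.  The callback signature and the
 * MYSTATE / my* helper names below are assumptions made for the example; only
 * the VMMR0Assertion* calls themselves come from this file.
 *
 *     static DECLCALLBACK(void) myAssertCleanup(PVMCPUCC pVCpu, void *pvUser)
 *     {
 *         MYSTATE *pState = (MYSTATE *)pvUser;   // hypothetical state object
 *         myReleaseResource(pState);             // hypothetical cleanup helper
 *         RT_NOREF(pVCpu);
 *     }
 *
 *     int rc = VMMR0AssertionSetNotification(pVCpu, myAssertCleanup, (RTR0PTR)pState);
 *     if (RT_SUCCESS(rc))
 *     {
 *         rc = myDoRiskyWork(pState);            // hypothetical work
 *         VMMR0AssertionRemoveNotification(pVCpu);
 *     }
 */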
3615
3616
3617/*
3618 * Jump back to ring-3 if we're the EMT and the longjmp is armed.
3619 *
3620 * @returns true if the breakpoint should be hit, false if it should be ignored.
3621 */
3622DECLEXPORT(bool) RTCALL RTAssertShouldPanic(void)
3623{
3624#if 0
3625 return true;
3626#else
3627 PVMCC pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
3628 if (pVM)
3629 {
3630 PVMCPUCC pVCpu = VMMGetCpu(pVM);
3631
3632 if (pVCpu)
3633 {
3634# ifdef RT_ARCH_X86
3635 if (pVCpu->vmmr0.s.AssertJmpBuf.eip)
3636# else
3637 if (pVCpu->vmmr0.s.AssertJmpBuf.rip)
3638# endif
3639 {
3640 if (pVCpu->vmmr0.s.pfnAssertCallback)
3641 pVCpu->vmmr0.s.pfnAssertCallback(pVCpu, pVCpu->vmmr0.s.pvAssertCallbackUser);
3642 int rc = vmmR0CallRing3LongJmp(&pVCpu->vmmr0.s.AssertJmpBuf, VERR_VMM_RING0_ASSERTION);
3643 return RT_FAILURE_NP(rc);
3644 }
3645 }
3646 }
3647# ifdef RT_OS_LINUX
3648 return true;
3649# else
3650 return false;
3651# endif
3652#endif
3653}
3654
3655
3656/*
3657 * Override this so we can push it up to ring-3.
3658 */
3659DECLEXPORT(void) RTCALL RTAssertMsg1Weak(const char *pszExpr, unsigned uLine, const char *pszFile, const char *pszFunction)
3660{
3661 /*
3662 * To host kernel log/whatever.
3663 */
3664 SUPR0Printf("!!R0-Assertion Failed!!\n"
3665 "Expression: %s\n"
3666 "Location : %s(%d) %s\n",
3667 pszExpr, pszFile, uLine, pszFunction);
3668
3669 /*
3670 * To the log.
3671 */
3672 LogAlways(("\n!!R0-Assertion Failed!!\n"
3673 "Expression: %s\n"
3674 "Location : %s(%d) %s\n",
3675 pszExpr, pszFile, uLine, pszFunction));
3676
3677 /*
3678 * To the global VMM buffer.
3679 */
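    /* Only roughly three quarters of the buffer is handed to the expression (the %.*s
       width argument below), which keeps room for the location line that follows. */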
3680 PVMCC pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
3681 if (pVM)
3682 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg1, sizeof(pVM->vmm.s.szRing0AssertMsg1),
3683 "\n!!R0-Assertion Failed!!\n"
3684 "Expression: %.*s\n"
3685 "Location : %s(%d) %s\n",
3686 sizeof(pVM->vmm.s.szRing0AssertMsg1) / 4 * 3, pszExpr,
3687 pszFile, uLine, pszFunction);
3688
3689 /*
3690 * Continue the normal way.
3691 */
3692 RTAssertMsg1(pszExpr, uLine, pszFile, pszFunction);
3693}
3694
3695
3696/**
3697 * Callback for RTLogFormatV which writes to the ring-3 log port.
3698 * See PFNLOGOUTPUT() for details.
3699 */
3700static DECLCALLBACK(size_t) rtLogOutput(void *pv, const char *pachChars, size_t cbChars)
3701{
3702 for (size_t i = 0; i < cbChars; i++)
3703 {
3704 LogAlways(("%c", pachChars[i])); NOREF(pachChars);
3705 }
3706
3707 NOREF(pv);
3708 return cbChars;
3709}
3710
3711
3712/*
3713 * Override this so we can push it up to ring-3.
3714 */
3715DECLEXPORT(void) RTCALL RTAssertMsg2WeakV(const char *pszFormat, va_list va)
3716{
3717 va_list vaCopy;
3718
3719 /*
3720 * Push the message to the loggers.
3721 */
3722 PRTLOGGER pLog = RTLogRelGetDefaultInstance();
3723 if (pLog)
3724 {
3725 va_copy(vaCopy, va);
3726 RTLogFormatV(rtLogOutput, pLog, pszFormat, vaCopy);
3727 va_end(vaCopy);
3728 }
3729 pLog = RTLogGetDefaultInstance(); /* Don't initialize it here... */
3730 if (pLog)
3731 {
3732 va_copy(vaCopy, va);
3733 RTLogFormatV(rtLogOutput, pLog, pszFormat, vaCopy);
3734 va_end(vaCopy);
3735 }
3736
3737 /*
3738 * Push it to the global VMM buffer.
3739 */
3740 PVMCC pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
3741 if (pVM)
3742 {
3743 va_copy(vaCopy, va);
3744 RTStrPrintfV(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2), pszFormat, vaCopy);
3745 va_end(vaCopy);
3746 }
3747
3748 /*
3749 * Continue the normal way.
3750 */
3751 RTAssertMsg2V(pszFormat, va);
3752}
3753