VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/VMMR0.cpp@95248

Last change on this file since 95248 was 94624, checked in by vboxsync, 3 years ago

Runtime/log: Allow setting a custom output interface for the file destination (for encryption), bugref:9955

1/* $Id: VMMR0.cpp 94624 2022-04-19 09:20:51Z vboxsync $ */
2/** @file
3 * VMM - Host Context Ring 0.
4 */
5
6/*
7 * Copyright (C) 2006-2022 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_VMM
23#include <VBox/vmm/vmm.h>
24#include <VBox/sup.h>
25#include <VBox/vmm/iem.h>
26#include <VBox/vmm/iom.h>
27#include <VBox/vmm/trpm.h>
28#include <VBox/vmm/cpum.h>
29#include <VBox/vmm/pdmapi.h>
30#include <VBox/vmm/pgm.h>
31#ifdef VBOX_WITH_NEM_R0
32# include <VBox/vmm/nem.h>
33#endif
34#include <VBox/vmm/em.h>
35#include <VBox/vmm/stam.h>
36#include <VBox/vmm/tm.h>
37#include "VMMInternal.h"
38#include <VBox/vmm/vmcc.h>
39#include <VBox/vmm/gvm.h>
40#ifdef VBOX_WITH_PCI_PASSTHROUGH
41# include <VBox/vmm/pdmpci.h>
42#endif
43#include <VBox/vmm/apic.h>
44
45#include <VBox/vmm/gvmm.h>
46#include <VBox/vmm/gmm.h>
47#include <VBox/vmm/gim.h>
48#include <VBox/intnet.h>
49#include <VBox/vmm/hm.h>
50#include <VBox/param.h>
51#include <VBox/err.h>
52#include <VBox/version.h>
53#include <VBox/log.h>
54
55#include <iprt/asm-amd64-x86.h>
56#include <iprt/assert.h>
57#include <iprt/crc.h>
58#include <iprt/initterm.h>
59#include <iprt/mem.h>
60#include <iprt/memobj.h>
61#include <iprt/mp.h>
62#include <iprt/once.h>
63#include <iprt/semaphore.h>
64#include <iprt/spinlock.h>
65#include <iprt/stdarg.h>
66#include <iprt/string.h>
67#include <iprt/thread.h>
68#include <iprt/timer.h>
69#include <iprt/time.h>
70
71#include "dtrace/VBoxVMM.h"
72
73
74#if defined(_MSC_VER) && defined(RT_ARCH_AMD64) /** @todo check this with VC7! */
75# pragma intrinsic(_AddressOfReturnAddress)
76#endif
77
78#if defined(RT_OS_DARWIN) && ARCH_BITS == 32
79# error "32-bit darwin is no longer supported. Go back to 4.3 or earlier!"
80#endif
81
82
83/*********************************************************************************************************************************
84* Internal Functions *
85*********************************************************************************************************************************/
86RT_C_DECLS_BEGIN
87#if defined(RT_ARCH_X86) && (defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD))
88extern uint64_t __udivdi3(uint64_t, uint64_t);
89extern uint64_t __umoddi3(uint64_t, uint64_t);
90#endif
91RT_C_DECLS_END
92static int vmmR0UpdateLoggers(PGVM pGVM, VMCPUID idCpu, PVMMR0UPDATELOGGERSREQ pReq, size_t idxLogger);
93static int vmmR0LogFlusher(PGVM pGVM);
94static int vmmR0LogWaitFlushed(PGVM pGVM, VMCPUID idCpu, size_t idxLogger);
95static int vmmR0InitLoggers(PGVM pGVM);
96static void vmmR0CleanupLoggers(PGVM pGVM);
97
98
99/*********************************************************************************************************************************
100* Global Variables *
101*********************************************************************************************************************************/
102/** Drag in necessary library bits.
103 * The runtime lives here (in VMMR0.r0) and VBoxDD*R0.r0 links against us. */
104struct CLANG11WEIRDNOTHROW { PFNRT pfn; } g_VMMR0Deps[] =
105{
106 { (PFNRT)RTCrc32 },
107 { (PFNRT)RTOnce },
108#if defined(RT_ARCH_X86) && (defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD))
109 { (PFNRT)__udivdi3 },
110 { (PFNRT)__umoddi3 },
111#endif
112 { NULL }
113};
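/*
 * A note on the table above: taking the address of each function from a data
 * table is a linker trick for pulling runtime code into VMMR0.r0 even though
 * nothing in this module calls it directly, so that the VBoxDD*R0.r0 modules
 * can resolve those symbols against us at load time.  A minimal sketch of the
 * same idiom (HelperA/HelperB are illustrative names, not part of this file):
 *
 *     typedef void (*PFNDEP)(void);
 *     extern void HelperA(void);                  // provided by the static runtime
 *     extern void HelperB(void);
 *     PFNDEP g_apfnForceLinked[] = { (PFNDEP)HelperA, (PFNDEP)HelperB, NULL };
 */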
114
115#ifdef RT_OS_SOLARIS
116/* Dependency information for the native solaris loader. */
117extern "C" { char _depends_on[] = "vboxdrv"; }
118#endif
119
120
121/**
122 * Initialize the module.
123 * This is called when we're first loaded.
124 *
125 * @returns 0 on success.
126 * @returns VBox status on failure.
127 * @param hMod Image handle for use in APIs.
128 */
129DECLEXPORT(int) ModuleInit(void *hMod)
130{
131#ifdef VBOX_WITH_DTRACE_R0
132 /*
133 * The first thing to do is register the static tracepoints.
134 * (Deregistration is automatic.)
135 */
136 int rc2 = SUPR0TracerRegisterModule(hMod, &g_VTGObjHeader);
137 if (RT_FAILURE(rc2))
138 return rc2;
139#endif
140 LogFlow(("ModuleInit:\n"));
141
142#ifdef VBOX_WITH_64ON32_CMOS_DEBUG
143 /*
144 * Display the CMOS debug code.
145 */
146 ASMOutU8(0x72, 0x03);
147 uint8_t bDebugCode = ASMInU8(0x73);
148 LogRel(("CMOS Debug Code: %#x (%d)\n", bDebugCode, bDebugCode));
149 RTLogComPrintf("CMOS Debug Code: %#x (%d)\n", bDebugCode, bDebugCode);
150#endif
151
152 /*
153 * Initialize the VMM, GVMM, GMM, HM, PGM (Darwin) and INTNET.
154 */
155 int rc = vmmInitFormatTypes();
156 if (RT_SUCCESS(rc))
157 {
158 rc = GVMMR0Init();
159 if (RT_SUCCESS(rc))
160 {
161 rc = GMMR0Init();
162 if (RT_SUCCESS(rc))
163 {
164 rc = HMR0Init();
165 if (RT_SUCCESS(rc))
166 {
167 PDMR0Init(hMod);
168
169 rc = PGMRegisterStringFormatTypes();
170 if (RT_SUCCESS(rc))
171 {
172 rc = IntNetR0Init();
173 if (RT_SUCCESS(rc))
174 {
175#ifdef VBOX_WITH_PCI_PASSTHROUGH
176 rc = PciRawR0Init();
177#endif
178 if (RT_SUCCESS(rc))
179 {
180 rc = CPUMR0ModuleInit();
181 if (RT_SUCCESS(rc))
182 {
183#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
184 rc = vmmR0TripleFaultHackInit();
185 if (RT_SUCCESS(rc))
186#endif
187 {
188#ifdef VBOX_WITH_NEM_R0
189 rc = NEMR0Init();
190 if (RT_SUCCESS(rc))
191#endif
192 {
193 LogFlow(("ModuleInit: returns success\n"));
194 return VINF_SUCCESS;
195 }
196 }
197
198 /*
199 * Bail out.
200 */
201#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
202 vmmR0TripleFaultHackTerm();
203#endif
204 }
205 else
206 LogRel(("ModuleInit: CPUMR0ModuleInit -> %Rrc\n", rc));
207#ifdef VBOX_WITH_PCI_PASSTHROUGH
208 PciRawR0Term();
209#endif
210 }
211 else
212 LogRel(("ModuleInit: PciRawR0Init -> %Rrc\n", rc));
213 IntNetR0Term();
214 }
215 else
216 LogRel(("ModuleInit: IntNetR0Init -> %Rrc\n", rc));
217 PGMDeregisterStringFormatTypes();
218 }
219 else
220 LogRel(("ModuleInit: PGMRegisterStringFormatTypes -> %Rrc\n", rc));
221 HMR0Term();
222 }
223 else
224 LogRel(("ModuleInit: HMR0Init -> %Rrc\n", rc));
225 GMMR0Term();
226 }
227 else
228 LogRel(("ModuleInit: GMMR0Init -> %Rrc\n", rc));
229 GVMMR0Term();
230 }
231 else
232 LogRel(("ModuleInit: GVMMR0Init -> %Rrc\n", rc));
233 vmmTermFormatTypes();
234 }
235 else
236 LogRel(("ModuleInit: vmmInitFormatTypes -> %Rrc\n", rc));
237
238 LogFlow(("ModuleInit: failed %Rrc\n", rc));
239 return rc;
240}
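/*
 * The nested RT_SUCCESS() ladder above is the usual init-with-rollback idiom:
 * each subsystem is torn down only if it was brought up, and the tear-down
 * calls run in reverse initialization order on the way out.  A compact sketch
 * of the same pattern (SubsysAInit/SubsysATerm/SubsysBInit are placeholders,
 * not functions from this file):
 *
 *     int rc = SubsysAInit();
 *     if (RT_SUCCESS(rc))
 *     {
 *         rc = SubsysBInit();
 *         if (RT_SUCCESS(rc))
 *             return VINF_SUCCESS;   // everything up
 *         SubsysATerm();             // B failed, undo A only
 *     }
 *     return rc;                     // propagate the first failure
 */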
241
242
243/**
244 * Terminate the module.
245 * This is called when we're finally unloaded.
246 *
247 * @param hMod Image handle for use in APIs.
248 */
249DECLEXPORT(void) ModuleTerm(void *hMod)
250{
251 NOREF(hMod);
252 LogFlow(("ModuleTerm:\n"));
253
254 /*
255 * Terminate the CPUM module (Local APIC cleanup).
256 */
257 CPUMR0ModuleTerm();
258
259 /*
260 * Terminate the internal network service.
261 */
262 IntNetR0Term();
263
264 /*
265 * PGM (Darwin), HM and PciRaw global cleanup.
266 */
267#ifdef VBOX_WITH_PCI_PASSTHROUGH
268 PciRawR0Term();
269#endif
270 PGMDeregisterStringFormatTypes();
271 HMR0Term();
272#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
273 vmmR0TripleFaultHackTerm();
274#endif
275#ifdef VBOX_WITH_NEM_R0
276 NEMR0Term();
277#endif
278
279 /*
280 * Destroy the GMM and GVMM instances.
281 */
282 GMMR0Term();
283 GVMMR0Term();
284
285 vmmTermFormatTypes();
286 RTTermRunCallbacks(RTTERMREASON_UNLOAD, 0);
287
288 LogFlow(("ModuleTerm: returns\n"));
289}
290
291
292/**
293 * Initializes VMM specific members when the GVM structure is created,
294 * allocating loggers and stuff.
295 *
296 * The loggers are allocated here so that we can update their settings before
297 * doing VMMR0_DO_VMMR0_INIT and have correct logging at that time.
298 *
299 * @returns VBox status code.
300 * @param pGVM The global (ring-0) VM structure.
301 */
302VMMR0_INT_DECL(int) VMMR0InitPerVMData(PGVM pGVM)
303{
304 AssertCompile(sizeof(pGVM->vmmr0.s) <= sizeof(pGVM->vmmr0.padding));
305
306 /*
307 * Initialize all members first.
308 */
309 pGVM->vmmr0.s.fCalledInitVm = false;
310 pGVM->vmmr0.s.hMemObjLogger = NIL_RTR0MEMOBJ;
311 pGVM->vmmr0.s.hMapObjLogger = NIL_RTR0MEMOBJ;
312 pGVM->vmmr0.s.hMemObjReleaseLogger = NIL_RTR0MEMOBJ;
313 pGVM->vmmr0.s.hMapObjReleaseLogger = NIL_RTR0MEMOBJ;
314 pGVM->vmmr0.s.LogFlusher.hSpinlock = NIL_RTSPINLOCK;
315 pGVM->vmmr0.s.LogFlusher.hThread = NIL_RTNATIVETHREAD;
316 pGVM->vmmr0.s.LogFlusher.hEvent = NIL_RTSEMEVENT;
317 pGVM->vmmr0.s.LogFlusher.idxRingHead = 0;
318 pGVM->vmmr0.s.LogFlusher.idxRingTail = 0;
319 pGVM->vmmr0.s.LogFlusher.fThreadWaiting = false;
320
321 for (VMCPUID idCpu = 0; idCpu < pGVM->cCpus; idCpu++)
322 {
323 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
324 Assert(pGVCpu->idHostCpu == NIL_RTCPUID);
325 Assert(pGVCpu->iHostCpuSet == UINT32_MAX);
326 pGVCpu->vmmr0.s.pPreemptState = NULL;
327 pGVCpu->vmmr0.s.hCtxHook = NIL_RTTHREADCTXHOOK;
328 pGVCpu->vmmr0.s.AssertJmpBuf.pMirrorBuf = &pGVCpu->vmm.s.AssertJmpBuf;
329 pGVCpu->vmmr0.s.AssertJmpBuf.pvStackBuf = &pGVCpu->vmm.s.abAssertStack[0];
330 pGVCpu->vmmr0.s.AssertJmpBuf.cbStackBuf = sizeof(pGVCpu->vmm.s.abAssertStack);
331
332 for (size_t iLogger = 0; iLogger < RT_ELEMENTS(pGVCpu->vmmr0.s.u.aLoggers); iLogger++)
333 pGVCpu->vmmr0.s.u.aLoggers[iLogger].hEventFlushWait = NIL_RTSEMEVENT;
334 }
335
336 /*
337 * Create the loggers.
338 */
339 return vmmR0InitLoggers(pGVM);
340}
341
342
343/**
344 * Initiates the R0 driver for a particular VM instance.
345 *
346 * @returns VBox status code.
347 *
348 * @param pGVM The global (ring-0) VM structure.
349 * @param uSvnRev The SVN revision of the ring-3 part.
350 * @param uBuildType Build type indicator.
351 * @thread EMT(0)
352 */
353static int vmmR0InitVM(PGVM pGVM, uint32_t uSvnRev, uint32_t uBuildType)
354{
355 /*
356 * Match the SVN revisions and build type.
357 */
358 if (uSvnRev != VMMGetSvnRev())
359 {
360 LogRel(("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev()));
361 SUPR0Printf("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev());
362 return VERR_VMM_R0_VERSION_MISMATCH;
363 }
364 if (uBuildType != vmmGetBuildType())
365 {
366 LogRel(("VMMR0InitVM: Build type mismatch, r3=%#x r0=%#x\n", uBuildType, vmmGetBuildType()));
367 SUPR0Printf("VMMR0InitVM: Build type mismatch, r3=%#x r0=%#x\n", uBuildType, vmmGetBuildType());
368 return VERR_VMM_R0_VERSION_MISMATCH;
369 }
370
371 int rc = GVMMR0ValidateGVMandEMT(pGVM, 0 /*idCpu*/);
372 if (RT_FAILURE(rc))
373 return rc;
374
375 /* Don't allow this to be called more than once. */
376 if (!pGVM->vmmr0.s.fCalledInitVm)
377 pGVM->vmmr0.s.fCalledInitVm = true;
378 else
379 return VERR_ALREADY_INITIALIZED;
380
381#ifdef LOG_ENABLED
382
383 /*
384 * Register the EMT R0 logger instance for VCPU 0.
385 */
386 PVMCPUCC pVCpu = VMCC_GET_CPU_0(pGVM);
387 if (pVCpu->vmmr0.s.u.s.Logger.pLogger)
388 {
389# if 0 /* testing of the logger. */
390 LogCom(("vmmR0InitVM: before %p\n", RTLogDefaultInstance()));
391 LogCom(("vmmR0InitVM: pfnFlush=%p actual=%p\n", pR0Logger->Logger.pfnFlush, vmmR0LoggerFlush));
392 LogCom(("vmmR0InitVM: pfnLogger=%p actual=%p\n", pR0Logger->Logger.pfnLogger, vmmR0LoggerWrapper));
393 LogCom(("vmmR0InitVM: offScratch=%d fFlags=%#x fDestFlags=%#x\n", pR0Logger->Logger.offScratch, pR0Logger->Logger.fFlags, pR0Logger->Logger.fDestFlags));
394
395 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pGVM->pSession);
396 LogCom(("vmmR0InitVM: after %p reg\n", RTLogDefaultInstance()));
397 RTLogSetDefaultInstanceThread(NULL, pGVM->pSession);
398 LogCom(("vmmR0InitVM: after %p dereg\n", RTLogDefaultInstance()));
399
400 pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
401 LogCom(("vmmR0InitVM: returned successfully from direct logger call.\n"));
402 pR0Logger->Logger.pfnFlush(&pR0Logger->Logger);
403 LogCom(("vmmR0InitVM: returned successfully from direct flush call.\n"));
404
405 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pGVM->pSession);
406 LogCom(("vmmR0InitVM: after %p reg2\n", RTLogDefaultInstance()));
407 pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
408 LogCom(("vmmR0InitVM: returned successfully from direct logger call (2). offScratch=%d\n", pR0Logger->Logger.offScratch));
409 RTLogSetDefaultInstanceThread(NULL, pGVM->pSession);
410 LogCom(("vmmR0InitVM: after %p dereg2\n", RTLogDefaultInstance()));
411
412 RTLogLoggerEx(&pR0Logger->Logger, 0, ~0U, "hello ring-0 logger (RTLogLoggerEx)\n");
413 LogCom(("vmmR0InitVM: RTLogLoggerEx returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));
414
415 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pGVM->pSession);
416 RTLogPrintf("hello ring-0 logger (RTLogPrintf)\n");
417 LogCom(("vmmR0InitVM: RTLogPrintf returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));
418# endif
419# ifdef VBOX_WITH_R0_LOGGING
420 Log(("Switching to per-thread logging instance %p (key=%p)\n", pVCpu->vmmr0.s.u.s.Logger.pLogger, pGVM->pSession));
421 RTLogSetDefaultInstanceThread(pVCpu->vmmr0.s.u.s.Logger.pLogger, (uintptr_t)pGVM->pSession);
422 pVCpu->vmmr0.s.u.s.Logger.fRegistered = true;
423# endif
424 }
425#endif /* LOG_ENABLED */
426
427 /*
428 * Check if the host supports high resolution timers or not.
429 */
430 if ( pGVM->vmm.s.fUsePeriodicPreemptionTimers
431 && !RTTimerCanDoHighResolution())
432 pGVM->vmm.s.fUsePeriodicPreemptionTimers = false;
433
434 /*
435 * Initialize the per VM data for GVMM and GMM.
436 */
437 rc = GVMMR0InitVM(pGVM);
438 if (RT_SUCCESS(rc))
439 {
440 /*
441 * Init HM, CPUM and PGM.
442 */
443 rc = HMR0InitVM(pGVM);
444 if (RT_SUCCESS(rc))
445 {
446 rc = CPUMR0InitVM(pGVM);
447 if (RT_SUCCESS(rc))
448 {
449 rc = PGMR0InitVM(pGVM);
450 if (RT_SUCCESS(rc))
451 {
452 rc = EMR0InitVM(pGVM);
453 if (RT_SUCCESS(rc))
454 {
455 rc = IEMR0InitVM(pGVM);
456 if (RT_SUCCESS(rc))
457 {
458 rc = IOMR0InitVM(pGVM);
459 if (RT_SUCCESS(rc))
460 {
461#ifdef VBOX_WITH_PCI_PASSTHROUGH
462 rc = PciRawR0InitVM(pGVM);
463#endif
464 if (RT_SUCCESS(rc))
465 {
466 rc = GIMR0InitVM(pGVM);
467 if (RT_SUCCESS(rc))
468 {
469 GVMMR0DoneInitVM(pGVM);
470 PGMR0DoneInitVM(pGVM);
471
472 /*
473 * Collect a bit of info for the VM release log.
474 */
475 pGVM->vmm.s.fIsPreemptPendingApiTrusty = RTThreadPreemptIsPendingTrusty();
476 pGVM->vmm.s.fIsPreemptPossible = RTThreadPreemptIsPossible();
477 return rc;
478
479 /* bail out */
480 //GIMR0TermVM(pGVM);
481 }
482#ifdef VBOX_WITH_PCI_PASSTHROUGH
483 PciRawR0TermVM(pGVM);
484#endif
485 }
486 }
487 }
488 }
489 }
490 }
491 HMR0TermVM(pGVM);
492 }
493 }
494
495 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pGVM->pSession);
496 return rc;
497}
498
499
500/**
501 * Does EMT specific VM initialization.
502 *
503 * @returns VBox status code.
504 * @param pGVM The ring-0 VM structure.
505 * @param idCpu The EMT that's calling.
506 */
507static int vmmR0InitVMEmt(PGVM pGVM, VMCPUID idCpu)
508{
509 /* Paranoia (caller checked these already). */
510 AssertReturn(idCpu < pGVM->cCpus, VERR_INVALID_CPU_ID);
511 AssertReturn(pGVM->aCpus[idCpu].hEMT == RTThreadNativeSelf(), VERR_INVALID_CPU_ID);
512
513#if defined(LOG_ENABLED) && defined(VBOX_WITH_R0_LOGGING)
514 /*
515 * Registration of ring 0 loggers.
516 */
517 PVMCPUCC pVCpu = &pGVM->aCpus[idCpu];
518 if ( pVCpu->vmmr0.s.u.s.Logger.pLogger
519 && !pVCpu->vmmr0.s.u.s.Logger.fRegistered)
520 {
521 RTLogSetDefaultInstanceThread(pVCpu->vmmr0.s.u.s.Logger.pLogger, (uintptr_t)pGVM->pSession);
522 pVCpu->vmmr0.s.u.s.Logger.fRegistered = true;
523 }
524#endif
525
526 return VINF_SUCCESS;
527}
528
529
530
531/**
532 * Terminates the R0 bits for a particular VM instance.
533 *
534 * This is normally called by ring-3 as part of the VM termination process, but
535 * may alternatively be called during the support driver session cleanup when
536 * the VM object is destroyed (see GVMM).
537 *
538 * @returns VBox status code.
539 *
540 * @param pGVM The global (ring-0) VM structure.
541 * @param idCpu Set to 0 if EMT(0) or NIL_VMCPUID if session cleanup
542 * thread.
543 * @thread EMT(0) or session clean up thread.
544 */
545VMMR0_INT_DECL(int) VMMR0TermVM(PGVM pGVM, VMCPUID idCpu)
546{
547 /*
548 * Check EMT(0) claim if we're called from userland.
549 */
550 if (idCpu != NIL_VMCPUID)
551 {
552 AssertReturn(idCpu == 0, VERR_INVALID_CPU_ID);
553 int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
554 if (RT_FAILURE(rc))
555 return rc;
556 }
557
558#ifdef VBOX_WITH_PCI_PASSTHROUGH
559 PciRawR0TermVM(pGVM);
560#endif
561
562 /*
563 * Tell GVMM what we're up to and check that we only do this once.
564 */
565 if (GVMMR0DoingTermVM(pGVM))
566 {
567 GIMR0TermVM(pGVM);
568
569 /** @todo I wish to call PGMR0PhysFlushHandyPages(pGVM, &pGVM->aCpus[idCpu])
570 * here to make sure we don't leak any shared pages if we crash... */
571 HMR0TermVM(pGVM);
572 }
573
574 /*
575 * Deregister the logger for this EMT.
576 */
577 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pGVM->pSession);
578
579 /*
580 * Start log flusher thread termination.
581 */
582 ASMAtomicWriteBool(&pGVM->vmmr0.s.LogFlusher.fThreadShutdown, true);
583 if (pGVM->vmmr0.s.LogFlusher.hEvent != NIL_RTSEMEVENT)
584 RTSemEventSignal(pGVM->vmmr0.s.LogFlusher.hEvent);
585
586 return VINF_SUCCESS;
587}
588
589
590/**
591 * This is called at the end of gvmmR0CleanupVM().
592 *
593 * @param pGVM The global (ring-0) VM structure.
594 */
595VMMR0_INT_DECL(void) VMMR0CleanupVM(PGVM pGVM)
596{
597 AssertCompile(NIL_RTTHREADCTXHOOK == (RTTHREADCTXHOOK)0); /* Depends on zero initialized memory working for NIL at the moment. */
598 for (VMCPUID idCpu = 0; idCpu < pGVM->cCpus; idCpu++)
599 {
600 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
601
602 /** @todo Can we busy wait here for all thread-context hooks to be
603 * deregistered before releasing (destroying) it? Only until we find a
604 * solution for not deregistering hooks every time we're leaving HMR0
605 * context. */
606 VMMR0ThreadCtxHookDestroyForEmt(pGVCpu);
607 }
608
609 vmmR0CleanupLoggers(pGVM);
610}
611
612
613/**
614 * An interrupt or unhalt force flag is set, deal with it.
615 *
616 * @returns VINF_SUCCESS (or VINF_EM_HALT).
617 * @param pVCpu The cross context virtual CPU structure.
618 * @param uMWait Result from EMMonitorWaitIsActive().
619 * @param enmInterruptibility Guest CPU interruptibility level.
620 */
621static int vmmR0DoHaltInterrupt(PVMCPUCC pVCpu, unsigned uMWait, CPUMINTERRUPTIBILITY enmInterruptibility)
622{
623 Assert(!TRPMHasTrap(pVCpu));
624 Assert( enmInterruptibility > CPUMINTERRUPTIBILITY_INVALID
625 && enmInterruptibility < CPUMINTERRUPTIBILITY_END);
626
627 /*
628 * Pending interrupts w/o any SMIs or NMIs? That's the usual case.
629 */
630 if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
631 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_INTERRUPT_NMI))
632 {
633 if (enmInterruptibility <= CPUMINTERRUPTIBILITY_UNRESTRAINED)
634 {
635 uint8_t u8Interrupt = 0;
636 int rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
637 Log(("vmmR0DoHaltInterrupt: CPU%d u8Interrupt=%d (%#x) rc=%Rrc\n", pVCpu->idCpu, u8Interrupt, u8Interrupt, rc));
638 if (RT_SUCCESS(rc))
639 {
640 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_UNHALT);
641
642 rc = TRPMAssertTrap(pVCpu, u8Interrupt, TRPM_HARDWARE_INT);
643 AssertRCSuccess(rc);
644 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltExec);
645 return rc;
646 }
647 }
648 }
649 /*
650 * SMI is not implemented yet, at least not here.
651 */
652 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_SMI))
653 {
654 Log12(("vmmR0DoHaltInterrupt: CPU%d failed #3\n", pVCpu->idCpu));
655 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltToR3);
656 return VINF_EM_HALT;
657 }
658 /*
659 * NMI.
660 */
661 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
662 {
663 if (enmInterruptibility < CPUMINTERRUPTIBILITY_NMI_INHIBIT)
664 {
665 /** @todo later. */
666 Log12(("vmmR0DoHaltInterrupt: CPU%d failed #2 (uMWait=%u enmInt=%d)\n", pVCpu->idCpu, uMWait, enmInterruptibility));
667 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltToR3);
668 return VINF_EM_HALT;
669 }
670 }
671 /*
672 * Nested-guest virtual interrupt.
673 */
674 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST))
675 {
676 if (enmInterruptibility < CPUMINTERRUPTIBILITY_VIRT_INT_DISABLED)
677 {
678 /** @todo NSTVMX: NSTSVM: Remember, we might have to check and perform VM-exits
679 * here before injecting the virtual interrupt. See emR3ForcedActions
680 * for details. */
681 Log12(("vmmR0DoHaltInterrupt: CPU%d failed #1 (uMWait=%u enmInt=%d)\n", pVCpu->idCpu, uMWait, enmInterruptibility));
682 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltToR3);
683 return VINF_EM_HALT;
684 }
685 }
686
687 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UNHALT))
688 {
689 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltExec);
690 Log11(("vmmR0DoHaltInterrupt: CPU%d success VINF_SUCCESS (UNHALT)\n", pVCpu->idCpu));
691 return VINF_SUCCESS;
692 }
693 if (uMWait > 1)
694 {
695 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltExec);
696 Log11(("vmmR0DoHaltInterrupt: CPU%d success VINF_SUCCESS (uMWait=%u > 1)\n", pVCpu->idCpu, uMWait));
697 return VINF_SUCCESS;
698 }
699
700 Log12(("vmmR0DoHaltInterrupt: CPU%d failed #0 (uMWait=%u enmInt=%d)\n", pVCpu->idCpu, uMWait, enmInterruptibility));
701 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltToR3);
702 return VINF_EM_HALT;
703}
704
705
706/**
707 * This does one round of vmR3HaltGlobal1Halt().
708 *
709 * The rationale here is that we'll reduce latency in interrupt situations if we
710 * don't go to ring-3 immediately on a VINF_EM_HALT (guest executed HLT or
711 * MWAIT), but do one round of blocking here instead and hope the interrupt is
712 * raised in the meanwhile.
713 *
714 * If we go to ring-3 we'll quit the inner HM/NEM loop in EM and end up in the
715 * outer loop, which will then call VMR3WaitHalted() and that in turn will do a
716 * ring-0 call (unless we're too close to a timer event). When the interrupt
717 * wakes us up, we'll return from ring-0 and EM will by instinct do a
718 * rescheduling (because of raw-mode) before it resumes the HM/NEM loop and gets
719 * back to VMMR0EntryFast().
720 *
721 * @returns VINF_SUCCESS or VINF_EM_HALT.
722 * @param pGVM The ring-0 VM structure.
723 * @param pGVCpu The ring-0 virtual CPU structure.
724 *
725 * @todo r=bird: All the blocking/waiting and EMT management should move out of
726 * the VM module, probably to VMM. Then this would be more weird wrt
727 * parameters and statistics.
728 */
729static int vmmR0DoHalt(PGVM pGVM, PGVMCPU pGVCpu)
730{
731 /*
732 * Do spin stat historization.
733 */
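 /* Every 256th halt (when the low byte of cR0Halts wraps) the two counters
    below are re-seeded in favour of whichever outcome dominated recently, so
    the spin/block heuristics further down adapt to the current workload. */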
734 if (++pGVCpu->vmm.s.cR0Halts & 0xff)
735 { /* likely */ }
736 else if (pGVCpu->vmm.s.cR0HaltsSucceeded > pGVCpu->vmm.s.cR0HaltsToRing3)
737 {
738 pGVCpu->vmm.s.cR0HaltsSucceeded = 2;
739 pGVCpu->vmm.s.cR0HaltsToRing3 = 0;
740 }
741 else
742 {
743 pGVCpu->vmm.s.cR0HaltsSucceeded = 0;
744 pGVCpu->vmm.s.cR0HaltsToRing3 = 2;
745 }
746
747 /*
748 * Flags that make us go to ring-3.
749 */
750 uint32_t const fVmFFs = VM_FF_TM_VIRTUAL_SYNC | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA
751 | VM_FF_DBGF | VM_FF_REQUEST | VM_FF_CHECK_VM_STATE
752 | VM_FF_RESET | VM_FF_EMT_RENDEZVOUS | VM_FF_PGM_NEED_HANDY_PAGES
753 | VM_FF_PGM_NO_MEMORY | VM_FF_DEBUG_SUSPEND;
754 uint64_t const fCpuFFs = VMCPU_FF_TIMER | VMCPU_FF_PDM_CRITSECT | VMCPU_FF_IEM
755 | VMCPU_FF_REQUEST | VMCPU_FF_DBGF | VMCPU_FF_HM_UPDATE_CR3
756 | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
757 | VMCPU_FF_TO_R3 | VMCPU_FF_IOM;
758
759 /*
760 * Check preconditions.
761 */
762 unsigned const uMWait = EMMonitorWaitIsActive(pGVCpu);
763 CPUMINTERRUPTIBILITY const enmInterruptibility = CPUMGetGuestInterruptibility(pGVCpu);
764 if ( pGVCpu->vmm.s.fMayHaltInRing0
765 && !TRPMHasTrap(pGVCpu)
766 && ( enmInterruptibility == CPUMINTERRUPTIBILITY_UNRESTRAINED
767 || uMWait > 1))
768 {
769 if ( !VM_FF_IS_ANY_SET(pGVM, fVmFFs)
770 && !VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs))
771 {
772 /*
773 * Interrupts pending already?
774 */
775 if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC))
776 APICUpdatePendingInterrupts(pGVCpu);
777
778 /*
779 * Flags that wake us up from the halted state.
780 */
781 uint64_t const fIntMask = VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_INTERRUPT_NESTED_GUEST
782 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT;
783
784 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask))
785 return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility);
786 ASMNopPause();
787
788 /*
789 * Check out how long till the next timer event.
790 */
791 uint64_t u64Delta;
792 uint64_t u64GipTime = TMTimerPollGIP(pGVM, pGVCpu, &u64Delta);
793
794 if ( !VM_FF_IS_ANY_SET(pGVM, fVmFFs)
795 && !VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs))
796 {
797 if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC))
798 APICUpdatePendingInterrupts(pGVCpu);
799
800 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask))
801 return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility);
802
803 /*
804 * Wait if there is enough time to the next timer event.
805 */
806 if (u64Delta >= pGVCpu->vmm.s.cNsSpinBlockThreshold)
807 {
808 /* If there are a few other CPU cores around, we will procrastinate a
809 little before going to sleep, hoping for some device raising an
810 interrupt or similar. Though, the best thing here would be to
811 dynamically adjust the spin count according to its usefulness or
812 something... */
813 if ( pGVCpu->vmm.s.cR0HaltsSucceeded > pGVCpu->vmm.s.cR0HaltsToRing3
814 && RTMpGetOnlineCount() >= 4)
815 {
816 /** @todo Figure out how we can skip this if it hasn't helped recently...
817 * @bugref{9172#c12} */
818 uint32_t cSpinLoops = 42;
819 while (cSpinLoops-- > 0)
820 {
821 ASMNopPause();
822 if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC))
823 APICUpdatePendingInterrupts(pGVCpu);
824 ASMNopPause();
825 if (VM_FF_IS_ANY_SET(pGVM, fVmFFs))
826 {
827 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3FromSpin);
828 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3);
829 return VINF_EM_HALT;
830 }
831 ASMNopPause();
832 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs))
833 {
834 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3FromSpin);
835 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3);
836 return VINF_EM_HALT;
837 }
838 ASMNopPause();
839 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask))
840 {
841 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltExecFromSpin);
842 return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility);
843 }
844 ASMNopPause();
845 }
846 }
847
848 /*
849 * We have to set the state to VMCPUSTATE_STARTED_HALTED here so ring-3
850 * knows when to notify us (cannot access VMINTUSERPERVMCPU::fWait from here).
851 * After changing the state we must recheck the force flags of course.
852 */
853 if (VMCPU_CMPXCHG_STATE(pGVCpu, VMCPUSTATE_STARTED_HALTED, VMCPUSTATE_STARTED))
854 {
855 if ( !VM_FF_IS_ANY_SET(pGVM, fVmFFs)
856 && !VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs))
857 {
858 if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC))
859 APICUpdatePendingInterrupts(pGVCpu);
860
861 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask))
862 {
863 VMCPU_CMPXCHG_STATE(pGVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_HALTED);
864 return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility);
865 }
866
867 /* Okay, block! */
868 uint64_t const u64StartSchedHalt = RTTimeNanoTS();
869 int rc = GVMMR0SchedHalt(pGVM, pGVCpu, u64GipTime);
870 uint64_t const u64EndSchedHalt = RTTimeNanoTS();
871 uint64_t const cNsElapsedSchedHalt = u64EndSchedHalt - u64StartSchedHalt;
872 Log10(("vmmR0DoHalt: CPU%d: halted %llu ns\n", pGVCpu->idCpu, cNsElapsedSchedHalt));
873
874 VMCPU_CMPXCHG_STATE(pGVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_HALTED);
875 STAM_REL_PROFILE_ADD_PERIOD(&pGVCpu->vmm.s.StatR0HaltBlock, cNsElapsedSchedHalt);
876 if ( rc == VINF_SUCCESS
877 || rc == VERR_INTERRUPTED)
878 {
879 /* Keep some stats like ring-3 does. */
880 int64_t const cNsOverslept = u64EndSchedHalt - u64GipTime;
881 if (cNsOverslept > 50000)
882 STAM_REL_PROFILE_ADD_PERIOD(&pGVCpu->vmm.s.StatR0HaltBlockOverslept, cNsOverslept);
883 else if (cNsOverslept < -50000)
884 STAM_REL_PROFILE_ADD_PERIOD(&pGVCpu->vmm.s.StatR0HaltBlockInsomnia, cNsElapsedSchedHalt);
885 else
886 STAM_REL_PROFILE_ADD_PERIOD(&pGVCpu->vmm.s.StatR0HaltBlockOnTime, cNsElapsedSchedHalt);
887
888 /*
889 * Recheck whether we can resume execution or have to go to ring-3.
890 */
891 if ( !VM_FF_IS_ANY_SET(pGVM, fVmFFs)
892 && !VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs))
893 {
894 if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC))
895 APICUpdatePendingInterrupts(pGVCpu);
896 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask))
897 {
898 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltExecFromBlock);
899 return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility);
900 }
901 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3PostNoInt);
902 Log12(("vmmR0DoHalt: CPU%d post #2 - No pending interrupt\n", pGVCpu->idCpu));
903 }
904 else
905 {
906 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3PostPendingFF);
907 Log12(("vmmR0DoHalt: CPU%d post #1 - Pending FF\n", pGVCpu->idCpu));
908 }
909 }
910 else
911 {
912 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3Other);
913 Log12(("vmmR0DoHalt: CPU%d GVMMR0SchedHalt failed: %Rrc\n", pGVCpu->idCpu, rc));
914 }
915 }
916 else
917 {
918 VMCPU_CMPXCHG_STATE(pGVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_HALTED);
919 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3PendingFF);
920 Log12(("vmmR0DoHalt: CPU%d failed #5 - Pending FF\n", pGVCpu->idCpu));
921 }
922 }
923 else
924 {
925 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3Other);
926 Log12(("vmmR0DoHalt: CPU%d failed #4 - enmState=%d\n", pGVCpu->idCpu, VMCPU_GET_STATE(pGVCpu)));
927 }
928 }
929 else
930 {
931 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3SmallDelta);
932 Log12(("vmmR0DoHalt: CPU%d failed #3 - delta too small: %RU64\n", pGVCpu->idCpu, u64Delta));
933 }
934 }
935 else
936 {
937 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3PendingFF);
938 Log12(("vmmR0DoHalt: CPU%d failed #2 - Pending FF\n", pGVCpu->idCpu));
939 }
940 }
941 else
942 {
943 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3PendingFF);
944 Log12(("vmmR0DoHalt: CPU%d failed #1 - Pending FF\n", pGVCpu->idCpu));
945 }
946 }
947 else
948 {
949 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3Other);
950 Log12(("vmmR0DoHalt: CPU%d failed #0 - fMayHaltInRing0=%d TRPMHasTrap=%d enmInt=%d uMWait=%u\n",
951 pGVCpu->idCpu, pGVCpu->vmm.s.fMayHaltInRing0, TRPMHasTrap(pGVCpu), enmInterruptibility, uMWait));
952 }
953
954 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3);
955 return VINF_EM_HALT;
956}
957
958
959/**
960 * VMM ring-0 thread-context callback.
961 *
962 * This does common HM state updating and calls the HM-specific thread-context
963 * callback.
964 *
965 * This is used together with RTThreadCtxHookCreate() on platforms which
966 * support it, and directly from VMMR0EmtPrepareForBlocking() and
967 * VMMR0EmtResumeAfterBlocking() on platforms which don't.
968 *
969 * @param enmEvent The thread-context event.
970 * @param pvUser Opaque pointer to the VMCPU.
971 *
972 * @thread EMT(pvUser)
973 */
974static DECLCALLBACK(void) vmmR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, void *pvUser)
975{
976 PVMCPUCC pVCpu = (PVMCPUCC)pvUser;
977
978 switch (enmEvent)
979 {
980 case RTTHREADCTXEVENT_IN:
981 {
982 /*
983 * Linux may call us with preemption enabled (really!) but technically we
984 * cannot get preempted here, otherwise we end up in an infinite recursion
985 * scenario (i.e. preempted in resume hook -> preempt hook -> resume hook...
986 * ad infinitum). Let's just disable preemption for now...
987 */
988 /** @todo r=bird: I don't believe the above. The linux code is clearly enabling
989 * preemption after doing the callout (one or two functions up the
990 * call chain). */
991 /** @todo r=ramshankar: See @bugref{5313#c30}. */
992 RTTHREADPREEMPTSTATE ParanoidPreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
993 RTThreadPreemptDisable(&ParanoidPreemptState);
994
995 /* We need to update the VCPU <-> host CPU mapping. */
996 RTCPUID idHostCpu;
997 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
998 pVCpu->iHostCpuSet = iHostCpuSet;
999 ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
1000
1001 /* In the very unlikely event that the GIP delta for the CPU we're
1002 rescheduled onto needs calculating, try to force a return to ring-3.
1003 We unfortunately cannot do the measurements right here. */
1004 if (RT_LIKELY(!SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
1005 { /* likely */ }
1006 else
1007 VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
1008
1009 /* Invoke the HM-specific thread-context callback. */
1010 HMR0ThreadCtxCallback(enmEvent, pvUser);
1011
1012 /* Restore preemption. */
1013 RTThreadPreemptRestore(&ParanoidPreemptState);
1014 break;
1015 }
1016
1017 case RTTHREADCTXEVENT_OUT:
1018 {
1019 /* Invoke the HM-specific thread-context callback. */
1020 HMR0ThreadCtxCallback(enmEvent, pvUser);
1021
1022 /*
1023 * Sigh. See VMMGetCpu() used by VMCPU_ASSERT_EMT(). We cannot let several VCPUs
1024 * have the same host CPU associated with them.
1025 */
1026 pVCpu->iHostCpuSet = UINT32_MAX;
1027 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1028 break;
1029 }
1030
1031 default:
1032 /* Invoke the HM-specific thread-context callback. */
1033 HMR0ThreadCtxCallback(enmEvent, pvUser);
1034 break;
1035 }
1036}
1037
1038
1039/**
1040 * Creates thread switching hook for the current EMT thread.
1041 *
1042 * This is called by GVMMR0CreateVM and GVMMR0RegisterVCpu. If the host
1043 * platform does not implement switcher hooks, no hooks will be created and the
1044 * member will be set to NIL_RTTHREADCTXHOOK.
1045 *
1046 * @returns VBox status code.
1047 * @param pVCpu The cross context virtual CPU structure.
1048 * @thread EMT(pVCpu)
1049 */
1050VMMR0_INT_DECL(int) VMMR0ThreadCtxHookCreateForEmt(PVMCPUCC pVCpu)
1051{
1052 VMCPU_ASSERT_EMT(pVCpu);
1053 Assert(pVCpu->vmmr0.s.hCtxHook == NIL_RTTHREADCTXHOOK);
1054
1055#if 1 /* To disable this stuff change to zero. */
1056 int rc = RTThreadCtxHookCreate(&pVCpu->vmmr0.s.hCtxHook, 0, vmmR0ThreadCtxCallback, pVCpu);
1057 if (RT_SUCCESS(rc))
1058 {
1059 pVCpu->pGVM->vmm.s.fIsUsingContextHooks = true;
1060 return rc;
1061 }
1062#else
1063 RT_NOREF(vmmR0ThreadCtxCallback);
1064 int rc = VERR_NOT_SUPPORTED;
1065#endif
1066
1067 pVCpu->vmmr0.s.hCtxHook = NIL_RTTHREADCTXHOOK;
1068 pVCpu->pGVM->vmm.s.fIsUsingContextHooks = false;
1069 if (rc == VERR_NOT_SUPPORTED)
1070 return VINF_SUCCESS;
1071
1072 LogRelMax(32, ("RTThreadCtxHookCreate failed! rc=%Rrc pVCpu=%p idCpu=%RU32\n", rc, pVCpu, pVCpu->idCpu));
1073 return VINF_SUCCESS; /* Just ignore it, we can live without context hooks. */
1074}
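/*
 * For reference, the full lifecycle of such a hook as used elsewhere in this
 * file (the IPRT calls are real; MyCtxCallback and pvMyUser are illustrative
 * placeholders):
 *
 *     RTTHREADCTXHOOK hHook = NIL_RTTHREADCTXHOOK;
 *     int rc = RTThreadCtxHookCreate(&hHook, 0, MyCtxCallback, pvMyUser);  // once per EMT
 *     ...
 *     RTThreadCtxHookEnable(hHook);    // around code that needs the IN/OUT events
 *     ...
 *     RTThreadCtxHookDisable(hHook);   // before going back to ring-3
 *     ...
 *     RTThreadCtxHookDestroy(hHook);   // on EMT / VM cleanup
 */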
1075
1076
1077/**
1078 * Destroys the thread switching hook for the specified VCPU.
1079 *
1080 * @param pVCpu The cross context virtual CPU structure.
1081 * @remarks Can be called from any thread.
1082 */
1083VMMR0_INT_DECL(void) VMMR0ThreadCtxHookDestroyForEmt(PVMCPUCC pVCpu)
1084{
1085 int rc = RTThreadCtxHookDestroy(pVCpu->vmmr0.s.hCtxHook);
1086 AssertRC(rc);
1087 pVCpu->vmmr0.s.hCtxHook = NIL_RTTHREADCTXHOOK;
1088}
1089
1090
1091/**
1092 * Disables the thread switching hook for this VCPU (if we got one).
1093 *
1094 * @param pVCpu The cross context virtual CPU structure.
1095 * @thread EMT(pVCpu)
1096 *
1097 * @remarks This also clears GVMCPU::idHostCpu, so the mapping is invalid after
1098 * this call. This means you have to be careful with what you do!
1099 */
1100VMMR0_INT_DECL(void) VMMR0ThreadCtxHookDisable(PVMCPUCC pVCpu)
1101{
1102 /*
1103 * Clear the VCPU <-> host CPU mapping as we've left HM context.
1104 * @bugref{7726#c19} explains the need for this trick:
1105 *
1106 * VMXR0CallRing3Callback/SVMR0CallRing3Callback &
1107 * hmR0VmxLeaveSession/hmR0SvmLeaveSession disables context hooks during
1108 * longjmp & normal return to ring-3, which opens a window where we may be
1109 * rescheduled without changing GVMCPUID::idHostCpu and cause confusion if
1110 * the CPU starts executing a different EMT. Both functions first disable
1111 * preemption and then call HMR0LeaveCpu, which invalidates idHostCpu, leaving
1112 * an opening for getting preempted.
1113 */
1114 /** @todo Make HM not need this API! Then we could leave the hooks enabled
1115 * all the time. */
1116
1117 /*
1118 * Disable the context hook, if we got one.
1119 */
1120 if (pVCpu->vmmr0.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1121 {
1122 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1123 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1124 int rc = RTThreadCtxHookDisable(pVCpu->vmmr0.s.hCtxHook);
1125 AssertRC(rc);
1126 }
1127}
1128
1129
1130/**
1131 * Internal version of VMMR0ThreadCtxHooksAreRegistered.
1132 *
1133 * @returns true if registered, false otherwise.
1134 * @param pVCpu The cross context virtual CPU structure.
1135 */
1136DECLINLINE(bool) vmmR0ThreadCtxHookIsEnabled(PVMCPUCC pVCpu)
1137{
1138 return RTThreadCtxHookIsEnabled(pVCpu->vmmr0.s.hCtxHook);
1139}
1140
1141
1142/**
1143 * Whether thread-context hooks are registered for this VCPU.
1144 *
1145 * @returns true if registered, false otherwise.
1146 * @param pVCpu The cross context virtual CPU structure.
1147 */
1148VMMR0_INT_DECL(bool) VMMR0ThreadCtxHookIsEnabled(PVMCPUCC pVCpu)
1149{
1150 return vmmR0ThreadCtxHookIsEnabled(pVCpu);
1151}
1152
1153
1154/**
1155 * Returns the ring-0 release logger instance.
1156 *
1157 * @returns Pointer to release logger, NULL if not configured.
1158 * @param pVCpu The cross context virtual CPU structure of the caller.
1159 * @thread EMT(pVCpu)
1160 */
1161VMMR0_INT_DECL(PRTLOGGER) VMMR0GetReleaseLogger(PVMCPUCC pVCpu)
1162{
1163 return pVCpu->vmmr0.s.u.s.RelLogger.pLogger;
1164}
1165
1166
1167#ifdef VBOX_WITH_STATISTICS
1168/**
1169 * Record return code statistics
1170 * @param pVM The cross context VM structure.
1171 * @param pVCpu The cross context virtual CPU structure.
1172 * @param rc The status code.
1173 */
1174static void vmmR0RecordRC(PVMCC pVM, PVMCPUCC pVCpu, int rc)
1175{
1176 /*
1177 * Collect statistics.
1178 */
1179 switch (rc)
1180 {
1181 case VINF_SUCCESS:
1182 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetNormal);
1183 break;
1184 case VINF_EM_RAW_INTERRUPT:
1185 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterrupt);
1186 break;
1187 case VINF_EM_RAW_INTERRUPT_HYPER:
1188 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptHyper);
1189 break;
1190 case VINF_EM_RAW_GUEST_TRAP:
1191 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGuestTrap);
1192 break;
1193 case VINF_EM_RAW_RING_SWITCH:
1194 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitch);
1195 break;
1196 case VINF_EM_RAW_RING_SWITCH_INT:
1197 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitchInt);
1198 break;
1199 case VINF_EM_RAW_STALE_SELECTOR:
1200 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetStaleSelector);
1201 break;
1202 case VINF_EM_RAW_IRET_TRAP:
1203 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIRETTrap);
1204 break;
1205 case VINF_IOM_R3_IOPORT_READ:
1206 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIORead);
1207 break;
1208 case VINF_IOM_R3_IOPORT_WRITE:
1209 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOWrite);
1210 break;
1211 case VINF_IOM_R3_IOPORT_COMMIT_WRITE:
1212 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOCommitWrite);
1213 break;
1214 case VINF_IOM_R3_MMIO_READ:
1215 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIORead);
1216 break;
1217 case VINF_IOM_R3_MMIO_WRITE:
1218 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOWrite);
1219 break;
1220 case VINF_IOM_R3_MMIO_COMMIT_WRITE:
1221 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOCommitWrite);
1222 break;
1223 case VINF_IOM_R3_MMIO_READ_WRITE:
1224 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOReadWrite);
1225 break;
1226 case VINF_PATM_HC_MMIO_PATCH_READ:
1227 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchRead);
1228 break;
1229 case VINF_PATM_HC_MMIO_PATCH_WRITE:
1230 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchWrite);
1231 break;
1232 case VINF_CPUM_R3_MSR_READ:
1233 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMSRRead);
1234 break;
1235 case VINF_CPUM_R3_MSR_WRITE:
1236 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMSRWrite);
1237 break;
1238 case VINF_EM_RAW_EMULATE_INSTR:
1239 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetEmulate);
1240 break;
1241 case VINF_PATCH_EMULATE_INSTR:
1242 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchEmulate);
1243 break;
1244 case VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT:
1245 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetLDTFault);
1246 break;
1247 case VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT:
1248 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGDTFault);
1249 break;
1250 case VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT:
1251 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIDTFault);
1252 break;
1253 case VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT:
1254 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTSSFault);
1255 break;
1256 case VINF_CSAM_PENDING_ACTION:
1257 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCSAMTask);
1258 break;
1259 case VINF_PGM_SYNC_CR3:
1260 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetSyncCR3);
1261 break;
1262 case VINF_PATM_PATCH_INT3:
1263 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchInt3);
1264 break;
1265 case VINF_PATM_PATCH_TRAP_PF:
1266 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchPF);
1267 break;
1268 case VINF_PATM_PATCH_TRAP_GP:
1269 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchGP);
1270 break;
1271 case VINF_PATM_PENDING_IRQ_AFTER_IRET:
1272 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchIretIRQ);
1273 break;
1274 case VINF_EM_RESCHEDULE_REM:
1275 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRescheduleREM);
1276 break;
1277 case VINF_EM_RAW_TO_R3:
1278 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Total);
1279 if (VM_FF_IS_SET(pVM, VM_FF_TM_VIRTUAL_SYNC))
1280 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3TMVirt);
1281 else if (VM_FF_IS_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES))
1282 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3HandyPages);
1283 else if (VM_FF_IS_SET(pVM, VM_FF_PDM_QUEUES))
1284 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3PDMQueues);
1285 else if (VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
1286 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Rendezvous);
1287 else if (VM_FF_IS_SET(pVM, VM_FF_PDM_DMA))
1288 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3DMA);
1289 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TIMER))
1290 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Timer);
1291 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PDM_CRITSECT))
1292 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3CritSect);
1293 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TO_R3))
1294 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3FF);
1295 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
1296 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Iem);
1297 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IOM))
1298 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Iom);
1299 else
1300 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Unknown);
1301 break;
1302
1303 case VINF_EM_RAW_TIMER_PENDING:
1304 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTimerPending);
1305 break;
1306 case VINF_EM_RAW_INTERRUPT_PENDING:
1307 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptPending);
1308 break;
1309 case VINF_PATM_DUPLICATE_FUNCTION:
1310 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPATMDuplicateFn);
1311 break;
1312 case VINF_PGM_POOL_FLUSH_PENDING:
1313 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMFlushPending);
1314 break;
1315 case VINF_EM_PENDING_REQUEST:
1316 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPendingRequest);
1317 break;
1318 case VINF_EM_HM_PATCH_TPR_INSTR:
1319 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchTPR);
1320 break;
1321 default:
1322 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMisc);
1323 break;
1324 }
1325}
1326#endif /* VBOX_WITH_STATISTICS */
1327
1328
1329/**
1330 * The Ring 0 entry point, called by the fast-ioctl path.
1331 *
1332 * @param pGVM The global (ring-0) VM structure.
1333 * @param pVMIgnored The cross context VM structure. The return code is
1334 * stored in pVM->vmm.s.iLastGZRc.
1335 * @param idCpu The Virtual CPU ID of the calling EMT.
1336 * @param enmOperation Which operation to execute.
1337 * @remarks Assume called with interrupts _enabled_.
1338 */
1339VMMR0DECL(void) VMMR0EntryFast(PGVM pGVM, PVMCC pVMIgnored, VMCPUID idCpu, VMMR0OPERATION enmOperation)
1340{
1341 RT_NOREF(pVMIgnored);
1342
1343 /*
1344 * Validation.
1345 */
1346 if ( idCpu < pGVM->cCpus
1347 && pGVM->cCpus == pGVM->cCpusUnsafe)
1348 { /*likely*/ }
1349 else
1350 {
1351 SUPR0Printf("VMMR0EntryFast: Bad idCpu=%#x cCpus=%#x cCpusUnsafe=%#x\n", idCpu, pGVM->cCpus, pGVM->cCpusUnsafe);
1352 return;
1353 }
1354
1355 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
1356 RTNATIVETHREAD const hNativeThread = RTThreadNativeSelf();
1357 if (RT_LIKELY( pGVCpu->hEMT == hNativeThread
1358 && pGVCpu->hNativeThreadR0 == hNativeThread))
1359 { /* likely */ }
1360 else
1361 {
1362 SUPR0Printf("VMMR0EntryFast: Bad thread idCpu=%#x hNativeSelf=%p pGVCpu->hEmt=%p pGVCpu->hNativeThreadR0=%p\n",
1363 idCpu, hNativeThread, pGVCpu->hEMT, pGVCpu->hNativeThreadR0);
1364 return;
1365 }
1366
1367 /*
1368 * Perform requested operation.
1369 */
1370 switch (enmOperation)
1371 {
1372 /*
1373 * Run guest code using the available hardware acceleration technology.
1374 */
1375 case VMMR0_DO_HM_RUN:
1376 {
1377 for (;;) /* hlt loop */
1378 {
1379 /*
1380 * Disable ring-3 calls & blocking till we've successfully entered HM.
1381 * Otherwise we sometimes end up blocking at the final Log4 statement
1382 * in VMXR0Enter, while still in a somewhat in-between state.
1383 */
1384 VMMRZCallRing3Disable(pGVCpu);
1385
1386 /*
1387 * Disable preemption.
1388 */
1389 Assert(!vmmR0ThreadCtxHookIsEnabled(pGVCpu));
1390 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
1391 RTThreadPreemptDisable(&PreemptState);
1392 pGVCpu->vmmr0.s.pPreemptState = &PreemptState;
1393
1394 /*
1395 * Get the host CPU identifiers, make sure they are valid and that
1396 * we've got a TSC delta for the CPU.
1397 */
1398 RTCPUID idHostCpu;
1399 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
1400 if (RT_LIKELY( iHostCpuSet < RTCPUSET_MAX_CPUS
1401 && SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
1402 {
1403 pGVCpu->iHostCpuSet = iHostCpuSet;
1404 ASMAtomicWriteU32(&pGVCpu->idHostCpu, idHostCpu);
1405
1406 /*
1407 * Update the periodic preemption timer if it's active.
1408 */
1409 if (pGVM->vmm.s.fUsePeriodicPreemptionTimers)
1410 GVMMR0SchedUpdatePeriodicPreemptionTimer(pGVM, pGVCpu->idHostCpu, TMCalcHostTimerFrequency(pGVM, pGVCpu));
1411
1412#ifdef VMM_R0_TOUCH_FPU
1413 /*
1414 * Make sure we've got the FPU state loaded so we don't need to clear
1415 * CR0.TS and get out of sync with the host kernel when loading the guest
1416 * FPU state. @ref sec_cpum_fpu (CPUM.cpp) and @bugref{4053}.
1417 */
1418 CPUMR0TouchHostFpu();
1419#endif
1420 int rc;
1421 bool fPreemptRestored = false;
1422 if (!HMR0SuspendPending())
1423 {
1424 /*
1425 * Enable the context switching hook.
1426 */
1427 if (pGVCpu->vmmr0.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1428 {
1429 Assert(!RTThreadCtxHookIsEnabled(pGVCpu->vmmr0.s.hCtxHook));
1430 int rc2 = RTThreadCtxHookEnable(pGVCpu->vmmr0.s.hCtxHook); AssertRC(rc2);
1431 }
1432
1433 /*
1434 * Enter HM context.
1435 */
1436 rc = HMR0Enter(pGVCpu);
1437 if (RT_SUCCESS(rc))
1438 {
1439 VMCPU_SET_STATE(pGVCpu, VMCPUSTATE_STARTED_HM);
1440
1441 /*
1442 * When preemption hooks are in place, enable preemption now that
1443 * we're in HM context.
1444 */
1445 if (vmmR0ThreadCtxHookIsEnabled(pGVCpu))
1446 {
1447 fPreemptRestored = true;
1448 pGVCpu->vmmr0.s.pPreemptState = NULL;
1449 RTThreadPreemptRestore(&PreemptState);
1450 }
1451 VMMRZCallRing3Enable(pGVCpu);
1452
1453 /*
1454 * Setup the longjmp machinery and execute guest code (calls HMR0RunGuestCode).
1455 */
1456 rc = vmmR0CallRing3SetJmp(&pGVCpu->vmmr0.s.AssertJmpBuf, HMR0RunGuestCode, pGVM, pGVCpu);
1457
1458 /*
1459 * Assert sanity on the way out. Using manual assertions code here as normal
1460 * assertions are going to panic the host since we're outside the setjmp/longjmp zone.
1461 */
1462 if (RT_UNLIKELY( VMCPU_GET_STATE(pGVCpu) != VMCPUSTATE_STARTED_HM
1463 && RT_SUCCESS_NP(rc)
1464 && rc != VERR_VMM_RING0_ASSERTION ))
1465 {
1466 pGVM->vmm.s.szRing0AssertMsg1[0] = '\0';
1467 RTStrPrintf(pGVM->vmm.s.szRing0AssertMsg2, sizeof(pGVM->vmm.s.szRing0AssertMsg2),
1468 "Got VMCPU state %d expected %d.\n", VMCPU_GET_STATE(pGVCpu), VMCPUSTATE_STARTED_HM);
1469 rc = VERR_VMM_WRONG_HM_VMCPU_STATE;
1470 }
1471#if 0
1472 /** @todo Get rid of this. HM shouldn't disable the context hook. */
1473 else if (RT_UNLIKELY(vmmR0ThreadCtxHookIsEnabled(pGVCpu)))
1474 {
1475 pGVM->vmm.s.szRing0AssertMsg1[0] = '\0';
1476 RTStrPrintf(pGVM->vmm.s.szRing0AssertMsg2, sizeof(pGVM->vmm.s.szRing0AssertMsg2),
1477 "Thread-context hooks still enabled! VCPU=%p Id=%u rc=%d.\n", pGVCpu, pGVCpu->idCpu, rc);
1478 rc = VERR_VMM_CONTEXT_HOOK_STILL_ENABLED;
1479 }
1480#endif
1481
1482 VMMRZCallRing3Disable(pGVCpu); /* Lazy bird: Simpler just disabling it again... */
1483 VMCPU_SET_STATE(pGVCpu, VMCPUSTATE_STARTED);
1484 }
1485 STAM_COUNTER_INC(&pGVM->vmm.s.StatRunGC);
1486
1487 /*
1488 * Invalidate the host CPU identifiers before we disable the context
1489 * hook / restore preemption.
1490 */
1491 pGVCpu->iHostCpuSet = UINT32_MAX;
1492 ASMAtomicWriteU32(&pGVCpu->idHostCpu, NIL_RTCPUID);
1493
1494 /*
1495 * Disable context hooks. Due to unresolved cleanup issues, we
1496 * cannot leave the hooks enabled when we return to ring-3.
1497 *
1498 * Note! At the moment HM may also have disabled the hook
1499 * when we get here, but the IPRT API handles that.
1500 */
1501 if (pGVCpu->vmmr0.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1502 RTThreadCtxHookDisable(pGVCpu->vmmr0.s.hCtxHook);
1503 }
1504 /*
1505 * The system is about to go into suspend mode; go back to ring 3.
1506 */
1507 else
1508 {
1509 pGVCpu->iHostCpuSet = UINT32_MAX;
1510 ASMAtomicWriteU32(&pGVCpu->idHostCpu, NIL_RTCPUID);
1511 rc = VINF_EM_RAW_INTERRUPT;
1512 }
1513
1514 /** @todo When HM stops messing with the context hook state, we'll disable
1515 * preemption again before the RTThreadCtxHookDisable call. */
1516 if (!fPreemptRestored)
1517 {
1518 pGVCpu->vmmr0.s.pPreemptState = NULL;
1519 RTThreadPreemptRestore(&PreemptState);
1520 }
1521
1522 pGVCpu->vmm.s.iLastGZRc = rc;
1523
1524 /* Fire dtrace probe and collect statistics. */
1525 VBOXVMM_R0_VMM_RETURN_TO_RING3_HM(pGVCpu, CPUMQueryGuestCtxPtr(pGVCpu), rc);
1526#ifdef VBOX_WITH_STATISTICS
1527 vmmR0RecordRC(pGVM, pGVCpu, rc);
1528#endif
1529 VMMRZCallRing3Enable(pGVCpu);
1530
1531 /*
1532 * If this is a halt.
1533 */
1534 if (rc != VINF_EM_HALT)
1535 { /* we're not in a hurry for a HLT, so prefer this path */ }
1536 else
1537 {
1538 pGVCpu->vmm.s.iLastGZRc = rc = vmmR0DoHalt(pGVM, pGVCpu);
1539 if (rc == VINF_SUCCESS)
1540 {
1541 pGVCpu->vmm.s.cR0HaltsSucceeded++;
1542 continue;
1543 }
1544 pGVCpu->vmm.s.cR0HaltsToRing3++;
1545 }
1546 }
1547 /*
1548 * Invalid CPU set index or TSC delta in need of measuring.
1549 */
1550 else
1551 {
1552 pGVCpu->vmmr0.s.pPreemptState = NULL;
1553 pGVCpu->iHostCpuSet = UINT32_MAX;
1554 ASMAtomicWriteU32(&pGVCpu->idHostCpu, NIL_RTCPUID);
1555 RTThreadPreemptRestore(&PreemptState);
1556
1557 VMMRZCallRing3Enable(pGVCpu);
1558
1559 if (iHostCpuSet < RTCPUSET_MAX_CPUS)
1560 {
1561 int rc = SUPR0TscDeltaMeasureBySetIndex(pGVM->pSession, iHostCpuSet, 0 /*fFlags*/,
1562 2 /*cMsWaitRetry*/, 5*RT_MS_1SEC /*cMsWaitThread*/,
1563 0 /*default cTries*/);
1564 if (RT_SUCCESS(rc) || rc == VERR_CPU_OFFLINE)
1565 pGVCpu->vmm.s.iLastGZRc = VINF_EM_RAW_TO_R3;
1566 else
1567 pGVCpu->vmm.s.iLastGZRc = rc;
1568 }
1569 else
1570 pGVCpu->vmm.s.iLastGZRc = VERR_INVALID_CPU_INDEX;
1571 }
1572 break;
1573 } /* halt loop. */
1574 break;
1575 }
1576
1577#ifdef VBOX_WITH_NEM_R0
1578# if defined(RT_ARCH_AMD64) && defined(RT_OS_WINDOWS)
1579 case VMMR0_DO_NEM_RUN:
1580 {
1581 /*
1582 * Setup the longjmp machinery and execute guest code (calls NEMR0RunGuestCode).
1583 */
1584# ifdef VBOXSTRICTRC_STRICT_ENABLED
1585 int rc = vmmR0CallRing3SetJmp2(&pGVCpu->vmmr0.s.AssertJmpBuf, (PFNVMMR0SETJMP2)NEMR0RunGuestCode, pGVM, idCpu);
1586# else
1587 int rc = vmmR0CallRing3SetJmp2(&pGVCpu->vmmr0.s.AssertJmpBuf, NEMR0RunGuestCode, pGVM, idCpu);
1588# endif
1589 STAM_COUNTER_INC(&pGVM->vmm.s.StatRunGC);
1590
1591 pGVCpu->vmm.s.iLastGZRc = rc;
1592
1593 /*
1594 * Fire dtrace probe and collect statistics.
1595 */
1596 VBOXVMM_R0_VMM_RETURN_TO_RING3_NEM(pGVCpu, CPUMQueryGuestCtxPtr(pGVCpu), rc);
1597# ifdef VBOX_WITH_STATISTICS
1598 vmmR0RecordRC(pGVM, pGVCpu, rc);
1599# endif
1600 break;
1601 }
1602# endif
1603#endif
1604
1605 /*
1606 * For profiling.
1607 */
1608 case VMMR0_DO_NOP:
1609 pGVCpu->vmm.s.iLastGZRc = VINF_SUCCESS;
1610 break;
1611
1612 /*
1613 * Shouldn't happen.
1614 */
1615 default:
1616 AssertMsgFailed(("%#x\n", enmOperation));
1617 pGVCpu->vmm.s.iLastGZRc = VERR_NOT_SUPPORTED;
1618 break;
1619 }
1620}
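/*
 * For orientation: ring-3 reaches VMMR0EntryFast through the support driver's
 * fast ioctl path and fetches the result from iLastGZRc afterwards.  A rough,
 * simplified sketch of that caller (see the VMMR3 sources for the exact code;
 * pVMR0 stands for the ring-0 VM handle the session got at VM creation):
 *
 *     SUPR3CallVMMR0Fast(pVMR0, VMMR0_DO_HM_RUN, pVCpu->idCpu);
 *     int rcRing0 = pVCpu->vmm.s.iLastGZRc;   // the actual status from ring-0
 */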
1621
1622
1623/**
1624 * Validates a session or VM session argument.
1625 *
1626 * @returns true / false accordingly.
1627 * @param pGVM The global (ring-0) VM structure.
1628 * @param pClaimedSession The session claim to validate.
1629 * @param pSession The session argument.
1630 */
1631DECLINLINE(bool) vmmR0IsValidSession(PGVM pGVM, PSUPDRVSESSION pClaimedSession, PSUPDRVSESSION pSession)
1632{
1633 /* This must be set! */
1634 if (!pSession)
1635 return false;
1636
1637 /* Only one out of the two. */
1638 if (pGVM && pClaimedSession)
1639 return false;
1640 if (pGVM)
1641 pClaimedSession = pGVM->pSession;
1642 return pClaimedSession == pSession;
1643}
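/*
 * Typical use, simplified from the request dispatcher further down: a request
 * packet carries the session it claims to belong to, and that claim is checked
 * against the session the ioctl actually arrived on (MYREQ is a placeholder
 * request type, not one from this file):
 *
 *     PMYREQ pReq = (PMYREQ)pReqHdr;
 *     if (!vmmR0IsValidSession(pGVM, pReq->pSession, pSession))
 *         return VERR_PERMISSION_DENIED;
 */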
1644
1645
1646/**
1647 * VMMR0EntryEx worker function, either called directly or, whenever possible,
1648 * called through a longjmp so we can exit safely on failure.
1649 *
1650 * @returns VBox status code.
1651 * @param pGVM The global (ring-0) VM structure.
1652 * @param idCpu Virtual CPU ID argument. Must be NIL_VMCPUID if pVM
1653 * is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't.
1654 * @param enmOperation Which operation to execute.
1655 * @param pReqHdr This points to a SUPVMMR0REQHDR packet. Optional.
1656 * The support driver validates this if it's present.
1657 * @param u64Arg Some simple constant argument.
1658 * @param pSession The session of the caller.
1659 *
1660 * @remarks Assume called with interrupts _enabled_.
1661 */
1662DECL_NO_INLINE(static, int) vmmR0EntryExWorker(PGVM pGVM, VMCPUID idCpu, VMMR0OPERATION enmOperation,
1663 PSUPVMMR0REQHDR pReqHdr, uint64_t u64Arg, PSUPDRVSESSION pSession)
1664{
1665 /*
1666 * Validate pGVM and idCpu for consistency and validity.
1667 */
1668 if (pGVM != NULL)
1669 {
1670 if (RT_LIKELY(((uintptr_t)pGVM & HOST_PAGE_OFFSET_MASK) == 0))
1671 { /* likely */ }
1672 else
1673 {
1674 SUPR0Printf("vmmR0EntryExWorker: Invalid pGVM=%p! (op=%d)\n", pGVM, enmOperation);
1675 return VERR_INVALID_POINTER;
1676 }
1677
1678 if (RT_LIKELY(idCpu == NIL_VMCPUID || idCpu < pGVM->cCpus))
1679 { /* likely */ }
1680 else
1681 {
1682 SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu %#x (cCpus=%#x)\n", idCpu, pGVM->cCpus);
1683 return VERR_INVALID_PARAMETER;
1684 }
1685
1686 if (RT_LIKELY( pGVM->enmVMState >= VMSTATE_CREATING
1687 && pGVM->enmVMState <= VMSTATE_TERMINATED
1688 && pGVM->pSession == pSession
1689 && pGVM->pSelf == pGVM))
1690 { /* likely */ }
1691 else
1692 {
1693 SUPR0Printf("vmmR0EntryExWorker: Invalid pGVM=%p:{.enmVMState=%d, .cCpus=%#x, .pSession=%p(==%p), .pSelf=%p(==%p)}! (op=%d)\n",
1694 pGVM, pGVM->enmVMState, pGVM->cCpus, pGVM->pSession, pSession, pGVM->pSelf, pGVM, enmOperation);
1695 return VERR_INVALID_POINTER;
1696 }
1697 }
1698 else if (RT_LIKELY(idCpu == NIL_VMCPUID))
1699 { /* likely */ }
1700 else
1701 {
1702 SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu=%u\n", idCpu);
1703 return VERR_INVALID_PARAMETER;
1704 }
1705
1706 /*
1707 * Process the request.
1708 */
1709 int rc;
1710 switch (enmOperation)
1711 {
1712 /*
1713 * GVM requests
1714 */
1715 case VMMR0_DO_GVMM_CREATE_VM:
1716 if (pGVM == NULL && u64Arg == 0 && idCpu == NIL_VMCPUID)
1717 rc = GVMMR0CreateVMReq((PGVMMCREATEVMREQ)pReqHdr, pSession);
1718 else
1719 rc = VERR_INVALID_PARAMETER;
1720 break;
1721
1722 case VMMR0_DO_GVMM_DESTROY_VM:
1723 if (pReqHdr == NULL && u64Arg == 0)
1724 rc = GVMMR0DestroyVM(pGVM);
1725 else
1726 rc = VERR_INVALID_PARAMETER;
1727 break;
1728
1729 case VMMR0_DO_GVMM_REGISTER_VMCPU:
1730 if (pGVM != NULL)
1731 rc = GVMMR0RegisterVCpu(pGVM, idCpu);
1732 else
1733 rc = VERR_INVALID_PARAMETER;
1734 break;
1735
1736 case VMMR0_DO_GVMM_DEREGISTER_VMCPU:
1737 if (pGVM != NULL)
1738 rc = GVMMR0DeregisterVCpu(pGVM, idCpu);
1739 else
1740 rc = VERR_INVALID_PARAMETER;
1741 break;
1742
1743 case VMMR0_DO_GVMM_REGISTER_WORKER_THREAD:
1744 if (pGVM != NULL && pReqHdr && pReqHdr->cbReq == sizeof(GVMMREGISTERWORKERTHREADREQ))
1745 rc = GVMMR0RegisterWorkerThread(pGVM, (GVMMWORKERTHREAD)(unsigned)u64Arg,
1746 ((PGVMMREGISTERWORKERTHREADREQ)(pReqHdr))->hNativeThreadR3);
1747 else
1748 rc = VERR_INVALID_PARAMETER;
1749 break;
1750
1751 case VMMR0_DO_GVMM_DEREGISTER_WORKER_THREAD:
1752 if (pGVM != NULL)
1753 rc = GVMMR0DeregisterWorkerThread(pGVM, (GVMMWORKERTHREAD)(unsigned)u64Arg);
1754 else
1755 rc = VERR_INVALID_PARAMETER;
1756 break;
1757
1758 case VMMR0_DO_GVMM_SCHED_HALT:
1759 if (pReqHdr)
1760 return VERR_INVALID_PARAMETER;
1761 rc = GVMMR0SchedHaltReq(pGVM, idCpu, u64Arg);
1762 break;
1763
1764 case VMMR0_DO_GVMM_SCHED_WAKE_UP:
1765 if (pReqHdr || u64Arg)
1766 return VERR_INVALID_PARAMETER;
1767 rc = GVMMR0SchedWakeUp(pGVM, idCpu);
1768 break;
1769
1770 case VMMR0_DO_GVMM_SCHED_POKE:
1771 if (pReqHdr || u64Arg)
1772 return VERR_INVALID_PARAMETER;
1773 rc = GVMMR0SchedPoke(pGVM, idCpu);
1774 break;
1775
1776 case VMMR0_DO_GVMM_SCHED_WAKE_UP_AND_POKE_CPUS:
1777 if (u64Arg)
1778 return VERR_INVALID_PARAMETER;
1779 rc = GVMMR0SchedWakeUpAndPokeCpusReq(pGVM, (PGVMMSCHEDWAKEUPANDPOKECPUSREQ)pReqHdr);
1780 break;
1781
1782 case VMMR0_DO_GVMM_SCHED_POLL:
1783 if (pReqHdr || u64Arg > 1)
1784 return VERR_INVALID_PARAMETER;
1785 rc = GVMMR0SchedPoll(pGVM, idCpu, !!u64Arg);
1786 break;
1787
1788 case VMMR0_DO_GVMM_QUERY_STATISTICS:
1789 if (u64Arg)
1790 return VERR_INVALID_PARAMETER;
1791 rc = GVMMR0QueryStatisticsReq(pGVM, (PGVMMQUERYSTATISTICSSREQ)pReqHdr, pSession);
1792 break;
1793
1794 case VMMR0_DO_GVMM_RESET_STATISTICS:
1795 if (u64Arg)
1796 return VERR_INVALID_PARAMETER;
1797 rc = GVMMR0ResetStatisticsReq(pGVM, (PGVMMRESETSTATISTICSSREQ)pReqHdr, pSession);
1798 break;
1799
1800 /*
1801 * Initialize the R0 part of a VM instance.
1802 */
1803 case VMMR0_DO_VMMR0_INIT:
1804 rc = vmmR0InitVM(pGVM, RT_LODWORD(u64Arg), RT_HIDWORD(u64Arg));
1805 break;
1806
1807 /*
1808 * Does EMT specific ring-0 init.
1809 */
1810 case VMMR0_DO_VMMR0_INIT_EMT:
1811 if (idCpu == NIL_VMCPUID)
1812 return VERR_INVALID_CPU_ID;
1813 rc = vmmR0InitVMEmt(pGVM, idCpu);
1814 break;
1815
1816 /*
1817 * Terminate the R0 part of a VM instance.
1818 */
1819 case VMMR0_DO_VMMR0_TERM:
1820 rc = VMMR0TermVM(pGVM, 0 /*idCpu*/);
1821 break;
1822
1823 /*
1824 * Update release or debug logger instances.
1825 */
1826 case VMMR0_DO_VMMR0_UPDATE_LOGGERS:
1827 if (idCpu == NIL_VMCPUID)
1828 return VERR_INVALID_CPU_ID;
1829 if (u64Arg < VMMLOGGER_IDX_MAX && pReqHdr != NULL)
1830 rc = vmmR0UpdateLoggers(pGVM, idCpu /*idCpu*/, (PVMMR0UPDATELOGGERSREQ)pReqHdr, (size_t)u64Arg);
1831 else
1832 return VERR_INVALID_PARAMETER;
1833 break;
1834
1835 /*
1836 * Log flusher thread.
1837 */
1838 case VMMR0_DO_VMMR0_LOG_FLUSHER:
1839 if (idCpu != NIL_VMCPUID)
1840 return VERR_INVALID_CPU_ID;
1841 if (pReqHdr == NULL && pGVM != NULL)
1842 rc = vmmR0LogFlusher(pGVM);
1843 else
1844 return VERR_INVALID_PARAMETER;
1845 break;
1846
1847 /*
1848 * Wait for the flush to finish with all the buffers for the given logger.
1849 */
1850 case VMMR0_DO_VMMR0_LOG_WAIT_FLUSHED:
1851 if (idCpu == NIL_VMCPUID)
1852 return VERR_INVALID_CPU_ID;
1853 if (u64Arg < VMMLOGGER_IDX_MAX && pReqHdr == NULL)
1854 rc = vmmR0LogWaitFlushed(pGVM, idCpu /*idCpu*/, (size_t)u64Arg);
1855 else
1856 return VERR_INVALID_PARAMETER;
1857 break;
1858
1859 /*
1860 * Attempt to enable hm mode and check the current setting.
1861 */
1862 case VMMR0_DO_HM_ENABLE:
1863 rc = HMR0EnableAllCpus(pGVM);
1864 break;
1865
1866 /*
1867 * Setup the hardware accelerated session.
1868 */
1869 case VMMR0_DO_HM_SETUP_VM:
1870 rc = HMR0SetupVM(pGVM);
1871 break;
1872
1873 /*
1874 * PGM wrappers.
1875 */
1876 case VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES:
1877 if (idCpu == NIL_VMCPUID)
1878 return VERR_INVALID_CPU_ID;
1879 rc = PGMR0PhysAllocateHandyPages(pGVM, idCpu);
1880 break;
1881
1882 case VMMR0_DO_PGM_FLUSH_HANDY_PAGES:
1883 if (idCpu == NIL_VMCPUID)
1884 return VERR_INVALID_CPU_ID;
1885 rc = PGMR0PhysFlushHandyPages(pGVM, idCpu);
1886 break;
1887
1888 case VMMR0_DO_PGM_ALLOCATE_LARGE_PAGE:
1889 if (idCpu == NIL_VMCPUID)
1890 return VERR_INVALID_CPU_ID;
1891 rc = PGMR0PhysAllocateLargePage(pGVM, idCpu, u64Arg);
1892 break;
1893
1894 case VMMR0_DO_PGM_PHYS_SETUP_IOMMU:
1895 if (idCpu != 0)
1896 return VERR_INVALID_CPU_ID;
1897 rc = PGMR0PhysSetupIoMmu(pGVM);
1898 break;
1899
1900 case VMMR0_DO_PGM_POOL_GROW:
1901 if (idCpu == NIL_VMCPUID)
1902 return VERR_INVALID_CPU_ID;
1903 rc = PGMR0PoolGrow(pGVM, idCpu);
1904 break;
1905
1906 case VMMR0_DO_PGM_PHYS_HANDLER_INIT:
1907 if (idCpu != 0 || pReqHdr != NULL || u64Arg > UINT32_MAX)
1908 return VERR_INVALID_PARAMETER;
1909 rc = PGMR0PhysHandlerInitReqHandler(pGVM, (uint32_t)u64Arg);
1910 break;
1911
1912 /*
1913 * GMM wrappers.
1914 */
1915 case VMMR0_DO_GMM_INITIAL_RESERVATION:
1916 if (u64Arg)
1917 return VERR_INVALID_PARAMETER;
1918 rc = GMMR0InitialReservationReq(pGVM, idCpu, (PGMMINITIALRESERVATIONREQ)pReqHdr);
1919 break;
1920
1921 case VMMR0_DO_GMM_UPDATE_RESERVATION:
1922 if (u64Arg)
1923 return VERR_INVALID_PARAMETER;
1924 rc = GMMR0UpdateReservationReq(pGVM, idCpu, (PGMMUPDATERESERVATIONREQ)pReqHdr);
1925 break;
1926
1927 case VMMR0_DO_GMM_ALLOCATE_PAGES:
1928 if (u64Arg)
1929 return VERR_INVALID_PARAMETER;
1930 rc = GMMR0AllocatePagesReq(pGVM, idCpu, (PGMMALLOCATEPAGESREQ)pReqHdr);
1931 break;
1932
1933 case VMMR0_DO_GMM_FREE_PAGES:
1934 if (u64Arg)
1935 return VERR_INVALID_PARAMETER;
1936 rc = GMMR0FreePagesReq(pGVM, idCpu, (PGMMFREEPAGESREQ)pReqHdr);
1937 break;
1938
1939 case VMMR0_DO_GMM_FREE_LARGE_PAGE:
1940 if (u64Arg)
1941 return VERR_INVALID_PARAMETER;
1942 rc = GMMR0FreeLargePageReq(pGVM, idCpu, (PGMMFREELARGEPAGEREQ)pReqHdr);
1943 break;
1944
1945 case VMMR0_DO_GMM_QUERY_HYPERVISOR_MEM_STATS:
1946 if (u64Arg)
1947 return VERR_INVALID_PARAMETER;
1948 rc = GMMR0QueryHypervisorMemoryStatsReq((PGMMMEMSTATSREQ)pReqHdr);
1949 break;
1950
1951 case VMMR0_DO_GMM_QUERY_MEM_STATS:
1952 if (idCpu == NIL_VMCPUID)
1953 return VERR_INVALID_CPU_ID;
1954 if (u64Arg)
1955 return VERR_INVALID_PARAMETER;
1956 rc = GMMR0QueryMemoryStatsReq(pGVM, idCpu, (PGMMMEMSTATSREQ)pReqHdr);
1957 break;
1958
1959 case VMMR0_DO_GMM_BALLOONED_PAGES:
1960 if (u64Arg)
1961 return VERR_INVALID_PARAMETER;
1962 rc = GMMR0BalloonedPagesReq(pGVM, idCpu, (PGMMBALLOONEDPAGESREQ)pReqHdr);
1963 break;
1964
1965 case VMMR0_DO_GMM_MAP_UNMAP_CHUNK:
1966 if (u64Arg)
1967 return VERR_INVALID_PARAMETER;
1968 rc = GMMR0MapUnmapChunkReq(pGVM, (PGMMMAPUNMAPCHUNKREQ)pReqHdr);
1969 break;
1970
1971 case VMMR0_DO_GMM_REGISTER_SHARED_MODULE:
1972 if (idCpu == NIL_VMCPUID)
1973 return VERR_INVALID_CPU_ID;
1974 if (u64Arg)
1975 return VERR_INVALID_PARAMETER;
1976 rc = GMMR0RegisterSharedModuleReq(pGVM, idCpu, (PGMMREGISTERSHAREDMODULEREQ)pReqHdr);
1977 break;
1978
1979 case VMMR0_DO_GMM_UNREGISTER_SHARED_MODULE:
1980 if (idCpu == NIL_VMCPUID)
1981 return VERR_INVALID_CPU_ID;
1982 if (u64Arg)
1983 return VERR_INVALID_PARAMETER;
1984 rc = GMMR0UnregisterSharedModuleReq(pGVM, idCpu, (PGMMUNREGISTERSHAREDMODULEREQ)pReqHdr);
1985 break;
1986
1987 case VMMR0_DO_GMM_RESET_SHARED_MODULES:
1988 if (idCpu == NIL_VMCPUID)
1989 return VERR_INVALID_CPU_ID;
1990 if ( u64Arg
1991 || pReqHdr)
1992 return VERR_INVALID_PARAMETER;
1993 rc = GMMR0ResetSharedModules(pGVM, idCpu);
1994 break;
1995
1996#ifdef VBOX_WITH_PAGE_SHARING
1997 case VMMR0_DO_GMM_CHECK_SHARED_MODULES:
1998 {
1999 if (idCpu == NIL_VMCPUID)
2000 return VERR_INVALID_CPU_ID;
2001 if ( u64Arg
2002 || pReqHdr)
2003 return VERR_INVALID_PARAMETER;
2004 rc = GMMR0CheckSharedModules(pGVM, idCpu);
2005 break;
2006 }
2007#endif
2008
2009#if defined(VBOX_STRICT) && HC_ARCH_BITS == 64
2010 case VMMR0_DO_GMM_FIND_DUPLICATE_PAGE:
2011 if (u64Arg)
2012 return VERR_INVALID_PARAMETER;
2013 rc = GMMR0FindDuplicatePageReq(pGVM, (PGMMFINDDUPLICATEPAGEREQ)pReqHdr);
2014 break;
2015#endif
2016
2017 case VMMR0_DO_GMM_QUERY_STATISTICS:
2018 if (u64Arg)
2019 return VERR_INVALID_PARAMETER;
2020 rc = GMMR0QueryStatisticsReq(pGVM, (PGMMQUERYSTATISTICSSREQ)pReqHdr);
2021 break;
2022
2023 case VMMR0_DO_GMM_RESET_STATISTICS:
2024 if (u64Arg)
2025 return VERR_INVALID_PARAMETER;
2026 rc = GMMR0ResetStatisticsReq(pGVM, (PGMMRESETSTATISTICSSREQ)pReqHdr);
2027 break;
2028
2029 /*
2030 * A quick GCFGM mock-up.
2031 */
2032 /** @todo GCFGM with proper access control, ring-3 management interface and all that. */
2033 case VMMR0_DO_GCFGM_SET_VALUE:
2034 case VMMR0_DO_GCFGM_QUERY_VALUE:
2035 {
2036 if (pGVM || !pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
2037 return VERR_INVALID_PARAMETER;
2038 PGCFGMVALUEREQ pReq = (PGCFGMVALUEREQ)pReqHdr;
2039 if (pReq->Hdr.cbReq != sizeof(*pReq))
2040 return VERR_INVALID_PARAMETER;
2041 if (enmOperation == VMMR0_DO_GCFGM_SET_VALUE)
2042 {
2043 rc = GVMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
2044 //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
2045 // rc = GMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
2046 }
2047 else
2048 {
2049 rc = GVMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
2050 //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
2051 // rc = GMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
2052 }
2053 break;
2054 }
2055
2056 /*
2057 * PDM Wrappers.
2058 */
2059 case VMMR0_DO_PDM_DRIVER_CALL_REQ_HANDLER:
2060 {
2061 if (!pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
2062 return VERR_INVALID_PARAMETER;
2063 rc = PDMR0DriverCallReqHandler(pGVM, (PPDMDRIVERCALLREQHANDLERREQ)pReqHdr);
2064 break;
2065 }
2066
2067 case VMMR0_DO_PDM_DEVICE_CREATE:
2068 {
2069 if (!pReqHdr || u64Arg || idCpu != 0)
2070 return VERR_INVALID_PARAMETER;
2071 rc = PDMR0DeviceCreateReqHandler(pGVM, (PPDMDEVICECREATEREQ)pReqHdr);
2072 break;
2073 }
2074
2075 case VMMR0_DO_PDM_DEVICE_GEN_CALL:
2076 {
2077 if (!pReqHdr || u64Arg)
2078 return VERR_INVALID_PARAMETER;
2079 rc = PDMR0DeviceGenCallReqHandler(pGVM, (PPDMDEVICEGENCALLREQ)pReqHdr, idCpu);
2080 break;
2081 }
2082
2083 /** @todo Remove this once all devices have been converted to the new style! @bugref{9218} */
2084 case VMMR0_DO_PDM_DEVICE_COMPAT_SET_CRITSECT:
2085 {
2086 if (!pReqHdr || u64Arg || idCpu != 0)
2087 return VERR_INVALID_PARAMETER;
2088 rc = PDMR0DeviceCompatSetCritSectReqHandler(pGVM, (PPDMDEVICECOMPATSETCRITSECTREQ)pReqHdr);
2089 break;
2090 }
2091
2092 case VMMR0_DO_PDM_QUEUE_CREATE:
2093 {
2094 if (!pReqHdr || u64Arg || idCpu != 0)
2095 return VERR_INVALID_PARAMETER;
2096 rc = PDMR0QueueCreateReqHandler(pGVM, (PPDMQUEUECREATEREQ)pReqHdr);
2097 break;
2098 }
2099
2100 /*
2101 * Requests to the internal networking service.
2102 */
2103 case VMMR0_DO_INTNET_OPEN:
2104 {
2105 PINTNETOPENREQ pReq = (PINTNETOPENREQ)pReqHdr;
2106 if (u64Arg || !pReq || !vmmR0IsValidSession(pGVM, pReq->pSession, pSession) || idCpu != NIL_VMCPUID)
2107 return VERR_INVALID_PARAMETER;
2108 rc = IntNetR0OpenReq(pSession, pReq);
2109 break;
2110 }
2111
2112 case VMMR0_DO_INTNET_IF_CLOSE:
2113 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFCLOSEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2114 return VERR_INVALID_PARAMETER;
2115 rc = IntNetR0IfCloseReq(pSession, (PINTNETIFCLOSEREQ)pReqHdr);
2116 break;
2117
2118
2119 case VMMR0_DO_INTNET_IF_GET_BUFFER_PTRS:
2120 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFGETBUFFERPTRSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2121 return VERR_INVALID_PARAMETER;
2122 rc = IntNetR0IfGetBufferPtrsReq(pSession, (PINTNETIFGETBUFFERPTRSREQ)pReqHdr);
2123 break;
2124
2125 case VMMR0_DO_INTNET_IF_SET_PROMISCUOUS_MODE:
2126 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2127 return VERR_INVALID_PARAMETER;
2128 rc = IntNetR0IfSetPromiscuousModeReq(pSession, (PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr);
2129 break;
2130
2131 case VMMR0_DO_INTNET_IF_SET_MAC_ADDRESS:
2132 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFSETMACADDRESSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2133 return VERR_INVALID_PARAMETER;
2134 rc = IntNetR0IfSetMacAddressReq(pSession, (PINTNETIFSETMACADDRESSREQ)pReqHdr);
2135 break;
2136
2137 case VMMR0_DO_INTNET_IF_SET_ACTIVE:
2138 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFSETACTIVEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2139 return VERR_INVALID_PARAMETER;
2140 rc = IntNetR0IfSetActiveReq(pSession, (PINTNETIFSETACTIVEREQ)pReqHdr);
2141 break;
2142
2143 case VMMR0_DO_INTNET_IF_SEND:
2144 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2145 return VERR_INVALID_PARAMETER;
2146 rc = IntNetR0IfSendReq(pSession, (PINTNETIFSENDREQ)pReqHdr);
2147 break;
2148
2149 case VMMR0_DO_INTNET_IF_WAIT:
2150 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2151 return VERR_INVALID_PARAMETER;
2152 rc = IntNetR0IfWaitReq(pSession, (PINTNETIFWAITREQ)pReqHdr);
2153 break;
2154
2155 case VMMR0_DO_INTNET_IF_ABORT_WAIT:
2156 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2157 return VERR_INVALID_PARAMETER;
2158 rc = IntNetR0IfAbortWaitReq(pSession, (PINTNETIFABORTWAITREQ)pReqHdr);
2159 break;
2160
2161#if 0 //def VBOX_WITH_PCI_PASSTHROUGH
2162 /*
2163 * Requests to host PCI driver service.
2164 */
2165 case VMMR0_DO_PCIRAW_REQ:
2166 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PPCIRAWSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2167 return VERR_INVALID_PARAMETER;
2168 rc = PciRawR0ProcessReq(pGVM, pSession, (PPCIRAWSENDREQ)pReqHdr);
2169 break;
2170#endif
2171
2172 /*
2173 * NEM requests.
2174 */
2175#ifdef VBOX_WITH_NEM_R0
2176# if defined(RT_ARCH_AMD64) && defined(RT_OS_WINDOWS)
2177 case VMMR0_DO_NEM_INIT_VM:
2178 if (u64Arg || pReqHdr || idCpu != 0)
2179 return VERR_INVALID_PARAMETER;
2180 rc = NEMR0InitVM(pGVM);
2181 break;
2182
2183 case VMMR0_DO_NEM_INIT_VM_PART_2:
2184 if (u64Arg || pReqHdr || idCpu != 0)
2185 return VERR_INVALID_PARAMETER;
2186 rc = NEMR0InitVMPart2(pGVM);
2187 break;
2188
2189 case VMMR0_DO_NEM_MAP_PAGES:
2190 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2191 return VERR_INVALID_PARAMETER;
2192 rc = NEMR0MapPages(pGVM, idCpu);
2193 break;
2194
2195 case VMMR0_DO_NEM_UNMAP_PAGES:
2196 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2197 return VERR_INVALID_PARAMETER;
2198 rc = NEMR0UnmapPages(pGVM, idCpu);
2199 break;
2200
2201 case VMMR0_DO_NEM_EXPORT_STATE:
2202 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2203 return VERR_INVALID_PARAMETER;
2204 rc = NEMR0ExportState(pGVM, idCpu);
2205 break;
2206
2207 case VMMR0_DO_NEM_IMPORT_STATE:
2208 if (pReqHdr || idCpu == NIL_VMCPUID)
2209 return VERR_INVALID_PARAMETER;
2210 rc = NEMR0ImportState(pGVM, idCpu, u64Arg);
2211 break;
2212
2213 case VMMR0_DO_NEM_QUERY_CPU_TICK:
2214 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2215 return VERR_INVALID_PARAMETER;
2216 rc = NEMR0QueryCpuTick(pGVM, idCpu);
2217 break;
2218
2219 case VMMR0_DO_NEM_RESUME_CPU_TICK_ON_ALL:
2220 if (pReqHdr || idCpu == NIL_VMCPUID)
2221 return VERR_INVALID_PARAMETER;
2222 rc = NEMR0ResumeCpuTickOnAll(pGVM, idCpu, u64Arg);
2223 break;
2224
2225 case VMMR0_DO_NEM_UPDATE_STATISTICS:
2226 if (u64Arg || pReqHdr)
2227 return VERR_INVALID_PARAMETER;
2228 rc = NEMR0UpdateStatistics(pGVM, idCpu);
2229 break;
2230
2231# if 1 && defined(DEBUG_bird)
2232 case VMMR0_DO_NEM_EXPERIMENT:
2233 if (pReqHdr)
2234 return VERR_INVALID_PARAMETER;
2235 rc = NEMR0DoExperiment(pGVM, idCpu, u64Arg);
2236 break;
2237# endif
2238# endif
2239#endif
2240
2241 /*
2242 * IOM requests.
2243 */
2244 case VMMR0_DO_IOM_GROW_IO_PORTS:
2245 {
2246 if (pReqHdr || idCpu != 0)
2247 return VERR_INVALID_PARAMETER;
2248 rc = IOMR0IoPortGrowRegistrationTables(pGVM, u64Arg);
2249 break;
2250 }
2251
2252 case VMMR0_DO_IOM_GROW_IO_PORT_STATS:
2253 {
2254 if (pReqHdr || idCpu != 0)
2255 return VERR_INVALID_PARAMETER;
2256 rc = IOMR0IoPortGrowStatisticsTable(pGVM, u64Arg);
2257 break;
2258 }
2259
2260 case VMMR0_DO_IOM_GROW_MMIO_REGS:
2261 {
2262 if (pReqHdr || idCpu != 0)
2263 return VERR_INVALID_PARAMETER;
2264 rc = IOMR0MmioGrowRegistrationTables(pGVM, u64Arg);
2265 break;
2266 }
2267
2268 case VMMR0_DO_IOM_GROW_MMIO_STATS:
2269 {
2270 if (pReqHdr || idCpu != 0)
2271 return VERR_INVALID_PARAMETER;
2272 rc = IOMR0MmioGrowStatisticsTable(pGVM, u64Arg);
2273 break;
2274 }
2275
2276 case VMMR0_DO_IOM_SYNC_STATS_INDICES:
2277 {
2278 if (pReqHdr || idCpu != 0)
2279 return VERR_INVALID_PARAMETER;
2280 rc = IOMR0IoPortSyncStatisticsIndices(pGVM);
2281 if (RT_SUCCESS(rc))
2282 rc = IOMR0MmioSyncStatisticsIndices(pGVM);
2283 break;
2284 }
2285
2286 /*
2287 * DBGF requests.
2288 */
2289#ifdef VBOX_WITH_DBGF_TRACING
2290 case VMMR0_DO_DBGF_TRACER_CREATE:
2291 {
2292 if (!pReqHdr || u64Arg || idCpu != 0)
2293 return VERR_INVALID_PARAMETER;
2294 rc = DBGFR0TracerCreateReqHandler(pGVM, (PDBGFTRACERCREATEREQ)pReqHdr);
2295 break;
2296 }
2297
2298 case VMMR0_DO_DBGF_TRACER_CALL_REQ_HANDLER:
2299 {
2300 if (!pReqHdr || u64Arg)
2301 return VERR_INVALID_PARAMETER;
2302# if 0 /** @todo */
2303 rc = DBGFR0TracerGenCallReqHandler(pGVM, (PDBGFTRACERGENCALLREQ)pReqHdr, idCpu);
2304# else
2305 rc = VERR_NOT_IMPLEMENTED;
2306# endif
2307 break;
2308 }
2309#endif
2310
2311 case VMMR0_DO_DBGF_BP_INIT:
2312 {
2313 if (!pReqHdr || u64Arg || idCpu != 0)
2314 return VERR_INVALID_PARAMETER;
2315 rc = DBGFR0BpInitReqHandler(pGVM, (PDBGFBPINITREQ)pReqHdr);
2316 break;
2317 }
2318
2319 case VMMR0_DO_DBGF_BP_CHUNK_ALLOC:
2320 {
2321 if (!pReqHdr || u64Arg || idCpu != 0)
2322 return VERR_INVALID_PARAMETER;
2323 rc = DBGFR0BpChunkAllocReqHandler(pGVM, (PDBGFBPCHUNKALLOCREQ)pReqHdr);
2324 break;
2325 }
2326
2327 case VMMR0_DO_DBGF_BP_L2_TBL_CHUNK_ALLOC:
2328 {
2329 if (!pReqHdr || u64Arg || idCpu != 0)
2330 return VERR_INVALID_PARAMETER;
2331 rc = DBGFR0BpL2TblChunkAllocReqHandler(pGVM, (PDBGFBPL2TBLCHUNKALLOCREQ)pReqHdr);
2332 break;
2333 }
2334
2335 case VMMR0_DO_DBGF_BP_OWNER_INIT:
2336 {
2337 if (!pReqHdr || u64Arg || idCpu != 0)
2338 return VERR_INVALID_PARAMETER;
2339 rc = DBGFR0BpOwnerInitReqHandler(pGVM, (PDBGFBPOWNERINITREQ)pReqHdr);
2340 break;
2341 }
2342
2343 case VMMR0_DO_DBGF_BP_PORTIO_INIT:
2344 {
2345 if (!pReqHdr || u64Arg || idCpu != 0)
2346 return VERR_INVALID_PARAMETER;
2347 rc = DBGFR0BpPortIoInitReqHandler(pGVM, (PDBGFBPINITREQ)pReqHdr);
2348 break;
2349 }
2350
2351
2352 /*
2353 * TM requests.
2354 */
2355 case VMMR0_DO_TM_GROW_TIMER_QUEUE:
2356 {
2357 if (pReqHdr || idCpu == NIL_VMCPUID)
2358 return VERR_INVALID_PARAMETER;
2359 rc = TMR0TimerQueueGrow(pGVM, RT_HI_U32(u64Arg), RT_LO_U32(u64Arg));
2360 break;
2361 }
2362
2363 /*
2364 * For profiling.
2365 */
2366 case VMMR0_DO_NOP:
2367 case VMMR0_DO_SLOW_NOP:
2368 return VINF_SUCCESS;
2369
2370 /*
2371 * For testing Ring-0 APIs invoked in this environment.
2372 */
2373 case VMMR0_DO_TESTS:
2374 /** @todo make new test */
2375 return VINF_SUCCESS;
2376
2377 default:
2378 /*
2379 * We're returning VERR_NOT_SUPPORTED here so we've got something other
2380 * than -1, which the interrupt gate glue code might return.
2381 */
2382 Log(("operation %#x is not supported\n", enmOperation));
2383 return VERR_NOT_SUPPORTED;
2384 }
2385 return rc;
2386}
2387
2388
2389/**
2390 * This is just a longjmp wrapper function for VMMR0EntryEx calls.
2391 *
2392 * @returns VBox status code.
2393 * @param pvArgs The argument package
2394 */
2395static DECLCALLBACK(int) vmmR0EntryExWrapper(void *pvArgs)
2396{
2397 PGVMCPU pGVCpu = (PGVMCPU)pvArgs;
2398 return vmmR0EntryExWorker(pGVCpu->vmmr0.s.pGVM,
2399 pGVCpu->vmmr0.s.idCpu,
2400 pGVCpu->vmmr0.s.enmOperation,
2401 pGVCpu->vmmr0.s.pReq,
2402 pGVCpu->vmmr0.s.u64Arg,
2403 pGVCpu->vmmr0.s.pSession);
2404}
2405
2406
2407/**
2408 * The Ring 0 entry point, called by the support library (SUP).
2409 *
2410 * @returns VBox status code.
2411 * @param pGVM The global (ring-0) VM structure.
2412 * @param pVM The cross context VM structure.
2413 * @param idCpu Virtual CPU ID argument. Must be NIL_VMCPUID if pVM
2414 * is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't.
2415 * @param enmOperation Which operation to execute.
2416 * @param pReq Pointer to the SUPVMMR0REQHDR packet. Optional.
2417 * @param u64Arg Some simple constant argument.
2418 * @param pSession The session of the caller.
2419 * @remarks Assume called with interrupts _enabled_.
2420 */
2421VMMR0DECL(int) VMMR0EntryEx(PGVM pGVM, PVMCC pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation,
2422 PSUPVMMR0REQHDR pReq, uint64_t u64Arg, PSUPDRVSESSION pSession)
2423{
2424 /*
2425 * Requests that should only happen on the EMT thread will be
2426 * wrapped in a setjmp so we can assert without causing too much trouble.
2427 */
2428 if ( pVM != NULL
2429 && pGVM != NULL
2430 && pVM == pGVM /** @todo drop pVM or pGVM */
2431 && idCpu < pGVM->cCpus
2432 && pGVM->pSession == pSession
2433 && pGVM->pSelf == pGVM
2434 && enmOperation != VMMR0_DO_GVMM_DESTROY_VM
2435 && enmOperation != VMMR0_DO_GVMM_REGISTER_VMCPU
2436 && enmOperation != VMMR0_DO_GVMM_SCHED_WAKE_UP /* idCpu is not caller but target. Sigh. */ /** @todo fix*/
2437 && enmOperation != VMMR0_DO_GVMM_SCHED_POKE /* idCpu is not caller but target. Sigh. */ /** @todo fix*/
2438 )
2439 {
2440 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2441 RTNATIVETHREAD hNativeThread = RTThreadNativeSelf();
2442 if (RT_LIKELY( pGVCpu->hEMT == hNativeThread
2443 && pGVCpu->hNativeThreadR0 == hNativeThread))
2444 {
2445 pGVCpu->vmmr0.s.pGVM = pGVM;
2446 pGVCpu->vmmr0.s.idCpu = idCpu;
2447 pGVCpu->vmmr0.s.enmOperation = enmOperation;
2448 pGVCpu->vmmr0.s.pReq = pReq;
2449 pGVCpu->vmmr0.s.u64Arg = u64Arg;
2450 pGVCpu->vmmr0.s.pSession = pSession;
2451 return vmmR0CallRing3SetJmpEx(&pGVCpu->vmmr0.s.AssertJmpBuf, vmmR0EntryExWrapper, pGVCpu,
2452 ((uintptr_t)u64Arg << 16) | (uintptr_t)enmOperation);
2453 }
2454 return VERR_VM_THREAD_NOT_EMT;
2455 }
2456 return vmmR0EntryExWorker(pGVM, idCpu, enmOperation, pReq, u64Arg, pSession);
2457}
2458
2459
2460/*********************************************************************************************************************************
2461* EMT Blocking *
2462*********************************************************************************************************************************/
2463
2464/**
2465 * Checks whether we've armed the ring-0 long jump machinery.
2466 *
2467 * @returns @c true / @c false
2468 * @param pVCpu The cross context virtual CPU structure.
2469 * @thread EMT
2470 * @sa VMMIsLongJumpArmed
2471 */
2472VMMR0_INT_DECL(bool) VMMR0IsLongJumpArmed(PVMCPUCC pVCpu)
2473{
2474#ifdef RT_ARCH_X86
2475 return pVCpu->vmmr0.s.AssertJmpBuf.eip != 0;
2476#else
2477 return pVCpu->vmmr0.s.AssertJmpBuf.rip != 0;
2478#endif
2479}
2480
2481
2482/**
2483 * Locking helper that deals with HM context and checks if the thread can block.
2484 *
2485 * @returns VINF_SUCCESS if we can block. Returns @a rcBusy or
2486 * VERR_VMM_CANNOT_BLOCK if not able to block.
2487 * @param pVCpu The cross context virtual CPU structure of the calling
2488 * thread.
2489 * @param rcBusy What to return in case of a blocking problem. Will IPE
2490 * if VINF_SUCCESS and we cannot block.
2491 * @param pszCaller The caller (for logging problems).
2492 * @param pvLock The lock address (for logging problems).
2493 * @param pCtx Where to return context info for the resume call.
2494 * @thread EMT(pVCpu)
2495 */
2496VMMR0_INT_DECL(int) VMMR0EmtPrepareToBlock(PVMCPUCC pVCpu, int rcBusy, const char *pszCaller, void *pvLock,
2497 PVMMR0EMTBLOCKCTX pCtx)
2498{
2499 const char *pszMsg;
2500
2501 /*
2502 * Check that we are allowed to block.
2503 */
2504 if (RT_LIKELY(VMMRZCallRing3IsEnabled(pVCpu)))
2505 {
2506 /*
2507 * Are we in HM context and w/o a context hook? If so, work the context hook.
2508 */
2509 if (pVCpu->idHostCpu != NIL_RTCPUID)
2510 {
2511 Assert(pVCpu->iHostCpuSet != UINT32_MAX);
2512
2513 if (pVCpu->vmmr0.s.hCtxHook == NIL_RTTHREADCTXHOOK)
2514 {
2515 vmmR0ThreadCtxCallback(RTTHREADCTXEVENT_OUT, pVCpu);
2516 if (pVCpu->vmmr0.s.pPreemptState)
2517 RTThreadPreemptRestore(pVCpu->vmmr0.s.pPreemptState);
2518
2519 pCtx->uMagic = VMMR0EMTBLOCKCTX_MAGIC;
2520 pCtx->fWasInHmContext = true;
2521 return VINF_SUCCESS;
2522 }
2523 }
2524
2525 if (RT_LIKELY(!pVCpu->vmmr0.s.pPreemptState))
2526 {
2527 /*
2528 * Not in HM context or we've got hooks, so just check that preemption
2529 * is enabled.
2530 */
2531 if (RT_LIKELY(RTThreadPreemptIsEnabled(NIL_RTTHREAD)))
2532 {
2533 pCtx->uMagic = VMMR0EMTBLOCKCTX_MAGIC;
2534 pCtx->fWasInHmContext = false;
2535 return VINF_SUCCESS;
2536 }
2537 pszMsg = "Preemption is disabled!";
2538 }
2539 else
2540 pszMsg = "Preemption state w/o HM state!";
2541 }
2542 else
2543 pszMsg = "Ring-3 calls are disabled!";
2544
2545 static uint32_t volatile s_cWarnings = 0;
2546 if (++s_cWarnings < 50)
2547 SUPR0Printf("VMMR0EmtPrepareToBlock: %s pvLock=%p pszCaller=%s rcBusy=%d\n", pszMsg, pvLock, pszCaller, rcBusy);
2548 pCtx->uMagic = VMMR0EMTBLOCKCTX_MAGIC_DEAD;
2549 pCtx->fWasInHmContext = false;
2550 return rcBusy != VINF_SUCCESS ? rcBusy : VERR_VMM_CANNOT_BLOCK;
2551}
2552
2553
2554/**
2555 * Counterpart to VMMR0EmtPrepareToBlock.
2556 *
2557 * @param pVCpu The cross context virtual CPU structure of the calling
2558 * thread.
2559 * @param pCtx The context structure used with VMMR0EmtPrepareToBlock.
2560 * @thread EMT(pVCpu)
2561 */
2562VMMR0_INT_DECL(void) VMMR0EmtResumeAfterBlocking(PVMCPUCC pVCpu, PVMMR0EMTBLOCKCTX pCtx)
2563{
2564 AssertReturnVoid(pCtx->uMagic == VMMR0EMTBLOCKCTX_MAGIC);
2565 if (pCtx->fWasInHmContext)
2566 {
2567 if (pVCpu->vmmr0.s.pPreemptState)
2568 RTThreadPreemptDisable(pVCpu->vmmr0.s.pPreemptState);
2569
2570 pCtx->fWasInHmContext = false;
2571 vmmR0ThreadCtxCallback(RTTHREADCTXEVENT_IN, pVCpu);
2572 }
2573 pCtx->uMagic = VMMR0EMTBLOCKCTX_MAGIC_DEAD;
2574}
2575
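/*
 * Minimal usage sketch (illustrative, not part of the build): a ring-0 path
 * that needs to block on an event pairs the two helpers like this; hEvtWait
 * and rcBusy are hypothetical placeholders supplied by the caller:
 *
 *     VMMR0EMTBLOCKCTX Ctx;
 *     int rc = VMMR0EmtPrepareToBlock(pVCpu, rcBusy, __FUNCTION__, (void *)(uintptr_t)hEvtWait, &Ctx);
 *     if (RT_SUCCESS(rc))
 *     {
 *         rc = RTSemEventWait(hEvtWait, RT_MS_1SEC);
 *         VMMR0EmtResumeAfterBlocking(pVCpu, &Ctx);
 *     }
 */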
2576
2577/**
2578 * Helper for waiting on an RTSEMEVENT, caller did VMMR0EmtPrepareToBlock.
2579 *
2580 * @returns VBox status code.
2581 * @retval VERR_THREAD_IS_TERMINATING if the calling thread is terminating.
2582 * @retval VERR_TIMEOUT if we ended up waiting too long, either according to
2583 * @a cMsTimeout or to maximum wait values.
2584 *
2585 * @param pGVCpu The ring-0 virtual CPU structure.
2586 * @param fFlags VMMR0EMTWAIT_F_XXX.
2587 * @param hEvent The event to wait on.
2588 * @param cMsTimeout The timeout or RT_INDEFINITE_WAIT.
2589 */
2590VMMR0_INT_DECL(int) VMMR0EmtWaitEventInner(PGVMCPU pGVCpu, uint32_t fFlags, RTSEMEVENT hEvent, RTMSINTERVAL cMsTimeout)
2591{
2592 AssertReturn(pGVCpu->hEMT == RTThreadNativeSelf(), VERR_VM_THREAD_NOT_EMT);
2593
2594 /*
2595 * Note! Similar code is found in the PDM critical sections too.
2596 */
2597 uint64_t const nsStart = RTTimeNanoTS();
2598 uint64_t cNsMaxTotal = cMsTimeout == RT_INDEFINITE_WAIT
2599 ? RT_NS_5MIN : RT_MIN(RT_NS_5MIN, RT_NS_1MS_64 * cMsTimeout);
2600 uint32_t cMsMaxOne = RT_MS_5SEC;
2601 bool fNonInterruptible = false;
2602 for (;;)
2603 {
2604 /* Wait. */
2605 int rcWait = !fNonInterruptible
2606 ? RTSemEventWaitNoResume(hEvent, cMsMaxOne)
2607 : RTSemEventWait(hEvent, cMsMaxOne);
2608 if (RT_SUCCESS(rcWait))
2609 return rcWait;
2610
2611 if (rcWait == VERR_TIMEOUT || rcWait == VERR_INTERRUPTED)
2612 {
2613 uint64_t const cNsElapsed = RTTimeNanoTS() - nsStart;
2614
2615 /*
2616 * Check the thread termination status.
2617 */
2618 int const rcTerm = RTThreadQueryTerminationStatus(NIL_RTTHREAD);
2619 AssertMsg(rcTerm == VINF_SUCCESS || rcTerm == VERR_NOT_SUPPORTED || rcTerm == VINF_THREAD_IS_TERMINATING,
2620 ("rcTerm=%Rrc\n", rcTerm));
2621 if ( rcTerm == VERR_NOT_SUPPORTED
2622 && !fNonInterruptible
2623 && cNsMaxTotal > RT_NS_1MIN)
2624 cNsMaxTotal = RT_NS_1MIN;
2625
2626 /* We return immediately if it looks like the thread is terminating. */
2627 if (rcTerm == VINF_THREAD_IS_TERMINATING)
2628 return VERR_THREAD_IS_TERMINATING;
2629
2630 /* We may suppress VERR_INTERRUPTED if VMMR0EMTWAIT_F_TRY_SUPPRESS_INTERRUPTED was
2631 specified, otherwise we'll just return it. */
2632 if (rcWait == VERR_INTERRUPTED)
2633 {
2634 if (!(fFlags & VMMR0EMTWAIT_F_TRY_SUPPRESS_INTERRUPTED))
2635 return VERR_INTERRUPTED;
2636 if (!fNonInterruptible)
2637 {
2638 /* First time: Adjust down the wait parameters and make sure we get at least
2639 one non-interruptible wait before timing out. */
2640 fNonInterruptible = true;
2641 cMsMaxOne = 32;
2642 uint64_t const cNsLeft = cNsMaxTotal - cNsElapsed;
2643 if (cNsLeft > RT_NS_10SEC)
2644 cNsMaxTotal = cNsElapsed + RT_NS_10SEC;
2645 continue;
2646 }
2647 }
2648
2649 /* Check for timeout. */
2650 if (cNsElapsed > cNsMaxTotal)
2651 return VERR_TIMEOUT;
2652 }
2653 else
2654 return rcWait;
2655 }
2656 /* not reached */
2657}
2658
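/*
 * Note on the wait bounds above (derived from the code, for illustration):
 * the total wait is capped at 5 minutes, or at cMsTimeout if that is shorter,
 * and each individual wait is at most 5 seconds so the thread termination
 * status can be polled.  If the host cannot report termination status, the
 * budget is trimmed to 1 minute while still doing interruptible waits.  Once
 * a wait is interrupted and VMMR0EMTWAIT_F_TRY_SUPPRESS_INTERRUPTED is set,
 * the code switches to short (32 ms) non-interruptible waits and trims the
 * remaining budget to about 10 seconds.  Example: with RT_INDEFINITE_WAIT and
 * a signal arriving after 2 s, the effective budget becomes about 12 s total.
 */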
2659
2660/**
2661 * Helper for signalling an SUPSEMEVENT.
2662 *
2663 * This may temporarily leave the HM context if the host requires that for
2664 * signalling SUPSEMEVENT objects.
2665 *
2666 * @returns VBox status code (see VMMR0EmtPrepareToBlock)
2667 * @param pGVM The ring-0 VM structure.
2668 * @param pGVCpu The ring-0 virtual CPU structure.
2669 * @param hEvent The event to signal.
2670 */
2671VMMR0_INT_DECL(int) VMMR0EmtSignalSupEvent(PGVM pGVM, PGVMCPU pGVCpu, SUPSEMEVENT hEvent)
2672{
2673 AssertReturn(pGVCpu->hEMT == RTThreadNativeSelf(), VERR_VM_THREAD_NOT_EMT);
2674 if (RTSemEventIsSignalSafe())
2675 return SUPSemEventSignal(pGVM->pSession, hEvent);
2676
2677 VMMR0EMTBLOCKCTX Ctx;
2678 int rc = VMMR0EmtPrepareToBlock(pGVCpu, VINF_SUCCESS, __FUNCTION__, (void *)(uintptr_t)hEvent, &Ctx);
2679 if (RT_SUCCESS(rc))
2680 {
2681 rc = SUPSemEventSignal(pGVM->pSession, hEvent);
2682 VMMR0EmtResumeAfterBlocking(pGVCpu, &Ctx);
2683 }
2684 return rc;
2685}
2686
2687
2688/**
2689 * Helper for signalling an SUPSEMEVENT, variant supporting non-EMTs.
2690 *
2691 * This may temporarily leave the HM context if the host requires that for
2692 * signalling SUPSEMEVENT objects.
2693 *
2694 * @returns VBox status code (see VMMR0EmtPrepareToBlock)
2695 * @param pGVM The ring-0 VM structure.
2696 * @param hEvent The event to signal.
2697 */
2698VMMR0_INT_DECL(int) VMMR0EmtSignalSupEventByGVM(PGVM pGVM, SUPSEMEVENT hEvent)
2699{
2700 if (!RTSemEventIsSignalSafe())
2701 {
2702 PGVMCPU pGVCpu = GVMMR0GetGVCpuByGVMandEMT(pGVM, NIL_RTNATIVETHREAD);
2703 if (pGVCpu)
2704 {
2705 VMMR0EMTBLOCKCTX Ctx;
2706 int rc = VMMR0EmtPrepareToBlock(pGVCpu, VINF_SUCCESS, __FUNCTION__, (void *)(uintptr_t)hEvent, &Ctx);
2707 if (RT_SUCCESS(rc))
2708 {
2709 rc = SUPSemEventSignal(pGVM->pSession, hEvent);
2710 VMMR0EmtResumeAfterBlocking(pGVCpu, &Ctx);
2711 }
2712 return rc;
2713 }
2714 }
2715 return SUPSemEventSignal(pGVM->pSession, hEvent);
2716}
2717
2718
2719/*********************************************************************************************************************************
2720* Logging. *
2721*********************************************************************************************************************************/
2722
2723/**
2724 * VMMR0_DO_VMMR0_UPDATE_LOGGERS: Updates the EMT loggers for the VM.
2725 *
2726 * @returns VBox status code.
2727 * @param pGVM The global (ring-0) VM structure.
2728 * @param idCpu The ID of the calling EMT.
2729 * @param pReq The request data.
2730 * @param idxLogger Which logger set to update.
2731 * @thread EMT(idCpu)
2732 */
2733static int vmmR0UpdateLoggers(PGVM pGVM, VMCPUID idCpu, PVMMR0UPDATELOGGERSREQ pReq, size_t idxLogger)
2734{
2735 /*
2736 * Check sanity. First we require EMT to be calling us.
2737 */
2738 AssertReturn(idCpu < pGVM->cCpus, VERR_INVALID_CPU_ID);
2739 AssertReturn(pGVM->aCpus[idCpu].hEMT == RTThreadNativeSelf(), VERR_INVALID_CPU_ID);
2740
2741 AssertReturn(pReq->Hdr.cbReq >= RT_UOFFSETOF_DYN(VMMR0UPDATELOGGERSREQ, afGroups[0]), VERR_INVALID_PARAMETER);
2742 AssertReturn(pReq->cGroups < _8K, VERR_INVALID_PARAMETER);
2743 AssertReturn(pReq->Hdr.cbReq == RT_UOFFSETOF_DYN(VMMR0UPDATELOGGERSREQ, afGroups[pReq->cGroups]), VERR_INVALID_PARAMETER);
2744
2745 AssertReturn(idxLogger < VMMLOGGER_IDX_MAX, VERR_OUT_OF_RANGE);
2746
2747 /*
2748 * Adjust flags.
2749 */
2750 /* Always buffered: */
2751 pReq->fFlags |= RTLOGFLAGS_BUFFERED;
2752 /* These don't make sense at present: */
2753 pReq->fFlags &= ~(RTLOGFLAGS_FLUSH | RTLOGFLAGS_WRITE_THROUGH);
2754 /* We've traditionally skipped the group restrictions. */
2755 pReq->fFlags &= ~RTLOGFLAGS_RESTRICT_GROUPS;
2756
2757 /*
2758 * Do the updating.
2759 */
2760 int rc = VINF_SUCCESS;
2761 for (idCpu = 0; idCpu < pGVM->cCpus; idCpu++)
2762 {
2763 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2764 PRTLOGGER pLogger = pGVCpu->vmmr0.s.u.aLoggers[idxLogger].pLogger;
2765 if (pLogger)
2766 {
2767 RTLogSetR0ProgramStart(pLogger, pGVM->vmm.s.nsProgramStart);
2768 rc = RTLogBulkUpdate(pLogger, pReq->fFlags, pReq->uGroupCrc32, pReq->cGroups, pReq->afGroups);
2769 }
2770 }
2771
2772 return rc;
2773}
2774
2775
2776/**
2777 * VMMR0_DO_VMMR0_LOG_FLUSHER: Get the next log flushing job.
2778 *
2779 * The job info is copied into VMM::LogFlusherItem.
2780 *
2781 * @returns VBox status code.
2782 * @retval VERR_OBJECT_DESTROYED if we're shutting down.
2783 * @retval VERR_NOT_OWNER if the calling thread is not the flusher thread.
2784 * @param pGVM The global (ring-0) VM structure.
2785 * @thread The log flusher thread (first caller automatically becomes the log
2786 * flusher).
2787 */
2788static int vmmR0LogFlusher(PGVM pGVM)
2789{
2790 /*
2791 * Check that this really is the flusher thread.
2792 */
2793 RTNATIVETHREAD const hNativeSelf = RTThreadNativeSelf();
2794 AssertReturn(hNativeSelf != NIL_RTNATIVETHREAD, VERR_INTERNAL_ERROR_3);
2795 if (RT_LIKELY(pGVM->vmmr0.s.LogFlusher.hThread == hNativeSelf))
2796 { /* likely */ }
2797 else
2798 {
2799 /* The first caller becomes the flusher thread. */
2800 bool fOk;
2801 ASMAtomicCmpXchgHandle(&pGVM->vmmr0.s.LogFlusher.hThread, hNativeSelf, NIL_RTNATIVETHREAD, fOk);
2802 if (!fOk)
2803 return VERR_NOT_OWNER;
2804 pGVM->vmmr0.s.LogFlusher.fThreadRunning = true;
2805 }
2806
2807 /*
2808 * Acknowledge flush, waking up waiting EMT.
2809 */
2810 RTSpinlockAcquire(pGVM->vmmr0.s.LogFlusher.hSpinlock);
2811
2812 uint32_t idxTail = pGVM->vmmr0.s.LogFlusher.idxRingTail % RT_ELEMENTS(pGVM->vmmr0.s.LogFlusher.aRing);
2813 uint32_t idxHead = pGVM->vmmr0.s.LogFlusher.idxRingHead % RT_ELEMENTS(pGVM->vmmr0.s.LogFlusher.aRing);
2814 if ( idxTail != idxHead
2815 && pGVM->vmmr0.s.LogFlusher.aRing[idxHead].s.fProcessing)
2816 {
2817 /* Pop the head off the ring buffer. */
2818 uint32_t const idCpu = pGVM->vmmr0.s.LogFlusher.aRing[idxHead].s.idCpu;
2819 uint32_t const idxLogger = pGVM->vmmr0.s.LogFlusher.aRing[idxHead].s.idxLogger;
2820 uint32_t const idxBuffer = pGVM->vmmr0.s.LogFlusher.aRing[idxHead].s.idxBuffer;
2821
2822 pGVM->vmmr0.s.LogFlusher.aRing[idxHead].u32 = UINT32_MAX >> 1; /* invalidate the entry */
2823 pGVM->vmmr0.s.LogFlusher.idxRingHead = (idxHead + 1) % RT_ELEMENTS(pGVM->vmmr0.s.LogFlusher.aRing);
2824
2825 /* Validate content. */
2826 if ( idCpu < pGVM->cCpus
2827 && idxLogger < VMMLOGGER_IDX_MAX
2828 && idxBuffer < VMMLOGGER_BUFFER_COUNT)
2829 {
2830 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2831 PVMMR0PERVCPULOGGER pR0Log = &pGVCpu->vmmr0.s.u.aLoggers[idxLogger];
2832 PVMMR3CPULOGGER pShared = &pGVCpu->vmm.s.u.aLoggers[idxLogger];
2833
2834 /*
2835 * Accounting.
2836 */
2837 uint32_t cFlushing = pR0Log->cFlushing - 1;
2838 if (RT_LIKELY(cFlushing < VMMLOGGER_BUFFER_COUNT))
2839 { /*likely*/ }
2840 else
2841 cFlushing = 0;
2842 pR0Log->cFlushing = cFlushing;
2843 ASMAtomicWriteU32(&pShared->cFlushing, cFlushing);
2844
2845 /*
2846 * Wake up the EMT if it's waiting.
2847 */
2848 if (!pR0Log->fEmtWaiting)
2849 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock);
2850 else
2851 {
2852 pR0Log->fEmtWaiting = false;
2853 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock);
2854
2855 int rc = RTSemEventSignal(pR0Log->hEventFlushWait);
2856 if (RT_FAILURE(rc))
2857 LogRelMax(64, ("vmmR0LogFlusher: RTSemEventSignal failed ACKing entry #%u (%u/%u/%u): %Rrc!\n",
2858 idxHead, idCpu, idxLogger, idxBuffer, rc));
2859 }
2860 }
2861 else
2862 {
2863 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock);
2864 LogRelMax(64, ("vmmR0LogFlusher: Bad ACK entry #%u: %u/%u/%u!\n", idxHead, idCpu, idxLogger, idxBuffer));
2865 }
2866
2867 RTSpinlockAcquire(pGVM->vmmr0.s.LogFlusher.hSpinlock);
2868 }
2869
2870 /*
2871 * The wait loop.
2872 */
2873 int rc;
2874 for (;;)
2875 {
2876 /*
2877 * Work pending?
2878 */
2879 idxTail = pGVM->vmmr0.s.LogFlusher.idxRingTail % RT_ELEMENTS(pGVM->vmmr0.s.LogFlusher.aRing);
2880 idxHead = pGVM->vmmr0.s.LogFlusher.idxRingHead % RT_ELEMENTS(pGVM->vmmr0.s.LogFlusher.aRing);
2881 if (idxTail != idxHead)
2882 {
2883 pGVM->vmmr0.s.LogFlusher.aRing[idxHead].s.fProcessing = true;
2884 pGVM->vmm.s.LogFlusherItem.u32 = pGVM->vmmr0.s.LogFlusher.aRing[idxHead].u32;
2885
2886 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock);
2887 return VINF_SUCCESS;
2888 }
2889
2890 /*
2891 * Nothing to do, so check for termination and go to sleep.
2892 */
2893 if (!pGVM->vmmr0.s.LogFlusher.fThreadShutdown)
2894 { /* likely */ }
2895 else
2896 {
2897 rc = VERR_OBJECT_DESTROYED;
2898 break;
2899 }
2900
2901 pGVM->vmmr0.s.LogFlusher.fThreadWaiting = true;
2902 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock);
2903
2904 rc = RTSemEventWaitNoResume(pGVM->vmmr0.s.LogFlusher.hEvent, RT_MS_5MIN);
2905
2906 RTSpinlockAcquire(pGVM->vmmr0.s.LogFlusher.hSpinlock);
2907 pGVM->vmmr0.s.LogFlusher.fThreadWaiting = false;
2908
2909 if (RT_SUCCESS(rc) || rc == VERR_TIMEOUT)
2910 { /* likely */ }
2911 else if (rc == VERR_INTERRUPTED)
2912 {
2913 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock);
2914 return rc;
2915 }
2916 else if (rc == VERR_SEM_DESTROYED || rc == VERR_INVALID_HANDLE)
2917 break;
2918 else
2919 {
2920 LogRel(("vmmR0LogFlusher: RTSemEventWaitNoResume returned unexpected status %Rrc\n", rc));
2921 break;
2922 }
2923 }
2924
2925 /*
2926 * Terminating - prevent further calls and indicate to the EMTs that we're no longer around.
2927 */
2928 pGVM->vmmr0.s.LogFlusher.hThread = ~pGVM->vmmr0.s.LogFlusher.hThread; /* (should be reasonably safe) */
2929 pGVM->vmmr0.s.LogFlusher.fThreadRunning = false;
2930
2931 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock);
2932 return rc;
2933}
2934
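/*
 * Summary for orientation (descriptive only): EMTs produce flush jobs by
 * appending (idCpu, idxLogger, idxBuffer) entries to LogFlusher.aRing in
 * vmmR0LoggerFlushInner, while the log flusher thread repeatedly issues
 * VMMR0_DO_VMMR0_LOG_FLUSHER requests.  Each call above first acknowledges
 * the previously handed out job, dropping the logger's cFlushing count and
 * waking any EMT waiting for that logger, and then either hands out the next
 * ring entry via VMM::LogFlusherItem or sleeps on LogFlusher.hEvent until an
 * EMT queues more work or the VM is shutting down.
 */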
2935
2936/**
2937 * VMMR0_DO_VMMR0_LOG_WAIT_FLUSHED: Waits for the flusher thread to finish all
2938 * buffers for logger @a idxLogger.
2939 *
2940 * @returns VBox status code.
2941 * @param pGVM The global (ring-0) VM structure.
2942 * @param idCpu The ID of the calling EMT.
2943 * @param idxLogger Which logger to wait on.
2944 * @thread EMT(idCpu)
2945 */
2946static int vmmR0LogWaitFlushed(PGVM pGVM, VMCPUID idCpu, size_t idxLogger)
2947{
2948 /*
2949 * Check sanity. First we require EMT to be calling us.
2950 */
2951 AssertReturn(idCpu < pGVM->cCpus, VERR_INVALID_CPU_ID);
2952 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2953 AssertReturn(pGVCpu->hEMT == RTThreadNativeSelf(), VERR_INVALID_CPU_ID);
2954 AssertReturn(idxLogger < VMMLOGGER_IDX_MAX, VERR_OUT_OF_RANGE);
2955 PVMMR0PERVCPULOGGER const pR0Log = &pGVCpu->vmmr0.s.u.aLoggers[idxLogger];
2956
2957 /*
2958 * Do the waiting.
2959 */
2960 int rc = VINF_SUCCESS;
2961 RTSpinlockAcquire(pGVM->vmmr0.s.LogFlusher.hSpinlock);
2962 uint32_t cFlushing = pR0Log->cFlushing;
2963 while (cFlushing > 0)
2964 {
2965 pR0Log->fEmtWaiting = true;
2966 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock);
2967
2968 rc = RTSemEventWaitNoResume(pR0Log->hEventFlushWait, RT_MS_5MIN);
2969
2970 RTSpinlockAcquire(pGVM->vmmr0.s.LogFlusher.hSpinlock);
2971 pR0Log->fEmtWaiting = false;
2972 if (RT_SUCCESS(rc))
2973 {
2974 /* Read the new count and make sure it decreased before looping. That
2975 way we can guarantee that we will wait no more than 5 min * buffers. */
2976 uint32_t const cPrevFlushing = cFlushing;
2977 cFlushing = pR0Log->cFlushing;
2978 if (cFlushing < cPrevFlushing)
2979 continue;
2980 rc = VERR_INTERNAL_ERROR_3;
2981 }
2982 break;
2983 }
2984 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock);
2985 return rc;
2986}
2987
2988
2989/**
2990 * Inner worker for vmmR0LoggerFlushCommon.
2991 */
2992static bool vmmR0LoggerFlushInner(PGVM pGVM, PGVMCPU pGVCpu, uint32_t idxLogger, size_t idxBuffer, uint32_t cbToFlush)
2993{
2994 PVMMR0PERVCPULOGGER const pR0Log = &pGVCpu->vmmr0.s.u.aLoggers[idxLogger];
2995 PVMMR3CPULOGGER const pShared = &pGVCpu->vmm.s.u.aLoggers[idxLogger];
2996
2997 /*
2998 * Figure out what we need to do and whether we can.
2999 */
3000 enum { kJustSignal, kPrepAndSignal, kPrepSignalAndWait } enmAction;
3001#if VMMLOGGER_BUFFER_COUNT >= 2
3002 if (pR0Log->cFlushing < VMMLOGGER_BUFFER_COUNT - 1)
3003 {
3004 if (RTSemEventIsSignalSafe())
3005 enmAction = kJustSignal;
3006 else if (VMMRZCallRing3IsEnabled(pGVCpu))
3007 enmAction = kPrepAndSignal;
3008 else
3009 {
3010 /** @todo This is a bit simplistic. We could introduce a FF to signal the
3011 * thread or similar. */
3012 STAM_REL_COUNTER_INC(&pShared->StatCannotBlock);
3013# if defined(RT_OS_LINUX)
3014 SUP_DPRINTF(("vmmR0LoggerFlush: Signalling not safe and EMT blocking disabled! (%u bytes)\n", cbToFlush));
3015# endif
3016 pShared->cbDropped += cbToFlush;
3017 return true;
3018 }
3019 }
3020 else
3021#endif
3022 if (VMMRZCallRing3IsEnabled(pGVCpu))
3023 enmAction = kPrepSignalAndWait;
3024 else
3025 {
3026 STAM_REL_COUNTER_INC(&pShared->StatCannotBlock);
3027# if defined(RT_OS_LINUX)
3028 SUP_DPRINTF(("vmmR0LoggerFlush: EMT blocking disabled! (%u bytes)\n", cbToFlush));
3029# endif
3030 pShared->cbDropped += cbToFlush;
3031 return true;
3032 }
3033
3034 /*
3035 * Prepare for blocking if necessary.
3036 */
3037 VMMR0EMTBLOCKCTX Ctx;
3038 if (enmAction != kJustSignal)
3039 {
3040 int rc = VMMR0EmtPrepareToBlock(pGVCpu, VINF_SUCCESS, "vmmR0LoggerFlushInner", pR0Log->hEventFlushWait, &Ctx);
3041 if (RT_SUCCESS(rc))
3042 { /* likely */ }
3043 else
3044 {
3045 STAM_REL_COUNTER_INC(&pShared->StatCannotBlock);
3046 SUP_DPRINTF(("vmmR0LoggerFlush: VMMR0EmtPrepareToBlock failed! rc=%d\n", rc));
3047 return false;
3048 }
3049 }
3050
3051 /*
3052 * Queue the flush job.
3053 */
3054 bool fFlushedBuffer;
3055 RTSpinlockAcquire(pGVM->vmmr0.s.LogFlusher.hSpinlock);
3056 if (pGVM->vmmr0.s.LogFlusher.fThreadRunning)
3057 {
3058 uint32_t const idxHead = pGVM->vmmr0.s.LogFlusher.idxRingHead % RT_ELEMENTS(pGVM->vmmr0.s.LogFlusher.aRing);
3059 uint32_t const idxTail = pGVM->vmmr0.s.LogFlusher.idxRingTail % RT_ELEMENTS(pGVM->vmmr0.s.LogFlusher.aRing);
3060 uint32_t const idxNewTail = (idxTail + 1) % RT_ELEMENTS(pGVM->vmmr0.s.LogFlusher.aRing);
3061 if (idxNewTail != idxHead)
3062 {
3063 /* Queue it. */
3064 pGVM->vmmr0.s.LogFlusher.aRing[idxTail].s.idCpu = pGVCpu->idCpu;
3065 pGVM->vmmr0.s.LogFlusher.aRing[idxTail].s.idxLogger = idxLogger;
3066 pGVM->vmmr0.s.LogFlusher.aRing[idxTail].s.idxBuffer = (uint32_t)idxBuffer;
3067 pGVM->vmmr0.s.LogFlusher.aRing[idxTail].s.fProcessing = 0;
3068 pGVM->vmmr0.s.LogFlusher.idxRingTail = idxNewTail;
3069
3070 /* Update the number of buffers currently being flushed. */
3071 uint32_t cFlushing = pR0Log->cFlushing;
3072 cFlushing = RT_MIN(cFlushing + 1, VMMLOGGER_BUFFER_COUNT);
3073 pShared->cFlushing = pR0Log->cFlushing = cFlushing;
3074
3075 /* We must wait if all buffers are currently being flushed. */
3076 bool const fEmtWaiting = cFlushing >= VMMLOGGER_BUFFER_COUNT && enmAction != kJustSignal /* paranoia */;
3077 pR0Log->fEmtWaiting = fEmtWaiting;
3078
3079 /* Stats. */
3080 STAM_REL_COUNTER_INC(&pShared->StatFlushes);
3081 STAM_REL_COUNTER_INC(&pGVM->vmm.s.StatLogFlusherFlushes);
3082
3083 /* Signal the worker thread. */
3084 if (pGVM->vmmr0.s.LogFlusher.fThreadWaiting)
3085 {
3086 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock);
3087 RTSemEventSignal(pGVM->vmmr0.s.LogFlusher.hEvent);
3088 }
3089 else
3090 {
3091 STAM_REL_COUNTER_INC(&pGVM->vmm.s.StatLogFlusherNoWakeUp);
3092 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock);
3093 }
3094
3095 /*
3096 * Wait for a buffer to finish flushing.
3097 *
3098 * Note! Lazy bird is ignoring the status code here. The result is
3099 * that we might end up with an extra event signalling, so the
3100 * next time we need to wait we won't, and end up with some log
3101 * corruption. However, it's too much hassle right now for
3102 * a scenario which would most likely end the process rather
3103 * than cause log corruption.
3104 */
3105 if (fEmtWaiting)
3106 {
3107 STAM_REL_PROFILE_START(&pShared->StatWait, a);
3108 VMMR0EmtWaitEventInner(pGVCpu, VMMR0EMTWAIT_F_TRY_SUPPRESS_INTERRUPTED,
3109 pR0Log->hEventFlushWait, RT_INDEFINITE_WAIT);
3110 STAM_REL_PROFILE_STOP(&pShared->StatWait, a);
3111 }
3112
3113 /*
3114 * We always switch buffers if we have more than one.
3115 */
3116#if VMMLOGGER_BUFFER_COUNT == 1
3117 fFlushedBuffer = true;
3118#else
3119 AssertCompile(VMMLOGGER_BUFFER_COUNT >= 1);
3120 pShared->idxBuf = (idxBuffer + 1) % VMMLOGGER_BUFFER_COUNT;
3121 fFlushedBuffer = false;
3122#endif
3123 }
3124 else
3125 {
3126 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock);
3127 SUP_DPRINTF(("vmmR0LoggerFlush: ring buffer is full!\n"));
3128 fFlushedBuffer = true;
3129 }
3130 }
3131 else
3132 {
3133 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock);
3134 SUP_DPRINTF(("vmmR0LoggerFlush: flusher not active - dropping %u bytes\n", cbToFlush));
3135 fFlushedBuffer = true;
3136 }
3137
3138 /*
3139 * Restore the HM context.
3140 */
3141 if (enmAction != kJustSignal)
3142 VMMR0EmtResumeAfterBlocking(pGVCpu, &Ctx);
3143
3144 return fFlushedBuffer;
3145}
3146
3147
3148/**
3149 * Common worker for vmmR0LogFlush and vmmR0LogRelFlush.
3150 */
3151static bool vmmR0LoggerFlushCommon(PRTLOGGER pLogger, PRTLOGBUFFERDESC pBufDesc, uint32_t idxLogger)
3152{
3153 /*
3154 * Convert the pLogger into a GVMCPU handle and 'call' back to Ring-3.
3155 * (This code is a bit paranoid.)
3156 */
3157 if (RT_VALID_PTR(pLogger))
3158 {
3159 if ( pLogger->u32Magic == RTLOGGER_MAGIC
3160 && (pLogger->u32UserValue1 & VMMR0_LOGGER_FLAGS_MAGIC_MASK) == VMMR0_LOGGER_FLAGS_MAGIC_VALUE
3161 && pLogger->u64UserValue2 == pLogger->u64UserValue3)
3162 {
3163 PGVMCPU const pGVCpu = (PGVMCPU)(uintptr_t)pLogger->u64UserValue2;
3164 if ( RT_VALID_PTR(pGVCpu)
3165 && ((uintptr_t)pGVCpu & HOST_PAGE_OFFSET_MASK) == 0)
3166 {
3167 RTNATIVETHREAD const hNativeSelf = RTThreadNativeSelf();
3168 PGVM const pGVM = pGVCpu->pGVM;
3169 if ( hNativeSelf == pGVCpu->hEMT
3170 && RT_VALID_PTR(pGVM))
3171 {
3172 PVMMR0PERVCPULOGGER const pR0Log = &pGVCpu->vmmr0.s.u.aLoggers[idxLogger];
3173 size_t const idxBuffer = pBufDesc - &pR0Log->aBufDescs[0];
3174 if (idxBuffer < VMMLOGGER_BUFFER_COUNT)
3175 {
3176 /*
3177 * Make sure we don't recurse forever here should something in the
3178 * following code trigger logging or an assertion. Do the rest in
3179 * an inner worker to avoid hitting the right margin too hard.
3180 */
3181 if (!pR0Log->fFlushing)
3182 {
3183 pR0Log->fFlushing = true;
3184 bool fFlushed = vmmR0LoggerFlushInner(pGVM, pGVCpu, idxLogger, idxBuffer, pBufDesc->offBuf);
3185 pR0Log->fFlushing = false;
3186 return fFlushed;
3187 }
3188
3189 SUP_DPRINTF(("vmmR0LoggerFlush: Recursive flushing!\n"));
3190 }
3191 else
3192 SUP_DPRINTF(("vmmR0LoggerFlush: pLogger=%p pGVCpu=%p: idxBuffer=%#zx\n", pLogger, pGVCpu, idxBuffer));
3193 }
3194 else
3195 SUP_DPRINTF(("vmmR0LoggerFlush: pLogger=%p pGVCpu=%p hEMT=%p hNativeSelf=%p!\n",
3196 pLogger, pGVCpu, pGVCpu->hEMT, hNativeSelf));
3197 }
3198 else
3199 SUP_DPRINTF(("vmmR0LoggerFlush: pLogger=%p pGVCpu=%p!\n", pLogger, pGVCpu));
3200 }
3201 else
3202 SUP_DPRINTF(("vmmR0LoggerFlush: pLogger=%p u32Magic=%#x u32UserValue1=%#x u64UserValue2=%#RX64 u64UserValue3=%#RX64!\n",
3203 pLogger, pLogger->u32Magic, pLogger->u32UserValue1, pLogger->u64UserValue2, pLogger->u64UserValue3));
3204 }
3205 else
3206 SUP_DPRINTF(("vmmR0LoggerFlush: pLogger=%p!\n", pLogger));
3207 return true;
3208}
3209
3210
3211/**
3212 * @callback_method_impl{FNRTLOGFLUSH, Release logger buffer flush callback.}
3213 */
3214static DECLCALLBACK(bool) vmmR0LogRelFlush(PRTLOGGER pLogger, PRTLOGBUFFERDESC pBufDesc)
3215{
3216 return vmmR0LoggerFlushCommon(pLogger, pBufDesc, VMMLOGGER_IDX_RELEASE);
3217}
3218
3219
3220/**
3221 * @callback_method_impl{FNRTLOGFLUSH, Logger (debug) buffer flush callback.}
3222 */
3223static DECLCALLBACK(bool) vmmR0LogFlush(PRTLOGGER pLogger, PRTLOGBUFFERDESC pBufDesc)
3224{
3225#ifdef LOG_ENABLED
3226 return vmmR0LoggerFlushCommon(pLogger, pBufDesc, VMMLOGGER_IDX_REGULAR);
3227#else
3228 RT_NOREF(pLogger, pBufDesc);
3229 return true;
3230#endif
3231}
3232
3233
3234/*
3235 * Override RTLogDefaultInstanceEx so we can do logging from EMTs in ring-0.
3236 */
3237DECLEXPORT(PRTLOGGER) RTLogDefaultInstanceEx(uint32_t fFlagsAndGroup)
3238{
3239#ifdef LOG_ENABLED
3240 PGVMCPU pGVCpu = GVMMR0GetGVCpuByEMT(NIL_RTNATIVETHREAD);
3241 if (pGVCpu)
3242 {
3243 PRTLOGGER pLogger = pGVCpu->vmmr0.s.u.s.Logger.pLogger;
3244 if (RT_VALID_PTR(pLogger))
3245 {
3246 if ( pLogger->u64UserValue2 == (uintptr_t)pGVCpu
3247 && pLogger->u64UserValue3 == (uintptr_t)pGVCpu)
3248 {
3249 if (!pGVCpu->vmmr0.s.u.s.Logger.fFlushing)
3250 {
3251 if (!(pGVCpu->vmmr0.s.fLogFlushingDisabled))
3252 return RTLogCheckGroupFlags(pLogger, fFlagsAndGroup);
3253 return NULL;
3254 }
3255
3256 /*
3257 * When we're flushing we _must_ return NULL here to suppress any
3258 * attempts at using the logger while in vmmR0LoggerFlushCommon.
3259 * The VMMR0EmtPrepareToBlock code may trigger logging in HM,
3260 * which will reset the buffer content before we even get to queue
3261 * the flush request. (Only an issue when VBOX_WITH_R0_LOGGING
3262 * is enabled.)
3263 */
3264 return NULL;
3265 }
3266 }
3267 }
3268#endif
3269 return SUPR0DefaultLogInstanceEx(fFlagsAndGroup);
3270}
3271
3272
3273/*
3274 * Override RTLogRelGetDefaultInstanceEx so we can do LogRel to VBox.log from EMTs in ring-0.
3275 */
3276DECLEXPORT(PRTLOGGER) RTLogRelGetDefaultInstanceEx(uint32_t fFlagsAndGroup)
3277{
3278 PGVMCPU pGVCpu = GVMMR0GetGVCpuByEMT(NIL_RTNATIVETHREAD);
3279 if (pGVCpu)
3280 {
3281 PRTLOGGER pLogger = pGVCpu->vmmr0.s.u.s.RelLogger.pLogger;
3282 if (RT_VALID_PTR(pLogger))
3283 {
3284 if ( pLogger->u64UserValue2 == (uintptr_t)pGVCpu
3285 && pLogger->u64UserValue3 == (uintptr_t)pGVCpu)
3286 {
3287 if (!pGVCpu->vmmr0.s.u.s.RelLogger.fFlushing)
3288 {
3289 if (!(pGVCpu->vmmr0.s.fLogFlushingDisabled))
3290 return RTLogCheckGroupFlags(pLogger, fFlagsAndGroup);
3291 return NULL;
3292 }
3293 }
3294 }
3295 }
3296 return SUPR0GetDefaultLogRelInstanceEx(fFlagsAndGroup);
3297}
3298
3299
3300/**
3301 * Helper for vmmR0InitLoggerSet
3302 */
3303static int vmmR0InitLoggerOne(PGVMCPU pGVCpu, bool fRelease, PVMMR0PERVCPULOGGER pR0Log, PVMMR3CPULOGGER pShared,
3304 uint32_t cbBuf, char *pchBuf, RTR3PTR pchBufR3)
3305{
3306 /*
3307 * Create and configure the logger.
3308 */
3309 for (size_t i = 0; i < VMMLOGGER_BUFFER_COUNT; i++)
3310 {
3311 pR0Log->aBufDescs[i].u32Magic = RTLOGBUFFERDESC_MAGIC;
3312 pR0Log->aBufDescs[i].uReserved = 0;
3313 pR0Log->aBufDescs[i].cbBuf = cbBuf;
3314 pR0Log->aBufDescs[i].offBuf = 0;
3315 pR0Log->aBufDescs[i].pchBuf = pchBuf + i * cbBuf;
3316 pR0Log->aBufDescs[i].pAux = &pShared->aBufs[i].AuxDesc;
3317
3318 pShared->aBufs[i].AuxDesc.fFlushedIndicator = false;
3319 pShared->aBufs[i].AuxDesc.afPadding[0] = 0;
3320 pShared->aBufs[i].AuxDesc.afPadding[1] = 0;
3321 pShared->aBufs[i].AuxDesc.afPadding[2] = 0;
3322 pShared->aBufs[i].AuxDesc.offBuf = 0;
3323 pShared->aBufs[i].pchBufR3 = pchBufR3 + i * cbBuf;
3324 }
3325 pShared->cbBuf = cbBuf;
3326
3327 static const char * const s_apszGroups[] = VBOX_LOGGROUP_NAMES;
3328 int rc = RTLogCreateEx(&pR0Log->pLogger, fRelease ? "VBOX_RELEASE_LOG" : "VBOX_LOG", RTLOG_F_NO_LOCKING | RTLOGFLAGS_BUFFERED,
3329 "all", RT_ELEMENTS(s_apszGroups), s_apszGroups, UINT32_MAX,
3330 VMMLOGGER_BUFFER_COUNT, pR0Log->aBufDescs, RTLOGDEST_DUMMY,
3331 NULL /*pfnPhase*/, 0 /*cHistory*/, 0 /*cbHistoryFileMax*/, 0 /*cSecsHistoryTimeSlot*/,
3332 NULL /*pOutputIf*/, NULL /*pvOutputIfUser*/,
3333 NULL /*pErrInfo*/, NULL /*pszFilenameFmt*/);
3334 if (RT_SUCCESS(rc))
3335 {
3336 PRTLOGGER pLogger = pR0Log->pLogger;
3337 pLogger->u32UserValue1 = VMMR0_LOGGER_FLAGS_MAGIC_VALUE;
3338 pLogger->u64UserValue2 = (uintptr_t)pGVCpu;
3339 pLogger->u64UserValue3 = (uintptr_t)pGVCpu;
3340
3341 rc = RTLogSetFlushCallback(pLogger, fRelease ? vmmR0LogRelFlush : vmmR0LogFlush);
3342 if (RT_SUCCESS(rc))
3343 {
3344 RTLogSetR0ThreadNameF(pLogger, "EMT-%u-R0", pGVCpu->idCpu);
3345
3346 /*
3347 * Create the event sem the EMT waits on while flushing is happening.
3348 */
3349 rc = RTSemEventCreate(&pR0Log->hEventFlushWait);
3350 if (RT_SUCCESS(rc))
3351 return VINF_SUCCESS;
3352 pR0Log->hEventFlushWait = NIL_RTSEMEVENT;
3353 }
3354 RTLogDestroy(pLogger);
3355 }
3356 pR0Log->pLogger = NULL;
3357 return rc;
3358}
3359
3360
3361/**
3362 * Worker for VMMR0CleanupVM and vmmR0InitLoggerSet that destroys one logger.
3363 */
3364static void vmmR0TermLoggerOne(PVMMR0PERVCPULOGGER pR0Log, PVMMR3CPULOGGER pShared)
3365{
3366 RTLogDestroy(pR0Log->pLogger);
3367 pR0Log->pLogger = NULL;
3368
3369 for (size_t i = 0; i < VMMLOGGER_BUFFER_COUNT; i++)
3370 pShared->aBufs[i].pchBufR3 = NIL_RTR3PTR;
3371
3372 RTSemEventDestroy(pR0Log->hEventFlushWait);
3373 pR0Log->hEventFlushWait = NIL_RTSEMEVENT;
3374}
3375
3376
3377/**
3378 * Initializes one type of loggers for each EMT.
3379 */
3380static int vmmR0InitLoggerSet(PGVM pGVM, uint8_t idxLogger, uint32_t cbBuf, PRTR0MEMOBJ phMemObj, PRTR0MEMOBJ phMapObj)
3381{
3382 /* Allocate buffers first. */
3383 int rc = RTR0MemObjAllocPage(phMemObj, cbBuf * pGVM->cCpus * VMMLOGGER_BUFFER_COUNT, false /*fExecutable*/);
3384 if (RT_SUCCESS(rc))
3385 {
3386 rc = RTR0MemObjMapUser(phMapObj, *phMemObj, (RTR3PTR)-1, 0 /*uAlignment*/, RTMEM_PROT_READ, NIL_RTR0PROCESS);
3387 if (RT_SUCCESS(rc))
3388 {
3389 char * const pchBuf = (char *)RTR0MemObjAddress(*phMemObj);
3390 AssertPtrReturn(pchBuf, VERR_INTERNAL_ERROR_2);
3391
3392 RTR3PTR const pchBufR3 = RTR0MemObjAddressR3(*phMapObj);
3393 AssertReturn(pchBufR3 != NIL_RTR3PTR, VERR_INTERNAL_ERROR_3);
3394
3395 /* Initialize the per-CPU loggers. */
3396 for (uint32_t i = 0; i < pGVM->cCpus; i++)
3397 {
3398 PGVMCPU pGVCpu = &pGVM->aCpus[i];
3399 PVMMR0PERVCPULOGGER pR0Log = &pGVCpu->vmmr0.s.u.aLoggers[idxLogger];
3400 PVMMR3CPULOGGER pShared = &pGVCpu->vmm.s.u.aLoggers[idxLogger];
3401 rc = vmmR0InitLoggerOne(pGVCpu, idxLogger == VMMLOGGER_IDX_RELEASE, pR0Log, pShared, cbBuf,
3402 pchBuf + i * cbBuf * VMMLOGGER_BUFFER_COUNT,
3403 pchBufR3 + i * cbBuf * VMMLOGGER_BUFFER_COUNT);
3404 if (RT_FAILURE(rc))
3405 {
3406 vmmR0TermLoggerOne(pR0Log, pShared);
3407 while (i-- > 0)
3408 {
3409 pGVCpu = &pGVM->aCpus[i];
3410 vmmR0TermLoggerOne(&pGVCpu->vmmr0.s.u.aLoggers[idxLogger], &pGVCpu->vmm.s.u.aLoggers[idxLogger]);
3411 }
3412 break;
3413 }
3414 }
3415 if (RT_SUCCESS(rc))
3416 return VINF_SUCCESS;
3417
3418 /* Bail out. */
3419 RTR0MemObjFree(*phMapObj, false /*fFreeMappings*/);
3420 *phMapObj = NIL_RTR0MEMOBJ;
3421 }
3422 RTR0MemObjFree(*phMemObj, true /*fFreeMappings*/);
3423 *phMemObj = NIL_RTR0MEMOBJ;
3424 }
3425 return rc;
3426}
3427
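/*
 * Layout note with a worked example (the concrete numbers are illustrative
 * assumptions): the single allocation above holds cbBuf * VMMLOGGER_BUFFER_COUNT
 * bytes per vCPU, laid out contiguously per vCPU.  Assuming cbBuf = _4K,
 * VMMLOGGER_BUFFER_COUNT = 2 and cCpus = 2, the allocation is 16 KiB: vCPU 0
 * gets the buffers at offsets 0 and 4 KiB, vCPU 1 those at 8 KiB and 12 KiB,
 * matching the pchBuf + i * cbBuf * VMMLOGGER_BUFFER_COUNT step above and the
 * per-buffer pchBuf + i * cbBuf step in vmmR0InitLoggerOne.
 */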
3428
3429/**
3430 * Worker for VMMR0InitPerVMData that initializes all the logging related stuff.
3431 *
3432 * @returns VBox status code.
3433 * @param pGVM The global (ring-0) VM structure.
3434 */
3435static int vmmR0InitLoggers(PGVM pGVM)
3436{
3437 /*
3438 * Invalidate the ring buffer (not really necessary).
3439 */
3440 for (size_t idx = 0; idx < RT_ELEMENTS(pGVM->vmmr0.s.LogFlusher.aRing); idx++)
3441 pGVM->vmmr0.s.LogFlusher.aRing[idx].u32 = UINT32_MAX >> 1; /* (all bits except fProcessing set) */
3442
3443 /*
3444 * Create the spinlock and flusher event semaphore.
3445 */
3446 int rc = RTSpinlockCreate(&pGVM->vmmr0.s.LogFlusher.hSpinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "VM-Log-Flusher");
3447 if (RT_SUCCESS(rc))
3448 {
3449 rc = RTSemEventCreate(&pGVM->vmmr0.s.LogFlusher.hEvent);
3450 if (RT_SUCCESS(rc))
3451 {
3452 /*
3453 * Create the ring-0 release loggers.
3454 */
3455 rc = vmmR0InitLoggerSet(pGVM, VMMLOGGER_IDX_RELEASE, _4K,
3456 &pGVM->vmmr0.s.hMemObjReleaseLogger, &pGVM->vmmr0.s.hMapObjReleaseLogger);
3457#ifdef LOG_ENABLED
3458 if (RT_SUCCESS(rc))
3459 {
3460 /*
3461 * Create debug loggers.
3462 */
3463 rc = vmmR0InitLoggerSet(pGVM, VMMLOGGER_IDX_REGULAR, _64K,
3464 &pGVM->vmmr0.s.hMemObjLogger, &pGVM->vmmr0.s.hMapObjLogger);
3465 }
3466#endif
3467 }
3468 }
3469 return rc;
3470}
3471
3472
3473/**
3474 * Worker for VMMR0CleanupVM that cleans up all the logging related stuff.
3475 *
3476 * @param pGVM The global (ring-0) VM structure.
3477 */
3478static void vmmR0CleanupLoggers(PGVM pGVM)
3479{
3480 for (VMCPUID idCpu = 0; idCpu < pGVM->cCpus; idCpu++)
3481 {
3482 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
3483 for (size_t iLogger = 0; iLogger < RT_ELEMENTS(pGVCpu->vmmr0.s.u.aLoggers); iLogger++)
3484 vmmR0TermLoggerOne(&pGVCpu->vmmr0.s.u.aLoggers[iLogger], &pGVCpu->vmm.s.u.aLoggers[iLogger]);
3485 }
3486
3487 /*
3488 * Free logger buffer memory.
3489 */
3490 RTR0MemObjFree(pGVM->vmmr0.s.hMapObjReleaseLogger, false /*fFreeMappings*/);
3491 pGVM->vmmr0.s.hMapObjReleaseLogger = NIL_RTR0MEMOBJ;
3492 RTR0MemObjFree(pGVM->vmmr0.s.hMemObjReleaseLogger, true /*fFreeMappings*/);
3493 pGVM->vmmr0.s.hMemObjReleaseLogger = NIL_RTR0MEMOBJ;
3494
3495 RTR0MemObjFree(pGVM->vmmr0.s.hMapObjLogger, false /*fFreeMappings*/);
3496 pGVM->vmmr0.s.hMapObjLogger = NIL_RTR0MEMOBJ;
3497 RTR0MemObjFree(pGVM->vmmr0.s.hMemObjLogger, true /*fFreeMappings*/);
3498 pGVM->vmmr0.s.hMemObjLogger = NIL_RTR0MEMOBJ;
3499
3500 /*
3501 * Free log flusher related stuff.
3502 */
3503 RTSpinlockDestroy(pGVM->vmmr0.s.LogFlusher.hSpinlock);
3504 pGVM->vmmr0.s.LogFlusher.hSpinlock = NIL_RTSPINLOCK;
3505 RTSemEventDestroy(pGVM->vmmr0.s.LogFlusher.hEvent);
3506 pGVM->vmmr0.s.LogFlusher.hEvent = NIL_RTSEMEVENT;
3507}
3508
3509
3510/*********************************************************************************************************************************
3511* Assertions *
3512*********************************************************************************************************************************/
3513
3514/**
3515 * Installs a notification callback for ring-0 assertions.
3516 *
3517 * @param pVCpu The cross context virtual CPU structure.
3518 * @param pfnCallback Pointer to the callback.
3519 * @param pvUser The user argument.
3520 *
3521 * @returns VBox status code, VERR_ALREADY_EXISTS if a callback is already registered.
3522 */
3523VMMR0_INT_DECL(int) VMMR0AssertionSetNotification(PVMCPUCC pVCpu, PFNVMMR0ASSERTIONNOTIFICATION pfnCallback, RTR0PTR pvUser)
3524{
3525 AssertPtrReturn(pVCpu, VERR_INVALID_POINTER);
3526 AssertPtrReturn(pfnCallback, VERR_INVALID_POINTER);
3527
3528 if (!pVCpu->vmmr0.s.pfnAssertCallback)
3529 {
3530 pVCpu->vmmr0.s.pfnAssertCallback = pfnCallback;
3531 pVCpu->vmmr0.s.pvAssertCallbackUser = pvUser;
3532 return VINF_SUCCESS;
3533 }
3534 return VERR_ALREADY_EXISTS;
3535}
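
Only one notification can be armed per vCPU: a second call fails with VERR_ALREADY_EXISTS until VMMR0AssertionRemoveNotification (below) clears it. A minimal usage sketch; the exact PFNVMMR0ASSERTIONNOTIFICATION shape is assumed here (void return, vCPU pointer plus user argument) from the way it is invoked in RTAssertShouldPanic further down, and myAssertNotification/pvMyCtx are made-up names:

/* Hypothetical callback; the (pVCpu, pvUser) signature is an assumption based on the call site. */
static DECLCALLBACK(void) myAssertNotification(PVMCPUCC pVCpu, void *pvUser)
{
    RT_NOREF(pVCpu);
    SUPR0Printf("ring-0 assertion notification, pvUser=%p\n", pvUser);
}

/* Arm the notification around code that may assert, then disarm it again. */
int rc = VMMR0AssertionSetNotification(pVCpu, myAssertNotification, (RTR0PTR)pvMyCtx);
if (RT_SUCCESS(rc))
{
    /* ... work that might trigger a ring-0 assertion ... */
    VMMR0AssertionRemoveNotification(pVCpu);
}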
3536
3537
3538/**
3539 * Removes the ring-0 assertion notification callback.
3540 *
3541 * @param pVCpu The cross context virtual CPU structure.
3542 */
3543VMMR0_INT_DECL(void) VMMR0AssertionRemoveNotification(PVMCPUCC pVCpu)
3544{
3545 pVCpu->vmmr0.s.pfnAssertCallback = NULL;
3546 pVCpu->vmmr0.s.pvAssertCallbackUser = NULL;
3547}
3548
3549
3550/**
3551 * Checks whether there is a ring-0 callback notification active.
3552 *
3553 * @param pVCpu The cross context virtual CPU structure.
3554 * @returns true if the notification is active, false otherwise.
3555 */
3556VMMR0_INT_DECL(bool) VMMR0AssertionIsNotificationSet(PVMCPUCC pVCpu)
3557{
3558 return pVCpu->vmmr0.s.pfnAssertCallback != NULL;
3559}
3560
3561
3562/*
3563 * Jump back to ring-3 if we're the EMT and the longjmp is armed.
3564 *
3565 * @returns true if the breakpoint should be hit, false if it should be ignored.
3566 */
3567DECLEXPORT(bool) RTCALL RTAssertShouldPanic(void)
3568{
3569#if 0
3570 return true;
3571#else
3572 PVMCC pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
3573 if (pVM)
3574 {
3575 PVMCPUCC pVCpu = VMMGetCpu(pVM);
3576
3577 if (pVCpu)
3578 {
3579# ifdef RT_ARCH_X86
3580 if (pVCpu->vmmr0.s.AssertJmpBuf.eip)
3581# else
3582 if (pVCpu->vmmr0.s.AssertJmpBuf.rip)
3583# endif
3584 {
3585 if (pVCpu->vmmr0.s.pfnAssertCallback)
3586 pVCpu->vmmr0.s.pfnAssertCallback(pVCpu, pVCpu->vmmr0.s.pvAssertCallbackUser);
3587 int rc = vmmR0CallRing3LongJmp(&pVCpu->vmmr0.s.AssertJmpBuf, VERR_VMM_RING0_ASSERTION);
3588 return RT_FAILURE_NP(rc);
3589 }
3590 }
3591 }
3592# ifdef RT_OS_LINUX
3593 return true;
3594# else
3595 return false;
3596# endif
3597#endif
3598}
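
The eip/rip test above treats a non-zero saved instruction pointer in AssertJmpBuf as "the longjmp is armed": the EMT has a ring-3 return point captured, so vmmR0CallRing3LongJmp can unwind to it (after firing any registered notification) instead of letting the assertion panic the host. The same idea in plain standard C, using setjmp/longjmp rather than the VMM's own jump buffer, purely as an illustration:

#include <setjmp.h>
#include <stdio.h>

static jmp_buf g_JmpBuf;
static int     g_fArmed;            /* non-zero once a return point has been captured */

static void mayAssert(int fBad)
{
    if (fBad && g_fArmed)
        longjmp(g_JmpBuf, 1);       /* unwind to the captured point instead of aborting */
}

int main(void)
{
    if (setjmp(g_JmpBuf) == 0)      /* capture the return point: "arm the longjmp" */
    {
        g_fArmed = 1;
        mayAssert(1);               /* does not return here */
    }
    else
        printf("recovered from the simulated assertion\n");
    return 0;
}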
3599
3600
3601/*
3602 * Override this so we can push it up to ring-3.
3603 */
3604DECLEXPORT(void) RTCALL RTAssertMsg1Weak(const char *pszExpr, unsigned uLine, const char *pszFile, const char *pszFunction)
3605{
3606 /*
3607 * To host kernel log/whatever.
3608 */
3609 SUPR0Printf("!!R0-Assertion Failed!!\n"
3610 "Expression: %s\n"
3611 "Location : %s(%d) %s\n",
3612 pszExpr, pszFile, uLine, pszFunction);
3613
3614 /*
3615 * To the log.
3616 */
3617 LogAlways(("\n!!R0-Assertion Failed!!\n"
3618 "Expression: %s\n"
3619 "Location : %s(%d) %s\n",
3620 pszExpr, pszFile, uLine, pszFunction));
3621
3622 /*
3623 * To the global VMM buffer.
3624 */
3625 PVMCC pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
3626 if (pVM)
3627 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg1, sizeof(pVM->vmm.s.szRing0AssertMsg1),
3628 "\n!!R0-Assertion Failed!!\n"
3629 "Expression: %.*s\n"
3630 "Location : %s(%d) %s\n",
3631 sizeof(pVM->vmm.s.szRing0AssertMsg1) / 4 * 3, pszExpr,
3632 pszFile, uLine, pszFunction);
3633
3634 /*
3635 * Continue the normal way.
3636 */
3637 RTAssertMsg1(pszExpr, uLine, pszFile, pszFunction);
3638}
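
The %.*s width of sizeof(szRing0AssertMsg1) / 4 * 3 caps the expression text at three quarters of the buffer, leaving the last quarter for the location line. A small hedged illustration of that arithmetic; the 512-byte figure is an example only, the real field size is defined in the VMM headers:

#include <stdio.h>

int main(void)
{
    char szBuf[512];                                    /* example size, not the actual VMM buffer size */
    unsigned const cchExprMax = sizeof(szBuf) / 4 * 3;  /* 384: at most 3/4 of the buffer goes to the expression */
    printf("expression capped at %u of %u bytes\n", cchExprMax, (unsigned)sizeof(szBuf));
    return 0;
}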
3639
3640
3641/**
3642 * Callback for RTLogFormatV which writes to the ring-3 log port.
3643 * See PFNLOGOUTPUT() for details.
3644 */
3645static DECLCALLBACK(size_t) rtLogOutput(void *pv, const char *pachChars, size_t cbChars)
3646{
3647 for (size_t i = 0; i < cbChars; i++)
3648 {
3649 LogAlways(("%c", pachChars[i])); NOREF(pachChars);
3650 }
3651
3652 NOREF(pv);
3653 return cbChars;
3654}
3655
3656
3657/*
3658 * Override this so we can push it up to ring-3.
3659 */
3660DECLEXPORT(void) RTCALL RTAssertMsg2WeakV(const char *pszFormat, va_list va)
3661{
3662 va_list vaCopy;
3663
3664 /*
3665 * Push the message to the loggers.
3666 */
3667 PRTLOGGER pLog = RTLogRelGetDefaultInstance();
3668 if (pLog)
3669 {
3670 va_copy(vaCopy, va);
3671 RTLogFormatV(rtLogOutput, pLog, pszFormat, vaCopy);
3672 va_end(vaCopy);
3673 }
3674 pLog = RTLogGetDefaultInstance(); /* Don't initialize it here... */
3675 if (pLog)
3676 {
3677 va_copy(vaCopy, va);
3678 RTLogFormatV(rtLogOutput, pLog, pszFormat, vaCopy);
3679 va_end(vaCopy);
3680 }
3681
3682 /*
3683 * Push it to the global VMM buffer.
3684 */
3685 PVMCC pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
3686 if (pVM)
3687 {
3688 va_copy(vaCopy, va);
3689 RTStrPrintfV(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2), pszFormat, vaCopy);
3690 va_end(vaCopy);
3691 }
3692
3693 /*
3694 * Continue the normal way.
3695 */
3696 RTAssertMsg2V(pszFormat, va);
3697}
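
Each consumer above gets its own va_copy of the argument list because a va_list may only be walked once; handing the already-traversed va to the next formatter without copying would be undefined behaviour. A small standalone illustration of that fan-out pattern in standard C:

#include <stdarg.h>
#include <stdio.h>

/* Forwards one variadic message to two sinks, taking a fresh va_copy for each use. */
static void logTwice(const char *pszFormat, ...)
{
    va_list va;
    va_start(va, pszFormat);

    va_list vaCopy;
    va_copy(vaCopy, va);
    vfprintf(stdout, pszFormat, vaCopy);    /* first consumer */
    va_end(vaCopy);

    va_copy(vaCopy, va);
    vfprintf(stderr, pszFormat, vaCopy);    /* second consumer */
    va_end(vaCopy);

    va_end(va);
}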
3698