VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/VMM.cpp@ 40937

1/* $Id: VMM.cpp 40294 2012-02-29 13:30:20Z vboxsync $ */
2/** @file
3 * VMM - The Virtual Machine Monitor Core.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18//#define NO_SUPCALLR0VMM
19
20/** @page pg_vmm VMM - The Virtual Machine Monitor
21 *
22 * The VMM component is two things at the moment: it's a component doing a few
23 * management and routing tasks, and it's the whole virtual machine monitor
24 * thing. For hysterical reasons, it is not doing all the management that one
25 * would expect; this is instead done by @ref pg_vm. We'll address this
26 * misdesign eventually.
27 *
28 * @see grp_vmm, grp_vm
29 *
30 *
31 * @section sec_vmmstate VMM State
32 *
33 * @image html VM_Statechart_Diagram.gif
34 *
35 * To be written.
36 *
37 *
38 * @subsection subsec_vmm_init VMM Initialization
39 *
40 * To be written.
41 *
42 *
43 * @subsection subsec_vmm_term VMM Termination
44 *
45 * To be written.
46 *
47 *
48 * @section sec_vmm_limits VMM Limits
49 *
50 * There are various resource limits imposed by the VMM and its
51 * sub-components. We'll list some of them here.
52 *
53 * On 64-bit hosts:
54 * - Max 8191 VMs. Imposed by GVMM's handle allocation (GVMM_MAX_HANDLES),
55 * can be increased up to 64K - 1.
56 * - Max 16TB - 64KB of the host memory can be used for backing VM RAM and
57 * ROM pages. The limit is imposed by the 32-bit page ID used by GMM.
58 * - A VM can be assigned all the memory we can use (16TB), however, the
59 * Main API will restrict this to 2TB (MM_RAM_MAX_IN_MB).
60 * - Max 32 virtual CPUs (VMM_MAX_CPU_COUNT).
61 *
62 * On 32-bit hosts:
63 * - Max 127 VMs. Imposed by GMM's per page structure.
64 * - Max 64GB - 64KB of the host memory can be used for backing VM RAM and
65 * ROM pages. The limit is imposed by the 28-bit page ID used
66 * internally in GMM. It is also limited by PAE.
67 * - A VM can be assigned all the memory GMM can allocate, however, the
68 * Main API will restrict this to 3584MB (MM_RAM_MAX_IN_MB).
69 * - Max 32 virtual CPUs (VMM_MAX_CPU_COUNT).
70 *
71 */
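/* For reference, the 16 TB figure on 64-bit hosts follows directly from the
   32-bit page ID: 2^32 pages * 4 KiB/page = 16 TiB, with the "- 64 KiB" most
   likely accounting for the few page IDs reserved for NIL/special values. */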
72
73/*******************************************************************************
74* Header Files *
75*******************************************************************************/
76#define LOG_GROUP LOG_GROUP_VMM
77#include <VBox/vmm/vmm.h>
78#include <VBox/vmm/vmapi.h>
79#include <VBox/vmm/pgm.h>
80#include <VBox/vmm/cfgm.h>
81#include <VBox/vmm/pdmqueue.h>
82#include <VBox/vmm/pdmcritsect.h>
83#include <VBox/vmm/pdmapi.h>
84#include <VBox/vmm/cpum.h>
85#include <VBox/vmm/mm.h>
86#include <VBox/vmm/iom.h>
87#include <VBox/vmm/trpm.h>
88#include <VBox/vmm/selm.h>
89#include <VBox/vmm/em.h>
90#include <VBox/sup.h>
91#include <VBox/vmm/dbgf.h>
92#include <VBox/vmm/csam.h>
93#include <VBox/vmm/patm.h>
94#ifdef VBOX_WITH_REM
95# include <VBox/vmm/rem.h>
96#endif
97#include <VBox/vmm/ssm.h>
98#include <VBox/vmm/tm.h>
99#include "VMMInternal.h"
100#include "VMMSwitcher.h"
101#include <VBox/vmm/vm.h>
102#include <VBox/vmm/ftm.h>
103
104#include <VBox/err.h>
105#include <VBox/param.h>
106#include <VBox/version.h>
107#include <VBox/vmm/hwaccm.h>
108#include <iprt/assert.h>
109#include <iprt/alloc.h>
110#include <iprt/asm.h>
111#include <iprt/time.h>
112#include <iprt/semaphore.h>
113#include <iprt/stream.h>
114#include <iprt/string.h>
115#include <iprt/stdarg.h>
116#include <iprt/ctype.h>
117#include <iprt/x86.h>
118
119
120
121/*******************************************************************************
122* Defined Constants And Macros *
123*******************************************************************************/
124/** The saved state version. */
125#define VMM_SAVED_STATE_VERSION 4
126/** The saved state version used by v3.0 and earlier. (Teleportation) */
127#define VMM_SAVED_STATE_VERSION_3_0 3
128
129
130/*******************************************************************************
131* Internal Functions *
132*******************************************************************************/
133static int vmmR3InitStacks(PVM pVM);
134static int vmmR3InitLoggers(PVM pVM);
135static void vmmR3InitRegisterStats(PVM pVM);
136static DECLCALLBACK(int) vmmR3Save(PVM pVM, PSSMHANDLE pSSM);
137static DECLCALLBACK(int) vmmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
138static DECLCALLBACK(void) vmmR3YieldEMT(PVM pVM, PTMTIMER pTimer, void *pvUser);
139static int vmmR3ServiceCallRing3Request(PVM pVM, PVMCPU pVCpu);
140static DECLCALLBACK(void) vmmR3InfoFF(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
141
142
143/**
144 * Initializes the VMM.
145 *
146 * @returns VBox status code.
147 * @param pVM The VM to operate on.
148 */
149VMMR3_INT_DECL(int) VMMR3Init(PVM pVM)
150{
151 LogFlow(("VMMR3Init\n"));
152
153 /*
154 * Assert alignment, sizes and order.
155 */
156 AssertMsg(pVM->vmm.s.offVM == 0, ("Already initialized!\n"));
157 AssertCompile(sizeof(pVM->vmm.s) <= sizeof(pVM->vmm.padding));
158 AssertCompile(sizeof(pVM->aCpus[0].vmm.s) <= sizeof(pVM->aCpus[0].vmm.padding));
159
160 /*
161 * Init basic VM VMM members.
162 */
163 pVM->vmm.s.offVM = RT_OFFSETOF(VM, vmm);
164 pVM->vmm.s.pahEvtRendezvousEnterOrdered = NULL;
165 pVM->vmm.s.hEvtRendezvousEnterOneByOne = NIL_RTSEMEVENT;
166 pVM->vmm.s.hEvtMulRendezvousEnterAllAtOnce = NIL_RTSEMEVENTMULTI;
167 pVM->vmm.s.hEvtMulRendezvousDone = NIL_RTSEMEVENTMULTI;
168 pVM->vmm.s.hEvtRendezvousDoneCaller = NIL_RTSEMEVENT;
169
170 /** @cfgm{YieldEMTInterval, uint32_t, 1, UINT32_MAX, 23, ms}
171 * The EMT yield interval. The EMT yielding is a hack we employ to play a
172 * bit nicer with the rest of the system (like for instance the GUI).
173 */
174 int rc = CFGMR3QueryU32Def(CFGMR3GetRoot(pVM), "YieldEMTInterval", &pVM->vmm.s.cYieldEveryMillies,
175 23 /* Value arrived at after experimenting with the grub boot prompt. */);
176 AssertMsgRCReturn(rc, ("Configuration error. Failed to query \"YieldEMTInterval\", rc=%Rrc\n", rc), rc);
177
178
179 /** @cfgm{VMM/UsePeriodicPreemptionTimers, boolean, true}
180 * Controls whether we employ per-cpu preemption timers to limit the time
181 * spent executing guest code. This option is not available on all
182 * platforms, in which case the setting is silently ignored. If we are
183 * running in VT-x mode, we will use the VMX-preemption timer instead of
184 * this one when possible.
185 */
186 PCFGMNODE pCfgVMM = CFGMR3GetChild(CFGMR3GetRoot(pVM), "VMM");
187 rc = CFGMR3QueryBoolDef(pCfgVMM, "UsePeriodicPreemptionTimers", &pVM->vmm.s.fUsePeriodicPreemptionTimers, true);
188 AssertMsgRCReturn(rc, ("Configuration error. Failed to query \"VMM/UsePeriodicPreemptionTimers\", rc=%Rrc\n", rc), rc);
189
190 /*
191 * Initialize the VMM rendezvous semaphores.
192 */
193 pVM->vmm.s.pahEvtRendezvousEnterOrdered = (PRTSEMEVENT)MMR3HeapAlloc(pVM, MM_TAG_VMM, sizeof(RTSEMEVENT) * pVM->cCpus);
194 if (!pVM->vmm.s.pahEvtRendezvousEnterOrdered)
195 return VERR_NO_MEMORY;
196 for (VMCPUID i = 0; i < pVM->cCpus; i++)
197 pVM->vmm.s.pahEvtRendezvousEnterOrdered[i] = NIL_RTSEMEVENT;
198 for (VMCPUID i = 0; i < pVM->cCpus; i++)
199 {
200 rc = RTSemEventCreate(&pVM->vmm.s.pahEvtRendezvousEnterOrdered[i]);
201 AssertRCReturn(rc, rc);
202 }
203 rc = RTSemEventCreate(&pVM->vmm.s.hEvtRendezvousEnterOneByOne);
204 AssertRCReturn(rc, rc);
205 rc = RTSemEventMultiCreate(&pVM->vmm.s.hEvtMulRendezvousEnterAllAtOnce);
206 AssertRCReturn(rc, rc);
207 rc = RTSemEventMultiCreate(&pVM->vmm.s.hEvtMulRendezvousDone);
208 AssertRCReturn(rc, rc);
209 rc = RTSemEventCreate(&pVM->vmm.s.hEvtRendezvousDoneCaller);
210 AssertRCReturn(rc, rc);
211
212 /* GC switchers are enabled by default. Turned off by HWACCM. */
213 pVM->vmm.s.fSwitcherDisabled = false;
214
215 /*
216 * Register the saved state data unit.
217 */
218 rc = SSMR3RegisterInternal(pVM, "vmm", 1, VMM_SAVED_STATE_VERSION, VMM_STACK_SIZE + sizeof(RTGCPTR),
219 NULL, NULL, NULL,
220 NULL, vmmR3Save, NULL,
221 NULL, vmmR3Load, NULL);
222 if (RT_FAILURE(rc))
223 return rc;
224
225 /*
226 * Register the Ring-0 VM handle with the session for fast ioctl calls.
227 */
228 rc = SUPR3SetVMForFastIOCtl(pVM->pVMR0);
229 if (RT_FAILURE(rc))
230 return rc;
231
232 /*
233 * Init various sub-components.
234 */
235 rc = vmmR3SwitcherInit(pVM);
236 if (RT_SUCCESS(rc))
237 {
238 rc = vmmR3InitStacks(pVM);
239 if (RT_SUCCESS(rc))
240 {
241 rc = vmmR3InitLoggers(pVM);
242
243#ifdef VBOX_WITH_NMI
244 /*
245 * Allocate mapping for the host APIC.
246 */
247 if (RT_SUCCESS(rc))
248 {
249 rc = MMR3HyperReserve(pVM, PAGE_SIZE, "Host APIC", &pVM->vmm.s.GCPtrApicBase);
250 AssertRC(rc);
251 }
252#endif
253 if (RT_SUCCESS(rc))
254 {
255 /*
256 * Debug info and statistics.
257 */
258 DBGFR3InfoRegisterInternal(pVM, "fflags", "Displays the current Forced actions Flags.", vmmR3InfoFF);
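            /* Once registered, the handler can be invoked by name, e.g. via the
               "info fflags" command in the built-in debugger console; see
               vmmR3InfoFF for the actual output. */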
259 vmmR3InitRegisterStats(pVM);
260 vmmInitFormatTypes();
261
262 return VINF_SUCCESS;
263 }
264 }
265 /** @todo: Need failure cleanup. */
266
267 //more todo in here?
268 //if (RT_SUCCESS(rc))
269 //{
270 //}
271 //int rc2 = vmmR3TermCoreCode(pVM);
272 //AssertRC(rc2));
273 }
274
275 return rc;
276}
277
278
279/**
280 * Allocate & setup the VMM RC stack(s) (for EMTs).
281 *
282 * The stacks are also used for long jumps in Ring-0.
283 *
284 * @returns VBox status code.
285 * @param pVM Pointer to the shared VM structure.
286 *
287 * @remarks The optional guard page gets its protection set up during R3 init
288 * completion because of init order issues.
289 */
290static int vmmR3InitStacks(PVM pVM)
291{
292 int rc = VINF_SUCCESS;
293#ifdef VMM_R0_SWITCH_STACK
294 uint32_t fFlags = MMHYPER_AONR_FLAGS_KERNEL_MAPPING;
295#else
296 uint32_t fFlags = 0;
297#endif
298
299 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
300 {
301 PVMCPU pVCpu = &pVM->aCpus[idCpu];
302
303#ifdef VBOX_STRICT_VMM_STACK
304 rc = MMR3HyperAllocOnceNoRelEx(pVM, PAGE_SIZE + VMM_STACK_SIZE + PAGE_SIZE,
305#else
306 rc = MMR3HyperAllocOnceNoRelEx(pVM, VMM_STACK_SIZE,
307#endif
308 PAGE_SIZE, MM_TAG_VMM, fFlags, (void **)&pVCpu->vmm.s.pbEMTStackR3);
309 if (RT_SUCCESS(rc))
310 {
311#ifdef VBOX_STRICT_VMM_STACK
312 pVCpu->vmm.s.pbEMTStackR3 += PAGE_SIZE;
313#endif
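            /* With VBOX_STRICT_VMM_STACK the allocation above thus looks like
               [guard page][VMM_STACK_SIZE stack][guard page], with pbEMTStackR3
               pointing just past the first guard page. The guard pages are only
               made inaccessible later, in the VMINITCOMPLETED_RING3 phase (see
               VMMR3InitCompleted below). */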
314#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
315 /* MMHyperR3ToR0 returns R3 when not doing hardware assisted virtualization. */
316 if (!VMMIsHwVirtExtForced(pVM))
317 pVCpu->vmm.s.CallRing3JmpBufR0.pvSavedStack = NIL_RTR0PTR;
318 else
319#endif
320 pVCpu->vmm.s.CallRing3JmpBufR0.pvSavedStack = MMHyperR3ToR0(pVM, pVCpu->vmm.s.pbEMTStackR3);
321 pVCpu->vmm.s.pbEMTStackRC = MMHyperR3ToRC(pVM, pVCpu->vmm.s.pbEMTStackR3);
322 pVCpu->vmm.s.pbEMTStackBottomRC = pVCpu->vmm.s.pbEMTStackRC + VMM_STACK_SIZE;
323 AssertRelease(pVCpu->vmm.s.pbEMTStackRC);
324
325 CPUMSetHyperESP(pVCpu, pVCpu->vmm.s.pbEMTStackBottomRC);
326 }
327 }
328
329 return rc;
330}
331
332
333/**
334 * Initialize the loggers.
335 *
336 * @returns VBox status code.
337 * @param pVM Pointer to the shared VM structure.
338 */
339static int vmmR3InitLoggers(PVM pVM)
340{
341 int rc;
342#define RTLogCalcSizeForR0(cGroups, fFlags) (RT_OFFSETOF(VMMR0LOGGER, Logger.afGroups[cGroups]) + PAGE_SIZE)
343
344 /*
345 * Allocate RC & R0 Logger instances (they are finalized in the relocator).
346 */
347#ifdef LOG_ENABLED
348 PRTLOGGER pLogger = RTLogDefaultInstance();
349 if (pLogger)
350 {
351 pVM->vmm.s.cbRCLogger = RT_OFFSETOF(RTLOGGERRC, afGroups[pLogger->cGroups]);
352 rc = MMR3HyperAllocOnceNoRel(pVM, pVM->vmm.s.cbRCLogger, 0, MM_TAG_VMM, (void **)&pVM->vmm.s.pRCLoggerR3);
353 if (RT_FAILURE(rc))
354 return rc;
355 pVM->vmm.s.pRCLoggerRC = MMHyperR3ToRC(pVM, pVM->vmm.s.pRCLoggerR3);
356
357# ifdef VBOX_WITH_R0_LOGGING
358 size_t const cbLogger = RTLogCalcSizeForR0(pLogger->cGroups, 0);
359 for (VMCPUID i = 0; i < pVM->cCpus; i++)
360 {
361 PVMCPU pVCpu = &pVM->aCpus[i];
362 rc = MMR3HyperAllocOnceNoRelEx(pVM, cbLogger, PAGE_SIZE, MM_TAG_VMM, MMHYPER_AONR_FLAGS_KERNEL_MAPPING,
363 (void **)&pVCpu->vmm.s.pR0LoggerR3);
364 if (RT_FAILURE(rc))
365 return rc;
366 pVCpu->vmm.s.pR0LoggerR3->pVM = pVM->pVMR0;
367 //pVCpu->vmm.s.pR0LoggerR3->fCreated = false;
368 pVCpu->vmm.s.pR0LoggerR3->cbLogger = (uint32_t)cbLogger;
369 pVCpu->vmm.s.pR0LoggerR0 = MMHyperR3ToR0(pVM, pVCpu->vmm.s.pR0LoggerR3);
370 }
371# endif
372 }
373#endif /* LOG_ENABLED */
374
375#ifdef VBOX_WITH_RC_RELEASE_LOGGING
376 /*
377 * Allocate RC release logger instances (finalized in the relocator).
378 */
379 PRTLOGGER pRelLogger = RTLogRelDefaultInstance();
380 if (pRelLogger)
381 {
382 pVM->vmm.s.cbRCRelLogger = RT_OFFSETOF(RTLOGGERRC, afGroups[pRelLogger->cGroups]);
383 rc = MMR3HyperAllocOnceNoRel(pVM, pVM->vmm.s.cbRCRelLogger, 0, MM_TAG_VMM, (void **)&pVM->vmm.s.pRCRelLoggerR3);
384 if (RT_FAILURE(rc))
385 return rc;
386 pVM->vmm.s.pRCRelLoggerRC = MMHyperR3ToRC(pVM, pVM->vmm.s.pRCRelLoggerR3);
387 }
388#endif /* VBOX_WITH_RC_RELEASE_LOGGING */
389 return VINF_SUCCESS;
390}
391
392
393/**
394 * VMMR3Init worker that registers the statistics with STAM.
395 *
396 * @param pVM The shared VM structure.
397 */
398static void vmmR3InitRegisterStats(PVM pVM)
399{
400 /*
401 * Statistics.
402 */
403 STAM_REG(pVM, &pVM->vmm.s.StatRunRC, STAMTYPE_COUNTER, "/VMM/RunRC", STAMUNIT_OCCURENCES, "Number of context switches.");
404 STAM_REG(pVM, &pVM->vmm.s.StatRZRetNormal, STAMTYPE_COUNTER, "/VMM/RZRet/Normal", STAMUNIT_OCCURENCES, "Number of VINF_SUCCESS returns.");
405 STAM_REG(pVM, &pVM->vmm.s.StatRZRetInterrupt, STAMTYPE_COUNTER, "/VMM/RZRet/Interrupt", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_INTERRUPT returns.");
406 STAM_REG(pVM, &pVM->vmm.s.StatRZRetInterruptHyper, STAMTYPE_COUNTER, "/VMM/RZRet/InterruptHyper", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_INTERRUPT_HYPER returns.");
407 STAM_REG(pVM, &pVM->vmm.s.StatRZRetGuestTrap, STAMTYPE_COUNTER, "/VMM/RZRet/GuestTrap", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_GUEST_TRAP returns.");
408 STAM_REG(pVM, &pVM->vmm.s.StatRZRetRingSwitch, STAMTYPE_COUNTER, "/VMM/RZRet/RingSwitch", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_RING_SWITCH returns.");
409 STAM_REG(pVM, &pVM->vmm.s.StatRZRetRingSwitchInt, STAMTYPE_COUNTER, "/VMM/RZRet/RingSwitchInt", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_RING_SWITCH_INT returns.");
410 STAM_REG(pVM, &pVM->vmm.s.StatRZRetStaleSelector, STAMTYPE_COUNTER, "/VMM/RZRet/StaleSelector", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_STALE_SELECTOR returns.");
411 STAM_REG(pVM, &pVM->vmm.s.StatRZRetIRETTrap, STAMTYPE_COUNTER, "/VMM/RZRet/IRETTrap", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_IRET_TRAP returns.");
412 STAM_REG(pVM, &pVM->vmm.s.StatRZRetEmulate, STAMTYPE_COUNTER, "/VMM/RZRet/Emulate", STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION returns.");
413 STAM_REG(pVM, &pVM->vmm.s.StatRZRetIOBlockEmulate, STAMTYPE_COUNTER, "/VMM/RZRet/EmulateIOBlock", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_EMULATE_IO_BLOCK returns.");
414 STAM_REG(pVM, &pVM->vmm.s.StatRZRetPatchEmulate, STAMTYPE_COUNTER, "/VMM/RZRet/PatchEmulate", STAMUNIT_OCCURENCES, "Number of VINF_PATCH_EMULATE_INSTR returns.");
415 STAM_REG(pVM, &pVM->vmm.s.StatRZRetIORead, STAMTYPE_COUNTER, "/VMM/RZRet/IORead", STAMUNIT_OCCURENCES, "Number of VINF_IOM_R3_IOPORT_READ returns.");
416 STAM_REG(pVM, &pVM->vmm.s.StatRZRetIOWrite, STAMTYPE_COUNTER, "/VMM/RZRet/IOWrite", STAMUNIT_OCCURENCES, "Number of VINF_IOM_R3_IOPORT_WRITE returns.");
417 STAM_REG(pVM, &pVM->vmm.s.StatRZRetMMIORead, STAMTYPE_COUNTER, "/VMM/RZRet/MMIORead", STAMUNIT_OCCURENCES, "Number of VINF_IOM_R3_MMIO_READ returns.");
418 STAM_REG(pVM, &pVM->vmm.s.StatRZRetMMIOWrite, STAMTYPE_COUNTER, "/VMM/RZRet/MMIOWrite", STAMUNIT_OCCURENCES, "Number of VINF_IOM_R3_MMIO_WRITE returns.");
419 STAM_REG(pVM, &pVM->vmm.s.StatRZRetMMIOReadWrite, STAMTYPE_COUNTER, "/VMM/RZRet/MMIOReadWrite", STAMUNIT_OCCURENCES, "Number of VINF_IOM_R3_MMIO_READ_WRITE returns.");
420 STAM_REG(pVM, &pVM->vmm.s.StatRZRetMMIOPatchRead, STAMTYPE_COUNTER, "/VMM/RZRet/MMIOPatchRead", STAMUNIT_OCCURENCES, "Number of VINF_IOM_HC_MMIO_PATCH_READ returns.");
421 STAM_REG(pVM, &pVM->vmm.s.StatRZRetMMIOPatchWrite, STAMTYPE_COUNTER, "/VMM/RZRet/MMIOPatchWrite", STAMUNIT_OCCURENCES, "Number of VINF_IOM_HC_MMIO_PATCH_WRITE returns.");
422 STAM_REG(pVM, &pVM->vmm.s.StatRZRetLDTFault, STAMTYPE_COUNTER, "/VMM/RZRet/LDTFault", STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION_GDT_FAULT returns.");
423 STAM_REG(pVM, &pVM->vmm.s.StatRZRetGDTFault, STAMTYPE_COUNTER, "/VMM/RZRet/GDTFault", STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION_LDT_FAULT returns.");
424 STAM_REG(pVM, &pVM->vmm.s.StatRZRetIDTFault, STAMTYPE_COUNTER, "/VMM/RZRet/IDTFault", STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION_IDT_FAULT returns.");
425 STAM_REG(pVM, &pVM->vmm.s.StatRZRetTSSFault, STAMTYPE_COUNTER, "/VMM/RZRet/TSSFault", STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION_TSS_FAULT returns.");
426 STAM_REG(pVM, &pVM->vmm.s.StatRZRetPDFault, STAMTYPE_COUNTER, "/VMM/RZRet/PDFault", STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION_PD_FAULT returns.");
427 STAM_REG(pVM, &pVM->vmm.s.StatRZRetCSAMTask, STAMTYPE_COUNTER, "/VMM/RZRet/CSAMTask", STAMUNIT_OCCURENCES, "Number of VINF_CSAM_PENDING_ACTION returns.");
428 STAM_REG(pVM, &pVM->vmm.s.StatRZRetSyncCR3, STAMTYPE_COUNTER, "/VMM/RZRet/SyncCR", STAMUNIT_OCCURENCES, "Number of VINF_PGM_SYNC_CR3 returns.");
429 STAM_REG(pVM, &pVM->vmm.s.StatRZRetMisc, STAMTYPE_COUNTER, "/VMM/RZRet/Misc", STAMUNIT_OCCURENCES, "Number of misc returns.");
430 STAM_REG(pVM, &pVM->vmm.s.StatRZRetPatchInt3, STAMTYPE_COUNTER, "/VMM/RZRet/PatchInt3", STAMUNIT_OCCURENCES, "Number of VINF_PATM_PATCH_INT3 returns.");
431 STAM_REG(pVM, &pVM->vmm.s.StatRZRetPatchPF, STAMTYPE_COUNTER, "/VMM/RZRet/PatchPF", STAMUNIT_OCCURENCES, "Number of VINF_PATM_PATCH_TRAP_PF returns.");
432 STAM_REG(pVM, &pVM->vmm.s.StatRZRetPatchGP, STAMTYPE_COUNTER, "/VMM/RZRet/PatchGP", STAMUNIT_OCCURENCES, "Number of VINF_PATM_PATCH_TRAP_GP returns.");
433 STAM_REG(pVM, &pVM->vmm.s.StatRZRetPatchIretIRQ, STAMTYPE_COUNTER, "/VMM/RZRet/PatchIret", STAMUNIT_OCCURENCES, "Number of VINF_PATM_PENDING_IRQ_AFTER_IRET returns.");
434 STAM_REG(pVM, &pVM->vmm.s.StatRZRetRescheduleREM, STAMTYPE_COUNTER, "/VMM/RZRet/ScheduleREM", STAMUNIT_OCCURENCES, "Number of VINF_EM_RESCHEDULE_REM returns.");
435 STAM_REG(pVM, &pVM->vmm.s.StatRZRetToR3, STAMTYPE_COUNTER, "/VMM/RZRet/ToR3", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TO_R3 returns.");
436 STAM_REG(pVM, &pVM->vmm.s.StatRZRetToR3Unknown, STAMTYPE_COUNTER, "/VMM/RZRet/ToR3/Unknown", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TO_R3 returns.");
437 STAM_REG(pVM, &pVM->vmm.s.StatRZRetToR3TMVirt, STAMTYPE_COUNTER, "/VMM/RZRet/ToR3/TMVirt", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TO_R3 returns.");
438 STAM_REG(pVM, &pVM->vmm.s.StatRZRetToR3HandyPages, STAMTYPE_COUNTER, "/VMM/RZRet/ToR3/Handy", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TO_R3 returns.");
439 STAM_REG(pVM, &pVM->vmm.s.StatRZRetToR3PDMQueues, STAMTYPE_COUNTER, "/VMM/RZRet/ToR3/PDMQueue", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TO_R3 returns.");
440 STAM_REG(pVM, &pVM->vmm.s.StatRZRetToR3Rendezvous, STAMTYPE_COUNTER, "/VMM/RZRet/ToR3/Rendezvous", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TO_R3 returns.");
441 STAM_REG(pVM, &pVM->vmm.s.StatRZRetToR3Timer, STAMTYPE_COUNTER, "/VMM/RZRet/ToR3/Timer", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TO_R3 returns.");
442 STAM_REG(pVM, &pVM->vmm.s.StatRZRetToR3DMA, STAMTYPE_COUNTER, "/VMM/RZRet/ToR3/DMA", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TO_R3 returns.");
443 STAM_REG(pVM, &pVM->vmm.s.StatRZRetToR3CritSect, STAMTYPE_COUNTER, "/VMM/RZRet/ToR3/CritSect", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TO_R3 returns.");
444 STAM_REG(pVM, &pVM->vmm.s.StatRZRetTimerPending, STAMTYPE_COUNTER, "/VMM/RZRet/TimerPending", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TIMER_PENDING returns.");
445 STAM_REG(pVM, &pVM->vmm.s.StatRZRetInterruptPending, STAMTYPE_COUNTER, "/VMM/RZRet/InterruptPending", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_INTERRUPT_PENDING returns.");
446 STAM_REG(pVM, &pVM->vmm.s.StatRZRetPATMDuplicateFn, STAMTYPE_COUNTER, "/VMM/RZRet/PATMDuplicateFn", STAMUNIT_OCCURENCES, "Number of VINF_PATM_DUPLICATE_FUNCTION returns.");
447 STAM_REG(pVM, &pVM->vmm.s.StatRZRetPGMChangeMode, STAMTYPE_COUNTER, "/VMM/RZRet/PGMChangeMode", STAMUNIT_OCCURENCES, "Number of VINF_PGM_CHANGE_MODE returns.");
448 STAM_REG(pVM, &pVM->vmm.s.StatRZRetPGMFlushPending, STAMTYPE_COUNTER, "/VMM/RZRet/PGMFlushPending", STAMUNIT_OCCURENCES, "Number of VINF_PGM_POOL_FLUSH_PENDING returns.");
449 STAM_REG(pVM, &pVM->vmm.s.StatRZRetPendingRequest, STAMTYPE_COUNTER, "/VMM/RZRet/PendingRequest", STAMUNIT_OCCURENCES, "Number of VINF_EM_PENDING_REQUEST returns.");
450 STAM_REG(pVM, &pVM->vmm.s.StatRZRetPatchTPR, STAMTYPE_COUNTER, "/VMM/RZRet/PatchTPR", STAMUNIT_OCCURENCES, "Number of VINF_EM_HWACCM_PATCH_TPR_INSTR returns.");
451 STAM_REG(pVM, &pVM->vmm.s.StatRZRetCallRing3, STAMTYPE_COUNTER, "/VMM/RZCallR3/Misc", STAMUNIT_OCCURENCES, "Number of Other ring-3 calls.");
452 STAM_REG(pVM, &pVM->vmm.s.StatRZCallPDMLock, STAMTYPE_COUNTER, "/VMM/RZCallR3/PDMLock", STAMUNIT_OCCURENCES, "Number of VMMCALLRING3_PDM_LOCK calls.");
453 STAM_REG(pVM, &pVM->vmm.s.StatRZCallPDMCritSectEnter, STAMTYPE_COUNTER, "/VMM/RZCallR3/PDMCritSectEnter", STAMUNIT_OCCURENCES, "Number of VMMCALLRING3_PDM_CRITSECT_ENTER calls.");
454 STAM_REG(pVM, &pVM->vmm.s.StatRZCallPGMLock, STAMTYPE_COUNTER, "/VMM/RZCallR3/PGMLock", STAMUNIT_OCCURENCES, "Number of VMMCALLRING3_PGM_LOCK calls.");
455 STAM_REG(pVM, &pVM->vmm.s.StatRZCallPGMPoolGrow, STAMTYPE_COUNTER, "/VMM/RZCallR3/PGMPoolGrow", STAMUNIT_OCCURENCES, "Number of VMMCALLRING3_PGM_POOL_GROW calls.");
456 STAM_REG(pVM, &pVM->vmm.s.StatRZCallPGMMapChunk, STAMTYPE_COUNTER, "/VMM/RZCallR3/PGMMapChunk", STAMUNIT_OCCURENCES, "Number of VMMCALLRING3_PGM_MAP_CHUNK calls.");
457 STAM_REG(pVM, &pVM->vmm.s.StatRZCallPGMAllocHandy, STAMTYPE_COUNTER, "/VMM/RZCallR3/PGMAllocHandy", STAMUNIT_OCCURENCES, "Number of VMMCALLRING3_PGM_ALLOCATE_HANDY_PAGES calls.");
458 STAM_REG(pVM, &pVM->vmm.s.StatRZCallRemReplay, STAMTYPE_COUNTER, "/VMM/RZCallR3/REMReplay", STAMUNIT_OCCURENCES, "Number of VMMCALLRING3_REM_REPLAY_HANDLER_NOTIFICATIONS calls.");
459 STAM_REG(pVM, &pVM->vmm.s.StatRZCallLogFlush, STAMTYPE_COUNTER, "/VMM/RZCallR3/VMMLogFlush", STAMUNIT_OCCURENCES, "Number of VMMCALLRING3_VMM_LOGGER_FLUSH calls.");
460 STAM_REG(pVM, &pVM->vmm.s.StatRZCallVMSetError, STAMTYPE_COUNTER, "/VMM/RZCallR3/VMSetError", STAMUNIT_OCCURENCES, "Number of VMMCALLRING3_VM_SET_ERROR calls.");
461 STAM_REG(pVM, &pVM->vmm.s.StatRZCallVMSetRuntimeError, STAMTYPE_COUNTER, "/VMM/RZCallR3/VMRuntimeError", STAMUNIT_OCCURENCES, "Number of VMMCALLRING3_VM_SET_RUNTIME_ERROR calls.");
462
463#ifdef VBOX_WITH_STATISTICS
464 for (VMCPUID i = 0; i < pVM->cCpus; i++)
465 {
466 STAMR3RegisterF(pVM, &pVM->aCpus[i].vmm.s.CallRing3JmpBufR0.cbUsedMax, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Max amount of stack used.", "/VMM/Stack/CPU%u/Max", i);
467 STAMR3RegisterF(pVM, &pVM->aCpus[i].vmm.s.CallRing3JmpBufR0.cbUsedAvg, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Average stack usage.", "/VMM/Stack/CPU%u/Avg", i);
468 STAMR3RegisterF(pVM, &pVM->aCpus[i].vmm.s.CallRing3JmpBufR0.cUsedTotal, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of stack usages.", "/VMM/Stack/CPU%u/Uses", i);
469 }
470#endif
471}
472
473
474/**
475 * Initializes the R0 VMM.
476 *
477 * @returns VBox status code.
478 * @param pVM The VM to operate on.
479 */
480VMMR3_INT_DECL(int) VMMR3InitR0(PVM pVM)
481{
482 int rc;
483 PVMCPU pVCpu = VMMGetCpu(pVM);
484 Assert(pVCpu && pVCpu->idCpu == 0);
485
486#ifdef LOG_ENABLED
487 /*
488 * Initialize the ring-0 logger if we haven't done so yet.
489 */
490 if ( pVCpu->vmm.s.pR0LoggerR3
491 && !pVCpu->vmm.s.pR0LoggerR3->fCreated)
492 {
493 rc = VMMR3UpdateLoggers(pVM);
494 if (RT_FAILURE(rc))
495 return rc;
496 }
497#endif
498
499 /*
500 * Call Ring-0 entry with init code.
501 */
502 for (;;)
503 {
504#ifdef NO_SUPCALLR0VMM
505 //rc = VERR_GENERAL_FAILURE;
506 rc = VINF_SUCCESS;
507#else
508 rc = SUPR3CallVMMR0Ex(pVM->pVMR0, 0 /*idCpu*/, VMMR0_DO_VMMR0_INIT, VMMGetSvnRev(), NULL);
509#endif
510 /*
511 * Flush the logs.
512 */
513#ifdef LOG_ENABLED
514 if ( pVCpu->vmm.s.pR0LoggerR3
515 && pVCpu->vmm.s.pR0LoggerR3->Logger.offScratch > 0)
516 RTLogFlushR0(NULL, &pVCpu->vmm.s.pR0LoggerR3->Logger);
517#endif
518 if (rc != VINF_VMM_CALL_HOST)
519 break;
520 rc = vmmR3ServiceCallRing3Request(pVM, pVCpu);
521 if (RT_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
522 break;
523 /* Resume R0 */
524 }
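    /* Note: VINF_VMM_CALL_HOST means ring-0 needs something serviced in ring-3
       (growing the page pool, taking a lock, flushing a logger, and so on);
       vmmR3ServiceCallRing3Request handles the request and we loop back so
       ring-0 can resume where it left off. The same pattern recurs in
       VMMR3Term and in the run loops further down. */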
525
526 if (RT_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
527 {
528 LogRel(("R0 init failed, rc=%Rra\n", rc));
529 if (RT_SUCCESS(rc))
530 rc = VERR_IPE_UNEXPECTED_INFO_STATUS;
531 }
532 return rc;
533}
534
535
536/**
537 * Initializes the RC VMM.
538 *
539 * @returns VBox status code.
540 * @param pVM The VM to operate on.
541 */
542VMMR3_INT_DECL(int) VMMR3InitRC(PVM pVM)
543{
544 PVMCPU pVCpu = VMMGetCpu(pVM);
545 Assert(pVCpu && pVCpu->idCpu == 0);
546
547 /* When hardware-assisted virtualization (VT-x/AMD-V) is used, there's no need to init RC. */
548 if (pVM->vmm.s.fSwitcherDisabled)
549 return VINF_SUCCESS;
550
551 AssertReturn(pVM->cCpus == 1, VERR_RAW_MODE_INVALID_SMP);
552
553 /*
554 * Call VMMGCInit():
555 * -# resolve the address.
556 * -# setup stackframe and EIP to use the trampoline.
557 * -# do a generic hypervisor call.
558 */
559 RTRCPTR RCPtrEP;
560 int rc = PDMR3LdrGetSymbolRC(pVM, VMMGC_MAIN_MODULE_NAME, "VMMGCEntry", &RCPtrEP);
561 if (RT_SUCCESS(rc))
562 {
563 CPUMHyperSetCtxCore(pVCpu, NULL);
564 CPUMSetHyperESP(pVCpu, pVCpu->vmm.s.pbEMTStackBottomRC); /* Clear the stack. */
565 uint64_t u64TS = RTTimeProgramStartNanoTS();
566 CPUMPushHyper(pVCpu, (uint32_t)(u64TS >> 32)); /* Param 3: The program startup TS - Hi. */
567 CPUMPushHyper(pVCpu, (uint32_t)u64TS); /* Param 3: The program startup TS - Lo. */
568 CPUMPushHyper(pVCpu, VMMGetSvnRev()); /* Param 2: Version argument. */
569 CPUMPushHyper(pVCpu, VMMGC_DO_VMMGC_INIT); /* Param 1: Operation. */
570 CPUMPushHyper(pVCpu, pVM->pVMRC); /* Param 0: pVM */
571 CPUMPushHyper(pVCpu, 5 * sizeof(RTRCPTR)); /* trampoline param: stacksize. */
572 CPUMPushHyper(pVCpu, RCPtrEP); /* Call EIP. */
573 CPUMSetHyperEIP(pVCpu, pVM->vmm.s.pfnCallTrampolineRC);
574 Assert(CPUMGetHyperCR3(pVCpu) && CPUMGetHyperCR3(pVCpu) == PGMGetHyperCR3(pVCpu));
575
576 for (;;)
577 {
578#ifdef NO_SUPCALLR0VMM
579 //rc = VERR_GENERAL_FAILURE;
580 rc = VINF_SUCCESS;
581#else
582 rc = SUPR3CallVMMR0(pVM->pVMR0, 0 /* VCPU 0 */, VMMR0_DO_CALL_HYPERVISOR, NULL);
583#endif
584#ifdef LOG_ENABLED
585 PRTLOGGERRC pLogger = pVM->vmm.s.pRCLoggerR3;
586 if ( pLogger
587 && pLogger->offScratch > 0)
588 RTLogFlushRC(NULL, pLogger);
589#endif
590#ifdef VBOX_WITH_RC_RELEASE_LOGGING
591 PRTLOGGERRC pRelLogger = pVM->vmm.s.pRCRelLoggerR3;
592 if (RT_UNLIKELY(pRelLogger && pRelLogger->offScratch > 0))
593 RTLogFlushRC(RTLogRelDefaultInstance(), pRelLogger);
594#endif
595 if (rc != VINF_VMM_CALL_HOST)
596 break;
597 rc = vmmR3ServiceCallRing3Request(pVM, pVCpu);
598 if (RT_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
599 break;
600 }
601
602 if (RT_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
603 {
604 VMMR3FatalDump(pVM, pVCpu, rc);
605 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
606 rc = VERR_IPE_UNEXPECTED_INFO_STATUS;
607 }
608 AssertRC(rc);
609 }
610 return rc;
611}
612
613
614/**
615 * Called when an init phase completes.
616 *
617 * @returns VBox status code.
618 * @param pVM The VM handle.
619 * @param enmWhat Which init phase.
620 */
621VMMR3_INT_DECL(int) VMMR3InitCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
622{
623 int rc = VINF_SUCCESS;
624
625 switch (enmWhat)
626 {
627 case VMINITCOMPLETED_RING3:
628 {
629 /*
630 * Set page attributes to r/w for stack pages.
631 */
632 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
633 {
634 rc = PGMMapSetPage(pVM, pVM->aCpus[idCpu].vmm.s.pbEMTStackRC, VMM_STACK_SIZE,
635 X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
636 AssertRCReturn(rc, rc);
637 }
638
639 /*
640 * Create the EMT yield timer.
641 */
642 rc = TMR3TimerCreateInternal(pVM, TMCLOCK_REAL, vmmR3YieldEMT, NULL, "EMT Yielder", &pVM->vmm.s.pYieldTimer);
643 AssertRCReturn(rc, rc);
644
645 rc = TMTimerSetMillies(pVM->vmm.s.pYieldTimer, pVM->vmm.s.cYieldEveryMillies);
646 AssertRCReturn(rc, rc);
647
648#ifdef VBOX_WITH_NMI
649 /*
650 * Map the host APIC into GC - This is AMD/Intel + Host OS specific!
651 */
652 rc = PGMMap(pVM, pVM->vmm.s.GCPtrApicBase, 0xfee00000, PAGE_SIZE,
653 X86_PTE_P | X86_PTE_RW | X86_PTE_PWT | X86_PTE_PCD | X86_PTE_A | X86_PTE_D);
654 AssertRCReturn(rc, rc);
655#endif
656
657#ifdef VBOX_STRICT_VMM_STACK
658 /*
659 * Set up the stack guard pages: two inaccessible pages on each side of the
660 * stack to catch over/under-flows.
661 */
662 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
663 {
664 uint8_t *pbEMTStackR3 = pVM->aCpus[idCpu].vmm.s.pbEMTStackR3;
665
666 memset(pbEMTStackR3 - PAGE_SIZE, 0xcc, PAGE_SIZE);
667 MMR3HyperSetGuard(pVM, pbEMTStackR3 - PAGE_SIZE, PAGE_SIZE, true /*fSet*/);
668
669 memset(pbEMTStackR3 + VMM_STACK_SIZE, 0xcc, PAGE_SIZE);
670 MMR3HyperSetGuard(pVM, pbEMTStackR3 + VMM_STACK_SIZE, PAGE_SIZE, true /*fSet*/);
671 }
672 pVM->vmm.s.fStackGuardsStationed = true;
673#endif
674 break;
675 }
676
677 case VMINITCOMPLETED_RING0:
678 {
679 /*
680 * Disable the periodic preemption timers if we can use the
681 * VMX-preemption timer instead.
682 */
683 if ( pVM->vmm.s.fUsePeriodicPreemptionTimers
684 && HWACCMR3IsVmxPreemptionTimerUsed(pVM))
685 pVM->vmm.s.fUsePeriodicPreemptionTimers = false;
686 LogRel(("VMM: fUsePeriodicPreemptionTimers=%RTbool\n", pVM->vmm.s.fUsePeriodicPreemptionTimers));
687 break;
688 }
689
690 default: /* shuts up gcc */
691 break;
692 }
693
694 return rc;
695}
696
697
698/**
699 * Terminate the VMM bits.
700 *
701 * @returns VINF_SUCCESS.
702 * @param pVM The VM handle.
703 */
704VMMR3_INT_DECL(int) VMMR3Term(PVM pVM)
705{
706 PVMCPU pVCpu = VMMGetCpu(pVM);
707 Assert(pVCpu && pVCpu->idCpu == 0);
708
709 /*
710 * Call Ring-0 entry with termination code.
711 */
712 int rc;
713 for (;;)
714 {
715#ifdef NO_SUPCALLR0VMM
716 //rc = VERR_GENERAL_FAILURE;
717 rc = VINF_SUCCESS;
718#else
719 rc = SUPR3CallVMMR0Ex(pVM->pVMR0, 0 /*idCpu*/, VMMR0_DO_VMMR0_TERM, 0, NULL);
720#endif
721 /*
722 * Flush the logs.
723 */
724#ifdef LOG_ENABLED
725 if ( pVCpu->vmm.s.pR0LoggerR3
726 && pVCpu->vmm.s.pR0LoggerR3->Logger.offScratch > 0)
727 RTLogFlushR0(NULL, &pVCpu->vmm.s.pR0LoggerR3->Logger);
728#endif
729 if (rc != VINF_VMM_CALL_HOST)
730 break;
731 rc = vmmR3ServiceCallRing3Request(pVM, pVCpu);
732 if (RT_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
733 break;
734 /* Resume R0 */
735 }
736 if (RT_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
737 {
738 LogRel(("VMMR3Term: R0 term failed, rc=%Rra. (warning)\n", rc));
739 if (RT_SUCCESS(rc))
740 rc = VERR_IPE_UNEXPECTED_INFO_STATUS;
741 }
742
743 for (VMCPUID i = 0; i < pVM->cCpus; i++)
744 {
745 RTSemEventDestroy(pVM->vmm.s.pahEvtRendezvousEnterOrdered[i]);
746 pVM->vmm.s.pahEvtRendezvousEnterOrdered[i] = NIL_RTSEMEVENT;
747 }
748 RTSemEventDestroy(pVM->vmm.s.hEvtRendezvousEnterOneByOne);
749 pVM->vmm.s.hEvtRendezvousEnterOneByOne = NIL_RTSEMEVENT;
750 RTSemEventMultiDestroy(pVM->vmm.s.hEvtMulRendezvousEnterAllAtOnce);
751 pVM->vmm.s.hEvtMulRendezvousEnterAllAtOnce = NIL_RTSEMEVENTMULTI;
752 RTSemEventMultiDestroy(pVM->vmm.s.hEvtMulRendezvousDone);
753 pVM->vmm.s.hEvtMulRendezvousDone = NIL_RTSEMEVENTMULTI;
754 RTSemEventDestroy(pVM->vmm.s.hEvtRendezvousDoneCaller);
755 pVM->vmm.s.hEvtRendezvousDoneCaller = NIL_RTSEMEVENT;
756
757#ifdef VBOX_STRICT_VMM_STACK
758 /*
759 * Make the two stack guard pages present again.
760 */
761 if (pVM->vmm.s.fStackGuardsStationed)
762 {
763 for (VMCPUID i = 0; i < pVM->cCpus; i++)
764 {
765 uint8_t *pbEMTStackR3 = pVM->aCpus[i].vmm.s.pbEMTStackR3;
766 MMR3HyperSetGuard(pVM, pbEMTStackR3 - PAGE_SIZE, PAGE_SIZE, false /*fSet*/);
767 MMR3HyperSetGuard(pVM, pbEMTStackR3 + VMM_STACK_SIZE, PAGE_SIZE, false /*fSet*/);
768 }
769 pVM->vmm.s.fStackGuardsStationed = false;
770 }
771#endif
772
773 vmmTermFormatTypes();
774 return rc;
775}
776
777
778/**
779 * Applies relocations to data and code managed by this
780 * component. This function will be called at init and
781 * whenever the VMM needs to relocate itself inside the GC.
782 *
783 * The VMM will need to apply relocations to the core code.
784 *
785 * @param pVM The VM handle.
786 * @param offDelta The relocation delta.
787 */
788VMMR3_INT_DECL(void) VMMR3Relocate(PVM pVM, RTGCINTPTR offDelta)
789{
790 LogFlow(("VMMR3Relocate: offDelta=%RGv\n", offDelta));
791
792 /*
793 * Recalc the RC address.
794 */
795#ifdef VBOX_WITH_RAW_MODE
796 pVM->vmm.s.pvCoreCodeRC = MMHyperR3ToRC(pVM, pVM->vmm.s.pvCoreCodeR3);
797#endif
798
799 /*
800 * The stack.
801 */
802 for (VMCPUID i = 0; i < pVM->cCpus; i++)
803 {
804 PVMCPU pVCpu = &pVM->aCpus[i];
805
806 CPUMSetHyperESP(pVCpu, CPUMGetHyperESP(pVCpu) + offDelta);
807
808 pVCpu->vmm.s.pbEMTStackRC = MMHyperR3ToRC(pVM, pVCpu->vmm.s.pbEMTStackR3);
809 pVCpu->vmm.s.pbEMTStackBottomRC = pVCpu->vmm.s.pbEMTStackRC + VMM_STACK_SIZE;
810 }
811
812 /*
813 * All the switchers.
814 */
815 vmmR3SwitcherRelocate(pVM, offDelta);
816
817 /*
818 * Get other RC entry points.
819 */
820 int rc = PDMR3LdrGetSymbolRC(pVM, VMMGC_MAIN_MODULE_NAME, "CPUMGCResumeGuest", &pVM->vmm.s.pfnCPUMRCResumeGuest);
821 AssertReleaseMsgRC(rc, ("CPUMGCResumeGuest not found! rc=%Rra\n", rc));
822
823 rc = PDMR3LdrGetSymbolRC(pVM, VMMGC_MAIN_MODULE_NAME, "CPUMGCResumeGuestV86", &pVM->vmm.s.pfnCPUMRCResumeGuestV86);
824 AssertReleaseMsgRC(rc, ("CPUMGCResumeGuestV86 not found! rc=%Rra\n", rc));
825
826 /*
827 * Update the logger.
828 */
829 VMMR3UpdateLoggers(pVM);
830}
831
832
833/**
834 * Updates the settings for the RC and R0 loggers.
835 *
836 * @returns VBox status code.
837 * @param pVM The VM handle.
838 */
839VMMR3_INT_DECL(int) VMMR3UpdateLoggers(PVM pVM)
840{
841 /*
842 * Simply clone the logger instance (for RC).
843 */
844 int rc = VINF_SUCCESS;
845 RTRCPTR RCPtrLoggerFlush = 0;
846
847 if (pVM->vmm.s.pRCLoggerR3
848#ifdef VBOX_WITH_RC_RELEASE_LOGGING
849 || pVM->vmm.s.pRCRelLoggerR3
850#endif
851 )
852 {
853 rc = PDMR3LdrGetSymbolRC(pVM, VMMGC_MAIN_MODULE_NAME, "vmmGCLoggerFlush", &RCPtrLoggerFlush);
854 AssertReleaseMsgRC(rc, ("vmmGCLoggerFlush not found! rc=%Rra\n", rc));
855 }
856
857 if (pVM->vmm.s.pRCLoggerR3)
858 {
859 RTRCPTR RCPtrLoggerWrapper = 0;
860 rc = PDMR3LdrGetSymbolRC(pVM, VMMGC_MAIN_MODULE_NAME, "vmmGCLoggerWrapper", &RCPtrLoggerWrapper);
861 AssertReleaseMsgRC(rc, ("vmmGCLoggerWrapper not found! rc=%Rra\n", rc));
862
863 pVM->vmm.s.pRCLoggerRC = MMHyperR3ToRC(pVM, pVM->vmm.s.pRCLoggerR3);
864 rc = RTLogCloneRC(NULL /* default */, pVM->vmm.s.pRCLoggerR3, pVM->vmm.s.cbRCLogger,
865 RCPtrLoggerWrapper, RCPtrLoggerFlush, RTLOGFLAGS_BUFFERED);
866 AssertReleaseMsgRC(rc, ("RTLogCloneRC failed! rc=%Rra\n", rc));
867 }
868
869#ifdef VBOX_WITH_RC_RELEASE_LOGGING
870 if (pVM->vmm.s.pRCRelLoggerR3)
871 {
872 RTRCPTR RCPtrLoggerWrapper = 0;
873 rc = PDMR3LdrGetSymbolRC(pVM, VMMGC_MAIN_MODULE_NAME, "vmmGCRelLoggerWrapper", &RCPtrLoggerWrapper);
874 AssertReleaseMsgRC(rc, ("vmmGCRelLoggerWrapper not found! rc=%Rra\n", rc));
875
876 pVM->vmm.s.pRCRelLoggerRC = MMHyperR3ToRC(pVM, pVM->vmm.s.pRCRelLoggerR3);
877 rc = RTLogCloneRC(RTLogRelDefaultInstance(), pVM->vmm.s.pRCRelLoggerR3, pVM->vmm.s.cbRCRelLogger,
878 RCPtrLoggerWrapper, RCPtrLoggerFlush, RTLOGFLAGS_BUFFERED);
879 AssertReleaseMsgRC(rc, ("RTLogCloneRC failed! rc=%Rra\n", rc));
880 }
881#endif /* VBOX_WITH_RC_RELEASE_LOGGING */
882
883#ifdef LOG_ENABLED
884 /*
885 * For the ring-0 EMT logger, we use a per-thread logger instance
886 * in ring-0. Only initialize it once.
887 */
888 PRTLOGGER const pDefault = RTLogDefaultInstance();
889 for (VMCPUID i = 0; i < pVM->cCpus; i++)
890 {
891 PVMCPU pVCpu = &pVM->aCpus[i];
892 PVMMR0LOGGER pR0LoggerR3 = pVCpu->vmm.s.pR0LoggerR3;
893 if (pR0LoggerR3)
894 {
895 if (!pR0LoggerR3->fCreated)
896 {
897 RTR0PTR pfnLoggerWrapper = NIL_RTR0PTR;
898 rc = PDMR3LdrGetSymbolR0(pVM, VMMR0_MAIN_MODULE_NAME, "vmmR0LoggerWrapper", &pfnLoggerWrapper);
899 AssertReleaseMsgRCReturn(rc, ("vmmR0LoggerWrapper not found! rc=%Rra\n", rc), rc);
900
901 RTR0PTR pfnLoggerFlush = NIL_RTR0PTR;
902 rc = PDMR3LdrGetSymbolR0(pVM, VMMR0_MAIN_MODULE_NAME, "vmmR0LoggerFlush", &pfnLoggerFlush);
903 AssertReleaseMsgRCReturn(rc, ("vmmR0LoggerFlush not found! rc=%Rra\n", rc), rc);
904
905 rc = RTLogCreateForR0(&pR0LoggerR3->Logger, pR0LoggerR3->cbLogger, pVCpu->vmm.s.pR0LoggerR0 + RT_OFFSETOF(VMMR0LOGGER, Logger),
906 pfnLoggerWrapper, pfnLoggerFlush,
907 RTLOGFLAGS_BUFFERED, RTLOGDEST_DUMMY);
908 AssertReleaseMsgRCReturn(rc, ("RTLogCreateForR0 failed! rc=%Rra\n", rc), rc);
909
910 RTR0PTR pfnLoggerPrefix = NIL_RTR0PTR;
911 rc = PDMR3LdrGetSymbolR0(pVM, VMMR0_MAIN_MODULE_NAME, "vmmR0LoggerPrefix", &pfnLoggerPrefix);
912 AssertReleaseMsgRCReturn(rc, ("vmmR0LoggerPrefix not found! rc=%Rra\n", rc), rc);
913 rc = RTLogSetCustomPrefixCallbackForR0(&pR0LoggerR3->Logger, pVCpu->vmm.s.pR0LoggerR0 + RT_OFFSETOF(VMMR0LOGGER, Logger), pfnLoggerPrefix, NIL_RTR0PTR);
914 AssertReleaseMsgRCReturn(rc, ("RTLogSetCustomPrefixCallback failed! rc=%Rra\n", rc), rc);
915
916 pR0LoggerR3->idCpu = i;
917 pR0LoggerR3->fCreated = true;
918 pR0LoggerR3->fFlushingDisabled = false;
919
920 }
921
922 rc = RTLogCopyGroupsAndFlagsForR0(&pR0LoggerR3->Logger, pVCpu->vmm.s.pR0LoggerR0 + RT_OFFSETOF(VMMR0LOGGER, Logger), pDefault,
923 RTLOGFLAGS_BUFFERED, UINT32_MAX);
924 AssertRC(rc);
925 }
926 }
927#endif
928 return rc;
929}
930
931
932/**
933 * Gets the pointer to a buffer containing the R0/RC RTAssertMsg1Weak output.
934 *
935 * @returns Pointer to the buffer.
936 * @param pVM The VM handle.
937 */
938VMMR3DECL(const char *) VMMR3GetRZAssertMsg1(PVM pVM)
939{
940 if (HWACCMIsEnabled(pVM))
941 return pVM->vmm.s.szRing0AssertMsg1;
942
943 RTRCPTR RCPtr;
944 int rc = PDMR3LdrGetSymbolRC(pVM, NULL, "g_szRTAssertMsg1", &RCPtr);
945 if (RT_SUCCESS(rc))
946 return (const char *)MMHyperRCToR3(pVM, RCPtr);
947
948 return NULL;
949}
950
951
952/**
953 * Gets the pointer to a buffer containing the R0/RC RTAssertMsg2Weak output.
954 *
955 * @returns Pointer to the buffer.
956 * @param pVM The VM handle.
957 */
958VMMR3DECL(const char *) VMMR3GetRZAssertMsg2(PVM pVM)
959{
960 if (HWACCMIsEnabled(pVM))
961 return pVM->vmm.s.szRing0AssertMsg2;
962
963 RTRCPTR RCPtr;
964 int rc = PDMR3LdrGetSymbolRC(pVM, NULL, "g_szRTAssertMsg2", &RCPtr);
965 if (RT_SUCCESS(rc))
966 return (const char *)MMHyperRCToR3(pVM, RCPtr);
967
968 return NULL;
969}
970
971
972/**
973 * Execute state save operation.
974 *
975 * @returns VBox status code.
976 * @param pVM VM Handle.
977 * @param pSSM SSM operation handle.
978 */
979static DECLCALLBACK(int) vmmR3Save(PVM pVM, PSSMHANDLE pSSM)
980{
981 LogFlow(("vmmR3Save:\n"));
982
983 /*
984 * Save the started/stopped state of all CPUs except 0 as it will always
985 * be running. This avoids breaking the saved state version. :-)
986 */
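    /* The resulting unit layout is simply (cCpus - 1) booleans followed by a
       UINT32_MAX terminator; vmmR3Load below consumes them in the same order. */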
987 for (VMCPUID i = 1; i < pVM->cCpus; i++)
988 SSMR3PutBool(pSSM, VMCPUSTATE_IS_STARTED(VMCPU_GET_STATE(&pVM->aCpus[i])));
989
990 return SSMR3PutU32(pSSM, UINT32_MAX); /* terminator */
991}
992
993
994/**
995 * Execute state load operation.
996 *
997 * @returns VBox status code.
998 * @param pVM VM Handle.
999 * @param pSSM SSM operation handle.
1000 * @param uVersion Data layout version.
1001 * @param uPass The data pass.
1002 */
1003static DECLCALLBACK(int) vmmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
1004{
1005 LogFlow(("vmmR3Load:\n"));
1006 Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);
1007
1008 /*
1009 * Validate version.
1010 */
1011 if ( uVersion != VMM_SAVED_STATE_VERSION
1012 && uVersion != VMM_SAVED_STATE_VERSION_3_0)
1013 {
1014 AssertMsgFailed(("vmmR3Load: Invalid version uVersion=%u!\n", uVersion));
1015 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
1016 }
1017
1018 if (uVersion <= VMM_SAVED_STATE_VERSION_3_0)
1019 {
1020 /* Ignore the stack bottom, stack pointer and stack bits. */
1021 RTRCPTR RCPtrIgnored;
1022 SSMR3GetRCPtr(pSSM, &RCPtrIgnored);
1023 SSMR3GetRCPtr(pSSM, &RCPtrIgnored);
1024#ifdef RT_OS_DARWIN
1025 if ( SSMR3HandleVersion(pSSM) >= VBOX_FULL_VERSION_MAKE(3,0,0)
1026 && SSMR3HandleVersion(pSSM) < VBOX_FULL_VERSION_MAKE(3,1,0)
1027 && SSMR3HandleRevision(pSSM) >= 48858
1028 && ( !strcmp(SSMR3HandleHostOSAndArch(pSSM), "darwin.x86")
1029 || !strcmp(SSMR3HandleHostOSAndArch(pSSM), "") )
1030 )
1031 SSMR3Skip(pSSM, 16384);
1032 else
1033 SSMR3Skip(pSSM, 8192);
1034#else
1035 SSMR3Skip(pSSM, 8192);
1036#endif
1037 }
1038
1039 /*
1040 * Restore the VMCPU states. VCPU 0 is always started.
1041 */
1042 VMCPU_SET_STATE(&pVM->aCpus[0], VMCPUSTATE_STARTED);
1043 for (VMCPUID i = 1; i < pVM->cCpus; i++)
1044 {
1045 bool fStarted;
1046 int rc = SSMR3GetBool(pSSM, &fStarted);
1047 if (RT_FAILURE(rc))
1048 return rc;
1049 VMCPU_SET_STATE(&pVM->aCpus[i], fStarted ? VMCPUSTATE_STARTED : VMCPUSTATE_STOPPED);
1050 }
1051
1052 /* terminator */
1053 uint32_t u32;
1054 int rc = SSMR3GetU32(pSSM, &u32);
1055 if (RT_FAILURE(rc))
1056 return rc;
1057 if (u32 != UINT32_MAX)
1058 {
1059 AssertMsgFailed(("u32=%#x\n", u32));
1060 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
1061 }
1062 return VINF_SUCCESS;
1063}
1064
1065
1066/**
1067 * Resolve a builtin RC symbol.
1068 *
1069 * Called by PDM when loading or relocating RC modules.
1070 *
1071 * @returns VBox status code.
1072 * @param pVM VM Handle.
1073 * @param pszSymbol Symbol to resolve.
1074 * @param pRCPtrValue Where to store the symbol value.
1075 *
1076 * @remark This has to work before VMMR3Relocate() is called.
1077 */
1078VMMR3_INT_DECL(int) VMMR3GetImportRC(PVM pVM, const char *pszSymbol, PRTRCPTR pRCPtrValue)
1079{
1080 if (!strcmp(pszSymbol, "g_Logger"))
1081 {
1082 if (pVM->vmm.s.pRCLoggerR3)
1083 pVM->vmm.s.pRCLoggerRC = MMHyperR3ToRC(pVM, pVM->vmm.s.pRCLoggerR3);
1084 *pRCPtrValue = pVM->vmm.s.pRCLoggerRC;
1085 }
1086 else if (!strcmp(pszSymbol, "g_RelLogger"))
1087 {
1088#ifdef VBOX_WITH_RC_RELEASE_LOGGING
1089 if (pVM->vmm.s.pRCRelLoggerR3)
1090 pVM->vmm.s.pRCRelLoggerRC = MMHyperR3ToRC(pVM, pVM->vmm.s.pRCRelLoggerR3);
1091 *pRCPtrValue = pVM->vmm.s.pRCRelLoggerRC;
1092#else
1093 *pRCPtrValue = NIL_RTRCPTR;
1094#endif
1095 }
1096 else
1097 return VERR_SYMBOL_NOT_FOUND;
1098 return VINF_SUCCESS;
1099}
1100
1101
1102/**
1103 * Suspends the CPU yielder.
1104 *
1105 * @param pVM The VM handle.
1106 */
1107VMMR3_INT_DECL(void) VMMR3YieldSuspend(PVM pVM)
1108{
1109 VMCPU_ASSERT_EMT(&pVM->aCpus[0]);
1110 if (!pVM->vmm.s.cYieldResumeMillies)
1111 {
1112 uint64_t u64Now = TMTimerGet(pVM->vmm.s.pYieldTimer);
1113 uint64_t u64Expire = TMTimerGetExpire(pVM->vmm.s.pYieldTimer);
1114 if (u64Now >= u64Expire || u64Expire == ~(uint64_t)0)
1115 pVM->vmm.s.cYieldResumeMillies = pVM->vmm.s.cYieldEveryMillies;
1116 else
1117 pVM->vmm.s.cYieldResumeMillies = TMTimerToMilli(pVM->vmm.s.pYieldTimer, u64Expire - u64Now);
1118 TMTimerStop(pVM->vmm.s.pYieldTimer);
1119 }
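    /* Example: if the yield timer had 7 ms left to run when we were suspended,
       cYieldResumeMillies is now 7 and VMMR3YieldResume below will re-arm the
       timer with just that remainder. */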
1120 pVM->vmm.s.u64LastYield = RTTimeNanoTS();
1121}
1122
1123
1124/**
1125 * Stops the CPU yielder.
1126 *
1127 * @param pVM The VM handle.
1128 */
1129VMMR3_INT_DECL(void) VMMR3YieldStop(PVM pVM)
1130{
1131 if (!pVM->vmm.s.cYieldResumeMillies)
1132 TMTimerStop(pVM->vmm.s.pYieldTimer);
1133 pVM->vmm.s.cYieldResumeMillies = pVM->vmm.s.cYieldEveryMillies;
1134 pVM->vmm.s.u64LastYield = RTTimeNanoTS();
1135}
1136
1137
1138/**
1139 * Resumes the CPU yielder when it has been suspended or stopped.
1140 *
1141 * @param pVM The VM handle.
1142 */
1143VMMR3_INT_DECL(void) VMMR3YieldResume(PVM pVM)
1144{
1145 if (pVM->vmm.s.cYieldResumeMillies)
1146 {
1147 TMTimerSetMillies(pVM->vmm.s.pYieldTimer, pVM->vmm.s.cYieldResumeMillies);
1148 pVM->vmm.s.cYieldResumeMillies = 0;
1149 }
1150}
1151
1152
1153/**
1154 * Internal timer callback function.
1155 *
1156 * @param pVM The VM.
1157 * @param pTimer The timer handle.
1158 * @param pvUser User argument specified upon timer creation.
1159 */
1160static DECLCALLBACK(void) vmmR3YieldEMT(PVM pVM, PTMTIMER pTimer, void *pvUser)
1161{
1162 NOREF(pvUser);
1163
1164 /*
1165 * This really needs some careful tuning. While we shouldn't be too greedy since
1166 * that'll cause the rest of the system to stall, we shouldn't be too nice either
1167 * because that'll cause us to stall.
1168 *
1169 * The current logic is to use the default interval when there is no lag worth
1170 * mentioning, but when we start accumulating lag we don't bother yielding at all.
1171 *
1172 * (This depends on the TMCLOCK_VIRTUAL_SYNC to be scheduled before TMCLOCK_REAL
1173 * so the lag is up to date.)
1174 */
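    /*
     * For example, with the thresholds below: a 10 ms lag always yields, a
     * 200 ms lag only yields if the previous yield was less than 500 ms ago,
     * and once the lag reaches one second we skip yielding and just re-arm the
     * timer.
     */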
1175 const uint64_t u64Lag = TMVirtualSyncGetLag(pVM);
1176 if ( u64Lag < 50000000 /* 50ms */
1177 || ( u64Lag < 1000000000 /* 1s */
1178 && RTTimeNanoTS() - pVM->vmm.s.u64LastYield < 500000000 /* 500 ms */)
1179 )
1180 {
1181 uint64_t u64Elapsed = RTTimeNanoTS();
1182 pVM->vmm.s.u64LastYield = u64Elapsed;
1183
1184 RTThreadYield();
1185
1186#ifdef LOG_ENABLED
1187 u64Elapsed = RTTimeNanoTS() - u64Elapsed;
1188 Log(("vmmR3YieldEMT: %RI64 ns\n", u64Elapsed));
1189#endif
1190 }
1191 TMTimerSetMillies(pTimer, pVM->vmm.s.cYieldEveryMillies);
1192}
1193
1194
1195/**
1196 * Executes guest code in the raw-mode context.
1197 *
1198 * @param pVM VM handle.
1199 * @param pVCpu The VMCPU to operate on.
1200 */
1201VMMR3_INT_DECL(int) VMMR3RawRunGC(PVM pVM, PVMCPU pVCpu)
1202{
1203 Log2(("VMMR3RawRunGC: (cs:eip=%04x:%08x)\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
1204
1205 AssertReturn(pVM->cCpus == 1, VERR_RAW_MODE_INVALID_SMP);
1206
1207 /*
1208 * Set the EIP and ESP.
1209 */
1210 CPUMSetHyperEIP(pVCpu, CPUMGetGuestEFlags(pVCpu) & X86_EFL_VM
1211 ? pVM->vmm.s.pfnCPUMRCResumeGuestV86
1212 : pVM->vmm.s.pfnCPUMRCResumeGuest);
1213 CPUMSetHyperESP(pVCpu, pVCpu->vmm.s.pbEMTStackBottomRC);
1214
1215 /*
1216 * We hide log flushes (outer) and hypervisor interrupts (inner).
1217 */
1218 for (;;)
1219 {
1220#ifdef VBOX_STRICT
1221 if (RT_UNLIKELY(!CPUMGetHyperCR3(pVCpu) || CPUMGetHyperCR3(pVCpu) != PGMGetHyperCR3(pVCpu)))
1222 EMR3FatalError(pVCpu, VERR_VMM_HYPER_CR3_MISMATCH);
1223 PGMMapCheck(pVM);
1224#endif
1225 int rc;
1226 do
1227 {
1228#ifdef NO_SUPCALLR0VMM
1229 rc = VERR_GENERAL_FAILURE;
1230#else
1231 rc = SUPR3CallVMMR0Fast(pVM->pVMR0, VMMR0_DO_RAW_RUN, 0);
1232 if (RT_LIKELY(rc == VINF_SUCCESS))
1233 rc = pVCpu->vmm.s.iLastGZRc;
1234#endif
1235 } while (rc == VINF_EM_RAW_INTERRUPT_HYPER);
1236
1237 /*
1238 * Flush the logs.
1239 */
1240#ifdef LOG_ENABLED
1241 PRTLOGGERRC pLogger = pVM->vmm.s.pRCLoggerR3;
1242 if ( pLogger
1243 && pLogger->offScratch > 0)
1244 RTLogFlushRC(NULL, pLogger);
1245#endif
1246#ifdef VBOX_WITH_RC_RELEASE_LOGGING
1247 PRTLOGGERRC pRelLogger = pVM->vmm.s.pRCRelLoggerR3;
1248 if (RT_UNLIKELY(pRelLogger && pRelLogger->offScratch > 0))
1249 RTLogFlushRC(RTLogRelDefaultInstance(), pRelLogger);
1250#endif
1251 if (rc != VINF_VMM_CALL_HOST)
1252 {
1253 Log2(("VMMR3RawRunGC: returns %Rrc (cs:eip=%04x:%08x)\n", rc, CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
1254 return rc;
1255 }
1256 rc = vmmR3ServiceCallRing3Request(pVM, pVCpu);
1257 if (RT_FAILURE(rc))
1258 return rc;
1259 /* Resume GC */
1260 }
1261}
1262
1263
1264/**
1265 * Executes guest code (Intel VT-x and AMD-V).
1266 *
1267 * @param pVM VM handle.
1268 * @param pVCpu The VMCPU to operate on.
1269 */
1270VMMR3_INT_DECL(int) VMMR3HwAccRunGC(PVM pVM, PVMCPU pVCpu)
1271{
1272 Log2(("VMMR3HwAccRunGC: (cs:eip=%04x:%08x)\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
1273
1274 for (;;)
1275 {
1276 int rc;
1277 do
1278 {
1279#ifdef NO_SUPCALLR0VMM
1280 rc = VERR_GENERAL_FAILURE;
1281#else
1282 rc = SUPR3CallVMMR0Fast(pVM->pVMR0, VMMR0_DO_HWACC_RUN, pVCpu->idCpu);
1283 if (RT_LIKELY(rc == VINF_SUCCESS))
1284 rc = pVCpu->vmm.s.iLastGZRc;
1285#endif
1286 } while (rc == VINF_EM_RAW_INTERRUPT_HYPER);
1287
1288#if 0 /* todo triggers too often */
1289 Assert(!VMCPU_FF_ISSET(pVCpu, VMCPU_FF_TO_R3));
1290#endif
1291
1292#ifdef LOG_ENABLED
1293 /*
1294 * Flush the log
1295 */
1296 PVMMR0LOGGER pR0LoggerR3 = pVCpu->vmm.s.pR0LoggerR3;
1297 if ( pR0LoggerR3
1298 && pR0LoggerR3->Logger.offScratch > 0)
1299 RTLogFlushR0(NULL, &pR0LoggerR3->Logger);
1300#endif /* LOG_ENABLED */
1301 if (rc != VINF_VMM_CALL_HOST)
1302 {
1303 Log2(("VMMR3HwAccRunGC: returns %Rrc (cs:eip=%04x:%08x)\n", rc, CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
1304 return rc;
1305 }
1306 rc = vmmR3ServiceCallRing3Request(pVM, pVCpu);
1307 if (RT_FAILURE(rc))
1308 return rc;
1309 /* Resume R0 */
1310 }
1311}
1312
1313/**
1314 * VCPU worker for VMMR3SendSipi.
1315 *
1316 * @param pVM The VM to operate on.
1317 * @param idCpu Virtual CPU to perform SIPI on
1318 * @param uVector SIPI vector
1319 */
1320DECLCALLBACK(int) vmmR3SendSipi(PVM pVM, VMCPUID idCpu, uint32_t uVector)
1321{
1322 PVMCPU pVCpu = VMMGetCpuById(pVM, idCpu);
1323 VMCPU_ASSERT_EMT(pVCpu);
1324
1325 /** @todo what are we supposed to do if the processor is already running? */
1326 if (EMGetState(pVCpu) != EMSTATE_WAIT_SIPI)
1327 return VERR_ACCESS_DENIED;
1328
1329
1330 PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
1331
1332 pCtx->cs = uVector << 8;
1333 pCtx->csHid.u64Base = uVector << 12;
1334 pCtx->csHid.u32Limit = 0x0000ffff;
1335 pCtx->rip = 0;
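    /* Worked example: a SIPI vector of 0x9f yields CS = 0x9f00 with base 0x9f000
       and IP = 0, so the CPU starts executing in real mode at physical address
       0x9f000, as the MP specification prescribes for startup IPIs. */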
1336
1337 Log(("vmmR3SendSipi for VCPU %d with vector %x\n", idCpu, uVector));
1338
1339# if 1 /* If we keep the EMSTATE_WAIT_SIPI method, then move this to EM.cpp. */
1340 EMSetState(pVCpu, EMSTATE_HALTED);
1341 return VINF_EM_RESCHEDULE;
1342# else /* And if we go the VMCPU::enmState way it can stay here. */
1343 VMCPU_ASSERT_STATE(pVCpu, VMCPUSTATE_STOPPED);
1344 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
1345 return VINF_SUCCESS;
1346# endif
1347}
1348
1349DECLCALLBACK(int) vmmR3SendInitIpi(PVM pVM, VMCPUID idCpu)
1350{
1351 PVMCPU pVCpu = VMMGetCpuById(pVM, idCpu);
1352 VMCPU_ASSERT_EMT(pVCpu);
1353
1354 Log(("vmmR3SendInitIpi for VCPU %d\n", idCpu));
1355 CPUMR3ResetCpu(pVCpu);
1356 return VINF_EM_WAIT_SIPI;
1357}
1358
1359/**
1360 * Sends a SIPI to the virtual CPU by setting CS:EIP to a vector-dependent state
1361 * and unhalting the processor.
1362 *
1363 * @param pVM The VM to operate on.
1364 * @param idCpu Virtual CPU to perform SIPI on
1365 * @param uVector SIPI vector
1366 */
1367VMMR3_INT_DECL(void) VMMR3SendSipi(PVM pVM, VMCPUID idCpu, uint32_t uVector)
1368{
1369 AssertReturnVoid(idCpu < pVM->cCpus);
1370
1371 int rc = VMR3ReqCallNoWait(pVM, idCpu, (PFNRT)vmmR3SendSipi, 3, pVM, idCpu, uVector);
1372 AssertRC(rc);
1373}
1374
1375/**
1376 * Sends init IPI to the virtual CPU.
1377 *
1378 * @param pVM The VM to operate on.
1379 * @param idCpu Virtual CPU to perform the init IPI on
1380 */
1381VMMR3_INT_DECL(void) VMMR3SendInitIpi(PVM pVM, VMCPUID idCpu)
1382{
1383 AssertReturnVoid(idCpu < pVM->cCpus);
1384
1385 int rc = VMR3ReqCallNoWait(pVM, idCpu, (PFNRT)vmmR3SendInitIpi, 2, pVM, idCpu);
1386 AssertRC(rc);
1387}
1388
1389/**
1390 * Registers the guest memory range that can be used for patching
1391 *
1392 * @returns VBox status code.
1393 * @param pVM The VM to operate on.
1394 * @param pPatchMem Patch memory range
1395 * @param cbPatchMem Size of the memory range
1396 */
1397VMMR3DECL(int) VMMR3RegisterPatchMemory(PVM pVM, RTGCPTR pPatchMem, unsigned cbPatchMem)
1398{
1399 VM_ASSERT_EMT(pVM);
1400 if (HWACCMIsEnabled(pVM))
1401 return HWACMMR3EnablePatching(pVM, pPatchMem, cbPatchMem);
1402
1403 return VERR_NOT_SUPPORTED;
1404}
1405
1406/**
1407 * Deregisters the guest memory range that can be used for patching
1408 *
1409 * @returns VBox status code.
1410 * @param pVM The VM to operate on.
1411 * @param pPatchMem Patch memory range
1412 * @param cbPatchMem Size of the memory range
1413 */
1414VMMR3DECL(int) VMMR3DeregisterPatchMemory(PVM pVM, RTGCPTR pPatchMem, unsigned cbPatchMem)
1415{
1416 if (HWACCMIsEnabled(pVM))
1417 return HWACMMR3DisablePatching(pVM, pPatchMem, cbPatchMem);
1418
1419 return VINF_SUCCESS;
1420}
1421
1422
1423/**
1424 * Count returns and have the last non-caller EMT wake up the caller.
1425 *
1426 * @returns VBox strict informational status code for EM scheduling. No failures
1427 * will be returned here, those are for the caller only.
1428 *
1429 * @param pVM The VM handle.
1430 */
1431DECL_FORCE_INLINE(int) vmmR3EmtRendezvousNonCallerReturn(PVM pVM)
1432{
1433 int rcRet = ASMAtomicReadS32(&pVM->vmm.s.i32RendezvousStatus);
1434 uint32_t cReturned = ASMAtomicIncU32(&pVM->vmm.s.cRendezvousEmtsReturned);
1435 if (cReturned == pVM->cCpus - 1U)
1436 {
1437 int rc = RTSemEventSignal(pVM->vmm.s.hEvtRendezvousDoneCaller);
1438 AssertLogRelRC(rc);
1439 }
1440
1441 AssertLogRelMsgReturn( rcRet <= VINF_SUCCESS
1442 || (rcRet >= VINF_EM_FIRST && rcRet <= VINF_EM_LAST),
1443 ("%Rrc\n", rcRet),
1444 VERR_IPE_UNEXPECTED_INFO_STATUS);
1445 return RT_SUCCESS(rcRet) ? rcRet : VINF_SUCCESS;
1446}
1447
1448
1449/**
1450 * Common worker for VMMR3EmtRendezvous and VMMR3EmtRendezvousFF.
1451 *
1452 * @returns VBox strict informational status code for EM scheduling. No failures
1453 * will be returned here, those are for the caller only. When
1454 * fIsCaller is set, VINF_SUCCESS is always returned.
1455 *
1456 * @param pVM The VM handle.
1457 * @param pVCpu The VMCPU structure for the calling EMT.
1458 * @param fIsCaller Whether we're the VMMR3EmtRendezvous caller or
1459 * not.
1460 * @param fFlags The flags.
1461 * @param pfnRendezvous The callback.
1462 * @param pvUser The user argument for the callback.
1463 */
1464static int vmmR3EmtRendezvousCommon(PVM pVM, PVMCPU pVCpu, bool fIsCaller,
1465 uint32_t fFlags, PFNVMMEMTRENDEZVOUS pfnRendezvous, void *pvUser)
1466{
1467 int rc;
1468
1469 /*
1470 * Enter, the last EMT triggers the next callback phase.
1471 */
1472 uint32_t cEntered = ASMAtomicIncU32(&pVM->vmm.s.cRendezvousEmtsEntered);
1473 if (cEntered != pVM->cCpus)
1474 {
1475 if ((fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE)
1476 {
1477 /* Wait for our turn. */
1478 rc = RTSemEventWait(pVM->vmm.s.hEvtRendezvousEnterOneByOne, RT_INDEFINITE_WAIT);
1479 AssertLogRelRC(rc);
1480 }
1481 else if ((fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE)
1482 {
1483 /* Wait for the last EMT to arrive and wake everyone up. */
1484 rc = RTSemEventMultiWait(pVM->vmm.s.hEvtMulRendezvousEnterAllAtOnce, RT_INDEFINITE_WAIT);
1485 AssertLogRelRC(rc);
1486 }
1487 else if ( (fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ASCENDING
1488 || (fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING)
1489 {
1490 /* Wait for our turn. */
1491 rc = RTSemEventWait(pVM->vmm.s.pahEvtRendezvousEnterOrdered[pVCpu->idCpu], RT_INDEFINITE_WAIT);
1492 AssertLogRelRC(rc);
1493 }
1494 else
1495 {
1496 Assert((fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE);
1497
1498 /*
1499 * The execute once is handled specially to optimize the code flow.
1500 *
1501 * The last EMT to arrive will perform the callback and the other
1502 * EMTs will wait on the Done/DoneCaller semaphores (instead of
1503 * the EnterOneByOne/AllAtOnce) in the meanwhile. When the callback
1504 * returns, that EMT will initiate the normal return sequence.
1505 */
1506 if (!fIsCaller)
1507 {
1508 rc = RTSemEventMultiWait(pVM->vmm.s.hEvtMulRendezvousDone, RT_INDEFINITE_WAIT);
1509 AssertLogRelRC(rc);
1510
1511 return vmmR3EmtRendezvousNonCallerReturn(pVM);
1512 }
1513 return VINF_SUCCESS;
1514 }
1515 }
1516 else
1517 {
1518 /*
1519 * All EMTs are waiting, clear the FF and take action according to the
1520 * execution method.
1521 */
1522 VM_FF_CLEAR(pVM, VM_FF_EMT_RENDEZVOUS);
1523
1524 if ((fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE)
1525 {
1526 /* Wake up everyone. */
1527 rc = RTSemEventMultiSignal(pVM->vmm.s.hEvtMulRendezvousEnterAllAtOnce);
1528 AssertLogRelRC(rc);
1529 }
1530 else if ( (fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ASCENDING
1531 || (fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING)
1532 {
1533            /* Figure out who to wake up and wake it up. If it's us, it's
1534               easy; otherwise signal that EMT and wait for our turn. */
1535 VMCPUID iFirst = (fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ASCENDING
1536 ? 0
1537 : pVM->cCpus - 1U;
1538 if (pVCpu->idCpu != iFirst)
1539 {
1540 rc = RTSemEventSignal(pVM->vmm.s.pahEvtRendezvousEnterOrdered[iFirst]);
1541 AssertLogRelRC(rc);
1542 rc = RTSemEventWait(pVM->vmm.s.pahEvtRendezvousEnterOrdered[pVCpu->idCpu], RT_INDEFINITE_WAIT);
1543 AssertLogRelRC(rc);
1544 }
1545 }
1546 /* else: execute the handler on the current EMT and wake up one or more threads afterwards. */
1547 }
1548
1549
1550 /*
1551 * Do the callback and update the status if necessary.
1552 */
1553 if ( !(fFlags & VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR)
1554 || RT_SUCCESS(ASMAtomicUoReadS32(&pVM->vmm.s.i32RendezvousStatus)) )
1555 {
1556 VBOXSTRICTRC rcStrict = pfnRendezvous(pVM, pVCpu, pvUser);
1557 if (rcStrict != VINF_SUCCESS)
1558 {
1559 AssertLogRelMsg( rcStrict <= VINF_SUCCESS
1560 || (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST),
1561 ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1562 int32_t i32RendezvousStatus;
1563 do
1564 {
1565 i32RendezvousStatus = ASMAtomicUoReadS32(&pVM->vmm.s.i32RendezvousStatus);
1566 if ( rcStrict == i32RendezvousStatus
1567 || RT_FAILURE(i32RendezvousStatus)
1568 || ( i32RendezvousStatus != VINF_SUCCESS
1569 && rcStrict > i32RendezvousStatus))
1570 break;
1571 } while (!ASMAtomicCmpXchgS32(&pVM->vmm.s.i32RendezvousStatus, VBOXSTRICTRC_VAL(rcStrict), i32RendezvousStatus));
1572 }
1573 }
1574
1575 /*
1576 * Increment the done counter and take action depending on whether we're
1577 * the last to finish callback execution.
1578 */
1579 uint32_t cDone = ASMAtomicIncU32(&pVM->vmm.s.cRendezvousEmtsDone);
1580 if ( cDone != pVM->cCpus
1581 && (fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) != VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE)
1582 {
1583 /* Signal the next EMT? */
1584 if ((fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE)
1585 {
1586 rc = RTSemEventSignal(pVM->vmm.s.hEvtRendezvousEnterOneByOne);
1587 AssertLogRelRC(rc);
1588 }
1589 else if ((fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ASCENDING)
1590 {
1591 Assert(cDone == pVCpu->idCpu + 1U);
1592 rc = RTSemEventSignal(pVM->vmm.s.pahEvtRendezvousEnterOrdered[pVCpu->idCpu + 1U]);
1593 AssertLogRelRC(rc);
1594 }
1595 else if ((fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING)
1596 {
1597 Assert(pVM->cCpus - cDone == pVCpu->idCpu);
1598 rc = RTSemEventSignal(pVM->vmm.s.pahEvtRendezvousEnterOrdered[pVM->cCpus - cDone - 1U]);
1599 AssertLogRelRC(rc);
1600 }
1601
1602 /* Wait for the rest to finish (the caller waits on hEvtRendezvousDoneCaller). */
1603 if (!fIsCaller)
1604 {
1605 rc = RTSemEventMultiWait(pVM->vmm.s.hEvtMulRendezvousDone, RT_INDEFINITE_WAIT);
1606 AssertLogRelRC(rc);
1607 }
1608 }
1609 else
1610 {
1611 /* Callback execution is all done, tell the rest to return. */
1612 rc = RTSemEventMultiSignal(pVM->vmm.s.hEvtMulRendezvousDone);
1613 AssertLogRelRC(rc);
1614 }
1615
1616 if (!fIsCaller)
1617 return vmmR3EmtRendezvousNonCallerReturn(pVM);
1618 return VINF_SUCCESS;
1619}
1620
1621
1622/**
1623 * Called in response to VM_FF_EMT_RENDEZVOUS.
1624 *
1625 * @returns VBox strict status code - EM scheduling. No errors will be returned
1626 * here, nor will any non-EM scheduling status codes be returned.
1627 *
1628 * @param pVM The VM handle
1629 * @param pVCpu The handle of the calling EMT.
1630 *
1631 * @thread EMT
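 *
 * A minimal dispatch sketch, mirroring the spin-wait in VMMR3EmtRendezvous
 * further down; the surrounding loop and the status merging are illustrative
 * only, not taken from a specific caller:
 *
 * @code
 *      if (VM_FF_ISPENDING(pVM, VM_FF_EMT_RENDEZVOUS))
 *      {
 *          int rc2 = VMMR3EmtRendezvousFF(pVM, VMMGetCpu(pVM));
 *          // rc2 is VINF_SUCCESS or an EM scheduling status code which the
 *          // caller would merge into its own strict status.
 *      }
 * @endcode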
1632 */
1633VMMR3_INT_DECL(int) VMMR3EmtRendezvousFF(PVM pVM, PVMCPU pVCpu)
1634{
1635 Assert(!pVCpu->vmm.s.fInRendezvous);
1636 pVCpu->vmm.s.fInRendezvous = true;
1637 int rc = vmmR3EmtRendezvousCommon(pVM, pVCpu, false /* fIsCaller */, pVM->vmm.s.fRendezvousFlags,
1638 pVM->vmm.s.pfnRendezvous, pVM->vmm.s.pvRendezvousUser);
1639 pVCpu->vmm.s.fInRendezvous = false;
1640 return rc;
1641}
1642
1643
1644/**
1645 * EMT rendezvous.
1646 *
1647 * Gathers all the EMTs and executes some code on each of them, either in a
1648 * one-by-one fashion or all at once.
1649 *
1650 * @returns VBox strict status code. This will be the first error,
1651 * VINF_SUCCESS, or an EM scheduling status code.
1652 *
1653 * @param pVM The VM handle.
1654 * @param fFlags Flags indicating execution methods. See
1655 * grp_VMMR3EmtRendezvous_fFlags.
1656 * @param pfnRendezvous The callback.
1657 * @param pvUser User argument for the callback.
1658 *
1659 * @thread Any.
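 *
 * A hedged usage sketch; the worker name and the counter passed via pvUser
 * are made up for illustration. With VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE the
 * callback runs exactly once, on the last EMT to arrive, while the other
 * EMTs wait (see the ONCE handling in vmmR3EmtRendezvousCommon above):
 *
 * @code
 *      static DECLCALLBACK(VBOXSTRICTRC) vmmR3SampleWorker(PVM pVM, PVMCPU pVCpu, void *pvUser)
 *      {
 *          uint32_t *pcCalls = (uint32_t *)pvUser;
 *          *pcCalls += 1;                      // safe: only one EMT executes this
 *          NOREF(pVM); NOREF(pVCpu);
 *          return VINF_SUCCESS;
 *      }
 *
 *      uint32_t cCalls = 0;
 *      int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE, vmmR3SampleWorker, &cCalls);
 *      AssertRC(rc);
 * @endcode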
1660 */
1661VMMR3DECL(int) VMMR3EmtRendezvous(PVM pVM, uint32_t fFlags, PFNVMMEMTRENDEZVOUS pfnRendezvous, void *pvUser)
1662{
1663 /*
1664 * Validate input.
1665 */
1666 AssertMsg( (fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) != VMMEMTRENDEZVOUS_FLAGS_TYPE_INVALID
1667 && (fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) <= VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING
1668 && !(fFlags & ~VMMEMTRENDEZVOUS_FLAGS_VALID_MASK), ("%#x\n", fFlags));
1669 AssertMsg( !(fFlags & VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR)
1670 || ( (fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) != VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE
1671 && (fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) != VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE),
1672 ("type %u\n", fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK));
1673
1674 VBOXSTRICTRC rcStrict;
1675 PVMCPU pVCpu = VMMGetCpu(pVM);
1676 if (!pVCpu)
1677 /*
1678 * Forward the request to an EMT thread.
1679 */
1680 rcStrict = VMR3ReqCallWait(pVM, VMCPUID_ANY,
1681 (PFNRT)VMMR3EmtRendezvous, 4, pVM, fFlags, pfnRendezvous, pvUser);
1682 else if (pVM->cCpus == 1)
1683 {
1684 /*
1685 * Shortcut for the single EMT case.
1686 */
1687 AssertLogRelReturn(!pVCpu->vmm.s.fInRendezvous, VERR_DEADLOCK);
1688 pVCpu->vmm.s.fInRendezvous = true;
1689 rcStrict = pfnRendezvous(pVM, pVCpu, pvUser);
1690 pVCpu->vmm.s.fInRendezvous = false;
1691 }
1692 else
1693 {
1694 /*
1695 * Spin lock. If busy, wait for the other EMT to finish while keeping a
1696         * lookout for the RENDEZVOUS FF.
1697 */
1698 int rc;
1699 rcStrict = VINF_SUCCESS;
1700 if (RT_UNLIKELY(!ASMAtomicCmpXchgU32(&pVM->vmm.s.u32RendezvousLock, 0x77778888, 0)))
1701 {
1702 AssertLogRelReturn(!pVCpu->vmm.s.fInRendezvous, VERR_DEADLOCK);
1703
1704 while (!ASMAtomicCmpXchgU32(&pVM->vmm.s.u32RendezvousLock, 0x77778888, 0))
1705 {
1706 if (VM_FF_ISPENDING(pVM, VM_FF_EMT_RENDEZVOUS))
1707 {
1708 rc = VMMR3EmtRendezvousFF(pVM, pVCpu);
1709 if ( rc != VINF_SUCCESS
1710 && ( rcStrict == VINF_SUCCESS
1711 || rcStrict > rc))
1712 rcStrict = rc;
1713 /** @todo Perhaps deal with termination here? */
1714 }
1715 ASMNopPause();
1716 }
1717 }
1718 Assert(!VM_FF_ISPENDING(pVM, VM_FF_EMT_RENDEZVOUS));
1719 Assert(!pVCpu->vmm.s.fInRendezvous);
1720 pVCpu->vmm.s.fInRendezvous = true;
1721
1722 /*
1723 * Clear the slate. This is a semaphore ping-pong orgy. :-)
1724 */
1725 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1726 {
1727 rc = RTSemEventWait(pVM->vmm.s.pahEvtRendezvousEnterOrdered[i], 0);
1728 AssertLogRelMsg(rc == VERR_TIMEOUT || rc == VINF_SUCCESS, ("%Rrc\n", rc));
1729 }
1730 rc = RTSemEventWait(pVM->vmm.s.hEvtRendezvousEnterOneByOne, 0); AssertLogRelMsg(rc == VERR_TIMEOUT || rc == VINF_SUCCESS, ("%Rrc\n", rc));
1731 rc = RTSemEventMultiReset(pVM->vmm.s.hEvtMulRendezvousEnterAllAtOnce); AssertLogRelRC(rc);
1732 rc = RTSemEventMultiReset(pVM->vmm.s.hEvtMulRendezvousDone); AssertLogRelRC(rc);
1733 rc = RTSemEventWait(pVM->vmm.s.hEvtRendezvousDoneCaller, 0); AssertLogRelMsg(rc == VERR_TIMEOUT || rc == VINF_SUCCESS, ("%Rrc\n", rc));
1734 ASMAtomicWriteU32(&pVM->vmm.s.cRendezvousEmtsEntered, 0);
1735 ASMAtomicWriteU32(&pVM->vmm.s.cRendezvousEmtsDone, 0);
1736 ASMAtomicWriteU32(&pVM->vmm.s.cRendezvousEmtsReturned, 0);
1737 ASMAtomicWriteS32(&pVM->vmm.s.i32RendezvousStatus, VINF_SUCCESS);
1738 ASMAtomicWritePtr((void * volatile *)&pVM->vmm.s.pfnRendezvous, (void *)(uintptr_t)pfnRendezvous);
1739 ASMAtomicWritePtr(&pVM->vmm.s.pvRendezvousUser, pvUser);
1740 ASMAtomicWriteU32(&pVM->vmm.s.fRendezvousFlags, fFlags);
1741
1742 /*
1743 * Set the FF and poke the other EMTs.
1744 */
1745 VM_FF_SET(pVM, VM_FF_EMT_RENDEZVOUS);
1746 VMR3NotifyGlobalFFU(pVM->pUVM, VMNOTIFYFF_FLAGS_POKE);
1747
1748 /*
1749 * Do the same ourselves.
1750 */
1751 vmmR3EmtRendezvousCommon(pVM, pVCpu, true /* fIsCaller */, fFlags, pfnRendezvous, pvUser);
1752
1753 /*
1754 * The caller waits for the other EMTs to be done and return before doing
1755         * the cleanup. This does away with wakeup / reset races we would otherwise
1756 * risk in the multiple release event semaphore code (hEvtRendezvousDoneCaller).
1757 */
1758 rc = RTSemEventWait(pVM->vmm.s.hEvtRendezvousDoneCaller, RT_INDEFINITE_WAIT);
1759 AssertLogRelRC(rc);
1760
1761 /*
1762 * Get the return code and clean up a little bit.
1763 */
1764 int rcMy = pVM->vmm.s.i32RendezvousStatus;
1765 ASMAtomicWriteNullPtr((void * volatile *)&pVM->vmm.s.pfnRendezvous);
1766
1767 ASMAtomicWriteU32(&pVM->vmm.s.u32RendezvousLock, 0);
1768 pVCpu->vmm.s.fInRendezvous = false;
1769
1770 /*
1771 * Merge rcStrict and rcMy.
1772 */
1773 AssertRC(VBOXSTRICTRC_VAL(rcStrict));
1774 if ( rcMy != VINF_SUCCESS
1775 && ( rcStrict == VINF_SUCCESS
1776 || rcStrict > rcMy))
1777 rcStrict = rcMy;
1778 }
1779
1780 AssertLogRelMsgReturn( rcStrict <= VINF_SUCCESS
1781 || (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST),
1782 ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)),
1783 VERR_IPE_UNEXPECTED_INFO_STATUS);
1784 return VBOXSTRICTRC_VAL(rcStrict);
1785}
1786
1787
1788/**
1789 * Disables/enables EMT rendezvous.
1790 *
1791 * This is used to make sure EMT rendezvous does not take place while
1792 * processing a priority request.
1793 *
1794 * @returns Old rendezvous-disabled state.
1795 * @param pVCpu The handle of the calling EMT.
1796 * @param fDisabled True if disabled, false if enabled.
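 *
 * A minimal save/restore sketch for bracketing a priority request; the
 * request processing in the middle is assumed and not shown:
 *
 * @code
 *      bool const fWasDisabled = VMMR3EmtRendezvousSetDisabled(pVCpu, true /*fDisabled*/);
 *      // ... process the priority request without a rendezvous sneaking in ...
 *      VMMR3EmtRendezvousSetDisabled(pVCpu, fWasDisabled);
 * @endcode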
1797 */
1798VMMR3_INT_DECL(bool) VMMR3EmtRendezvousSetDisabled(PVMCPU pVCpu, bool fDisabled)
1799{
1800 VMCPU_ASSERT_EMT(pVCpu);
1801 bool fOld = pVCpu->vmm.s.fInRendezvous;
1802 pVCpu->vmm.s.fInRendezvous = fDisabled;
1803 return fOld;
1804}
1805
1806
1807/**
1808 * Read from the ring 0 jump buffer stack
1809 *
1810 * @returns VBox status code.
1811 *
1812 * @param pVM Pointer to the shared VM structure.
1813 * @param idCpu The ID of the source CPU context (for the address).
1814 * @param R0Addr Where to start reading.
1815 * @param pvBuf Where to store the data we've read.
1816 * @param cbRead The number of bytes to read.
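 *
 * A hedged sketch reading a single stack slot; R0Addr here stands for an
 * address assumed to come from the CPU's ring-0 jump buffer (e.g. for stack
 * unwinding), and the bounds checking in the body below rejects anything
 * outside the EMT stack:
 *
 * @code
 *      RTHCUINTPTR uSlot = 0;
 *      int rc = VMMR3ReadR0Stack(pVM, 0 /*idCpu*/, R0Addr, &uSlot, sizeof(uSlot));
 *      if (RT_SUCCESS(rc))
 *          Log(("ring-0 stack slot: %RHv\n", (RTHCPTR)uSlot));
 * @endcode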
1817 */
1818VMMR3_INT_DECL(int) VMMR3ReadR0Stack(PVM pVM, VMCPUID idCpu, RTHCUINTPTR R0Addr, void *pvBuf, size_t cbRead)
1819{
1820 PVMCPU pVCpu = VMMGetCpuById(pVM, idCpu);
1821 AssertReturn(pVCpu, VERR_INVALID_PARAMETER);
1822
1823#ifdef VMM_R0_SWITCH_STACK
1824 RTHCUINTPTR off = R0Addr - MMHyperCCToR0(pVM, pVCpu->vmm.s.pbEMTStackR3);
1825#else
1826 RTHCUINTPTR off = pVCpu->vmm.s.CallRing3JmpBufR0.cbSavedStack - (pVCpu->vmm.s.CallRing3JmpBufR0.SpCheck - R0Addr);
1827#endif
1828 if ( off > VMM_STACK_SIZE
1829 || off + cbRead >= VMM_STACK_SIZE)
1830 return VERR_INVALID_POINTER;
1831
1832 memcpy(pvBuf, &pVCpu->vmm.s.pbEMTStackR3[off], cbRead);
1833 return VINF_SUCCESS;
1834}
1835
1836
1837/**
1838 * Calls an RC function.
1839 *
1840 * @param pVM The VM handle.
1841 * @param RCPtrEntry The address of the RC function.
1842 * @param cArgs            The number of arguments in the ellipsis.
1843 * @param ... Arguments to the function.
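 *
 * A hedged usage sketch; RCPtrFunc, uArg1 and uArg2 are placeholders (the RC
 * symbol would typically be resolved elsewhere, e.g. via PDMR3LdrGetSymbolRC,
 * which is an assumption and not shown here):
 *
 * @code
 *      int rc = VMMR3CallRC(pVM, RCPtrFunc, 2 /*cArgs*/,
 *                           (RTGCUINTPTR32)uArg1, (RTGCUINTPTR32)uArg2);
 *      if (RT_FAILURE(rc))
 *          Log(("RC call failed: %Rrc\n", rc));
 * @endcode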
1844 */
1845VMMR3DECL(int) VMMR3CallRC(PVM pVM, RTRCPTR RCPtrEntry, unsigned cArgs, ...)
1846{
1847 va_list args;
1848 va_start(args, cArgs);
1849 int rc = VMMR3CallRCV(pVM, RCPtrEntry, cArgs, args);
1850 va_end(args);
1851 return rc;
1852}
1853
1854
1855/**
1856 * Calls an RC function.
1857 *
1858 * @param pVM The VM handle.
1859 * @param RCPtrEntry The address of the RC function.
1860 * @param cArgs            The number of arguments in @a args.
1861 * @param args Arguments to the function.
1862 */
1863VMMR3DECL(int) VMMR3CallRCV(PVM pVM, RTRCPTR RCPtrEntry, unsigned cArgs, va_list args)
1864{
1865 /* Raw mode implies 1 VCPU. */
1866 AssertReturn(pVM->cCpus == 1, VERR_RAW_MODE_INVALID_SMP);
1867 PVMCPU pVCpu = &pVM->aCpus[0];
1868
1869 Log2(("VMMR3CallGCV: RCPtrEntry=%RRv cArgs=%d\n", RCPtrEntry, cArgs));
1870
1871 /*
1872 * Setup the call frame using the trampoline.
1873 */
1874 CPUMHyperSetCtxCore(pVCpu, NULL);
1875 memset(pVCpu->vmm.s.pbEMTStackR3, 0xaa, VMM_STACK_SIZE); /* Clear the stack. */
1876 CPUMSetHyperESP(pVCpu, pVCpu->vmm.s.pbEMTStackBottomRC - cArgs * sizeof(RTGCUINTPTR32));
1877 PRTGCUINTPTR32 pFrame = (PRTGCUINTPTR32)(pVCpu->vmm.s.pbEMTStackR3 + VMM_STACK_SIZE) - cArgs;
1878 int i = cArgs;
1879 while (i-- > 0)
1880 *pFrame++ = va_arg(args, RTGCUINTPTR32);
1881
1882 CPUMPushHyper(pVCpu, cArgs * sizeof(RTGCUINTPTR32)); /* stack frame size */
1883 CPUMPushHyper(pVCpu, RCPtrEntry); /* what to call */
1884 CPUMSetHyperEIP(pVCpu, pVM->vmm.s.pfnCallTrampolineRC);
1885
1886 /*
1887 * We hide log flushes (outer) and hypervisor interrupts (inner).
1888 */
1889 for (;;)
1890 {
1891 int rc;
1892 Assert(CPUMGetHyperCR3(pVCpu) && CPUMGetHyperCR3(pVCpu) == PGMGetHyperCR3(pVCpu));
1893 do
1894 {
1895#ifdef NO_SUPCALLR0VMM
1896 rc = VERR_GENERAL_FAILURE;
1897#else
1898 rc = SUPR3CallVMMR0Fast(pVM->pVMR0, VMMR0_DO_RAW_RUN, 0);
1899 if (RT_LIKELY(rc == VINF_SUCCESS))
1900 rc = pVCpu->vmm.s.iLastGZRc;
1901#endif
1902 } while (rc == VINF_EM_RAW_INTERRUPT_HYPER);
1903
1904 /*
1905 * Flush the logs.
1906 */
1907#ifdef LOG_ENABLED
1908 PRTLOGGERRC pLogger = pVM->vmm.s.pRCLoggerR3;
1909 if ( pLogger
1910 && pLogger->offScratch > 0)
1911 RTLogFlushRC(NULL, pLogger);
1912#endif
1913#ifdef VBOX_WITH_RC_RELEASE_LOGGING
1914 PRTLOGGERRC pRelLogger = pVM->vmm.s.pRCRelLoggerR3;
1915 if (RT_UNLIKELY(pRelLogger && pRelLogger->offScratch > 0))
1916 RTLogFlushRC(RTLogRelDefaultInstance(), pRelLogger);
1917#endif
1918 if (rc == VERR_TRPM_PANIC || rc == VERR_TRPM_DONT_PANIC)
1919 VMMR3FatalDump(pVM, pVCpu, rc);
1920 if (rc != VINF_VMM_CALL_HOST)
1921 {
1922 Log2(("VMMR3CallGCV: returns %Rrc (cs:eip=%04x:%08x)\n", rc, CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
1923 return rc;
1924 }
1925 rc = vmmR3ServiceCallRing3Request(pVM, pVCpu);
1926 if (RT_FAILURE(rc))
1927 return rc;
1928 }
1929}
1930
1931
1932/**
1933 * Wrapper for SUPR3CallVMMR0Ex which will deal with VINF_VMM_CALL_HOST returns.
1934 *
1935 * @returns VBox status code.
1936 * @param pVM The VM to operate on.
1937 * @param uOperation Operation to execute.
1938 * @param u64Arg Constant argument.
1939 * @param pReqHdr Pointer to a request header. See SUPR3CallVMMR0Ex for
1940 * details.
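 *
 * A hedged sketch of a simple call; uOperation would be one of the
 * VMMR0_DO_xxx values, and whether a NULL request header is acceptable
 * depends on the operation (an assumption here, see SUPR3CallVMMR0Ex):
 *
 * @code
 *      // Operations that take a request packet pass a SUPVMMR0REQHDR based
 *      // structure instead of NULL; this sketch uses the plain form.
 *      int rc = VMMR3CallR0(pVM, uOperation, 0 /*u64Arg*/, NULL /*pReqHdr*/);
 *      if (RT_FAILURE(rc))
 *          LogRel(("VMMR3CallR0 failed: %Rrc\n", rc));
 * @endcode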
1941 */
1942VMMR3DECL(int) VMMR3CallR0(PVM pVM, uint32_t uOperation, uint64_t u64Arg, PSUPVMMR0REQHDR pReqHdr)
1943{
1944 PVMCPU pVCpu = VMMGetCpu(pVM);
1945 AssertReturn(pVCpu, VERR_VM_THREAD_NOT_EMT);
1946
1947 /*
1948 * Call Ring-0 entry with init code.
1949 */
1950 int rc;
1951 for (;;)
1952 {
1953#ifdef NO_SUPCALLR0VMM
1954 rc = VERR_GENERAL_FAILURE;
1955#else
1956 rc = SUPR3CallVMMR0Ex(pVM->pVMR0, pVCpu->idCpu, uOperation, u64Arg, pReqHdr);
1957#endif
1958 /*
1959 * Flush the logs.
1960 */
1961#ifdef LOG_ENABLED
1962 if ( pVCpu->vmm.s.pR0LoggerR3
1963 && pVCpu->vmm.s.pR0LoggerR3->Logger.offScratch > 0)
1964 RTLogFlushR0(NULL, &pVCpu->vmm.s.pR0LoggerR3->Logger);
1965#endif
1966 if (rc != VINF_VMM_CALL_HOST)
1967 break;
1968 rc = vmmR3ServiceCallRing3Request(pVM, pVCpu);
1969 if (RT_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
1970 break;
1971 /* Resume R0 */
1972 }
1973
1974 AssertLogRelMsgReturn(rc == VINF_SUCCESS || RT_FAILURE(rc),
1975 ("uOperation=%u rc=%Rrc\n", uOperation, rc),
1976 VERR_IPE_UNEXPECTED_INFO_STATUS);
1977 return rc;
1978}
1979
1980
1981/**
1982 * Resumes executing hypervisor code when interrupted by a queue flush or a
1983 * debug event.
1984 *
1985 * @returns VBox status code.
1986 * @param pVM VM handle.
1987 * @param pVCpu VMCPU handle.
1988 */
1989VMMR3DECL(int) VMMR3ResumeHyper(PVM pVM, PVMCPU pVCpu)
1990{
1991 Log(("VMMR3ResumeHyper: eip=%RRv esp=%RRv\n", CPUMGetHyperEIP(pVCpu), CPUMGetHyperESP(pVCpu)));
1992 AssertReturn(pVM->cCpus == 1, VERR_RAW_MODE_INVALID_SMP);
1993
1994 /*
1995 * We hide log flushes (outer) and hypervisor interrupts (inner).
1996 */
1997 for (;;)
1998 {
1999 int rc;
2000 Assert(CPUMGetHyperCR3(pVCpu) && CPUMGetHyperCR3(pVCpu) == PGMGetHyperCR3(pVCpu));
2001 do
2002 {
2003#ifdef NO_SUPCALLR0VMM
2004 rc = VERR_GENERAL_FAILURE;
2005#else
2006 rc = SUPR3CallVMMR0Fast(pVM->pVMR0, VMMR0_DO_RAW_RUN, 0);
2007 if (RT_LIKELY(rc == VINF_SUCCESS))
2008 rc = pVCpu->vmm.s.iLastGZRc;
2009#endif
2010 } while (rc == VINF_EM_RAW_INTERRUPT_HYPER);
2011
2012 /*
2013         * Flush the loggers.
2014 */
2015#ifdef LOG_ENABLED
2016 PRTLOGGERRC pLogger = pVM->vmm.s.pRCLoggerR3;
2017 if ( pLogger
2018 && pLogger->offScratch > 0)
2019 RTLogFlushRC(NULL, pLogger);
2020#endif
2021#ifdef VBOX_WITH_RC_RELEASE_LOGGING
2022 PRTLOGGERRC pRelLogger = pVM->vmm.s.pRCRelLoggerR3;
2023 if (RT_UNLIKELY(pRelLogger && pRelLogger->offScratch > 0))
2024 RTLogFlushRC(RTLogRelDefaultInstance(), pRelLogger);
2025#endif
2026 if (rc == VERR_TRPM_PANIC || rc == VERR_TRPM_DONT_PANIC)
2027 VMMR3FatalDump(pVM, pVCpu, rc);
2028 if (rc != VINF_VMM_CALL_HOST)
2029 {
2030 Log(("VMMR3ResumeHyper: returns %Rrc\n", rc));
2031 return rc;
2032 }
2033 rc = vmmR3ServiceCallRing3Request(pVM, pVCpu);
2034 if (RT_FAILURE(rc))
2035 return rc;
2036 }
2037}
2038
2039
2040/**
2041 * Service a call to the ring-3 host code.
2042 *
2043 * @returns VBox status code.
2044 * @param pVM VM handle.
2045 * @param pVCpu VMCPU handle
2046 * @remark Careful with critsects.
2047 */
2048static int vmmR3ServiceCallRing3Request(PVM pVM, PVMCPU pVCpu)
2049{
2050 /*
2051 * We must also check for pending critsect exits or else we can deadlock
2052 * when entering other critsects here.
2053 */
2054 if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PDM_CRITSECT))
2055 PDMCritSectFF(pVCpu);
2056
2057 switch (pVCpu->vmm.s.enmCallRing3Operation)
2058 {
2059 /*
2060 * Acquire a critical section.
2061 */
2062 case VMMCALLRING3_PDM_CRIT_SECT_ENTER:
2063 {
2064 pVCpu->vmm.s.rcCallRing3 = PDMR3CritSectEnterEx((PPDMCRITSECT)(uintptr_t)pVCpu->vmm.s.u64CallRing3Arg,
2065 true /*fCallRing3*/);
2066 break;
2067 }
2068
2069 /*
2070 * Acquire the PDM lock.
2071 */
2072 case VMMCALLRING3_PDM_LOCK:
2073 {
2074 pVCpu->vmm.s.rcCallRing3 = PDMR3LockCall(pVM);
2075 break;
2076 }
2077
2078 /*
2079 * Grow the PGM pool.
2080 */
2081 case VMMCALLRING3_PGM_POOL_GROW:
2082 {
2083 pVCpu->vmm.s.rcCallRing3 = PGMR3PoolGrow(pVM);
2084 break;
2085 }
2086
2087 /*
2088         * Maps a page allocation chunk into ring-3 so ring-0 can use it.
2089 */
2090 case VMMCALLRING3_PGM_MAP_CHUNK:
2091 {
2092 pVCpu->vmm.s.rcCallRing3 = PGMR3PhysChunkMap(pVM, pVCpu->vmm.s.u64CallRing3Arg);
2093 break;
2094 }
2095
2096 /*
2097 * Allocates more handy pages.
2098 */
2099 case VMMCALLRING3_PGM_ALLOCATE_HANDY_PAGES:
2100 {
2101 pVCpu->vmm.s.rcCallRing3 = PGMR3PhysAllocateHandyPages(pVM);
2102 break;
2103 }
2104
2105 /*
2106 * Allocates a large page.
2107 */
2108 case VMMCALLRING3_PGM_ALLOCATE_LARGE_HANDY_PAGE:
2109 {
2110 pVCpu->vmm.s.rcCallRing3 = PGMR3PhysAllocateLargeHandyPage(pVM, pVCpu->vmm.s.u64CallRing3Arg);
2111 break;
2112 }
2113
2114 /*
2115 * Acquire the PGM lock.
2116 */
2117 case VMMCALLRING3_PGM_LOCK:
2118 {
2119 pVCpu->vmm.s.rcCallRing3 = PGMR3LockCall(pVM);
2120 break;
2121 }
2122
2123 /*
2124 * Acquire the MM hypervisor heap lock.
2125 */
2126 case VMMCALLRING3_MMHYPER_LOCK:
2127 {
2128 pVCpu->vmm.s.rcCallRing3 = MMR3LockCall(pVM);
2129 break;
2130 }
2131
2132#ifdef VBOX_WITH_REM
2133 /*
2134 * Flush REM handler notifications.
2135 */
2136 case VMMCALLRING3_REM_REPLAY_HANDLER_NOTIFICATIONS:
2137 {
2138 REMR3ReplayHandlerNotifications(pVM);
2139 pVCpu->vmm.s.rcCallRing3 = VINF_SUCCESS;
2140 break;
2141 }
2142#endif
2143
2144 /*
2145 * This is a noop. We just take this route to avoid unnecessary
2146 * tests in the loops.
2147 */
2148 case VMMCALLRING3_VMM_LOGGER_FLUSH:
2149 pVCpu->vmm.s.rcCallRing3 = VINF_SUCCESS;
2150 LogAlways(("*FLUSH*\n"));
2151 break;
2152
2153 /*
2154 * Set the VM error message.
2155 */
2156 case VMMCALLRING3_VM_SET_ERROR:
2157 VMR3SetErrorWorker(pVM);
2158 pVCpu->vmm.s.rcCallRing3 = VINF_SUCCESS;
2159 break;
2160
2161 /*
2162 * Set the VM runtime error message.
2163 */
2164 case VMMCALLRING3_VM_SET_RUNTIME_ERROR:
2165 pVCpu->vmm.s.rcCallRing3 = VMR3SetRuntimeErrorWorker(pVM);
2166 break;
2167
2168 /*
2169 * Signal a ring 0 hypervisor assertion.
2170 * Cancel the longjmp operation that's in progress.
2171 */
2172 case VMMCALLRING3_VM_R0_ASSERTION:
2173 pVCpu->vmm.s.enmCallRing3Operation = VMMCALLRING3_INVALID;
2174 pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call = false;
2175#ifdef RT_ARCH_X86
2176 pVCpu->vmm.s.CallRing3JmpBufR0.eip = 0;
2177#else
2178 pVCpu->vmm.s.CallRing3JmpBufR0.rip = 0;
2179#endif
2180#ifdef VMM_R0_SWITCH_STACK
2181 *(uint64_t *)pVCpu->vmm.s.pbEMTStackR3 = 0; /* clear marker */
2182#endif
2183 LogRel((pVM->vmm.s.szRing0AssertMsg1));
2184 LogRel((pVM->vmm.s.szRing0AssertMsg2));
2185 return VERR_VMM_RING0_ASSERTION;
2186
2187 /*
2188 * A forced switch to ring 0 for preemption purposes.
2189 */
2190 case VMMCALLRING3_VM_R0_PREEMPT:
2191 pVCpu->vmm.s.rcCallRing3 = VINF_SUCCESS;
2192 break;
2193
2194 case VMMCALLRING3_FTM_SET_CHECKPOINT:
2195 pVCpu->vmm.s.rcCallRing3 = FTMR3SetCheckpoint(pVM, (FTMCHECKPOINTTYPE)pVCpu->vmm.s.u64CallRing3Arg);
2196 break;
2197
2198 default:
2199 AssertMsgFailed(("enmCallRing3Operation=%d\n", pVCpu->vmm.s.enmCallRing3Operation));
2200 return VERR_VMM_UNKNOWN_RING3_CALL;
2201 }
2202
2203 pVCpu->vmm.s.enmCallRing3Operation = VMMCALLRING3_INVALID;
2204 return VINF_SUCCESS;
2205}
2206
2207
2208/**
2209 * Displays the force action flags.
2210 *
2211 * @param pVM The VM handle.
2212 * @param pHlp The output helpers.
2213 * @param pszArgs The additional arguments (ignored).
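 *
 * A hedged sketch of how an internal info handler like this one is wired up
 * and then invoked; the registration call site and description string are
 * assumptions (the actual registration lives in the VMM init code), while
 * the "fflags" name follows the rename noted in the change log:
 *
 * @code
 *      // At init time:
 *      rc = DBGFR3InfoRegisterInternal(pVM, "fflags", "Shows the force action flags.", vmmR3InfoFF);
 *
 *      // From the debugger console the same output is then available via:
 *      //      info fflags
 * @endcode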
2214 */
2215static DECLCALLBACK(void) vmmR3InfoFF(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
2216{
2217 int c;
2218 uint32_t f;
2219 NOREF(pszArgs);
2220
2221#define PRINT_FLAG(prf,flag) do { \
2222 if (f & (prf##flag)) \
2223 { \
2224 static const char *s_psz = #flag; \
2225 if (!(c % 6)) \
2226 pHlp->pfnPrintf(pHlp, "%s\n %s", c ? "," : "", s_psz); \
2227 else \
2228 pHlp->pfnPrintf(pHlp, ", %s", s_psz); \
2229 c++; \
2230 f &= ~(prf##flag); \
2231 } \
2232 } while (0)
2233
2234#define PRINT_GROUP(prf,grp,sfx) do { \
2235 if (f & (prf##grp##sfx)) \
2236 { \
2237 static const char *s_psz = #grp; \
2238 if (!(c % 5)) \
2239 pHlp->pfnPrintf(pHlp, "%s %s", c ? ",\n" : " Groups:\n", s_psz); \
2240 else \
2241 pHlp->pfnPrintf(pHlp, ", %s", s_psz); \
2242 c++; \
2243 } \
2244 } while (0)
2245
2246 /*
2247 * The global flags.
2248 */
2249 const uint32_t fGlobalForcedActions = pVM->fGlobalForcedActions;
2250 pHlp->pfnPrintf(pHlp, "Global FFs: %#RX32", fGlobalForcedActions);
2251
2252 /* show the flag mnemonics */
2253 c = 0;
2254 f = fGlobalForcedActions;
2255 PRINT_FLAG(VM_FF_,TM_VIRTUAL_SYNC);
2256 PRINT_FLAG(VM_FF_,PDM_QUEUES);
2257 PRINT_FLAG(VM_FF_,PDM_DMA);
2258 PRINT_FLAG(VM_FF_,DBGF);
2259 PRINT_FLAG(VM_FF_,REQUEST);
2260 PRINT_FLAG(VM_FF_,CHECK_VM_STATE);
2261 PRINT_FLAG(VM_FF_,RESET);
2262 PRINT_FLAG(VM_FF_,EMT_RENDEZVOUS);
2263 PRINT_FLAG(VM_FF_,PGM_NEED_HANDY_PAGES);
2264 PRINT_FLAG(VM_FF_,PGM_NO_MEMORY);
2265 PRINT_FLAG(VM_FF_,PGM_POOL_FLUSH_PENDING);
2266 PRINT_FLAG(VM_FF_,REM_HANDLER_NOTIFY);
2267 PRINT_FLAG(VM_FF_,DEBUG_SUSPEND);
2268 if (f)
2269 pHlp->pfnPrintf(pHlp, "%s\n Unknown bits: %#RX32\n", c ? "," : "", f);
2270 else
2271 pHlp->pfnPrintf(pHlp, "\n");
2272
2273 /* the groups */
2274 c = 0;
2275 f = fGlobalForcedActions;
2276 PRINT_GROUP(VM_FF_,EXTERNAL_SUSPENDED,_MASK);
2277 PRINT_GROUP(VM_FF_,EXTERNAL_HALTED,_MASK);
2278 PRINT_GROUP(VM_FF_,HIGH_PRIORITY_PRE,_MASK);
2279 PRINT_GROUP(VM_FF_,HIGH_PRIORITY_PRE_RAW,_MASK);
2280 PRINT_GROUP(VM_FF_,HIGH_PRIORITY_POST,_MASK);
2281 PRINT_GROUP(VM_FF_,NORMAL_PRIORITY_POST,_MASK);
2282 PRINT_GROUP(VM_FF_,NORMAL_PRIORITY,_MASK);
2283 PRINT_GROUP(VM_FF_,ALL_REM,_MASK);
2284 if (c)
2285 pHlp->pfnPrintf(pHlp, "\n");
2286
2287 /*
2288 * Per CPU flags.
2289 */
2290 for (VMCPUID i = 0; i < pVM->cCpus; i++)
2291 {
2292 const uint32_t fLocalForcedActions = pVM->aCpus[i].fLocalForcedActions;
2293 pHlp->pfnPrintf(pHlp, "CPU %u FFs: %#RX32", i, fLocalForcedActions);
2294
2295 /* show the flag mnemonics */
2296 c = 0;
2297 f = fLocalForcedActions;
2298 PRINT_FLAG(VMCPU_FF_,INTERRUPT_APIC);
2299 PRINT_FLAG(VMCPU_FF_,INTERRUPT_PIC);
2300 PRINT_FLAG(VMCPU_FF_,TIMER);
2301 PRINT_FLAG(VMCPU_FF_,PDM_CRITSECT);
2302 PRINT_FLAG(VMCPU_FF_,PGM_SYNC_CR3);
2303 PRINT_FLAG(VMCPU_FF_,PGM_SYNC_CR3_NON_GLOBAL);
2304 PRINT_FLAG(VMCPU_FF_,TLB_FLUSH);
2305 PRINT_FLAG(VMCPU_FF_,TRPM_SYNC_IDT);
2306 PRINT_FLAG(VMCPU_FF_,SELM_SYNC_TSS);
2307 PRINT_FLAG(VMCPU_FF_,SELM_SYNC_GDT);
2308 PRINT_FLAG(VMCPU_FF_,SELM_SYNC_LDT);
2309 PRINT_FLAG(VMCPU_FF_,INHIBIT_INTERRUPTS);
2310 PRINT_FLAG(VMCPU_FF_,CSAM_SCAN_PAGE);
2311 PRINT_FLAG(VMCPU_FF_,CSAM_PENDING_ACTION);
2312 PRINT_FLAG(VMCPU_FF_,TO_R3);
2313 if (f)
2314 pHlp->pfnPrintf(pHlp, "%s\n Unknown bits: %#RX32\n", c ? "," : "", f);
2315 else
2316 pHlp->pfnPrintf(pHlp, "\n");
2317
2318 if (fLocalForcedActions & VMCPU_FF_INHIBIT_INTERRUPTS)
2319 pHlp->pfnPrintf(pHlp, " intr inhibit RIP: %RGp\n", EMGetInhibitInterruptsPC(&pVM->aCpus[i]));
2320
2321 /* the groups */
2322 c = 0;
2323 f = fLocalForcedActions;
2324 PRINT_GROUP(VMCPU_FF_,EXTERNAL_SUSPENDED,_MASK);
2325 PRINT_GROUP(VMCPU_FF_,EXTERNAL_HALTED,_MASK);
2326 PRINT_GROUP(VMCPU_FF_,HIGH_PRIORITY_PRE,_MASK);
2327 PRINT_GROUP(VMCPU_FF_,HIGH_PRIORITY_PRE_RAW,_MASK);
2328 PRINT_GROUP(VMCPU_FF_,HIGH_PRIORITY_POST,_MASK);
2329 PRINT_GROUP(VMCPU_FF_,NORMAL_PRIORITY_POST,_MASK);
2330 PRINT_GROUP(VMCPU_FF_,NORMAL_PRIORITY,_MASK);
2331 PRINT_GROUP(VMCPU_FF_,RESUME_GUEST,_MASK);
2332 PRINT_GROUP(VMCPU_FF_,HWACCM_TO_R3,_MASK);
2333 PRINT_GROUP(VMCPU_FF_,ALL_REM,_MASK);
2334 if (c)
2335 pHlp->pfnPrintf(pHlp, "\n");
2336 }
2337
2338#undef PRINT_FLAG
2339#undef PRINT_GROUP
2340}
2341