VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/VMM.cpp@ 62647

Last change on this file since 62647 was 62647, checked in by vboxsync, 8 years ago

VMMR3: warnings

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 111.4 KB
Line 
1/* $Id: VMM.cpp 62647 2016-07-28 22:02:27Z vboxsync $ */
2/** @file
3 * VMM - The Virtual Machine Monitor Core.
4 */
5
6/*
7 * Copyright (C) 2006-2016 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18//#define NO_SUPCALLR0VMM
19
20/** @page pg_vmm VMM - The Virtual Machine Monitor
21 *
22 * The VMM component is two things at the moment, it's a component doing a few
23 * management and routing tasks, and it's the whole virtual machine monitor
24 * thing. For hysterical reasons, it is not doing all the management that one
25 * would expect, this is instead done by @ref pg_vm. We'll address this
26 * misdesign eventually, maybe.
27 *
28 * VMM is made up of these components:
29 * - @subpage pg_cfgm
30 * - @subpage pg_cpum
31 * - @subpage pg_csam
32 * - @subpage pg_dbgf
33 * - @subpage pg_em
34 * - @subpage pg_gim
35 * - @subpage pg_gmm
36 * - @subpage pg_gvmm
37 * - @subpage pg_hm
38 * - @subpage pg_iem
39 * - @subpage pg_iom
40 * - @subpage pg_mm
41 * - @subpage pg_patm
42 * - @subpage pg_pdm
43 * - @subpage pg_pgm
44 * - @subpage pg_rem
45 * - @subpage pg_selm
46 * - @subpage pg_ssm
47 * - @subpage pg_stam
48 * - @subpage pg_tm
49 * - @subpage pg_trpm
50 * - @subpage pg_vm
51 *
52 *
53 * @see @ref grp_vmm @ref grp_vm @subpage pg_vmm_guideline @subpage pg_raw
54 *
55 *
56 * @section sec_vmmstate VMM State
57 *
58 * @image html VM_Statechart_Diagram.gif
59 *
60 * To be written.
61 *
62 *
63 * @subsection subsec_vmm_init VMM Initialization
64 *
65 * To be written.
66 *
67 *
68 * @subsection subsec_vmm_term VMM Termination
69 *
70 * To be written.
71 *
72 *
73 * @section sec_vmm_limits VMM Limits
74 *
75 * There are various resource limits imposed by the VMM and it's
76 * sub-components. We'll list some of them here.
77 *
78 * On 64-bit hosts:
79 * - Max 8191 VMs. Imposed by GVMM's handle allocation (GVMM_MAX_HANDLES),
80 * can be increased up to 64K - 1.
81 * - Max 16TB - 64KB of the host memory can be used for backing VM RAM and
82 * ROM pages. The limit is imposed by the 32-bit page ID used by GMM.
83 * - A VM can be assigned all the memory we can use (16TB), however, the
84 * Main API will restrict this to 2TB (MM_RAM_MAX_IN_MB).
85 * - Max 32 virtual CPUs (VMM_MAX_CPU_COUNT).
86 *
87 * On 32-bit hosts:
88 * - Max 127 VMs. Imposed by GMM's per page structure.
89 * - Max 64GB - 64KB of the host memory can be used for backing VM RAM and
90 * ROM pages. The limit is imposed by the 28-bit page ID used
91 * internally in GMM. It is also limited by PAE.
92 * - A VM can be assigned all the memory GMM can allocate, however, the
93 * Main API will restrict this to 3584MB (MM_RAM_MAX_IN_MB).
94 * - Max 32 virtual CPUs (VMM_MAX_CPU_COUNT).
95 *
96 */
97
98
99/*********************************************************************************************************************************
100* Header Files *
101*********************************************************************************************************************************/
102#define LOG_GROUP LOG_GROUP_VMM
103#include <VBox/vmm/vmm.h>
104#include <VBox/vmm/vmapi.h>
105#include <VBox/vmm/pgm.h>
106#include <VBox/vmm/cfgm.h>
107#include <VBox/vmm/pdmqueue.h>
108#include <VBox/vmm/pdmcritsect.h>
109#include <VBox/vmm/pdmcritsectrw.h>
110#include <VBox/vmm/pdmapi.h>
111#include <VBox/vmm/cpum.h>
112#include <VBox/vmm/gim.h>
113#include <VBox/vmm/mm.h>
114#include <VBox/vmm/iom.h>
115#include <VBox/vmm/trpm.h>
116#include <VBox/vmm/selm.h>
117#include <VBox/vmm/em.h>
118#include <VBox/sup.h>
119#include <VBox/vmm/dbgf.h>
120#include <VBox/vmm/csam.h>
121#include <VBox/vmm/patm.h>
122#ifdef VBOX_WITH_NEW_APIC
123# include <VBox/vmm/apic.h>
124#endif
125#ifdef VBOX_WITH_REM
126# include <VBox/vmm/rem.h>
127#endif
128#include <VBox/vmm/ssm.h>
129#include <VBox/vmm/ftm.h>
130#include <VBox/vmm/tm.h>
131#include "VMMInternal.h"
132#include "VMMSwitcher.h"
133#include <VBox/vmm/vm.h>
134#include <VBox/vmm/uvm.h>
135
136#include <VBox/err.h>
137#include <VBox/param.h>
138#include <VBox/version.h>
139#include <VBox/vmm/hm.h>
140#include <iprt/assert.h>
141#include <iprt/alloc.h>
142#include <iprt/asm.h>
143#include <iprt/time.h>
144#include <iprt/semaphore.h>
145#include <iprt/stream.h>
146#include <iprt/string.h>
147#include <iprt/stdarg.h>
148#include <iprt/ctype.h>
149#include <iprt/x86.h>
150
151
152/*********************************************************************************************************************************
153* Defined Constants And Macros *
154*********************************************************************************************************************************/
155/** The saved state version. */
156#define VMM_SAVED_STATE_VERSION 4
157/** The saved state version used by v3.0 and earlier. (Teleportation) */
158#define VMM_SAVED_STATE_VERSION_3_0 3
159
160
161/*********************************************************************************************************************************
162* Internal Functions *
163*********************************************************************************************************************************/
164static int vmmR3InitStacks(PVM pVM);
165static int vmmR3InitLoggers(PVM pVM);
166static void vmmR3InitRegisterStats(PVM pVM);
167static DECLCALLBACK(int) vmmR3Save(PVM pVM, PSSMHANDLE pSSM);
168static DECLCALLBACK(int) vmmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
169static DECLCALLBACK(void) vmmR3YieldEMT(PVM pVM, PTMTIMER pTimer, void *pvUser);
170static VBOXSTRICTRC vmmR3EmtRendezvousCommon(PVM pVM, PVMCPU pVCpu, bool fIsCaller,
171 uint32_t fFlags, PFNVMMEMTRENDEZVOUS pfnRendezvous, void *pvUser);
172static int vmmR3ServiceCallRing3Request(PVM pVM, PVMCPU pVCpu);
173static DECLCALLBACK(void) vmmR3InfoFF(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
174
175
176/**
177 * Initializes the VMM.
178 *
179 * @returns VBox status code.
180 * @param pVM The cross context VM structure.
181 */
182VMMR3_INT_DECL(int) VMMR3Init(PVM pVM)
183{
184 LogFlow(("VMMR3Init\n"));
185
186 /*
187 * Assert alignment, sizes and order.
188 */
189 AssertMsg(pVM->vmm.s.offVM == 0, ("Already initialized!\n"));
190 AssertCompile(sizeof(pVM->vmm.s) <= sizeof(pVM->vmm.padding));
191 AssertCompile(sizeof(pVM->aCpus[0].vmm.s) <= sizeof(pVM->aCpus[0].vmm.padding));
192
193 /*
194 * Init basic VM VMM members.
195 */
196 pVM->vmm.s.offVM = RT_OFFSETOF(VM, vmm);
197 pVM->vmm.s.pahEvtRendezvousEnterOrdered = NULL;
198 pVM->vmm.s.hEvtRendezvousEnterOneByOne = NIL_RTSEMEVENT;
199 pVM->vmm.s.hEvtMulRendezvousEnterAllAtOnce = NIL_RTSEMEVENTMULTI;
200 pVM->vmm.s.hEvtMulRendezvousDone = NIL_RTSEMEVENTMULTI;
201 pVM->vmm.s.hEvtRendezvousDoneCaller = NIL_RTSEMEVENT;
202 pVM->vmm.s.hEvtMulRendezvousRecursionPush = NIL_RTSEMEVENTMULTI;
203 pVM->vmm.s.hEvtMulRendezvousRecursionPop = NIL_RTSEMEVENTMULTI;
204 pVM->vmm.s.hEvtRendezvousRecursionPushCaller = NIL_RTSEMEVENT;
205 pVM->vmm.s.hEvtRendezvousRecursionPopCaller = NIL_RTSEMEVENT;
206
207 /** @cfgm{/YieldEMTInterval, uint32_t, 1, UINT32_MAX, 23, ms}
208 * The EMT yield interval. The EMT yielding is a hack we employ to play a
209 * bit nicer with the rest of the system (like for instance the GUI).
210 */
211 int rc = CFGMR3QueryU32Def(CFGMR3GetRoot(pVM), "YieldEMTInterval", &pVM->vmm.s.cYieldEveryMillies,
212 23 /* Value arrived at after experimenting with the grub boot prompt. */);
213 AssertMsgRCReturn(rc, ("Configuration error. Failed to query \"YieldEMTInterval\", rc=%Rrc\n", rc), rc);
214
215
216 /** @cfgm{/VMM/UsePeriodicPreemptionTimers, boolean, true}
217 * Controls whether we employ per-cpu preemption timers to limit the time
218 * spent executing guest code. This option is not available on all
219 * platforms and we will silently ignore this setting then. If we are
220 * running in VT-x mode, we will use the VMX-preemption timer instead of
221 * this one when possible.
222 */
223 PCFGMNODE pCfgVMM = CFGMR3GetChild(CFGMR3GetRoot(pVM), "VMM");
224 rc = CFGMR3QueryBoolDef(pCfgVMM, "UsePeriodicPreemptionTimers", &pVM->vmm.s.fUsePeriodicPreemptionTimers, true);
225 AssertMsgRCReturn(rc, ("Configuration error. Failed to query \"VMM/UsePeriodicPreemptionTimers\", rc=%Rrc\n", rc), rc);
226
227 /*
228 * Initialize the VMM rendezvous semaphores.
229 */
230 pVM->vmm.s.pahEvtRendezvousEnterOrdered = (PRTSEMEVENT)MMR3HeapAlloc(pVM, MM_TAG_VMM, sizeof(RTSEMEVENT) * pVM->cCpus);
231 if (!pVM->vmm.s.pahEvtRendezvousEnterOrdered)
232 return VERR_NO_MEMORY;
233 for (VMCPUID i = 0; i < pVM->cCpus; i++)
234 pVM->vmm.s.pahEvtRendezvousEnterOrdered[i] = NIL_RTSEMEVENT;
235 for (VMCPUID i = 0; i < pVM->cCpus; i++)
236 {
237 rc = RTSemEventCreate(&pVM->vmm.s.pahEvtRendezvousEnterOrdered[i]);
238 AssertRCReturn(rc, rc);
239 }
240 rc = RTSemEventCreate(&pVM->vmm.s.hEvtRendezvousEnterOneByOne);
241 AssertRCReturn(rc, rc);
242 rc = RTSemEventMultiCreate(&pVM->vmm.s.hEvtMulRendezvousEnterAllAtOnce);
243 AssertRCReturn(rc, rc);
244 rc = RTSemEventMultiCreate(&pVM->vmm.s.hEvtMulRendezvousDone);
245 AssertRCReturn(rc, rc);
246 rc = RTSemEventCreate(&pVM->vmm.s.hEvtRendezvousDoneCaller);
247 AssertRCReturn(rc, rc);
248 rc = RTSemEventMultiCreate(&pVM->vmm.s.hEvtMulRendezvousRecursionPush);
249 AssertRCReturn(rc, rc);
250 rc = RTSemEventMultiCreate(&pVM->vmm.s.hEvtMulRendezvousRecursionPop);
251 AssertRCReturn(rc, rc);
252 rc = RTSemEventCreate(&pVM->vmm.s.hEvtRendezvousRecursionPushCaller);
253 AssertRCReturn(rc, rc);
254 rc = RTSemEventCreate(&pVM->vmm.s.hEvtRendezvousRecursionPopCaller);
255 AssertRCReturn(rc, rc);
256
257 /*
258 * Register the saved state data unit.
259 */
260 rc = SSMR3RegisterInternal(pVM, "vmm", 1, VMM_SAVED_STATE_VERSION, VMM_STACK_SIZE + sizeof(RTGCPTR),
261 NULL, NULL, NULL,
262 NULL, vmmR3Save, NULL,
263 NULL, vmmR3Load, NULL);
264 if (RT_FAILURE(rc))
265 return rc;
266
267 /*
268 * Register the Ring-0 VM handle with the session for fast ioctl calls.
269 */
270 rc = SUPR3SetVMForFastIOCtl(pVM->pVMR0);
271 if (RT_FAILURE(rc))
272 return rc;
273
274 /*
275 * Init various sub-components.
276 */
277 rc = vmmR3SwitcherInit(pVM);
278 if (RT_SUCCESS(rc))
279 {
280 rc = vmmR3InitStacks(pVM);
281 if (RT_SUCCESS(rc))
282 {
283 rc = vmmR3InitLoggers(pVM);
284
285#ifdef VBOX_WITH_NMI
286 /*
287 * Allocate mapping for the host APIC.
288 */
289 if (RT_SUCCESS(rc))
290 {
291 rc = MMR3HyperReserve(pVM, PAGE_SIZE, "Host APIC", &pVM->vmm.s.GCPtrApicBase);
292 AssertRC(rc);
293 }
294#endif
295 if (RT_SUCCESS(rc))
296 {
297 /*
298 * Debug info and statistics.
299 */
300 DBGFR3InfoRegisterInternal(pVM, "fflags", "Displays the current Forced actions Flags.", vmmR3InfoFF);
301 vmmR3InitRegisterStats(pVM);
302 vmmInitFormatTypes();
303
304 return VINF_SUCCESS;
305 }
306 }
307 /** @todo: Need failure cleanup. */
308
309 //more todo in here?
310 //if (RT_SUCCESS(rc))
311 //{
312 //}
313 //int rc2 = vmmR3TermCoreCode(pVM);
314 //AssertRC(rc2));
315 }
316
317 return rc;
318}
319
320
321/**
322 * Allocate & setup the VMM RC stack(s) (for EMTs).
323 *
324 * The stacks are also used for long jumps in Ring-0.
325 *
326 * @returns VBox status code.
327 * @param pVM The cross context VM structure.
328 *
329 * @remarks The optional guard page gets it protection setup up during R3 init
330 * completion because of init order issues.
331 */
332static int vmmR3InitStacks(PVM pVM)
333{
334 int rc = VINF_SUCCESS;
335#ifdef VMM_R0_SWITCH_STACK
336 uint32_t fFlags = MMHYPER_AONR_FLAGS_KERNEL_MAPPING;
337#else
338 uint32_t fFlags = 0;
339#endif
340
341 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
342 {
343 PVMCPU pVCpu = &pVM->aCpus[idCpu];
344
345#ifdef VBOX_STRICT_VMM_STACK
346 rc = MMR3HyperAllocOnceNoRelEx(pVM, PAGE_SIZE + VMM_STACK_SIZE + PAGE_SIZE,
347#else
348 rc = MMR3HyperAllocOnceNoRelEx(pVM, VMM_STACK_SIZE,
349#endif
350 PAGE_SIZE, MM_TAG_VMM, fFlags, (void **)&pVCpu->vmm.s.pbEMTStackR3);
351 if (RT_SUCCESS(rc))
352 {
353#ifdef VBOX_STRICT_VMM_STACK
354 pVCpu->vmm.s.pbEMTStackR3 += PAGE_SIZE;
355#endif
356#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
357 /* MMHyperR3ToR0 returns R3 when not doing hardware assisted virtualization. */
358 if (!HMIsEnabled(pVM))
359 pVCpu->vmm.s.CallRing3JmpBufR0.pvSavedStack = NIL_RTR0PTR;
360 else
361#endif
362 pVCpu->vmm.s.CallRing3JmpBufR0.pvSavedStack = MMHyperR3ToR0(pVM, pVCpu->vmm.s.pbEMTStackR3);
363 pVCpu->vmm.s.pbEMTStackRC = MMHyperR3ToRC(pVM, pVCpu->vmm.s.pbEMTStackR3);
364 pVCpu->vmm.s.pbEMTStackBottomRC = pVCpu->vmm.s.pbEMTStackRC + VMM_STACK_SIZE;
365 AssertRelease(pVCpu->vmm.s.pbEMTStackRC);
366
367 CPUMSetHyperESP(pVCpu, pVCpu->vmm.s.pbEMTStackBottomRC);
368 }
369 }
370
371 return rc;
372}
373
374
375/**
376 * Initialize the loggers.
377 *
378 * @returns VBox status code.
379 * @param pVM The cross context VM structure.
380 */
381static int vmmR3InitLoggers(PVM pVM)
382{
383 int rc;
384#define RTLogCalcSizeForR0(cGroups, fFlags) (RT_OFFSETOF(VMMR0LOGGER, Logger.afGroups[cGroups]) + PAGE_SIZE)
385
386 /*
387 * Allocate RC & R0 Logger instances (they are finalized in the relocator).
388 */
389#ifdef LOG_ENABLED
390 PRTLOGGER pLogger = RTLogDefaultInstance();
391 if (pLogger)
392 {
393 if (!HMIsEnabled(pVM))
394 {
395 pVM->vmm.s.cbRCLogger = RT_OFFSETOF(RTLOGGERRC, afGroups[pLogger->cGroups]);
396 rc = MMR3HyperAllocOnceNoRel(pVM, pVM->vmm.s.cbRCLogger, 0, MM_TAG_VMM, (void **)&pVM->vmm.s.pRCLoggerR3);
397 if (RT_FAILURE(rc))
398 return rc;
399 pVM->vmm.s.pRCLoggerRC = MMHyperR3ToRC(pVM, pVM->vmm.s.pRCLoggerR3);
400 }
401
402# ifdef VBOX_WITH_R0_LOGGING
403 size_t const cbLogger = RTLogCalcSizeForR0(pLogger->cGroups, 0);
404 for (VMCPUID i = 0; i < pVM->cCpus; i++)
405 {
406 PVMCPU pVCpu = &pVM->aCpus[i];
407 rc = MMR3HyperAllocOnceNoRelEx(pVM, cbLogger, PAGE_SIZE, MM_TAG_VMM, MMHYPER_AONR_FLAGS_KERNEL_MAPPING,
408 (void **)&pVCpu->vmm.s.pR0LoggerR3);
409 if (RT_FAILURE(rc))
410 return rc;
411 pVCpu->vmm.s.pR0LoggerR3->pVM = pVM->pVMR0;
412 //pVCpu->vmm.s.pR0LoggerR3->fCreated = false;
413 pVCpu->vmm.s.pR0LoggerR3->cbLogger = (uint32_t)cbLogger;
414 pVCpu->vmm.s.pR0LoggerR0 = MMHyperR3ToR0(pVM, pVCpu->vmm.s.pR0LoggerR3);
415 }
416# endif
417 }
418#endif /* LOG_ENABLED */
419
420#ifdef VBOX_WITH_RC_RELEASE_LOGGING
421 /*
422 * Allocate RC release logger instances (finalized in the relocator).
423 */
424 if (!HMIsEnabled(pVM))
425 {
426 PRTLOGGER pRelLogger = RTLogRelGetDefaultInstance();
427 if (pRelLogger)
428 {
429 pVM->vmm.s.cbRCRelLogger = RT_OFFSETOF(RTLOGGERRC, afGroups[pRelLogger->cGroups]);
430 rc = MMR3HyperAllocOnceNoRel(pVM, pVM->vmm.s.cbRCRelLogger, 0, MM_TAG_VMM, (void **)&pVM->vmm.s.pRCRelLoggerR3);
431 if (RT_FAILURE(rc))
432 return rc;
433 pVM->vmm.s.pRCRelLoggerRC = MMHyperR3ToRC(pVM, pVM->vmm.s.pRCRelLoggerR3);
434 }
435 }
436#endif /* VBOX_WITH_RC_RELEASE_LOGGING */
437 return VINF_SUCCESS;
438}
439
440
441/**
442 * VMMR3Init worker that register the statistics with STAM.
443 *
444 * @param pVM The cross context VM structure.
445 */
446static void vmmR3InitRegisterStats(PVM pVM)
447{
448 RT_NOREF_PV(pVM);
449
450 /*
451 * Statistics.
452 */
453 STAM_REG(pVM, &pVM->vmm.s.StatRunRC, STAMTYPE_COUNTER, "/VMM/RunRC", STAMUNIT_OCCURENCES, "Number of context switches.");
454 STAM_REG(pVM, &pVM->vmm.s.StatRZRetNormal, STAMTYPE_COUNTER, "/VMM/RZRet/Normal", STAMUNIT_OCCURENCES, "Number of VINF_SUCCESS returns.");
455 STAM_REG(pVM, &pVM->vmm.s.StatRZRetInterrupt, STAMTYPE_COUNTER, "/VMM/RZRet/Interrupt", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_INTERRUPT returns.");
456 STAM_REG(pVM, &pVM->vmm.s.StatRZRetInterruptHyper, STAMTYPE_COUNTER, "/VMM/RZRet/InterruptHyper", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_INTERRUPT_HYPER returns.");
457 STAM_REG(pVM, &pVM->vmm.s.StatRZRetGuestTrap, STAMTYPE_COUNTER, "/VMM/RZRet/GuestTrap", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_GUEST_TRAP returns.");
458 STAM_REG(pVM, &pVM->vmm.s.StatRZRetRingSwitch, STAMTYPE_COUNTER, "/VMM/RZRet/RingSwitch", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_RING_SWITCH returns.");
459 STAM_REG(pVM, &pVM->vmm.s.StatRZRetRingSwitchInt, STAMTYPE_COUNTER, "/VMM/RZRet/RingSwitchInt", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_RING_SWITCH_INT returns.");
460 STAM_REG(pVM, &pVM->vmm.s.StatRZRetStaleSelector, STAMTYPE_COUNTER, "/VMM/RZRet/StaleSelector", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_STALE_SELECTOR returns.");
461 STAM_REG(pVM, &pVM->vmm.s.StatRZRetIRETTrap, STAMTYPE_COUNTER, "/VMM/RZRet/IRETTrap", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_IRET_TRAP returns.");
462 STAM_REG(pVM, &pVM->vmm.s.StatRZRetEmulate, STAMTYPE_COUNTER, "/VMM/RZRet/Emulate", STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION returns.");
463 STAM_REG(pVM, &pVM->vmm.s.StatRZRetIOBlockEmulate, STAMTYPE_COUNTER, "/VMM/RZRet/EmulateIOBlock", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_EMULATE_IO_BLOCK returns.");
464 STAM_REG(pVM, &pVM->vmm.s.StatRZRetPatchEmulate, STAMTYPE_COUNTER, "/VMM/RZRet/PatchEmulate", STAMUNIT_OCCURENCES, "Number of VINF_PATCH_EMULATE_INSTR returns.");
465 STAM_REG(pVM, &pVM->vmm.s.StatRZRetIORead, STAMTYPE_COUNTER, "/VMM/RZRet/IORead", STAMUNIT_OCCURENCES, "Number of VINF_IOM_R3_IOPORT_READ returns.");
466 STAM_REG(pVM, &pVM->vmm.s.StatRZRetIOWrite, STAMTYPE_COUNTER, "/VMM/RZRet/IOWrite", STAMUNIT_OCCURENCES, "Number of VINF_IOM_R3_IOPORT_WRITE returns.");
467 STAM_REG(pVM, &pVM->vmm.s.StatRZRetIOCommitWrite, STAMTYPE_COUNTER, "/VMM/RZRet/IOCommitWrite", STAMUNIT_OCCURENCES, "Number of VINF_IOM_R3_IOPORT_COMMIT_WRITE returns.");
468 STAM_REG(pVM, &pVM->vmm.s.StatRZRetMMIORead, STAMTYPE_COUNTER, "/VMM/RZRet/MMIORead", STAMUNIT_OCCURENCES, "Number of VINF_IOM_R3_MMIO_READ returns.");
469 STAM_REG(pVM, &pVM->vmm.s.StatRZRetMMIOWrite, STAMTYPE_COUNTER, "/VMM/RZRet/MMIOWrite", STAMUNIT_OCCURENCES, "Number of VINF_IOM_R3_MMIO_WRITE returns.");
470 STAM_REG(pVM, &pVM->vmm.s.StatRZRetMMIOCommitWrite, STAMTYPE_COUNTER, "/VMM/RZRet/MMIOCommitWrite", STAMUNIT_OCCURENCES, "Number of VINF_IOM_R3_MMIO_COMMIT_WRITE returns.");
471 STAM_REG(pVM, &pVM->vmm.s.StatRZRetMMIOReadWrite, STAMTYPE_COUNTER, "/VMM/RZRet/MMIOReadWrite", STAMUNIT_OCCURENCES, "Number of VINF_IOM_R3_MMIO_READ_WRITE returns.");
472 STAM_REG(pVM, &pVM->vmm.s.StatRZRetMMIOPatchRead, STAMTYPE_COUNTER, "/VMM/RZRet/MMIOPatchRead", STAMUNIT_OCCURENCES, "Number of VINF_IOM_HC_MMIO_PATCH_READ returns.");
473 STAM_REG(pVM, &pVM->vmm.s.StatRZRetMMIOPatchWrite, STAMTYPE_COUNTER, "/VMM/RZRet/MMIOPatchWrite", STAMUNIT_OCCURENCES, "Number of VINF_IOM_HC_MMIO_PATCH_WRITE returns.");
474 STAM_REG(pVM, &pVM->vmm.s.StatRZRetMSRRead, STAMTYPE_COUNTER, "/VMM/RZRet/MSRRead", STAMUNIT_OCCURENCES, "Number of VINF_CPUM_R3_MSR_READ returns.");
475 STAM_REG(pVM, &pVM->vmm.s.StatRZRetMSRWrite, STAMTYPE_COUNTER, "/VMM/RZRet/MSRWrite", STAMUNIT_OCCURENCES, "Number of VINF_CPUM_R3_MSR_WRITE returns.");
476 STAM_REG(pVM, &pVM->vmm.s.StatRZRetLDTFault, STAMTYPE_COUNTER, "/VMM/RZRet/LDTFault", STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION_GDT_FAULT returns.");
477 STAM_REG(pVM, &pVM->vmm.s.StatRZRetGDTFault, STAMTYPE_COUNTER, "/VMM/RZRet/GDTFault", STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION_LDT_FAULT returns.");
478 STAM_REG(pVM, &pVM->vmm.s.StatRZRetIDTFault, STAMTYPE_COUNTER, "/VMM/RZRet/IDTFault", STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION_IDT_FAULT returns.");
479 STAM_REG(pVM, &pVM->vmm.s.StatRZRetTSSFault, STAMTYPE_COUNTER, "/VMM/RZRet/TSSFault", STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION_TSS_FAULT returns.");
480 STAM_REG(pVM, &pVM->vmm.s.StatRZRetCSAMTask, STAMTYPE_COUNTER, "/VMM/RZRet/CSAMTask", STAMUNIT_OCCURENCES, "Number of VINF_CSAM_PENDING_ACTION returns.");
481 STAM_REG(pVM, &pVM->vmm.s.StatRZRetSyncCR3, STAMTYPE_COUNTER, "/VMM/RZRet/SyncCR", STAMUNIT_OCCURENCES, "Number of VINF_PGM_SYNC_CR3 returns.");
482 STAM_REG(pVM, &pVM->vmm.s.StatRZRetMisc, STAMTYPE_COUNTER, "/VMM/RZRet/Misc", STAMUNIT_OCCURENCES, "Number of misc returns.");
483 STAM_REG(pVM, &pVM->vmm.s.StatRZRetPatchInt3, STAMTYPE_COUNTER, "/VMM/RZRet/PatchInt3", STAMUNIT_OCCURENCES, "Number of VINF_PATM_PATCH_INT3 returns.");
484 STAM_REG(pVM, &pVM->vmm.s.StatRZRetPatchPF, STAMTYPE_COUNTER, "/VMM/RZRet/PatchPF", STAMUNIT_OCCURENCES, "Number of VINF_PATM_PATCH_TRAP_PF returns.");
485 STAM_REG(pVM, &pVM->vmm.s.StatRZRetPatchGP, STAMTYPE_COUNTER, "/VMM/RZRet/PatchGP", STAMUNIT_OCCURENCES, "Number of VINF_PATM_PATCH_TRAP_GP returns.");
486 STAM_REG(pVM, &pVM->vmm.s.StatRZRetPatchIretIRQ, STAMTYPE_COUNTER, "/VMM/RZRet/PatchIret", STAMUNIT_OCCURENCES, "Number of VINF_PATM_PENDING_IRQ_AFTER_IRET returns.");
487 STAM_REG(pVM, &pVM->vmm.s.StatRZRetRescheduleREM, STAMTYPE_COUNTER, "/VMM/RZRet/ScheduleREM", STAMUNIT_OCCURENCES, "Number of VINF_EM_RESCHEDULE_REM returns.");
488 STAM_REG(pVM, &pVM->vmm.s.StatRZRetToR3Total, STAMTYPE_COUNTER, "/VMM/RZRet/ToR3", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TO_R3 returns.");
489 STAM_REG(pVM, &pVM->vmm.s.StatRZRetToR3Unknown, STAMTYPE_COUNTER, "/VMM/RZRet/ToR3/Unknown", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TO_R3 returns without responsible force flag.");
490 STAM_REG(pVM, &pVM->vmm.s.StatRZRetToR3FF, STAMTYPE_COUNTER, "/VMM/RZRet/ToR3/ToR3", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TO_R3 returns with VMCPU_FF_TO_R3.");
491 STAM_REG(pVM, &pVM->vmm.s.StatRZRetToR3TMVirt, STAMTYPE_COUNTER, "/VMM/RZRet/ToR3/TMVirt", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TO_R3 returns with VM_FF_TM_VIRTUAL_SYNC.");
492 STAM_REG(pVM, &pVM->vmm.s.StatRZRetToR3HandyPages, STAMTYPE_COUNTER, "/VMM/RZRet/ToR3/Handy", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TO_R3 returns with VM_FF_PGM_NEED_HANDY_PAGES.");
493 STAM_REG(pVM, &pVM->vmm.s.StatRZRetToR3PDMQueues, STAMTYPE_COUNTER, "/VMM/RZRet/ToR3/PDMQueue", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TO_R3 returns with VM_FF_PDM_QUEUES.");
494 STAM_REG(pVM, &pVM->vmm.s.StatRZRetToR3Rendezvous, STAMTYPE_COUNTER, "/VMM/RZRet/ToR3/Rendezvous", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TO_R3 returns with VM_FF_EMT_RENDEZVOUS.");
495 STAM_REG(pVM, &pVM->vmm.s.StatRZRetToR3Timer, STAMTYPE_COUNTER, "/VMM/RZRet/ToR3/Timer", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TO_R3 returns with VMCPU_FF_TIMER.");
496 STAM_REG(pVM, &pVM->vmm.s.StatRZRetToR3DMA, STAMTYPE_COUNTER, "/VMM/RZRet/ToR3/DMA", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TO_R3 returns with VM_FF_PDM_DMA.");
497 STAM_REG(pVM, &pVM->vmm.s.StatRZRetToR3CritSect, STAMTYPE_COUNTER, "/VMM/RZRet/ToR3/CritSect", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TO_R3 returns with VMCPU_FF_PDM_CRITSECT.");
498 STAM_REG(pVM, &pVM->vmm.s.StatRZRetToR3Iem, STAMTYPE_COUNTER, "/VMM/RZRet/ToR3/IEM", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TO_R3 returns with VMCPU_FF_IEM.");
499 STAM_REG(pVM, &pVM->vmm.s.StatRZRetToR3Iom, STAMTYPE_COUNTER, "/VMM/RZRet/ToR3/IOM", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TO_R3 returns with VMCPU_FF_IOM.");
500 STAM_REG(pVM, &pVM->vmm.s.StatRZRetTimerPending, STAMTYPE_COUNTER, "/VMM/RZRet/TimerPending", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TIMER_PENDING returns.");
501 STAM_REG(pVM, &pVM->vmm.s.StatRZRetInterruptPending, STAMTYPE_COUNTER, "/VMM/RZRet/InterruptPending", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_INTERRUPT_PENDING returns.");
502 STAM_REG(pVM, &pVM->vmm.s.StatRZRetPATMDuplicateFn, STAMTYPE_COUNTER, "/VMM/RZRet/PATMDuplicateFn", STAMUNIT_OCCURENCES, "Number of VINF_PATM_DUPLICATE_FUNCTION returns.");
503 STAM_REG(pVM, &pVM->vmm.s.StatRZRetPGMChangeMode, STAMTYPE_COUNTER, "/VMM/RZRet/PGMChangeMode", STAMUNIT_OCCURENCES, "Number of VINF_PGM_CHANGE_MODE returns.");
504 STAM_REG(pVM, &pVM->vmm.s.StatRZRetPGMFlushPending, STAMTYPE_COUNTER, "/VMM/RZRet/PGMFlushPending", STAMUNIT_OCCURENCES, "Number of VINF_PGM_POOL_FLUSH_PENDING returns.");
505 STAM_REG(pVM, &pVM->vmm.s.StatRZRetPendingRequest, STAMTYPE_COUNTER, "/VMM/RZRet/PendingRequest", STAMUNIT_OCCURENCES, "Number of VINF_EM_PENDING_REQUEST returns.");
506 STAM_REG(pVM, &pVM->vmm.s.StatRZRetPatchTPR, STAMTYPE_COUNTER, "/VMM/RZRet/PatchTPR", STAMUNIT_OCCURENCES, "Number of VINF_EM_HM_PATCH_TPR_INSTR returns.");
507 STAM_REG(pVM, &pVM->vmm.s.StatRZRetCallRing3, STAMTYPE_COUNTER, "/VMM/RZCallR3/Misc", STAMUNIT_OCCURENCES, "Number of Other ring-3 calls.");
508 STAM_REG(pVM, &pVM->vmm.s.StatRZCallPDMLock, STAMTYPE_COUNTER, "/VMM/RZCallR3/PDMLock", STAMUNIT_OCCURENCES, "Number of VMMCALLRING3_PDM_LOCK calls.");
509 STAM_REG(pVM, &pVM->vmm.s.StatRZCallPDMCritSectEnter, STAMTYPE_COUNTER, "/VMM/RZCallR3/PDMCritSectEnter", STAMUNIT_OCCURENCES, "Number of VMMCALLRING3_PDM_CRITSECT_ENTER calls.");
510 STAM_REG(pVM, &pVM->vmm.s.StatRZCallPGMLock, STAMTYPE_COUNTER, "/VMM/RZCallR3/PGMLock", STAMUNIT_OCCURENCES, "Number of VMMCALLRING3_PGM_LOCK calls.");
511 STAM_REG(pVM, &pVM->vmm.s.StatRZCallPGMPoolGrow, STAMTYPE_COUNTER, "/VMM/RZCallR3/PGMPoolGrow", STAMUNIT_OCCURENCES, "Number of VMMCALLRING3_PGM_POOL_GROW calls.");
512 STAM_REG(pVM, &pVM->vmm.s.StatRZCallPGMMapChunk, STAMTYPE_COUNTER, "/VMM/RZCallR3/PGMMapChunk", STAMUNIT_OCCURENCES, "Number of VMMCALLRING3_PGM_MAP_CHUNK calls.");
513 STAM_REG(pVM, &pVM->vmm.s.StatRZCallPGMAllocHandy, STAMTYPE_COUNTER, "/VMM/RZCallR3/PGMAllocHandy", STAMUNIT_OCCURENCES, "Number of VMMCALLRING3_PGM_ALLOCATE_HANDY_PAGES calls.");
514 STAM_REG(pVM, &pVM->vmm.s.StatRZCallRemReplay, STAMTYPE_COUNTER, "/VMM/RZCallR3/REMReplay", STAMUNIT_OCCURENCES, "Number of VMMCALLRING3_REM_REPLAY_HANDLER_NOTIFICATIONS calls.");
515 STAM_REG(pVM, &pVM->vmm.s.StatRZCallLogFlush, STAMTYPE_COUNTER, "/VMM/RZCallR3/VMMLogFlush", STAMUNIT_OCCURENCES, "Number of VMMCALLRING3_VMM_LOGGER_FLUSH calls.");
516 STAM_REG(pVM, &pVM->vmm.s.StatRZCallVMSetError, STAMTYPE_COUNTER, "/VMM/RZCallR3/VMSetError", STAMUNIT_OCCURENCES, "Number of VMMCALLRING3_VM_SET_ERROR calls.");
517 STAM_REG(pVM, &pVM->vmm.s.StatRZCallVMSetRuntimeError, STAMTYPE_COUNTER, "/VMM/RZCallR3/VMRuntimeError", STAMUNIT_OCCURENCES, "Number of VMMCALLRING3_VM_SET_RUNTIME_ERROR calls.");
518
519#ifdef VBOX_WITH_STATISTICS
520 for (VMCPUID i = 0; i < pVM->cCpus; i++)
521 {
522 STAMR3RegisterF(pVM, &pVM->aCpus[i].vmm.s.CallRing3JmpBufR0.cbUsedMax, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Max amount of stack used.", "/VMM/Stack/CPU%u/Max", i);
523 STAMR3RegisterF(pVM, &pVM->aCpus[i].vmm.s.CallRing3JmpBufR0.cbUsedAvg, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Average stack usage.", "/VMM/Stack/CPU%u/Avg", i);
524 STAMR3RegisterF(pVM, &pVM->aCpus[i].vmm.s.CallRing3JmpBufR0.cUsedTotal, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of stack usages.", "/VMM/Stack/CPU%u/Uses", i);
525 }
526#endif
527}
528
529
530/**
531 * Initializes the R0 VMM.
532 *
533 * @returns VBox status code.
534 * @param pVM The cross context VM structure.
535 */
536VMMR3_INT_DECL(int) VMMR3InitR0(PVM pVM)
537{
538 int rc;
539 PVMCPU pVCpu = VMMGetCpu(pVM);
540 Assert(pVCpu && pVCpu->idCpu == 0);
541
542#ifdef LOG_ENABLED
543 /*
544 * Initialize the ring-0 logger if we haven't done so yet.
545 */
546 if ( pVCpu->vmm.s.pR0LoggerR3
547 && !pVCpu->vmm.s.pR0LoggerR3->fCreated)
548 {
549 rc = VMMR3UpdateLoggers(pVM);
550 if (RT_FAILURE(rc))
551 return rc;
552 }
553#endif
554
555 /*
556 * Call Ring-0 entry with init code.
557 */
558 for (;;)
559 {
560#ifdef NO_SUPCALLR0VMM
561 //rc = VERR_GENERAL_FAILURE;
562 rc = VINF_SUCCESS;
563#else
564 rc = SUPR3CallVMMR0Ex(pVM->pVMR0, 0 /*idCpu*/, VMMR0_DO_VMMR0_INIT,
565 RT_MAKE_U64(VMMGetSvnRev(), vmmGetBuildType()), NULL);
566#endif
567 /*
568 * Flush the logs.
569 */
570#ifdef LOG_ENABLED
571 if ( pVCpu->vmm.s.pR0LoggerR3
572 && pVCpu->vmm.s.pR0LoggerR3->Logger.offScratch > 0)
573 RTLogFlushR0(NULL, &pVCpu->vmm.s.pR0LoggerR3->Logger);
574#endif
575 if (rc != VINF_VMM_CALL_HOST)
576 break;
577 rc = vmmR3ServiceCallRing3Request(pVM, pVCpu);
578 if (RT_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
579 break;
580 /* Resume R0 */
581 }
582
583 if (RT_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
584 {
585 LogRel(("VMM: R0 init failed, rc=%Rra\n", rc));
586 if (RT_SUCCESS(rc))
587 rc = VERR_IPE_UNEXPECTED_INFO_STATUS;
588 }
589
590 /* Log whether thread-context hooks are used (on Linux this can depend on how the kernel is configured). */
591 if (pVM->aCpus[0].vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
592 LogRel(("VMM: Enabled thread-context hooks\n"));
593 else
594 LogRel(("VMM: Thread-context hooks unavailable\n"));
595
596 return rc;
597}
598
599
600#ifdef VBOX_WITH_RAW_MODE
601/**
602 * Initializes the RC VMM.
603 *
604 * @returns VBox status code.
605 * @param pVM The cross context VM structure.
606 */
607VMMR3_INT_DECL(int) VMMR3InitRC(PVM pVM)
608{
609 PVMCPU pVCpu = VMMGetCpu(pVM);
610 Assert(pVCpu && pVCpu->idCpu == 0);
611
612 /* In VMX mode, there's no need to init RC. */
613 if (HMIsEnabled(pVM))
614 return VINF_SUCCESS;
615
616 AssertReturn(pVM->cCpus == 1, VERR_RAW_MODE_INVALID_SMP);
617
618 /*
619 * Call VMMRCInit():
620 * -# resolve the address.
621 * -# setup stackframe and EIP to use the trampoline.
622 * -# do a generic hypervisor call.
623 */
624 RTRCPTR RCPtrEP;
625 int rc = PDMR3LdrGetSymbolRC(pVM, VMMRC_MAIN_MODULE_NAME, "VMMRCEntry", &RCPtrEP);
626 if (RT_SUCCESS(rc))
627 {
628 CPUMSetHyperESP(pVCpu, pVCpu->vmm.s.pbEMTStackBottomRC); /* Clear the stack. */
629 uint64_t u64TS = RTTimeProgramStartNanoTS();
630 CPUMPushHyper(pVCpu, (uint32_t)(u64TS >> 32)); /* Param 4: The program startup TS - Hi. */
631 CPUMPushHyper(pVCpu, (uint32_t)u64TS); /* Param 4: The program startup TS - Lo. */
632 CPUMPushHyper(pVCpu, vmmGetBuildType()); /* Param 3: Version argument. */
633 CPUMPushHyper(pVCpu, VMMGetSvnRev()); /* Param 2: Version argument. */
634 CPUMPushHyper(pVCpu, VMMRC_DO_VMMRC_INIT); /* Param 1: Operation. */
635 CPUMPushHyper(pVCpu, pVM->pVMRC); /* Param 0: pVM */
636 CPUMPushHyper(pVCpu, 6 * sizeof(RTRCPTR)); /* trampoline param: stacksize. */
637 CPUMPushHyper(pVCpu, RCPtrEP); /* Call EIP. */
638 CPUMSetHyperEIP(pVCpu, pVM->vmm.s.pfnCallTrampolineRC);
639 Assert(CPUMGetHyperCR3(pVCpu) && CPUMGetHyperCR3(pVCpu) == PGMGetHyperCR3(pVCpu));
640
641 for (;;)
642 {
643#ifdef NO_SUPCALLR0VMM
644 //rc = VERR_GENERAL_FAILURE;
645 rc = VINF_SUCCESS;
646#else
647 rc = SUPR3CallVMMR0(pVM->pVMR0, 0 /* VCPU 0 */, VMMR0_DO_CALL_HYPERVISOR, NULL);
648#endif
649#ifdef LOG_ENABLED
650 PRTLOGGERRC pLogger = pVM->vmm.s.pRCLoggerR3;
651 if ( pLogger
652 && pLogger->offScratch > 0)
653 RTLogFlushRC(NULL, pLogger);
654#endif
655#ifdef VBOX_WITH_RC_RELEASE_LOGGING
656 PRTLOGGERRC pRelLogger = pVM->vmm.s.pRCRelLoggerR3;
657 if (RT_UNLIKELY(pRelLogger && pRelLogger->offScratch > 0))
658 RTLogFlushRC(RTLogRelGetDefaultInstance(), pRelLogger);
659#endif
660 if (rc != VINF_VMM_CALL_HOST)
661 break;
662 rc = vmmR3ServiceCallRing3Request(pVM, pVCpu);
663 if (RT_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
664 break;
665 }
666
667 if (RT_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
668 {
669 VMMR3FatalDump(pVM, pVCpu, rc);
670 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
671 rc = VERR_IPE_UNEXPECTED_INFO_STATUS;
672 }
673 AssertRC(rc);
674 }
675 return rc;
676}
677#endif /* VBOX_WITH_RAW_MODE */
678
679
680/**
681 * Called when an init phase completes.
682 *
683 * @returns VBox status code.
684 * @param pVM The cross context VM structure.
685 * @param enmWhat Which init phase.
686 */
687VMMR3_INT_DECL(int) VMMR3InitCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
688{
689 int rc = VINF_SUCCESS;
690
691 switch (enmWhat)
692 {
693 case VMINITCOMPLETED_RING3:
694 {
695 /*
696 * Set page attributes to r/w for stack pages.
697 */
698 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
699 {
700 rc = PGMMapSetPage(pVM, pVM->aCpus[idCpu].vmm.s.pbEMTStackRC, VMM_STACK_SIZE,
701 X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
702 AssertRCReturn(rc, rc);
703 }
704
705 /*
706 * Create the EMT yield timer.
707 */
708 rc = TMR3TimerCreateInternal(pVM, TMCLOCK_REAL, vmmR3YieldEMT, NULL, "EMT Yielder", &pVM->vmm.s.pYieldTimer);
709 AssertRCReturn(rc, rc);
710
711 rc = TMTimerSetMillies(pVM->vmm.s.pYieldTimer, pVM->vmm.s.cYieldEveryMillies);
712 AssertRCReturn(rc, rc);
713
714#ifdef VBOX_WITH_NMI
715 /*
716 * Map the host APIC into GC - This is AMD/Intel + Host OS specific!
717 */
718 rc = PGMMap(pVM, pVM->vmm.s.GCPtrApicBase, 0xfee00000, PAGE_SIZE,
719 X86_PTE_P | X86_PTE_RW | X86_PTE_PWT | X86_PTE_PCD | X86_PTE_A | X86_PTE_D);
720 AssertRCReturn(rc, rc);
721#endif
722
723#ifdef VBOX_STRICT_VMM_STACK
724 /*
725 * Setup the stack guard pages: Two inaccessible pages at each sides of the
726 * stack to catch over/under-flows.
727 */
728 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
729 {
730 uint8_t *pbEMTStackR3 = pVM->aCpus[idCpu].vmm.s.pbEMTStackR3;
731
732 memset(pbEMTStackR3 - PAGE_SIZE, 0xcc, PAGE_SIZE);
733 MMR3HyperSetGuard(pVM, pbEMTStackR3 - PAGE_SIZE, PAGE_SIZE, true /*fSet*/);
734
735 memset(pbEMTStackR3 + VMM_STACK_SIZE, 0xcc, PAGE_SIZE);
736 MMR3HyperSetGuard(pVM, pbEMTStackR3 + VMM_STACK_SIZE, PAGE_SIZE, true /*fSet*/);
737 }
738 pVM->vmm.s.fStackGuardsStationed = true;
739#endif
740 break;
741 }
742
743 case VMINITCOMPLETED_HM:
744 {
745 /*
746 * Disable the periodic preemption timers if we can use the
747 * VMX-preemption timer instead.
748 */
749 if ( pVM->vmm.s.fUsePeriodicPreemptionTimers
750 && HMR3IsVmxPreemptionTimerUsed(pVM))
751 pVM->vmm.s.fUsePeriodicPreemptionTimers = false;
752 LogRel(("VMM: fUsePeriodicPreemptionTimers=%RTbool\n", pVM->vmm.s.fUsePeriodicPreemptionTimers));
753
754 /*
755 * Last chance for GIM to update its CPUID leaves if it requires
756 * knowledge/information from HM initialization.
757 */
758 rc = GIMR3InitCompleted(pVM);
759 AssertRCReturn(rc, rc);
760
761 /*
762 * CPUM's post-initialization (print CPUIDs).
763 */
764 CPUMR3LogCpuIds(pVM);
765 break;
766 }
767
768 default: /* shuts up gcc */
769 break;
770 }
771
772 return rc;
773}
774
775
776/**
777 * Terminate the VMM bits.
778 *
779 * @returns VBox status code.
780 * @param pVM The cross context VM structure.
781 */
782VMMR3_INT_DECL(int) VMMR3Term(PVM pVM)
783{
784 PVMCPU pVCpu = VMMGetCpu(pVM);
785 Assert(pVCpu && pVCpu->idCpu == 0);
786
787 /*
788 * Call Ring-0 entry with termination code.
789 */
790 int rc;
791 for (;;)
792 {
793#ifdef NO_SUPCALLR0VMM
794 //rc = VERR_GENERAL_FAILURE;
795 rc = VINF_SUCCESS;
796#else
797 rc = SUPR3CallVMMR0Ex(pVM->pVMR0, 0 /*idCpu*/, VMMR0_DO_VMMR0_TERM, 0, NULL);
798#endif
799 /*
800 * Flush the logs.
801 */
802#ifdef LOG_ENABLED
803 if ( pVCpu->vmm.s.pR0LoggerR3
804 && pVCpu->vmm.s.pR0LoggerR3->Logger.offScratch > 0)
805 RTLogFlushR0(NULL, &pVCpu->vmm.s.pR0LoggerR3->Logger);
806#endif
807 if (rc != VINF_VMM_CALL_HOST)
808 break;
809 rc = vmmR3ServiceCallRing3Request(pVM, pVCpu);
810 if (RT_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
811 break;
812 /* Resume R0 */
813 }
814 if (RT_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
815 {
816 LogRel(("VMM: VMMR3Term: R0 term failed, rc=%Rra. (warning)\n", rc));
817 if (RT_SUCCESS(rc))
818 rc = VERR_IPE_UNEXPECTED_INFO_STATUS;
819 }
820
821 for (VMCPUID i = 0; i < pVM->cCpus; i++)
822 {
823 RTSemEventDestroy(pVM->vmm.s.pahEvtRendezvousEnterOrdered[i]);
824 pVM->vmm.s.pahEvtRendezvousEnterOrdered[i] = NIL_RTSEMEVENT;
825 }
826 RTSemEventDestroy(pVM->vmm.s.hEvtRendezvousEnterOneByOne);
827 pVM->vmm.s.hEvtRendezvousEnterOneByOne = NIL_RTSEMEVENT;
828 RTSemEventMultiDestroy(pVM->vmm.s.hEvtMulRendezvousEnterAllAtOnce);
829 pVM->vmm.s.hEvtMulRendezvousEnterAllAtOnce = NIL_RTSEMEVENTMULTI;
830 RTSemEventMultiDestroy(pVM->vmm.s.hEvtMulRendezvousDone);
831 pVM->vmm.s.hEvtMulRendezvousDone = NIL_RTSEMEVENTMULTI;
832 RTSemEventDestroy(pVM->vmm.s.hEvtRendezvousDoneCaller);
833 pVM->vmm.s.hEvtRendezvousDoneCaller = NIL_RTSEMEVENT;
834 RTSemEventMultiDestroy(pVM->vmm.s.hEvtMulRendezvousRecursionPush);
835 pVM->vmm.s.hEvtMulRendezvousRecursionPush = NIL_RTSEMEVENTMULTI;
836 RTSemEventMultiDestroy(pVM->vmm.s.hEvtMulRendezvousRecursionPop);
837 pVM->vmm.s.hEvtMulRendezvousRecursionPop = NIL_RTSEMEVENTMULTI;
838 RTSemEventDestroy(pVM->vmm.s.hEvtRendezvousRecursionPushCaller);
839 pVM->vmm.s.hEvtRendezvousRecursionPushCaller = NIL_RTSEMEVENT;
840 RTSemEventDestroy(pVM->vmm.s.hEvtRendezvousRecursionPopCaller);
841 pVM->vmm.s.hEvtRendezvousRecursionPopCaller = NIL_RTSEMEVENT;
842
843#ifdef VBOX_STRICT_VMM_STACK
844 /*
845 * Make the two stack guard pages present again.
846 */
847 if (pVM->vmm.s.fStackGuardsStationed)
848 {
849 for (VMCPUID i = 0; i < pVM->cCpus; i++)
850 {
851 uint8_t *pbEMTStackR3 = pVM->aCpus[i].vmm.s.pbEMTStackR3;
852 MMR3HyperSetGuard(pVM, pbEMTStackR3 - PAGE_SIZE, PAGE_SIZE, false /*fSet*/);
853 MMR3HyperSetGuard(pVM, pbEMTStackR3 + VMM_STACK_SIZE, PAGE_SIZE, false /*fSet*/);
854 }
855 pVM->vmm.s.fStackGuardsStationed = false;
856 }
857#endif
858
859 vmmTermFormatTypes();
860 return rc;
861}
862
863
864/**
865 * Applies relocations to data and code managed by this
866 * component. This function will be called at init and
867 * whenever the VMM need to relocate it self inside the GC.
868 *
869 * The VMM will need to apply relocations to the core code.
870 *
871 * @param pVM The cross context VM structure.
872 * @param offDelta The relocation delta.
873 */
874VMMR3_INT_DECL(void) VMMR3Relocate(PVM pVM, RTGCINTPTR offDelta)
875{
876 LogFlow(("VMMR3Relocate: offDelta=%RGv\n", offDelta));
877
878 /*
879 * Recalc the RC address.
880 */
881#ifdef VBOX_WITH_RAW_MODE
882 pVM->vmm.s.pvCoreCodeRC = MMHyperR3ToRC(pVM, pVM->vmm.s.pvCoreCodeR3);
883#endif
884
885 /*
886 * The stack.
887 */
888 for (VMCPUID i = 0; i < pVM->cCpus; i++)
889 {
890 PVMCPU pVCpu = &pVM->aCpus[i];
891
892 CPUMSetHyperESP(pVCpu, CPUMGetHyperESP(pVCpu) + offDelta);
893
894 pVCpu->vmm.s.pbEMTStackRC = MMHyperR3ToRC(pVM, pVCpu->vmm.s.pbEMTStackR3);
895 pVCpu->vmm.s.pbEMTStackBottomRC = pVCpu->vmm.s.pbEMTStackRC + VMM_STACK_SIZE;
896 }
897
898 /*
899 * All the switchers.
900 */
901 vmmR3SwitcherRelocate(pVM, offDelta);
902
903 /*
904 * Get other RC entry points.
905 */
906 if (!HMIsEnabled(pVM))
907 {
908 int rc = PDMR3LdrGetSymbolRC(pVM, VMMRC_MAIN_MODULE_NAME, "CPUMGCResumeGuest", &pVM->vmm.s.pfnCPUMRCResumeGuest);
909 AssertReleaseMsgRC(rc, ("CPUMGCResumeGuest not found! rc=%Rra\n", rc));
910
911 rc = PDMR3LdrGetSymbolRC(pVM, VMMRC_MAIN_MODULE_NAME, "CPUMGCResumeGuestV86", &pVM->vmm.s.pfnCPUMRCResumeGuestV86);
912 AssertReleaseMsgRC(rc, ("CPUMGCResumeGuestV86 not found! rc=%Rra\n", rc));
913 }
914
915 /*
916 * Update the logger.
917 */
918 VMMR3UpdateLoggers(pVM);
919}
920
921
922/**
923 * Updates the settings for the RC and R0 loggers.
924 *
925 * @returns VBox status code.
926 * @param pVM The cross context VM structure.
927 */
928VMMR3_INT_DECL(int) VMMR3UpdateLoggers(PVM pVM)
929{
930 /*
931 * Simply clone the logger instance (for RC).
932 */
933 int rc = VINF_SUCCESS;
934 RTRCPTR RCPtrLoggerFlush = 0;
935
936 if ( pVM->vmm.s.pRCLoggerR3
937#ifdef VBOX_WITH_RC_RELEASE_LOGGING
938 || pVM->vmm.s.pRCRelLoggerR3
939#endif
940 )
941 {
942 Assert(!HMIsEnabled(pVM));
943 rc = PDMR3LdrGetSymbolRC(pVM, VMMRC_MAIN_MODULE_NAME, "vmmGCLoggerFlush", &RCPtrLoggerFlush);
944 AssertReleaseMsgRC(rc, ("vmmGCLoggerFlush not found! rc=%Rra\n", rc));
945 }
946
947 if (pVM->vmm.s.pRCLoggerR3)
948 {
949 Assert(!HMIsEnabled(pVM));
950 RTRCPTR RCPtrLoggerWrapper = 0;
951 rc = PDMR3LdrGetSymbolRC(pVM, VMMRC_MAIN_MODULE_NAME, "vmmGCLoggerWrapper", &RCPtrLoggerWrapper);
952 AssertReleaseMsgRC(rc, ("vmmGCLoggerWrapper not found! rc=%Rra\n", rc));
953
954 pVM->vmm.s.pRCLoggerRC = MMHyperR3ToRC(pVM, pVM->vmm.s.pRCLoggerR3);
955 rc = RTLogCloneRC(NULL /* default */, pVM->vmm.s.pRCLoggerR3, pVM->vmm.s.cbRCLogger,
956 RCPtrLoggerWrapper, RCPtrLoggerFlush, RTLOGFLAGS_BUFFERED);
957 AssertReleaseMsgRC(rc, ("RTLogCloneRC failed! rc=%Rra\n", rc));
958 }
959
960#ifdef VBOX_WITH_RC_RELEASE_LOGGING
961 if (pVM->vmm.s.pRCRelLoggerR3)
962 {
963 Assert(!HMIsEnabled(pVM));
964 RTRCPTR RCPtrLoggerWrapper = 0;
965 rc = PDMR3LdrGetSymbolRC(pVM, VMMRC_MAIN_MODULE_NAME, "vmmGCRelLoggerWrapper", &RCPtrLoggerWrapper);
966 AssertReleaseMsgRC(rc, ("vmmGCRelLoggerWrapper not found! rc=%Rra\n", rc));
967
968 pVM->vmm.s.pRCRelLoggerRC = MMHyperR3ToRC(pVM, pVM->vmm.s.pRCRelLoggerR3);
969 rc = RTLogCloneRC(RTLogRelGetDefaultInstance(), pVM->vmm.s.pRCRelLoggerR3, pVM->vmm.s.cbRCRelLogger,
970 RCPtrLoggerWrapper, RCPtrLoggerFlush, RTLOGFLAGS_BUFFERED);
971 AssertReleaseMsgRC(rc, ("RTLogCloneRC failed! rc=%Rra\n", rc));
972 }
973#endif /* VBOX_WITH_RC_RELEASE_LOGGING */
974
975#ifdef LOG_ENABLED
976 /*
977 * For the ring-0 EMT logger, we use a per-thread logger instance
978 * in ring-0. Only initialize it once.
979 */
980 PRTLOGGER const pDefault = RTLogDefaultInstance();
981 for (VMCPUID i = 0; i < pVM->cCpus; i++)
982 {
983 PVMCPU pVCpu = &pVM->aCpus[i];
984 PVMMR0LOGGER pR0LoggerR3 = pVCpu->vmm.s.pR0LoggerR3;
985 if (pR0LoggerR3)
986 {
987 if (!pR0LoggerR3->fCreated)
988 {
989 RTR0PTR pfnLoggerWrapper = NIL_RTR0PTR;
990 rc = PDMR3LdrGetSymbolR0(pVM, VMMR0_MAIN_MODULE_NAME, "vmmR0LoggerWrapper", &pfnLoggerWrapper);
991 AssertReleaseMsgRCReturn(rc, ("vmmR0LoggerWrapper not found! rc=%Rra\n", rc), rc);
992
993 RTR0PTR pfnLoggerFlush = NIL_RTR0PTR;
994 rc = PDMR3LdrGetSymbolR0(pVM, VMMR0_MAIN_MODULE_NAME, "vmmR0LoggerFlush", &pfnLoggerFlush);
995 AssertReleaseMsgRCReturn(rc, ("vmmR0LoggerFlush not found! rc=%Rra\n", rc), rc);
996
997 rc = RTLogCreateForR0(&pR0LoggerR3->Logger, pR0LoggerR3->cbLogger,
998 pVCpu->vmm.s.pR0LoggerR0 + RT_OFFSETOF(VMMR0LOGGER, Logger),
999 pfnLoggerWrapper, pfnLoggerFlush,
1000 RTLOGFLAGS_BUFFERED, RTLOGDEST_DUMMY);
1001 AssertReleaseMsgRCReturn(rc, ("RTLogCreateForR0 failed! rc=%Rra\n", rc), rc);
1002
1003 RTR0PTR pfnLoggerPrefix = NIL_RTR0PTR;
1004 rc = PDMR3LdrGetSymbolR0(pVM, VMMR0_MAIN_MODULE_NAME, "vmmR0LoggerPrefix", &pfnLoggerPrefix);
1005 AssertReleaseMsgRCReturn(rc, ("vmmR0LoggerPrefix not found! rc=%Rra\n", rc), rc);
1006 rc = RTLogSetCustomPrefixCallbackForR0(&pR0LoggerR3->Logger,
1007 pVCpu->vmm.s.pR0LoggerR0 + RT_OFFSETOF(VMMR0LOGGER, Logger),
1008 pfnLoggerPrefix, NIL_RTR0PTR);
1009 AssertReleaseMsgRCReturn(rc, ("RTLogSetCustomPrefixCallback failed! rc=%Rra\n", rc), rc);
1010
1011 pR0LoggerR3->idCpu = i;
1012 pR0LoggerR3->fCreated = true;
1013 pR0LoggerR3->fFlushingDisabled = false;
1014
1015 }
1016
1017 rc = RTLogCopyGroupsAndFlagsForR0(&pR0LoggerR3->Logger, pVCpu->vmm.s.pR0LoggerR0 + RT_OFFSETOF(VMMR0LOGGER, Logger),
1018 pDefault, RTLOGFLAGS_BUFFERED, UINT32_MAX);
1019 AssertRC(rc);
1020 }
1021 }
1022#endif
1023 return rc;
1024}
1025
1026
1027/**
1028 * Gets the pointer to a buffer containing the R0/RC RTAssertMsg1Weak output.
1029 *
1030 * @returns Pointer to the buffer.
1031 * @param pVM The cross context VM structure.
1032 */
1033VMMR3DECL(const char *) VMMR3GetRZAssertMsg1(PVM pVM)
1034{
1035 if (HMIsEnabled(pVM))
1036 return pVM->vmm.s.szRing0AssertMsg1;
1037
1038 RTRCPTR RCPtr;
1039 int rc = PDMR3LdrGetSymbolRC(pVM, NULL, "g_szRTAssertMsg1", &RCPtr);
1040 if (RT_SUCCESS(rc))
1041 return (const char *)MMHyperRCToR3(pVM, RCPtr);
1042
1043 return NULL;
1044}
1045
1046
1047/**
1048 * Returns the VMCPU of the specified virtual CPU.
1049 *
1050 * @returns The VMCPU pointer. NULL if @a idCpu or @a pUVM is invalid.
1051 *
1052 * @param pUVM The user mode VM handle.
1053 * @param idCpu The ID of the virtual CPU.
1054 */
1055VMMR3DECL(PVMCPU) VMMR3GetCpuByIdU(PUVM pUVM, RTCPUID idCpu)
1056{
1057 UVM_ASSERT_VALID_EXT_RETURN(pUVM, NULL);
1058 AssertReturn(idCpu < pUVM->cCpus, NULL);
1059 VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, NULL);
1060 return &pUVM->pVM->aCpus[idCpu];
1061}
1062
1063
1064/**
1065 * Gets the pointer to a buffer containing the R0/RC RTAssertMsg2Weak output.
1066 *
1067 * @returns Pointer to the buffer.
1068 * @param pVM The cross context VM structure.
1069 */
1070VMMR3DECL(const char *) VMMR3GetRZAssertMsg2(PVM pVM)
1071{
1072 if (HMIsEnabled(pVM))
1073 return pVM->vmm.s.szRing0AssertMsg2;
1074
1075 RTRCPTR RCPtr;
1076 int rc = PDMR3LdrGetSymbolRC(pVM, NULL, "g_szRTAssertMsg2", &RCPtr);
1077 if (RT_SUCCESS(rc))
1078 return (const char *)MMHyperRCToR3(pVM, RCPtr);
1079
1080 return NULL;
1081}
1082
1083
1084/**
1085 * Execute state save operation.
1086 *
1087 * @returns VBox status code.
1088 * @param pVM The cross context VM structure.
1089 * @param pSSM SSM operation handle.
1090 */
1091static DECLCALLBACK(int) vmmR3Save(PVM pVM, PSSMHANDLE pSSM)
1092{
1093 LogFlow(("vmmR3Save:\n"));
1094
1095 /*
1096 * Save the started/stopped state of all CPUs except 0 as it will always
1097 * be running. This avoids breaking the saved state version. :-)
1098 */
1099 for (VMCPUID i = 1; i < pVM->cCpus; i++)
1100 SSMR3PutBool(pSSM, VMCPUSTATE_IS_STARTED(VMCPU_GET_STATE(&pVM->aCpus[i])));
1101
1102 return SSMR3PutU32(pSSM, UINT32_MAX); /* terminator */
1103}
1104
1105
1106/**
1107 * Execute state load operation.
1108 *
1109 * @returns VBox status code.
1110 * @param pVM The cross context VM structure.
1111 * @param pSSM SSM operation handle.
1112 * @param uVersion Data layout version.
1113 * @param uPass The data pass.
1114 */
1115static DECLCALLBACK(int) vmmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
1116{
1117 LogFlow(("vmmR3Load:\n"));
1118 Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);
1119
1120 /*
1121 * Validate version.
1122 */
1123 if ( uVersion != VMM_SAVED_STATE_VERSION
1124 && uVersion != VMM_SAVED_STATE_VERSION_3_0)
1125 {
1126 AssertMsgFailed(("vmmR3Load: Invalid version uVersion=%u!\n", uVersion));
1127 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
1128 }
1129
1130 if (uVersion <= VMM_SAVED_STATE_VERSION_3_0)
1131 {
1132 /* Ignore the stack bottom, stack pointer and stack bits. */
1133 RTRCPTR RCPtrIgnored;
1134 SSMR3GetRCPtr(pSSM, &RCPtrIgnored);
1135 SSMR3GetRCPtr(pSSM, &RCPtrIgnored);
1136#ifdef RT_OS_DARWIN
1137 if ( SSMR3HandleVersion(pSSM) >= VBOX_FULL_VERSION_MAKE(3,0,0)
1138 && SSMR3HandleVersion(pSSM) < VBOX_FULL_VERSION_MAKE(3,1,0)
1139 && SSMR3HandleRevision(pSSM) >= 48858
1140 && ( !strcmp(SSMR3HandleHostOSAndArch(pSSM), "darwin.x86")
1141 || !strcmp(SSMR3HandleHostOSAndArch(pSSM), "") )
1142 )
1143 SSMR3Skip(pSSM, 16384);
1144 else
1145 SSMR3Skip(pSSM, 8192);
1146#else
1147 SSMR3Skip(pSSM, 8192);
1148#endif
1149 }
1150
1151 /*
1152 * Restore the VMCPU states. VCPU 0 is always started.
1153 */
1154 VMCPU_SET_STATE(&pVM->aCpus[0], VMCPUSTATE_STARTED);
1155 for (VMCPUID i = 1; i < pVM->cCpus; i++)
1156 {
1157 bool fStarted;
1158 int rc = SSMR3GetBool(pSSM, &fStarted);
1159 if (RT_FAILURE(rc))
1160 return rc;
1161 VMCPU_SET_STATE(&pVM->aCpus[i], fStarted ? VMCPUSTATE_STARTED : VMCPUSTATE_STOPPED);
1162 }
1163
1164 /* terminator */
1165 uint32_t u32;
1166 int rc = SSMR3GetU32(pSSM, &u32);
1167 if (RT_FAILURE(rc))
1168 return rc;
1169 if (u32 != UINT32_MAX)
1170 {
1171 AssertMsgFailed(("u32=%#x\n", u32));
1172 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
1173 }
1174 return VINF_SUCCESS;
1175}
1176
1177
1178#ifdef VBOX_WITH_RAW_MODE
1179/**
1180 * Resolve a builtin RC symbol.
1181 *
1182 * Called by PDM when loading or relocating RC modules.
1183 *
1184 * @returns VBox status
1185 * @param pVM The cross context VM structure.
1186 * @param pszSymbol Symbol to resolve.
1187 * @param pRCPtrValue Where to store the symbol value.
1188 *
1189 * @remark This has to work before VMMR3Relocate() is called.
1190 */
1191VMMR3_INT_DECL(int) VMMR3GetImportRC(PVM pVM, const char *pszSymbol, PRTRCPTR pRCPtrValue)
1192{
1193 if (!strcmp(pszSymbol, "g_Logger"))
1194 {
1195 if (pVM->vmm.s.pRCLoggerR3)
1196 pVM->vmm.s.pRCLoggerRC = MMHyperR3ToRC(pVM, pVM->vmm.s.pRCLoggerR3);
1197 *pRCPtrValue = pVM->vmm.s.pRCLoggerRC;
1198 }
1199 else if (!strcmp(pszSymbol, "g_RelLogger"))
1200 {
1201# ifdef VBOX_WITH_RC_RELEASE_LOGGING
1202 if (pVM->vmm.s.pRCRelLoggerR3)
1203 pVM->vmm.s.pRCRelLoggerRC = MMHyperR3ToRC(pVM, pVM->vmm.s.pRCRelLoggerR3);
1204 *pRCPtrValue = pVM->vmm.s.pRCRelLoggerRC;
1205# else
1206 *pRCPtrValue = NIL_RTRCPTR;
1207# endif
1208 }
1209 else
1210 return VERR_SYMBOL_NOT_FOUND;
1211 return VINF_SUCCESS;
1212}
1213#endif /* VBOX_WITH_RAW_MODE */
1214
1215
1216/**
1217 * Suspends the CPU yielder.
1218 *
1219 * @param pVM The cross context VM structure.
1220 */
1221VMMR3_INT_DECL(void) VMMR3YieldSuspend(PVM pVM)
1222{
1223 VMCPU_ASSERT_EMT(&pVM->aCpus[0]);
1224 if (!pVM->vmm.s.cYieldResumeMillies)
1225 {
1226 uint64_t u64Now = TMTimerGet(pVM->vmm.s.pYieldTimer);
1227 uint64_t u64Expire = TMTimerGetExpire(pVM->vmm.s.pYieldTimer);
1228 if (u64Now >= u64Expire || u64Expire == ~(uint64_t)0)
1229 pVM->vmm.s.cYieldResumeMillies = pVM->vmm.s.cYieldEveryMillies;
1230 else
1231 pVM->vmm.s.cYieldResumeMillies = TMTimerToMilli(pVM->vmm.s.pYieldTimer, u64Expire - u64Now);
1232 TMTimerStop(pVM->vmm.s.pYieldTimer);
1233 }
1234 pVM->vmm.s.u64LastYield = RTTimeNanoTS();
1235}
1236
1237
1238/**
1239 * Stops the CPU yielder.
1240 *
1241 * @param pVM The cross context VM structure.
1242 */
1243VMMR3_INT_DECL(void) VMMR3YieldStop(PVM pVM)
1244{
1245 if (!pVM->vmm.s.cYieldResumeMillies)
1246 TMTimerStop(pVM->vmm.s.pYieldTimer);
1247 pVM->vmm.s.cYieldResumeMillies = pVM->vmm.s.cYieldEveryMillies;
1248 pVM->vmm.s.u64LastYield = RTTimeNanoTS();
1249}
1250
1251
1252/**
1253 * Resumes the CPU yielder when it has been a suspended or stopped.
1254 *
1255 * @param pVM The cross context VM structure.
1256 */
1257VMMR3_INT_DECL(void) VMMR3YieldResume(PVM pVM)
1258{
1259 if (pVM->vmm.s.cYieldResumeMillies)
1260 {
1261 TMTimerSetMillies(pVM->vmm.s.pYieldTimer, pVM->vmm.s.cYieldResumeMillies);
1262 pVM->vmm.s.cYieldResumeMillies = 0;
1263 }
1264}
1265
1266
1267/**
1268 * Internal timer callback function.
1269 *
1270 * @param pVM The cross context VM structure.
1271 * @param pTimer The timer handle.
1272 * @param pvUser User argument specified upon timer creation.
1273 */
1274static DECLCALLBACK(void) vmmR3YieldEMT(PVM pVM, PTMTIMER pTimer, void *pvUser)
1275{
1276 NOREF(pvUser);
1277
1278 /*
1279 * This really needs some careful tuning. While we shouldn't be too greedy since
1280 * that'll cause the rest of the system to stop up, we shouldn't be too nice either
1281 * because that'll cause us to stop up.
1282 *
1283 * The current logic is to use the default interval when there is no lag worth
1284 * mentioning, but when we start accumulating lag we don't bother yielding at all.
1285 *
1286 * (This depends on the TMCLOCK_VIRTUAL_SYNC to be scheduled before TMCLOCK_REAL
1287 * so the lag is up to date.)
1288 */
1289 const uint64_t u64Lag = TMVirtualSyncGetLag(pVM);
1290 if ( u64Lag < 50000000 /* 50ms */
1291 || ( u64Lag < 1000000000 /* 1s */
1292 && RTTimeNanoTS() - pVM->vmm.s.u64LastYield < 500000000 /* 500 ms */)
1293 )
1294 {
1295 uint64_t u64Elapsed = RTTimeNanoTS();
1296 pVM->vmm.s.u64LastYield = u64Elapsed;
1297
1298 RTThreadYield();
1299
1300#ifdef LOG_ENABLED
1301 u64Elapsed = RTTimeNanoTS() - u64Elapsed;
1302 Log(("vmmR3YieldEMT: %RI64 ns\n", u64Elapsed));
1303#endif
1304 }
1305 TMTimerSetMillies(pTimer, pVM->vmm.s.cYieldEveryMillies);
1306}
1307
1308
1309#ifdef VBOX_WITH_RAW_MODE
1310/**
1311 * Executes guest code in the raw-mode context.
1312 *
1313 * @param pVM The cross context VM structure.
1314 * @param pVCpu The cross context virtual CPU structure.
1315 */
1316VMMR3_INT_DECL(int) VMMR3RawRunGC(PVM pVM, PVMCPU pVCpu)
1317{
1318 Log2(("VMMR3RawRunGC: (cs:eip=%04x:%08x)\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
1319
1320 AssertReturn(pVM->cCpus == 1, VERR_RAW_MODE_INVALID_SMP);
1321
1322 /*
1323 * Set the hypervisor to resume executing a CPUM resume function
1324 * in CPUMRCA.asm.
1325 */
1326 CPUMSetHyperState(pVCpu,
1327 CPUMGetGuestEFlags(pVCpu) & X86_EFL_VM
1328 ? pVM->vmm.s.pfnCPUMRCResumeGuestV86
1329 : pVM->vmm.s.pfnCPUMRCResumeGuest, /* eip */
1330 pVCpu->vmm.s.pbEMTStackBottomRC, /* esp */
1331 0, /* eax */
1332 VM_RC_ADDR(pVM, &pVCpu->cpum) /* edx */);
1333
1334 /*
1335 * We hide log flushes (outer) and hypervisor interrupts (inner).
1336 */
1337 for (;;)
1338 {
1339#ifdef VBOX_STRICT
1340 if (RT_UNLIKELY(!CPUMGetHyperCR3(pVCpu) || CPUMGetHyperCR3(pVCpu) != PGMGetHyperCR3(pVCpu)))
1341 EMR3FatalError(pVCpu, VERR_VMM_HYPER_CR3_MISMATCH);
1342 PGMMapCheck(pVM);
1343# ifdef VBOX_WITH_SAFE_STR
1344 SELMR3CheckShadowTR(pVM);
1345# endif
1346#endif
1347 int rc;
1348 do
1349 {
1350#ifdef NO_SUPCALLR0VMM
1351 rc = VERR_GENERAL_FAILURE;
1352#else
1353 rc = SUPR3CallVMMR0Fast(pVM->pVMR0, VMMR0_DO_RAW_RUN, 0);
1354 if (RT_LIKELY(rc == VINF_SUCCESS))
1355 rc = pVCpu->vmm.s.iLastGZRc;
1356#endif
1357 } while (rc == VINF_EM_RAW_INTERRUPT_HYPER);
1358
1359 /*
1360 * Flush the logs.
1361 */
1362#ifdef LOG_ENABLED
1363 PRTLOGGERRC pLogger = pVM->vmm.s.pRCLoggerR3;
1364 if ( pLogger
1365 && pLogger->offScratch > 0)
1366 RTLogFlushRC(NULL, pLogger);
1367#endif
1368#ifdef VBOX_WITH_RC_RELEASE_LOGGING
1369 PRTLOGGERRC pRelLogger = pVM->vmm.s.pRCRelLoggerR3;
1370 if (RT_UNLIKELY(pRelLogger && pRelLogger->offScratch > 0))
1371 RTLogFlushRC(RTLogRelGetDefaultInstance(), pRelLogger);
1372#endif
1373 if (rc != VINF_VMM_CALL_HOST)
1374 {
1375 Log2(("VMMR3RawRunGC: returns %Rrc (cs:eip=%04x:%08x)\n", rc, CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
1376 return rc;
1377 }
1378 rc = vmmR3ServiceCallRing3Request(pVM, pVCpu);
1379 if (RT_FAILURE(rc))
1380 return rc;
1381 /* Resume GC */
1382 }
1383}
1384#endif /* VBOX_WITH_RAW_MODE */
1385
1386
1387/**
1388 * Executes guest code (Intel VT-x and AMD-V).
1389 *
1390 * @param pVM The cross context VM structure.
1391 * @param pVCpu The cross context virtual CPU structure.
1392 */
1393VMMR3_INT_DECL(int) VMMR3HmRunGC(PVM pVM, PVMCPU pVCpu)
1394{
1395 Log2(("VMMR3HmRunGC: (cs:rip=%04x:%RX64)\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestRIP(pVCpu)));
1396
1397 for (;;)
1398 {
1399 int rc;
1400 do
1401 {
1402#ifdef NO_SUPCALLR0VMM
1403 rc = VERR_GENERAL_FAILURE;
1404#else
1405 rc = SUPR3CallVMMR0Fast(pVM->pVMR0, VMMR0_DO_HM_RUN, pVCpu->idCpu);
1406 if (RT_LIKELY(rc == VINF_SUCCESS))
1407 rc = pVCpu->vmm.s.iLastGZRc;
1408#endif
1409 } while (rc == VINF_EM_RAW_INTERRUPT_HYPER);
1410
1411#if 0 /* todo triggers too often */
1412 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TO_R3));
1413#endif
1414
1415#ifdef LOG_ENABLED
1416 /*
1417 * Flush the log
1418 */
1419 PVMMR0LOGGER pR0LoggerR3 = pVCpu->vmm.s.pR0LoggerR3;
1420 if ( pR0LoggerR3
1421 && pR0LoggerR3->Logger.offScratch > 0)
1422 RTLogFlushR0(NULL, &pR0LoggerR3->Logger);
1423#endif /* !LOG_ENABLED */
1424 if (rc != VINF_VMM_CALL_HOST)
1425 {
1426 Log2(("VMMR3HmRunGC: returns %Rrc (cs:rip=%04x:%RX64)\n", rc, CPUMGetGuestCS(pVCpu), CPUMGetGuestRIP(pVCpu)));
1427 return rc;
1428 }
1429 rc = vmmR3ServiceCallRing3Request(pVM, pVCpu);
1430 if (RT_FAILURE(rc))
1431 return rc;
1432 /* Resume R0 */
1433 }
1434}
1435
1436
1437/**
1438 * VCPU worker for VMMSendStartupIpi.
1439 *
1440 * @param pVM The cross context VM structure.
1441 * @param idCpu Virtual CPU to perform SIPI on.
1442 * @param uVector The SIPI vector.
1443 */
1444static DECLCALLBACK(int) vmmR3SendStarupIpi(PVM pVM, VMCPUID idCpu, uint32_t uVector)
1445{
1446 PVMCPU pVCpu = VMMGetCpuById(pVM, idCpu);
1447 VMCPU_ASSERT_EMT(pVCpu);
1448
1449 /*
1450 * Active, halt and shutdown states of the processor all block SIPIs.
1451 * So we can safely discard the SIPI. See Intel spec. 26.6.2 "Activity State".
1452 */
1453 if (EMGetState(pVCpu) != EMSTATE_WAIT_SIPI)
1454 return VERR_ACCESS_DENIED;
1455
1456
1457 PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
1458
1459 pCtx->cs.Sel = uVector << 8;
1460 pCtx->cs.ValidSel = uVector << 8;
1461 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1462 pCtx->cs.u64Base = uVector << 12;
1463 pCtx->cs.u32Limit = UINT32_C(0x0000ffff);
1464 pCtx->rip = 0;
1465
1466 Log(("vmmR3SendSipi for VCPU %d with vector %x\n", idCpu, uVector));
1467
1468# if 1 /* If we keep the EMSTATE_WAIT_SIPI method, then move this to EM.cpp. */
1469 EMSetState(pVCpu, EMSTATE_HALTED);
1470 return VINF_EM_RESCHEDULE;
1471# else /* And if we go the VMCPU::enmState way it can stay here. */
1472 VMCPU_ASSERT_STATE(pVCpu, VMCPUSTATE_STOPPED);
1473 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
1474 return VINF_SUCCESS;
1475# endif
1476}
1477
1478
1479static DECLCALLBACK(int) vmmR3SendInitIpi(PVM pVM, VMCPUID idCpu)
1480{
1481 PVMCPU pVCpu = VMMGetCpuById(pVM, idCpu);
1482 VMCPU_ASSERT_EMT(pVCpu);
1483
1484 Log(("vmmR3SendInitIpi for VCPU %d\n", idCpu));
1485
1486 PGMR3ResetCpu(pVM, pVCpu);
1487 PDMR3ResetCpu(pVCpu); /* Only clears pending interrupts force flags */
1488#ifdef VBOX_WITH_NEW_APIC
1489 APICR3InitIpi(pVCpu);
1490#endif
1491 TRPMR3ResetCpu(pVCpu);
1492 CPUMR3ResetCpu(pVM, pVCpu);
1493 EMR3ResetCpu(pVCpu);
1494 HMR3ResetCpu(pVCpu);
1495
1496 /* This will trickle up on the target EMT. */
1497 return VINF_EM_WAIT_SIPI;
1498}
1499
1500
1501/**
1502 * Sends a Startup IPI to the virtual CPU by setting CS:EIP into
1503 * vector-dependent state and unhalting processor.
1504 *
1505 * @param pVM The cross context VM structure.
1506 * @param idCpu Virtual CPU to perform SIPI on.
1507 * @param uVector SIPI vector.
1508 */
1509VMMR3_INT_DECL(void) VMMR3SendStartupIpi(PVM pVM, VMCPUID idCpu, uint32_t uVector)
1510{
1511 AssertReturnVoid(idCpu < pVM->cCpus);
1512
1513 int rc = VMR3ReqCallNoWait(pVM, idCpu, (PFNRT)vmmR3SendStarupIpi, 3, pVM, idCpu, uVector);
1514 AssertRC(rc);
1515}
1516
1517
1518/**
1519 * Sends init IPI to the virtual CPU.
1520 *
1521 * @param pVM The cross context VM structure.
1522 * @param idCpu Virtual CPU to perform int IPI on.
1523 */
1524VMMR3_INT_DECL(void) VMMR3SendInitIpi(PVM pVM, VMCPUID idCpu)
1525{
1526 AssertReturnVoid(idCpu < pVM->cCpus);
1527
1528 int rc = VMR3ReqCallNoWait(pVM, idCpu, (PFNRT)vmmR3SendInitIpi, 2, pVM, idCpu);
1529 AssertRC(rc);
1530}
1531
1532
1533/**
1534 * Registers the guest memory range that can be used for patching.
1535 *
1536 * @returns VBox status code.
1537 * @param pVM The cross context VM structure.
1538 * @param pPatchMem Patch memory range.
1539 * @param cbPatchMem Size of the memory range.
1540 */
1541VMMR3DECL(int) VMMR3RegisterPatchMemory(PVM pVM, RTGCPTR pPatchMem, unsigned cbPatchMem)
1542{
1543 VM_ASSERT_EMT(pVM);
1544 if (HMIsEnabled(pVM))
1545 return HMR3EnablePatching(pVM, pPatchMem, cbPatchMem);
1546
1547 return VERR_NOT_SUPPORTED;
1548}
1549
1550
1551/**
1552 * Deregisters the guest memory range that can be used for patching.
1553 *
1554 * @returns VBox status code.
1555 * @param pVM The cross context VM structure.
1556 * @param pPatchMem Patch memory range.
1557 * @param cbPatchMem Size of the memory range.
1558 */
1559VMMR3DECL(int) VMMR3DeregisterPatchMemory(PVM pVM, RTGCPTR pPatchMem, unsigned cbPatchMem)
1560{
1561 if (HMIsEnabled(pVM))
1562 return HMR3DisablePatching(pVM, pPatchMem, cbPatchMem);
1563
1564 return VINF_SUCCESS;
1565}
1566
1567
1568/**
1569 * Common recursion handler for the other EMTs.
1570 *
1571 * @returns Strict VBox status code.
1572 * @param pVM The cross context VM structure.
1573 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1574 * @param rcStrict Current status code to be combined with the one
1575 * from this recursion and returned.
1576 */
1577static VBOXSTRICTRC vmmR3EmtRendezvousCommonRecursion(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
1578{
1579 int rc2;
1580
1581 /*
1582 * We wait here while the initiator of this recursion reconfigures
1583 * everything. The last EMT to get in signals the initiator.
1584 */
1585 if (ASMAtomicIncU32(&pVM->vmm.s.cRendezvousEmtsRecursingPush) == pVM->cCpus)
1586 {
1587 rc2 = RTSemEventSignal(pVM->vmm.s.hEvtRendezvousRecursionPushCaller);
1588 AssertLogRelRC(rc2);
1589 }
1590
1591 rc2 = RTSemEventMultiWait(pVM->vmm.s.hEvtMulRendezvousRecursionPush, RT_INDEFINITE_WAIT);
1592 AssertLogRelRC(rc2);
1593
1594 /*
1595 * Do the normal rendezvous processing.
1596 */
1597 VBOXSTRICTRC rcStrict2 = vmmR3EmtRendezvousCommon(pVM, pVCpu, false /* fIsCaller */, pVM->vmm.s.fRendezvousFlags,
1598 pVM->vmm.s.pfnRendezvous, pVM->vmm.s.pvRendezvousUser);
1599
1600 /*
1601 * Wait for the initiator to restore everything.
1602 */
1603 rc2 = RTSemEventMultiWait(pVM->vmm.s.hEvtMulRendezvousRecursionPop, RT_INDEFINITE_WAIT);
1604 AssertLogRelRC(rc2);
1605
1606 /*
1607 * Last thread out of here signals the initiator.
1608 */
1609 if (ASMAtomicIncU32(&pVM->vmm.s.cRendezvousEmtsRecursingPop) == pVM->cCpus)
1610 {
1611 rc2 = RTSemEventSignal(pVM->vmm.s.hEvtRendezvousRecursionPopCaller);
1612 AssertLogRelRC(rc2);
1613 }
1614
1615 /*
1616 * Merge status codes and return.
1617 */
1618 AssertRC(VBOXSTRICTRC_VAL(rcStrict2));
1619 if ( rcStrict2 != VINF_SUCCESS
1620 && ( rcStrict == VINF_SUCCESS
1621 || rcStrict > rcStrict2))
1622 rcStrict = rcStrict2;
1623 return rcStrict;
1624}
1625
1626
1627/**
1628 * Count returns and have the last non-caller EMT wake up the caller.
1629 *
1630 * @returns VBox strict informational status code for EM scheduling. No failures
1631 * will be returned here, those are for the caller only.
1632 *
1633 * @param pVM The cross context VM structure.
1634 * @param rcStrict The current accumulated recursive status code,
1635 * to be merged with i32RendezvousStatus and
1636 * returned.
1637 */
1638DECL_FORCE_INLINE(VBOXSTRICTRC) vmmR3EmtRendezvousNonCallerReturn(PVM pVM, VBOXSTRICTRC rcStrict)
1639{
1640 VBOXSTRICTRC rcStrict2 = ASMAtomicReadS32(&pVM->vmm.s.i32RendezvousStatus);
1641
1642 uint32_t cReturned = ASMAtomicIncU32(&pVM->vmm.s.cRendezvousEmtsReturned);
1643 if (cReturned == pVM->cCpus - 1U)
1644 {
1645 int rc = RTSemEventSignal(pVM->vmm.s.hEvtRendezvousDoneCaller);
1646 AssertLogRelRC(rc);
1647 }
1648
1649 /*
1650 * Merge the status codes, ignoring error statuses in this code path.
1651 */
1652 AssertLogRelMsgReturn( rcStrict2 <= VINF_SUCCESS
1653 || (rcStrict2 >= VINF_EM_FIRST && rcStrict2 <= VINF_EM_LAST),
1654 ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict2)),
1655 VERR_IPE_UNEXPECTED_INFO_STATUS);
1656
1657 if (RT_SUCCESS(rcStrict2))
1658 {
1659 if ( rcStrict2 != VINF_SUCCESS
1660 && ( rcStrict == VINF_SUCCESS
1661 || rcStrict > rcStrict2))
1662 rcStrict = rcStrict2;
1663 }
1664 return rcStrict;
1665}
1666
1667
1668/**
1669 * Common worker for VMMR3EmtRendezvous and VMMR3EmtRendezvousFF.
1670 *
1671 * @returns VBox strict informational status code for EM scheduling. No failures
1672 * will be returned here, those are for the caller only. When
1673 * fIsCaller is set, VINF_SUCCESS is always returned.
1674 *
1675 * @param pVM The cross context VM structure.
1676 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1677 * @param fIsCaller Whether we're the VMMR3EmtRendezvous caller or
1678 * not.
1679 * @param fFlags The flags.
1680 * @param pfnRendezvous The callback.
1681 * @param pvUser The user argument for the callback.
1682 */
1683static VBOXSTRICTRC vmmR3EmtRendezvousCommon(PVM pVM, PVMCPU pVCpu, bool fIsCaller,
1684 uint32_t fFlags, PFNVMMEMTRENDEZVOUS pfnRendezvous, void *pvUser)
1685{
1686 int rc;
1687 VBOXSTRICTRC rcStrictRecursion = VINF_SUCCESS;
1688
1689 /*
1690 * Enter, the last EMT triggers the next callback phase.
1691 */
1692 uint32_t cEntered = ASMAtomicIncU32(&pVM->vmm.s.cRendezvousEmtsEntered);
1693 if (cEntered != pVM->cCpus)
1694 {
1695 if ((fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE)
1696 {
1697 /* Wait for our turn. */
1698 for (;;)
1699 {
1700 rc = RTSemEventWait(pVM->vmm.s.hEvtRendezvousEnterOneByOne, RT_INDEFINITE_WAIT);
1701 AssertLogRelRC(rc);
1702 if (!pVM->vmm.s.fRendezvousRecursion)
1703 break;
1704 rcStrictRecursion = vmmR3EmtRendezvousCommonRecursion(pVM, pVCpu, rcStrictRecursion);
1705 }
1706 }
1707 else if ((fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE)
1708 {
1709 /* Wait for the last EMT to arrive and wake everyone up. */
1710 rc = RTSemEventMultiWait(pVM->vmm.s.hEvtMulRendezvousEnterAllAtOnce, RT_INDEFINITE_WAIT);
1711 AssertLogRelRC(rc);
1712 Assert(!pVM->vmm.s.fRendezvousRecursion);
1713 }
1714 else if ( (fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ASCENDING
1715 || (fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING)
1716 {
1717 /* Wait for our turn. */
1718 for (;;)
1719 {
1720 rc = RTSemEventWait(pVM->vmm.s.pahEvtRendezvousEnterOrdered[pVCpu->idCpu], RT_INDEFINITE_WAIT);
1721 AssertLogRelRC(rc);
1722 if (!pVM->vmm.s.fRendezvousRecursion)
1723 break;
1724 rcStrictRecursion = vmmR3EmtRendezvousCommonRecursion(pVM, pVCpu, rcStrictRecursion);
1725 }
1726 }
1727 else
1728 {
1729 Assert((fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE);
1730
1731 /*
1732 * The execute once is handled specially to optimize the code flow.
1733 *
1734 * The last EMT to arrive will perform the callback and the other
1735 * EMTs will wait on the Done/DoneCaller semaphores (instead of
1736 * the EnterOneByOne/AllAtOnce) in the meanwhile. When the callback
1737 * returns, that EMT will initiate the normal return sequence.
1738 */
1739 if (!fIsCaller)
1740 {
1741 for (;;)
1742 {
1743 rc = RTSemEventMultiWait(pVM->vmm.s.hEvtMulRendezvousDone, RT_INDEFINITE_WAIT);
1744 AssertLogRelRC(rc);
1745 if (!pVM->vmm.s.fRendezvousRecursion)
1746 break;
1747 rcStrictRecursion = vmmR3EmtRendezvousCommonRecursion(pVM, pVCpu, rcStrictRecursion);
1748 }
1749
1750 return vmmR3EmtRendezvousNonCallerReturn(pVM, rcStrictRecursion);
1751 }
1752 return VINF_SUCCESS;
1753 }
1754 }
1755 else
1756 {
1757 /*
1758 * All EMTs are waiting, clear the FF and take action according to the
1759 * execution method.
1760 */
1761 VM_FF_CLEAR(pVM, VM_FF_EMT_RENDEZVOUS);
1762
1763 if ((fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE)
1764 {
1765 /* Wake up everyone. */
1766 rc = RTSemEventMultiSignal(pVM->vmm.s.hEvtMulRendezvousEnterAllAtOnce);
1767 AssertLogRelRC(rc);
1768 }
1769 else if ( (fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ASCENDING
1770 || (fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING)
1771 {
1772 /* Figure out who to wake up and wake it up. If it's ourself, then
1773 it's easy otherwise wait for our turn. */
1774 VMCPUID iFirst = (fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ASCENDING
1775 ? 0
1776 : pVM->cCpus - 1U;
1777 if (pVCpu->idCpu != iFirst)
1778 {
1779 rc = RTSemEventSignal(pVM->vmm.s.pahEvtRendezvousEnterOrdered[iFirst]);
1780 AssertLogRelRC(rc);
1781 for (;;)
1782 {
1783 rc = RTSemEventWait(pVM->vmm.s.pahEvtRendezvousEnterOrdered[pVCpu->idCpu], RT_INDEFINITE_WAIT);
1784 AssertLogRelRC(rc);
1785 if (!pVM->vmm.s.fRendezvousRecursion)
1786 break;
1787 rcStrictRecursion = vmmR3EmtRendezvousCommonRecursion(pVM, pVCpu, rcStrictRecursion);
1788 }
1789 }
1790 }
1791 /* else: execute the handler on the current EMT and wake up one or more threads afterwards. */
1792 }
1793
1794
1795 /*
1796 * Do the callback and update the status if necessary.
1797 */
1798 if ( !(fFlags & VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR)
1799 || RT_SUCCESS(ASMAtomicUoReadS32(&pVM->vmm.s.i32RendezvousStatus)) )
1800 {
1801 VBOXSTRICTRC rcStrict2 = pfnRendezvous(pVM, pVCpu, pvUser);
1802 if (rcStrict2 != VINF_SUCCESS)
1803 {
1804 AssertLogRelMsg( rcStrict2 <= VINF_SUCCESS
1805 || (rcStrict2 >= VINF_EM_FIRST && rcStrict2 <= VINF_EM_LAST),
1806 ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict2)));
1807 int32_t i32RendezvousStatus;
1808 do
1809 {
1810 i32RendezvousStatus = ASMAtomicUoReadS32(&pVM->vmm.s.i32RendezvousStatus);
1811 if ( rcStrict2 == i32RendezvousStatus
1812 || RT_FAILURE(i32RendezvousStatus)
1813 || ( i32RendezvousStatus != VINF_SUCCESS
1814 && rcStrict2 > i32RendezvousStatus))
1815 break;
1816 } while (!ASMAtomicCmpXchgS32(&pVM->vmm.s.i32RendezvousStatus, VBOXSTRICTRC_VAL(rcStrict2), i32RendezvousStatus));
1817 }
1818 }
1819
1820 /*
1821 * Increment the done counter and take action depending on whether we're
1822 * the last to finish callback execution.
1823 */
1824 uint32_t cDone = ASMAtomicIncU32(&pVM->vmm.s.cRendezvousEmtsDone);
1825 if ( cDone != pVM->cCpus
1826 && (fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) != VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE)
1827 {
1828 /* Signal the next EMT? */
1829 if ((fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE)
1830 {
1831 rc = RTSemEventSignal(pVM->vmm.s.hEvtRendezvousEnterOneByOne);
1832 AssertLogRelRC(rc);
1833 }
1834 else if ((fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ASCENDING)
1835 {
1836 Assert(cDone == pVCpu->idCpu + 1U);
1837 rc = RTSemEventSignal(pVM->vmm.s.pahEvtRendezvousEnterOrdered[pVCpu->idCpu + 1U]);
1838 AssertLogRelRC(rc);
1839 }
1840 else if ((fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING)
1841 {
1842 Assert(pVM->cCpus - cDone == pVCpu->idCpu);
1843 rc = RTSemEventSignal(pVM->vmm.s.pahEvtRendezvousEnterOrdered[pVM->cCpus - cDone - 1U]);
1844 AssertLogRelRC(rc);
1845 }
1846
1847 /* Wait for the rest to finish (the caller waits on hEvtRendezvousDoneCaller). */
1848 if (!fIsCaller)
1849 {
1850 for (;;)
1851 {
1852 rc = RTSemEventMultiWait(pVM->vmm.s.hEvtMulRendezvousDone, RT_INDEFINITE_WAIT);
1853 AssertLogRelRC(rc);
1854 if (!pVM->vmm.s.fRendezvousRecursion)
1855 break;
1856 rcStrictRecursion = vmmR3EmtRendezvousCommonRecursion(pVM, pVCpu, rcStrictRecursion);
1857 }
1858 }
1859 }
1860 else
1861 {
1862 /* Callback execution is all done, tell the rest to return. */
1863 rc = RTSemEventMultiSignal(pVM->vmm.s.hEvtMulRendezvousDone);
1864 AssertLogRelRC(rc);
1865 }
1866
1867 if (!fIsCaller)
1868 return vmmR3EmtRendezvousNonCallerReturn(pVM, rcStrictRecursion);
1869 return rcStrictRecursion;
1870}
1871
1872
1873/**
1874 * Called in response to VM_FF_EMT_RENDEZVOUS.
1875 *
1876 * @returns VBox strict status code - EM scheduling. No errors will be returned
1877 * here, nor will any non-EM scheduling status codes be returned.
1878 *
1879 * @param pVM The cross context VM structure.
1880 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1881 *
1882 * @thread EMT
1883 */
1884VMMR3_INT_DECL(int) VMMR3EmtRendezvousFF(PVM pVM, PVMCPU pVCpu)
1885{
1886 Assert(!pVCpu->vmm.s.fInRendezvous);
1887 Log(("VMMR3EmtRendezvousFF: EMT%#u\n", pVCpu->idCpu));
1888 pVCpu->vmm.s.fInRendezvous = true;
1889 VBOXSTRICTRC rcStrict = vmmR3EmtRendezvousCommon(pVM, pVCpu, false /* fIsCaller */, pVM->vmm.s.fRendezvousFlags,
1890 pVM->vmm.s.pfnRendezvous, pVM->vmm.s.pvRendezvousUser);
1891 pVCpu->vmm.s.fInRendezvous = false;
1892 Log(("VMMR3EmtRendezvousFF: EMT%#u returns %Rrc\n", pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict)));
1893 return VBOXSTRICTRC_TODO(rcStrict);
1894}
1895
1896
1897/**
1898 * Helper for resetting an single wakeup event sempahore.
1899 *
1900 * @returns VERR_TIMEOUT on success, RTSemEventWait status otherwise.
1901 * @param hEvt The event semaphore to reset.
1902 */
1903static int vmmR3HlpResetEvent(RTSEMEVENT hEvt)
1904{
1905 for (uint32_t cLoops = 0; ; cLoops++)
1906 {
1907 int rc = RTSemEventWait(hEvt, 0 /*cMsTimeout*/);
1908 if (rc != VINF_SUCCESS || cLoops > _4K)
1909 return rc;
1910 }
1911}
1912
1913
1914/**
1915 * Worker for VMMR3EmtRendezvous that handles recursion.
1916 *
1917 * @returns VBox strict status code. This will be the first error,
1918 * VINF_SUCCESS, or an EM scheduling status code.
1919 *
1920 * @param pVM The cross context VM structure.
1921 * @param pVCpu The cross context virtual CPU structure of the
1922 * calling EMT.
1923 * @param fFlags Flags indicating execution methods. See
1924 * grp_VMMR3EmtRendezvous_fFlags.
1925 * @param pfnRendezvous The callback.
1926 * @param pvUser User argument for the callback.
1927 *
1928 * @thread EMT(pVCpu)
1929 */
1930static VBOXSTRICTRC vmmR3EmtRendezvousRecursive(PVM pVM, PVMCPU pVCpu, uint32_t fFlags,
1931 PFNVMMEMTRENDEZVOUS pfnRendezvous, void *pvUser)
1932{
1933 Log(("vmmR3EmtRendezvousRecursive: %#x EMT#%u depth=%d\n", fFlags, pVCpu->idCpu, pVM->vmm.s.cRendezvousRecursions));
1934 AssertLogRelReturn(pVM->vmm.s.cRendezvousRecursions < 3, VERR_DEADLOCK);
1935 Assert(pVCpu->vmm.s.fInRendezvous);
1936
1937 /*
1938 * Save the current state.
1939 */
1940 uint32_t const fParentFlags = pVM->vmm.s.fRendezvousFlags;
1941 uint32_t const cParentDone = pVM->vmm.s.cRendezvousEmtsDone;
1942 int32_t const iParentStatus = pVM->vmm.s.i32RendezvousStatus;
1943 PFNVMMEMTRENDEZVOUS const pfnParent = pVM->vmm.s.pfnRendezvous;
1944 void * const pvParentUser = pVM->vmm.s.pvRendezvousUser;
1945
1946 /*
1947 * Check preconditions and save the current state.
1948 */
1949 AssertReturn( (fParentFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ASCENDING
1950 || (fParentFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING
1951 || (fParentFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE
1952 || (fParentFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE,
1953 VERR_INTERNAL_ERROR);
1954 AssertReturn(pVM->vmm.s.cRendezvousEmtsEntered == pVM->cCpus, VERR_INTERNAL_ERROR_2);
1955 AssertReturn(pVM->vmm.s.cRendezvousEmtsReturned == 0, VERR_INTERNAL_ERROR_3);
1956
1957 /*
1958 * Reset the recursion prep and pop semaphores.
1959 */
1960 int rc = RTSemEventMultiReset(pVM->vmm.s.hEvtMulRendezvousRecursionPush);
1961 AssertLogRelRCReturn(rc, rc);
1962 rc = RTSemEventMultiReset(pVM->vmm.s.hEvtMulRendezvousRecursionPop);
1963 AssertLogRelRCReturn(rc, rc);
1964 rc = vmmR3HlpResetEvent(pVM->vmm.s.hEvtRendezvousRecursionPushCaller);
1965 AssertLogRelMsgReturn(rc == VERR_TIMEOUT, ("%Rrc\n", rc), RT_FAILURE_NP(rc) ? rc : VERR_IPE_UNEXPECTED_INFO_STATUS);
1966 rc = vmmR3HlpResetEvent(pVM->vmm.s.hEvtRendezvousRecursionPopCaller);
1967 AssertLogRelMsgReturn(rc == VERR_TIMEOUT, ("%Rrc\n", rc), RT_FAILURE_NP(rc) ? rc : VERR_IPE_UNEXPECTED_INFO_STATUS);
1968
1969 /*
1970 * Usher the other thread into the recursion routine.
1971 */
1972 ASMAtomicWriteU32(&pVM->vmm.s.cRendezvousEmtsRecursingPush, 0);
1973 ASMAtomicWriteBool(&pVM->vmm.s.fRendezvousRecursion, true);
1974
1975 uint32_t cLeft = pVM->cCpus - (cParentDone + 1U);
1976 if ((fParentFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE)
1977 while (cLeft-- > 0)
1978 {
1979 rc = RTSemEventSignal(pVM->vmm.s.hEvtRendezvousEnterOneByOne);
1980 AssertLogRelRC(rc);
1981 }
1982 else if ((fParentFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ASCENDING)
1983 {
1984 Assert(cLeft == pVM->cCpus - (pVCpu->idCpu + 1U));
1985 for (VMCPUID iCpu = pVCpu->idCpu + 1U; iCpu < pVM->cCpus; iCpu++)
1986 {
1987 rc = RTSemEventSignal(pVM->vmm.s.pahEvtRendezvousEnterOrdered[iCpu]);
1988 AssertLogRelRC(rc);
1989 }
1990 }
1991 else if ((fParentFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING)
1992 {
1993 Assert(cLeft == pVCpu->idCpu);
1994 for (VMCPUID iCpu = pVCpu->idCpu; iCpu > 0; iCpu--)
1995 {
1996 rc = RTSemEventSignal(pVM->vmm.s.pahEvtRendezvousEnterOrdered[iCpu - 1U]);
1997 AssertLogRelRC(rc);
1998 }
1999 }
2000 else
2001 AssertLogRelReturn((fParentFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE,
2002 VERR_INTERNAL_ERROR_4);
2003
2004 rc = RTSemEventMultiSignal(pVM->vmm.s.hEvtMulRendezvousDone);
2005 AssertLogRelRC(rc);
2006 rc = RTSemEventSignal(pVM->vmm.s.hEvtRendezvousDoneCaller);
2007 AssertLogRelRC(rc);
2008
2009
2010 /*
2011 * Wait for the EMTs to wake up and get out of the parent rendezvous code.
2012 */
2013 if (ASMAtomicIncU32(&pVM->vmm.s.cRendezvousEmtsRecursingPush) != pVM->cCpus)
2014 {
2015 rc = RTSemEventWait(pVM->vmm.s.hEvtRendezvousRecursionPushCaller, RT_INDEFINITE_WAIT);
2016 AssertLogRelRC(rc);
2017 }
2018
2019 ASMAtomicWriteBool(&pVM->vmm.s.fRendezvousRecursion, false);
2020
2021 /*
2022 * Clear the slate and setup the new rendezvous.
2023 */
2024 for (VMCPUID i = 0; i < pVM->cCpus; i++)
2025 {
2026 rc = vmmR3HlpResetEvent(pVM->vmm.s.pahEvtRendezvousEnterOrdered[i]);
2027 AssertLogRelMsg(rc == VERR_TIMEOUT, ("%Rrc\n", rc));
2028 }
2029 rc = vmmR3HlpResetEvent(pVM->vmm.s.hEvtRendezvousEnterOneByOne); AssertLogRelMsg(rc == VERR_TIMEOUT, ("%Rrc\n", rc));
2030 rc = RTSemEventMultiReset(pVM->vmm.s.hEvtMulRendezvousEnterAllAtOnce); AssertLogRelRC(rc);
2031 rc = RTSemEventMultiReset(pVM->vmm.s.hEvtMulRendezvousDone); AssertLogRelRC(rc);
2032 rc = vmmR3HlpResetEvent(pVM->vmm.s.hEvtRendezvousDoneCaller); AssertLogRelMsg(rc == VERR_TIMEOUT, ("%Rrc\n", rc));
2033
2034 ASMAtomicWriteU32(&pVM->vmm.s.cRendezvousEmtsEntered, 0);
2035 ASMAtomicWriteU32(&pVM->vmm.s.cRendezvousEmtsDone, 0);
2036 ASMAtomicWriteU32(&pVM->vmm.s.cRendezvousEmtsReturned, 0);
2037 ASMAtomicWriteS32(&pVM->vmm.s.i32RendezvousStatus, VINF_SUCCESS);
2038 ASMAtomicWritePtr((void * volatile *)&pVM->vmm.s.pfnRendezvous, (void *)(uintptr_t)pfnRendezvous);
2039 ASMAtomicWritePtr(&pVM->vmm.s.pvRendezvousUser, pvUser);
2040 ASMAtomicWriteU32(&pVM->vmm.s.fRendezvousFlags, fFlags);
2041 ASMAtomicIncU32(&pVM->vmm.s.cRendezvousRecursions);
2042
2043 /*
2044 * We're ready to go now, do normal rendezvous processing.
2045 */
2046 rc = RTSemEventMultiSignal(pVM->vmm.s.hEvtMulRendezvousRecursionPush);
2047 AssertLogRelRC(rc);
2048
2049 VBOXSTRICTRC rcStrict = vmmR3EmtRendezvousCommon(pVM, pVCpu, true /*fIsCaller*/, fFlags, pfnRendezvous, pvUser);
2050
2051 /*
2052 * The caller waits for the other EMTs to be done, return and waiting on the
2053 * pop semaphore.
2054 */
2055 for (;;)
2056 {
2057 rc = RTSemEventWait(pVM->vmm.s.hEvtRendezvousDoneCaller, RT_INDEFINITE_WAIT);
2058 AssertLogRelRC(rc);
2059 if (!pVM->vmm.s.fRendezvousRecursion)
2060 break;
2061 rcStrict = vmmR3EmtRendezvousCommonRecursion(pVM, pVCpu, rcStrict);
2062 }
2063
2064 /*
2065 * Get the return code and merge it with the above recursion status.
2066 */
2067 VBOXSTRICTRC rcStrict2 = pVM->vmm.s.i32RendezvousStatus;
2068 if ( rcStrict2 != VINF_SUCCESS
2069 && ( rcStrict == VINF_SUCCESS
2070 || rcStrict > rcStrict2))
2071 rcStrict = rcStrict2;
2072
2073 /*
2074 * Restore the parent rendezvous state.
2075 */
2076 for (VMCPUID i = 0; i < pVM->cCpus; i++)
2077 {
2078 rc = vmmR3HlpResetEvent(pVM->vmm.s.pahEvtRendezvousEnterOrdered[i]);
2079 AssertLogRelMsg(rc == VERR_TIMEOUT, ("%Rrc\n", rc));
2080 }
2081 rc = vmmR3HlpResetEvent(pVM->vmm.s.hEvtRendezvousEnterOneByOne); AssertLogRelMsg(rc == VERR_TIMEOUT, ("%Rrc\n", rc));
2082 rc = RTSemEventMultiReset(pVM->vmm.s.hEvtMulRendezvousEnterAllAtOnce); AssertLogRelRC(rc);
2083 rc = RTSemEventMultiReset(pVM->vmm.s.hEvtMulRendezvousDone); AssertLogRelRC(rc);
2084 rc = vmmR3HlpResetEvent(pVM->vmm.s.hEvtRendezvousDoneCaller); AssertLogRelMsg(rc == VERR_TIMEOUT, ("%Rrc\n", rc));
2085
2086 ASMAtomicWriteU32(&pVM->vmm.s.cRendezvousEmtsEntered, pVM->cCpus);
2087 ASMAtomicWriteU32(&pVM->vmm.s.cRendezvousEmtsReturned, 0);
2088 ASMAtomicWriteU32(&pVM->vmm.s.cRendezvousEmtsDone, cParentDone);
2089 ASMAtomicWriteS32(&pVM->vmm.s.i32RendezvousStatus, iParentStatus);
2090 ASMAtomicWriteU32(&pVM->vmm.s.fRendezvousFlags, fParentFlags);
2091 ASMAtomicWritePtr(&pVM->vmm.s.pvRendezvousUser, pvParentUser);
2092 ASMAtomicWritePtr((void * volatile *)&pVM->vmm.s.pfnRendezvous, (void *)(uintptr_t)pfnParent);
2093
2094 /*
2095 * Usher the other EMTs back to their parent recursion routine, waiting
2096 * for them to all get there before we return (makes sure they've been
2097 * scheduled and are past the pop event sem, see below).
2098 */
2099 ASMAtomicWriteU32(&pVM->vmm.s.cRendezvousEmtsRecursingPop, 0);
2100 rc = RTSemEventMultiSignal(pVM->vmm.s.hEvtMulRendezvousRecursionPop);
2101 AssertLogRelRC(rc);
2102
2103 if (ASMAtomicIncU32(&pVM->vmm.s.cRendezvousEmtsRecursingPop) != pVM->cCpus)
2104 {
2105 rc = RTSemEventWait(pVM->vmm.s.hEvtRendezvousRecursionPopCaller, RT_INDEFINITE_WAIT);
2106 AssertLogRelRC(rc);
2107 }
2108
2109 /*
2110 * We must reset the pop semaphore on the way out (doing the pop caller too,
2111 * just in case). The parent may be another recursion.
2112 */
2113 rc = RTSemEventMultiReset(pVM->vmm.s.hEvtMulRendezvousRecursionPop); AssertLogRelRC(rc);
2114 rc = vmmR3HlpResetEvent(pVM->vmm.s.hEvtRendezvousRecursionPopCaller); AssertLogRelMsg(rc == VERR_TIMEOUT, ("%Rrc\n", rc));
2115
2116 ASMAtomicDecU32(&pVM->vmm.s.cRendezvousRecursions);
2117
2118 Log(("vmmR3EmtRendezvousRecursive: %#x EMT#%u depth=%d returns %Rrc\n",
2119 fFlags, pVCpu->idCpu, pVM->vmm.s.cRendezvousRecursions, VBOXSTRICTRC_VAL(rcStrict)));
2120 return rcStrict;
2121}
2122
2123
2124/**
2125 * EMT rendezvous.
2126 *
2127 * Gathers all the EMTs and execute some code on each of them, either in a one
2128 * by one fashion or all at once.
2129 *
2130 * @returns VBox strict status code. This will be the first error,
2131 * VINF_SUCCESS, or an EM scheduling status code.
2132 *
2133 * @retval VERR_DEADLOCK if recursion is attempted using a rendezvous type that
2134 * doesn't support it or if the recursion is too deep.
2135 *
2136 * @param pVM The cross context VM structure.
2137 * @param fFlags Flags indicating execution methods. See
2138 * grp_VMMR3EmtRendezvous_fFlags. The one-by-one,
2139 * descending and ascending rendezvous types support
2140 * recursion from inside @a pfnRendezvous.
2141 * @param pfnRendezvous The callback.
2142 * @param pvUser User argument for the callback.
2143 *
2144 * @thread Any.
2145 */
2146VMMR3DECL(int) VMMR3EmtRendezvous(PVM pVM, uint32_t fFlags, PFNVMMEMTRENDEZVOUS pfnRendezvous, void *pvUser)
2147{
2148 /*
2149 * Validate input.
2150 */
2151 AssertReturn(pVM, VERR_INVALID_VM_HANDLE);
2152 AssertMsg( (fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) != VMMEMTRENDEZVOUS_FLAGS_TYPE_INVALID
2153 && (fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) <= VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING
2154 && !(fFlags & ~VMMEMTRENDEZVOUS_FLAGS_VALID_MASK), ("%#x\n", fFlags));
2155 AssertMsg( !(fFlags & VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR)
2156 || ( (fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) != VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE
2157 && (fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) != VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE),
2158 ("type %u\n", fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK));
2159
2160 VBOXSTRICTRC rcStrict;
2161 PVMCPU pVCpu = VMMGetCpu(pVM);
2162 if (!pVCpu)
2163 /*
2164 * Forward the request to an EMT thread.
2165 */
2166 {
2167 Log(("VMMR3EmtRendezvous: %#x non-EMT\n", fFlags));
2168 if (!(fFlags & VMMEMTRENDEZVOUS_FLAGS_PRIORITY))
2169 rcStrict = VMR3ReqCallWait(pVM, VMCPUID_ANY, (PFNRT)VMMR3EmtRendezvous, 4, pVM, fFlags, pfnRendezvous, pvUser);
2170 else
2171 rcStrict = VMR3ReqPriorityCallWait(pVM, VMCPUID_ANY, (PFNRT)VMMR3EmtRendezvous, 4, pVM, fFlags, pfnRendezvous, pvUser);
2172 Log(("VMMR3EmtRendezvous: %#x non-EMT returns %Rrc\n", fFlags, VBOXSTRICTRC_VAL(rcStrict)));
2173 }
2174 else if (pVM->cCpus == 1)
2175 {
2176 /*
2177 * Shortcut for the single EMT case.
2178 */
2179 if (!pVCpu->vmm.s.fInRendezvous)
2180 {
2181 Log(("VMMR3EmtRendezvous: %#x EMT (uni)\n", fFlags));
2182 pVCpu->vmm.s.fInRendezvous = true;
2183 pVM->vmm.s.fRendezvousFlags = fFlags;
2184 rcStrict = pfnRendezvous(pVM, pVCpu, pvUser);
2185 pVCpu->vmm.s.fInRendezvous = false;
2186 }
2187 else
2188 {
2189 /* Recursion. Do the same checks as in the SMP case. */
2190 Log(("VMMR3EmtRendezvous: %#x EMT (uni), recursion depth=%d\n", fFlags, pVM->vmm.s.cRendezvousRecursions));
2191 uint32_t fType = pVM->vmm.s.fRendezvousFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK;
2192 AssertLogRelReturn( !pVCpu->vmm.s.fInRendezvous
2193 || fType == VMMEMTRENDEZVOUS_FLAGS_TYPE_ASCENDING
2194 || fType == VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING
2195 || fType == VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE
2196 || fType == VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE
2197 , VERR_DEADLOCK);
2198
2199 AssertLogRelReturn(pVM->vmm.s.cRendezvousRecursions < 3, VERR_DEADLOCK);
2200 pVM->vmm.s.cRendezvousRecursions++;
2201 uint32_t const fParentFlags = pVM->vmm.s.fRendezvousFlags;
2202 pVM->vmm.s.fRendezvousFlags = fFlags;
2203
2204 rcStrict = pfnRendezvous(pVM, pVCpu, pvUser);
2205
2206 pVM->vmm.s.fRendezvousFlags = fParentFlags;
2207 pVM->vmm.s.cRendezvousRecursions--;
2208 }
2209 Log(("VMMR3EmtRendezvous: %#x EMT (uni) returns %Rrc\n", fFlags, VBOXSTRICTRC_VAL(rcStrict)));
2210 }
2211 else
2212 {
2213 /*
2214 * Spin lock. If busy, check for recursion, if not recursing wait for
2215 * the other EMT to finish while keeping a lookout for the RENDEZVOUS FF.
2216 */
2217 int rc;
2218 rcStrict = VINF_SUCCESS;
2219 if (RT_UNLIKELY(!ASMAtomicCmpXchgU32(&pVM->vmm.s.u32RendezvousLock, 0x77778888, 0)))
2220 {
2221 /* Allow recursion in some cases. */
2222 if ( pVCpu->vmm.s.fInRendezvous
2223 && ( (pVM->vmm.s.fRendezvousFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ASCENDING
2224 || (pVM->vmm.s.fRendezvousFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING
2225 || (pVM->vmm.s.fRendezvousFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE
2226 || (pVM->vmm.s.fRendezvousFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE
2227 ))
2228 return VBOXSTRICTRC_TODO(vmmR3EmtRendezvousRecursive(pVM, pVCpu, fFlags, pfnRendezvous, pvUser));
2229
2230 AssertLogRelMsgReturn(!pVCpu->vmm.s.fInRendezvous, ("fRendezvousFlags=%#x\n", pVM->vmm.s.fRendezvousFlags),
2231 VERR_DEADLOCK);
2232
2233 Log(("VMMR3EmtRendezvous: %#x EMT#%u, waiting for lock...\n", fFlags, pVCpu->idCpu));
2234 while (!ASMAtomicCmpXchgU32(&pVM->vmm.s.u32RendezvousLock, 0x77778888, 0))
2235 {
2236 if (VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS))
2237 {
2238 rc = VMMR3EmtRendezvousFF(pVM, pVCpu);
2239 if ( rc != VINF_SUCCESS
2240 && ( rcStrict == VINF_SUCCESS
2241 || rcStrict > rc))
2242 rcStrict = rc;
2243 /** @todo Perhaps deal with termination here? */
2244 }
2245 ASMNopPause();
2246 }
2247 }
2248
2249 Log(("VMMR3EmtRendezvous: %#x EMT#%u\n", fFlags, pVCpu->idCpu));
2250 Assert(!VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS));
2251 Assert(!pVCpu->vmm.s.fInRendezvous);
2252 pVCpu->vmm.s.fInRendezvous = true;
2253
2254 /*
2255 * Clear the slate and setup the rendezvous. This is a semaphore ping-pong orgy. :-)
2256 */
2257 for (VMCPUID i = 0; i < pVM->cCpus; i++)
2258 {
2259 rc = RTSemEventWait(pVM->vmm.s.pahEvtRendezvousEnterOrdered[i], 0);
2260 AssertLogRelMsg(rc == VERR_TIMEOUT || rc == VINF_SUCCESS, ("%Rrc\n", rc));
2261 }
2262 rc = RTSemEventWait(pVM->vmm.s.hEvtRendezvousEnterOneByOne, 0); AssertLogRelMsg(rc == VERR_TIMEOUT || rc == VINF_SUCCESS, ("%Rrc\n", rc));
2263 rc = RTSemEventMultiReset(pVM->vmm.s.hEvtMulRendezvousEnterAllAtOnce); AssertLogRelRC(rc);
2264 rc = RTSemEventMultiReset(pVM->vmm.s.hEvtMulRendezvousDone); AssertLogRelRC(rc);
2265 rc = RTSemEventWait(pVM->vmm.s.hEvtRendezvousDoneCaller, 0); AssertLogRelMsg(rc == VERR_TIMEOUT || rc == VINF_SUCCESS, ("%Rrc\n", rc));
2266 ASMAtomicWriteU32(&pVM->vmm.s.cRendezvousEmtsEntered, 0);
2267 ASMAtomicWriteU32(&pVM->vmm.s.cRendezvousEmtsDone, 0);
2268 ASMAtomicWriteU32(&pVM->vmm.s.cRendezvousEmtsReturned, 0);
2269 ASMAtomicWriteS32(&pVM->vmm.s.i32RendezvousStatus, VINF_SUCCESS);
2270 ASMAtomicWritePtr((void * volatile *)&pVM->vmm.s.pfnRendezvous, (void *)(uintptr_t)pfnRendezvous);
2271 ASMAtomicWritePtr(&pVM->vmm.s.pvRendezvousUser, pvUser);
2272 ASMAtomicWriteU32(&pVM->vmm.s.fRendezvousFlags, fFlags);
2273
2274 /*
2275 * Set the FF and poke the other EMTs.
2276 */
2277 VM_FF_SET(pVM, VM_FF_EMT_RENDEZVOUS);
2278 VMR3NotifyGlobalFFU(pVM->pUVM, VMNOTIFYFF_FLAGS_POKE);
2279
2280 /*
2281 * Do the same ourselves.
2282 */
2283 VBOXSTRICTRC rcStrict2 = vmmR3EmtRendezvousCommon(pVM, pVCpu, true /* fIsCaller */, fFlags, pfnRendezvous, pvUser);
2284
2285 /*
2286 * The caller waits for the other EMTs to be done and return before doing
2287 * the cleanup. This makes away with wakeup / reset races we would otherwise
2288 * risk in the multiple release event semaphore code (hEvtRendezvousDoneCaller).
2289 */
2290 for (;;)
2291 {
2292 rc = RTSemEventWait(pVM->vmm.s.hEvtRendezvousDoneCaller, RT_INDEFINITE_WAIT);
2293 AssertLogRelRC(rc);
2294 if (!pVM->vmm.s.fRendezvousRecursion)
2295 break;
2296 rcStrict2 = vmmR3EmtRendezvousCommonRecursion(pVM, pVCpu, rcStrict2);
2297 }
2298
2299 /*
2300 * Get the return code and clean up a little bit.
2301 */
2302 VBOXSTRICTRC rcStrict3 = pVM->vmm.s.i32RendezvousStatus;
2303 ASMAtomicWriteNullPtr((void * volatile *)&pVM->vmm.s.pfnRendezvous);
2304
2305 ASMAtomicWriteU32(&pVM->vmm.s.u32RendezvousLock, 0);
2306 pVCpu->vmm.s.fInRendezvous = false;
2307
2308 /*
2309 * Merge rcStrict, rcStrict2 and rcStrict3.
2310 */
2311 AssertRC(VBOXSTRICTRC_VAL(rcStrict));
2312 AssertRC(VBOXSTRICTRC_VAL(rcStrict2));
2313 if ( rcStrict2 != VINF_SUCCESS
2314 && ( rcStrict == VINF_SUCCESS
2315 || rcStrict > rcStrict2))
2316 rcStrict = rcStrict2;
2317 if ( rcStrict3 != VINF_SUCCESS
2318 && ( rcStrict == VINF_SUCCESS
2319 || rcStrict > rcStrict3))
2320 rcStrict = rcStrict3;
2321 Log(("VMMR3EmtRendezvous: %#x EMT#%u returns %Rrc\n", fFlags, pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict)));
2322 }
2323
2324 AssertLogRelMsgReturn( rcStrict <= VINF_SUCCESS
2325 || (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST),
2326 ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)),
2327 VERR_IPE_UNEXPECTED_INFO_STATUS);
2328 return VBOXSTRICTRC_VAL(rcStrict);
2329}
2330
2331
2332/**
2333 * Read from the ring 0 jump buffer stack
2334 *
2335 * @returns VBox status code.
2336 *
2337 * @param pVM The cross context VM structure.
2338 * @param idCpu The ID of the source CPU context (for the address).
2339 * @param R0Addr Where to start reading.
2340 * @param pvBuf Where to store the data we've read.
2341 * @param cbRead The number of bytes to read.
2342 */
2343VMMR3_INT_DECL(int) VMMR3ReadR0Stack(PVM pVM, VMCPUID idCpu, RTHCUINTPTR R0Addr, void *pvBuf, size_t cbRead)
2344{
2345 PVMCPU pVCpu = VMMGetCpuById(pVM, idCpu);
2346 AssertReturn(pVCpu, VERR_INVALID_PARAMETER);
2347
2348#ifdef VMM_R0_SWITCH_STACK
2349 RTHCUINTPTR off = R0Addr - MMHyperCCToR0(pVM, pVCpu->vmm.s.pbEMTStackR3);
2350#else
2351 RTHCUINTPTR off = pVCpu->vmm.s.CallRing3JmpBufR0.cbSavedStack - (pVCpu->vmm.s.CallRing3JmpBufR0.SpCheck - R0Addr);
2352#endif
2353 if ( off > VMM_STACK_SIZE
2354 || off + cbRead >= VMM_STACK_SIZE)
2355 return VERR_INVALID_POINTER;
2356
2357 memcpy(pvBuf, &pVCpu->vmm.s.pbEMTStackR3[off], cbRead);
2358 return VINF_SUCCESS;
2359}
2360
2361#ifdef VBOX_WITH_RAW_MODE
2362
2363/**
2364 * Calls a RC function.
2365 *
2366 * @param pVM The cross context VM structure.
2367 * @param RCPtrEntry The address of the RC function.
2368 * @param cArgs The number of arguments in the ....
2369 * @param ... Arguments to the function.
2370 */
2371VMMR3DECL(int) VMMR3CallRC(PVM pVM, RTRCPTR RCPtrEntry, unsigned cArgs, ...)
2372{
2373 va_list args;
2374 va_start(args, cArgs);
2375 int rc = VMMR3CallRCV(pVM, RCPtrEntry, cArgs, args);
2376 va_end(args);
2377 return rc;
2378}
2379
2380
2381/**
2382 * Calls a RC function.
2383 *
2384 * @param pVM The cross context VM structure.
2385 * @param RCPtrEntry The address of the RC function.
2386 * @param cArgs The number of arguments in the ....
2387 * @param args Arguments to the function.
2388 */
2389VMMR3DECL(int) VMMR3CallRCV(PVM pVM, RTRCPTR RCPtrEntry, unsigned cArgs, va_list args)
2390{
2391 /* Raw mode implies 1 VCPU. */
2392 AssertReturn(pVM->cCpus == 1, VERR_RAW_MODE_INVALID_SMP);
2393 PVMCPU pVCpu = &pVM->aCpus[0];
2394
2395 Log2(("VMMR3CallGCV: RCPtrEntry=%RRv cArgs=%d\n", RCPtrEntry, cArgs));
2396
2397 /*
2398 * Setup the call frame using the trampoline.
2399 */
2400 CPUMSetHyperState(pVCpu,
2401 pVM->vmm.s.pfnCallTrampolineRC, /* eip */
2402 pVCpu->vmm.s.pbEMTStackBottomRC - cArgs * sizeof(RTGCUINTPTR32), /* esp */
2403 RCPtrEntry, /* eax */
2404 cArgs /* edx */
2405 );
2406
2407#if 0
2408 memset(pVCpu->vmm.s.pbEMTStackR3, 0xaa, VMM_STACK_SIZE); /* Clear the stack. */
2409#endif
2410 PRTGCUINTPTR32 pFrame = (PRTGCUINTPTR32)(pVCpu->vmm.s.pbEMTStackR3 + VMM_STACK_SIZE) - cArgs;
2411 int i = cArgs;
2412 while (i-- > 0)
2413 *pFrame++ = va_arg(args, RTGCUINTPTR32);
2414
2415 CPUMPushHyper(pVCpu, cArgs * sizeof(RTGCUINTPTR32)); /* stack frame size */
2416 CPUMPushHyper(pVCpu, RCPtrEntry); /* what to call */
2417
2418 /*
2419 * We hide log flushes (outer) and hypervisor interrupts (inner).
2420 */
2421 for (;;)
2422 {
2423 int rc;
2424 Assert(CPUMGetHyperCR3(pVCpu) && CPUMGetHyperCR3(pVCpu) == PGMGetHyperCR3(pVCpu));
2425 do
2426 {
2427#ifdef NO_SUPCALLR0VMM
2428 rc = VERR_GENERAL_FAILURE;
2429#else
2430 rc = SUPR3CallVMMR0Fast(pVM->pVMR0, VMMR0_DO_RAW_RUN, 0);
2431 if (RT_LIKELY(rc == VINF_SUCCESS))
2432 rc = pVCpu->vmm.s.iLastGZRc;
2433#endif
2434 } while (rc == VINF_EM_RAW_INTERRUPT_HYPER);
2435
2436 /*
2437 * Flush the loggers.
2438 */
2439#ifdef LOG_ENABLED
2440 PRTLOGGERRC pLogger = pVM->vmm.s.pRCLoggerR3;
2441 if ( pLogger
2442 && pLogger->offScratch > 0)
2443 RTLogFlushRC(NULL, pLogger);
2444#endif
2445#ifdef VBOX_WITH_RC_RELEASE_LOGGING
2446 PRTLOGGERRC pRelLogger = pVM->vmm.s.pRCRelLoggerR3;
2447 if (RT_UNLIKELY(pRelLogger && pRelLogger->offScratch > 0))
2448 RTLogFlushRC(RTLogRelGetDefaultInstance(), pRelLogger);
2449#endif
2450 if (rc == VERR_TRPM_PANIC || rc == VERR_TRPM_DONT_PANIC)
2451 VMMR3FatalDump(pVM, pVCpu, rc);
2452 if (rc != VINF_VMM_CALL_HOST)
2453 {
2454 Log2(("VMMR3CallGCV: returns %Rrc (cs:eip=%04x:%08x)\n", rc, CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
2455 return rc;
2456 }
2457 rc = vmmR3ServiceCallRing3Request(pVM, pVCpu);
2458 if (RT_FAILURE(rc))
2459 return rc;
2460 }
2461}
2462
2463#endif /* VBOX_WITH_RAW_MODE */
2464
2465/**
2466 * Wrapper for SUPR3CallVMMR0Ex which will deal with VINF_VMM_CALL_HOST returns.
2467 *
2468 * @returns VBox status code.
2469 * @param pVM The cross context VM structure.
2470 * @param uOperation Operation to execute.
2471 * @param u64Arg Constant argument.
2472 * @param pReqHdr Pointer to a request header. See SUPR3CallVMMR0Ex for
2473 * details.
2474 */
2475VMMR3DECL(int) VMMR3CallR0(PVM pVM, uint32_t uOperation, uint64_t u64Arg, PSUPVMMR0REQHDR pReqHdr)
2476{
2477 PVMCPU pVCpu = VMMGetCpu(pVM);
2478 AssertReturn(pVCpu, VERR_VM_THREAD_NOT_EMT);
2479
2480 /*
2481 * Call Ring-0 entry with init code.
2482 */
2483 int rc;
2484 for (;;)
2485 {
2486#ifdef NO_SUPCALLR0VMM
2487 rc = VERR_GENERAL_FAILURE;
2488#else
2489 rc = SUPR3CallVMMR0Ex(pVM->pVMR0, pVCpu->idCpu, uOperation, u64Arg, pReqHdr);
2490#endif
2491 /*
2492 * Flush the logs.
2493 */
2494#ifdef LOG_ENABLED
2495 if ( pVCpu->vmm.s.pR0LoggerR3
2496 && pVCpu->vmm.s.pR0LoggerR3->Logger.offScratch > 0)
2497 RTLogFlushR0(NULL, &pVCpu->vmm.s.pR0LoggerR3->Logger);
2498#endif
2499 if (rc != VINF_VMM_CALL_HOST)
2500 break;
2501 rc = vmmR3ServiceCallRing3Request(pVM, pVCpu);
2502 if (RT_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
2503 break;
2504 /* Resume R0 */
2505 }
2506
2507 AssertLogRelMsgReturn(rc == VINF_SUCCESS || RT_FAILURE(rc),
2508 ("uOperation=%u rc=%Rrc\n", uOperation, rc),
2509 VERR_IPE_UNEXPECTED_INFO_STATUS);
2510 return rc;
2511}
2512
2513
2514#ifdef VBOX_WITH_RAW_MODE
2515/**
2516 * Resumes executing hypervisor code when interrupted by a queue flush or a
2517 * debug event.
2518 *
2519 * @returns VBox status code.
2520 * @param pVM The cross context VM structure.
2521 * @param pVCpu The cross context virtual CPU structure.
2522 */
2523VMMR3DECL(int) VMMR3ResumeHyper(PVM pVM, PVMCPU pVCpu)
2524{
2525 Log(("VMMR3ResumeHyper: eip=%RRv esp=%RRv\n", CPUMGetHyperEIP(pVCpu), CPUMGetHyperESP(pVCpu)));
2526 AssertReturn(pVM->cCpus == 1, VERR_RAW_MODE_INVALID_SMP);
2527
2528 /*
2529 * We hide log flushes (outer) and hypervisor interrupts (inner).
2530 */
2531 for (;;)
2532 {
2533 int rc;
2534 Assert(CPUMGetHyperCR3(pVCpu) && CPUMGetHyperCR3(pVCpu) == PGMGetHyperCR3(pVCpu));
2535 do
2536 {
2537# ifdef NO_SUPCALLR0VMM
2538 rc = VERR_GENERAL_FAILURE;
2539# else
2540 rc = SUPR3CallVMMR0Fast(pVM->pVMR0, VMMR0_DO_RAW_RUN, 0);
2541 if (RT_LIKELY(rc == VINF_SUCCESS))
2542 rc = pVCpu->vmm.s.iLastGZRc;
2543# endif
2544 } while (rc == VINF_EM_RAW_INTERRUPT_HYPER);
2545
2546 /*
2547 * Flush the loggers.
2548 */
2549# ifdef LOG_ENABLED
2550 PRTLOGGERRC pLogger = pVM->vmm.s.pRCLoggerR3;
2551 if ( pLogger
2552 && pLogger->offScratch > 0)
2553 RTLogFlushRC(NULL, pLogger);
2554# endif
2555# ifdef VBOX_WITH_RC_RELEASE_LOGGING
2556 PRTLOGGERRC pRelLogger = pVM->vmm.s.pRCRelLoggerR3;
2557 if (RT_UNLIKELY(pRelLogger && pRelLogger->offScratch > 0))
2558 RTLogFlushRC(RTLogRelGetDefaultInstance(), pRelLogger);
2559# endif
2560 if (rc == VERR_TRPM_PANIC || rc == VERR_TRPM_DONT_PANIC)
2561 VMMR3FatalDump(pVM, pVCpu, rc);
2562 if (rc != VINF_VMM_CALL_HOST)
2563 {
2564 Log(("VMMR3ResumeHyper: returns %Rrc\n", rc));
2565 return rc;
2566 }
2567 rc = vmmR3ServiceCallRing3Request(pVM, pVCpu);
2568 if (RT_FAILURE(rc))
2569 return rc;
2570 }
2571}
2572#endif /* VBOX_WITH_RAW_MODE */
2573
2574
2575/**
2576 * Service a call to the ring-3 host code.
2577 *
2578 * @returns VBox status code.
2579 * @param pVM The cross context VM structure.
2580 * @param pVCpu The cross context virtual CPU structure.
2581 * @remarks Careful with critsects.
2582 */
2583static int vmmR3ServiceCallRing3Request(PVM pVM, PVMCPU pVCpu)
2584{
2585 /*
2586 * We must also check for pending critsect exits or else we can deadlock
2587 * when entering other critsects here.
2588 */
2589 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PDM_CRITSECT))
2590 PDMCritSectBothFF(pVCpu);
2591
2592 switch (pVCpu->vmm.s.enmCallRing3Operation)
2593 {
2594 /*
2595 * Acquire a critical section.
2596 */
2597 case VMMCALLRING3_PDM_CRIT_SECT_ENTER:
2598 {
2599 pVCpu->vmm.s.rcCallRing3 = PDMR3CritSectEnterEx((PPDMCRITSECT)(uintptr_t)pVCpu->vmm.s.u64CallRing3Arg,
2600 true /*fCallRing3*/);
2601 break;
2602 }
2603
2604 /*
2605 * Enter a r/w critical section exclusively.
2606 */
2607 case VMMCALLRING3_PDM_CRIT_SECT_RW_ENTER_EXCL:
2608 {
2609 pVCpu->vmm.s.rcCallRing3 = PDMR3CritSectRwEnterExclEx((PPDMCRITSECTRW)(uintptr_t)pVCpu->vmm.s.u64CallRing3Arg,
2610 true /*fCallRing3*/);
2611 break;
2612 }
2613
2614 /*
2615 * Enter a r/w critical section shared.
2616 */
2617 case VMMCALLRING3_PDM_CRIT_SECT_RW_ENTER_SHARED:
2618 {
2619 pVCpu->vmm.s.rcCallRing3 = PDMR3CritSectRwEnterSharedEx((PPDMCRITSECTRW)(uintptr_t)pVCpu->vmm.s.u64CallRing3Arg,
2620 true /*fCallRing3*/);
2621 break;
2622 }
2623
2624 /*
2625 * Acquire the PDM lock.
2626 */
2627 case VMMCALLRING3_PDM_LOCK:
2628 {
2629 pVCpu->vmm.s.rcCallRing3 = PDMR3LockCall(pVM);
2630 break;
2631 }
2632
2633 /*
2634 * Grow the PGM pool.
2635 */
2636 case VMMCALLRING3_PGM_POOL_GROW:
2637 {
2638 pVCpu->vmm.s.rcCallRing3 = PGMR3PoolGrow(pVM);
2639 break;
2640 }
2641
2642 /*
2643 * Maps an page allocation chunk into ring-3 so ring-0 can use it.
2644 */
2645 case VMMCALLRING3_PGM_MAP_CHUNK:
2646 {
2647 pVCpu->vmm.s.rcCallRing3 = PGMR3PhysChunkMap(pVM, pVCpu->vmm.s.u64CallRing3Arg);
2648 break;
2649 }
2650
2651 /*
2652 * Allocates more handy pages.
2653 */
2654 case VMMCALLRING3_PGM_ALLOCATE_HANDY_PAGES:
2655 {
2656 pVCpu->vmm.s.rcCallRing3 = PGMR3PhysAllocateHandyPages(pVM);
2657 break;
2658 }
2659
2660 /*
2661 * Allocates a large page.
2662 */
2663 case VMMCALLRING3_PGM_ALLOCATE_LARGE_HANDY_PAGE:
2664 {
2665 pVCpu->vmm.s.rcCallRing3 = PGMR3PhysAllocateLargeHandyPage(pVM, pVCpu->vmm.s.u64CallRing3Arg);
2666 break;
2667 }
2668
2669 /*
2670 * Acquire the PGM lock.
2671 */
2672 case VMMCALLRING3_PGM_LOCK:
2673 {
2674 pVCpu->vmm.s.rcCallRing3 = PGMR3LockCall(pVM);
2675 break;
2676 }
2677
2678 /*
2679 * Acquire the MM hypervisor heap lock.
2680 */
2681 case VMMCALLRING3_MMHYPER_LOCK:
2682 {
2683 pVCpu->vmm.s.rcCallRing3 = MMR3LockCall(pVM);
2684 break;
2685 }
2686
2687#ifdef VBOX_WITH_REM
2688 /*
2689 * Flush REM handler notifications.
2690 */
2691 case VMMCALLRING3_REM_REPLAY_HANDLER_NOTIFICATIONS:
2692 {
2693 REMR3ReplayHandlerNotifications(pVM);
2694 pVCpu->vmm.s.rcCallRing3 = VINF_SUCCESS;
2695 break;
2696 }
2697#endif
2698
2699 /*
2700 * This is a noop. We just take this route to avoid unnecessary
2701 * tests in the loops.
2702 */
2703 case VMMCALLRING3_VMM_LOGGER_FLUSH:
2704 pVCpu->vmm.s.rcCallRing3 = VINF_SUCCESS;
2705 LogAlways(("*FLUSH*\n"));
2706 break;
2707
2708 /*
2709 * Set the VM error message.
2710 */
2711 case VMMCALLRING3_VM_SET_ERROR:
2712 VMR3SetErrorWorker(pVM);
2713 pVCpu->vmm.s.rcCallRing3 = VINF_SUCCESS;
2714 break;
2715
2716 /*
2717 * Set the VM runtime error message.
2718 */
2719 case VMMCALLRING3_VM_SET_RUNTIME_ERROR:
2720 pVCpu->vmm.s.rcCallRing3 = VMR3SetRuntimeErrorWorker(pVM);
2721 break;
2722
2723 /*
2724 * Signal a ring 0 hypervisor assertion.
2725 * Cancel the longjmp operation that's in progress.
2726 */
2727 case VMMCALLRING3_VM_R0_ASSERTION:
2728 pVCpu->vmm.s.enmCallRing3Operation = VMMCALLRING3_INVALID;
2729 pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call = false;
2730#ifdef RT_ARCH_X86
2731 pVCpu->vmm.s.CallRing3JmpBufR0.eip = 0;
2732#else
2733 pVCpu->vmm.s.CallRing3JmpBufR0.rip = 0;
2734#endif
2735#ifdef VMM_R0_SWITCH_STACK
2736 *(uint64_t *)pVCpu->vmm.s.pbEMTStackR3 = 0; /* clear marker */
2737#endif
2738 LogRel(("%s", pVM->vmm.s.szRing0AssertMsg1));
2739 LogRel(("%s", pVM->vmm.s.szRing0AssertMsg2));
2740 return VERR_VMM_RING0_ASSERTION;
2741
2742 /*
2743 * A forced switch to ring 0 for preemption purposes.
2744 */
2745 case VMMCALLRING3_VM_R0_PREEMPT:
2746 pVCpu->vmm.s.rcCallRing3 = VINF_SUCCESS;
2747 break;
2748
2749 case VMMCALLRING3_FTM_SET_CHECKPOINT:
2750 pVCpu->vmm.s.rcCallRing3 = FTMR3SetCheckpoint(pVM, (FTMCHECKPOINTTYPE)pVCpu->vmm.s.u64CallRing3Arg);
2751 break;
2752
2753 default:
2754 AssertMsgFailed(("enmCallRing3Operation=%d\n", pVCpu->vmm.s.enmCallRing3Operation));
2755 return VERR_VMM_UNKNOWN_RING3_CALL;
2756 }
2757
2758 pVCpu->vmm.s.enmCallRing3Operation = VMMCALLRING3_INVALID;
2759 return VINF_SUCCESS;
2760}
2761
2762
2763/**
2764 * Displays the Force action Flags.
2765 *
2766 * @param pVM The cross context VM structure.
2767 * @param pHlp The output helpers.
2768 * @param pszArgs The additional arguments (ignored).
2769 */
2770static DECLCALLBACK(void) vmmR3InfoFF(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
2771{
2772 int c;
2773 uint32_t f;
2774 NOREF(pszArgs);
2775
2776#define PRINT_FLAG(prf,flag) do { \
2777 if (f & (prf##flag)) \
2778 { \
2779 static const char *s_psz = #flag; \
2780 if (!(c % 6)) \
2781 pHlp->pfnPrintf(pHlp, "%s\n %s", c ? "," : "", s_psz); \
2782 else \
2783 pHlp->pfnPrintf(pHlp, ", %s", s_psz); \
2784 c++; \
2785 f &= ~(prf##flag); \
2786 } \
2787 } while (0)
2788
2789#define PRINT_GROUP(prf,grp,sfx) do { \
2790 if (f & (prf##grp##sfx)) \
2791 { \
2792 static const char *s_psz = #grp; \
2793 if (!(c % 5)) \
2794 pHlp->pfnPrintf(pHlp, "%s %s", c ? ",\n" : " Groups:\n", s_psz); \
2795 else \
2796 pHlp->pfnPrintf(pHlp, ", %s", s_psz); \
2797 c++; \
2798 } \
2799 } while (0)
2800
2801 /*
2802 * The global flags.
2803 */
2804 const uint32_t fGlobalForcedActions = pVM->fGlobalForcedActions;
2805 pHlp->pfnPrintf(pHlp, "Global FFs: %#RX32", fGlobalForcedActions);
2806
2807 /* show the flag mnemonics */
2808 c = 0;
2809 f = fGlobalForcedActions;
2810 PRINT_FLAG(VM_FF_,TM_VIRTUAL_SYNC);
2811 PRINT_FLAG(VM_FF_,PDM_QUEUES);
2812 PRINT_FLAG(VM_FF_,PDM_DMA);
2813 PRINT_FLAG(VM_FF_,DBGF);
2814 PRINT_FLAG(VM_FF_,REQUEST);
2815 PRINT_FLAG(VM_FF_,CHECK_VM_STATE);
2816 PRINT_FLAG(VM_FF_,RESET);
2817 PRINT_FLAG(VM_FF_,EMT_RENDEZVOUS);
2818 PRINT_FLAG(VM_FF_,PGM_NEED_HANDY_PAGES);
2819 PRINT_FLAG(VM_FF_,PGM_NO_MEMORY);
2820 PRINT_FLAG(VM_FF_,PGM_POOL_FLUSH_PENDING);
2821 PRINT_FLAG(VM_FF_,REM_HANDLER_NOTIFY);
2822 PRINT_FLAG(VM_FF_,DEBUG_SUSPEND);
2823 if (f)
2824 pHlp->pfnPrintf(pHlp, "%s\n Unknown bits: %#RX32\n", c ? "," : "", f);
2825 else
2826 pHlp->pfnPrintf(pHlp, "\n");
2827
2828 /* the groups */
2829 c = 0;
2830 f = fGlobalForcedActions;
2831 PRINT_GROUP(VM_FF_,EXTERNAL_SUSPENDED,_MASK);
2832 PRINT_GROUP(VM_FF_,EXTERNAL_HALTED,_MASK);
2833 PRINT_GROUP(VM_FF_,HIGH_PRIORITY_PRE,_MASK);
2834 PRINT_GROUP(VM_FF_,HIGH_PRIORITY_PRE_RAW,_MASK);
2835 PRINT_GROUP(VM_FF_,HIGH_PRIORITY_POST,_MASK);
2836 PRINT_GROUP(VM_FF_,NORMAL_PRIORITY_POST,_MASK);
2837 PRINT_GROUP(VM_FF_,NORMAL_PRIORITY,_MASK);
2838 PRINT_GROUP(VM_FF_,ALL_REM,_MASK);
2839 if (c)
2840 pHlp->pfnPrintf(pHlp, "\n");
2841
2842 /*
2843 * Per CPU flags.
2844 */
2845 for (VMCPUID i = 0; i < pVM->cCpus; i++)
2846 {
2847 const uint32_t fLocalForcedActions = pVM->aCpus[i].fLocalForcedActions;
2848 pHlp->pfnPrintf(pHlp, "CPU %u FFs: %#RX32", i, fLocalForcedActions);
2849
2850 /* show the flag mnemonics */
2851 c = 0;
2852 f = fLocalForcedActions;
2853 PRINT_FLAG(VMCPU_FF_,INTERRUPT_APIC);
2854 PRINT_FLAG(VMCPU_FF_,INTERRUPT_PIC);
2855 PRINT_FLAG(VMCPU_FF_,TIMER);
2856 PRINT_FLAG(VMCPU_FF_,INTERRUPT_NMI);
2857 PRINT_FLAG(VMCPU_FF_,INTERRUPT_SMI);
2858 PRINT_FLAG(VMCPU_FF_,PDM_CRITSECT);
2859 PRINT_FLAG(VMCPU_FF_,UNHALT);
2860 PRINT_FLAG(VMCPU_FF_,IEM);
2861 PRINT_FLAG(VMCPU_FF_,UPDATE_APIC);
2862 PRINT_FLAG(VMCPU_FF_,DBGF);
2863 PRINT_FLAG(VMCPU_FF_,REQUEST);
2864 PRINT_FLAG(VMCPU_FF_,HM_UPDATE_CR3);
2865 PRINT_FLAG(VMCPU_FF_,HM_UPDATE_PAE_PDPES);
2866 PRINT_FLAG(VMCPU_FF_,PGM_SYNC_CR3);
2867 PRINT_FLAG(VMCPU_FF_,PGM_SYNC_CR3_NON_GLOBAL);
2868 PRINT_FLAG(VMCPU_FF_,TLB_FLUSH);
2869 PRINT_FLAG(VMCPU_FF_,INHIBIT_INTERRUPTS);
2870 PRINT_FLAG(VMCPU_FF_,BLOCK_NMIS);
2871 PRINT_FLAG(VMCPU_FF_,TO_R3);
2872 PRINT_FLAG(VMCPU_FF_,IOM);
2873#ifdef VBOX_WITH_RAW_MODE
2874 PRINT_FLAG(VMCPU_FF_,TRPM_SYNC_IDT);
2875 PRINT_FLAG(VMCPU_FF_,SELM_SYNC_TSS);
2876 PRINT_FLAG(VMCPU_FF_,SELM_SYNC_GDT);
2877 PRINT_FLAG(VMCPU_FF_,SELM_SYNC_LDT);
2878 PRINT_FLAG(VMCPU_FF_,CSAM_SCAN_PAGE);
2879 PRINT_FLAG(VMCPU_FF_,CSAM_PENDING_ACTION);
2880 PRINT_FLAG(VMCPU_FF_,CPUM);
2881#endif
2882 if (f)
2883 pHlp->pfnPrintf(pHlp, "%s\n Unknown bits: %#RX32\n", c ? "," : "", f);
2884 else
2885 pHlp->pfnPrintf(pHlp, "\n");
2886
2887 if (fLocalForcedActions & VMCPU_FF_INHIBIT_INTERRUPTS)
2888 pHlp->pfnPrintf(pHlp, " intr inhibit RIP: %RGp\n", EMGetInhibitInterruptsPC(&pVM->aCpus[i]));
2889
2890 /* the groups */
2891 c = 0;
2892 f = fLocalForcedActions;
2893 PRINT_GROUP(VMCPU_FF_,EXTERNAL_SUSPENDED,_MASK);
2894 PRINT_GROUP(VMCPU_FF_,EXTERNAL_HALTED,_MASK);
2895 PRINT_GROUP(VMCPU_FF_,HIGH_PRIORITY_PRE,_MASK);
2896 PRINT_GROUP(VMCPU_FF_,HIGH_PRIORITY_PRE_RAW,_MASK);
2897 PRINT_GROUP(VMCPU_FF_,HIGH_PRIORITY_POST,_MASK);
2898 PRINT_GROUP(VMCPU_FF_,NORMAL_PRIORITY_POST,_MASK);
2899 PRINT_GROUP(VMCPU_FF_,NORMAL_PRIORITY,_MASK);
2900 PRINT_GROUP(VMCPU_FF_,RESUME_GUEST,_MASK);
2901 PRINT_GROUP(VMCPU_FF_,HM_TO_R3,_MASK);
2902 PRINT_GROUP(VMCPU_FF_,ALL_REM,_MASK);
2903 if (c)
2904 pHlp->pfnPrintf(pHlp, "\n");
2905 }
2906
2907#undef PRINT_FLAG
2908#undef PRINT_GROUP
2909}
2910
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette