VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/VMM.cpp@78126

Last change on this file since 78126 was 76553, checked in by vboxsync, 6 years ago

scm --update-copyright-year

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 125.2 KB
Line 
1/* $Id: VMM.cpp 76553 2019-01-01 01:45:53Z vboxsync $ */
2/** @file
3 * VMM - The Virtual Machine Monitor Core.
4 */
5
6/*
7 * Copyright (C) 2006-2019 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18//#define NO_SUPCALLR0VMM
19
20/** @page pg_vmm VMM - The Virtual Machine Monitor
21 *
22 * The VMM component is two things at the moment: it's a component doing a few
23 * management and routing tasks, and it's the whole virtual machine monitor
24 * thing. For hysterical reasons, it is not doing all the management that one
25 * would expect; this is instead done by @ref pg_vm. We'll address this
26 * misdesign eventually, maybe.
27 *
28 * VMM is made up of these components:
29 * - @subpage pg_cfgm
30 * - @subpage pg_cpum
31 * - @subpage pg_csam
32 * - @subpage pg_dbgf
33 * - @subpage pg_em
34 * - @subpage pg_gim
35 * - @subpage pg_gmm
36 * - @subpage pg_gvmm
37 * - @subpage pg_hm
38 * - @subpage pg_iem
39 * - @subpage pg_iom
40 * - @subpage pg_mm
41 * - @subpage pg_patm
42 * - @subpage pg_pdm
43 * - @subpage pg_pgm
44 * - @subpage pg_rem
45 * - @subpage pg_selm
46 * - @subpage pg_ssm
47 * - @subpage pg_stam
48 * - @subpage pg_tm
49 * - @subpage pg_trpm
50 * - @subpage pg_vm
51 *
52 *
53 * @see @ref grp_vmm @ref grp_vm @subpage pg_vmm_guideline @subpage pg_raw
54 *
55 *
56 * @section sec_vmmstate VMM State
57 *
58 * @image html VM_Statechart_Diagram.gif
59 *
60 * To be written.
61 *
62 *
63 * @subsection subsec_vmm_init VMM Initialization
64 *
65 * To be written.
66 *
67 *
68 * @subsection subsec_vmm_term VMM Termination
69 *
70 * To be written.
71 *
72 *
73 * @section sec_vmm_limits VMM Limits
74 *
75 * There are various resource limits imposed by the VMM and its
76 * sub-components. We'll list some of them here (a rough arithmetic sketch follows this comment block).
77 *
78 * On 64-bit hosts:
79 * - Max 8191 VMs. Imposed by GVMM's handle allocation (GVMM_MAX_HANDLES),
80 * can be increased up to 64K - 1.
81 * - Max 16TB - 64KB of the host memory can be used for backing VM RAM and
82 * ROM pages. The limit is imposed by the 32-bit page ID used by GMM.
83 * - A VM can be assigned all the memory we can use (16TB), however, the
84 * Main API will restrict this to 2TB (MM_RAM_MAX_IN_MB).
85 * - Max 32 virtual CPUs (VMM_MAX_CPU_COUNT).
86 *
87 * On 32-bit hosts:
88 * - Max 127 VMs. Imposed by GMM's per page structure.
89 * - Max 64GB - 64KB of the host memory can be used for backing VM RAM and
90 * ROM pages. The limit is imposed by the 28-bit page ID used
91 * internally in GMM. It is also limited by PAE.
92 * - A VM can be assigned all the memory GMM can allocate, however, the
93 * Main API will restrict this to 3584MB (MM_RAM_MAX_IN_MB).
94 * - Max 32 virtual CPUs (VMM_MAX_CPU_COUNT).
95 *
96 */
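/*
 * A rough arithmetic sketch of the limits above, assuming 4 KiB pages:
 * with a 32-bit page ID, GMM can name 2^32 pages, i.e.
 *      2^32 pages * 4 KiB/page = 16 TiB,
 * minus 64 KiB (16 page IDs, presumably reserved/NIL values), matching the
 * "16TB - 64KB" figure for 64-bit hosts. On 32-bit hosts the binding
 * constraint is PAE's 36-bit physical address space (64 GiB) rather than the
 * 28-bit page ID, which on its own would allow 1 TiB.
 */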
97
98
99/*********************************************************************************************************************************
100* Header Files *
101*********************************************************************************************************************************/
102#define LOG_GROUP LOG_GROUP_VMM
103#include <VBox/vmm/vmm.h>
104#include <VBox/vmm/vmapi.h>
105#include <VBox/vmm/pgm.h>
106#include <VBox/vmm/cfgm.h>
107#include <VBox/vmm/pdmqueue.h>
108#include <VBox/vmm/pdmcritsect.h>
109#include <VBox/vmm/pdmcritsectrw.h>
110#include <VBox/vmm/pdmapi.h>
111#include <VBox/vmm/cpum.h>
112#include <VBox/vmm/gim.h>
113#include <VBox/vmm/mm.h>
114#include <VBox/vmm/nem.h>
115#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
116# include <VBox/vmm/iem.h>
117#endif
118#include <VBox/vmm/iom.h>
119#include <VBox/vmm/trpm.h>
120#include <VBox/vmm/selm.h>
121#include <VBox/vmm/em.h>
122#include <VBox/sup.h>
123#include <VBox/vmm/dbgf.h>
124#include <VBox/vmm/csam.h>
125#include <VBox/vmm/patm.h>
126#include <VBox/vmm/apic.h>
127#ifdef VBOX_WITH_REM
128# include <VBox/vmm/rem.h>
129#endif
130#include <VBox/vmm/ssm.h>
131#include <VBox/vmm/ftm.h>
132#include <VBox/vmm/tm.h>
133#include "VMMInternal.h"
134#include "VMMSwitcher.h"
135#include <VBox/vmm/vm.h>
136#include <VBox/vmm/uvm.h>
137
138#include <VBox/err.h>
139#include <VBox/param.h>
140#include <VBox/version.h>
141#include <VBox/vmm/hm.h>
142#include <iprt/assert.h>
143#include <iprt/alloc.h>
144#include <iprt/asm.h>
145#include <iprt/time.h>
146#include <iprt/semaphore.h>
147#include <iprt/stream.h>
148#include <iprt/string.h>
149#include <iprt/stdarg.h>
150#include <iprt/ctype.h>
151#include <iprt/x86.h>
152
153
154/*********************************************************************************************************************************
155* Defined Constants And Macros *
156*********************************************************************************************************************************/
157/** The saved state version. */
158#define VMM_SAVED_STATE_VERSION 4
159/** The saved state version used by v3.0 and earlier. (Teleportation) */
160#define VMM_SAVED_STATE_VERSION_3_0 3
161
162/** Macro for flushing the ring-0 logging. */
163#define VMM_FLUSH_R0_LOG(a_pR0Logger, a_pR3Logger) \
164 do { \
165 PVMMR0LOGGER pVmmLogger = (a_pR0Logger); \
166 if (!pVmmLogger || pVmmLogger->Logger.offScratch == 0) \
167 { /* likely? */ } \
168 else \
169 RTLogFlushR0(a_pR3Logger, &pVmmLogger->Logger); \
170 } while (0)
171
172
173/*********************************************************************************************************************************
174* Internal Functions *
175*********************************************************************************************************************************/
176static int vmmR3InitStacks(PVM pVM);
177static int vmmR3InitLoggers(PVM pVM);
178static void vmmR3InitRegisterStats(PVM pVM);
179static DECLCALLBACK(int) vmmR3Save(PVM pVM, PSSMHANDLE pSSM);
180static DECLCALLBACK(int) vmmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
181static DECLCALLBACK(void) vmmR3YieldEMT(PVM pVM, PTMTIMER pTimer, void *pvUser);
182static VBOXSTRICTRC vmmR3EmtRendezvousCommon(PVM pVM, PVMCPU pVCpu, bool fIsCaller,
183 uint32_t fFlags, PFNVMMEMTRENDEZVOUS pfnRendezvous, void *pvUser);
184static int vmmR3ServiceCallRing3Request(PVM pVM, PVMCPU pVCpu);
185static DECLCALLBACK(void) vmmR3InfoFF(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
186
187
188/**
189 * Initializes the VMM.
190 *
191 * @returns VBox status code.
192 * @param pVM The cross context VM structure.
193 */
194VMMR3_INT_DECL(int) VMMR3Init(PVM pVM)
195{
196 LogFlow(("VMMR3Init\n"));
197
198 /*
199 * Assert alignment, sizes and order.
200 */
201 AssertMsg(pVM->vmm.s.offVM == 0, ("Already initialized!\n"));
202 AssertCompile(sizeof(pVM->vmm.s) <= sizeof(pVM->vmm.padding));
203 AssertCompile(sizeof(pVM->aCpus[0].vmm.s) <= sizeof(pVM->aCpus[0].vmm.padding));
204
205 /*
206 * Init basic VM VMM members.
207 */
208 pVM->vmm.s.offVM = RT_UOFFSETOF(VM, vmm);
209 pVM->vmm.s.pahEvtRendezvousEnterOrdered = NULL;
210 pVM->vmm.s.hEvtRendezvousEnterOneByOne = NIL_RTSEMEVENT;
211 pVM->vmm.s.hEvtMulRendezvousEnterAllAtOnce = NIL_RTSEMEVENTMULTI;
212 pVM->vmm.s.hEvtMulRendezvousDone = NIL_RTSEMEVENTMULTI;
213 pVM->vmm.s.hEvtRendezvousDoneCaller = NIL_RTSEMEVENT;
214 pVM->vmm.s.hEvtMulRendezvousRecursionPush = NIL_RTSEMEVENTMULTI;
215 pVM->vmm.s.hEvtMulRendezvousRecursionPop = NIL_RTSEMEVENTMULTI;
216 pVM->vmm.s.hEvtRendezvousRecursionPushCaller = NIL_RTSEMEVENT;
217 pVM->vmm.s.hEvtRendezvousRecursionPopCaller = NIL_RTSEMEVENT;
218
219 /** @cfgm{/YieldEMTInterval, uint32_t, 1, UINT32_MAX, 23, ms}
220 * The EMT yield interval. The EMT yielding is a hack we employ to play a
221 * bit nicer with the rest of the system (for instance, the GUI).
222 */
223 int rc = CFGMR3QueryU32Def(CFGMR3GetRoot(pVM), "YieldEMTInterval", &pVM->vmm.s.cYieldEveryMillies,
224 23 /* Value arrived at after experimenting with the grub boot prompt. */);
225 AssertMsgRCReturn(rc, ("Configuration error. Failed to query \"YieldEMTInterval\", rc=%Rrc\n", rc), rc);
226
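 /* This key can typically be overridden through extradata before the VM is
  * powered on, assuming the usual VBoxInternal-to-CFGM-root mapping
  * ("MyVM" is a placeholder name):
  *     VBoxManage setextradata "MyVM" VBoxInternal/YieldEMTInterval 10
  */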
227
228 /** @cfgm{/VMM/UsePeriodicPreemptionTimers, boolean, true}
229 * Controls whether we employ per-cpu preemption timers to limit the time
230 * spent executing guest code. This option is not available on all
231 * platforms; where it is unavailable, the setting is silently ignored. If we are
232 * running in VT-x mode, we will use the VMX-preemption timer instead of
233 * this one when possible.
234 */
235 PCFGMNODE pCfgVMM = CFGMR3GetChild(CFGMR3GetRoot(pVM), "VMM");
236 rc = CFGMR3QueryBoolDef(pCfgVMM, "UsePeriodicPreemptionTimers", &pVM->vmm.s.fUsePeriodicPreemptionTimers, true);
237 AssertMsgRCReturn(rc, ("Configuration error. Failed to query \"VMM/UsePeriodicPreemptionTimers\", rc=%Rrc\n", rc), rc);
238
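 /* Under the same VBoxInternal extradata assumption, the periodic preemption
  * timers could be forced off like this ("MyVM" again being a placeholder):
  *     VBoxManage setextradata "MyVM" VBoxInternal/VMM/UsePeriodicPreemptionTimers 0
  */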
239 /*
240 * Initialize the VMM rendezvous semaphores.
241 */
242 pVM->vmm.s.pahEvtRendezvousEnterOrdered = (PRTSEMEVENT)MMR3HeapAlloc(pVM, MM_TAG_VMM, sizeof(RTSEMEVENT) * pVM->cCpus);
243 if (!pVM->vmm.s.pahEvtRendezvousEnterOrdered)
244 return VERR_NO_MEMORY;
245 for (VMCPUID i = 0; i < pVM->cCpus; i++)
246 pVM->vmm.s.pahEvtRendezvousEnterOrdered[i] = NIL_RTSEMEVENT;
247 for (VMCPUID i = 0; i < pVM->cCpus; i++)
248 {
249 rc = RTSemEventCreate(&pVM->vmm.s.pahEvtRendezvousEnterOrdered[i]);
250 AssertRCReturn(rc, rc);
251 }
252 rc = RTSemEventCreate(&pVM->vmm.s.hEvtRendezvousEnterOneByOne);
253 AssertRCReturn(rc, rc);
254 rc = RTSemEventMultiCreate(&pVM->vmm.s.hEvtMulRendezvousEnterAllAtOnce);
255 AssertRCReturn(rc, rc);
256 rc = RTSemEventMultiCreate(&pVM->vmm.s.hEvtMulRendezvousDone);
257 AssertRCReturn(rc, rc);
258 rc = RTSemEventCreate(&pVM->vmm.s.hEvtRendezvousDoneCaller);
259 AssertRCReturn(rc, rc);
260 rc = RTSemEventMultiCreate(&pVM->vmm.s.hEvtMulRendezvousRecursionPush);
261 AssertRCReturn(rc, rc);
262 rc = RTSemEventMultiCreate(&pVM->vmm.s.hEvtMulRendezvousRecursionPop);
263 AssertRCReturn(rc, rc);
264 rc = RTSemEventCreate(&pVM->vmm.s.hEvtRendezvousRecursionPushCaller);
265 AssertRCReturn(rc, rc);
266 rc = RTSemEventCreate(&pVM->vmm.s.hEvtRendezvousRecursionPopCaller);
267 AssertRCReturn(rc, rc);
268
269 /*
270 * Register the saved state data unit.
271 */
272 rc = SSMR3RegisterInternal(pVM, "vmm", 1, VMM_SAVED_STATE_VERSION, VMM_STACK_SIZE + sizeof(RTGCPTR),
273 NULL, NULL, NULL,
274 NULL, vmmR3Save, NULL,
275 NULL, vmmR3Load, NULL);
276 if (RT_FAILURE(rc))
277 return rc;
278
279 /*
280 * Register the Ring-0 VM handle with the session for fast ioctl calls.
281 */
282 rc = SUPR3SetVMForFastIOCtl(pVM->pVMR0);
283 if (RT_FAILURE(rc))
284 return rc;
285
286 /*
287 * Init various sub-components.
288 */
289 rc = vmmR3SwitcherInit(pVM);
290 if (RT_SUCCESS(rc))
291 {
292 rc = vmmR3InitStacks(pVM);
293 if (RT_SUCCESS(rc))
294 {
295 rc = vmmR3InitLoggers(pVM);
296
297#ifdef VBOX_WITH_NMI
298 /*
299 * Allocate mapping for the host APIC.
300 */
301 if (RT_SUCCESS(rc))
302 {
303 rc = MMR3HyperReserve(pVM, PAGE_SIZE, "Host APIC", &pVM->vmm.s.GCPtrApicBase);
304 AssertRC(rc);
305 }
306#endif
307 if (RT_SUCCESS(rc))
308 {
309 /*
310 * Debug info and statistics.
311 */
312 DBGFR3InfoRegisterInternal(pVM, "fflags", "Displays the current Forced actions Flags.", vmmR3InfoFF);
313 vmmR3InitRegisterStats(pVM);
314 vmmInitFormatTypes();
315
316 return VINF_SUCCESS;
317 }
318 }
319 /** @todo Need failure cleanup. */
320
321 //more todo in here?
322 //if (RT_SUCCESS(rc))
323 //{
324 //}
325 //int rc2 = vmmR3TermCoreCode(pVM);
326 //AssertRC(rc2));
327 }
328
329 return rc;
330}
331
332
333/**
334 * Allocate & set up the VMM RC stack(s) (for EMTs).
335 *
336 * The stacks are also used for long jumps in Ring-0.
337 *
338 * @returns VBox status code.
339 * @param pVM The cross context VM structure.
340 *
341 * @remarks The optional guard page gets its protection set up during R3 init
342 * completion because of init order issues.
343 */
344static int vmmR3InitStacks(PVM pVM)
345{
346 int rc = VINF_SUCCESS;
347#ifdef VMM_R0_SWITCH_STACK
348 uint32_t fFlags = MMHYPER_AONR_FLAGS_KERNEL_MAPPING;
349#else
350 uint32_t fFlags = 0;
351#endif
352
353 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
354 {
355 PVMCPU pVCpu = &pVM->aCpus[idCpu];
356
357#ifdef VBOX_STRICT_VMM_STACK
358 rc = MMR3HyperAllocOnceNoRelEx(pVM, PAGE_SIZE + VMM_STACK_SIZE + PAGE_SIZE,
359#else
360 rc = MMR3HyperAllocOnceNoRelEx(pVM, VMM_STACK_SIZE,
361#endif
362 PAGE_SIZE, MM_TAG_VMM, fFlags, (void **)&pVCpu->vmm.s.pbEMTStackR3);
363 if (RT_SUCCESS(rc))
364 {
365#ifdef VBOX_STRICT_VMM_STACK
366 pVCpu->vmm.s.pbEMTStackR3 += PAGE_SIZE;
367#endif
368#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
369 /* MMHyperR3ToR0 returns R3 when not doing hardware assisted virtualization. */
370 if (VM_IS_RAW_MODE_ENABLED(pVM))
371 pVCpu->vmm.s.CallRing3JmpBufR0.pvSavedStack = NIL_RTR0PTR;
372 else
373#endif
374 pVCpu->vmm.s.CallRing3JmpBufR0.pvSavedStack = MMHyperR3ToR0(pVM, pVCpu->vmm.s.pbEMTStackR3);
375 pVCpu->vmm.s.pbEMTStackRC = MMHyperR3ToRC(pVM, pVCpu->vmm.s.pbEMTStackR3);
376 pVCpu->vmm.s.pbEMTStackBottomRC = pVCpu->vmm.s.pbEMTStackRC + VMM_STACK_SIZE;
377 AssertRelease(pVCpu->vmm.s.pbEMTStackRC);
378
379 CPUMSetHyperESP(pVCpu, pVCpu->vmm.s.pbEMTStackBottomRC);
380 }
381 }
382
383 return rc;
384}
385
386
387/**
388 * Initialize the loggers.
389 *
390 * @returns VBox status code.
391 * @param pVM The cross context VM structure.
392 */
393static int vmmR3InitLoggers(PVM pVM)
394{
395 int rc;
396#define RTLogCalcSizeForR0(cGroups, fFlags) (RT_UOFFSETOF_DYN(VMMR0LOGGER, Logger.afGroups[cGroups]) + PAGE_SIZE)
397
398 /*
399 * Allocate RC & R0 Logger instances (they are finalized in the relocator).
400 */
401#ifdef LOG_ENABLED
402 PRTLOGGER pLogger = RTLogDefaultInstance();
403 if (pLogger)
404 {
405 if (VM_IS_RAW_MODE_ENABLED(pVM))
406 {
407 pVM->vmm.s.cbRCLogger = RT_UOFFSETOF_DYN(RTLOGGERRC, afGroups[pLogger->cGroups]);
408 rc = MMR3HyperAllocOnceNoRel(pVM, pVM->vmm.s.cbRCLogger, 0, MM_TAG_VMM, (void **)&pVM->vmm.s.pRCLoggerR3);
409 if (RT_FAILURE(rc))
410 return rc;
411 pVM->vmm.s.pRCLoggerRC = MMHyperR3ToRC(pVM, pVM->vmm.s.pRCLoggerR3);
412 }
413
414# ifdef VBOX_WITH_R0_LOGGING
415 size_t const cbLogger = RTLogCalcSizeForR0(pLogger->cGroups, 0);
416 for (VMCPUID i = 0; i < pVM->cCpus; i++)
417 {
418 PVMCPU pVCpu = &pVM->aCpus[i];
419 rc = MMR3HyperAllocOnceNoRelEx(pVM, cbLogger, PAGE_SIZE, MM_TAG_VMM, MMHYPER_AONR_FLAGS_KERNEL_MAPPING,
420 (void **)&pVCpu->vmm.s.pR0LoggerR3);
421 if (RT_FAILURE(rc))
422 return rc;
423 pVCpu->vmm.s.pR0LoggerR3->pVM = pVM->pVMR0;
424 //pVCpu->vmm.s.pR0LoggerR3->fCreated = false;
425 pVCpu->vmm.s.pR0LoggerR3->cbLogger = (uint32_t)cbLogger;
426 pVCpu->vmm.s.pR0LoggerR0 = MMHyperR3ToR0(pVM, pVCpu->vmm.s.pR0LoggerR3);
427 }
428# endif
429 }
430#endif /* LOG_ENABLED */
431
432 /*
433 * Release logging.
434 */
435 PRTLOGGER pRelLogger = RTLogRelGetDefaultInstance();
436 if (pRelLogger)
437 {
438#ifdef VBOX_WITH_RC_RELEASE_LOGGING
439 /*
440 * Allocate RC release logger instances (finalized in the relocator).
441 */
442 if (VM_IS_RAW_MODE_ENABLED(pVM))
443 {
444 pVM->vmm.s.cbRCRelLogger = RT_UOFFSETOF_DYN(RTLOGGERRC, afGroups[pRelLogger->cGroups]);
445 rc = MMR3HyperAllocOnceNoRel(pVM, pVM->vmm.s.cbRCRelLogger, 0, MM_TAG_VMM, (void **)&pVM->vmm.s.pRCRelLoggerR3);
446 if (RT_FAILURE(rc))
447 return rc;
448 pVM->vmm.s.pRCRelLoggerRC = MMHyperR3ToRC(pVM, pVM->vmm.s.pRCRelLoggerR3);
449 }
450#endif
451
452 /*
453 * Ring-0 release logger.
454 */
455 RTR0PTR pfnLoggerWrapper = NIL_RTR0PTR;
456 rc = PDMR3LdrGetSymbolR0(pVM, VMMR0_MAIN_MODULE_NAME, "vmmR0LoggerWrapper", &pfnLoggerWrapper);
457 AssertReleaseMsgRCReturn(rc, ("vmmR0LoggerWrapper not found! rc=%Rra\n", rc), rc);
458
459 RTR0PTR pfnLoggerFlush = NIL_RTR0PTR;
460 rc = PDMR3LdrGetSymbolR0(pVM, VMMR0_MAIN_MODULE_NAME, "vmmR0LoggerFlush", &pfnLoggerFlush);
461 AssertReleaseMsgRCReturn(rc, ("vmmR0LoggerFlush not found! rc=%Rra\n", rc), rc);
462
463 size_t const cbLogger = RTLogCalcSizeForR0(pRelLogger->cGroups, 0);
464
465 for (VMCPUID i = 0; i < pVM->cCpus; i++)
466 {
467 PVMCPU pVCpu = &pVM->aCpus[i];
468 rc = MMR3HyperAllocOnceNoRelEx(pVM, cbLogger, PAGE_SIZE, MM_TAG_VMM, MMHYPER_AONR_FLAGS_KERNEL_MAPPING,
469 (void **)&pVCpu->vmm.s.pR0RelLoggerR3);
470 if (RT_FAILURE(rc))
471 return rc;
472 PVMMR0LOGGER pVmmLogger = pVCpu->vmm.s.pR0RelLoggerR3;
473 RTR0PTR R0PtrVmmLogger = MMHyperR3ToR0(pVM, pVmmLogger);
474 pVCpu->vmm.s.pR0RelLoggerR0 = R0PtrVmmLogger;
475 pVmmLogger->pVM = pVM->pVMR0;
476 pVmmLogger->cbLogger = (uint32_t)cbLogger;
477 pVmmLogger->fCreated = false;
478 pVmmLogger->fFlushingDisabled = false;
479 pVmmLogger->fRegistered = false;
480 pVmmLogger->idCpu = i;
481
482 char szR0ThreadName[16];
483 RTStrPrintf(szR0ThreadName, sizeof(szR0ThreadName), "EMT-%u-R0", i);
484 rc = RTLogCreateForR0(&pVmmLogger->Logger, pVmmLogger->cbLogger, R0PtrVmmLogger + RT_UOFFSETOF(VMMR0LOGGER, Logger),
485 pfnLoggerWrapper, pfnLoggerFlush,
486 RTLOGFLAGS_BUFFERED, RTLOGDEST_DUMMY, szR0ThreadName);
487 AssertReleaseMsgRCReturn(rc, ("RTLogCreateForR0 failed! rc=%Rra\n", rc), rc);
488
489 /* We only update the release log instance here. */
490 rc = RTLogCopyGroupsAndFlagsForR0(&pVmmLogger->Logger, R0PtrVmmLogger + RT_UOFFSETOF(VMMR0LOGGER, Logger),
491 pRelLogger, RTLOGFLAGS_BUFFERED, UINT32_MAX);
492 AssertReleaseMsgRCReturn(rc, ("RTLogCopyGroupsAndFlagsForR0 failed! rc=%Rra\n", rc), rc);
493
494 pVmmLogger->fCreated = true;
495 }
496 }
497
498 return VINF_SUCCESS;
499}
500
501
502/**
503 * VMMR3Init worker that registers the statistics with STAM.
504 *
505 * @param pVM The cross context VM structure.
506 */
507static void vmmR3InitRegisterStats(PVM pVM)
508{
509 RT_NOREF_PV(pVM);
510
511 /*
512 * Statistics.
513 */
514 STAM_REG(pVM, &pVM->vmm.s.StatRunRC, STAMTYPE_COUNTER, "/VMM/RunRC", STAMUNIT_OCCURENCES, "Number of context switches.");
515 STAM_REG(pVM, &pVM->vmm.s.StatRZRetNormal, STAMTYPE_COUNTER, "/VMM/RZRet/Normal", STAMUNIT_OCCURENCES, "Number of VINF_SUCCESS returns.");
516 STAM_REG(pVM, &pVM->vmm.s.StatRZRetInterrupt, STAMTYPE_COUNTER, "/VMM/RZRet/Interrupt", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_INTERRUPT returns.");
517 STAM_REG(pVM, &pVM->vmm.s.StatRZRetInterruptHyper, STAMTYPE_COUNTER, "/VMM/RZRet/InterruptHyper", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_INTERRUPT_HYPER returns.");
518 STAM_REG(pVM, &pVM->vmm.s.StatRZRetGuestTrap, STAMTYPE_COUNTER, "/VMM/RZRet/GuestTrap", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_GUEST_TRAP returns.");
519 STAM_REG(pVM, &pVM->vmm.s.StatRZRetRingSwitch, STAMTYPE_COUNTER, "/VMM/RZRet/RingSwitch", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_RING_SWITCH returns.");
520 STAM_REG(pVM, &pVM->vmm.s.StatRZRetRingSwitchInt, STAMTYPE_COUNTER, "/VMM/RZRet/RingSwitchInt", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_RING_SWITCH_INT returns.");
521 STAM_REG(pVM, &pVM->vmm.s.StatRZRetStaleSelector, STAMTYPE_COUNTER, "/VMM/RZRet/StaleSelector", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_STALE_SELECTOR returns.");
522 STAM_REG(pVM, &pVM->vmm.s.StatRZRetIRETTrap, STAMTYPE_COUNTER, "/VMM/RZRet/IRETTrap", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_IRET_TRAP returns.");
523 STAM_REG(pVM, &pVM->vmm.s.StatRZRetEmulate, STAMTYPE_COUNTER, "/VMM/RZRet/Emulate", STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION returns.");
524 STAM_REG(pVM, &pVM->vmm.s.StatRZRetPatchEmulate, STAMTYPE_COUNTER, "/VMM/RZRet/PatchEmulate", STAMUNIT_OCCURENCES, "Number of VINF_PATCH_EMULATE_INSTR returns.");
525 STAM_REG(pVM, &pVM->vmm.s.StatRZRetIORead, STAMTYPE_COUNTER, "/VMM/RZRet/IORead", STAMUNIT_OCCURENCES, "Number of VINF_IOM_R3_IOPORT_READ returns.");
526 STAM_REG(pVM, &pVM->vmm.s.StatRZRetIOWrite, STAMTYPE_COUNTER, "/VMM/RZRet/IOWrite", STAMUNIT_OCCURENCES, "Number of VINF_IOM_R3_IOPORT_WRITE returns.");
527 STAM_REG(pVM, &pVM->vmm.s.StatRZRetIOCommitWrite, STAMTYPE_COUNTER, "/VMM/RZRet/IOCommitWrite", STAMUNIT_OCCURENCES, "Number of VINF_IOM_R3_IOPORT_COMMIT_WRITE returns.");
528 STAM_REG(pVM, &pVM->vmm.s.StatRZRetMMIORead, STAMTYPE_COUNTER, "/VMM/RZRet/MMIORead", STAMUNIT_OCCURENCES, "Number of VINF_IOM_R3_MMIO_READ returns.");
529 STAM_REG(pVM, &pVM->vmm.s.StatRZRetMMIOWrite, STAMTYPE_COUNTER, "/VMM/RZRet/MMIOWrite", STAMUNIT_OCCURENCES, "Number of VINF_IOM_R3_MMIO_WRITE returns.");
530 STAM_REG(pVM, &pVM->vmm.s.StatRZRetMMIOCommitWrite, STAMTYPE_COUNTER, "/VMM/RZRet/MMIOCommitWrite", STAMUNIT_OCCURENCES, "Number of VINF_IOM_R3_MMIO_COMMIT_WRITE returns.");
531 STAM_REG(pVM, &pVM->vmm.s.StatRZRetMMIOReadWrite, STAMTYPE_COUNTER, "/VMM/RZRet/MMIOReadWrite", STAMUNIT_OCCURENCES, "Number of VINF_IOM_R3_MMIO_READ_WRITE returns.");
532 STAM_REG(pVM, &pVM->vmm.s.StatRZRetMMIOPatchRead, STAMTYPE_COUNTER, "/VMM/RZRet/MMIOPatchRead", STAMUNIT_OCCURENCES, "Number of VINF_IOM_HC_MMIO_PATCH_READ returns.");
533 STAM_REG(pVM, &pVM->vmm.s.StatRZRetMMIOPatchWrite, STAMTYPE_COUNTER, "/VMM/RZRet/MMIOPatchWrite", STAMUNIT_OCCURENCES, "Number of VINF_IOM_HC_MMIO_PATCH_WRITE returns.");
534 STAM_REG(pVM, &pVM->vmm.s.StatRZRetMSRRead, STAMTYPE_COUNTER, "/VMM/RZRet/MSRRead", STAMUNIT_OCCURENCES, "Number of VINF_CPUM_R3_MSR_READ returns.");
535 STAM_REG(pVM, &pVM->vmm.s.StatRZRetMSRWrite, STAMTYPE_COUNTER, "/VMM/RZRet/MSRWrite", STAMUNIT_OCCURENCES, "Number of VINF_CPUM_R3_MSR_WRITE returns.");
536 STAM_REG(pVM, &pVM->vmm.s.StatRZRetLDTFault, STAMTYPE_COUNTER, "/VMM/RZRet/LDTFault", STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION_GDT_FAULT returns.");
537 STAM_REG(pVM, &pVM->vmm.s.StatRZRetGDTFault, STAMTYPE_COUNTER, "/VMM/RZRet/GDTFault", STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION_LDT_FAULT returns.");
538 STAM_REG(pVM, &pVM->vmm.s.StatRZRetIDTFault, STAMTYPE_COUNTER, "/VMM/RZRet/IDTFault", STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION_IDT_FAULT returns.");
539 STAM_REG(pVM, &pVM->vmm.s.StatRZRetTSSFault, STAMTYPE_COUNTER, "/VMM/RZRet/TSSFault", STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION_TSS_FAULT returns.");
540 STAM_REG(pVM, &pVM->vmm.s.StatRZRetCSAMTask, STAMTYPE_COUNTER, "/VMM/RZRet/CSAMTask", STAMUNIT_OCCURENCES, "Number of VINF_CSAM_PENDING_ACTION returns.");
541 STAM_REG(pVM, &pVM->vmm.s.StatRZRetSyncCR3, STAMTYPE_COUNTER, "/VMM/RZRet/SyncCR", STAMUNIT_OCCURENCES, "Number of VINF_PGM_SYNC_CR3 returns.");
542 STAM_REG(pVM, &pVM->vmm.s.StatRZRetMisc, STAMTYPE_COUNTER, "/VMM/RZRet/Misc", STAMUNIT_OCCURENCES, "Number of misc returns.");
543 STAM_REG(pVM, &pVM->vmm.s.StatRZRetPatchInt3, STAMTYPE_COUNTER, "/VMM/RZRet/PatchInt3", STAMUNIT_OCCURENCES, "Number of VINF_PATM_PATCH_INT3 returns.");
544 STAM_REG(pVM, &pVM->vmm.s.StatRZRetPatchPF, STAMTYPE_COUNTER, "/VMM/RZRet/PatchPF", STAMUNIT_OCCURENCES, "Number of VINF_PATM_PATCH_TRAP_PF returns.");
545 STAM_REG(pVM, &pVM->vmm.s.StatRZRetPatchGP, STAMTYPE_COUNTER, "/VMM/RZRet/PatchGP", STAMUNIT_OCCURENCES, "Number of VINF_PATM_PATCH_TRAP_GP returns.");
546 STAM_REG(pVM, &pVM->vmm.s.StatRZRetPatchIretIRQ, STAMTYPE_COUNTER, "/VMM/RZRet/PatchIret", STAMUNIT_OCCURENCES, "Number of VINF_PATM_PENDING_IRQ_AFTER_IRET returns.");
547 STAM_REG(pVM, &pVM->vmm.s.StatRZRetRescheduleREM, STAMTYPE_COUNTER, "/VMM/RZRet/ScheduleREM", STAMUNIT_OCCURENCES, "Number of VINF_EM_RESCHEDULE_REM returns.");
548 STAM_REG(pVM, &pVM->vmm.s.StatRZRetToR3Total, STAMTYPE_COUNTER, "/VMM/RZRet/ToR3", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TO_R3 returns.");
549 STAM_REG(pVM, &pVM->vmm.s.StatRZRetToR3Unknown, STAMTYPE_COUNTER, "/VMM/RZRet/ToR3/Unknown", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TO_R3 returns without responsible force flag.");
550 STAM_REG(pVM, &pVM->vmm.s.StatRZRetToR3FF, STAMTYPE_COUNTER, "/VMM/RZRet/ToR3/ToR3", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TO_R3 returns with VMCPU_FF_TO_R3.");
551 STAM_REG(pVM, &pVM->vmm.s.StatRZRetToR3TMVirt, STAMTYPE_COUNTER, "/VMM/RZRet/ToR3/TMVirt", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TO_R3 returns with VM_FF_TM_VIRTUAL_SYNC.");
552 STAM_REG(pVM, &pVM->vmm.s.StatRZRetToR3HandyPages, STAMTYPE_COUNTER, "/VMM/RZRet/ToR3/Handy", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TO_R3 returns with VM_FF_PGM_NEED_HANDY_PAGES.");
553 STAM_REG(pVM, &pVM->vmm.s.StatRZRetToR3PDMQueues, STAMTYPE_COUNTER, "/VMM/RZRet/ToR3/PDMQueue", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TO_R3 returns with VM_FF_PDM_QUEUES.");
554 STAM_REG(pVM, &pVM->vmm.s.StatRZRetToR3Rendezvous, STAMTYPE_COUNTER, "/VMM/RZRet/ToR3/Rendezvous", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TO_R3 returns with VM_FF_EMT_RENDEZVOUS.");
555 STAM_REG(pVM, &pVM->vmm.s.StatRZRetToR3Timer, STAMTYPE_COUNTER, "/VMM/RZRet/ToR3/Timer", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TO_R3 returns with VMCPU_FF_TIMER.");
556 STAM_REG(pVM, &pVM->vmm.s.StatRZRetToR3DMA, STAMTYPE_COUNTER, "/VMM/RZRet/ToR3/DMA", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TO_R3 returns with VM_FF_PDM_DMA.");
557 STAM_REG(pVM, &pVM->vmm.s.StatRZRetToR3CritSect, STAMTYPE_COUNTER, "/VMM/RZRet/ToR3/CritSect", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TO_R3 returns with VMCPU_FF_PDM_CRITSECT.");
558 STAM_REG(pVM, &pVM->vmm.s.StatRZRetToR3Iem, STAMTYPE_COUNTER, "/VMM/RZRet/ToR3/IEM", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TO_R3 returns with VMCPU_FF_IEM.");
559 STAM_REG(pVM, &pVM->vmm.s.StatRZRetToR3Iom, STAMTYPE_COUNTER, "/VMM/RZRet/ToR3/IOM", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TO_R3 returns with VMCPU_FF_IOM.");
560 STAM_REG(pVM, &pVM->vmm.s.StatRZRetTimerPending, STAMTYPE_COUNTER, "/VMM/RZRet/TimerPending", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TIMER_PENDING returns.");
561 STAM_REG(pVM, &pVM->vmm.s.StatRZRetInterruptPending, STAMTYPE_COUNTER, "/VMM/RZRet/InterruptPending", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_INTERRUPT_PENDING returns.");
562 STAM_REG(pVM, &pVM->vmm.s.StatRZRetPATMDuplicateFn, STAMTYPE_COUNTER, "/VMM/RZRet/PATMDuplicateFn", STAMUNIT_OCCURENCES, "Number of VINF_PATM_DUPLICATE_FUNCTION returns.");
563 STAM_REG(pVM, &pVM->vmm.s.StatRZRetPGMChangeMode, STAMTYPE_COUNTER, "/VMM/RZRet/PGMChangeMode", STAMUNIT_OCCURENCES, "Number of VINF_PGM_CHANGE_MODE returns.");
564 STAM_REG(pVM, &pVM->vmm.s.StatRZRetPGMFlushPending, STAMTYPE_COUNTER, "/VMM/RZRet/PGMFlushPending", STAMUNIT_OCCURENCES, "Number of VINF_PGM_POOL_FLUSH_PENDING returns.");
565 STAM_REG(pVM, &pVM->vmm.s.StatRZRetPendingRequest, STAMTYPE_COUNTER, "/VMM/RZRet/PendingRequest", STAMUNIT_OCCURENCES, "Number of VINF_EM_PENDING_REQUEST returns.");
566 STAM_REG(pVM, &pVM->vmm.s.StatRZRetPatchTPR, STAMTYPE_COUNTER, "/VMM/RZRet/PatchTPR", STAMUNIT_OCCURENCES, "Number of VINF_EM_HM_PATCH_TPR_INSTR returns.");
567 STAM_REG(pVM, &pVM->vmm.s.StatRZRetCallRing3, STAMTYPE_COUNTER, "/VMM/RZCallR3/Misc", STAMUNIT_OCCURENCES, "Number of Other ring-3 calls.");
568 STAM_REG(pVM, &pVM->vmm.s.StatRZCallPDMLock, STAMTYPE_COUNTER, "/VMM/RZCallR3/PDMLock", STAMUNIT_OCCURENCES, "Number of VMMCALLRING3_PDM_LOCK calls.");
569 STAM_REG(pVM, &pVM->vmm.s.StatRZCallPDMCritSectEnter, STAMTYPE_COUNTER, "/VMM/RZCallR3/PDMCritSectEnter", STAMUNIT_OCCURENCES, "Number of VMMCALLRING3_PDM_CRITSECT_ENTER calls.");
570 STAM_REG(pVM, &pVM->vmm.s.StatRZCallPGMLock, STAMTYPE_COUNTER, "/VMM/RZCallR3/PGMLock", STAMUNIT_OCCURENCES, "Number of VMMCALLRING3_PGM_LOCK calls.");
571 STAM_REG(pVM, &pVM->vmm.s.StatRZCallPGMPoolGrow, STAMTYPE_COUNTER, "/VMM/RZCallR3/PGMPoolGrow", STAMUNIT_OCCURENCES, "Number of VMMCALLRING3_PGM_POOL_GROW calls.");
572 STAM_REG(pVM, &pVM->vmm.s.StatRZCallPGMMapChunk, STAMTYPE_COUNTER, "/VMM/RZCallR3/PGMMapChunk", STAMUNIT_OCCURENCES, "Number of VMMCALLRING3_PGM_MAP_CHUNK calls.");
573 STAM_REG(pVM, &pVM->vmm.s.StatRZCallPGMAllocHandy, STAMTYPE_COUNTER, "/VMM/RZCallR3/PGMAllocHandy", STAMUNIT_OCCURENCES, "Number of VMMCALLRING3_PGM_ALLOCATE_HANDY_PAGES calls.");
574 STAM_REG(pVM, &pVM->vmm.s.StatRZCallRemReplay, STAMTYPE_COUNTER, "/VMM/RZCallR3/REMReplay", STAMUNIT_OCCURENCES, "Number of VMMCALLRING3_REM_REPLAY_HANDLER_NOTIFICATIONS calls.");
575 STAM_REG(pVM, &pVM->vmm.s.StatRZCallLogFlush, STAMTYPE_COUNTER, "/VMM/RZCallR3/VMMLogFlush", STAMUNIT_OCCURENCES, "Number of VMMCALLRING3_VMM_LOGGER_FLUSH calls.");
576 STAM_REG(pVM, &pVM->vmm.s.StatRZCallVMSetError, STAMTYPE_COUNTER, "/VMM/RZCallR3/VMSetError", STAMUNIT_OCCURENCES, "Number of VMMCALLRING3_VM_SET_ERROR calls.");
577 STAM_REG(pVM, &pVM->vmm.s.StatRZCallVMSetRuntimeError, STAMTYPE_COUNTER, "/VMM/RZCallR3/VMRuntimeError", STAMUNIT_OCCURENCES, "Number of VMMCALLRING3_VM_SET_RUNTIME_ERROR calls.");
578
579#ifdef VBOX_WITH_STATISTICS
580 for (VMCPUID i = 0; i < pVM->cCpus; i++)
581 {
582 STAMR3RegisterF(pVM, &pVM->aCpus[i].vmm.s.CallRing3JmpBufR0.cbUsedMax, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Max amount of stack used.", "/VMM/Stack/CPU%u/Max", i);
583 STAMR3RegisterF(pVM, &pVM->aCpus[i].vmm.s.CallRing3JmpBufR0.cbUsedAvg, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Average stack usage.", "/VMM/Stack/CPU%u/Avg", i);
584 STAMR3RegisterF(pVM, &pVM->aCpus[i].vmm.s.CallRing3JmpBufR0.cUsedTotal, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of stack usages.", "/VMM/Stack/CPU%u/Uses", i);
585 }
586#endif
587 for (VMCPUID i = 0; i < pVM->cCpus; i++)
588 {
589 STAMR3RegisterF(pVM, &pVM->aCpus[i].vmm.s.StatR0HaltBlock, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "", "/PROF/CPU%u/VM/Halt/R0HaltBlock", i);
590 STAMR3RegisterF(pVM, &pVM->aCpus[i].vmm.s.StatR0HaltBlockOnTime, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "", "/PROF/CPU%u/VM/Halt/R0HaltBlockOnTime", i);
591 STAMR3RegisterF(pVM, &pVM->aCpus[i].vmm.s.StatR0HaltBlockOverslept, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "", "/PROF/CPU%u/VM/Halt/R0HaltBlockOverslept", i);
592 STAMR3RegisterF(pVM, &pVM->aCpus[i].vmm.s.StatR0HaltBlockInsomnia, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "", "/PROF/CPU%u/VM/Halt/R0HaltBlockInsomnia", i);
593 STAMR3RegisterF(pVM, &pVM->aCpus[i].vmm.s.StatR0HaltExec, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "", "/PROF/CPU%u/VM/Halt/R0HaltExec", i);
594 STAMR3RegisterF(pVM, &pVM->aCpus[i].vmm.s.StatR0HaltExecFromSpin, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "", "/PROF/CPU%u/VM/Halt/R0HaltExec/FromSpin", i);
595 STAMR3RegisterF(pVM, &pVM->aCpus[i].vmm.s.StatR0HaltExecFromBlock, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "", "/PROF/CPU%u/VM/Halt/R0HaltExec/FromBlock", i);
596 STAMR3RegisterF(pVM, &pVM->aCpus[i].vmm.s.cR0Halts, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "", "/PROF/CPU%u/VM/Halt/R0HaltHistoryCounter", i);
597 STAMR3RegisterF(pVM, &pVM->aCpus[i].vmm.s.cR0HaltsSucceeded, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "", "/PROF/CPU%u/VM/Halt/R0HaltHistorySucceeded", i);
598 STAMR3RegisterF(pVM, &pVM->aCpus[i].vmm.s.cR0HaltsToRing3, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "", "/PROF/CPU%u/VM/Halt/R0HaltHistoryToRing3", i);
599 }
600}
601
602
603/**
604 * Worker for VMMR3InitR0 that calls ring-0 to do EMT specific initialization.
605 *
606 * @returns VBox status code.
607 * @param pVM The cross context VM structure.
608 * @param pVCpu The cross context per CPU structure.
609 * @thread EMT(pVCpu)
610 */
611static DECLCALLBACK(int) vmmR3InitR0Emt(PVM pVM, PVMCPU pVCpu)
612{
613 return VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_VMMR0_INIT_EMT, 0, NULL);
614}
615
616
617/**
618 * Initializes the R0 VMM.
619 *
620 * @returns VBox status code.
621 * @param pVM The cross context VM structure.
622 */
623VMMR3_INT_DECL(int) VMMR3InitR0(PVM pVM)
624{
625 int rc;
626 PVMCPU pVCpu = VMMGetCpu(pVM);
627 Assert(pVCpu && pVCpu->idCpu == 0);
628
629#ifdef LOG_ENABLED
630 /*
631 * Initialize the ring-0 logger if we haven't done so yet.
632 */
633 if ( pVCpu->vmm.s.pR0LoggerR3
634 && !pVCpu->vmm.s.pR0LoggerR3->fCreated)
635 {
636 rc = VMMR3UpdateLoggers(pVM);
637 if (RT_FAILURE(rc))
638 return rc;
639 }
640#endif
641
642 /*
643 * Call Ring-0 entry with init code.
644 */
645 for (;;)
646 {
647#ifdef NO_SUPCALLR0VMM
648 //rc = VERR_GENERAL_FAILURE;
649 rc = VINF_SUCCESS;
650#else
651 rc = SUPR3CallVMMR0Ex(pVM->pVMR0, 0 /*idCpu*/, VMMR0_DO_VMMR0_INIT, RT_MAKE_U64(VMMGetSvnRev(), vmmGetBuildType()), NULL);
652#endif
653 /*
654 * Flush the logs.
655 */
656#ifdef LOG_ENABLED
657 VMM_FLUSH_R0_LOG(pVCpu->vmm.s.pR0LoggerR3, NULL);
658#endif
659 VMM_FLUSH_R0_LOG(pVCpu->vmm.s.pR0RelLoggerR3, RTLogRelGetDefaultInstance());
660 if (rc != VINF_VMM_CALL_HOST)
661 break;
662 rc = vmmR3ServiceCallRing3Request(pVM, pVCpu);
663 if (RT_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
664 break;
665 /* Resume R0 */
666 }
667
668 if (RT_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
669 {
670 LogRel(("VMM: R0 init failed, rc=%Rra\n", rc));
671 if (RT_SUCCESS(rc))
672 rc = VERR_IPE_UNEXPECTED_INFO_STATUS;
673 }
674
675 /* Log whether thread-context hooks are used (on Linux this can depend on how the kernel is configured). */
676 if (pVM->aCpus[0].vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
677 LogRel(("VMM: Enabled thread-context hooks\n"));
678 else
679 LogRel(("VMM: Thread-context hooks unavailable\n"));
680
681 /* Log RTThreadPreemptIsPendingTrusty() and RTThreadPreemptIsPossible() results. */
682 if (pVM->vmm.s.fIsPreemptPendingApiTrusty)
683 LogRel(("VMM: RTThreadPreemptIsPending() can be trusted\n"));
684 else
685 LogRel(("VMM: Warning! RTThreadPreemptIsPending() cannot be trusted! Need to update kernel info?\n"));
686 if (pVM->vmm.s.fIsPreemptPossible)
687 LogRel(("VMM: Kernel preemption is possible\n"));
688 else
689 LogRel(("VMM: Kernel preemption is not possible it seems\n"));
690
691 /*
692 * Send all EMTs to ring-0 to get their logger initialized.
693 */
694 for (VMCPUID idCpu = 0; RT_SUCCESS(rc) && idCpu < pVM->cCpus; idCpu++)
695 rc = VMR3ReqCallWait(pVM, idCpu, (PFNRT)vmmR3InitR0Emt, 2, pVM, &pVM->aCpus[idCpu]);
696
697 return rc;
698}
699
700
701#ifdef VBOX_WITH_RAW_MODE
702/**
703 * Initializes the RC VMM.
704 *
705 * @returns VBox status code.
706 * @param pVM The cross context VM structure.
707 */
708VMMR3_INT_DECL(int) VMMR3InitRC(PVM pVM)
709{
710 PVMCPU pVCpu = VMMGetCpu(pVM);
711 Assert(pVCpu && pVCpu->idCpu == 0);
712
713 /* When raw mode isn't enabled (i.e. we're using HM or NEM), there's no need to init RC. */
714 if (!VM_IS_RAW_MODE_ENABLED(pVM))
715 return VINF_SUCCESS;
716
717 AssertReturn(pVM->cCpus == 1, VERR_RAW_MODE_INVALID_SMP);
718
719 /*
720 * Call VMMRCInit():
721 * -# resolve the address.
722 * -# setup stackframe and EIP to use the trampoline.
723 * -# do a generic hypervisor call.
724 */
725 RTRCPTR RCPtrEP;
726 int rc = PDMR3LdrGetSymbolRC(pVM, VMMRC_MAIN_MODULE_NAME, "VMMRCEntry", &RCPtrEP);
727 if (RT_SUCCESS(rc))
728 {
729 CPUMSetHyperESP(pVCpu, pVCpu->vmm.s.pbEMTStackBottomRC); /* Clear the stack. */
730 uint64_t u64TS = RTTimeProgramStartNanoTS();
731 CPUMPushHyper(pVCpu, RT_HI_U32(u64TS)); /* Param 4: The program startup TS - Hi. */
732 CPUMPushHyper(pVCpu, RT_LO_U32(u64TS)); /* Param 4: The program startup TS - Lo. */
733 CPUMPushHyper(pVCpu, vmmGetBuildType()); /* Param 3: Version argument. */
734 CPUMPushHyper(pVCpu, VMMGetSvnRev()); /* Param 2: Version argument. */
735 CPUMPushHyper(pVCpu, VMMRC_DO_VMMRC_INIT); /* Param 1: Operation. */
736 CPUMPushHyper(pVCpu, pVM->pVMRC); /* Param 0: pVM */
737 CPUMPushHyper(pVCpu, 6 * sizeof(RTRCPTR)); /* trampoline param: stacksize. */
738 CPUMPushHyper(pVCpu, RCPtrEP); /* Call EIP. */
739 CPUMSetHyperEIP(pVCpu, pVM->vmm.s.pfnCallTrampolineRC);
740 Assert(CPUMGetHyperCR3(pVCpu) && CPUMGetHyperCR3(pVCpu) == PGMGetHyperCR3(pVCpu));
741
742 for (;;)
743 {
744#ifdef NO_SUPCALLR0VMM
745 //rc = VERR_GENERAL_FAILURE;
746 rc = VINF_SUCCESS;
747#else
748 rc = SUPR3CallVMMR0(pVM->pVMR0, 0 /* VCPU 0 */, VMMR0_DO_CALL_HYPERVISOR, NULL);
749#endif
750#ifdef LOG_ENABLED
751 PRTLOGGERRC pLogger = pVM->vmm.s.pRCLoggerR3;
752 if ( pLogger
753 && pLogger->offScratch > 0)
754 RTLogFlushRC(NULL, pLogger);
755#endif
756#ifdef VBOX_WITH_RC_RELEASE_LOGGING
757 PRTLOGGERRC pRelLogger = pVM->vmm.s.pRCRelLoggerR3;
758 if (RT_UNLIKELY(pRelLogger && pRelLogger->offScratch > 0))
759 RTLogFlushRC(RTLogRelGetDefaultInstance(), pRelLogger);
760#endif
761 if (rc != VINF_VMM_CALL_HOST)
762 break;
763 rc = vmmR3ServiceCallRing3Request(pVM, pVCpu);
764 if (RT_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
765 break;
766 }
767
768 /* Don't trigger assertions or guru if raw-mode is unavailable. */
769 if (rc != VERR_SUPDRV_NO_RAW_MODE_HYPER_V_ROOT)
770 {
771 if (RT_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
772 {
773 VMMR3FatalDump(pVM, pVCpu, rc);
774 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
775 rc = VERR_IPE_UNEXPECTED_INFO_STATUS;
776 }
777 AssertRC(rc);
778 }
779 }
780 return rc;
781}
782#endif /* VBOX_WITH_RAW_MODE */
783
784
785/**
786 * Called when an init phase completes.
787 *
788 * @returns VBox status code.
789 * @param pVM The cross context VM structure.
790 * @param enmWhat Which init phase.
791 */
792VMMR3_INT_DECL(int) VMMR3InitCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
793{
794 int rc = VINF_SUCCESS;
795
796 switch (enmWhat)
797 {
798 case VMINITCOMPLETED_RING3:
799 {
800 /*
801 * Set page attributes to r/w for stack pages.
802 */
803 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
804 {
805 rc = PGMMapSetPage(pVM, pVM->aCpus[idCpu].vmm.s.pbEMTStackRC, VMM_STACK_SIZE,
806 X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
807 AssertRCReturn(rc, rc);
808 }
809
810 /*
811 * Create the EMT yield timer.
812 */
813 rc = TMR3TimerCreateInternal(pVM, TMCLOCK_REAL, vmmR3YieldEMT, NULL, "EMT Yielder", &pVM->vmm.s.pYieldTimer);
814 AssertRCReturn(rc, rc);
815
816 rc = TMTimerSetMillies(pVM->vmm.s.pYieldTimer, pVM->vmm.s.cYieldEveryMillies);
817 AssertRCReturn(rc, rc);
818
819#ifdef VBOX_WITH_NMI
820 /*
821 * Map the host APIC into GC - This is AMD/Intel + Host OS specific!
822 */
823 rc = PGMMap(pVM, pVM->vmm.s.GCPtrApicBase, 0xfee00000, PAGE_SIZE,
824 X86_PTE_P | X86_PTE_RW | X86_PTE_PWT | X86_PTE_PCD | X86_PTE_A | X86_PTE_D);
825 AssertRCReturn(rc, rc);
826#endif
827
828#ifdef VBOX_STRICT_VMM_STACK
829 /*
830 * Set up the stack guard pages: two inaccessible pages, one on each side of
831 * the stack, to catch over/under-flows.
832 */
833 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
834 {
835 uint8_t *pbEMTStackR3 = pVM->aCpus[idCpu].vmm.s.pbEMTStackR3;
836
837 memset(pbEMTStackR3 - PAGE_SIZE, 0xcc, PAGE_SIZE);
838 MMR3HyperSetGuard(pVM, pbEMTStackR3 - PAGE_SIZE, PAGE_SIZE, true /*fSet*/);
839
840 memset(pbEMTStackR3 + VMM_STACK_SIZE, 0xcc, PAGE_SIZE);
841 MMR3HyperSetGuard(pVM, pbEMTStackR3 + VMM_STACK_SIZE, PAGE_SIZE, true /*fSet*/);
842 }
843 pVM->vmm.s.fStackGuardsStationed = true;
844#endif
845 break;
846 }
847
848 case VMINITCOMPLETED_HM:
849 {
850 /*
851 * Disable the periodic preemption timers if we can use the
852 * VMX-preemption timer instead.
853 */
854 if ( pVM->vmm.s.fUsePeriodicPreemptionTimers
855 && HMR3IsVmxPreemptionTimerUsed(pVM))
856 pVM->vmm.s.fUsePeriodicPreemptionTimers = false;
857 LogRel(("VMM: fUsePeriodicPreemptionTimers=%RTbool\n", pVM->vmm.s.fUsePeriodicPreemptionTimers));
858
859 /*
860 * Last chance for GIM to update its CPUID leaves if it requires
861 * knowledge/information from HM initialization.
862 */
863 rc = GIMR3InitCompleted(pVM);
864 AssertRCReturn(rc, rc);
865
866 /*
867 * CPUM's post-initialization (print CPUIDs).
868 */
869 CPUMR3LogCpuIdAndMsrFeatures(pVM);
870 break;
871 }
872
873 default: /* shuts up gcc */
874 break;
875 }
876
877 return rc;
878}
879
880
881/**
882 * Terminate the VMM bits.
883 *
884 * @returns VBox status code.
885 * @param pVM The cross context VM structure.
886 */
887VMMR3_INT_DECL(int) VMMR3Term(PVM pVM)
888{
889 PVMCPU pVCpu = VMMGetCpu(pVM);
890 Assert(pVCpu && pVCpu->idCpu == 0);
891
892 /*
893 * Call Ring-0 entry with termination code.
894 */
895 int rc;
896 for (;;)
897 {
898#ifdef NO_SUPCALLR0VMM
899 //rc = VERR_GENERAL_FAILURE;
900 rc = VINF_SUCCESS;
901#else
902 rc = SUPR3CallVMMR0Ex(pVM->pVMR0, 0 /*idCpu*/, VMMR0_DO_VMMR0_TERM, 0, NULL);
903#endif
904 /*
905 * Flush the logs.
906 */
907#ifdef LOG_ENABLED
908 VMM_FLUSH_R0_LOG(pVCpu->vmm.s.pR0LoggerR3, NULL);
909#endif
910 VMM_FLUSH_R0_LOG(pVCpu->vmm.s.pR0RelLoggerR3, RTLogRelGetDefaultInstance());
911 if (rc != VINF_VMM_CALL_HOST)
912 break;
913 rc = vmmR3ServiceCallRing3Request(pVM, pVCpu);
914 if (RT_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
915 break;
916 /* Resume R0 */
917 }
918 if (RT_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
919 {
920 LogRel(("VMM: VMMR3Term: R0 term failed, rc=%Rra. (warning)\n", rc));
921 if (RT_SUCCESS(rc))
922 rc = VERR_IPE_UNEXPECTED_INFO_STATUS;
923 }
924
925 for (VMCPUID i = 0; i < pVM->cCpus; i++)
926 {
927 RTSemEventDestroy(pVM->vmm.s.pahEvtRendezvousEnterOrdered[i]);
928 pVM->vmm.s.pahEvtRendezvousEnterOrdered[i] = NIL_RTSEMEVENT;
929 }
930 RTSemEventDestroy(pVM->vmm.s.hEvtRendezvousEnterOneByOne);
931 pVM->vmm.s.hEvtRendezvousEnterOneByOne = NIL_RTSEMEVENT;
932 RTSemEventMultiDestroy(pVM->vmm.s.hEvtMulRendezvousEnterAllAtOnce);
933 pVM->vmm.s.hEvtMulRendezvousEnterAllAtOnce = NIL_RTSEMEVENTMULTI;
934 RTSemEventMultiDestroy(pVM->vmm.s.hEvtMulRendezvousDone);
935 pVM->vmm.s.hEvtMulRendezvousDone = NIL_RTSEMEVENTMULTI;
936 RTSemEventDestroy(pVM->vmm.s.hEvtRendezvousDoneCaller);
937 pVM->vmm.s.hEvtRendezvousDoneCaller = NIL_RTSEMEVENT;
938 RTSemEventMultiDestroy(pVM->vmm.s.hEvtMulRendezvousRecursionPush);
939 pVM->vmm.s.hEvtMulRendezvousRecursionPush = NIL_RTSEMEVENTMULTI;
940 RTSemEventMultiDestroy(pVM->vmm.s.hEvtMulRendezvousRecursionPop);
941 pVM->vmm.s.hEvtMulRendezvousRecursionPop = NIL_RTSEMEVENTMULTI;
942 RTSemEventDestroy(pVM->vmm.s.hEvtRendezvousRecursionPushCaller);
943 pVM->vmm.s.hEvtRendezvousRecursionPushCaller = NIL_RTSEMEVENT;
944 RTSemEventDestroy(pVM->vmm.s.hEvtRendezvousRecursionPopCaller);
945 pVM->vmm.s.hEvtRendezvousRecursionPopCaller = NIL_RTSEMEVENT;
946
947#ifdef VBOX_STRICT_VMM_STACK
948 /*
949 * Make the two stack guard pages present again.
950 */
951 if (pVM->vmm.s.fStackGuardsStationed)
952 {
953 for (VMCPUID i = 0; i < pVM->cCpus; i++)
954 {
955 uint8_t *pbEMTStackR3 = pVM->aCpus[i].vmm.s.pbEMTStackR3;
956 MMR3HyperSetGuard(pVM, pbEMTStackR3 - PAGE_SIZE, PAGE_SIZE, false /*fSet*/);
957 MMR3HyperSetGuard(pVM, pbEMTStackR3 + VMM_STACK_SIZE, PAGE_SIZE, false /*fSet*/);
958 }
959 pVM->vmm.s.fStackGuardsStationed = false;
960 }
961#endif
962
963 vmmTermFormatTypes();
964 return rc;
965}
966
967
968/**
969 * Applies relocations to data and code managed by this
970 * component. This function will be called at init and
971 * whenever the VMM needs to relocate itself inside the GC.
972 *
973 * The VMM will need to apply relocations to the core code.
974 *
975 * @param pVM The cross context VM structure.
976 * @param offDelta The relocation delta.
977 */
978VMMR3_INT_DECL(void) VMMR3Relocate(PVM pVM, RTGCINTPTR offDelta)
979{
980 LogFlow(("VMMR3Relocate: offDelta=%RGv\n", offDelta));
981
982 /*
983 * Recalc the RC address.
984 */
985#ifdef VBOX_WITH_RAW_MODE
986 pVM->vmm.s.pvCoreCodeRC = MMHyperR3ToRC(pVM, pVM->vmm.s.pvCoreCodeR3);
987#endif
988
989 /*
990 * The stack.
991 */
992 for (VMCPUID i = 0; i < pVM->cCpus; i++)
993 {
994 PVMCPU pVCpu = &pVM->aCpus[i];
995
996 CPUMSetHyperESP(pVCpu, CPUMGetHyperESP(pVCpu) + offDelta);
997
998 pVCpu->vmm.s.pbEMTStackRC = MMHyperR3ToRC(pVM, pVCpu->vmm.s.pbEMTStackR3);
999 pVCpu->vmm.s.pbEMTStackBottomRC = pVCpu->vmm.s.pbEMTStackRC + VMM_STACK_SIZE;
1000 }
1001
1002 /*
1003 * All the switchers.
1004 */
1005 vmmR3SwitcherRelocate(pVM, offDelta);
1006
1007 /*
1008 * Get other RC entry points.
1009 */
1010 if (VM_IS_RAW_MODE_ENABLED(pVM))
1011 {
1012 int rc = PDMR3LdrGetSymbolRC(pVM, VMMRC_MAIN_MODULE_NAME, "CPUMGCResumeGuest", &pVM->vmm.s.pfnCPUMRCResumeGuest);
1013 AssertReleaseMsgRC(rc, ("CPUMGCResumeGuest not found! rc=%Rra\n", rc));
1014
1015 rc = PDMR3LdrGetSymbolRC(pVM, VMMRC_MAIN_MODULE_NAME, "CPUMGCResumeGuestV86", &pVM->vmm.s.pfnCPUMRCResumeGuestV86);
1016 AssertReleaseMsgRC(rc, ("CPUMGCResumeGuestV86 not found! rc=%Rra\n", rc));
1017 }
1018
1019 /*
1020 * Update the logger.
1021 */
1022 VMMR3UpdateLoggers(pVM);
1023}
1024
1025
1026/**
1027 * Updates the settings for the RC and R0 loggers.
1028 *
1029 * @returns VBox status code.
1030 * @param pVM The cross context VM structure.
1031 */
1032VMMR3_INT_DECL(int) VMMR3UpdateLoggers(PVM pVM)
1033{
1034 /*
1035 * Simply clone the logger instance (for RC).
1036 */
1037 int rc = VINF_SUCCESS;
1038 RTRCPTR RCPtrLoggerFlush = 0;
1039
1040 if ( pVM->vmm.s.pRCLoggerR3
1041#ifdef VBOX_WITH_RC_RELEASE_LOGGING
1042 || pVM->vmm.s.pRCRelLoggerR3
1043#endif
1044 )
1045 {
1046 Assert(VM_IS_RAW_MODE_ENABLED(pVM));
1047 rc = PDMR3LdrGetSymbolRC(pVM, VMMRC_MAIN_MODULE_NAME, "vmmGCLoggerFlush", &RCPtrLoggerFlush);
1048 AssertReleaseMsgRC(rc, ("vmmGCLoggerFlush not found! rc=%Rra\n", rc));
1049 }
1050
1051 if (pVM->vmm.s.pRCLoggerR3)
1052 {
1053 Assert(VM_IS_RAW_MODE_ENABLED(pVM));
1054 RTRCPTR RCPtrLoggerWrapper = 0;
1055 rc = PDMR3LdrGetSymbolRC(pVM, VMMRC_MAIN_MODULE_NAME, "vmmGCLoggerWrapper", &RCPtrLoggerWrapper);
1056 AssertReleaseMsgRC(rc, ("vmmGCLoggerWrapper not found! rc=%Rra\n", rc));
1057
1058 pVM->vmm.s.pRCLoggerRC = MMHyperR3ToRC(pVM, pVM->vmm.s.pRCLoggerR3);
1059 rc = RTLogCloneRC(NULL /* default */, pVM->vmm.s.pRCLoggerR3, pVM->vmm.s.cbRCLogger,
1060 RCPtrLoggerWrapper, RCPtrLoggerFlush, RTLOGFLAGS_BUFFERED);
1061 AssertReleaseMsgRC(rc, ("RTLogCloneRC failed! rc=%Rra\n", rc));
1062 }
1063
1064#ifdef VBOX_WITH_RC_RELEASE_LOGGING
1065 if (pVM->vmm.s.pRCRelLoggerR3)
1066 {
1067 Assert(VM_IS_RAW_MODE_ENABLED(pVM));
1068 RTRCPTR RCPtrLoggerWrapper = 0;
1069 rc = PDMR3LdrGetSymbolRC(pVM, VMMRC_MAIN_MODULE_NAME, "vmmGCRelLoggerWrapper", &RCPtrLoggerWrapper);
1070 AssertReleaseMsgRC(rc, ("vmmGCRelLoggerWrapper not found! rc=%Rra\n", rc));
1071
1072 pVM->vmm.s.pRCRelLoggerRC = MMHyperR3ToRC(pVM, pVM->vmm.s.pRCRelLoggerR3);
1073 rc = RTLogCloneRC(RTLogRelGetDefaultInstance(), pVM->vmm.s.pRCRelLoggerR3, pVM->vmm.s.cbRCRelLogger,
1074 RCPtrLoggerWrapper, RCPtrLoggerFlush, RTLOGFLAGS_BUFFERED);
1075 AssertReleaseMsgRC(rc, ("RTLogCloneRC failed! rc=%Rra\n", rc));
1076 }
1077#endif /* VBOX_WITH_RC_RELEASE_LOGGING */
1078
1079#ifdef LOG_ENABLED
1080 /*
1081 * For the ring-0 EMT logger, we use a per-thread logger instance
1082 * in ring-0. Only initialize it once.
1083 */
1084 PRTLOGGER const pDefault = RTLogDefaultInstance();
1085 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1086 {
1087 PVMCPU pVCpu = &pVM->aCpus[i];
1088 PVMMR0LOGGER pR0LoggerR3 = pVCpu->vmm.s.pR0LoggerR3;
1089 if (pR0LoggerR3)
1090 {
1091 if (!pR0LoggerR3->fCreated)
1092 {
1093 RTR0PTR pfnLoggerWrapper = NIL_RTR0PTR;
1094 rc = PDMR3LdrGetSymbolR0(pVM, VMMR0_MAIN_MODULE_NAME, "vmmR0LoggerWrapper", &pfnLoggerWrapper);
1095 AssertReleaseMsgRCReturn(rc, ("vmmR0LoggerWrapper not found! rc=%Rra\n", rc), rc);
1096
1097 RTR0PTR pfnLoggerFlush = NIL_RTR0PTR;
1098 rc = PDMR3LdrGetSymbolR0(pVM, VMMR0_MAIN_MODULE_NAME, "vmmR0LoggerFlush", &pfnLoggerFlush);
1099 AssertReleaseMsgRCReturn(rc, ("vmmR0LoggerFlush not found! rc=%Rra\n", rc), rc);
1100
1101 char szR0ThreadName[16];
1102 RTStrPrintf(szR0ThreadName, sizeof(szR0ThreadName), "EMT-%u-R0", i);
1103 rc = RTLogCreateForR0(&pR0LoggerR3->Logger, pR0LoggerR3->cbLogger,
1104 pVCpu->vmm.s.pR0LoggerR0 + RT_UOFFSETOF(VMMR0LOGGER, Logger),
1105 pfnLoggerWrapper, pfnLoggerFlush,
1106 RTLOGFLAGS_BUFFERED, RTLOGDEST_DUMMY, szR0ThreadName);
1107 AssertReleaseMsgRCReturn(rc, ("RTLogCreateForR0 failed! rc=%Rra\n", rc), rc);
1108
1109 pR0LoggerR3->idCpu = i;
1110 pR0LoggerR3->fCreated = true;
1111 pR0LoggerR3->fFlushingDisabled = false;
1112 }
1113
1114 rc = RTLogCopyGroupsAndFlagsForR0(&pR0LoggerR3->Logger, pVCpu->vmm.s.pR0LoggerR0 + RT_UOFFSETOF(VMMR0LOGGER, Logger),
1115 pDefault, RTLOGFLAGS_BUFFERED, UINT32_MAX);
1116 AssertRC(rc);
1117 }
1118 }
1119#endif
1120 return rc;
1121}
1122
1123
1124/**
1125 * Gets the pointer to a buffer containing the R0/RC RTAssertMsg1Weak output.
1126 *
1127 * @returns Pointer to the buffer.
1128 * @param pVM The cross context VM structure.
1129 */
1130VMMR3DECL(const char *) VMMR3GetRZAssertMsg1(PVM pVM)
1131{
1132 if (!VM_IS_RAW_MODE_ENABLED(pVM))
1133 return pVM->vmm.s.szRing0AssertMsg1;
1134
1135 RTRCPTR RCPtr;
1136 int rc = PDMR3LdrGetSymbolRC(pVM, NULL, "g_szRTAssertMsg1", &RCPtr);
1137 if (RT_SUCCESS(rc))
1138 return (const char *)MMHyperRCToR3(pVM, RCPtr);
1139
1140 return NULL;
1141}
1142
1143
1144/**
1145 * Returns the VMCPU of the specified virtual CPU.
1146 *
1147 * @returns The VMCPU pointer. NULL if @a idCpu or @a pUVM is invalid.
1148 *
1149 * @param pUVM The user mode VM handle.
1150 * @param idCpu The ID of the virtual CPU.
1151 */
1152VMMR3DECL(PVMCPU) VMMR3GetCpuByIdU(PUVM pUVM, RTCPUID idCpu)
1153{
1154 UVM_ASSERT_VALID_EXT_RETURN(pUVM, NULL);
1155 AssertReturn(idCpu < pUVM->cCpus, NULL);
1156 VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, NULL);
1157 return &pUVM->pVM->aCpus[idCpu];
1158}
1159
1160
1161/**
1162 * Gets the pointer to a buffer containing the R0/RC RTAssertMsg2Weak output.
1163 *
1164 * @returns Pointer to the buffer.
1165 * @param pVM The cross context VM structure.
1166 */
1167VMMR3DECL(const char *) VMMR3GetRZAssertMsg2(PVM pVM)
1168{
1169 if (!VM_IS_RAW_MODE_ENABLED(pVM))
1170 return pVM->vmm.s.szRing0AssertMsg2;
1171
1172 RTRCPTR RCPtr;
1173 int rc = PDMR3LdrGetSymbolRC(pVM, NULL, "g_szRTAssertMsg2", &RCPtr);
1174 if (RT_SUCCESS(rc))
1175 return (const char *)MMHyperRCToR3(pVM, RCPtr);
1176
1177 return NULL;
1178}
1179
1180
1181/**
1182 * Execute state save operation.
1183 *
1184 * @returns VBox status code.
1185 * @param pVM The cross context VM structure.
1186 * @param pSSM SSM operation handle.
1187 */
1188static DECLCALLBACK(int) vmmR3Save(PVM pVM, PSSMHANDLE pSSM)
1189{
1190 LogFlow(("vmmR3Save:\n"));
1191
1192 /*
1193 * Save the started/stopped state of all CPUs except 0 as it will always
1194 * be running. This avoids breaking the saved state version. :-)
1195 */
1196 for (VMCPUID i = 1; i < pVM->cCpus; i++)
1197 SSMR3PutBool(pSSM, VMCPUSTATE_IS_STARTED(VMCPU_GET_STATE(&pVM->aCpus[i])));
1198
1199 return SSMR3PutU32(pSSM, UINT32_MAX); /* terminator */
1200}
1201
1202
1203/**
1204 * Execute state load operation.
1205 *
1206 * @returns VBox status code.
1207 * @param pVM The cross context VM structure.
1208 * @param pSSM SSM operation handle.
1209 * @param uVersion Data layout version.
1210 * @param uPass The data pass.
1211 */
1212static DECLCALLBACK(int) vmmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
1213{
1214 LogFlow(("vmmR3Load:\n"));
1215 Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);
1216
1217 /*
1218 * Validate version.
1219 */
1220 if ( uVersion != VMM_SAVED_STATE_VERSION
1221 && uVersion != VMM_SAVED_STATE_VERSION_3_0)
1222 {
1223 AssertMsgFailed(("vmmR3Load: Invalid version uVersion=%u!\n", uVersion));
1224 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
1225 }
1226
1227 if (uVersion <= VMM_SAVED_STATE_VERSION_3_0)
1228 {
1229 /* Ignore the stack bottom, stack pointer and stack bits. */
1230 RTRCPTR RCPtrIgnored;
1231 SSMR3GetRCPtr(pSSM, &RCPtrIgnored);
1232 SSMR3GetRCPtr(pSSM, &RCPtrIgnored);
1233#ifdef RT_OS_DARWIN
1234 if ( SSMR3HandleVersion(pSSM) >= VBOX_FULL_VERSION_MAKE(3,0,0)
1235 && SSMR3HandleVersion(pSSM) < VBOX_FULL_VERSION_MAKE(3,1,0)
1236 && SSMR3HandleRevision(pSSM) >= 48858
1237 && ( !strcmp(SSMR3HandleHostOSAndArch(pSSM), "darwin.x86")
1238 || !strcmp(SSMR3HandleHostOSAndArch(pSSM), "") )
1239 )
1240 SSMR3Skip(pSSM, 16384);
1241 else
1242 SSMR3Skip(pSSM, 8192);
1243#else
1244 SSMR3Skip(pSSM, 8192);
1245#endif
1246 }
1247
1248 /*
1249 * Restore the VMCPU states. VCPU 0 is always started.
1250 */
1251 VMCPU_SET_STATE(&pVM->aCpus[0], VMCPUSTATE_STARTED);
1252 for (VMCPUID i = 1; i < pVM->cCpus; i++)
1253 {
1254 bool fStarted;
1255 int rc = SSMR3GetBool(pSSM, &fStarted);
1256 if (RT_FAILURE(rc))
1257 return rc;
1258 VMCPU_SET_STATE(&pVM->aCpus[i], fStarted ? VMCPUSTATE_STARTED : VMCPUSTATE_STOPPED);
1259 }
1260
1261 /* terminator */
1262 uint32_t u32;
1263 int rc = SSMR3GetU32(pSSM, &u32);
1264 if (RT_FAILURE(rc))
1265 return rc;
1266 if (u32 != UINT32_MAX)
1267 {
1268 AssertMsgFailed(("u32=%#x\n", u32));
1269 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
1270 }
1271 return VINF_SUCCESS;
1272}
1273
1274
1275#ifdef VBOX_WITH_RAW_MODE
1276/**
1277 * Resolve a builtin RC symbol.
1278 *
1279 * Called by PDM when loading or relocating RC modules.
1280 *
1281 * @returns VBox status
1282 * @param pVM The cross context VM structure.
1283 * @param pszSymbol Symbol to resolve.
1284 * @param pRCPtrValue Where to store the symbol value.
1285 *
1286 * @remark This has to work before VMMR3Relocate() is called.
1287 */
1288VMMR3_INT_DECL(int) VMMR3GetImportRC(PVM pVM, const char *pszSymbol, PRTRCPTR pRCPtrValue)
1289{
1290 if (!strcmp(pszSymbol, "g_Logger"))
1291 {
1292 if (pVM->vmm.s.pRCLoggerR3)
1293 pVM->vmm.s.pRCLoggerRC = MMHyperR3ToRC(pVM, pVM->vmm.s.pRCLoggerR3);
1294 *pRCPtrValue = pVM->vmm.s.pRCLoggerRC;
1295 }
1296 else if (!strcmp(pszSymbol, "g_RelLogger"))
1297 {
1298# ifdef VBOX_WITH_RC_RELEASE_LOGGING
1299 if (pVM->vmm.s.pRCRelLoggerR3)
1300 pVM->vmm.s.pRCRelLoggerRC = MMHyperR3ToRC(pVM, pVM->vmm.s.pRCRelLoggerR3);
1301 *pRCPtrValue = pVM->vmm.s.pRCRelLoggerRC;
1302# else
1303 *pRCPtrValue = NIL_RTRCPTR;
1304# endif
1305 }
1306 else
1307 return VERR_SYMBOL_NOT_FOUND;
1308 return VINF_SUCCESS;
1309}
1310#endif /* VBOX_WITH_RAW_MODE */
1311
1312
1313/**
1314 * Suspends the CPU yielder.
1315 *
1316 * @param pVM The cross context VM structure.
1317 */
1318VMMR3_INT_DECL(void) VMMR3YieldSuspend(PVM pVM)
1319{
1320 VMCPU_ASSERT_EMT(&pVM->aCpus[0]);
1321 if (!pVM->vmm.s.cYieldResumeMillies)
1322 {
1323 uint64_t u64Now = TMTimerGet(pVM->vmm.s.pYieldTimer);
1324 uint64_t u64Expire = TMTimerGetExpire(pVM->vmm.s.pYieldTimer);
1325 if (u64Now >= u64Expire || u64Expire == ~(uint64_t)0)
1326 pVM->vmm.s.cYieldResumeMillies = pVM->vmm.s.cYieldEveryMillies;
1327 else
1328 pVM->vmm.s.cYieldResumeMillies = TMTimerToMilli(pVM->vmm.s.pYieldTimer, u64Expire - u64Now);
1329 TMTimerStop(pVM->vmm.s.pYieldTimer);
1330 }
1331 pVM->vmm.s.u64LastYield = RTTimeNanoTS();
1332}
1333
1334
1335/**
1336 * Stops the CPU yielder.
1337 *
1338 * @param pVM The cross context VM structure.
1339 */
1340VMMR3_INT_DECL(void) VMMR3YieldStop(PVM pVM)
1341{
1342 if (!pVM->vmm.s.cYieldResumeMillies)
1343 TMTimerStop(pVM->vmm.s.pYieldTimer);
1344 pVM->vmm.s.cYieldResumeMillies = pVM->vmm.s.cYieldEveryMillies;
1345 pVM->vmm.s.u64LastYield = RTTimeNanoTS();
1346}
1347
1348
1349/**
1350 * Resumes the CPU yielder when it has been suspended or stopped.
1351 *
1352 * @param pVM The cross context VM structure.
1353 */
1354VMMR3_INT_DECL(void) VMMR3YieldResume(PVM pVM)
1355{
1356 if (pVM->vmm.s.cYieldResumeMillies)
1357 {
1358 TMTimerSetMillies(pVM->vmm.s.pYieldTimer, pVM->vmm.s.cYieldResumeMillies);
1359 pVM->vmm.s.cYieldResumeMillies = 0;
1360 }
1361}
1362
1363
1364/**
1365 * Internal timer callback function.
1366 *
1367 * @param pVM The cross context VM structure.
1368 * @param pTimer The timer handle.
1369 * @param pvUser User argument specified upon timer creation.
1370 */
1371static DECLCALLBACK(void) vmmR3YieldEMT(PVM pVM, PTMTIMER pTimer, void *pvUser)
1372{
1373 NOREF(pvUser);
1374
1375 /*
1376 * This really needs some careful tuning. While we shouldn't be too greedy since
1377 * that'll cause the rest of the system to stop up, we shouldn't be too nice either
1378 * because that'll cause us to stop up.
1379 *
1380 * The current logic is to use the default interval when there is no lag worth
1381 * mentioning, but when we start accumulating lag we don't bother yielding at all.
1382 *
1383 * (This depends on the TMCLOCK_VIRTUAL_SYNC to be scheduled before TMCLOCK_REAL
1384 * so the lag is up to date.)
1385 */
1386 const uint64_t u64Lag = TMVirtualSyncGetLag(pVM);
1387 if ( u64Lag < 50000000 /* 50ms */
1388 || ( u64Lag < 1000000000 /* 1s */
1389 && RTTimeNanoTS() - pVM->vmm.s.u64LastYield < 500000000 /* 500 ms */)
1390 )
1391 {
1392 uint64_t u64Elapsed = RTTimeNanoTS();
1393 pVM->vmm.s.u64LastYield = u64Elapsed;
1394
1395 RTThreadYield();
1396
1397#ifdef LOG_ENABLED
1398 u64Elapsed = RTTimeNanoTS() - u64Elapsed;
1399 Log(("vmmR3YieldEMT: %RI64 ns\n", u64Elapsed));
1400#endif
1401 }
1402 TMTimerSetMillies(pTimer, pVM->vmm.s.cYieldEveryMillies);
1403}
1404
1405
1406#ifdef VBOX_WITH_RAW_MODE
1407/**
1408 * Executes guest code in the raw-mode context.
1409 *
1410 * @param pVM The cross context VM structure.
1411 * @param pVCpu The cross context virtual CPU structure.
1412 */
1413VMMR3_INT_DECL(int) VMMR3RawRunGC(PVM pVM, PVMCPU pVCpu)
1414{
1415 Log2(("VMMR3RawRunGC: (cs:eip=%04x:%08x)\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
1416
1417 AssertReturn(pVM->cCpus == 1, VERR_RAW_MODE_INVALID_SMP);
1418
1419 /*
1420 * Set the hypervisor to resume executing a CPUM resume function
1421 * in CPUMRCA.asm.
1422 */
1423 CPUMSetHyperState(pVCpu,
1424 CPUMGetGuestEFlags(pVCpu) & X86_EFL_VM
1425 ? pVM->vmm.s.pfnCPUMRCResumeGuestV86
1426 : pVM->vmm.s.pfnCPUMRCResumeGuest, /* eip */
1427 pVCpu->vmm.s.pbEMTStackBottomRC, /* esp */
1428 0, /* eax */
1429 VM_RC_ADDR(pVM, &pVCpu->cpum) /* edx */);
1430
1431 /*
1432 * We hide log flushes (outer) and hypervisor interrupts (inner).
1433 */
1434 for (;;)
1435 {
1436#ifdef VBOX_STRICT
1437 if (RT_UNLIKELY(!CPUMGetHyperCR3(pVCpu) || CPUMGetHyperCR3(pVCpu) != PGMGetHyperCR3(pVCpu)))
1438 EMR3FatalError(pVCpu, VERR_VMM_HYPER_CR3_MISMATCH);
1439 PGMMapCheck(pVM);
1440# ifdef VBOX_WITH_SAFE_STR
1441 SELMR3CheckShadowTR(pVM);
1442# endif
1443#endif
1444 int rc;
1445 do
1446 {
1447#ifdef NO_SUPCALLR0VMM
1448 rc = VERR_GENERAL_FAILURE;
1449#else
1450 rc = SUPR3CallVMMR0Fast(pVM->pVMR0, VMMR0_DO_RAW_RUN, 0);
1451 if (RT_LIKELY(rc == VINF_SUCCESS))
1452 rc = pVCpu->vmm.s.iLastGZRc;
1453#endif
1454 } while (rc == VINF_EM_RAW_INTERRUPT_HYPER);
1455
1456 /*
1457 * Flush the logs.
1458 */
1459#ifdef LOG_ENABLED
1460 PRTLOGGERRC pLogger = pVM->vmm.s.pRCLoggerR3;
1461 if ( pLogger
1462 && pLogger->offScratch > 0)
1463 RTLogFlushRC(NULL, pLogger);
1464#endif
1465#ifdef VBOX_WITH_RC_RELEASE_LOGGING
1466 PRTLOGGERRC pRelLogger = pVM->vmm.s.pRCRelLoggerR3;
1467 if (RT_UNLIKELY(pRelLogger && pRelLogger->offScratch > 0))
1468 RTLogFlushRC(RTLogRelGetDefaultInstance(), pRelLogger);
1469#endif
1470 if (rc != VINF_VMM_CALL_HOST)
1471 {
1472 Log2(("VMMR3RawRunGC: returns %Rrc (cs:eip=%04x:%08x)\n", rc, CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
1473 return rc;
1474 }
1475 rc = vmmR3ServiceCallRing3Request(pVM, pVCpu);
1476 if (RT_FAILURE(rc))
1477 return rc;
1478 /* Resume GC */
1479 }
1480}
1481#endif /* VBOX_WITH_RAW_MODE */
1482
1483
1484/**
1485 * Executes guest code (Intel VT-x and AMD-V).
1486 *
1487 * @param pVM The cross context VM structure.
1488 * @param pVCpu The cross context virtual CPU structure.
1489 */
1490VMMR3_INT_DECL(int) VMMR3HmRunGC(PVM pVM, PVMCPU pVCpu)
1491{
1492 Log2(("VMMR3HmRunGC: (cs:rip=%04x:%RX64)\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestRIP(pVCpu)));
1493
1494 for (;;)
1495 {
1496 int rc;
1497 do
1498 {
1499#ifdef NO_SUPCALLR0VMM
1500 rc = VERR_GENERAL_FAILURE;
1501#else
1502 rc = SUPR3CallVMMR0Fast(pVM->pVMR0, VMMR0_DO_HM_RUN, pVCpu->idCpu);
1503 if (RT_LIKELY(rc == VINF_SUCCESS))
1504 rc = pVCpu->vmm.s.iLastGZRc;
1505#endif
1506 } while (rc == VINF_EM_RAW_INTERRUPT_HYPER);
1507
1508#if 0 /** @todo triggers too often */
1509 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TO_R3));
1510#endif
1511
1512 /*
1513 * Flush the logs
1514 */
1515#ifdef LOG_ENABLED
1516 VMM_FLUSH_R0_LOG(pVCpu->vmm.s.pR0LoggerR3, NULL);
1517#endif
1518 VMM_FLUSH_R0_LOG(pVCpu->vmm.s.pR0RelLoggerR3, RTLogRelGetDefaultInstance());
1519 if (rc != VINF_VMM_CALL_HOST)
1520 {
1521 Log2(("VMMR3HmRunGC: returns %Rrc (cs:rip=%04x:%RX64)\n", rc, CPUMGetGuestCS(pVCpu), CPUMGetGuestRIP(pVCpu)));
1522 return rc;
1523 }
1524 rc = vmmR3ServiceCallRing3Request(pVM, pVCpu);
1525 if (RT_FAILURE(rc))
1526 return rc;
1527 /* Resume R0 */
1528 }
1529}
1530
1531
1532/**
1533 * Performs one of the fast I/O control VMMR0 operations.
1534 *
1535 * @returns VBox strict status code.
1536 * @param pVM The cross context VM structure.
1537 * @param pVCpu The cross context virtual CPU structure.
1538 * @param enmOperation The operation to perform.
1539 */
1540VMMR3_INT_DECL(VBOXSTRICTRC) VMMR3CallR0EmtFast(PVM pVM, PVMCPU pVCpu, VMMR0OPERATION enmOperation)
1541{
1542 for (;;)
1543 {
1544 VBOXSTRICTRC rcStrict;
1545 do
1546 {
1547#ifdef NO_SUPCALLR0VMM
1548 rcStrict = VERR_GENERAL_FAILURE;
1549#else
1550 rcStrict = SUPR3CallVMMR0Fast(pVM->pVMR0, enmOperation, pVCpu->idCpu);
1551 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1552 rcStrict = pVCpu->vmm.s.iLastGZRc;
1553#endif
1554 } while (rcStrict == VINF_EM_RAW_INTERRUPT_HYPER);
1555
1556 /*
1557 * Flush the logs
1558 */
1559#ifdef LOG_ENABLED
1560 VMM_FLUSH_R0_LOG(pVCpu->vmm.s.pR0LoggerR3, NULL);
1561#endif
1562 VMM_FLUSH_R0_LOG(pVCpu->vmm.s.pR0RelLoggerR3, RTLogRelGetDefaultInstance());
1563 if (rcStrict != VINF_VMM_CALL_HOST)
1564 return rcStrict;
1565 int rc = vmmR3ServiceCallRing3Request(pVM, pVCpu);
1566 if (RT_FAILURE(rc))
1567 return rc;
1568 /* Resume R0 */
1569 }
1570}
1571
1572
1573/**
1574 * VCPU worker for VMMR3SendStartupIpi.
1575 *
1576 * @param pVM The cross context VM structure.
1577 * @param idCpu Virtual CPU to perform SIPI on.
1578 * @param uVector The SIPI vector.
1579 */
1580static DECLCALLBACK(int) vmmR3SendStartupIpi(PVM pVM, VMCPUID idCpu, uint32_t uVector)
1581{
1582 PVMCPU pVCpu = VMMGetCpuById(pVM, idCpu);
1583 VMCPU_ASSERT_EMT(pVCpu);
1584
1585 /*
1586 * In the INIT state, the target CPU is only responsive to an SIPI.
1587     * This is also true when the CPU is in VMX non-root mode.
1588 *
1589 * See AMD spec. 16.5 "Interprocessor Interrupts (IPI)".
1590 * See Intel spec. 26.6.2 "Activity State".
1591 */
1592 if (EMGetState(pVCpu) != EMSTATE_WAIT_SIPI)
1593 return VINF_SUCCESS;
1594
1595 PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
1596#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1597 if (CPUMIsGuestInVmxRootMode(pCtx))
1598 {
1599 /* If the CPU is in VMX non-root mode we must cause a VM-exit. */
1600 if (CPUMIsGuestInVmxNonRootMode(pCtx))
1601 return VBOXSTRICTRC_TODO(IEMExecVmxVmexitStartupIpi(pVCpu, uVector));
1602
1603 /* If the CPU is in VMX root mode (and not in VMX non-root mode) SIPIs are blocked. */
1604 return VINF_SUCCESS;
1605 }
1606#endif
1607
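    /*
     * Set the vector-dependent real-mode startup state. Worked example (added
     * for illustration): uVector = 0x9f gives CS = 0x9f00 with base 0x9f000
     * and IP = 0, i.e. the VCPU starts executing at physical address 0x9f000.
     */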
1608 pCtx->cs.Sel = uVector << 8;
1609 pCtx->cs.ValidSel = uVector << 8;
1610 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1611 pCtx->cs.u64Base = uVector << 12;
1612 pCtx->cs.u32Limit = UINT32_C(0x0000ffff);
1613 pCtx->rip = 0;
1614
1615    Log(("vmmR3SendStartupIpi for VCPU %d with vector %x\n", idCpu, uVector));
1616
1617# if 1 /* If we keep the EMSTATE_WAIT_SIPI method, then move this to EM.cpp. */
1618 EMSetState(pVCpu, EMSTATE_HALTED);
1619 return VINF_EM_RESCHEDULE;
1620# else /* And if we go the VMCPU::enmState way it can stay here. */
1621 VMCPU_ASSERT_STATE(pVCpu, VMCPUSTATE_STOPPED);
1622 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
1623 return VINF_SUCCESS;
1624# endif
1625}
1626
1627
1628/**
1629 * VCPU worker for VMMR3SendInitIpi.
1630 *
1631 * @returns VBox status code.
1632 * @param pVM The cross context VM structure.
1633 * @param   idCpu       Virtual CPU to perform the INIT IPI on.
1634 */
1635static DECLCALLBACK(int) vmmR3SendInitIpi(PVM pVM, VMCPUID idCpu)
1636{
1637 PVMCPU pVCpu = VMMGetCpuById(pVM, idCpu);
1638 VMCPU_ASSERT_EMT(pVCpu);
1639
1640 Log(("vmmR3SendInitIpi for VCPU %d\n", idCpu));
1641
1642 /** @todo r=ramshankar: We should probably block INIT signal when the CPU is in
1643 * wait-for-SIPI state. Verify. */
1644
1645 /* If the CPU is in VMX non-root mode, INIT signals cause VM-exits. */
1646#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1647 PCCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
1648 if (CPUMIsGuestInVmxNonRootMode(pCtx))
1649 return VBOXSTRICTRC_TODO(IEMExecVmxVmexitInitIpi(pVCpu));
1650#endif
1651
1652 /** @todo Figure out how to handle a SVM nested-guest intercepts here for INIT
1653 * IPI (e.g. SVM_EXIT_INIT). */
1654
1655 PGMR3ResetCpu(pVM, pVCpu);
1656    PDMR3ResetCpu(pVCpu);                   /* Only clears the pending interrupt force flags. */
1657 APICR3InitIpi(pVCpu);
1658 TRPMR3ResetCpu(pVCpu);
1659 CPUMR3ResetCpu(pVM, pVCpu);
1660 EMR3ResetCpu(pVCpu);
1661 HMR3ResetCpu(pVCpu);
1662 NEMR3ResetCpu(pVCpu, true /*fInitIpi*/);
1663
1664 /* This will trickle up on the target EMT. */
1665 return VINF_EM_WAIT_SIPI;
1666}
1667
1668
1669/**
1670 * Sends a Startup IPI to the virtual CPU by setting CS:EIP to the
1671 * vector-dependent startup state and unhalting the processor.
1672 *
1673 * @param pVM The cross context VM structure.
1674 * @param idCpu Virtual CPU to perform SIPI on.
1675 * @param uVector SIPI vector.
1676 */
1677VMMR3_INT_DECL(void) VMMR3SendStartupIpi(PVM pVM, VMCPUID idCpu, uint32_t uVector)
1678{
1679 AssertReturnVoid(idCpu < pVM->cCpus);
1680
1681    int rc = VMR3ReqCallNoWait(pVM, idCpu, (PFNRT)vmmR3SendStartupIpi, 3, pVM, idCpu, uVector);
1682 AssertRC(rc);
1683}
1684
1685
1686/**
1687 * Sends an INIT IPI to the virtual CPU.
1688 *
1689 * @param pVM The cross context VM structure.
1690 * @param   idCpu       Virtual CPU to perform the INIT IPI on.
1691 */
1692VMMR3_INT_DECL(void) VMMR3SendInitIpi(PVM pVM, VMCPUID idCpu)
1693{
1694 AssertReturnVoid(idCpu < pVM->cCpus);
1695
1696 int rc = VMR3ReqCallNoWait(pVM, idCpu, (PFNRT)vmmR3SendInitIpi, 2, pVM, idCpu);
1697 AssertRC(rc);
1698}
1699
1700
1701/**
1702 * Registers the guest memory range that can be used for patching.
1703 *
1704 * @returns VBox status code.
1705 * @param pVM The cross context VM structure.
1706 * @param pPatchMem Patch memory range.
1707 * @param cbPatchMem Size of the memory range.
1708 */
1709VMMR3DECL(int) VMMR3RegisterPatchMemory(PVM pVM, RTGCPTR pPatchMem, unsigned cbPatchMem)
1710{
1711 VM_ASSERT_EMT(pVM);
1712 if (HMIsEnabled(pVM))
1713 return HMR3EnablePatching(pVM, pPatchMem, cbPatchMem);
1714
1715 return VERR_NOT_SUPPORTED;
1716}
1717
1718
1719/**
1720 * Deregisters the guest memory range that can be used for patching.
1721 *
1722 * @returns VBox status code.
1723 * @param pVM The cross context VM structure.
1724 * @param pPatchMem Patch memory range.
1725 * @param cbPatchMem Size of the memory range.
1726 */
1727VMMR3DECL(int) VMMR3DeregisterPatchMemory(PVM pVM, RTGCPTR pPatchMem, unsigned cbPatchMem)
1728{
1729 if (HMIsEnabled(pVM))
1730 return HMR3DisablePatching(pVM, pPatchMem, cbPatchMem);
1731
1732 return VINF_SUCCESS;
1733}
1734
1735
1736/**
1737 * Common recursion handler for the other EMTs.
1738 *
1739 * @returns Strict VBox status code.
1740 * @param pVM The cross context VM structure.
1741 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1742 * @param rcStrict Current status code to be combined with the one
1743 * from this recursion and returned.
1744 */
1745static VBOXSTRICTRC vmmR3EmtRendezvousCommonRecursion(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
1746{
1747 int rc2;
1748
1749 /*
1750 * We wait here while the initiator of this recursion reconfigures
1751 * everything. The last EMT to get in signals the initiator.
1752 */
1753 if (ASMAtomicIncU32(&pVM->vmm.s.cRendezvousEmtsRecursingPush) == pVM->cCpus)
1754 {
1755 rc2 = RTSemEventSignal(pVM->vmm.s.hEvtRendezvousRecursionPushCaller);
1756 AssertLogRelRC(rc2);
1757 }
1758
1759 rc2 = RTSemEventMultiWait(pVM->vmm.s.hEvtMulRendezvousRecursionPush, RT_INDEFINITE_WAIT);
1760 AssertLogRelRC(rc2);
1761
1762 /*
1763 * Do the normal rendezvous processing.
1764 */
1765 VBOXSTRICTRC rcStrict2 = vmmR3EmtRendezvousCommon(pVM, pVCpu, false /* fIsCaller */, pVM->vmm.s.fRendezvousFlags,
1766 pVM->vmm.s.pfnRendezvous, pVM->vmm.s.pvRendezvousUser);
1767
1768 /*
1769 * Wait for the initiator to restore everything.
1770 */
1771 rc2 = RTSemEventMultiWait(pVM->vmm.s.hEvtMulRendezvousRecursionPop, RT_INDEFINITE_WAIT);
1772 AssertLogRelRC(rc2);
1773
1774 /*
1775 * Last thread out of here signals the initiator.
1776 */
1777 if (ASMAtomicIncU32(&pVM->vmm.s.cRendezvousEmtsRecursingPop) == pVM->cCpus)
1778 {
1779 rc2 = RTSemEventSignal(pVM->vmm.s.hEvtRendezvousRecursionPopCaller);
1780 AssertLogRelRC(rc2);
1781 }
1782
1783 /*
1784 * Merge status codes and return.
1785 */
1786 AssertRC(VBOXSTRICTRC_VAL(rcStrict2));
1787 if ( rcStrict2 != VINF_SUCCESS
1788 && ( rcStrict == VINF_SUCCESS
1789 || rcStrict > rcStrict2))
1790 rcStrict = rcStrict2;
1791 return rcStrict;
1792}
1793
1794
1795/**
1796 * Count returns and have the last non-caller EMT wake up the caller.
1797 *
1798 * @returns VBox strict informational status code for EM scheduling. No failures
1799 * will be returned here, those are for the caller only.
1800 *
1801 * @param pVM The cross context VM structure.
1802 * @param rcStrict The current accumulated recursive status code,
1803 * to be merged with i32RendezvousStatus and
1804 * returned.
1805 */
1806DECL_FORCE_INLINE(VBOXSTRICTRC) vmmR3EmtRendezvousNonCallerReturn(PVM pVM, VBOXSTRICTRC rcStrict)
1807{
1808 VBOXSTRICTRC rcStrict2 = ASMAtomicReadS32(&pVM->vmm.s.i32RendezvousStatus);
1809
1810 uint32_t cReturned = ASMAtomicIncU32(&pVM->vmm.s.cRendezvousEmtsReturned);
1811 if (cReturned == pVM->cCpus - 1U)
1812 {
1813 int rc = RTSemEventSignal(pVM->vmm.s.hEvtRendezvousDoneCaller);
1814 AssertLogRelRC(rc);
1815 }
1816
1817 /*
1818 * Merge the status codes, ignoring error statuses in this code path.
1819 */
1820 AssertLogRelMsgReturn( rcStrict2 <= VINF_SUCCESS
1821 || (rcStrict2 >= VINF_EM_FIRST && rcStrict2 <= VINF_EM_LAST),
1822 ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict2)),
1823 VERR_IPE_UNEXPECTED_INFO_STATUS);
1824
1825 if (RT_SUCCESS(rcStrict2))
1826 {
1827 if ( rcStrict2 != VINF_SUCCESS
1828 && ( rcStrict == VINF_SUCCESS
1829 || rcStrict > rcStrict2))
1830 rcStrict = rcStrict2;
1831 }
1832 return rcStrict;
1833}
1834
1835
1836/**
1837 * Common worker for VMMR3EmtRendezvous and VMMR3EmtRendezvousFF.
1838 *
1839 * @returns VBox strict informational status code for EM scheduling. No failures
1840 * will be returned here, those are for the caller only. When
1841 * fIsCaller is set, VINF_SUCCESS is always returned.
1842 *
1843 * @param pVM The cross context VM structure.
1844 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1845 * @param fIsCaller Whether we're the VMMR3EmtRendezvous caller or
1846 * not.
1847 * @param fFlags The flags.
1848 * @param pfnRendezvous The callback.
1849 * @param pvUser The user argument for the callback.
1850 */
1851static VBOXSTRICTRC vmmR3EmtRendezvousCommon(PVM pVM, PVMCPU pVCpu, bool fIsCaller,
1852 uint32_t fFlags, PFNVMMEMTRENDEZVOUS pfnRendezvous, void *pvUser)
1853{
1854 int rc;
1855 VBOXSTRICTRC rcStrictRecursion = VINF_SUCCESS;
1856
1857 /*
1858 * Enter, the last EMT triggers the next callback phase.
1859 */
1860 uint32_t cEntered = ASMAtomicIncU32(&pVM->vmm.s.cRendezvousEmtsEntered);
1861 if (cEntered != pVM->cCpus)
1862 {
1863 if ((fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE)
1864 {
1865 /* Wait for our turn. */
1866 for (;;)
1867 {
1868 rc = RTSemEventWait(pVM->vmm.s.hEvtRendezvousEnterOneByOne, RT_INDEFINITE_WAIT);
1869 AssertLogRelRC(rc);
1870 if (!pVM->vmm.s.fRendezvousRecursion)
1871 break;
1872 rcStrictRecursion = vmmR3EmtRendezvousCommonRecursion(pVM, pVCpu, rcStrictRecursion);
1873 }
1874 }
1875 else if ((fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE)
1876 {
1877 /* Wait for the last EMT to arrive and wake everyone up. */
1878 rc = RTSemEventMultiWait(pVM->vmm.s.hEvtMulRendezvousEnterAllAtOnce, RT_INDEFINITE_WAIT);
1879 AssertLogRelRC(rc);
1880 Assert(!pVM->vmm.s.fRendezvousRecursion);
1881 }
1882 else if ( (fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ASCENDING
1883 || (fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING)
1884 {
1885 /* Wait for our turn. */
1886 for (;;)
1887 {
1888 rc = RTSemEventWait(pVM->vmm.s.pahEvtRendezvousEnterOrdered[pVCpu->idCpu], RT_INDEFINITE_WAIT);
1889 AssertLogRelRC(rc);
1890 if (!pVM->vmm.s.fRendezvousRecursion)
1891 break;
1892 rcStrictRecursion = vmmR3EmtRendezvousCommonRecursion(pVM, pVCpu, rcStrictRecursion);
1893 }
1894 }
1895 else
1896 {
1897 Assert((fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE);
1898
1899 /*
1900             * The execute-once case is handled specially to optimize the code flow.
1901 *
1902 * The last EMT to arrive will perform the callback and the other
1903 * EMTs will wait on the Done/DoneCaller semaphores (instead of
1904 * the EnterOneByOne/AllAtOnce) in the meanwhile. When the callback
1905 * returns, that EMT will initiate the normal return sequence.
1906 */
1907 if (!fIsCaller)
1908 {
1909 for (;;)
1910 {
1911 rc = RTSemEventMultiWait(pVM->vmm.s.hEvtMulRendezvousDone, RT_INDEFINITE_WAIT);
1912 AssertLogRelRC(rc);
1913 if (!pVM->vmm.s.fRendezvousRecursion)
1914 break;
1915 rcStrictRecursion = vmmR3EmtRendezvousCommonRecursion(pVM, pVCpu, rcStrictRecursion);
1916 }
1917
1918 return vmmR3EmtRendezvousNonCallerReturn(pVM, rcStrictRecursion);
1919 }
1920 return VINF_SUCCESS;
1921 }
1922 }
1923 else
1924 {
1925 /*
1926 * All EMTs are waiting, clear the FF and take action according to the
1927 * execution method.
1928 */
1929 VM_FF_CLEAR(pVM, VM_FF_EMT_RENDEZVOUS);
1930
1931 if ((fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE)
1932 {
1933 /* Wake up everyone. */
1934 rc = RTSemEventMultiSignal(pVM->vmm.s.hEvtMulRendezvousEnterAllAtOnce);
1935 AssertLogRelRC(rc);
1936 }
1937 else if ( (fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ASCENDING
1938 || (fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING)
1939 {
1940            /* Figure out who to wake up and wake it up. If it's ourselves, then
1941               it's easy; otherwise wait for our turn. */
1942 VMCPUID iFirst = (fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ASCENDING
1943 ? 0
1944 : pVM->cCpus - 1U;
1945 if (pVCpu->idCpu != iFirst)
1946 {
1947 rc = RTSemEventSignal(pVM->vmm.s.pahEvtRendezvousEnterOrdered[iFirst]);
1948 AssertLogRelRC(rc);
1949 for (;;)
1950 {
1951 rc = RTSemEventWait(pVM->vmm.s.pahEvtRendezvousEnterOrdered[pVCpu->idCpu], RT_INDEFINITE_WAIT);
1952 AssertLogRelRC(rc);
1953 if (!pVM->vmm.s.fRendezvousRecursion)
1954 break;
1955 rcStrictRecursion = vmmR3EmtRendezvousCommonRecursion(pVM, pVCpu, rcStrictRecursion);
1956 }
1957 }
1958 }
1959 /* else: execute the handler on the current EMT and wake up one or more threads afterwards. */
1960 }
1961
1962
1963 /*
1964 * Do the callback and update the status if necessary.
1965 */
1966 if ( !(fFlags & VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR)
1967 || RT_SUCCESS(ASMAtomicUoReadS32(&pVM->vmm.s.i32RendezvousStatus)) )
1968 {
1969 VBOXSTRICTRC rcStrict2 = pfnRendezvous(pVM, pVCpu, pvUser);
1970 if (rcStrict2 != VINF_SUCCESS)
1971 {
1972 AssertLogRelMsg( rcStrict2 <= VINF_SUCCESS
1973 || (rcStrict2 >= VINF_EM_FIRST && rcStrict2 <= VINF_EM_LAST),
1974 ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict2)));
1975 int32_t i32RendezvousStatus;
1976 do
1977 {
1978 i32RendezvousStatus = ASMAtomicUoReadS32(&pVM->vmm.s.i32RendezvousStatus);
1979 if ( rcStrict2 == i32RendezvousStatus
1980 || RT_FAILURE(i32RendezvousStatus)
1981 || ( i32RendezvousStatus != VINF_SUCCESS
1982 && rcStrict2 > i32RendezvousStatus))
1983 break;
1984 } while (!ASMAtomicCmpXchgS32(&pVM->vmm.s.i32RendezvousStatus, VBOXSTRICTRC_VAL(rcStrict2), i32RendezvousStatus));
1985 }
1986 }
1987
1988 /*
1989 * Increment the done counter and take action depending on whether we're
1990 * the last to finish callback execution.
1991 */
1992 uint32_t cDone = ASMAtomicIncU32(&pVM->vmm.s.cRendezvousEmtsDone);
1993 if ( cDone != pVM->cCpus
1994 && (fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) != VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE)
1995 {
1996 /* Signal the next EMT? */
1997 if ((fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE)
1998 {
1999 rc = RTSemEventSignal(pVM->vmm.s.hEvtRendezvousEnterOneByOne);
2000 AssertLogRelRC(rc);
2001 }
2002 else if ((fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ASCENDING)
2003 {
2004 Assert(cDone == pVCpu->idCpu + 1U);
2005 rc = RTSemEventSignal(pVM->vmm.s.pahEvtRendezvousEnterOrdered[pVCpu->idCpu + 1U]);
2006 AssertLogRelRC(rc);
2007 }
2008 else if ((fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING)
2009 {
2010 Assert(pVM->cCpus - cDone == pVCpu->idCpu);
2011 rc = RTSemEventSignal(pVM->vmm.s.pahEvtRendezvousEnterOrdered[pVM->cCpus - cDone - 1U]);
2012 AssertLogRelRC(rc);
2013 }
2014
2015 /* Wait for the rest to finish (the caller waits on hEvtRendezvousDoneCaller). */
2016 if (!fIsCaller)
2017 {
2018 for (;;)
2019 {
2020 rc = RTSemEventMultiWait(pVM->vmm.s.hEvtMulRendezvousDone, RT_INDEFINITE_WAIT);
2021 AssertLogRelRC(rc);
2022 if (!pVM->vmm.s.fRendezvousRecursion)
2023 break;
2024 rcStrictRecursion = vmmR3EmtRendezvousCommonRecursion(pVM, pVCpu, rcStrictRecursion);
2025 }
2026 }
2027 }
2028 else
2029 {
2030 /* Callback execution is all done, tell the rest to return. */
2031 rc = RTSemEventMultiSignal(pVM->vmm.s.hEvtMulRendezvousDone);
2032 AssertLogRelRC(rc);
2033 }
2034
2035 if (!fIsCaller)
2036 return vmmR3EmtRendezvousNonCallerReturn(pVM, rcStrictRecursion);
2037 return rcStrictRecursion;
2038}
2039
2040
2041/**
2042 * Called in response to VM_FF_EMT_RENDEZVOUS.
2043 *
2044 * @returns VBox strict status code - EM scheduling. No errors will be returned
2045 * here, nor will any non-EM scheduling status codes be returned.
2046 *
2047 * @param pVM The cross context VM structure.
2048 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2049 *
2050 * @thread EMT
2051 */
2052VMMR3_INT_DECL(int) VMMR3EmtRendezvousFF(PVM pVM, PVMCPU pVCpu)
2053{
2054 Assert(!pVCpu->vmm.s.fInRendezvous);
2055 Log(("VMMR3EmtRendezvousFF: EMT%#u\n", pVCpu->idCpu));
2056 pVCpu->vmm.s.fInRendezvous = true;
2057 VBOXSTRICTRC rcStrict = vmmR3EmtRendezvousCommon(pVM, pVCpu, false /* fIsCaller */, pVM->vmm.s.fRendezvousFlags,
2058 pVM->vmm.s.pfnRendezvous, pVM->vmm.s.pvRendezvousUser);
2059 pVCpu->vmm.s.fInRendezvous = false;
2060 Log(("VMMR3EmtRendezvousFF: EMT%#u returns %Rrc\n", pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict)));
2061 return VBOXSTRICTRC_TODO(rcStrict);
2062}
2063
2064
2065/**
2066 * Helper for resetting a single wakeup event semaphore.
2067 *
2068 * @returns VERR_TIMEOUT on success, RTSemEventWait status otherwise.
2069 * @param hEvt The event semaphore to reset.
2070 */
2071static int vmmR3HlpResetEvent(RTSEMEVENT hEvt)
2072{
2073 for (uint32_t cLoops = 0; ; cLoops++)
2074 {
2075 int rc = RTSemEventWait(hEvt, 0 /*cMsTimeout*/);
2076 if (rc != VINF_SUCCESS || cLoops > _4K)
2077 return rc;
2078 }
2079}
2080
2081
2082/**
2083 * Worker for VMMR3EmtRendezvous that handles recursion.
2084 *
2085 * @returns VBox strict status code. This will be the first error,
2086 * VINF_SUCCESS, or an EM scheduling status code.
2087 *
2088 * @param pVM The cross context VM structure.
2089 * @param pVCpu The cross context virtual CPU structure of the
2090 * calling EMT.
2091 * @param fFlags Flags indicating execution methods. See
2092 * grp_VMMR3EmtRendezvous_fFlags.
2093 * @param pfnRendezvous The callback.
2094 * @param pvUser User argument for the callback.
2095 *
2096 * @thread EMT(pVCpu)
2097 */
2098static VBOXSTRICTRC vmmR3EmtRendezvousRecursive(PVM pVM, PVMCPU pVCpu, uint32_t fFlags,
2099 PFNVMMEMTRENDEZVOUS pfnRendezvous, void *pvUser)
2100{
2101 Log(("vmmR3EmtRendezvousRecursive: %#x EMT#%u depth=%d\n", fFlags, pVCpu->idCpu, pVM->vmm.s.cRendezvousRecursions));
2102 AssertLogRelReturn(pVM->vmm.s.cRendezvousRecursions < 3, VERR_DEADLOCK);
2103 Assert(pVCpu->vmm.s.fInRendezvous);
2104
2105 /*
2106 * Save the current state.
2107 */
2108 uint32_t const fParentFlags = pVM->vmm.s.fRendezvousFlags;
2109 uint32_t const cParentDone = pVM->vmm.s.cRendezvousEmtsDone;
2110 int32_t const iParentStatus = pVM->vmm.s.i32RendezvousStatus;
2111 PFNVMMEMTRENDEZVOUS const pfnParent = pVM->vmm.s.pfnRendezvous;
2112 void * const pvParentUser = pVM->vmm.s.pvRendezvousUser;
2113
2114 /*
2115     * Check preconditions.
2116 */
2117 AssertReturn( (fParentFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ASCENDING
2118 || (fParentFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING
2119 || (fParentFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE
2120 || (fParentFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE,
2121 VERR_INTERNAL_ERROR);
2122 AssertReturn(pVM->vmm.s.cRendezvousEmtsEntered == pVM->cCpus, VERR_INTERNAL_ERROR_2);
2123 AssertReturn(pVM->vmm.s.cRendezvousEmtsReturned == 0, VERR_INTERNAL_ERROR_3);
2124
2125 /*
2126 * Reset the recursion prep and pop semaphores.
2127 */
2128 int rc = RTSemEventMultiReset(pVM->vmm.s.hEvtMulRendezvousRecursionPush);
2129 AssertLogRelRCReturn(rc, rc);
2130 rc = RTSemEventMultiReset(pVM->vmm.s.hEvtMulRendezvousRecursionPop);
2131 AssertLogRelRCReturn(rc, rc);
2132 rc = vmmR3HlpResetEvent(pVM->vmm.s.hEvtRendezvousRecursionPushCaller);
2133 AssertLogRelMsgReturn(rc == VERR_TIMEOUT, ("%Rrc\n", rc), RT_FAILURE_NP(rc) ? rc : VERR_IPE_UNEXPECTED_INFO_STATUS);
2134 rc = vmmR3HlpResetEvent(pVM->vmm.s.hEvtRendezvousRecursionPopCaller);
2135 AssertLogRelMsgReturn(rc == VERR_TIMEOUT, ("%Rrc\n", rc), RT_FAILURE_NP(rc) ? rc : VERR_IPE_UNEXPECTED_INFO_STATUS);
2136
2137 /*
2138     * Usher the other EMTs into the recursion routine.
2139 */
2140 ASMAtomicWriteU32(&pVM->vmm.s.cRendezvousEmtsRecursingPush, 0);
2141 ASMAtomicWriteBool(&pVM->vmm.s.fRendezvousRecursion, true);
2142
2143 uint32_t cLeft = pVM->cCpus - (cParentDone + 1U);
2144 if ((fParentFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE)
2145 while (cLeft-- > 0)
2146 {
2147 rc = RTSemEventSignal(pVM->vmm.s.hEvtRendezvousEnterOneByOne);
2148 AssertLogRelRC(rc);
2149 }
2150 else if ((fParentFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ASCENDING)
2151 {
2152 Assert(cLeft == pVM->cCpus - (pVCpu->idCpu + 1U));
2153 for (VMCPUID iCpu = pVCpu->idCpu + 1U; iCpu < pVM->cCpus; iCpu++)
2154 {
2155 rc = RTSemEventSignal(pVM->vmm.s.pahEvtRendezvousEnterOrdered[iCpu]);
2156 AssertLogRelRC(rc);
2157 }
2158 }
2159 else if ((fParentFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING)
2160 {
2161 Assert(cLeft == pVCpu->idCpu);
2162 for (VMCPUID iCpu = pVCpu->idCpu; iCpu > 0; iCpu--)
2163 {
2164 rc = RTSemEventSignal(pVM->vmm.s.pahEvtRendezvousEnterOrdered[iCpu - 1U]);
2165 AssertLogRelRC(rc);
2166 }
2167 }
2168 else
2169 AssertLogRelReturn((fParentFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE,
2170 VERR_INTERNAL_ERROR_4);
2171
2172 rc = RTSemEventMultiSignal(pVM->vmm.s.hEvtMulRendezvousDone);
2173 AssertLogRelRC(rc);
2174 rc = RTSemEventSignal(pVM->vmm.s.hEvtRendezvousDoneCaller);
2175 AssertLogRelRC(rc);
2176
2177
2178 /*
2179 * Wait for the EMTs to wake up and get out of the parent rendezvous code.
2180 */
2181 if (ASMAtomicIncU32(&pVM->vmm.s.cRendezvousEmtsRecursingPush) != pVM->cCpus)
2182 {
2183 rc = RTSemEventWait(pVM->vmm.s.hEvtRendezvousRecursionPushCaller, RT_INDEFINITE_WAIT);
2184 AssertLogRelRC(rc);
2185 }
2186
2187 ASMAtomicWriteBool(&pVM->vmm.s.fRendezvousRecursion, false);
2188
2189 /*
2190 * Clear the slate and setup the new rendezvous.
2191 */
2192 for (VMCPUID i = 0; i < pVM->cCpus; i++)
2193 {
2194 rc = vmmR3HlpResetEvent(pVM->vmm.s.pahEvtRendezvousEnterOrdered[i]);
2195 AssertLogRelMsg(rc == VERR_TIMEOUT, ("%Rrc\n", rc));
2196 }
2197 rc = vmmR3HlpResetEvent(pVM->vmm.s.hEvtRendezvousEnterOneByOne); AssertLogRelMsg(rc == VERR_TIMEOUT, ("%Rrc\n", rc));
2198 rc = RTSemEventMultiReset(pVM->vmm.s.hEvtMulRendezvousEnterAllAtOnce); AssertLogRelRC(rc);
2199 rc = RTSemEventMultiReset(pVM->vmm.s.hEvtMulRendezvousDone); AssertLogRelRC(rc);
2200 rc = vmmR3HlpResetEvent(pVM->vmm.s.hEvtRendezvousDoneCaller); AssertLogRelMsg(rc == VERR_TIMEOUT, ("%Rrc\n", rc));
2201
2202 ASMAtomicWriteU32(&pVM->vmm.s.cRendezvousEmtsEntered, 0);
2203 ASMAtomicWriteU32(&pVM->vmm.s.cRendezvousEmtsDone, 0);
2204 ASMAtomicWriteU32(&pVM->vmm.s.cRendezvousEmtsReturned, 0);
2205 ASMAtomicWriteS32(&pVM->vmm.s.i32RendezvousStatus, VINF_SUCCESS);
2206 ASMAtomicWritePtr((void * volatile *)&pVM->vmm.s.pfnRendezvous, (void *)(uintptr_t)pfnRendezvous);
2207 ASMAtomicWritePtr(&pVM->vmm.s.pvRendezvousUser, pvUser);
2208 ASMAtomicWriteU32(&pVM->vmm.s.fRendezvousFlags, fFlags);
2209 ASMAtomicIncU32(&pVM->vmm.s.cRendezvousRecursions);
2210
2211 /*
2212 * We're ready to go now, do normal rendezvous processing.
2213 */
2214 rc = RTSemEventMultiSignal(pVM->vmm.s.hEvtMulRendezvousRecursionPush);
2215 AssertLogRelRC(rc);
2216
2217 VBOXSTRICTRC rcStrict = vmmR3EmtRendezvousCommon(pVM, pVCpu, true /*fIsCaller*/, fFlags, pfnRendezvous, pvUser);
2218
2219 /*
2220     * The caller waits for the other EMTs to finish, return, and start waiting
2221     * on the pop semaphore.
2222 */
2223 for (;;)
2224 {
2225 rc = RTSemEventWait(pVM->vmm.s.hEvtRendezvousDoneCaller, RT_INDEFINITE_WAIT);
2226 AssertLogRelRC(rc);
2227 if (!pVM->vmm.s.fRendezvousRecursion)
2228 break;
2229 rcStrict = vmmR3EmtRendezvousCommonRecursion(pVM, pVCpu, rcStrict);
2230 }
2231
2232 /*
2233 * Get the return code and merge it with the above recursion status.
2234 */
2235 VBOXSTRICTRC rcStrict2 = pVM->vmm.s.i32RendezvousStatus;
2236 if ( rcStrict2 != VINF_SUCCESS
2237 && ( rcStrict == VINF_SUCCESS
2238 || rcStrict > rcStrict2))
2239 rcStrict = rcStrict2;
2240
2241 /*
2242 * Restore the parent rendezvous state.
2243 */
2244 for (VMCPUID i = 0; i < pVM->cCpus; i++)
2245 {
2246 rc = vmmR3HlpResetEvent(pVM->vmm.s.pahEvtRendezvousEnterOrdered[i]);
2247 AssertLogRelMsg(rc == VERR_TIMEOUT, ("%Rrc\n", rc));
2248 }
2249 rc = vmmR3HlpResetEvent(pVM->vmm.s.hEvtRendezvousEnterOneByOne); AssertLogRelMsg(rc == VERR_TIMEOUT, ("%Rrc\n", rc));
2250 rc = RTSemEventMultiReset(pVM->vmm.s.hEvtMulRendezvousEnterAllAtOnce); AssertLogRelRC(rc);
2251 rc = RTSemEventMultiReset(pVM->vmm.s.hEvtMulRendezvousDone); AssertLogRelRC(rc);
2252 rc = vmmR3HlpResetEvent(pVM->vmm.s.hEvtRendezvousDoneCaller); AssertLogRelMsg(rc == VERR_TIMEOUT, ("%Rrc\n", rc));
2253
2254 ASMAtomicWriteU32(&pVM->vmm.s.cRendezvousEmtsEntered, pVM->cCpus);
2255 ASMAtomicWriteU32(&pVM->vmm.s.cRendezvousEmtsReturned, 0);
2256 ASMAtomicWriteU32(&pVM->vmm.s.cRendezvousEmtsDone, cParentDone);
2257 ASMAtomicWriteS32(&pVM->vmm.s.i32RendezvousStatus, iParentStatus);
2258 ASMAtomicWriteU32(&pVM->vmm.s.fRendezvousFlags, fParentFlags);
2259 ASMAtomicWritePtr(&pVM->vmm.s.pvRendezvousUser, pvParentUser);
2260 ASMAtomicWritePtr((void * volatile *)&pVM->vmm.s.pfnRendezvous, (void *)(uintptr_t)pfnParent);
2261
2262 /*
2263 * Usher the other EMTs back to their parent recursion routine, waiting
2264 * for them to all get there before we return (makes sure they've been
2265 * scheduled and are past the pop event sem, see below).
2266 */
2267 ASMAtomicWriteU32(&pVM->vmm.s.cRendezvousEmtsRecursingPop, 0);
2268 rc = RTSemEventMultiSignal(pVM->vmm.s.hEvtMulRendezvousRecursionPop);
2269 AssertLogRelRC(rc);
2270
2271 if (ASMAtomicIncU32(&pVM->vmm.s.cRendezvousEmtsRecursingPop) != pVM->cCpus)
2272 {
2273 rc = RTSemEventWait(pVM->vmm.s.hEvtRendezvousRecursionPopCaller, RT_INDEFINITE_WAIT);
2274 AssertLogRelRC(rc);
2275 }
2276
2277 /*
2278 * We must reset the pop semaphore on the way out (doing the pop caller too,
2279 * just in case). The parent may be another recursion.
2280 */
2281 rc = RTSemEventMultiReset(pVM->vmm.s.hEvtMulRendezvousRecursionPop); AssertLogRelRC(rc);
2282 rc = vmmR3HlpResetEvent(pVM->vmm.s.hEvtRendezvousRecursionPopCaller); AssertLogRelMsg(rc == VERR_TIMEOUT, ("%Rrc\n", rc));
2283
2284 ASMAtomicDecU32(&pVM->vmm.s.cRendezvousRecursions);
2285
2286 Log(("vmmR3EmtRendezvousRecursive: %#x EMT#%u depth=%d returns %Rrc\n",
2287 fFlags, pVCpu->idCpu, pVM->vmm.s.cRendezvousRecursions, VBOXSTRICTRC_VAL(rcStrict)));
2288 return rcStrict;
2289}
2290
2291
2292/**
2293 * EMT rendezvous.
2294 *
2295 * Gathers all the EMTs and executes some code on each of them, either one by
2296 * one or all at once.
2297 *
2298 * @returns VBox strict status code. This will be the first error,
2299 * VINF_SUCCESS, or an EM scheduling status code.
2300 *
2301 * @retval VERR_DEADLOCK if recursion is attempted using a rendezvous type that
2302 * doesn't support it or if the recursion is too deep.
2303 *
2304 * @param pVM The cross context VM structure.
2305 * @param fFlags Flags indicating execution methods. See
2306 * grp_VMMR3EmtRendezvous_fFlags. The one-by-one,
2307 * descending and ascending rendezvous types support
2308 * recursion from inside @a pfnRendezvous.
2309 * @param pfnRendezvous The callback.
2310 * @param pvUser User argument for the callback.
2311 *
2312 * @thread Any.
2313 */
2314VMMR3DECL(int) VMMR3EmtRendezvous(PVM pVM, uint32_t fFlags, PFNVMMEMTRENDEZVOUS pfnRendezvous, void *pvUser)
2315{
2316 /*
2317 * Validate input.
2318 */
2319 AssertReturn(pVM, VERR_INVALID_VM_HANDLE);
2320 AssertMsg( (fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) != VMMEMTRENDEZVOUS_FLAGS_TYPE_INVALID
2321 && (fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) <= VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING
2322 && !(fFlags & ~VMMEMTRENDEZVOUS_FLAGS_VALID_MASK), ("%#x\n", fFlags));
2323 AssertMsg( !(fFlags & VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR)
2324 || ( (fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) != VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE
2325 && (fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) != VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE),
2326 ("type %u\n", fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK));
2327
2328 VBOXSTRICTRC rcStrict;
2329 PVMCPU pVCpu = VMMGetCpu(pVM);
2330 if (!pVCpu)
2331 {
2332 /*
2333 * Forward the request to an EMT thread.
2334 */
2335 Log(("VMMR3EmtRendezvous: %#x non-EMT\n", fFlags));
2336 if (!(fFlags & VMMEMTRENDEZVOUS_FLAGS_PRIORITY))
2337 rcStrict = VMR3ReqCallWait(pVM, VMCPUID_ANY, (PFNRT)VMMR3EmtRendezvous, 4, pVM, fFlags, pfnRendezvous, pvUser);
2338 else
2339 rcStrict = VMR3ReqPriorityCallWait(pVM, VMCPUID_ANY, (PFNRT)VMMR3EmtRendezvous, 4, pVM, fFlags, pfnRendezvous, pvUser);
2340 Log(("VMMR3EmtRendezvous: %#x non-EMT returns %Rrc\n", fFlags, VBOXSTRICTRC_VAL(rcStrict)));
2341 }
2342 else if ( pVM->cCpus == 1
2343 || ( pVM->enmVMState == VMSTATE_DESTROYING
2344 && VMR3GetActiveEmts(pVM->pUVM) < pVM->cCpus ) )
2345 {
2346 /*
2347 * Shortcut for the single EMT case.
2348 *
2349         * We also end up here if EMT(0) (or another EMT) tries to issue a rendezvous
2350         * during vmR3Destroy after other emulation threads have started terminating.
2351 */
2352 if (!pVCpu->vmm.s.fInRendezvous)
2353 {
2354 Log(("VMMR3EmtRendezvous: %#x EMT (uni)\n", fFlags));
2355 pVCpu->vmm.s.fInRendezvous = true;
2356 pVM->vmm.s.fRendezvousFlags = fFlags;
2357 rcStrict = pfnRendezvous(pVM, pVCpu, pvUser);
2358 pVCpu->vmm.s.fInRendezvous = false;
2359 }
2360 else
2361 {
2362 /* Recursion. Do the same checks as in the SMP case. */
2363 Log(("VMMR3EmtRendezvous: %#x EMT (uni), recursion depth=%d\n", fFlags, pVM->vmm.s.cRendezvousRecursions));
2364 uint32_t fType = pVM->vmm.s.fRendezvousFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK;
2365 AssertLogRelReturn( !pVCpu->vmm.s.fInRendezvous
2366 || fType == VMMEMTRENDEZVOUS_FLAGS_TYPE_ASCENDING
2367 || fType == VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING
2368 || fType == VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE
2369 || fType == VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE
2370 , VERR_DEADLOCK);
2371
2372 AssertLogRelReturn(pVM->vmm.s.cRendezvousRecursions < 3, VERR_DEADLOCK);
2373 pVM->vmm.s.cRendezvousRecursions++;
2374 uint32_t const fParentFlags = pVM->vmm.s.fRendezvousFlags;
2375 pVM->vmm.s.fRendezvousFlags = fFlags;
2376
2377 rcStrict = pfnRendezvous(pVM, pVCpu, pvUser);
2378
2379 pVM->vmm.s.fRendezvousFlags = fParentFlags;
2380 pVM->vmm.s.cRendezvousRecursions--;
2381 }
2382 Log(("VMMR3EmtRendezvous: %#x EMT (uni) returns %Rrc\n", fFlags, VBOXSTRICTRC_VAL(rcStrict)));
2383 }
2384 else
2385 {
2386 /*
2387         * Take the spin lock. If busy, check for recursion; if not recursing, wait for
2388         * the other EMT to finish while keeping a lookout for the RENDEZVOUS FF.
2389 */
2390 int rc;
2391 rcStrict = VINF_SUCCESS;
2392 if (RT_UNLIKELY(!ASMAtomicCmpXchgU32(&pVM->vmm.s.u32RendezvousLock, 0x77778888, 0)))
2393 {
2394 /* Allow recursion in some cases. */
2395 if ( pVCpu->vmm.s.fInRendezvous
2396 && ( (pVM->vmm.s.fRendezvousFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ASCENDING
2397 || (pVM->vmm.s.fRendezvousFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING
2398 || (pVM->vmm.s.fRendezvousFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE
2399 || (pVM->vmm.s.fRendezvousFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE
2400 ))
2401 return VBOXSTRICTRC_TODO(vmmR3EmtRendezvousRecursive(pVM, pVCpu, fFlags, pfnRendezvous, pvUser));
2402
2403 AssertLogRelMsgReturn(!pVCpu->vmm.s.fInRendezvous, ("fRendezvousFlags=%#x\n", pVM->vmm.s.fRendezvousFlags),
2404 VERR_DEADLOCK);
2405
2406 Log(("VMMR3EmtRendezvous: %#x EMT#%u, waiting for lock...\n", fFlags, pVCpu->idCpu));
2407 while (!ASMAtomicCmpXchgU32(&pVM->vmm.s.u32RendezvousLock, 0x77778888, 0))
2408 {
2409 if (VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
2410 {
2411 rc = VMMR3EmtRendezvousFF(pVM, pVCpu);
2412 if ( rc != VINF_SUCCESS
2413 && ( rcStrict == VINF_SUCCESS
2414 || rcStrict > rc))
2415 rcStrict = rc;
2416 /** @todo Perhaps deal with termination here? */
2417 }
2418 ASMNopPause();
2419 }
2420 }
2421
2422 Log(("VMMR3EmtRendezvous: %#x EMT#%u\n", fFlags, pVCpu->idCpu));
2423 Assert(!VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS));
2424 Assert(!pVCpu->vmm.s.fInRendezvous);
2425 pVCpu->vmm.s.fInRendezvous = true;
2426
2427 /*
2428 * Clear the slate and setup the rendezvous. This is a semaphore ping-pong orgy. :-)
2429 */
2430 for (VMCPUID i = 0; i < pVM->cCpus; i++)
2431 {
2432 rc = RTSemEventWait(pVM->vmm.s.pahEvtRendezvousEnterOrdered[i], 0);
2433 AssertLogRelMsg(rc == VERR_TIMEOUT || rc == VINF_SUCCESS, ("%Rrc\n", rc));
2434 }
2435 rc = RTSemEventWait(pVM->vmm.s.hEvtRendezvousEnterOneByOne, 0); AssertLogRelMsg(rc == VERR_TIMEOUT || rc == VINF_SUCCESS, ("%Rrc\n", rc));
2436 rc = RTSemEventMultiReset(pVM->vmm.s.hEvtMulRendezvousEnterAllAtOnce); AssertLogRelRC(rc);
2437 rc = RTSemEventMultiReset(pVM->vmm.s.hEvtMulRendezvousDone); AssertLogRelRC(rc);
2438 rc = RTSemEventWait(pVM->vmm.s.hEvtRendezvousDoneCaller, 0); AssertLogRelMsg(rc == VERR_TIMEOUT || rc == VINF_SUCCESS, ("%Rrc\n", rc));
2439 ASMAtomicWriteU32(&pVM->vmm.s.cRendezvousEmtsEntered, 0);
2440 ASMAtomicWriteU32(&pVM->vmm.s.cRendezvousEmtsDone, 0);
2441 ASMAtomicWriteU32(&pVM->vmm.s.cRendezvousEmtsReturned, 0);
2442 ASMAtomicWriteS32(&pVM->vmm.s.i32RendezvousStatus, VINF_SUCCESS);
2443 ASMAtomicWritePtr((void * volatile *)&pVM->vmm.s.pfnRendezvous, (void *)(uintptr_t)pfnRendezvous);
2444 ASMAtomicWritePtr(&pVM->vmm.s.pvRendezvousUser, pvUser);
2445 ASMAtomicWriteU32(&pVM->vmm.s.fRendezvousFlags, fFlags);
2446
2447 /*
2448 * Set the FF and poke the other EMTs.
2449 */
2450 VM_FF_SET(pVM, VM_FF_EMT_RENDEZVOUS);
2451 VMR3NotifyGlobalFFU(pVM->pUVM, VMNOTIFYFF_FLAGS_POKE);
2452
2453 /*
2454 * Do the same ourselves.
2455 */
2456 VBOXSTRICTRC rcStrict2 = vmmR3EmtRendezvousCommon(pVM, pVCpu, true /* fIsCaller */, fFlags, pfnRendezvous, pvUser);
2457
2458 /*
2459 * The caller waits for the other EMTs to be done and return before doing
2460         * the cleanup. This does away with wakeup / reset races we would otherwise
2461 * risk in the multiple release event semaphore code (hEvtRendezvousDoneCaller).
2462 */
2463 for (;;)
2464 {
2465 rc = RTSemEventWait(pVM->vmm.s.hEvtRendezvousDoneCaller, RT_INDEFINITE_WAIT);
2466 AssertLogRelRC(rc);
2467 if (!pVM->vmm.s.fRendezvousRecursion)
2468 break;
2469 rcStrict2 = vmmR3EmtRendezvousCommonRecursion(pVM, pVCpu, rcStrict2);
2470 }
2471
2472 /*
2473 * Get the return code and clean up a little bit.
2474 */
2475 VBOXSTRICTRC rcStrict3 = pVM->vmm.s.i32RendezvousStatus;
2476 ASMAtomicWriteNullPtr((void * volatile *)&pVM->vmm.s.pfnRendezvous);
2477
2478 ASMAtomicWriteU32(&pVM->vmm.s.u32RendezvousLock, 0);
2479 pVCpu->vmm.s.fInRendezvous = false;
2480
2481 /*
2482 * Merge rcStrict, rcStrict2 and rcStrict3.
2483 */
2484 AssertRC(VBOXSTRICTRC_VAL(rcStrict));
2485 AssertRC(VBOXSTRICTRC_VAL(rcStrict2));
2486 if ( rcStrict2 != VINF_SUCCESS
2487 && ( rcStrict == VINF_SUCCESS
2488 || rcStrict > rcStrict2))
2489 rcStrict = rcStrict2;
2490 if ( rcStrict3 != VINF_SUCCESS
2491 && ( rcStrict == VINF_SUCCESS
2492 || rcStrict > rcStrict3))
2493 rcStrict = rcStrict3;
2494 Log(("VMMR3EmtRendezvous: %#x EMT#%u returns %Rrc\n", fFlags, pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict)));
2495 }
2496
2497 AssertLogRelMsgReturn( rcStrict <= VINF_SUCCESS
2498 || (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST),
2499 ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)),
2500 VERR_IPE_UNEXPECTED_INFO_STATUS);
2501 return VBOXSTRICTRC_VAL(rcStrict);
2502}
2503
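/*
 * Usage sketch for VMMR3EmtRendezvous (added; the callback name and the choice
 * of VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE below are hypothetical, the API and flag
 * are the ones handled above):
 *
 *      static DECLCALLBACK(VBOXSTRICTRC) myWorker(PVM pVM, PVMCPU pVCpu, void *pvUser)
 *      {
 *          // With TYPE_ONCE this runs exactly once, on the last EMT to enter,
 *          // while the other EMTs wait on the done semaphores.
 *          RT_NOREF(pVM, pVCpu, pvUser);
 *          return VINF_SUCCESS;
 *      }
 *      ...
 *      int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE, myWorker, NULL);
 *
 * Non-EMT callers are fine too; they are forwarded to an EMT via VMR3ReqCallWait
 * as seen in the function above.
 */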
2504
2505/**
2506 * Interface for vmR3SetHaltMethodU.
2507 *
2508 * @param pVCpu The cross context virtual CPU structure of the
2509 * calling EMT.
2510 * @param fMayHaltInRing0 The new state.
2511 * @param   cNsSpinBlockThreshold   The spin-vs-blocking threshold.
2512 * @thread EMT(pVCpu)
2513 *
2514 * @todo Move the EMT handling to VMM (or EM). I soooooo regret that VM
2515 * component.
2516 */
2517VMMR3_INT_DECL(void) VMMR3SetMayHaltInRing0(PVMCPU pVCpu, bool fMayHaltInRing0, uint32_t cNsSpinBlockThreshold)
2518{
2519 pVCpu->vmm.s.fMayHaltInRing0 = fMayHaltInRing0;
2520 pVCpu->vmm.s.cNsSpinBlockThreshold = cNsSpinBlockThreshold;
2521}
2522
2523
2524/**
2525 * Read from the ring 0 jump buffer stack.
2526 *
2527 * @returns VBox status code.
2528 *
2529 * @param pVM The cross context VM structure.
2530 * @param idCpu The ID of the source CPU context (for the address).
2531 * @param R0Addr Where to start reading.
2532 * @param pvBuf Where to store the data we've read.
2533 * @param cbRead The number of bytes to read.
2534 */
2535VMMR3_INT_DECL(int) VMMR3ReadR0Stack(PVM pVM, VMCPUID idCpu, RTHCUINTPTR R0Addr, void *pvBuf, size_t cbRead)
2536{
2537 PVMCPU pVCpu = VMMGetCpuById(pVM, idCpu);
2538 AssertReturn(pVCpu, VERR_INVALID_PARAMETER);
2539 AssertReturn(cbRead < ~(size_t)0 / 2, VERR_INVALID_PARAMETER);
2540
2541 int rc;
2542#ifdef VMM_R0_SWITCH_STACK
2543 RTHCUINTPTR off = R0Addr - MMHyperCCToR0(pVM, pVCpu->vmm.s.pbEMTStackR3);
2544#else
2545 RTHCUINTPTR off = pVCpu->vmm.s.CallRing3JmpBufR0.cbSavedStack - (pVCpu->vmm.s.CallRing3JmpBufR0.SpCheck - R0Addr);
2546#endif
2547 if ( off < VMM_STACK_SIZE
2548 && off + cbRead <= VMM_STACK_SIZE)
2549 {
2550 memcpy(pvBuf, &pVCpu->vmm.s.pbEMTStackR3[off], cbRead);
2551 rc = VINF_SUCCESS;
2552 }
2553 else
2554 rc = VERR_INVALID_POINTER;
2555
2556 /* Supply the setjmp return RIP/EIP. */
2557 if ( pVCpu->vmm.s.CallRing3JmpBufR0.UnwindRetPcLocation + sizeof(RTR0UINTPTR) > R0Addr
2558 && pVCpu->vmm.s.CallRing3JmpBufR0.UnwindRetPcLocation < R0Addr + cbRead)
2559 {
2560 uint8_t const *pbSrc = (uint8_t const *)&pVCpu->vmm.s.CallRing3JmpBufR0.UnwindRetPcValue;
2561 size_t cbSrc = sizeof(pVCpu->vmm.s.CallRing3JmpBufR0.UnwindRetPcValue);
2562 size_t offDst = 0;
2563 if (R0Addr < pVCpu->vmm.s.CallRing3JmpBufR0.UnwindRetPcLocation)
2564 offDst = pVCpu->vmm.s.CallRing3JmpBufR0.UnwindRetPcLocation - R0Addr;
2565 else if (R0Addr > pVCpu->vmm.s.CallRing3JmpBufR0.UnwindRetPcLocation)
2566 {
2567 size_t offSrc = R0Addr - pVCpu->vmm.s.CallRing3JmpBufR0.UnwindRetPcLocation;
2568 Assert(offSrc < cbSrc);
2569 pbSrc -= offSrc;
2570 cbSrc -= offSrc;
2571 }
2572 if (cbSrc > cbRead - offDst)
2573 cbSrc = cbRead - offDst;
2574 memcpy((uint8_t *)pvBuf + offDst, pbSrc, cbSrc);
2575
2576 if (cbSrc == cbRead)
2577 rc = VINF_SUCCESS;
2578 }
2579
2580 return rc;
2581}
2582
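/*
 * Usage sketch (added; hypothetical caller): reading back the saved ring-0
 * return PC through this interface instead of touching the jump buffer
 * directly, relying on the UnwindRetPcLocation fix-up above:
 *
 *      uint64_t uRetPc = 0;
 *      int rc = VMMR3ReadR0Stack(pVM, pVCpu->idCpu,
 *                                pVCpu->vmm.s.CallRing3JmpBufR0.UnwindRetPcLocation,
 *                                &uRetPc, sizeof(uRetPc));
 *
 * On success uRetPc holds UnwindRetPcValue.
 */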
2583
2584/**
2585 * Used by the DBGF stack unwinder to initialize the register state.
2586 *
2587 * @param pUVM The user mode VM handle.
2588 * @param idCpu The ID of the CPU being unwound.
2589 * @param pState The unwind state to initialize.
2590 */
2591VMMR3_INT_DECL(void) VMMR3InitR0StackUnwindState(PUVM pUVM, VMCPUID idCpu, struct RTDBGUNWINDSTATE *pState)
2592{
2593 PVMCPU pVCpu = VMMR3GetCpuByIdU(pUVM, idCpu);
2594 AssertReturnVoid(pVCpu);
2595
2596 /*
2597 * Locate the resume point on the stack.
2598 */
2599#ifdef VMM_R0_SWITCH_STACK
2600 uintptr_t off = pVCpu->vmm.s.CallRing3JmpBufR0.SpResume - MMHyperCCToR0(pVCpu->pVMR3, pVCpu->vmm.s.pbEMTStackR3);
2601 AssertReturnVoid(off < VMM_STACK_SIZE);
2602#else
2603 uintptr_t off = 0;
2604#endif
2605
2606#ifdef RT_ARCH_AMD64
2607 /*
2608 * This code must match the .resume stuff in VMMR0JmpA-amd64.asm exactly.
2609 */
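    /*
     * Saved-register layout read below, relative to 'off' (descriptive summary
     * only; VMMR0JmpA-amd64.asm remains the authority):
     *      strict builds:  8 bytes RESUME_MAGIC
     *      Windows hosts:  0xa0 bytes XMM6 thru XMM15
     *      then:           RFLAGS, RBX, [RSI, RDI on Windows], R12, R13, R14, R15, RBP, return RIP
     */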
2610# ifdef VBOX_STRICT
2611 Assert(*(uint64_t const *)&pVCpu->vmm.s.pbEMTStackR3[off] == UINT32_C(0x7eadf00d));
2612 off += 8; /* RESUME_MAGIC */
2613# endif
2614# ifdef RT_OS_WINDOWS
2615 off += 0xa0; /* XMM6 thru XMM15 */
2616# endif
2617 pState->u.x86.uRFlags = *(uint64_t const *)&pVCpu->vmm.s.pbEMTStackR3[off];
2618 off += 8;
2619 pState->u.x86.auRegs[X86_GREG_xBX] = *(uint64_t const *)&pVCpu->vmm.s.pbEMTStackR3[off];
2620 off += 8;
2621# ifdef RT_OS_WINDOWS
2622 pState->u.x86.auRegs[X86_GREG_xSI] = *(uint64_t const *)&pVCpu->vmm.s.pbEMTStackR3[off];
2623 off += 8;
2624 pState->u.x86.auRegs[X86_GREG_xDI] = *(uint64_t const *)&pVCpu->vmm.s.pbEMTStackR3[off];
2625 off += 8;
2626# endif
2627 pState->u.x86.auRegs[X86_GREG_x12] = *(uint64_t const *)&pVCpu->vmm.s.pbEMTStackR3[off];
2628 off += 8;
2629 pState->u.x86.auRegs[X86_GREG_x13] = *(uint64_t const *)&pVCpu->vmm.s.pbEMTStackR3[off];
2630 off += 8;
2631 pState->u.x86.auRegs[X86_GREG_x14] = *(uint64_t const *)&pVCpu->vmm.s.pbEMTStackR3[off];
2632 off += 8;
2633 pState->u.x86.auRegs[X86_GREG_x15] = *(uint64_t const *)&pVCpu->vmm.s.pbEMTStackR3[off];
2634 off += 8;
2635 pState->u.x86.auRegs[X86_GREG_xBP] = *(uint64_t const *)&pVCpu->vmm.s.pbEMTStackR3[off];
2636 off += 8;
2637 pState->uPc = *(uint64_t const *)&pVCpu->vmm.s.pbEMTStackR3[off];
2638 off += 8;
2639
2640#elif defined(RT_ARCH_X86)
2641 /*
2642 * This code must match the .resume stuff in VMMR0JmpA-x86.asm exactly.
2643 */
2644# ifdef VBOX_STRICT
2645 Assert(*(uint32_t const *)&pVCpu->vmm.s.pbEMTStackR3[off] == UINT32_C(0x7eadf00d));
2646 off += 4; /* RESUME_MAGIC */
2647# endif
2648 pState->u.x86.uRFlags = *(uint32_t const *)&pVCpu->vmm.s.pbEMTStackR3[off];
2649 off += 4;
2650 pState->u.x86.auRegs[X86_GREG_xBX] = *(uint32_t const *)&pVCpu->vmm.s.pbEMTStackR3[off];
2651 off += 4;
2652 pState->u.x86.auRegs[X86_GREG_xSI] = *(uint32_t const *)&pVCpu->vmm.s.pbEMTStackR3[off];
2653 off += 4;
2654 pState->u.x86.auRegs[X86_GREG_xDI] = *(uint32_t const *)&pVCpu->vmm.s.pbEMTStackR3[off];
2655 off += 4;
2656 pState->u.x86.auRegs[X86_GREG_xBP] = *(uint32_t const *)&pVCpu->vmm.s.pbEMTStackR3[off];
2657 off += 4;
2658 pState->uPc = *(uint32_t const *)&pVCpu->vmm.s.pbEMTStackR3[off];
2659 off += 4;
2660#else
2661# error "Port me"
2662#endif
2663
2664 /*
2665 * This is all we really need here, though the above helps if the assembly
2666 * doesn't contain unwind info (currently only on win/64, so that is useful).
2667 */
2668 pState->u.x86.auRegs[X86_GREG_xBP] = pVCpu->vmm.s.CallRing3JmpBufR0.SavedEbp;
2669 pState->u.x86.auRegs[X86_GREG_xSP] = pVCpu->vmm.s.CallRing3JmpBufR0.SpResume;
2670}
2671
2672#ifdef VBOX_WITH_RAW_MODE
2673
2674/**
2675 * Calls an RC function.
2676 *
2677 * @param pVM The cross context VM structure.
2678 * @param RCPtrEntry The address of the RC function.
2679 * @param   cArgs           The number of arguments passed in the ellipsis.
2680 * @param ... Arguments to the function.
2681 */
2682VMMR3DECL(int) VMMR3CallRC(PVM pVM, RTRCPTR RCPtrEntry, unsigned cArgs, ...)
2683{
2684 va_list args;
2685 va_start(args, cArgs);
2686 int rc = VMMR3CallRCV(pVM, RCPtrEntry, cArgs, args);
2687 va_end(args);
2688 return rc;
2689}
2690
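/*
 * Usage sketch (added; the RC entry point and arguments are hypothetical):
 *
 *      int rc = VMMR3CallRC(pVM, RCPtrMyHelper, 2, uArg1, uArg2);
 *
 * The arguments are passed as 32-bit values and pushed onto the hypervisor
 * stack by VMMR3CallRCV below.
 */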
2691
2692/**
2693 * Calls an RC function.
2694 *
2695 * @param pVM The cross context VM structure.
2696 * @param RCPtrEntry The address of the RC function.
2697 * @param   cArgs           The number of arguments in @a args.
2698 * @param args Arguments to the function.
2699 */
2700VMMR3DECL(int) VMMR3CallRCV(PVM pVM, RTRCPTR RCPtrEntry, unsigned cArgs, va_list args)
2701{
2702 /* Raw mode implies 1 VCPU. */
2703 AssertReturn(pVM->cCpus == 1, VERR_RAW_MODE_INVALID_SMP);
2704 PVMCPU pVCpu = &pVM->aCpus[0];
2705
2706    Log2(("VMMR3CallRCV: RCPtrEntry=%RRv cArgs=%d\n", RCPtrEntry, cArgs));
2707
2708 /*
2709 * Setup the call frame using the trampoline.
2710 */
2711 CPUMSetHyperState(pVCpu,
2712 pVM->vmm.s.pfnCallTrampolineRC, /* eip */
2713 pVCpu->vmm.s.pbEMTStackBottomRC - cArgs * sizeof(RTGCUINTPTR32), /* esp */
2714 RCPtrEntry, /* eax */
2715 cArgs /* edx */
2716 );
2717
2718#if 0
2719 memset(pVCpu->vmm.s.pbEMTStackR3, 0xaa, VMM_STACK_SIZE); /* Clear the stack. */
2720#endif
2721 PRTGCUINTPTR32 pFrame = (PRTGCUINTPTR32)(pVCpu->vmm.s.pbEMTStackR3 + VMM_STACK_SIZE) - cArgs;
2722 int i = cArgs;
2723 while (i-- > 0)
2724 *pFrame++ = va_arg(args, RTGCUINTPTR32);
2725
2726 CPUMPushHyper(pVCpu, cArgs * sizeof(RTGCUINTPTR32)); /* stack frame size */
2727 CPUMPushHyper(pVCpu, RCPtrEntry); /* what to call */
2728
2729 /*
2730 * We hide log flushes (outer) and hypervisor interrupts (inner).
2731 */
2732 for (;;)
2733 {
2734 int rc;
2735 Assert(CPUMGetHyperCR3(pVCpu) && CPUMGetHyperCR3(pVCpu) == PGMGetHyperCR3(pVCpu));
2736 do
2737 {
2738#ifdef NO_SUPCALLR0VMM
2739 rc = VERR_GENERAL_FAILURE;
2740#else
2741 rc = SUPR3CallVMMR0Fast(pVM->pVMR0, VMMR0_DO_RAW_RUN, 0);
2742 if (RT_LIKELY(rc == VINF_SUCCESS))
2743 rc = pVCpu->vmm.s.iLastGZRc;
2744#endif
2745 } while (rc == VINF_EM_RAW_INTERRUPT_HYPER);
2746
2747 /*
2748 * Flush the loggers.
2749 */
2750#ifdef LOG_ENABLED
2751 PRTLOGGERRC pLogger = pVM->vmm.s.pRCLoggerR3;
2752 if ( pLogger
2753 && pLogger->offScratch > 0)
2754 RTLogFlushRC(NULL, pLogger);
2755#endif
2756#ifdef VBOX_WITH_RC_RELEASE_LOGGING
2757 PRTLOGGERRC pRelLogger = pVM->vmm.s.pRCRelLoggerR3;
2758 if (RT_UNLIKELY(pRelLogger && pRelLogger->offScratch > 0))
2759 RTLogFlushRC(RTLogRelGetDefaultInstance(), pRelLogger);
2760#endif
2761 if (rc == VERR_TRPM_PANIC || rc == VERR_TRPM_DONT_PANIC)
2762 VMMR3FatalDump(pVM, pVCpu, rc);
2763 if (rc != VINF_VMM_CALL_HOST)
2764 {
2765            Log2(("VMMR3CallRCV: returns %Rrc (cs:eip=%04x:%08x)\n", rc, CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
2766 return rc;
2767 }
2768 rc = vmmR3ServiceCallRing3Request(pVM, pVCpu);
2769 if (RT_FAILURE(rc))
2770 return rc;
2771 }
2772}
2773
2774#endif /* VBOX_WITH_RAW_MODE */
2775
2776/**
2777 * Wrapper for SUPR3CallVMMR0Ex which will deal with VINF_VMM_CALL_HOST returns.
2778 *
2779 * @returns VBox status code.
2780 * @param pVM The cross context VM structure.
2781 * @param uOperation Operation to execute.
2782 * @param u64Arg Constant argument.
2783 * @param pReqHdr Pointer to a request header. See SUPR3CallVMMR0Ex for
2784 * details.
2785 */
2786VMMR3DECL(int) VMMR3CallR0(PVM pVM, uint32_t uOperation, uint64_t u64Arg, PSUPVMMR0REQHDR pReqHdr)
2787{
2788 PVMCPU pVCpu = VMMGetCpu(pVM);
2789 AssertReturn(pVCpu, VERR_VM_THREAD_NOT_EMT);
2790 return VMMR3CallR0Emt(pVM, pVCpu, (VMMR0OPERATION)uOperation, u64Arg, pReqHdr);
2791}
2792
2793
2794/**
2795 * Wrapper for SUPR3CallVMMR0Ex which will deal with VINF_VMM_CALL_HOST returns.
2796 *
2797 * @returns VBox status code.
2798 * @param pVM The cross context VM structure.
2799 * @param   pVCpu           The cross context virtual CPU structure.
2800 * @param enmOperation Operation to execute.
2801 * @param u64Arg Constant argument.
2802 * @param pReqHdr Pointer to a request header. See SUPR3CallVMMR0Ex for
2803 * details.
2804 */
2805VMMR3_INT_DECL(int) VMMR3CallR0Emt(PVM pVM, PVMCPU pVCpu, VMMR0OPERATION enmOperation, uint64_t u64Arg, PSUPVMMR0REQHDR pReqHdr)
2806{
2807 int rc;
2808 for (;;)
2809 {
2810#ifdef NO_SUPCALLR0VMM
2811 rc = VERR_GENERAL_FAILURE;
2812#else
2813 rc = SUPR3CallVMMR0Ex(pVM->pVMR0, pVCpu->idCpu, enmOperation, u64Arg, pReqHdr);
2814#endif
2815 /*
2816 * Flush the logs.
2817 */
2818#ifdef LOG_ENABLED
2819 VMM_FLUSH_R0_LOG(pVCpu->vmm.s.pR0LoggerR3, NULL);
2820#endif
2821 VMM_FLUSH_R0_LOG(pVCpu->vmm.s.pR0RelLoggerR3, RTLogRelGetDefaultInstance());
2822 if (rc != VINF_VMM_CALL_HOST)
2823 break;
2824 rc = vmmR3ServiceCallRing3Request(pVM, pVCpu);
2825 if (RT_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
2826 break;
2827 /* Resume R0 */
2828 }
2829
2830 AssertLogRelMsgReturn(rc == VINF_SUCCESS || RT_FAILURE(rc),
2831 ("enmOperation=%u rc=%Rrc\n", enmOperation, rc),
2832 VERR_IPE_UNEXPECTED_INFO_STATUS);
2833 return rc;
2834}
2835
2836
2837#ifdef VBOX_WITH_RAW_MODE
2838/**
2839 * Resumes executing hypervisor code when interrupted by a queue flush or a
2840 * debug event.
2841 *
2842 * @returns VBox status code.
2843 * @param pVM The cross context VM structure.
2844 * @param pVCpu The cross context virtual CPU structure.
2845 */
2846VMMR3DECL(int) VMMR3ResumeHyper(PVM pVM, PVMCPU pVCpu)
2847{
2848 Log(("VMMR3ResumeHyper: eip=%RRv esp=%RRv\n", CPUMGetHyperEIP(pVCpu), CPUMGetHyperESP(pVCpu)));
2849 AssertReturn(pVM->cCpus == 1, VERR_RAW_MODE_INVALID_SMP);
2850
2851 /*
2852 * We hide log flushes (outer) and hypervisor interrupts (inner).
2853 */
2854 for (;;)
2855 {
2856 int rc;
2857 Assert(CPUMGetHyperCR3(pVCpu) && CPUMGetHyperCR3(pVCpu) == PGMGetHyperCR3(pVCpu));
2858 do
2859 {
2860# ifdef NO_SUPCALLR0VMM
2861 rc = VERR_GENERAL_FAILURE;
2862# else
2863 rc = SUPR3CallVMMR0Fast(pVM->pVMR0, VMMR0_DO_RAW_RUN, 0);
2864 if (RT_LIKELY(rc == VINF_SUCCESS))
2865 rc = pVCpu->vmm.s.iLastGZRc;
2866# endif
2867 } while (rc == VINF_EM_RAW_INTERRUPT_HYPER);
2868
2869 /*
2870 * Flush the loggers.
2871 */
2872# ifdef LOG_ENABLED
2873 PRTLOGGERRC pLogger = pVM->vmm.s.pRCLoggerR3;
2874 if ( pLogger
2875 && pLogger->offScratch > 0)
2876 RTLogFlushRC(NULL, pLogger);
2877# endif
2878# ifdef VBOX_WITH_RC_RELEASE_LOGGING
2879 PRTLOGGERRC pRelLogger = pVM->vmm.s.pRCRelLoggerR3;
2880 if (RT_UNLIKELY(pRelLogger && pRelLogger->offScratch > 0))
2881 RTLogFlushRC(RTLogRelGetDefaultInstance(), pRelLogger);
2882# endif
2883 if (rc == VERR_TRPM_PANIC || rc == VERR_TRPM_DONT_PANIC)
2884 VMMR3FatalDump(pVM, pVCpu, rc);
2885 if (rc != VINF_VMM_CALL_HOST)
2886 {
2887 Log(("VMMR3ResumeHyper: returns %Rrc\n", rc));
2888 return rc;
2889 }
2890 rc = vmmR3ServiceCallRing3Request(pVM, pVCpu);
2891 if (RT_FAILURE(rc))
2892 return rc;
2893 }
2894}
2895#endif /* VBOX_WITH_RAW_MODE */
2896
2897
2898/**
2899 * Services a call to the ring-3 host code.
2900 *
2901 * @returns VBox status code.
2902 * @param pVM The cross context VM structure.
2903 * @param pVCpu The cross context virtual CPU structure.
2904 * @remarks Careful with critsects.
2905 */
2906static int vmmR3ServiceCallRing3Request(PVM pVM, PVMCPU pVCpu)
2907{
2908 /*
2909 * We must also check for pending critsect exits or else we can deadlock
2910 * when entering other critsects here.
2911 */
2912 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PDM_CRITSECT))
2913 PDMCritSectBothFF(pVCpu);
2914
2915 switch (pVCpu->vmm.s.enmCallRing3Operation)
2916 {
2917 /*
2918 * Acquire a critical section.
2919 */
2920 case VMMCALLRING3_PDM_CRIT_SECT_ENTER:
2921 {
2922 pVCpu->vmm.s.rcCallRing3 = PDMR3CritSectEnterEx((PPDMCRITSECT)(uintptr_t)pVCpu->vmm.s.u64CallRing3Arg,
2923 true /*fCallRing3*/);
2924 break;
2925 }
2926
2927 /*
2928 * Enter a r/w critical section exclusively.
2929 */
2930 case VMMCALLRING3_PDM_CRIT_SECT_RW_ENTER_EXCL:
2931 {
2932 pVCpu->vmm.s.rcCallRing3 = PDMR3CritSectRwEnterExclEx((PPDMCRITSECTRW)(uintptr_t)pVCpu->vmm.s.u64CallRing3Arg,
2933 true /*fCallRing3*/);
2934 break;
2935 }
2936
2937 /*
2938 * Enter a r/w critical section shared.
2939 */
2940 case VMMCALLRING3_PDM_CRIT_SECT_RW_ENTER_SHARED:
2941 {
2942 pVCpu->vmm.s.rcCallRing3 = PDMR3CritSectRwEnterSharedEx((PPDMCRITSECTRW)(uintptr_t)pVCpu->vmm.s.u64CallRing3Arg,
2943 true /*fCallRing3*/);
2944 break;
2945 }
2946
2947 /*
2948 * Acquire the PDM lock.
2949 */
2950 case VMMCALLRING3_PDM_LOCK:
2951 {
2952 pVCpu->vmm.s.rcCallRing3 = PDMR3LockCall(pVM);
2953 break;
2954 }
2955
2956 /*
2957 * Grow the PGM pool.
2958 */
2959 case VMMCALLRING3_PGM_POOL_GROW:
2960 {
2961 pVCpu->vmm.s.rcCallRing3 = PGMR3PoolGrow(pVM);
2962 break;
2963 }
2964
2965 /*
2966         * Maps a page allocation chunk into ring-3 so ring-0 can use it.
2967 */
2968 case VMMCALLRING3_PGM_MAP_CHUNK:
2969 {
2970 pVCpu->vmm.s.rcCallRing3 = PGMR3PhysChunkMap(pVM, pVCpu->vmm.s.u64CallRing3Arg);
2971 break;
2972 }
2973
2974 /*
2975 * Allocates more handy pages.
2976 */
2977 case VMMCALLRING3_PGM_ALLOCATE_HANDY_PAGES:
2978 {
2979 pVCpu->vmm.s.rcCallRing3 = PGMR3PhysAllocateHandyPages(pVM);
2980 break;
2981 }
2982
2983 /*
2984 * Allocates a large page.
2985 */
2986 case VMMCALLRING3_PGM_ALLOCATE_LARGE_HANDY_PAGE:
2987 {
2988 pVCpu->vmm.s.rcCallRing3 = PGMR3PhysAllocateLargeHandyPage(pVM, pVCpu->vmm.s.u64CallRing3Arg);
2989 break;
2990 }
2991
2992 /*
2993 * Acquire the PGM lock.
2994 */
2995 case VMMCALLRING3_PGM_LOCK:
2996 {
2997 pVCpu->vmm.s.rcCallRing3 = PGMR3LockCall(pVM);
2998 break;
2999 }
3000
3001 /*
3002 * Acquire the MM hypervisor heap lock.
3003 */
3004 case VMMCALLRING3_MMHYPER_LOCK:
3005 {
3006 pVCpu->vmm.s.rcCallRing3 = MMR3LockCall(pVM);
3007 break;
3008 }
3009
3010#ifdef VBOX_WITH_REM
3011 /*
3012 * Flush REM handler notifications.
3013 */
3014 case VMMCALLRING3_REM_REPLAY_HANDLER_NOTIFICATIONS:
3015 {
3016 REMR3ReplayHandlerNotifications(pVM);
3017 pVCpu->vmm.s.rcCallRing3 = VINF_SUCCESS;
3018 break;
3019 }
3020#endif
3021
3022 /*
3023 * This is a noop. We just take this route to avoid unnecessary
3024 * tests in the loops.
3025 */
3026 case VMMCALLRING3_VMM_LOGGER_FLUSH:
3027 pVCpu->vmm.s.rcCallRing3 = VINF_SUCCESS;
3028 LogAlways(("*FLUSH*\n"));
3029 break;
3030
3031 /*
3032 * Set the VM error message.
3033 */
3034 case VMMCALLRING3_VM_SET_ERROR:
3035 VMR3SetErrorWorker(pVM);
3036 pVCpu->vmm.s.rcCallRing3 = VINF_SUCCESS;
3037 break;
3038
3039 /*
3040 * Set the VM runtime error message.
3041 */
3042 case VMMCALLRING3_VM_SET_RUNTIME_ERROR:
3043 pVCpu->vmm.s.rcCallRing3 = VMR3SetRuntimeErrorWorker(pVM);
3044 break;
3045
3046 /*
3047 * Signal a ring 0 hypervisor assertion.
3048 * Cancel the longjmp operation that's in progress.
3049 */
3050 case VMMCALLRING3_VM_R0_ASSERTION:
3051 pVCpu->vmm.s.enmCallRing3Operation = VMMCALLRING3_INVALID;
3052 pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call = false;
3053#ifdef RT_ARCH_X86
3054 pVCpu->vmm.s.CallRing3JmpBufR0.eip = 0;
3055#else
3056 pVCpu->vmm.s.CallRing3JmpBufR0.rip = 0;
3057#endif
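 /* Presumably, clearing fInRing3Call and the saved instruction pointer
    invalidates the ring-0 jump buffer so the asserting ring-0 code is never
    resumed; we only log the assertion text and fail below. */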
3058#ifdef VMM_R0_SWITCH_STACK
3059 *(uint64_t *)pVCpu->vmm.s.pbEMTStackR3 = 0; /* clear marker */
3060#endif
3061 LogRel(("%s", pVM->vmm.s.szRing0AssertMsg1));
3062 LogRel(("%s", pVM->vmm.s.szRing0AssertMsg2));
3063 return VERR_VMM_RING0_ASSERTION;
3064
3065 /*
3066 * A forced switch to ring-3 for host preemption purposes.
3067 */
3068 case VMMCALLRING3_VM_R0_PREEMPT:
3069 pVCpu->vmm.s.rcCallRing3 = VINF_SUCCESS;
3070 break;
3071
3072 case VMMCALLRING3_FTM_SET_CHECKPOINT:
3073 pVCpu->vmm.s.rcCallRing3 = FTMR3SetCheckpoint(pVM, (FTMCHECKPOINTTYPE)pVCpu->vmm.s.u64CallRing3Arg);
3074 break;
3075
3076 default:
3077 AssertMsgFailed(("enmCallRing3Operation=%d\n", pVCpu->vmm.s.enmCallRing3Operation));
3078 return VERR_VMM_UNKNOWN_RING3_CALL;
3079 }
3080
3081 pVCpu->vmm.s.enmCallRing3Operation = VMMCALLRING3_INVALID;
3082 return VINF_SUCCESS;
3083}
3084
3085
3086/**
3087 * Displays the force action flags.
3088 *
3089 * @param pVM The cross context VM structure.
3090 * @param pHlp The output helpers.
3091 * @param pszArgs The additional arguments (ignored).
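 *
 * @remarks Hedged usage sketch, not verbatim from this file: the handler is
 *          registered as a DBGF info item (the name and description string used
 *          at registration time may differ) and can then be dumped on demand:
 * @code
 *              rc = DBGFR3InfoRegisterInternal(pVM, "fflags",
 *                                              "Displays the force action flags.",
 *                                              vmmR3InfoFF);
 *              // Later, e.g.: VBoxManage debugvm <vmname> info fflags
 * @endcode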
3092 */
3093static DECLCALLBACK(void) vmmR3InfoFF(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
3094{
3095 int c;
3096 uint64_t f; /* 64-bit so the per-CPU FFs below are not truncated */
3097 NOREF(pszArgs);
3098
3099#define PRINT_FLAG(prf,flag) do { \
3100 if (f & (prf##flag)) \
3101 { \
3102 static const char *s_psz = #flag; \
3103 if (!(c % 6)) \
3104 pHlp->pfnPrintf(pHlp, "%s\n %s", c ? "," : "", s_psz); \
3105 else \
3106 pHlp->pfnPrintf(pHlp, ", %s", s_psz); \
3107 c++; \
3108 f &= ~(prf##flag); \
3109 } \
3110 } while (0)
3111
3112#define PRINT_GROUP(prf,grp,sfx) do { \
3113 if (f & (prf##grp##sfx)) \
3114 { \
3115 static const char *s_psz = #grp; \
3116 if (!(c % 5)) \
3117 pHlp->pfnPrintf(pHlp, "%s %s", c ? ",\n" : " Groups:\n", s_psz); \
3118 else \
3119 pHlp->pfnPrintf(pHlp, ", %s", s_psz); \
3120 c++; \
3121 } \
3122 } while (0)
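 /* Both helpers wrap after a handful of mnemonics per line (6 flags, 5 groups).
    PRINT_FLAG additionally clears each recognised bit from 'f', so whatever is
    left afterwards can be reported as "Unknown bits". */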
3123
3124 /*
3125 * The global flags.
3126 */
3127 const uint32_t fGlobalForcedActions = pVM->fGlobalForcedActions;
3128 pHlp->pfnPrintf(pHlp, "Global FFs: %#RX32", fGlobalForcedActions);
3129
3130 /* show the flag mnemonics */
3131 c = 0;
3132 f = fGlobalForcedActions;
3133 PRINT_FLAG(VM_FF_,TM_VIRTUAL_SYNC);
3134 PRINT_FLAG(VM_FF_,PDM_QUEUES);
3135 PRINT_FLAG(VM_FF_,PDM_DMA);
3136 PRINT_FLAG(VM_FF_,DBGF);
3137 PRINT_FLAG(VM_FF_,REQUEST);
3138 PRINT_FLAG(VM_FF_,CHECK_VM_STATE);
3139 PRINT_FLAG(VM_FF_,RESET);
3140 PRINT_FLAG(VM_FF_,EMT_RENDEZVOUS);
3141 PRINT_FLAG(VM_FF_,PGM_NEED_HANDY_PAGES);
3142 PRINT_FLAG(VM_FF_,PGM_NO_MEMORY);
3143 PRINT_FLAG(VM_FF_,PGM_POOL_FLUSH_PENDING);
3144 PRINT_FLAG(VM_FF_,REM_HANDLER_NOTIFY);
3145 PRINT_FLAG(VM_FF_,DEBUG_SUSPEND);
3146 if (f)
3147 pHlp->pfnPrintf(pHlp, "%s\n Unknown bits: %#RX64\n", c ? "," : "", f);
3148 else
3149 pHlp->pfnPrintf(pHlp, "\n");
3150
3151 /* the groups */
3152 c = 0;
3153 f = fGlobalForcedActions;
3154 PRINT_GROUP(VM_FF_,EXTERNAL_SUSPENDED,_MASK);
3155 PRINT_GROUP(VM_FF_,EXTERNAL_HALTED,_MASK);
3156 PRINT_GROUP(VM_FF_,HIGH_PRIORITY_PRE,_MASK);
3157 PRINT_GROUP(VM_FF_,HIGH_PRIORITY_PRE_RAW,_MASK);
3158 PRINT_GROUP(VM_FF_,HIGH_PRIORITY_POST,_MASK);
3159 PRINT_GROUP(VM_FF_,NORMAL_PRIORITY_POST,_MASK);
3160 PRINT_GROUP(VM_FF_,NORMAL_PRIORITY,_MASK);
3161 PRINT_GROUP(VM_FF_,ALL_REM,_MASK);
3162 if (c)
3163 pHlp->pfnPrintf(pHlp, "\n");
3164
3165 /*
3166 * Per CPU flags.
3167 */
3168 for (VMCPUID i = 0; i < pVM->cCpus; i++)
3169 {
3170 const uint64_t fLocalForcedActions = pVM->aCpus[i].fLocalForcedActions;
3171 pHlp->pfnPrintf(pHlp, "CPU %u FFs: %#RX64", i, fLocalForcedActions);
3172
3173 /* show the flag mnemonics */
3174 c = 0;
3175 f = fLocalForcedActions;
3176 PRINT_FLAG(VMCPU_FF_,INTERRUPT_APIC);
3177 PRINT_FLAG(VMCPU_FF_,INTERRUPT_PIC);
3178 PRINT_FLAG(VMCPU_FF_,TIMER);
3179 PRINT_FLAG(VMCPU_FF_,INTERRUPT_NMI);
3180 PRINT_FLAG(VMCPU_FF_,INTERRUPT_SMI);
3181 PRINT_FLAG(VMCPU_FF_,PDM_CRITSECT);
3182 PRINT_FLAG(VMCPU_FF_,UNHALT);
3183 PRINT_FLAG(VMCPU_FF_,IEM);
3184 PRINT_FLAG(VMCPU_FF_,UPDATE_APIC);
3185 PRINT_FLAG(VMCPU_FF_,DBGF);
3186 PRINT_FLAG(VMCPU_FF_,REQUEST);
3187 PRINT_FLAG(VMCPU_FF_,HM_UPDATE_CR3);
3188 PRINT_FLAG(VMCPU_FF_,HM_UPDATE_PAE_PDPES);
3189 PRINT_FLAG(VMCPU_FF_,PGM_SYNC_CR3);
3190 PRINT_FLAG(VMCPU_FF_,PGM_SYNC_CR3_NON_GLOBAL);
3191 PRINT_FLAG(VMCPU_FF_,TLB_FLUSH);
3192 PRINT_FLAG(VMCPU_FF_,INHIBIT_INTERRUPTS);
3193 PRINT_FLAG(VMCPU_FF_,BLOCK_NMIS);
3194 PRINT_FLAG(VMCPU_FF_,TO_R3);
3195 PRINT_FLAG(VMCPU_FF_,IOM);
3196#ifdef VBOX_WITH_RAW_MODE
3197 PRINT_FLAG(VMCPU_FF_,TRPM_SYNC_IDT);
3198 PRINT_FLAG(VMCPU_FF_,SELM_SYNC_TSS);
3199 PRINT_FLAG(VMCPU_FF_,SELM_SYNC_GDT);
3200 PRINT_FLAG(VMCPU_FF_,SELM_SYNC_LDT);
3201 PRINT_FLAG(VMCPU_FF_,CSAM_SCAN_PAGE);
3202 PRINT_FLAG(VMCPU_FF_,CSAM_PENDING_ACTION);
3203 PRINT_FLAG(VMCPU_FF_,CPUM);
3204#endif
3205 if (f)
3206 pHlp->pfnPrintf(pHlp, "%s\n Unknown bits: %#RX64\n", c ? "," : "", f);
3207 else
3208 pHlp->pfnPrintf(pHlp, "\n");
3209
3210 if (fLocalForcedActions & VMCPU_FF_INHIBIT_INTERRUPTS)
3211 pHlp->pfnPrintf(pHlp, " intr inhibit RIP: %RGv\n", EMGetInhibitInterruptsPC(&pVM->aCpus[i]));
3212
3213 /* the groups */
3214 c = 0;
3215 f = fLocalForcedActions;
3216 PRINT_GROUP(VMCPU_FF_,EXTERNAL_SUSPENDED,_MASK);
3217 PRINT_GROUP(VMCPU_FF_,EXTERNAL_HALTED,_MASK);
3218 PRINT_GROUP(VMCPU_FF_,HIGH_PRIORITY_PRE,_MASK);
3219 PRINT_GROUP(VMCPU_FF_,HIGH_PRIORITY_PRE_RAW,_MASK);
3220 PRINT_GROUP(VMCPU_FF_,HIGH_PRIORITY_POST,_MASK);
3221 PRINT_GROUP(VMCPU_FF_,NORMAL_PRIORITY_POST,_MASK);
3222 PRINT_GROUP(VMCPU_FF_,NORMAL_PRIORITY,_MASK);
3223 PRINT_GROUP(VMCPU_FF_,RESUME_GUEST,_MASK);
3224 PRINT_GROUP(VMCPU_FF_,HM_TO_R3,_MASK);
3225 PRINT_GROUP(VMCPU_FF_,ALL_REM,_MASK);
3226 if (c)
3227 pHlp->pfnPrintf(pHlp, "\n");
3228 }
3229
3230#undef PRINT_FLAG
3231#undef PRINT_GROUP
3232}
3233