VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/VM.cpp@94155

Last change on this file since 94155 was 93901, checked in by vboxsync, 3 years ago

VMM,Main,++: Removed VM_IS_RAW_MODE_ENABLED/VM_EXEC_ENGINE_RAW_MODE and added VM_IS_EXEC_ENGINE_IEM/VM_EXEC_ENGINE_IEM instead. In IMachineDebugger::getExecutionEngine VMExecutionEngine_RawMode was removed and VMExecutionEngine_Emulated added. Removed dead code and updated frontends accordingly. On darwin.arm64 HM now falls back on IEM execution since neither HM nor NEM is available there. bugref:9898

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 157.9 KB
1/* $Id: VM.cpp 93901 2022-02-23 15:35:26Z vboxsync $ */
2/** @file
3 * VM - Virtual Machine
4 */
5
6/*
7 * Copyright (C) 2006-2022 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/** @page pg_vm VM API
19 *
20 * This is the encapsulating bit. It provides the APIs that Main and VBoxBFE
21 * use to create a VMM instance for running a guest in. It also provides
22 * facilities for queuing requests for execution in EMT (serialization purposes
23 * mostly) and for reporting errors back to the VMM user (Main/VBoxBFE).
24 *
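 * For illustration, a minimal sketch of queuing work on an EMT via the
 * request facility mentioned above (myWorker is a hypothetical callback;
 * VMR3ReqCallWait is the API actually used throughout this file):
 * @code
 *     static DECLCALLBACK(int) myWorker(PVM pVM, uintptr_t uValue)
 *     {
 *         NOREF(pVM); NOREF(uValue);
 *         return VINF_SUCCESS;        // executes on the requested EMT
 *     }
 *     ...
 *     int rc = VMR3ReqCallWait(pVM, 0, (PFNRT)myWorker, 2, pVM, (uintptr_t)42); // idDstCpu 0 = EMT(0)
 * @endcode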
25 *
26 * @section sec_vm_design Design Critique / Things To Do
27 *
28 * In hindsight this component is a big design mistake; all this stuff really
29 * belongs in the VMM component. It just seemed like a kind of ok idea at a
30 * time when the VMM bit was still a bit vague. 'VM' also happened to be the name
31 * of the per-VM instance structure (see vm.h), so it kind of made sense.
32 * However, as it turned out, VMM(.cpp) is almost empty; all it provides in ring-3
33 * is some minor functionality and some "routing" services.
34 *
35 * Fixing this is just a matter of some more or less straightforward
36 * refactoring; the question is just when someone will get to it. Moving the EMT
37 * would be a good start.
38 *
39 */
40
41
42/*********************************************************************************************************************************
43* Header Files *
44*********************************************************************************************************************************/
45#define LOG_GROUP LOG_GROUP_VM
46#include <VBox/vmm/cfgm.h>
47#include <VBox/vmm/vmm.h>
48#include <VBox/vmm/gvmm.h>
49#include <VBox/vmm/mm.h>
50#include <VBox/vmm/cpum.h>
51#include <VBox/vmm/selm.h>
52#include <VBox/vmm/trpm.h>
53#include <VBox/vmm/dbgf.h>
54#include <VBox/vmm/pgm.h>
55#include <VBox/vmm/pdmapi.h>
56#include <VBox/vmm/pdmdev.h>
57#include <VBox/vmm/pdmcritsect.h>
58#include <VBox/vmm/em.h>
59#include <VBox/vmm/iem.h>
60#include <VBox/vmm/nem.h>
61#include <VBox/vmm/apic.h>
62#include <VBox/vmm/tm.h>
63#include <VBox/vmm/stam.h>
64#include <VBox/vmm/iom.h>
65#include <VBox/vmm/ssm.h>
66#include <VBox/vmm/hm.h>
67#include <VBox/vmm/gim.h>
68#include "VMInternal.h"
69#include <VBox/vmm/vmcc.h>
70
71#include <VBox/sup.h>
72#if defined(VBOX_WITH_DTRACE_R3) && !defined(VBOX_WITH_NATIVE_DTRACE)
73# include <VBox/VBoxTpG.h>
74#endif
75#include <VBox/dbg.h>
76#include <VBox/err.h>
77#include <VBox/param.h>
78#include <VBox/log.h>
79#include <iprt/assert.h>
80#include <iprt/alloca.h>
81#include <iprt/asm.h>
82#include <iprt/env.h>
83#include <iprt/mem.h>
84#include <iprt/semaphore.h>
85#include <iprt/string.h>
86#ifdef RT_OS_DARWIN
87# include <iprt/system.h>
88#endif
89#include <iprt/time.h>
90#include <iprt/thread.h>
91#include <iprt/uuid.h>
92
93
94/*********************************************************************************************************************************
95* Internal Functions *
96*********************************************************************************************************************************/
97static int vmR3CreateUVM(uint32_t cCpus, PCVMM2USERMETHODS pVmm2UserMethods, PUVM *ppUVM);
98static DECLCALLBACK(int) vmR3CreateU(PUVM pUVM, uint32_t cCpus, PFNCFGMCONSTRUCTOR pfnCFGMConstructor, void *pvUserCFGM);
99static int vmR3ReadBaseConfig(PVM pVM, PUVM pUVM, uint32_t cCpus);
100static int vmR3InitRing3(PVM pVM, PUVM pUVM);
101static int vmR3InitRing0(PVM pVM);
102static int vmR3InitDoCompleted(PVM pVM, VMINITCOMPLETED enmWhat);
103static void vmR3DestroyUVM(PUVM pUVM, uint32_t cMilliesEMTWait);
104static bool vmR3ValidateStateTransition(VMSTATE enmStateOld, VMSTATE enmStateNew);
105static void vmR3DoAtState(PVM pVM, PUVM pUVM, VMSTATE enmStateNew, VMSTATE enmStateOld);
106static int vmR3TrySetState(PVM pVM, const char *pszWho, unsigned cTransitions, ...);
107static void vmR3SetStateLocked(PVM pVM, PUVM pUVM, VMSTATE enmStateNew, VMSTATE enmStateOld, bool fSetRatherThanClearFF);
108static void vmR3SetState(PVM pVM, VMSTATE enmStateNew, VMSTATE enmStateOld);
109static int vmR3SetErrorU(PUVM pUVM, int rc, RT_SRC_POS_DECL, const char *pszFormat, ...) RT_IPRT_FORMAT_ATTR(6, 7);
110
111
112/**
113 * Creates a virtual machine by calling the supplied configuration constructor.
114 *
115 * On successful return the VM is created and VMR3PowerOn() should be
116 * called to start execution.
117 *
118 * @returns 0 on success.
119 * @returns VBox error code on failure.
120 * @param cCpus Number of virtual CPUs for the new VM.
121 * @param pVmm2UserMethods An optional method table that the VMM can use
122 * to make the user perform various actions, like
123 * for instance state saving.
124 * @param pfnVMAtError Pointer to callback function for setting VM
125 * errors. This was added as an implicit call to
126 * VMR3AtErrorRegister() since there is no way the
127 * caller can get to the VM handle early enough to
128 * do this on its own.
129 * This is called in the context of an EMT.
130 * @param pvUserVM The user argument passed to pfnVMAtError.
131 * @param pfnCFGMConstructor Pointer to callback function for constructing the VM configuration tree.
132 * This is called in the context of an EMT0.
133 * @param pvUserCFGM The user argument passed to pfnCFGMConstructor.
134 * @param ppVM Where to optionally store the 'handle' of the
135 * created VM.
136 * @param ppUVM Where to optionally store the user 'handle' of
137 * the created VM, this includes one reference as
138 * if VMR3RetainUVM() was called. The caller
139 * *MUST* remember to pass the returned value to
140 * VMR3ReleaseUVM() once done with the handle.
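 *
 * A minimal usage sketch (illustrative only; myCfgmConstructor stands in for
 * a real FNCFGMCONSTRUCTOR implementation and error handling is trimmed):
 * @code
 *     PUVM pUVM = NULL;
 *     int rc = VMR3Create(1, NULL, NULL, NULL,      // one CPU, no user methods, no at-error callback
 *                         myCfgmConstructor, NULL,  // configuration constructor + its user argument
 *                         NULL, &pUVM);             // only the user mode VM handle is needed here
 *     if (RT_SUCCESS(rc))
 *     {
 *         rc = VMR3PowerOn(pUVM);
 *         // ...
 *         VMR3ReleaseUVM(pUVM);                     // drop the reference VMR3Create added
 *     }
 * @endcode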
141 */
142VMMR3DECL(int) VMR3Create(uint32_t cCpus, PCVMM2USERMETHODS pVmm2UserMethods,
143 PFNVMATERROR pfnVMAtError, void *pvUserVM,
144 PFNCFGMCONSTRUCTOR pfnCFGMConstructor, void *pvUserCFGM,
145 PVM *ppVM, PUVM *ppUVM)
146{
147 LogFlow(("VMR3Create: cCpus=%RU32 pVmm2UserMethods=%p pfnVMAtError=%p pvUserVM=%p pfnCFGMConstructor=%p pvUserCFGM=%p ppVM=%p ppUVM=%p\n",
148 cCpus, pVmm2UserMethods, pfnVMAtError, pvUserVM, pfnCFGMConstructor, pvUserCFGM, ppVM, ppUVM));
149
150 if (pVmm2UserMethods)
151 {
152 AssertPtrReturn(pVmm2UserMethods, VERR_INVALID_POINTER);
153 AssertReturn(pVmm2UserMethods->u32Magic == VMM2USERMETHODS_MAGIC, VERR_INVALID_PARAMETER);
154 AssertReturn(pVmm2UserMethods->u32Version == VMM2USERMETHODS_VERSION, VERR_INVALID_PARAMETER);
155 AssertPtrNullReturn(pVmm2UserMethods->pfnSaveState, VERR_INVALID_POINTER);
156 AssertPtrNullReturn(pVmm2UserMethods->pfnNotifyEmtInit, VERR_INVALID_POINTER);
157 AssertPtrNullReturn(pVmm2UserMethods->pfnNotifyEmtTerm, VERR_INVALID_POINTER);
158 AssertPtrNullReturn(pVmm2UserMethods->pfnNotifyPdmtInit, VERR_INVALID_POINTER);
159 AssertPtrNullReturn(pVmm2UserMethods->pfnNotifyPdmtTerm, VERR_INVALID_POINTER);
160 AssertPtrNullReturn(pVmm2UserMethods->pfnNotifyResetTurnedIntoPowerOff, VERR_INVALID_POINTER);
161 AssertReturn(pVmm2UserMethods->u32EndMagic == VMM2USERMETHODS_MAGIC, VERR_INVALID_PARAMETER);
162 }
163 AssertPtrNullReturn(pfnVMAtError, VERR_INVALID_POINTER);
164 AssertPtrNullReturn(pfnCFGMConstructor, VERR_INVALID_POINTER);
165 AssertPtrNullReturn(ppVM, VERR_INVALID_POINTER);
166 AssertPtrNullReturn(ppUVM, VERR_INVALID_POINTER);
167 AssertReturn(ppVM || ppUVM, VERR_INVALID_PARAMETER);
168
169 /*
170 * Validate input.
171 */
172 AssertLogRelMsgReturn(cCpus > 0 && cCpus <= VMM_MAX_CPU_COUNT, ("%RU32\n", cCpus), VERR_TOO_MANY_CPUS);
173
174 /*
175 * Create the UVM so we can register the at-error callback
176 * and consolidate a bit of cleanup code.
177 */
178 PUVM pUVM = NULL; /* shuts up gcc */
179 int rc = vmR3CreateUVM(cCpus, pVmm2UserMethods, &pUVM);
180 if (RT_FAILURE(rc))
181 return rc;
182 if (pfnVMAtError)
183 rc = VMR3AtErrorRegister(pUVM, pfnVMAtError, pvUserVM);
184 if (RT_SUCCESS(rc))
185 {
186 /*
187 * Initialize the support library creating the session for this VM.
188 */
189 rc = SUPR3Init(&pUVM->vm.s.pSession);
190 if (RT_SUCCESS(rc))
191 {
192#if defined(VBOX_WITH_DTRACE_R3) && !defined(VBOX_WITH_NATIVE_DTRACE)
193 /* Now that we've opened the device, we can register trace probes. */
194 static bool s_fRegisteredProbes = false;
195 if (!SUPR3IsDriverless() && ASMAtomicCmpXchgBool(&s_fRegisteredProbes, true, false))
196 SUPR3TracerRegisterModule(~(uintptr_t)0, "VBoxVMM", &g_VTGObjHeader, (uintptr_t)&g_VTGObjHeader,
197 SUP_TRACER_UMOD_FLAGS_SHARED);
198#endif
199
200 /*
201 * Call vmR3CreateU in the EMT thread and wait for it to finish.
202 *
203 * Note! VMCPUID_ANY is used here because VMR3ReqQueueU would have trouble
204 * submitting a request to a specific VCPU without a pVM. So, to make
205 * sure init is running on EMT(0), vmR3EmulationThreadWithId makes sure
206 * that only EMT(0) is servicing VMCPUID_ANY requests when pVM is NULL.
207 */
208 PVMREQ pReq;
209 rc = VMR3ReqCallU(pUVM, VMCPUID_ANY, &pReq, RT_INDEFINITE_WAIT, VMREQFLAGS_VBOX_STATUS,
210 (PFNRT)vmR3CreateU, 4, pUVM, cCpus, pfnCFGMConstructor, pvUserCFGM);
211 if (RT_SUCCESS(rc))
212 {
213 rc = pReq->iStatus;
214 VMR3ReqFree(pReq);
215 if (RT_SUCCESS(rc))
216 {
217 /*
218 * Success!
219 */
220 if (ppVM)
221 *ppVM = pUVM->pVM;
222 if (ppUVM)
223 {
224 VMR3RetainUVM(pUVM);
225 *ppUVM = pUVM;
226 }
227 LogFlow(("VMR3Create: returns VINF_SUCCESS (pVM=%p, pUVM=%p\n", pUVM->pVM, pUVM));
228 return VINF_SUCCESS;
229 }
230 }
231 else
232 AssertMsgFailed(("VMR3ReqCallU failed rc=%Rrc\n", rc));
233
234 /*
235 * An error occurred during VM creation. Set the error message directly
236 * using the initial callback, as the callback list might not exist yet.
237 */
238 const char *pszError;
239 switch (rc)
240 {
241 case VERR_VMX_IN_VMX_ROOT_MODE:
242#ifdef RT_OS_LINUX
243 pszError = N_("VirtualBox can't operate in VMX root mode. "
244 "Please disable the KVM kernel extension, recompile your kernel and reboot");
245#else
246 pszError = N_("VirtualBox can't operate in VMX root mode. Please close all other virtualization programs.");
247#endif
248 break;
249
250#ifndef RT_OS_DARWIN
251 case VERR_HM_CONFIG_MISMATCH:
252 pszError = N_("VT-x/AMD-V is either not available on your host or disabled. "
253 "This hardware extension is required by the VM configuration");
254 break;
255#endif
256
257 case VERR_SVM_IN_USE:
258#ifdef RT_OS_LINUX
259 pszError = N_("VirtualBox can't enable the AMD-V extension. "
260 "Please disable the KVM kernel extension, recompile your kernel and reboot");
261#else
262 pszError = N_("VirtualBox can't enable the AMD-V extension. Please close all other virtualization programs.");
263#endif
264 break;
265
266#ifdef RT_OS_LINUX
267 case VERR_SUPDRV_COMPONENT_NOT_FOUND:
268 pszError = N_("One of the kernel modules was not successfully loaded. Make sure "
269 "that VirtualBox is correctly installed, and if you are using EFI "
270 "Secure Boot that the modules are signed if necessary in the right "
271 "way for your host system. Then try to recompile and reload the "
272 "kernel modules by executing "
273 "'/sbin/vboxconfig' as root");
274 break;
275#endif
276
277 case VERR_RAW_MODE_INVALID_SMP:
278 pszError = N_("VT-x/AMD-V is either not available on your host or disabled. "
279 "VirtualBox requires this hardware extension to emulate more than one "
280 "guest CPU");
281 break;
282
283 case VERR_SUPDRV_KERNEL_TOO_OLD_FOR_VTX:
284#ifdef RT_OS_LINUX
285 pszError = N_("Because the host kernel is too old, VirtualBox cannot enable the VT-x "
286 "extension. Either upgrade your kernel to Linux 2.6.13 or later or disable "
287 "the VT-x extension in the VM settings. Note that without VT-x you have "
288 "to reduce the number of guest CPUs to one");
289#else
290 pszError = N_("Because the host kernel is too old, VirtualBox cannot enable the VT-x "
291 "extension. Either upgrade your kernel or disable the VT-x extension in the "
292 "VM settings. Note that without VT-x you have to reduce the number of guest "
293 "CPUs to one");
294#endif
295 break;
296
297 case VERR_PDM_DEVICE_NOT_FOUND:
298 pszError = N_("A virtual device is configured in the VM settings but the device "
299 "implementation is missing.\n"
300 "A possible reason for this error is a missing extension pack. Note "
301 "that as of VirtualBox 4.0, certain features (for example USB 2.0 "
302 "support and remote desktop) are only available from an 'extension "
303 "pack' which must be downloaded and installed separately");
304 break;
305
306 case VERR_PCI_PASSTHROUGH_NO_HM:
307 pszError = N_("PCI passthrough requires VT-x/AMD-V");
308 break;
309
310 case VERR_PCI_PASSTHROUGH_NO_NESTED_PAGING:
311 pszError = N_("PCI passthrough requires nested paging");
312 break;
313
314 default:
315 if (VMR3GetErrorCount(pUVM) == 0)
316 {
317 pszError = (char *)alloca(1024);
318 RTErrQueryMsgFull(rc, (char *)pszError, 1024, false /*fFailIfUnknown*/);
319 }
320 else
321 pszError = NULL; /* already set. */
322 break;
323 }
324 if (pszError)
325 vmR3SetErrorU(pUVM, rc, RT_SRC_POS, pszError, rc);
326 }
327 else
328 {
329 /*
330 * An error occurred at support library initialization time (before the
331 * VM could be created). Set the error message directly using the
332 * initial callback, as the callback list doesn't exist yet.
333 */
334 const char *pszError;
335 switch (rc)
336 {
337 case VERR_VM_DRIVER_LOAD_ERROR:
338#ifdef RT_OS_LINUX
339 pszError = N_("VirtualBox kernel driver not loaded. The vboxdrv kernel module "
340 "was either not loaded, /dev/vboxdrv is not set up properly, "
341 "or you are using EFI Secure Boot and the module is not signed "
342 "in the right way for your system. If necessary, try setting up "
343 "the kernel module again by executing "
344 "'/sbin/vboxconfig' as root");
345#else
346 pszError = N_("VirtualBox kernel driver not loaded");
347#endif
348 break;
349 case VERR_VM_DRIVER_OPEN_ERROR:
350 pszError = N_("VirtualBox kernel driver cannot be opened");
351 break;
352 case VERR_VM_DRIVER_NOT_ACCESSIBLE:
353#ifdef VBOX_WITH_HARDENING
354 /* This should only happen if the executable wasn't hardened - bad code/build. */
355 pszError = N_("VirtualBox kernel driver not accessible, permission problem. "
356 "Re-install VirtualBox. If you are building it yourself, you "
357 "should make sure it installed correctly and that the setuid "
358 "bit is set on the executables calling VMR3Create.");
359#else
360 /* This should only happen when mixing builds or with the usual /dev/vboxdrv access issues. */
361# if defined(RT_OS_DARWIN)
362 pszError = N_("VirtualBox KEXT is not accessible, permission problem. "
363 "If you have built VirtualBox yourself, make sure that you do not "
364 "have the vboxdrv KEXT from a different build or installation loaded.");
365# elif defined(RT_OS_LINUX)
366 pszError = N_("VirtualBox kernel driver is not accessible, permission problem. "
367 "If you have built VirtualBox yourself, make sure that you do "
368 "not have the vboxdrv kernel module from a different build or "
369 "installation loaded. Also, make sure the vboxdrv udev rule gives "
370 "you the permission you need to access the device.");
371# elif defined(RT_OS_WINDOWS)
372 pszError = N_("VirtualBox kernel driver is not accessible, permission problem.");
373# else /* solaris, freebsd, ++. */
374 pszError = N_("VirtualBox kernel module is not accessible, permission problem. "
375 "If you have built VirtualBox yourself, make sure that you do "
376 "not have the vboxdrv kernel module from a different install loaded.");
377# endif
378#endif
379 break;
380 case VERR_INVALID_HANDLE: /** @todo track down and fix this error. */
381 case VERR_VM_DRIVER_NOT_INSTALLED:
382#ifdef RT_OS_LINUX
383 pszError = N_("VirtualBox kernel driver not Installed. The vboxdrv kernel module "
384 "was either not loaded, /dev/vboxdrv is not set up properly, "
385 "or you are using EFI Secure Boot and the module is not signed "
386 "in the right way for your system. If necessary, try setting up "
387 "the kernel module again by executing "
388 "'/sbin/vboxconfig' as root");
389#else
390 pszError = N_("VirtualBox kernel driver not installed");
391#endif
392 break;
393 case VERR_NO_MEMORY:
394 pszError = N_("VirtualBox support library out of memory");
395 break;
396 case VERR_VERSION_MISMATCH:
397 case VERR_VM_DRIVER_VERSION_MISMATCH:
398 pszError = N_("The VirtualBox support driver which is running is from a different "
399 "version of VirtualBox. You can correct this by stopping all "
400 "running instances of VirtualBox and reinstalling the software.");
401 break;
402 default:
403 pszError = N_("Unknown error initializing kernel driver");
404 AssertMsgFailed(("Add error message for rc=%d (%Rrc)\n", rc, rc));
405 }
406 vmR3SetErrorU(pUVM, rc, RT_SRC_POS, pszError, rc);
407 }
408 }
409
410 /* cleanup */
411 vmR3DestroyUVM(pUVM, 2000);
412 LogFlow(("VMR3Create: returns %Rrc\n", rc));
413 return rc;
414}
415
416
417/**
418 * Creates the UVM.
419 *
420 * Note that this will not initialize the support library, even though
421 * vmR3DestroyUVM will terminate it.
422 *
423 * @returns VBox status code.
424 * @param cCpus Number of virtual CPUs
425 * @param pVmm2UserMethods Pointer to the optional VMM -> User method
426 * table.
427 * @param ppUVM Where to store the UVM pointer.
428 */
429static int vmR3CreateUVM(uint32_t cCpus, PCVMM2USERMETHODS pVmm2UserMethods, PUVM *ppUVM)
430{
431 uint32_t i;
432
433 /*
434 * Create and initialize the UVM.
435 */
436 PUVM pUVM = (PUVM)RTMemPageAllocZ(RT_UOFFSETOF_DYN(UVM, aCpus[cCpus]));
437 AssertReturn(pUVM, VERR_NO_MEMORY);
438 pUVM->u32Magic = UVM_MAGIC;
439 pUVM->cCpus = cCpus;
440 pUVM->pVmm2UserMethods = pVmm2UserMethods;
441
442 AssertCompile(sizeof(pUVM->vm.s) <= sizeof(pUVM->vm.padding));
443
444 pUVM->vm.s.cUvmRefs = 1;
445 pUVM->vm.s.ppAtStateNext = &pUVM->vm.s.pAtState;
446 pUVM->vm.s.ppAtErrorNext = &pUVM->vm.s.pAtError;
447 pUVM->vm.s.ppAtRuntimeErrorNext = &pUVM->vm.s.pAtRuntimeError;
448
449 pUVM->vm.s.enmHaltMethod = VMHALTMETHOD_BOOTSTRAP;
450 RTUuidClear(&pUVM->vm.s.Uuid);
451
452 /* Initialize the VMCPU array in the UVM. */
453 for (i = 0; i < cCpus; i++)
454 {
455 pUVM->aCpus[i].pUVM = pUVM;
456 pUVM->aCpus[i].idCpu = i;
457 }
458
459 /* Allocate a TLS entry to store the VMINTUSERPERVMCPU pointer. */
460 int rc = RTTlsAllocEx(&pUVM->vm.s.idxTLS, NULL);
461 AssertRC(rc);
462 if (RT_SUCCESS(rc))
463 {
464 /* Allocate a halt method event semaphore for each VCPU. */
465 for (i = 0; i < cCpus; i++)
466 pUVM->aCpus[i].vm.s.EventSemWait = NIL_RTSEMEVENT;
467 for (i = 0; i < cCpus; i++)
468 {
469 rc = RTSemEventCreate(&pUVM->aCpus[i].vm.s.EventSemWait);
470 if (RT_FAILURE(rc))
471 break;
472 }
473 if (RT_SUCCESS(rc))
474 {
475 rc = RTCritSectInit(&pUVM->vm.s.AtStateCritSect);
476 if (RT_SUCCESS(rc))
477 {
478 rc = RTCritSectInit(&pUVM->vm.s.AtErrorCritSect);
479 if (RT_SUCCESS(rc))
480 {
481 /*
482 * Init fundamental (sub-)components - STAM, MMR3Heap and PDMLdr.
483 */
484 rc = PDMR3InitUVM(pUVM);
485 if (RT_SUCCESS(rc))
486 {
487 rc = STAMR3InitUVM(pUVM);
488 if (RT_SUCCESS(rc))
489 {
490 rc = MMR3InitUVM(pUVM);
491 if (RT_SUCCESS(rc))
492 {
493 /*
494 * Start the emulation threads for all VMCPUs.
495 */
496 for (i = 0; i < cCpus; i++)
497 {
498 rc = RTThreadCreateF(&pUVM->aCpus[i].vm.s.ThreadEMT, vmR3EmulationThread, &pUVM->aCpus[i],
499 _1M, RTTHREADTYPE_EMULATION,
500 RTTHREADFLAGS_WAITABLE | RTTHREADFLAGS_COM_MTA | RTTHREADFLAGS_NO_SIGNALS,
501 cCpus > 1 ? "EMT-%u" : "EMT", i);
502 if (RT_FAILURE(rc))
503 break;
504
505 pUVM->aCpus[i].vm.s.NativeThreadEMT = RTThreadGetNative(pUVM->aCpus[i].vm.s.ThreadEMT);
506 }
507
508 if (RT_SUCCESS(rc))
509 {
510 *ppUVM = pUVM;
511 return VINF_SUCCESS;
512 }
513
514 /* bail out. */
515 while (i-- > 0)
516 {
517 /** @todo rainy day: terminate the EMTs. */
518 }
519 MMR3TermUVM(pUVM);
520 }
521 STAMR3TermUVM(pUVM);
522 }
523 PDMR3TermUVM(pUVM);
524 }
525 RTCritSectDelete(&pUVM->vm.s.AtErrorCritSect);
526 }
527 RTCritSectDelete(&pUVM->vm.s.AtStateCritSect);
528 }
529 }
530 for (i = 0; i < cCpus; i++)
531 {
532 RTSemEventDestroy(pUVM->aCpus[i].vm.s.EventSemWait);
533 pUVM->aCpus[i].vm.s.EventSemWait = NIL_RTSEMEVENT;
534 }
535 RTTlsFree(pUVM->vm.s.idxTLS);
536 }
537 RTMemPageFree(pUVM, RT_UOFFSETOF_DYN(UVM, aCpus[pUVM->cCpus]));
538 return rc;
539}
540
541
542/**
543 * Creates and initializes the VM.
544 *
545 * @thread EMT
546 */
547static DECLCALLBACK(int) vmR3CreateU(PUVM pUVM, uint32_t cCpus, PFNCFGMCONSTRUCTOR pfnCFGMConstructor, void *pvUserCFGM)
548{
549#if (defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)) && !defined(VBOX_WITH_OLD_CPU_SUPPORT)
550 /*
551 * Require SSE2 to be present (already checked for in supdrv, so we
552 * shouldn't ever really get here).
553 */
554 if (!(ASMCpuId_EDX(1) & X86_CPUID_FEATURE_EDX_SSE2))
555 {
556 LogRel(("vboxdrv: Requires SSE2 (cpuid(0).EDX=%#x)\n", ASMCpuId_EDX(1)));
557 return VERR_UNSUPPORTED_CPU;
558 }
559#endif
560
561
562 /*
563 * Load the VMMR0.r0 module so that we can call GVMMR0CreateVM.
564 */
565 if (!SUPR3IsDriverless())
566 {
567 int rc = PDMR3LdrLoadVMMR0U(pUVM);
568 if (RT_FAILURE(rc))
569 {
570 /** @todo we need a cleaner solution for this (VERR_VMX_IN_VMX_ROOT_MODE).
571 * bird: what about moving the message down here? Main picks the first message, right? */
572 if (rc == VERR_VMX_IN_VMX_ROOT_MODE)
573 return rc; /* proper error message set later on */
574 return vmR3SetErrorU(pUVM, rc, RT_SRC_POS, N_("Failed to load VMMR0.r0"));
575 }
576 }
577
578 /*
579 * Request GVMM to create a new VM for us.
580 */
581 RTR0PTR pVMR0;
582 int rc = GVMMR3CreateVM(pUVM, cCpus, pUVM->vm.s.pSession, &pUVM->pVM, &pVMR0);
583 if (RT_SUCCESS(rc))
584 {
585 PVM pVM = pUVM->pVM;
586 AssertRelease(RT_VALID_PTR(pVM));
587 AssertRelease(pVM->pVMR0ForCall == pVMR0);
588 AssertRelease(pVM->pSession == pUVM->vm.s.pSession);
589 AssertRelease(pVM->cCpus == cCpus);
590 AssertRelease(pVM->uCpuExecutionCap == 100);
591 AssertCompileMemberAlignment(VM, cpum, 64);
592 AssertCompileMemberAlignment(VM, tm, 64);
593
594 Log(("VMR3Create: Created pUVM=%p pVM=%p pVMR0=%p hSelf=%#x cCpus=%RU32\n", pUVM, pVM, pVMR0, pVM->hSelf, pVM->cCpus));
595
596 /*
597 * Initialize the VM structure and our internal data (VMINT).
598 */
599 pVM->pUVM = pUVM;
600
601 for (VMCPUID i = 0; i < pVM->cCpus; i++)
602 {
603 PVMCPU pVCpu = pVM->apCpusR3[i];
604 pVCpu->pUVCpu = &pUVM->aCpus[i];
605 pVCpu->idCpu = i;
606 pVCpu->hNativeThread = pUVM->aCpus[i].vm.s.NativeThreadEMT;
607 pVCpu->hThread = pUVM->aCpus[i].vm.s.ThreadEMT;
608 Assert(pVCpu->hNativeThread != NIL_RTNATIVETHREAD);
609 /* hNativeThreadR0 is initialized on EMT registration. */
610 pUVM->aCpus[i].pVCpu = pVCpu;
611 pUVM->aCpus[i].pVM = pVM;
612 }
613
614 /*
615 * Init the configuration.
616 */
617 rc = CFGMR3Init(pVM, pfnCFGMConstructor, pvUserCFGM);
618 if (RT_SUCCESS(rc))
619 {
620 rc = vmR3ReadBaseConfig(pVM, pUVM, cCpus);
621 if (RT_SUCCESS(rc))
622 {
623 /*
624 * Init the ring-3 components and ring-3 per cpu data, finishing it off
625 * by a relocation round (intermediate context finalization will do this).
626 */
627 rc = vmR3InitRing3(pVM, pUVM);
628 if (RT_SUCCESS(rc))
629 {
630 LogFlow(("Ring-3 init succeeded\n"));
631
632 /*
633 * Init the Ring-0 components.
634 */
635 rc = vmR3InitRing0(pVM);
636 if (RT_SUCCESS(rc))
637 {
638 /* Relocate again, because some switcher fixups depend on R0 init results. */
639 VMR3Relocate(pVM, 0 /* offDelta */);
640
641#ifdef VBOX_WITH_DEBUGGER
642 /*
643 * Init the tcp debugger console if we're building
644 * with debugger support.
645 */
646 void *pvUser = NULL;
647 rc = DBGCIoCreate(pUVM, &pvUser);
648 if ( RT_SUCCESS(rc)
649 || rc == VERR_NET_ADDRESS_IN_USE)
650 {
651 pUVM->vm.s.pvDBGC = pvUser;
652#endif
653 /*
654 * Now we can safely set the VM halt method to default.
655 */
656 rc = vmR3SetHaltMethodU(pUVM, VMHALTMETHOD_DEFAULT);
657 if (RT_SUCCESS(rc))
658 {
659 /*
660 * Set the state and we're done.
661 */
662 vmR3SetState(pVM, VMSTATE_CREATED, VMSTATE_CREATING);
663 return VINF_SUCCESS;
664 }
665#ifdef VBOX_WITH_DEBUGGER
666 DBGCIoTerminate(pUVM, pUVM->vm.s.pvDBGC);
667 pUVM->vm.s.pvDBGC = NULL;
668 }
669#endif
670 //..
671 }
672 vmR3Destroy(pVM);
673 }
674 }
675 //..
676
677 /* Clean CFGM. */
678 int rc2 = CFGMR3Term(pVM);
679 AssertRC(rc2);
680 }
681
682 /*
683 * Do automatic cleanups while the VM structure is still alive and all
684 * references to it are still working.
685 */
686 PDMR3CritSectBothTerm(pVM);
687
688 /*
689 * Drop all references to VM and the VMCPU structures, then
690 * tell GVMM to destroy the VM.
691 */
692 pUVM->pVM = NULL;
693 for (VMCPUID i = 0; i < pUVM->cCpus; i++)
694 {
695 pUVM->aCpus[i].pVM = NULL;
696 pUVM->aCpus[i].pVCpu = NULL;
697 }
698 Assert(pUVM->vm.s.enmHaltMethod == VMHALTMETHOD_BOOTSTRAP);
699
700 if (pUVM->cCpus > 1)
701 {
702 /* Poke the other EMTs since they may have stale pVM and pVCpu references
703 on the stack (see VMR3WaitU for instance) if they've been awakened after
704 VM creation. */
705 for (VMCPUID i = 1; i < pUVM->cCpus; i++)
706 VMR3NotifyCpuFFU(&pUVM->aCpus[i], 0);
707 RTThreadSleep(RT_MIN(100 + 25 *(pUVM->cCpus - 1), 500)); /* very sophisticated */
708 }
709
710 int rc2 = GVMMR3DestroyVM(pUVM, pVM);
711 AssertRC(rc2);
712 }
713 else
714 vmR3SetErrorU(pUVM, rc, RT_SRC_POS, N_("VM creation failed (GVMM)"));
715
716 LogFlow(("vmR3CreateU: returns %Rrc\n", rc));
717 return rc;
718}
719
720
721/**
722 * Reads the base configuration from CFGM.
723 *
724 * @returns VBox status code.
725 * @param pVM The cross context VM structure.
726 * @param pUVM The user mode VM structure.
727 * @param cCpus The CPU count given to VMR3Create.
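 *
 * For illustration, a minimal CFGM constructor sketch that produces the keys
 * queried below (myCfgmConstructor is hypothetical; check cfgm.h for the
 * authoritative FNCFGMCONSTRUCTOR signature):
 * @code
 *     static DECLCALLBACK(int) myCfgmConstructor(PUVM pUVM, PVM pVM, void *pvUser)
 *     {
 *         NOREF(pUVM); NOREF(pvUser);
 *         PCFGMNODE pRoot = CFGMR3GetRoot(pVM);
 *         int rc = CFGMR3InsertInteger(pRoot, "NumCPUs", 1);
 *         if (RT_SUCCESS(rc))
 *             rc = CFGMR3InsertString(pRoot, "Name", "my-test-vm");
 *         if (RT_SUCCESS(rc))
 *             rc = CFGMR3InsertInteger(pRoot, "CpuExecutionCap", 100);
 *         return rc;
 *     }
 * @endcode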
728 */
729static int vmR3ReadBaseConfig(PVM pVM, PUVM pUVM, uint32_t cCpus)
730{
731 PCFGMNODE const pRoot = CFGMR3GetRoot(pVM);
732
733 /*
734 * Base EM and HM config properties.
735 */
736#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
737 pVM->fHMEnabled = true;
738#else /* Other architectures must fall back on IEM for the time being: */
739 pVM->fHMEnabled = false;
740#endif
741
742 /*
743 * Make sure the CPU count in the config data matches.
744 */
745 uint32_t cCPUsCfg;
746 int rc = CFGMR3QueryU32Def(pRoot, "NumCPUs", &cCPUsCfg, 1);
747 AssertLogRelMsgRCReturn(rc, ("Configuration error: Querying \"NumCPUs\" as integer failed, rc=%Rrc\n", rc), rc);
748 AssertLogRelMsgReturn(cCPUsCfg == cCpus,
749 ("Configuration error: \"NumCPUs\"=%RU32 and VMR3Create::cCpus=%RU32 does not match!\n",
750 cCPUsCfg, cCpus),
751 VERR_INVALID_PARAMETER);
752
753 /*
754 * Get the CPU execution cap.
755 */
756 rc = CFGMR3QueryU32Def(pRoot, "CpuExecutionCap", &pVM->uCpuExecutionCap, 100);
757 AssertLogRelMsgRCReturn(rc, ("Configuration error: Querying \"CpuExecutionCap\" as integer failed, rc=%Rrc\n", rc), rc);
758
759 /*
760 * Get the VM name and UUID.
761 */
762 rc = CFGMR3QueryStringAllocDef(pRoot, "Name", &pUVM->vm.s.pszName, "<unknown>");
763 AssertLogRelMsgRCReturn(rc, ("Configuration error: Querying \"Name\" failed, rc=%Rrc\n", rc), rc);
764
765 rc = CFGMR3QueryBytes(pRoot, "UUID", &pUVM->vm.s.Uuid, sizeof(pUVM->vm.s.Uuid));
766 if (rc == VERR_CFGM_VALUE_NOT_FOUND)
767 rc = VINF_SUCCESS;
768 AssertLogRelMsgRCReturn(rc, ("Configuration error: Querying \"UUID\" failed, rc=%Rrc\n", rc), rc);
769
770 rc = CFGMR3QueryBoolDef(pRoot, "PowerOffInsteadOfReset", &pVM->vm.s.fPowerOffInsteadOfReset, false);
771 AssertLogRelMsgRCReturn(rc, ("Configuration error: Querying \"PowerOffInsteadOfReset\" failed, rc=%Rrc\n", rc), rc);
772
773 return VINF_SUCCESS;
774}
775
776
777/**
778 * Initializes all R3 components of the VM
779 */
780static int vmR3InitRing3(PVM pVM, PUVM pUVM)
781{
782 int rc;
783
784 /*
785 * Register the other EMTs with GVM.
786 */
787 for (VMCPUID idCpu = 1; idCpu < pVM->cCpus; idCpu++)
788 {
789 rc = VMR3ReqCallWait(pVM, idCpu, (PFNRT)GVMMR3RegisterVCpu, 2, pVM, idCpu);
790 if (RT_FAILURE(rc))
791 return rc;
792 }
793
794 /*
795 * Register statistics.
796 */
797 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
798 {
799 rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltYield, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "Profiling halted state yielding.", "/PROF/CPU%d/VM/Halt/Yield", idCpu);
800 AssertRC(rc);
801 rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltBlock, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "Profiling halted state blocking.", "/PROF/CPU%d/VM/Halt/Block", idCpu);
802 AssertRC(rc);
803 rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltBlockOverslept, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "Time wasted by blocking too long.", "/PROF/CPU%d/VM/Halt/BlockOverslept", idCpu);
804 AssertRC(rc);
805 rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltBlockInsomnia, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "Time slept when returning to early.","/PROF/CPU%d/VM/Halt/BlockInsomnia", idCpu);
806 AssertRC(rc);
807 rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltBlockOnTime, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "Time slept on time.", "/PROF/CPU%d/VM/Halt/BlockOnTime", idCpu);
808 AssertRC(rc);
809 rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltTimers, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "Profiling halted state timer tasks.", "/PROF/CPU%d/VM/Halt/Timers", idCpu);
810 AssertRC(rc);
811 }
812
813 STAM_REG(pVM, &pUVM->vm.s.StatReqAllocNew, STAMTYPE_COUNTER, "/VM/Req/AllocNew", STAMUNIT_OCCURENCES, "Number of VMR3ReqAlloc returning a new packet.");
814 STAM_REG(pVM, &pUVM->vm.s.StatReqAllocRaces, STAMTYPE_COUNTER, "/VM/Req/AllocRaces", STAMUNIT_OCCURENCES, "Number of VMR3ReqAlloc causing races.");
815 STAM_REG(pVM, &pUVM->vm.s.StatReqAllocRecycled, STAMTYPE_COUNTER, "/VM/Req/AllocRecycled", STAMUNIT_OCCURENCES, "Number of VMR3ReqAlloc returning a recycled packet.");
816 STAM_REG(pVM, &pUVM->vm.s.StatReqFree, STAMTYPE_COUNTER, "/VM/Req/Free", STAMUNIT_OCCURENCES, "Number of VMR3ReqFree calls.");
817 STAM_REG(pVM, &pUVM->vm.s.StatReqFreeOverflow, STAMTYPE_COUNTER, "/VM/Req/FreeOverflow", STAMUNIT_OCCURENCES, "Number of times the request was actually freed.");
818 STAM_REG(pVM, &pUVM->vm.s.StatReqProcessed, STAMTYPE_COUNTER, "/VM/Req/Processed", STAMUNIT_OCCURENCES, "Number of processed requests (any queue).");
819 STAM_REG(pVM, &pUVM->vm.s.StatReqMoreThan1, STAMTYPE_COUNTER, "/VM/Req/MoreThan1", STAMUNIT_OCCURENCES, "Number of times there are more than one request on the queue when processing it.");
820 STAM_REG(pVM, &pUVM->vm.s.StatReqPushBackRaces, STAMTYPE_COUNTER, "/VM/Req/PushBackRaces", STAMUNIT_OCCURENCES, "Number of push back races.");
821
822 /* Statistics for ring-0 components: */
823 STAM_REL_REG(pVM, &pVM->R0Stats.gmm.cChunkTlbHits, STAMTYPE_COUNTER, "/GMM/ChunkTlbHits", STAMUNIT_OCCURENCES, "GMMR0PageIdToVirt chunk TBL hits");
824 STAM_REL_REG(pVM, &pVM->R0Stats.gmm.cChunkTlbMisses, STAMTYPE_COUNTER, "/GMM/ChunkTlbMisses", STAMUNIT_OCCURENCES, "GMMR0PageIdToVirt chunk TBL misses");
825
826 /*
827 * Init all R3 components, the order here might be important.
828 * NEM and HM shall be initialized first!
829 */
830 Assert(pVM->bMainExecutionEngine == VM_EXEC_ENGINE_NOT_SET);
831 rc = NEMR3InitConfig(pVM);
832 if (RT_SUCCESS(rc))
833 rc = HMR3Init(pVM);
834 if (RT_SUCCESS(rc))
835 {
836 ASMCompilerBarrier(); /* HMR3Init will have modified const member bMainExecutionEngine. */
837 Assert( pVM->bMainExecutionEngine == VM_EXEC_ENGINE_HW_VIRT
838 || pVM->bMainExecutionEngine == VM_EXEC_ENGINE_NATIVE_API
839 || pVM->bMainExecutionEngine == VM_EXEC_ENGINE_IEM);
840 rc = MMR3Init(pVM);
841 if (RT_SUCCESS(rc))
842 {
843 rc = CPUMR3Init(pVM);
844 if (RT_SUCCESS(rc))
845 {
846 rc = NEMR3InitAfterCPUM(pVM);
847 if (RT_SUCCESS(rc))
848 rc = PGMR3Init(pVM);
849 if (RT_SUCCESS(rc))
850 {
851 rc = MMR3InitPaging(pVM);
852 if (RT_SUCCESS(rc))
853 rc = TMR3Init(pVM);
854 if (RT_SUCCESS(rc))
855 {
856 rc = VMMR3Init(pVM);
857 if (RT_SUCCESS(rc))
858 {
859 rc = SELMR3Init(pVM);
860 if (RT_SUCCESS(rc))
861 {
862 rc = TRPMR3Init(pVM);
863 if (RT_SUCCESS(rc))
864 {
865 rc = SSMR3RegisterStub(pVM, "CSAM", 0);
866 if (RT_SUCCESS(rc))
867 {
868 rc = SSMR3RegisterStub(pVM, "PATM", 0);
869 if (RT_SUCCESS(rc))
870 {
871 rc = IOMR3Init(pVM);
872 if (RT_SUCCESS(rc))
873 {
874 rc = EMR3Init(pVM);
875 if (RT_SUCCESS(rc))
876 {
877 rc = IEMR3Init(pVM);
878 if (RT_SUCCESS(rc))
879 {
880 rc = DBGFR3Init(pVM);
881 if (RT_SUCCESS(rc))
882 {
883 /* GIM must be init'd before PDM, as gimdevR3Construct()
884 requires the GIM provider to be set up. */
885 rc = GIMR3Init(pVM);
886 if (RT_SUCCESS(rc))
887 {
888 rc = PDMR3Init(pVM);
889 if (RT_SUCCESS(rc))
890 {
891 rc = PGMR3InitFinalize(pVM);
892 if (RT_SUCCESS(rc))
893 rc = TMR3InitFinalize(pVM);
894 if (RT_SUCCESS(rc))
895 {
896 PGMR3MemSetup(pVM, false /*fAtReset*/);
897 PDMR3MemSetup(pVM, false /*fAtReset*/);
898 }
899 if (RT_SUCCESS(rc))
900 rc = vmR3InitDoCompleted(pVM, VMINITCOMPLETED_RING3);
901 if (RT_SUCCESS(rc))
902 {
903 LogFlow(("vmR3InitRing3: returns %Rrc\n", VINF_SUCCESS));
904 return VINF_SUCCESS;
905 }
906
907 int rc2 = PDMR3Term(pVM);
908 AssertRC(rc2);
909 }
910 int rc2 = GIMR3Term(pVM);
911 AssertRC(rc2);
912 }
913 int rc2 = DBGFR3Term(pVM);
914 AssertRC(rc2);
915 }
916 int rc2 = IEMR3Term(pVM);
917 AssertRC(rc2);
918 }
919 int rc2 = EMR3Term(pVM);
920 AssertRC(rc2);
921 }
922 int rc2 = IOMR3Term(pVM);
923 AssertRC(rc2);
924 }
925 }
926 }
927 int rc2 = TRPMR3Term(pVM);
928 AssertRC(rc2);
929 }
930 int rc2 = SELMR3Term(pVM);
931 AssertRC(rc2);
932 }
933 int rc2 = VMMR3Term(pVM);
934 AssertRC(rc2);
935 }
936 int rc2 = TMR3Term(pVM);
937 AssertRC(rc2);
938 }
939 int rc2 = PGMR3Term(pVM);
940 AssertRC(rc2);
941 }
942 //int rc2 = CPUMR3Term(pVM);
943 //AssertRC(rc2);
944 }
945 /* MMR3Term is not called here because it'll kill the heap. */
946 }
947 int rc2 = HMR3Term(pVM);
948 AssertRC(rc2);
949 }
950 NEMR3Term(pVM);
951
952 LogFlow(("vmR3InitRing3: returns %Rrc\n", rc));
953 return rc;
954}
955
956
957/**
958 * Initializes all R0 components of the VM.
959 */
960static int vmR3InitRing0(PVM pVM)
961{
962 LogFlow(("vmR3InitRing0:\n"));
963
964 /*
965 * Check for FAKE suplib mode.
966 */
967 int rc = VINF_SUCCESS;
968 const char *psz = RTEnvGet("VBOX_SUPLIB_FAKE");
969 if (!psz || strcmp(psz, "fake"))
970 {
971 /*
972 * Call the VMMR0 component and let it do the init.
973 */
974 rc = VMMR3InitR0(pVM);
975 }
976 else
977 Log(("vmR3InitRing0: skipping because of VBOX_SUPLIB_FAKE=fake\n"));
978
979 /*
980 * Do notifications and return.
981 */
982 if (RT_SUCCESS(rc))
983 rc = vmR3InitDoCompleted(pVM, VMINITCOMPLETED_RING0);
984 if (RT_SUCCESS(rc))
985 rc = vmR3InitDoCompleted(pVM, VMINITCOMPLETED_HM);
986
987 LogFlow(("vmR3InitRing0: returns %Rrc\n", rc));
988 return rc;
989}
990
991
992/**
993 * Do init completed notifications.
994 *
995 * @returns VBox status code.
996 * @param pVM The cross context VM structure.
997 * @param enmWhat What's completed.
998 */
999static int vmR3InitDoCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
1000{
1001 int rc = VMMR3InitCompleted(pVM, enmWhat);
1002 if (RT_SUCCESS(rc))
1003 rc = HMR3InitCompleted(pVM, enmWhat);
1004 if (RT_SUCCESS(rc))
1005 rc = NEMR3InitCompleted(pVM, enmWhat);
1006 if (RT_SUCCESS(rc))
1007 rc = PGMR3InitCompleted(pVM, enmWhat);
1008 if (RT_SUCCESS(rc))
1009 rc = CPUMR3InitCompleted(pVM, enmWhat);
1010 if (RT_SUCCESS(rc))
1011 rc = EMR3InitCompleted(pVM, enmWhat);
1012 if (enmWhat == VMINITCOMPLETED_RING3)
1013 {
1014 if (RT_SUCCESS(rc))
1015 rc = SSMR3RegisterStub(pVM, "rem", 1);
1016 }
1017 if (RT_SUCCESS(rc))
1018 rc = PDMR3InitCompleted(pVM, enmWhat);
1019
1020 /* IOM *must* come after PDM, as devices (e.g. DevPcArch) may register some final
1021 handlers in their init completion method. */
1022 if (RT_SUCCESS(rc))
1023 rc = IOMR3InitCompleted(pVM, enmWhat);
1024 return rc;
1025}
1026
1027
1028/**
1029 * Calls the relocation functions for all VMM components so they can update
1030 * any GC pointers. When this function is called all the basic VM members
1031 * have been updated and the actual memory relocation has been done
1032 * by the PGM/MM.
1033 *
1034 * This is used both on init and on runtime relocations.
1035 *
1036 * @param pVM The cross context VM structure.
1037 * @param offDelta Relocation delta relative to old location.
1038 */
1039VMMR3_INT_DECL(void) VMR3Relocate(PVM pVM, RTGCINTPTR offDelta)
1040{
1041 LogFlow(("VMR3Relocate: offDelta=%RGv\n", offDelta));
1042
1043 /*
1044 * The order here is very important!
1045 */
1046 PGMR3Relocate(pVM, offDelta);
1047 PDMR3LdrRelocateU(pVM->pUVM, offDelta);
1048 PGMR3Relocate(pVM, 0); /* Repeat after PDM relocation. */
1049 CPUMR3Relocate(pVM);
1050 HMR3Relocate(pVM);
1051 SELMR3Relocate(pVM);
1052 VMMR3Relocate(pVM, offDelta);
1053 SELMR3Relocate(pVM); /* !hack! fix stack! */
1054 TRPMR3Relocate(pVM, offDelta);
1055 IOMR3Relocate(pVM, offDelta);
1056 EMR3Relocate(pVM);
1057 TMR3Relocate(pVM, offDelta);
1058 IEMR3Relocate(pVM);
1059 DBGFR3Relocate(pVM, offDelta);
1060 PDMR3Relocate(pVM, offDelta);
1061 GIMR3Relocate(pVM, offDelta);
1062}
1063
1064
1065/**
1066 * EMT rendezvous worker for VMR3PowerOn.
1067 *
1068 * @returns VERR_VM_INVALID_VM_STATE or VINF_SUCCESS. (This is a strict return
1069 * code, see FNVMMEMTRENDEZVOUS.)
1070 *
1071 * @param pVM The cross context VM structure.
1072 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1073 * @param pvUser Ignored.
1074 */
1075static DECLCALLBACK(VBOXSTRICTRC) vmR3PowerOn(PVM pVM, PVMCPU pVCpu, void *pvUser)
1076{
1077 LogFlow(("vmR3PowerOn: pVM=%p pVCpu=%p/#%u\n", pVM, pVCpu, pVCpu->idCpu));
1078 Assert(!pvUser); NOREF(pvUser);
1079
1080 /*
1081 * The first thread thru here tries to change the state. We shouldn't be
1082 * called again if this fails.
1083 */
1084 if (pVCpu->idCpu == pVM->cCpus - 1)
1085 {
1086 int rc = vmR3TrySetState(pVM, "VMR3PowerOn", 1, VMSTATE_POWERING_ON, VMSTATE_CREATED);
1087 if (RT_FAILURE(rc))
1088 return rc;
1089 }
1090
1091 VMSTATE enmVMState = VMR3GetState(pVM);
1092 AssertMsgReturn(enmVMState == VMSTATE_POWERING_ON,
1093 ("%s\n", VMR3GetStateName(enmVMState)),
1094 VERR_VM_UNEXPECTED_UNSTABLE_STATE);
1095
1096 /*
1097 * All EMTs change their state to started.
1098 */
1099 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
1100
1101 /*
1102 * EMT(0) is last thru here and it will make the notification calls
1103 * and advance the state.
1104 */
1105 if (pVCpu->idCpu == 0)
1106 {
1107 PDMR3PowerOn(pVM);
1108 vmR3SetState(pVM, VMSTATE_RUNNING, VMSTATE_POWERING_ON);
1109 }
1110
1111 return VINF_SUCCESS;
1112}
1113
1114
1115/**
1116 * Powers on the virtual machine.
1117 *
1118 * @returns VBox status code.
1119 *
1120 * @param pUVM The VM to power on.
1121 *
1122 * @thread Any thread.
1123 * @vmstate Created
1124 * @vmstateto PoweringOn+Running
1125 */
1126VMMR3DECL(int) VMR3PowerOn(PUVM pUVM)
1127{
1128 LogFlow(("VMR3PowerOn: pUVM=%p\n", pUVM));
1129 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1130 PVM pVM = pUVM->pVM;
1131 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1132
1133 /*
1134 * Gather all the EMTs to reduce the init TSC drift and keep
1135 * the state changing APIs a bit uniform.
1136 */
1137 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
1138 vmR3PowerOn, NULL);
1139 LogFlow(("VMR3PowerOn: returns %Rrc\n", rc));
1140 return rc;
1141}
1142
1143
1144/**
1145 * Does the suspend notifications.
1146 *
1147 * @param pVM The cross context VM structure.
1148 * @thread EMT(0)
1149 */
1150static void vmR3SuspendDoWork(PVM pVM)
1151{
1152 PDMR3Suspend(pVM);
1153}
1154
1155
1156/**
1157 * EMT rendezvous worker for VMR3Suspend.
1158 *
1159 * @returns VERR_VM_INVALID_VM_STATE or VINF_EM_SUSPEND. (This is a strict
1160 * return code, see FNVMMEMTRENDEZVOUS.)
1161 *
1162 * @param pVM The cross context VM structure.
1163 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1164 * @param pvUser Ignored.
1165 */
1166static DECLCALLBACK(VBOXSTRICTRC) vmR3Suspend(PVM pVM, PVMCPU pVCpu, void *pvUser)
1167{
1168 VMSUSPENDREASON enmReason = (VMSUSPENDREASON)(uintptr_t)pvUser;
1169 LogFlow(("vmR3Suspend: pVM=%p pVCpu=%p/#%u enmReason=%d\n", pVM, pVCpu, pVCpu->idCpu, enmReason));
1170
1171 /*
1172 * The first EMT switches the state to suspending. If this fails because
1173 * something was racing us in one way or the other, there will be no more
1174 * calls and thus the state assertion below is not going to annoy anyone.
1175 */
1176 if (pVCpu->idCpu == pVM->cCpus - 1)
1177 {
1178 int rc = vmR3TrySetState(pVM, "VMR3Suspend", 2,
1179 VMSTATE_SUSPENDING, VMSTATE_RUNNING,
1180 VMSTATE_SUSPENDING_EXT_LS, VMSTATE_RUNNING_LS);
1181 if (RT_FAILURE(rc))
1182 return rc;
1183 pVM->pUVM->vm.s.enmSuspendReason = enmReason;
1184 }
1185
1186 VMSTATE enmVMState = VMR3GetState(pVM);
1187 AssertMsgReturn( enmVMState == VMSTATE_SUSPENDING
1188 || enmVMState == VMSTATE_SUSPENDING_EXT_LS,
1189 ("%s\n", VMR3GetStateName(enmVMState)),
1190 VERR_VM_UNEXPECTED_UNSTABLE_STATE);
1191
1192 /*
1193 * EMT(0) does the actual suspending *after* all the other CPUs have
1194 * been thru here.
1195 */
1196 if (pVCpu->idCpu == 0)
1197 {
1198 vmR3SuspendDoWork(pVM);
1199
1200 int rc = vmR3TrySetState(pVM, "VMR3Suspend", 2,
1201 VMSTATE_SUSPENDED, VMSTATE_SUSPENDING,
1202 VMSTATE_SUSPENDED_EXT_LS, VMSTATE_SUSPENDING_EXT_LS);
1203 if (RT_FAILURE(rc))
1204 return VERR_VM_UNEXPECTED_UNSTABLE_STATE;
1205 }
1206
1207 return VINF_EM_SUSPEND;
1208}
1209
1210
1211/**
1212 * Suspends a running VM.
1213 *
1214 * @returns VBox status code. When called on EMT, this will be a strict status
1215 * code that has to be propagated up the call stack.
1216 *
1217 * @param pUVM The VM to suspend.
1218 * @param enmReason The reason for suspending.
1219 *
1220 * @thread Any thread.
1221 * @vmstate Running or RunningLS
1222 * @vmstateto Suspending + Suspended or SuspendingExtLS + SuspendedExtLS
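 *
 * A minimal suspend/resume round trip sketch (illustrative only; the
 * VMSUSPENDREASON_USER / VMRESUMEREASON_USER values are assumed to be valid
 * members of the reason enums these APIs validate):
 * @code
 *     int rc = VMR3Suspend(pUVM, VMSUSPENDREASON_USER);
 *     if (RT_SUCCESS(rc))
 *     {
 *         // ... reconfigure devices, take a snapshot, etc. ...
 *         rc = VMR3Resume(pUVM, VMRESUMEREASON_USER);
 *     }
 * @endcode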
1223 */
1224VMMR3DECL(int) VMR3Suspend(PUVM pUVM, VMSUSPENDREASON enmReason)
1225{
1226 LogFlow(("VMR3Suspend: pUVM=%p\n", pUVM));
1227 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1228 AssertReturn(enmReason > VMSUSPENDREASON_INVALID && enmReason < VMSUSPENDREASON_END, VERR_INVALID_PARAMETER);
1229
1230 /*
1231 * Gather all the EMTs to make sure there are no races before
1232 * changing the VM state.
1233 */
1234 int rc = VMMR3EmtRendezvous(pUVM->pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
1235 vmR3Suspend, (void *)(uintptr_t)enmReason);
1236 LogFlow(("VMR3Suspend: returns %Rrc\n", rc));
1237 return rc;
1238}
1239
1240
1241/**
1242 * Retrieves the reason for the most recent suspend.
1243 *
1244 * @returns Suspend reason. VMSUSPENDREASON_INVALID if no suspend has been done
1245 * or the handle is invalid.
1246 * @param pUVM The user mode VM handle.
1247 */
1248VMMR3DECL(VMSUSPENDREASON) VMR3GetSuspendReason(PUVM pUVM)
1249{
1250 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VMSUSPENDREASON_INVALID);
1251 return pUVM->vm.s.enmSuspendReason;
1252}
1253
1254
1255/**
1256 * EMT rendezvous worker for VMR3Resume.
1257 *
1258 * @returns VERR_VM_INVALID_VM_STATE or VINF_EM_RESUME. (This is a strict
1259 * return code, see FNVMMEMTRENDEZVOUS.)
1260 *
1261 * @param pVM The cross context VM structure.
1262 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1263 * @param pvUser Reason.
1264 */
1265static DECLCALLBACK(VBOXSTRICTRC) vmR3Resume(PVM pVM, PVMCPU pVCpu, void *pvUser)
1266{
1267 VMRESUMEREASON enmReason = (VMRESUMEREASON)(uintptr_t)pvUser;
1268 LogFlow(("vmR3Resume: pVM=%p pVCpu=%p/#%u enmReason=%d\n", pVM, pVCpu, pVCpu->idCpu, enmReason));
1269
1270 /*
1271 * The first thread thru here tries to change the state. We shouldn't be
1272 * called again if this fails.
1273 */
1274 if (pVCpu->idCpu == pVM->cCpus - 1)
1275 {
1276 int rc = vmR3TrySetState(pVM, "VMR3Resume", 1, VMSTATE_RESUMING, VMSTATE_SUSPENDED);
1277 if (RT_FAILURE(rc))
1278 return rc;
1279 pVM->pUVM->vm.s.enmResumeReason = enmReason;
1280 }
1281
1282 VMSTATE enmVMState = VMR3GetState(pVM);
1283 AssertMsgReturn(enmVMState == VMSTATE_RESUMING,
1284 ("%s\n", VMR3GetStateName(enmVMState)),
1285 VERR_VM_UNEXPECTED_UNSTABLE_STATE);
1286
1287#if 0
1288 /*
1289 * All EMTs change their state to started.
1290 */
1291 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
1292#endif
1293
1294 /*
1295 * EMT(0) is last thru here and it will make the notification calls
1296 * and advance the state.
1297 */
1298 if (pVCpu->idCpu == 0)
1299 {
1300 PDMR3Resume(pVM);
1301 vmR3SetState(pVM, VMSTATE_RUNNING, VMSTATE_RESUMING);
1302 pVM->vm.s.fTeleportedAndNotFullyResumedYet = false;
1303 }
1304
1305 return VINF_EM_RESUME;
1306}
1307
1308
1309/**
1310 * Resume VM execution.
1311 *
1312 * @returns VBox status code. When called on EMT, this will be a strict status
1313 * code that has to be propagated up the call stack.
1314 *
1315 * @param pUVM The user mode VM handle.
1316 * @param enmReason The reason we're resuming.
1317 *
1318 * @thread Any thread.
1319 * @vmstate Suspended
1320 * @vmstateto Running
1321 */
1322VMMR3DECL(int) VMR3Resume(PUVM pUVM, VMRESUMEREASON enmReason)
1323{
1324 LogFlow(("VMR3Resume: pUVM=%p\n", pUVM));
1325 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1326 PVM pVM = pUVM->pVM;
1327 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1328 AssertReturn(enmReason > VMRESUMEREASON_INVALID && enmReason < VMRESUMEREASON_END, VERR_INVALID_PARAMETER);
1329
1330 /*
1331 * Gather all the EMTs to make sure there are no races before
1332 * changing the VM state.
1333 */
1334 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
1335 vmR3Resume, (void *)(uintptr_t)enmReason);
1336 LogFlow(("VMR3Resume: returns %Rrc\n", rc));
1337 return rc;
1338}
1339
1340
1341/**
1342 * Retrieves the reason for the most recent resume.
1343 *
1344 * @returns Resume reason. VMRESUMEREASON_INVALID if no resume has been
1345 * done or the handle is invalid.
1346 * @param pUVM The user mode VM handle.
1347 */
1348VMMR3DECL(VMRESUMEREASON) VMR3GetResumeReason(PUVM pUVM)
1349{
1350 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VMRESUMEREASON_INVALID);
1351 return pUVM->vm.s.enmResumeReason;
1352}
1353
1354
1355/**
1356 * EMT rendezvous worker for VMR3Save and VMR3Teleport that suspends the VM
1357 * after the live step has been completed.
1358 *
1359 * @returns VERR_VM_INVALID_VM_STATE or VINF_EM_RESUME. (This is a strict
1360 * return code, see FNVMMEMTRENDEZVOUS.)
1361 *
1362 * @param pVM The cross context VM structure.
1363 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1364 * @param pvUser The pfSuspended argument of vmR3SaveTeleport.
1365 */
1366static DECLCALLBACK(VBOXSTRICTRC) vmR3LiveDoSuspend(PVM pVM, PVMCPU pVCpu, void *pvUser)
1367{
1368 LogFlow(("vmR3LiveDoSuspend: pVM=%p pVCpu=%p/#%u\n", pVM, pVCpu, pVCpu->idCpu));
1369 bool *pfSuspended = (bool *)pvUser;
1370
1371 /*
1372 * The first thread thru here tries to change the state. We shouldn't be
1373 * called again if this fails.
1374 */
1375 if (pVCpu->idCpu == pVM->cCpus - 1U)
1376 {
1377 PUVM pUVM = pVM->pUVM;
1378 int rc;
1379
1380 RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
1381 VMSTATE enmVMState = pVM->enmVMState;
1382 switch (enmVMState)
1383 {
1384 case VMSTATE_RUNNING_LS:
1385 vmR3SetStateLocked(pVM, pUVM, VMSTATE_SUSPENDING_LS, VMSTATE_RUNNING_LS, false /*fSetRatherThanClearFF*/);
1386 rc = VINF_SUCCESS;
1387 break;
1388
1389 case VMSTATE_SUSPENDED_EXT_LS:
1390 case VMSTATE_SUSPENDED_LS: /* (via reset) */
1391 rc = VINF_SUCCESS;
1392 break;
1393
1394 case VMSTATE_DEBUGGING_LS:
1395 rc = VERR_TRY_AGAIN;
1396 break;
1397
1398 case VMSTATE_OFF_LS:
1399 vmR3SetStateLocked(pVM, pUVM, VMSTATE_OFF, VMSTATE_OFF_LS, false /*fSetRatherThanClearFF*/);
1400 rc = VERR_SSM_LIVE_POWERED_OFF;
1401 break;
1402
1403 case VMSTATE_FATAL_ERROR_LS:
1404 vmR3SetStateLocked(pVM, pUVM, VMSTATE_FATAL_ERROR, VMSTATE_FATAL_ERROR_LS, false /*fSetRatherThanClearFF*/);
1405 rc = VERR_SSM_LIVE_FATAL_ERROR;
1406 break;
1407
1408 case VMSTATE_GURU_MEDITATION_LS:
1409 vmR3SetStateLocked(pVM, pUVM, VMSTATE_GURU_MEDITATION, VMSTATE_GURU_MEDITATION_LS, false /*fSetRatherThanClearFF*/);
1410 rc = VERR_SSM_LIVE_GURU_MEDITATION;
1411 break;
1412
1413 case VMSTATE_POWERING_OFF_LS:
1414 case VMSTATE_SUSPENDING_EXT_LS:
1415 case VMSTATE_RESETTING_LS:
1416 default:
1417 AssertMsgFailed(("%s\n", VMR3GetStateName(enmVMState)));
1418 rc = VERR_VM_UNEXPECTED_VM_STATE;
1419 break;
1420 }
1421 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
1422 if (RT_FAILURE(rc))
1423 {
1424 LogFlow(("vmR3LiveDoSuspend: returns %Rrc (state was %s)\n", rc, VMR3GetStateName(enmVMState)));
1425 return rc;
1426 }
1427 }
1428
1429 VMSTATE enmVMState = VMR3GetState(pVM);
1430 AssertMsgReturn(enmVMState == VMSTATE_SUSPENDING_LS,
1431 ("%s\n", VMR3GetStateName(enmVMState)),
1432 VERR_VM_UNEXPECTED_UNSTABLE_STATE);
1433
1434 /*
1435 * Only EMT(0) has work to do since it's last thru here.
1436 */
1437 if (pVCpu->idCpu == 0)
1438 {
1439 vmR3SuspendDoWork(pVM);
1440 int rc = vmR3TrySetState(pVM, "VMR3Suspend", 1,
1441 VMSTATE_SUSPENDED_LS, VMSTATE_SUSPENDING_LS);
1442 if (RT_FAILURE(rc))
1443 return VERR_VM_UNEXPECTED_UNSTABLE_STATE;
1444
1445 *pfSuspended = true;
1446 }
1447
1448 return VINF_EM_SUSPEND;
1449}
1450
1451
1452/**
1453 * EMT rendezvous worker that VMR3Save and VMR3Teleport uses to clean up a
1454 * SSMR3LiveDoStep1 failure.
1455 *
1456 * Doing this as a rendezvous operation avoids all annoying transition
1457 * states.
1458 *
1459 * @returns VERR_VM_INVALID_VM_STATE, VINF_SUCCESS or some specific VERR_SSM_*
1460 * status code. (This is a strict return code, see FNVMMEMTRENDEZVOUS.)
1461 *
1462 * @param pVM The cross context VM structure.
1463 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1464 * @param pvUser The pfSuspended argument of vmR3SaveTeleport.
1465 */
1466static DECLCALLBACK(VBOXSTRICTRC) vmR3LiveDoStep1Cleanup(PVM pVM, PVMCPU pVCpu, void *pvUser)
1467{
1468 LogFlow(("vmR3LiveDoStep1Cleanup: pVM=%p pVCpu=%p/#%u\n", pVM, pVCpu, pVCpu->idCpu));
1469 bool *pfSuspended = (bool *)pvUser;
1470 NOREF(pVCpu);
1471
1472 int rc = vmR3TrySetState(pVM, "vmR3LiveDoStep1Cleanup", 8,
1473 VMSTATE_OFF, VMSTATE_OFF_LS, /* 1 */
1474 VMSTATE_FATAL_ERROR, VMSTATE_FATAL_ERROR_LS, /* 2 */
1475 VMSTATE_GURU_MEDITATION, VMSTATE_GURU_MEDITATION_LS, /* 3 */
1476 VMSTATE_SUSPENDED, VMSTATE_SUSPENDED_LS, /* 4 */
1477 VMSTATE_SUSPENDED, VMSTATE_SAVING,
1478 VMSTATE_SUSPENDED, VMSTATE_SUSPENDED_EXT_LS,
1479 VMSTATE_RUNNING, VMSTATE_RUNNING_LS,
1480 VMSTATE_DEBUGGING, VMSTATE_DEBUGGING_LS);
1481 if (rc == 1)
1482 rc = VERR_SSM_LIVE_POWERED_OFF;
1483 else if (rc == 2)
1484 rc = VERR_SSM_LIVE_FATAL_ERROR;
1485 else if (rc == 3)
1486 rc = VERR_SSM_LIVE_GURU_MEDITATION;
1487 else if (rc == 4)
1488 {
1489 *pfSuspended = true;
1490 rc = VINF_SUCCESS;
1491 }
1492 else if (rc > 0)
1493 rc = VINF_SUCCESS;
1494 return rc;
1495}
1496
1497
1498/**
1499 * EMT(0) worker for VMR3Save and VMR3Teleport that completes the live save.
1500 *
1501 * @returns VBox status code.
1502 * @retval VINF_SSM_LIVE_SUSPENDED if VMR3Suspend was called.
1503 *
1504 * @param pVM The cross context VM structure.
1505 * @param pSSM The handle of saved state operation.
1506 *
1507 * @thread EMT(0)
1508 */
1509static DECLCALLBACK(int) vmR3LiveDoStep2(PVM pVM, PSSMHANDLE pSSM)
1510{
1511 LogFlow(("vmR3LiveDoStep2: pVM=%p pSSM=%p\n", pVM, pSSM));
1512 VM_ASSERT_EMT0(pVM);
1513
1514 /*
1515 * Advance the state and mark if VMR3Suspend was called.
1516 */
1517 int rc = VINF_SUCCESS;
1518 VMSTATE enmVMState = VMR3GetState(pVM);
1519 if (enmVMState == VMSTATE_SUSPENDED_LS)
1520 vmR3SetState(pVM, VMSTATE_SAVING, VMSTATE_SUSPENDED_LS);
1521 else
1522 {
1523 if (enmVMState != VMSTATE_SAVING)
1524 vmR3SetState(pVM, VMSTATE_SAVING, VMSTATE_SUSPENDED_EXT_LS);
1525 rc = VINF_SSM_LIVE_SUSPENDED;
1526 }
1527
1528 /*
1529 * Finish up and release the handle. Careful with the status codes.
1530 */
1531 int rc2 = SSMR3LiveDoStep2(pSSM);
1532 if (rc == VINF_SUCCESS || (RT_FAILURE(rc2) && RT_SUCCESS(rc)))
1533 rc = rc2;
1534
1535 rc2 = SSMR3LiveDone(pSSM);
1536 if (rc == VINF_SUCCESS || (RT_FAILURE(rc2) && RT_SUCCESS(rc)))
1537 rc = rc2;
1538
1539 /*
1540 * Advance to the final state and return.
1541 */
1542 vmR3SetState(pVM, VMSTATE_SUSPENDED, VMSTATE_SAVING);
1543 Assert(rc > VINF_EM_LAST || rc < VINF_EM_FIRST);
1544 return rc;
1545}
1546
1547
1548/**
1549 * Worker for vmR3SaveTeleport that validates the state and calls SSMR3Save or
1550 * SSMR3LiveSave.
1551 *
1552 * @returns VBox status code.
1553 *
1554 * @param pVM The cross context VM structure.
1555 * @param cMsMaxDowntime The maximum downtime given as milliseconds.
1556 * @param pszFilename The name of the file. NULL if pStreamOps is used.
1557 * @param pStreamOps The stream methods. NULL if pszFilename is used.
1558 * @param pvStreamOpsUser The user argument to the stream methods.
1559 * @param enmAfter What to do afterwards.
1560 * @param pfnProgress Progress callback. Optional.
1561 * @param pvProgressUser User argument for the progress callback.
1562 * @param ppSSM Where to return the saved state handle in case of a
1563 * live snapshot scenario.
1564 *
1565 * @thread EMT
1566 */
1567static DECLCALLBACK(int) vmR3Save(PVM pVM, uint32_t cMsMaxDowntime, const char *pszFilename, PCSSMSTRMOPS pStreamOps, void *pvStreamOpsUser,
1568 SSMAFTER enmAfter, PFNVMPROGRESS pfnProgress, void *pvProgressUser, PSSMHANDLE *ppSSM)
1569{
1570 int rc = VINF_SUCCESS;
1571
1572 LogFlow(("vmR3Save: pVM=%p cMsMaxDowntime=%u pszFilename=%p:{%s} pStreamOps=%p pvStreamOpsUser=%p enmAfter=%d pfnProgress=%p pvProgressUser=%p ppSSM=%p\n",
1573 pVM, cMsMaxDowntime, pszFilename, pszFilename, pStreamOps, pvStreamOpsUser, enmAfter, pfnProgress, pvProgressUser, ppSSM));
1574
1575 /*
1576 * Validate input.
1577 */
1578 AssertPtrNull(pszFilename);
1579 AssertPtrNull(pStreamOps);
1580 AssertPtr(pVM);
1581 Assert( enmAfter == SSMAFTER_DESTROY
1582 || enmAfter == SSMAFTER_CONTINUE
1583 || enmAfter == SSMAFTER_TELEPORT);
1584 AssertPtr(ppSSM);
1585 *ppSSM = NULL;
1586
1587 /*
1588 * Change the state and perform/start the saving.
1589 */
1590 rc = vmR3TrySetState(pVM, "VMR3Save", 2,
1591 VMSTATE_SAVING, VMSTATE_SUSPENDED,
1592 VMSTATE_RUNNING_LS, VMSTATE_RUNNING);
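 /* Note: on success vmR3TrySetState returns the 1-based index of the matched
    transition pair, hence the rc == 1 (plain save) and rc == 2 (live save)
    checks below. */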
1593 if (rc == 1 && enmAfter != SSMAFTER_TELEPORT)
1594 {
1595 rc = SSMR3Save(pVM, pszFilename, pStreamOps, pvStreamOpsUser, enmAfter, pfnProgress, pvProgressUser);
1596 vmR3SetState(pVM, VMSTATE_SUSPENDED, VMSTATE_SAVING);
1597 }
1598 else if (rc == 2 || enmAfter == SSMAFTER_TELEPORT)
1599 {
1600 if (enmAfter == SSMAFTER_TELEPORT)
1601 pVM->vm.s.fTeleportedAndNotFullyResumedYet = true;
1602 rc = SSMR3LiveSave(pVM, cMsMaxDowntime, pszFilename, pStreamOps, pvStreamOpsUser,
1603 enmAfter, pfnProgress, pvProgressUser, ppSSM);
1604 /* (We're not subject to cancellation just yet.) */
1605 }
1606 else
1607 Assert(RT_FAILURE(rc));
1608 return rc;
1609}
1610
1611
1612/**
1613 * Common worker for VMR3Save and VMR3Teleport.
1614 *
1615 * @returns VBox status code.
1616 *
1617 * @param pVM The cross context VM structure.
1618 * @param cMsMaxDowntime The maximum downtime given as milliseconds.
1619 * @param pszFilename The name of the file. NULL if pStreamOps is used.
1620 * @param pStreamOps The stream methods. NULL if pszFilename is used.
1621 * @param pvStreamOpsUser The user argument to the stream methods.
1622 * @param enmAfter What to do afterwards.
1623 * @param pfnProgress Progress callback. Optional.
1624 * @param pvProgressUser User argument for the progress callback.
1625 * @param pfSuspended Set if we suspended the VM.
1626 *
1627 * @thread Non-EMT
1628 */
1629static int vmR3SaveTeleport(PVM pVM, uint32_t cMsMaxDowntime,
1630 const char *pszFilename, PCSSMSTRMOPS pStreamOps, void *pvStreamOpsUser,
1631 SSMAFTER enmAfter, PFNVMPROGRESS pfnProgress, void *pvProgressUser, bool *pfSuspended)
1632{
1633 /*
1634 * Request the operation in EMT(0).
1635 */
1636 PSSMHANDLE pSSM;
1637 int rc = VMR3ReqCallWait(pVM, 0 /*idDstCpu*/,
1638 (PFNRT)vmR3Save, 9, pVM, cMsMaxDowntime, pszFilename, pStreamOps, pvStreamOpsUser,
1639 enmAfter, pfnProgress, pvProgressUser, &pSSM);
1640 if ( RT_SUCCESS(rc)
1641 && pSSM)
1642 {
1643 /*
1644 * Live snapshot.
1645 *
1646 * The state handling here is kind of tricky; doing it on EMT(0) helps
1647 * a bit. See the VMSTATE diagram for details.
1648 */
1649 rc = SSMR3LiveDoStep1(pSSM);
1650 if (RT_SUCCESS(rc))
1651 {
1652 if (VMR3GetState(pVM) != VMSTATE_SAVING)
1653 for (;;)
1654 {
1655 /* Try to suspend the VM. */
1656 rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
1657 vmR3LiveDoSuspend, pfSuspended);
1658 if (rc != VERR_TRY_AGAIN)
1659 break;
1660
1661 /* Wait for the state to change. */
1662 RTThreadSleep(250); /** @todo Live Migration: fix this polling wait by some smart use of multiple release event semaphores.. */
1663 }
1664 if (RT_SUCCESS(rc))
1665 rc = VMR3ReqCallWait(pVM, 0 /*idDstCpu*/, (PFNRT)vmR3LiveDoStep2, 2, pVM, pSSM);
1666 else
1667 {
1668 int rc2 = VMR3ReqCallWait(pVM, 0 /*idDstCpu*/, (PFNRT)SSMR3LiveDone, 1, pSSM);
1669 AssertMsg(rc2 == rc, ("%Rrc != %Rrc\n", rc2, rc)); NOREF(rc2);
1670 }
1671 }
1672 else
1673 {
1674 int rc2 = VMR3ReqCallWait(pVM, 0 /*idDstCpu*/, (PFNRT)SSMR3LiveDone, 1, pSSM);
1675 AssertMsg(rc2 == rc, ("%Rrc != %Rrc\n", rc2, rc));
1676
1677 rc2 = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE, vmR3LiveDoStep1Cleanup, pfSuspended);
1678 if (RT_FAILURE(rc2) && rc == VERR_SSM_CANCELLED)
1679 rc = rc2;
1680 }
1681 }
1682
1683 return rc;
1684}
1685
1686
1687/**
1688 * Save current VM state.
1689 *
1690 * Can be used for both saving the state and creating snapshots.
1691 *
1692 * When called for a VM in the Running state, the saved state is created live
1693 * and the VM is only suspended when the final part of the saving is performed.
1694 * The VM state will not be restored to Running in this case and it's up to the
1695 * caller to call VMR3Resume if this is desirable. (The rationale is that the
1696 * caller probably wishes to reconfigure the disks before resuming the VM.)
1697 *
1698 * @returns VBox status code.
1699 *
1700 * @param pUVM The VM which state should be saved.
1701 * @param pszFilename The name of the save state file.
1702 * @param fContinueAfterwards Whether to continue execution afterwards or not.
1703 * When in doubt, set this to true.
1704 * @param pfnProgress Progress callback. Optional.
1705 * @param pvUser User argument for the progress callback.
1706 * @param pfSuspended Set if we suspended the VM.
1707 *
1708 * @thread Non-EMT.
1709 * @vmstate Suspended or Running
1710 * @vmstateto Saving+Suspended or
1711 * RunningLS+SuspendingLS+SuspendedLS+Saving+Suspended.
1712 */
1713VMMR3DECL(int) VMR3Save(PUVM pUVM, const char *pszFilename, bool fContinueAfterwards, PFNVMPROGRESS pfnProgress, void *pvUser,
1714 bool *pfSuspended)
1715{
1716 LogFlow(("VMR3Save: pUVM=%p pszFilename=%p:{%s} fContinueAfterwards=%RTbool pfnProgress=%p pvUser=%p pfSuspended=%p\n",
1717 pUVM, pszFilename, pszFilename, fContinueAfterwards, pfnProgress, pvUser, pfSuspended));
1718
1719 /*
1720 * Validate input.
1721 */
1722 AssertPtr(pfSuspended);
1723 *pfSuspended = false;
1724 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1725 PVM pVM = pUVM->pVM;
1726 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1727 VM_ASSERT_OTHER_THREAD(pVM);
1728 AssertPtrReturn(pszFilename, VERR_INVALID_POINTER);
1729 AssertReturn(*pszFilename, VERR_INVALID_PARAMETER);
1730 AssertPtrNullReturn(pfnProgress, VERR_INVALID_POINTER);
1731
1732 /*
1733 * Join paths with VMR3Teleport.
1734 */
1735 SSMAFTER enmAfter = fContinueAfterwards ? SSMAFTER_CONTINUE : SSMAFTER_DESTROY;
1736 int rc = vmR3SaveTeleport(pVM, 250 /*cMsMaxDowntime*/,
1737 pszFilename, NULL /* pStreamOps */, NULL /* pvStreamOpsUser */,
1738 enmAfter, pfnProgress, pvUser, pfSuspended);
1739 LogFlow(("VMR3Save: returns %Rrc (*pfSuspended=%RTbool)\n", rc, *pfSuspended));
1740 return rc;
1741}
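
/*
 * Usage sketch (illustrative only): driving VMR3Save from a frontend and
 * resuming afterwards, since the state is left at Suspended as noted above.
 * The file name, pfnProgress and pvUser are assumed to be supplied by the
 * caller; VMRESUMEREASON_STATE_SAVED is the natural resume reason here.
 *
 *     bool fSuspended = false;
 *     int rc = VMR3Save(pUVM, "MyVM.sav", true, pfnProgress, pvUser, &fSuspended);
 *     if (RT_SUCCESS(rc))
 *         rc = VMR3Resume(pUVM, VMRESUMEREASON_STATE_SAVED);
 */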
1742
1743
1744/**
1745 * Teleport the VM (aka live migration).
1746 *
1747 * @returns VBox status code.
1748 *
1749 * @param pUVM The VM which state should be saved.
1750 * @param cMsMaxDowntime The maximum downtime given as milliseconds.
1751 * @param pStreamOps The stream methods.
1752 * @param pvStreamOpsUser The user argument to the stream methods.
1753 * @param pfnProgress Progress callback. Optional.
1754 * @param pvProgressUser User argument for the progress callback.
1755 * @param pfSuspended Set if we suspended the VM.
1756 *
1757 * @thread Non-EMT.
1758 * @vmstate Suspended or Running
1759 * @vmstateto Saving+Suspended or
1760 * RunningLS+SuspendingLS+SuspendedLS+Saving+Suspended.
1761 */
1762VMMR3DECL(int) VMR3Teleport(PUVM pUVM, uint32_t cMsMaxDowntime, PCSSMSTRMOPS pStreamOps, void *pvStreamOpsUser,
1763 PFNVMPROGRESS pfnProgress, void *pvProgressUser, bool *pfSuspended)
1764{
1765 LogFlow(("VMR3Teleport: pUVM=%p cMsMaxDowntime=%u pStreamOps=%p pvStreamOps=%p pfnProgress=%p pvProgressUser=%p\n",
1766 pUVM, cMsMaxDowntime, pStreamOps, pvStreamOpsUser, pfnProgress, pvProgressUser));
1767
1768 /*
1769 * Validate input.
1770 */
1771 AssertPtr(pfSuspended);
1772 *pfSuspended = false;
1773 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1774 PVM pVM = pUVM->pVM;
1775 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1776 VM_ASSERT_OTHER_THREAD(pVM);
1777 AssertPtrReturn(pStreamOps, VERR_INVALID_POINTER);
1778 AssertPtrNullReturn(pfnProgress, VERR_INVALID_POINTER);
1779
1780 /*
1781 * Join paths with VMR3Save.
1782 */
1783 int rc = vmR3SaveTeleport(pVM, cMsMaxDowntime, NULL /*pszFilename*/, pStreamOps, pvStreamOpsUser,
1784 SSMAFTER_TELEPORT, pfnProgress, pvProgressUser, pfSuspended);
1785 LogFlow(("VMR3Teleport: returns %Rrc (*pfSuspended=%RTbool)\n", rc, *pfSuspended));
1786 return rc;
1787}
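
/*
 * Usage sketch (illustrative only): teleporting over a caller-provided
 * stream. g_TcpOps stands in for a fully initialized SSMSTRMOPS table and
 * pvConn for its user argument; both names are hypothetical, and
 * VMRESUMEREASON_TELEPORT_FAILED is assumed to be the matching reason code
 * when resuming after a failed attempt.
 *
 *     bool fSuspended = false;
 *     int rc = VMR3Teleport(pUVM, 250, &g_TcpOps, pvConn, NULL, NULL, &fSuspended);
 *     if (RT_FAILURE(rc) && fSuspended)
 *         VMR3Resume(pUVM, VMRESUMEREASON_TELEPORT_FAILED);
 */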
1788
1789
1790
1791/**
1792 * EMT(0) worker for VMR3LoadFromFile and VMR3LoadFromStream.
1793 *
1794 * @returns VBox status code.
1795 *
1796 * @param pUVM Pointer to the VM.
1797 * @param pszFilename The name of the file. NULL if pStreamOps is used.
1798 * @param pStreamOps The stream methods. NULL if pszFilename is used.
1799 * @param pvStreamOpsUser The user argument to the stream methods.
1800 * @param pfnProgress Progress callback. Optional.
1801 * @param pvProgressUser User argument for the progress callback.
1802 * @param fTeleporting Indicates whether we're teleporting or not.
1803 *
1804 * @thread EMT.
1805 */
1806static DECLCALLBACK(int) vmR3Load(PUVM pUVM, const char *pszFilename, PCSSMSTRMOPS pStreamOps, void *pvStreamOpsUser,
1807 PFNVMPROGRESS pfnProgress, void *pvProgressUser, bool fTeleporting)
1808{
1809 LogFlow(("vmR3Load: pUVM=%p pszFilename=%p:{%s} pStreamOps=%p pvStreamOpsUser=%p pfnProgress=%p pvProgressUser=%p fTeleporting=%RTbool\n",
1810 pUVM, pszFilename, pszFilename, pStreamOps, pvStreamOpsUser, pfnProgress, pvProgressUser, fTeleporting));
1811
1812 /*
1813 * Validate input (paranoia).
1814 */
1815 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1816 PVM pVM = pUVM->pVM;
1817 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1818 AssertPtrNull(pszFilename);
1819 AssertPtrNull(pStreamOps);
1820 AssertPtrNull(pfnProgress);
1821
1822 /*
1823 * Change the state and perform the load.
1824 *
1825 * Always perform a relocation round afterwards to make sure hypervisor
1826 * selectors and such are correct.
1827 */
1828 int rc = vmR3TrySetState(pVM, "VMR3Load", 2,
1829 VMSTATE_LOADING, VMSTATE_CREATED,
1830 VMSTATE_LOADING, VMSTATE_SUSPENDED);
1831 if (RT_FAILURE(rc))
1832 return rc;
1833
1834 pVM->vm.s.fTeleportedAndNotFullyResumedYet = fTeleporting;
1835
1836 uint32_t cErrorsPriorToSave = VMR3GetErrorCount(pUVM);
1837 rc = SSMR3Load(pVM, pszFilename, pStreamOps, pvStreamOpsUser, SSMAFTER_RESUME, pfnProgress, pvProgressUser);
1838 if (RT_SUCCESS(rc))
1839 {
1840 VMR3Relocate(pVM, 0 /*offDelta*/);
1841 vmR3SetState(pVM, VMSTATE_SUSPENDED, VMSTATE_LOADING);
1842 }
1843 else
1844 {
1845 pVM->vm.s.fTeleportedAndNotFullyResumedYet = false;
1846 vmR3SetState(pVM, VMSTATE_LOAD_FAILURE, VMSTATE_LOADING);
1847
1848 if (cErrorsPriorToSave == VMR3GetErrorCount(pUVM))
1849 rc = VMSetError(pVM, rc, RT_SRC_POS,
1850 N_("Unable to restore the virtual machine's saved state from '%s'. "
1851 "It may be damaged or from an older version of VirtualBox. "
1852 "Please discard the saved state before starting the virtual machine"),
1853 pszFilename);
1854 }
1855
1856 return rc;
1857}
1858
1859
1860/**
1861 * Loads a VM state into a newly created VM or one that is suspended.
1862 *
1863 * To restore a saved state on VM startup, call this function and then resume
1864 * the VM instead of powering it on.
1865 *
1866 * @returns VBox status code.
1867 *
1868 * @param pUVM The user mode VM structure.
1869 * @param pszFilename The name of the save state file.
1870 * @param pfnProgress Progress callback. Optional.
1871 * @param pvUser User argument for the progress callback.
1872 *
1873 * @thread Any thread.
1874 * @vmstate Created, Suspended
1875 * @vmstateto Loading+Suspended
1876 */
1877VMMR3DECL(int) VMR3LoadFromFile(PUVM pUVM, const char *pszFilename, PFNVMPROGRESS pfnProgress, void *pvUser)
1878{
1879 LogFlow(("VMR3LoadFromFile: pUVM=%p pszFilename=%p:{%s} pfnProgress=%p pvUser=%p\n",
1880 pUVM, pszFilename, pszFilename, pfnProgress, pvUser));
1881
1882 /*
1883 * Validate input.
1884 */
1885 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1886 AssertPtrReturn(pszFilename, VERR_INVALID_POINTER);
1887
1888 /*
1889 * Forward the request to EMT(0). No need to set up a rendezvous here
1890 * since there is no execution taking place when this call is allowed.
1891 */
1892 int rc = VMR3ReqCallWaitU(pUVM, 0 /*idDstCpu*/, (PFNRT)vmR3Load, 7,
1893 pUVM, pszFilename, (uintptr_t)NULL /*pStreamOps*/, (uintptr_t)NULL /*pvStreamOpsUser*/,
1894 pfnProgress, pvUser, false /*fTeleporting*/);
1895 LogFlow(("VMR3LoadFromFile: returns %Rrc\n", rc));
1896 return rc;
1897}
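
/*
 * Usage sketch (illustrative only): restoring at startup as described
 * above - load the saved state into the created VM, then resume it instead
 * of powering it on. The file name is a placeholder.
 *
 *     int rc = VMR3LoadFromFile(pUVM, "MyVM.sav", NULL, NULL);
 *     if (RT_SUCCESS(rc))
 *         rc = VMR3Resume(pUVM, VMRESUMEREASON_STATE_RESTORED);
 */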
1898
1899
1900/**
1901 * VMR3LoadFromFile for arbitrary file streams.
1902 *
1903 * @returns VBox status code.
1904 *
1905 * @param pUVM Pointer to the VM.
1906 * @param pStreamOps The stream methods.
1907 * @param pvStreamOpsUser The user argument to the stream methods.
1908 * @param pfnProgress Progress callback. Optional.
1909 * @param pvProgressUser User argument for the progress callback.
1910 *
1911 * @thread Any thread.
1912 * @vmstate Created, Suspended
1913 * @vmstateto Loading+Suspended
1914 */
1915VMMR3DECL(int) VMR3LoadFromStream(PUVM pUVM, PCSSMSTRMOPS pStreamOps, void *pvStreamOpsUser,
1916 PFNVMPROGRESS pfnProgress, void *pvProgressUser)
1917{
1918 LogFlow(("VMR3LoadFromStream: pUVM=%p pStreamOps=%p pvStreamOpsUser=%p pfnProgress=%p pvProgressUser=%p\n",
1919 pUVM, pStreamOps, pvStreamOpsUser, pfnProgress, pvProgressUser));
1920
1921 /*
1922 * Validate input.
1923 */
1924 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1925 AssertPtrReturn(pStreamOps, VERR_INVALID_POINTER);
1926
1927 /*
1928 * Forward the request to EMT(0). No need to set up a rendezvous here
1929 * since there is no execution taking place when this call is allowed.
1930 */
1931 int rc = VMR3ReqCallWaitU(pUVM, 0 /*idDstCpu*/, (PFNRT)vmR3Load, 7,
1932 pUVM, (uintptr_t)NULL /*pszFilename*/, pStreamOps, pvStreamOpsUser, pfnProgress,
1933 pvProgressUser, true /*fTeleporting*/);
1934 LogFlow(("VMR3LoadFromStream: returns %Rrc\n", rc));
1935 return rc;
1936}
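
/*
 * Usage sketch (illustrative only): same as VMR3LoadFromFile but reading
 * through a stream table. The SSMSTRMOPS instance (pfnRead and friends) is
 * assumed to be initialized elsewhere by the frontend; pOps and
 * pvStreamUser are hypothetical names.
 *
 *     int rc = VMR3LoadFromStream(pUVM, pOps, pvStreamUser, NULL, NULL);
 *     if (RT_SUCCESS(rc))
 *         rc = VMR3Resume(pUVM, VMRESUMEREASON_STATE_RESTORED);
 */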
1937
1938
1939/**
1940 * EMT rendezvous worker for VMR3PowerOff.
1941 *
1942 * @returns VERR_VM_INVALID_VM_STATE or VINF_EM_OFF. (This is a strict
1943 * return code, see FNVMMEMTRENDEZVOUS.)
1944 *
1945 * @param pVM The cross context VM structure.
1946 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1947 * @param pvUser Ignored.
1948 */
1949static DECLCALLBACK(VBOXSTRICTRC) vmR3PowerOff(PVM pVM, PVMCPU pVCpu, void *pvUser)
1950{
1951 LogFlow(("vmR3PowerOff: pVM=%p pVCpu=%p/#%u\n", pVM, pVCpu, pVCpu->idCpu));
1952 Assert(!pvUser); NOREF(pvUser);
1953
1954 /*
1955 * The first EMT thru here will change the state to PoweringOff.
1956 */
1957 if (pVCpu->idCpu == pVM->cCpus - 1)
1958 {
1959 int rc = vmR3TrySetState(pVM, "VMR3PowerOff", 11,
1960 VMSTATE_POWERING_OFF, VMSTATE_RUNNING, /* 1 */
1961 VMSTATE_POWERING_OFF, VMSTATE_SUSPENDED, /* 2 */
1962 VMSTATE_POWERING_OFF, VMSTATE_DEBUGGING, /* 3 */
1963 VMSTATE_POWERING_OFF, VMSTATE_LOAD_FAILURE, /* 4 */
1964 VMSTATE_POWERING_OFF, VMSTATE_GURU_MEDITATION, /* 5 */
1965 VMSTATE_POWERING_OFF, VMSTATE_FATAL_ERROR, /* 6 */
1966 VMSTATE_POWERING_OFF, VMSTATE_CREATED, /* 7 */ /** @todo update the diagram! */
1967 VMSTATE_POWERING_OFF_LS, VMSTATE_RUNNING_LS, /* 8 */
1968 VMSTATE_POWERING_OFF_LS, VMSTATE_DEBUGGING_LS, /* 9 */
1969 VMSTATE_POWERING_OFF_LS, VMSTATE_GURU_MEDITATION_LS,/* 10 */
1970 VMSTATE_POWERING_OFF_LS, VMSTATE_FATAL_ERROR_LS); /* 11 */
1971 if (RT_FAILURE(rc))
1972 return rc;
1973 if (rc >= 7)
1974 SSMR3Cancel(pVM->pUVM);
1975 }
1976
1977 /*
1978 * Check the state.
1979 */
1980 VMSTATE enmVMState = VMR3GetState(pVM);
1981 AssertMsgReturn( enmVMState == VMSTATE_POWERING_OFF
1982 || enmVMState == VMSTATE_POWERING_OFF_LS,
1983 ("%s\n", VMR3GetStateName(enmVMState)),
1984 VERR_VM_INVALID_VM_STATE);
1985
1986 /*
1987 * EMT(0) does the actual power off work here *after* all the other EMTs
1988 * have been thru and entered the STOPPED state.
1989 */
1990 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STOPPED);
1991 if (pVCpu->idCpu == 0)
1992 {
1993 /*
1994 * For debugging purposes, we will log a summary of the guest state at this point.
1995 */
1996 if (enmVMState != VMSTATE_GURU_MEDITATION)
1997 {
1998 /** @todo make the state dumping at VMR3PowerOff optional. */
1999 bool fOldBuffered = RTLogRelSetBuffering(true /*fBuffered*/);
2000 RTLogRelPrintf("****************** Guest state at power off for VCpu %u ******************\n", pVCpu->idCpu);
2001 DBGFR3InfoEx(pVM->pUVM, pVCpu->idCpu, "cpumguest", "verbose", DBGFR3InfoLogRelHlp());
2002 RTLogRelPrintf("***\n");
2003 DBGFR3InfoEx(pVM->pUVM, pVCpu->idCpu, "cpumguesthwvirt", "verbose", DBGFR3InfoLogRelHlp());
2004 RTLogRelPrintf("***\n");
2005 DBGFR3InfoEx(pVM->pUVM, pVCpu->idCpu, "mode", NULL, DBGFR3InfoLogRelHlp());
2006 RTLogRelPrintf("***\n");
2007 DBGFR3Info(pVM->pUVM, "activetimers", NULL, DBGFR3InfoLogRelHlp());
2008 RTLogRelPrintf("***\n");
2009 DBGFR3Info(pVM->pUVM, "gdt", NULL, DBGFR3InfoLogRelHlp());
2010 /** @todo dump guest call stack. */
2011 RTLogRelSetBuffering(fOldBuffered);
2012 RTLogRelPrintf("************** End of Guest state at power off ***************\n");
2013 }
2014
2015 /*
2016 * Perform the power off notifications and advance the state to
2017 * Off or OffLS.
2018 */
2019 PDMR3PowerOff(pVM);
2020 DBGFR3PowerOff(pVM);
2021
2022 PUVM pUVM = pVM->pUVM;
2023 RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
2024 enmVMState = pVM->enmVMState;
2025 if (enmVMState == VMSTATE_POWERING_OFF_LS)
2026 vmR3SetStateLocked(pVM, pUVM, VMSTATE_OFF_LS, VMSTATE_POWERING_OFF_LS, false /*fSetRatherThanClearFF*/);
2027 else
2028 vmR3SetStateLocked(pVM, pUVM, VMSTATE_OFF, VMSTATE_POWERING_OFF, false /*fSetRatherThanClearFF*/);
2029 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
2030 }
2031 else if (enmVMState != VMSTATE_GURU_MEDITATION)
2032 {
2033 /** @todo make the state dumping at VMR3PowerOff optional. */
2034 bool fOldBuffered = RTLogRelSetBuffering(true /*fBuffered*/);
2035 RTLogRelPrintf("****************** Guest state at power off for VCpu %u ******************\n", pVCpu->idCpu);
2036 DBGFR3InfoEx(pVM->pUVM, pVCpu->idCpu, "cpumguest", "verbose", DBGFR3InfoLogRelHlp());
2037 RTLogRelPrintf("***\n");
2038 DBGFR3InfoEx(pVM->pUVM, pVCpu->idCpu, "cpumguesthwvirt", "verbose", DBGFR3InfoLogRelHlp());
2039 RTLogRelPrintf("***\n");
2040 DBGFR3InfoEx(pVM->pUVM, pVCpu->idCpu, "mode", NULL, DBGFR3InfoLogRelHlp());
2041 RTLogRelPrintf("***\n");
2042 RTLogRelSetBuffering(fOldBuffered);
2043 RTLogRelPrintf("************** End of Guest state at power off for VCpu %u ***************\n", pVCpu->idCpu);
2044 }
2045
2046 return VINF_EM_OFF;
2047}
2048
2049
2050/**
2051 * Power off the VM.
2052 *
2053 * @returns VBox status code. When called on EMT, this will be a strict status
2054 * code that has to be propagated up the call stack.
2055 *
2056 * @param pUVM The handle of the VM to be powered off.
2057 *
2058 * @thread Any thread.
2059 * @vmstate Suspended, Running, Guru Meditation, Load Failure
2060 * @vmstateto Off or OffLS
2061 */
2062VMMR3DECL(int) VMR3PowerOff(PUVM pUVM)
2063{
2064 LogFlow(("VMR3PowerOff: pUVM=%p\n", pUVM));
2065 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
2066 PVM pVM = pUVM->pVM;
2067 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
2068
2069 /*
2070 * Gather all the EMTs to make sure there are no races before
2071 * changing the VM state.
2072 */
2073 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
2074 vmR3PowerOff, NULL);
2075 LogFlow(("VMR3PowerOff: returns %Rrc\n", rc));
2076 return rc;
2077}
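
/*
 * Usage sketch (illustrative only): the usual frontend shutdown order,
 * powering the VM off before destroying it.
 *
 *     int rc = VMR3PowerOff(pUVM);
 *     if (RT_SUCCESS(rc))
 *         rc = VMR3Destroy(pUVM);
 */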
2078
2079
2080/**
2081 * Destroys the VM.
2082 *
2083 * The VM must be powered off (or never really powered on) to call this
2084 * function. The VM handle is destroyed and can no longer be used upon
2085 * successful return.
2086 *
2087 * @returns VBox status code.
2088 *
2089 * @param pUVM The user mode VM handle.
2090 *
2091 * @thread Any non-emulation thread.
2092 * @vmstate Off, Created
2093 * @vmstateto N/A
2094 */
2095VMMR3DECL(int) VMR3Destroy(PUVM pUVM)
2096{
2097 LogFlow(("VMR3Destroy: pUVM=%p\n", pUVM));
2098
2099 /*
2100 * Validate input.
2101 */
2102 if (!pUVM)
2103 return VERR_INVALID_VM_HANDLE;
2104 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
2105 PVM pVM = pUVM->pVM;
2106 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
2107 AssertLogRelReturn(!VM_IS_EMT(pVM), VERR_VM_THREAD_IS_EMT);
2108
2109 /*
2110 * Change the VM state to destroying and call vmR3Destroy on each of the EMTs
2111 * ending with EMT(0) doing the bulk of the cleanup.
2112 */
2113 int rc = vmR3TrySetState(pVM, "VMR3Destroy", 1, VMSTATE_DESTROYING, VMSTATE_OFF);
2114 if (RT_FAILURE(rc))
2115 return rc;
2116
2117 rc = VMR3ReqCallWait(pVM, VMCPUID_ALL_REVERSE, (PFNRT)vmR3Destroy, 1, pVM);
2118 AssertLogRelRC(rc);
2119
2120 /*
2121 * Wait for EMTs to quit and destroy the UVM.
2122 */
2123 vmR3DestroyUVM(pUVM, 30000);
2124
2125 LogFlow(("VMR3Destroy: returns VINF_SUCCESS\n"));
2126 return VINF_SUCCESS;
2127}
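
/*
 * Usage sketch (illustrative only): a frontend that retained its own UVM
 * reference drops it once the destruction request has completed; the
 * shared VM handle must not be touched after VMR3Destroy succeeds.
 *
 *     int rc = VMR3Destroy(pUVM);
 *     AssertLogRelRC(rc);
 *     VMR3ReleaseUVM(pUVM);
 */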
2128
2129
2130/**
2131 * Internal destruction worker.
2132 *
2133 * This is either called from VMR3Destroy via VMR3ReqCallU or from
2134 * vmR3EmulationThreadWithId when EMT(0) terminates after having called
2135 * VMR3Destroy().
2136 *
2137 * When called on EMT(0), it will perform the great bulk of the destruction.
2138 * When called on the other EMTs, they will do nothing and the whole purpose is
2139 * to return VINF_EM_TERMINATE so they break out of their run loops.
2140 *
2141 * @returns VINF_EM_TERMINATE.
2142 * @param pVM The cross context VM structure.
2143 */
2144DECLCALLBACK(int) vmR3Destroy(PVM pVM)
2145{
2146 PUVM pUVM = pVM->pUVM;
2147 PVMCPU pVCpu = VMMGetCpu(pVM);
2148 Assert(pVCpu);
2149 LogFlow(("vmR3Destroy: pVM=%p pUVM=%p pVCpu=%p idCpu=%u\n", pVM, pUVM, pVCpu, pVCpu->idCpu));
2150
2151 /*
2152 * Only VCPU 0 does the full cleanup (last).
2153 */
2154 if (pVCpu->idCpu == 0)
2155 {
2156 /*
2157 * Dump statistics to the log.
2158 */
2159#if defined(VBOX_WITH_STATISTICS) || defined(LOG_ENABLED)
2160 RTLogFlags(NULL, "nodisabled nobuffered");
2161#endif
2162//#ifdef VBOX_WITH_STATISTICS
2163// STAMR3Dump(pUVM, "*");
2164//#else
2165 LogRel(("************************* Statistics *************************\n"));
2166 STAMR3DumpToReleaseLog(pUVM, "*");
2167 LogRel(("********************* End of statistics **********************\n"));
2168//#endif
2169
2170 /*
2171 * Destroy the VM components.
2172 */
2173 int rc = TMR3Term(pVM);
2174 AssertRC(rc);
2175#ifdef VBOX_WITH_DEBUGGER
2176 rc = DBGCIoTerminate(pUVM, pUVM->vm.s.pvDBGC);
2177 pUVM->vm.s.pvDBGC = NULL;
2178#endif
2179 AssertRC(rc);
2180 rc = PDMR3Term(pVM);
2181 AssertRC(rc);
2182 rc = GIMR3Term(pVM);
2183 AssertRC(rc);
2184 rc = DBGFR3Term(pVM);
2185 AssertRC(rc);
2186 rc = IEMR3Term(pVM);
2187 AssertRC(rc);
2188 rc = EMR3Term(pVM);
2189 AssertRC(rc);
2190 rc = IOMR3Term(pVM);
2191 AssertRC(rc);
2192 rc = TRPMR3Term(pVM);
2193 AssertRC(rc);
2194 rc = SELMR3Term(pVM);
2195 AssertRC(rc);
2196 rc = HMR3Term(pVM);
2197 AssertRC(rc);
2198 rc = NEMR3Term(pVM);
2199 AssertRC(rc);
2200 rc = PGMR3Term(pVM);
2201 AssertRC(rc);
2202 rc = VMMR3Term(pVM); /* Terminates the ring-0 code! */
2203 AssertRC(rc);
2204 rc = CPUMR3Term(pVM);
2205 AssertRC(rc);
2206 SSMR3Term(pVM);
2207 rc = PDMR3CritSectBothTerm(pVM);
2208 AssertRC(rc);
2209 rc = MMR3Term(pVM);
2210 AssertRC(rc);
2211
2212 /*
2213 * We're done, tell the other EMTs to quit.
2214 */
2215 ASMAtomicUoWriteBool(&pUVM->vm.s.fTerminateEMT, true);
2216 ASMAtomicWriteU32(&pVM->fGlobalForcedActions, VM_FF_CHECK_VM_STATE); /* Can't hurt... */
2217 LogFlow(("vmR3Destroy: returning %Rrc\n", VINF_EM_TERMINATE));
2218 }
2219
2220 /*
2221 * Decrement the active EMT count here.
2222 */
2223 PUVMCPU pUVCpu = &pUVM->aCpus[pVCpu->idCpu];
2224 if (!pUVCpu->vm.s.fBeenThruVmDestroy)
2225 {
2226 pUVCpu->vm.s.fBeenThruVmDestroy = true;
2227 ASMAtomicDecU32(&pUVM->vm.s.cActiveEmts);
2228 }
2229 else
2230 AssertFailed();
2231
2232 return VINF_EM_TERMINATE;
2233}
2234
2235
2236/**
2237 * Destroys the UVM portion.
2238 *
2239 * This is called as the final step in the VM destruction or as the cleanup
2240 * in case of a creation failure.
2241 *
2242 * @param pUVM The user mode VM structure.
2243 * @param cMilliesEMTWait The number of milliseconds to wait for the emulation
2244 * threads.
2245 */
2246static void vmR3DestroyUVM(PUVM pUVM, uint32_t cMilliesEMTWait)
2247{
2248 /*
2249 * Signal termination of each of the emulation threads and
2250 * wait for them to complete.
2251 */
2252 /* Signal them - in reverse order since EMT(0) waits for the others. */
2253 ASMAtomicUoWriteBool(&pUVM->vm.s.fTerminateEMT, true);
2254 if (pUVM->pVM)
2255 VM_FF_SET(pUVM->pVM, VM_FF_CHECK_VM_STATE); /* Can't hurt... */
2256 VMCPUID iCpu = pUVM->cCpus;
2257 while (iCpu-- > 0)
2258 {
2259 VMR3NotifyGlobalFFU(pUVM, VMNOTIFYFF_FLAGS_DONE_REM);
2260 RTSemEventSignal(pUVM->aCpus[iCpu].vm.s.EventSemWait);
2261 }
2262
2263 /* Wait for EMT(0), it in turn waits for the rest. */
2264 ASMAtomicUoWriteBool(&pUVM->vm.s.fTerminateEMT, true);
2265
2266 RTTHREAD const hSelf = RTThreadSelf();
2267 RTTHREAD hThread = pUVM->aCpus[0].vm.s.ThreadEMT;
2268 if ( hThread != NIL_RTTHREAD
2269 && hThread != hSelf)
2270 {
2271 int rc2 = RTThreadWait(hThread, RT_MAX(cMilliesEMTWait, 2000), NULL);
2272 if (rc2 == VERR_TIMEOUT) /* avoid the assertion when debugging. */
2273 rc2 = RTThreadWait(hThread, 1000, NULL);
2274 AssertLogRelMsgRC(rc2, ("iCpu=0 rc=%Rrc\n", rc2));
2275 if (RT_SUCCESS(rc2))
2276 pUVM->aCpus[0].vm.s.ThreadEMT = NIL_RTTHREAD;
2277 }
2278
2279 /* Just in case we're in a weird failure situation w/o EMT(0) to do the
2280 waiting, wait for the other EMTs too. */
2281 for (iCpu = 1; iCpu < pUVM->cCpus; iCpu++)
2282 {
2283 ASMAtomicXchgHandle(&pUVM->aCpus[iCpu].vm.s.ThreadEMT, NIL_RTTHREAD, &hThread);
2284 if (hThread != NIL_RTTHREAD)
2285 {
2286 if (hThread != hSelf)
2287 {
2288 int rc2 = RTThreadWait(hThread, 250 /*ms*/, NULL);
2289 AssertLogRelMsgRC(rc2, ("iCpu=%u rc=%Rrc\n", iCpu, rc2));
2290 if (RT_SUCCESS(rc2))
2291 continue;
2292 }
2293 pUVM->aCpus[iCpu].vm.s.ThreadEMT = hThread;
2294 }
2295 }
2296
2297 /* Cleanup the semaphores. */
2298 iCpu = pUVM->cCpus;
2299 while (iCpu-- > 0)
2300 {
2301 RTSemEventDestroy(pUVM->aCpus[iCpu].vm.s.EventSemWait);
2302 pUVM->aCpus[iCpu].vm.s.EventSemWait = NIL_RTSEMEVENT;
2303 }
2304
2305 /*
2306 * Free the event semaphores associated with the request packets.
2307 */
2308 unsigned cReqs = 0;
2309 for (unsigned i = 0; i < RT_ELEMENTS(pUVM->vm.s.apReqFree); i++)
2310 {
2311 PVMREQ pReq = pUVM->vm.s.apReqFree[i];
2312 pUVM->vm.s.apReqFree[i] = NULL;
2313 for (; pReq; pReq = pReq->pNext, cReqs++)
2314 {
2315 pReq->enmState = VMREQSTATE_INVALID;
2316 RTSemEventDestroy(pReq->EventSem);
2317 }
2318 }
2319 Assert(cReqs == pUVM->vm.s.cReqFree); NOREF(cReqs);
2320
2321 /*
2322 * Kill all queued requests. (There really shouldn't be any!)
2323 */
2324 for (unsigned i = 0; i < 10; i++)
2325 {
2326 PVMREQ pReqHead = ASMAtomicXchgPtrT(&pUVM->vm.s.pPriorityReqs, NULL, PVMREQ);
2327 if (!pReqHead)
2328 {
2329 pReqHead = ASMAtomicXchgPtrT(&pUVM->vm.s.pNormalReqs, NULL, PVMREQ);
2330 if (!pReqHead)
2331 break;
2332 }
2333 AssertLogRelMsgFailed(("Requests pending! VMR3Destroy caller has to serialize this.\n"));
2334
2335 for (PVMREQ pReq = pReqHead; pReq; pReq = pReq->pNext)
2336 {
2337 ASMAtomicUoWriteS32(&pReq->iStatus, VERR_VM_REQUEST_KILLED);
2338 ASMAtomicWriteSize(&pReq->enmState, VMREQSTATE_INVALID);
2339 RTSemEventSignal(pReq->EventSem);
2340 RTThreadSleep(2);
2341 RTSemEventDestroy(pReq->EventSem);
2342 }
2343 /* give them a chance to respond before we free the request memory. */
2344 RTThreadSleep(32);
2345 }
2346
2347 /*
2348 * Now all queued VCPU requests (again, there shouldn't be any).
2349 */
2350 for (VMCPUID idCpu = 0; idCpu < pUVM->cCpus; idCpu++)
2351 {
2352 PUVMCPU pUVCpu = &pUVM->aCpus[idCpu];
2353
2354 for (unsigned i = 0; i < 10; i++)
2355 {
2356 PVMREQ pReqHead = ASMAtomicXchgPtrT(&pUVCpu->vm.s.pPriorityReqs, NULL, PVMREQ);
2357 if (!pReqHead)
2358 {
2359 pReqHead = ASMAtomicXchgPtrT(&pUVCpu->vm.s.pNormalReqs, NULL, PVMREQ);
2360 if (!pReqHead)
2361 break;
2362 }
2363 AssertLogRelMsgFailed(("Requests pending! VMR3Destroy caller has to serialize this.\n"));
2364
2365 for (PVMREQ pReq = pReqHead; pReq; pReq = pReq->pNext)
2366 {
2367 ASMAtomicUoWriteS32(&pReq->iStatus, VERR_VM_REQUEST_KILLED);
2368 ASMAtomicWriteSize(&pReq->enmState, VMREQSTATE_INVALID);
2369 RTSemEventSignal(pReq->EventSem);
2370 RTThreadSleep(2);
2371 RTSemEventDestroy(pReq->EventSem);
2372 }
2373 /* give them a chance to respond before we free the request memory. */
2374 RTThreadSleep(32);
2375 }
2376 }
2377
2378 /*
2379 * Make sure the VMMR0.r0 module and whatever else is unloaded.
2380 */
2381 PDMR3TermUVM(pUVM);
2382
2383 RTCritSectDelete(&pUVM->vm.s.AtErrorCritSect);
2384 RTCritSectDelete(&pUVM->vm.s.AtStateCritSect);
2385
2386 /*
2387 * Terminate the support library if initialized.
2388 */
2389 if (pUVM->vm.s.pSession)
2390 {
2391 int rc = SUPR3Term(false /*fForced*/);
2392 AssertRC(rc);
2393 pUVM->vm.s.pSession = NIL_RTR0PTR;
2394 }
2395
2396 /*
2397 * Release the UVM structure reference.
2398 */
2399 VMR3ReleaseUVM(pUVM);
2400
2401 /*
2402 * Clean up and flush logs.
2403 */
2404 RTLogFlush(NULL);
2405}
2406
2407
2408/**
2409 * Worker which checks integrity of some internal structures.
2410 * This is yet another attempt to track down that AVL tree crash.
2411 */
2412static void vmR3CheckIntegrity(PVM pVM)
2413{
2414#ifdef VBOX_STRICT
2415 int rc = PGMR3CheckIntegrity(pVM);
2416 AssertReleaseRC(rc);
2417#else
2418 RT_NOREF_PV(pVM);
2419#endif
2420}
2421
2422
2423/**
2424 * EMT rendezvous worker for VMR3ResetFF for doing soft/warm reset.
2425 *
2426 * @returns VERR_VM_INVALID_VM_STATE, VINF_EM_RESCHEDULE.
2427 * (This is a strict return code, see FNVMMEMTRENDEZVOUS.)
2428 *
2429 * @param pVM The cross context VM structure.
2430 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2431 * @param pvUser The reset flags.
2432 */
2433static DECLCALLBACK(VBOXSTRICTRC) vmR3SoftReset(PVM pVM, PVMCPU pVCpu, void *pvUser)
2434{
2435 uint32_t fResetFlags = *(uint32_t *)pvUser;
2436
2437
2438 /*
2439 * The first EMT will try to change the state to resetting. If this fails,
2440 * we won't get called for the other EMTs.
2441 */
2442 if (pVCpu->idCpu == pVM->cCpus - 1)
2443 {
2444 int rc = vmR3TrySetState(pVM, "vmR3ResetSoft", 3,
2445 VMSTATE_SOFT_RESETTING, VMSTATE_RUNNING,
2446 VMSTATE_SOFT_RESETTING, VMSTATE_SUSPENDED,
2447 VMSTATE_SOFT_RESETTING_LS, VMSTATE_RUNNING_LS);
2448 if (RT_FAILURE(rc))
2449 return rc;
2450 pVM->vm.s.cResets++;
2451 pVM->vm.s.cSoftResets++;
2452 }
2453
2454 /*
2455 * Check the state.
2456 */
2457 VMSTATE enmVMState = VMR3GetState(pVM);
2458 AssertLogRelMsgReturn( enmVMState == VMSTATE_SOFT_RESETTING
2459 || enmVMState == VMSTATE_SOFT_RESETTING_LS,
2460 ("%s\n", VMR3GetStateName(enmVMState)),
2461 VERR_VM_UNEXPECTED_UNSTABLE_STATE);
2462
2463 /*
2464 * EMT(0) does the full cleanup *after* all the other EMTs have been
2465 * thru here and been told to enter the EMSTATE_WAIT_SIPI state.
2466 *
2467 * Because there are per-cpu reset routines and the order may be important,
2468 * the following sequence looks a bit ugly...
2469 */
2470
2471 /* Reset the VCpu state. */
2472 VMCPU_ASSERT_STATE(pVCpu, VMCPUSTATE_STARTED);
2473
2474 /*
2475 * Soft reset the VM components.
2476 */
2477 if (pVCpu->idCpu == 0)
2478 {
2479 PDMR3SoftReset(pVM, fResetFlags);
2480 TRPMR3Reset(pVM);
2481 CPUMR3Reset(pVM); /* This must come *after* PDM (due to APIC base MSR caching). */
2482 EMR3Reset(pVM);
2483 HMR3Reset(pVM); /* This must come *after* PATM, CSAM, CPUM, SELM and TRPM. */
2484 NEMR3Reset(pVM);
2485
2486 /*
2487 * Since EMT(0) is the last to go thru here, it will advance the state.
2488 * (Unlike vmR3HardReset we won't be doing any suspending of live
2489 * migration VMs here since memory is unchanged.)
2490 */
2491 PUVM pUVM = pVM->pUVM;
2492 RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
2493 enmVMState = pVM->enmVMState;
2494 if (enmVMState == VMSTATE_SOFT_RESETTING)
2495 {
2496 if (pUVM->vm.s.enmPrevVMState == VMSTATE_SUSPENDED)
2497 vmR3SetStateLocked(pVM, pUVM, VMSTATE_SUSPENDED, VMSTATE_SOFT_RESETTING, false /*fSetRatherThanClearFF*/);
2498 else
2499 vmR3SetStateLocked(pVM, pUVM, VMSTATE_RUNNING, VMSTATE_SOFT_RESETTING, false /*fSetRatherThanClearFF*/);
2500 }
2501 else
2502 vmR3SetStateLocked(pVM, pUVM, VMSTATE_RUNNING_LS, VMSTATE_SOFT_RESETTING_LS, false /*fSetRatherThanClearFF*/);
2503 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
2504 }
2505
2506 return VINF_EM_RESCHEDULE;
2507}
2508
2509
2510/**
2511 * EMT rendezvous worker for VMR3Reset and VMR3ResetFF.
2512 *
2513 * This is called by the emulation threads as a response to the reset request
2514 * issued by VMR3Reset().
2515 *
2516 * @returns VERR_VM_INVALID_VM_STATE, VINF_EM_RESET or VINF_EM_SUSPEND. (This
2517 * is a strict return code, see FNVMMEMTRENDEZVOUS.)
2518 *
2519 * @param pVM The cross context VM structure.
2520 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2521 * @param pvUser Ignored.
2522 */
2523static DECLCALLBACK(VBOXSTRICTRC) vmR3HardReset(PVM pVM, PVMCPU pVCpu, void *pvUser)
2524{
2525 Assert(!pvUser); NOREF(pvUser);
2526
2527 /*
2528 * The first EMT will try to change the state to resetting. If this fails,
2529 * we won't get called for the other EMTs.
2530 */
2531 if (pVCpu->idCpu == pVM->cCpus - 1)
2532 {
2533 int rc = vmR3TrySetState(pVM, "vmR3HardReset", 3,
2534 VMSTATE_RESETTING, VMSTATE_RUNNING,
2535 VMSTATE_RESETTING, VMSTATE_SUSPENDED,
2536 VMSTATE_RESETTING_LS, VMSTATE_RUNNING_LS);
2537 if (RT_FAILURE(rc))
2538 return rc;
2539 pVM->vm.s.cResets++;
2540 pVM->vm.s.cHardResets++;
2541 }
2542
2543 /*
2544 * Check the state.
2545 */
2546 VMSTATE enmVMState = VMR3GetState(pVM);
2547 AssertLogRelMsgReturn( enmVMState == VMSTATE_RESETTING
2548 || enmVMState == VMSTATE_RESETTING_LS,
2549 ("%s\n", VMR3GetStateName(enmVMState)),
2550 VERR_VM_UNEXPECTED_UNSTABLE_STATE);
2551
2552 /*
2553 * EMT(0) does the full cleanup *after* all the other EMTs have been
2554 * thru here and been told to enter the EMSTATE_WAIT_SIPI state.
2555 *
2556 * Because there are per-cpu reset routines and the order may be important,
2557 * the following sequence looks a bit ugly...
2558 */
2559 if (pVCpu->idCpu == 0)
2560 vmR3CheckIntegrity(pVM);
2561
2562 /* Reset the VCpu state. */
2563 VMCPU_ASSERT_STATE(pVCpu, VMCPUSTATE_STARTED);
2564
2565 /* Clear all pending forced actions. */
2566 VMCPU_FF_CLEAR_MASK(pVCpu, VMCPU_FF_ALL_MASK & ~VMCPU_FF_REQUEST);
2567
2568 /*
2569 * Reset the VM components.
2570 */
2571 if (pVCpu->idCpu == 0)
2572 {
2573 GIMR3Reset(pVM); /* This must come *before* PDM and TM. */
2574 PDMR3Reset(pVM);
2575 PGMR3Reset(pVM);
2576 SELMR3Reset(pVM);
2577 TRPMR3Reset(pVM);
2578 IOMR3Reset(pVM);
2579 CPUMR3Reset(pVM); /* This must come *after* PDM (due to APIC base MSR caching). */
2580 TMR3Reset(pVM);
2581 EMR3Reset(pVM);
2582 HMR3Reset(pVM); /* This must come *after* PATM, CSAM, CPUM, SELM and TRPM. */
2583 NEMR3Reset(pVM);
2584
2585 /*
2586 * Do memory setup.
2587 */
2588 PGMR3MemSetup(pVM, true /*fAtReset*/);
2589 PDMR3MemSetup(pVM, true /*fAtReset*/);
2590
2591 /*
2592 * Since EMT(0) is the last to go thru here, it will advance the state.
2593 * When a live save is active, we will move on to SuspendingLS but
2594 * leave it for VMR3Reset to do the actual suspending due to deadlock risks.
2595 */
2596 PUVM pUVM = pVM->pUVM;
2597 RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
2598 enmVMState = pVM->enmVMState;
2599 if (enmVMState == VMSTATE_RESETTING)
2600 {
2601 if (pUVM->vm.s.enmPrevVMState == VMSTATE_SUSPENDED)
2602 vmR3SetStateLocked(pVM, pUVM, VMSTATE_SUSPENDED, VMSTATE_RESETTING, false /*fSetRatherThanClearFF*/);
2603 else
2604 vmR3SetStateLocked(pVM, pUVM, VMSTATE_RUNNING, VMSTATE_RESETTING, false /*fSetRatherThanClearFF*/);
2605 }
2606 else
2607 vmR3SetStateLocked(pVM, pUVM, VMSTATE_SUSPENDING_LS, VMSTATE_RESETTING_LS, false /*fSetRatherThanClearFF*/);
2608 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
2609
2610 vmR3CheckIntegrity(pVM);
2611
2612 /*
2613 * Do the suspend bit as well.
2614 * It only requires some EMT(0) work at present.
2615 */
2616 if (enmVMState != VMSTATE_RESETTING)
2617 {
2618 vmR3SuspendDoWork(pVM);
2619 vmR3SetState(pVM, VMSTATE_SUSPENDED_LS, VMSTATE_SUSPENDING_LS);
2620 }
2621 }
2622
2623 return enmVMState == VMSTATE_RESETTING
2624 ? VINF_EM_RESET
2625 : VINF_EM_SUSPEND; /** @todo VINF_EM_SUSPEND has lower priority than VINF_EM_RESET, so fix races. Perhaps add a new code for this combined case. */
2626}
2627
2628
2629/**
2630 * Internal worker for VMR3Reset, VMR3ResetFF, VMR3TripleFault.
2631 *
2632 * @returns VBox status code.
2633 * @param pVM The cross context VM structure.
2634 * @param fHardReset Whether it's a hard reset or not.
2635 * @param fResetFlags The reset flags (PDMVMRESET_F_XXX).
2636 */
2637static VBOXSTRICTRC vmR3ResetCommon(PVM pVM, bool fHardReset, uint32_t fResetFlags)
2638{
2639 LogFlow(("vmR3ResetCommon: fHardReset=%RTbool fResetFlags=%#x\n", fHardReset, fResetFlags));
2640 int rc;
2641 if (fHardReset)
2642 {
2643 /*
2644 * Hard reset.
2645 */
2646 /* Check whether we're supposed to power off instead of resetting. */
2647 if (pVM->vm.s.fPowerOffInsteadOfReset)
2648 {
2649 PUVM pUVM = pVM->pUVM;
2650 if ( pUVM->pVmm2UserMethods
2651 && pUVM->pVmm2UserMethods->pfnNotifyResetTurnedIntoPowerOff)
2652 pUVM->pVmm2UserMethods->pfnNotifyResetTurnedIntoPowerOff(pUVM->pVmm2UserMethods, pUVM);
2653 return VMR3PowerOff(pUVM);
2654 }
2655
2656 /* Gather all the EMTs to make sure there are no races before changing
2657 the VM state. */
2658 rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
2659 vmR3HardReset, NULL);
2660 }
2661 else
2662 {
2663 /*
2664 * Soft reset. Since we only support this with a single CPU active,
2665 * we must be on EMT #0 here.
2666 */
2667 VM_ASSERT_EMT0(pVM);
2668 rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
2669 vmR3SoftReset, &fResetFlags);
2670 }
2671
2672 LogFlow(("vmR3ResetCommon: returns %Rrc\n", rc));
2673 return rc;
2674}
2675
2676
2677
2678/**
2679 * Reset the current VM.
2680 *
2681 * @returns VBox status code.
2682 * @param pUVM The VM to reset.
2683 */
2684VMMR3DECL(int) VMR3Reset(PUVM pUVM)
2685{
2686 LogFlow(("VMR3Reset:\n"));
2687 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
2688 PVM pVM = pUVM->pVM;
2689 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
2690
2691 return VBOXSTRICTRC_VAL(vmR3ResetCommon(pVM, true, 0));
2692}
2693
2694
2695/**
2696 * Handle the reset force flag or triple fault.
2697 *
2698 * This handles both soft and hard resets (see PDMVMRESET_F_XXX).
2699 *
2700 * @returns VBox status code.
2701 * @param pVM The cross context VM structure.
2702 * @thread EMT
2703 *
2704 * @remarks Caller is expected to clear the VM_FF_RESET force flag.
2705 */
2706VMMR3_INT_DECL(VBOXSTRICTRC) VMR3ResetFF(PVM pVM)
2707{
2708 LogFlow(("VMR3ResetFF:\n"));
2709
2710 /*
2711 * First consult the firmware on whether this is a hard or soft reset.
2712 */
2713 uint32_t fResetFlags;
2714 bool fHardReset = PDMR3GetResetInfo(pVM, 0 /*fOverride*/, &fResetFlags);
2715 return vmR3ResetCommon(pVM, fHardReset, fResetFlags);
2716}
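
/*
 * Usage sketch (illustrative only): how an EMT run loop is expected to
 * feed this function, clearing VM_FF_RESET first as the remark above
 * requires; rcStrict is an assumed local in the caller.
 *
 *     if (VM_FF_TEST_AND_CLEAR(pVM, VM_FF_RESET))
 *         rcStrict = VMR3ResetFF(pVM);
 */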
2717
2718
2719/**
2720 * For handling a CPU reset on triple fault.
2721 *
2722 * According to one mainboard manual, a CPU triple fault causes the 286 CPU to
2723 * send a SHUTDOWN signal to the chipset. The chipset responds by sending a
2724 * RESET signal to the CPU. So, it should be very similar to a soft/warm reset.
2725 *
2726 * @returns VBox status code.
2727 * @param pVM The cross context VM structure.
2728 * @thread EMT
2729 */
2730VMMR3_INT_DECL(VBOXSTRICTRC) VMR3ResetTripleFault(PVM pVM)
2731{
2732 LogFlow(("VMR3ResetTripleFault:\n"));
2733
2734 /*
2735 * First consult the firmware on whether this is a hard or soft reset.
2736 */
2737 uint32_t fResetFlags;
2738 bool fHardReset = PDMR3GetResetInfo(pVM, PDMVMRESET_F_TRIPLE_FAULT, &fResetFlags);
2739 return vmR3ResetCommon(pVM, fHardReset, fResetFlags);
2740}
2741
2742
2743/**
2744 * Gets the user mode VM structure pointer given a pointer to the VM.
2745 *
2746 * @returns Pointer to the user mode VM structure on success. NULL if @a pVM is
2747 * invalid (asserted).
2748 * @param pVM The cross context VM structure.
2749 * @sa VMR3GetVM, VMR3RetainUVM
2750 */
2751VMMR3DECL(PUVM) VMR3GetUVM(PVM pVM)
2752{
2753 VM_ASSERT_VALID_EXT_RETURN(pVM, NULL);
2754 return pVM->pUVM;
2755}
2756
2757
2758/**
2759 * Gets the shared VM structure pointer given the pointer to the user mode VM
2760 * structure.
2761 *
2762 * @returns Pointer to the VM.
2763 * NULL if @a pUVM is invalid (asserted) or if no shared VM structure
2764 * is currently associated with it.
2765 * @param pUVM The user mode VM handle.
2766 * @sa VMR3GetUVM
2767 */
2768VMMR3DECL(PVM) VMR3GetVM(PUVM pUVM)
2769{
2770 UVM_ASSERT_VALID_EXT_RETURN(pUVM, NULL);
2771 return pUVM->pVM;
2772}
2773
2774
2775/**
2776 * Retain the user mode VM handle.
2777 *
2778 * @returns Reference count.
2779 * UINT32_MAX if @a pUVM is invalid.
2780 *
2781 * @param pUVM The user mode VM handle.
2782 * @sa VMR3ReleaseUVM
2783 */
2784VMMR3DECL(uint32_t) VMR3RetainUVM(PUVM pUVM)
2785{
2786 UVM_ASSERT_VALID_EXT_RETURN(pUVM, UINT32_MAX);
2787 uint32_t cRefs = ASMAtomicIncU32(&pUVM->vm.s.cUvmRefs);
2788 AssertMsg(cRefs > 0 && cRefs < _64K, ("%u\n", cRefs));
2789 return cRefs;
2790}
2791
2792
2793/**
2794 * Does the final release of the UVM structure.
2795 *
2796 * @param pUVM The user mode VM handle.
2797 */
2798static void vmR3DoReleaseUVM(PUVM pUVM)
2799{
2800 /*
2801 * Free the UVM.
2802 */
2803 Assert(!pUVM->pVM);
2804
2805 MMR3HeapFree(pUVM->vm.s.pszName);
2806 pUVM->vm.s.pszName = NULL;
2807
2808 MMR3TermUVM(pUVM);
2809 STAMR3TermUVM(pUVM);
2810
2811 ASMAtomicUoWriteU32(&pUVM->u32Magic, UINT32_MAX);
2812 RTTlsFree(pUVM->vm.s.idxTLS);
2813 RTMemPageFree(pUVM, RT_UOFFSETOF_DYN(UVM, aCpus[pUVM->cCpus]));
2814}
2815
2816
2817/**
2818 * Releases a reference to the user mode VM handle.
2819 *
2820 * @returns The new reference count, 0 if destroyed.
2821 * UINT32_MAX if @a pUVM is invalid.
2822 *
2823 * @param pUVM The user mode VM handle.
2824 * @sa VMR3RetainUVM
2825 */
2826VMMR3DECL(uint32_t) VMR3ReleaseUVM(PUVM pUVM)
2827{
2828 if (!pUVM)
2829 return 0;
2830 UVM_ASSERT_VALID_EXT_RETURN(pUVM, UINT32_MAX);
2831 uint32_t cRefs = ASMAtomicDecU32(&pUVM->vm.s.cUvmRefs);
2832 if (!cRefs)
2833 vmR3DoReleaseUVM(pUVM);
2834 else
2835 AssertMsg(cRefs < _64K, ("%u\n", cRefs));
2836 return cRefs;
2837}
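
/*
 * Usage sketch (illustrative only): the retain/release pairing for code
 * that caches the user mode VM handle, using the accessors defined above.
 *
 *     uint32_t cRefs = VMR3RetainUVM(pUVM);
 *     LogRel(("Using VM '%s', %u refs\n", VMR3GetName(pUVM), cRefs));
 *     VMR3ReleaseUVM(pUVM);
 */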
2838
2839
2840/**
2841 * Gets the VM name.
2842 *
2843 * @returns Pointer to a read-only string containing the name. NULL if called
2844 * too early.
2845 * @param pUVM The user mode VM handle.
2846 */
2847VMMR3DECL(const char *) VMR3GetName(PUVM pUVM)
2848{
2849 UVM_ASSERT_VALID_EXT_RETURN(pUVM, NULL);
2850 return pUVM->vm.s.pszName;
2851}
2852
2853
2854/**
2855 * Gets the VM UUID.
2856 *
2857 * @returns pUuid on success, NULL on failure.
2858 * @param pUVM The user mode VM handle.
2859 * @param pUuid Where to store the UUID.
2860 */
2861VMMR3DECL(PRTUUID) VMR3GetUuid(PUVM pUVM, PRTUUID pUuid)
2862{
2863 UVM_ASSERT_VALID_EXT_RETURN(pUVM, NULL);
2864 AssertPtrReturn(pUuid, NULL);
2865
2866 *pUuid = pUVM->vm.s.Uuid;
2867 return pUuid;
2868}
2869
2870
2871/**
2872 * Gets the current VM state.
2873 *
2874 * @returns The current VM state.
2875 * @param pVM The cross context VM structure.
2876 * @thread Any
2877 */
2878VMMR3DECL(VMSTATE) VMR3GetState(PVM pVM)
2879{
2880 AssertMsgReturn(RT_VALID_ALIGNED_PTR(pVM, HOST_PAGE_SIZE), ("%p\n", pVM), VMSTATE_TERMINATED);
2881 VMSTATE enmVMState = pVM->enmVMState;
2882 return enmVMState >= VMSTATE_CREATING && enmVMState <= VMSTATE_TERMINATED ? enmVMState : VMSTATE_TERMINATED;
2883}
2884
2885
2886/**
2887 * Gets the current VM state.
2888 *
2889 * @returns The current VM state.
2890 * @param pUVM The user-mode VM handle.
2891 * @thread Any
2892 */
2893VMMR3DECL(VMSTATE) VMR3GetStateU(PUVM pUVM)
2894{
2895 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VMSTATE_TERMINATED);
2896 if (RT_UNLIKELY(!pUVM->pVM))
2897 return VMSTATE_TERMINATED;
2898 return pUVM->pVM->enmVMState;
2899}
2900
2901
2902/**
2903 * Gets the state name string for a VM state.
2904 *
2905 * @returns Pointer to the state name. (readonly)
2906 * @param enmState The state.
2907 */
2908VMMR3DECL(const char *) VMR3GetStateName(VMSTATE enmState)
2909{
2910 switch (enmState)
2911 {
2912 case VMSTATE_CREATING: return "CREATING";
2913 case VMSTATE_CREATED: return "CREATED";
2914 case VMSTATE_LOADING: return "LOADING";
2915 case VMSTATE_POWERING_ON: return "POWERING_ON";
2916 case VMSTATE_RESUMING: return "RESUMING";
2917 case VMSTATE_RUNNING: return "RUNNING";
2918 case VMSTATE_RUNNING_LS: return "RUNNING_LS";
2919 case VMSTATE_RESETTING: return "RESETTING";
2920 case VMSTATE_RESETTING_LS: return "RESETTING_LS";
2921 case VMSTATE_SOFT_RESETTING: return "SOFT_RESETTING";
2922 case VMSTATE_SOFT_RESETTING_LS: return "SOFT_RESETTING_LS";
2923 case VMSTATE_SUSPENDED: return "SUSPENDED";
2924 case VMSTATE_SUSPENDED_LS: return "SUSPENDED_LS";
2925 case VMSTATE_SUSPENDED_EXT_LS: return "SUSPENDED_EXT_LS";
2926 case VMSTATE_SUSPENDING: return "SUSPENDING";
2927 case VMSTATE_SUSPENDING_LS: return "SUSPENDING_LS";
2928 case VMSTATE_SUSPENDING_EXT_LS: return "SUSPENDING_EXT_LS";
2929 case VMSTATE_SAVING: return "SAVING";
2930 case VMSTATE_DEBUGGING: return "DEBUGGING";
2931 case VMSTATE_DEBUGGING_LS: return "DEBUGGING_LS";
2932 case VMSTATE_POWERING_OFF: return "POWERING_OFF";
2933 case VMSTATE_POWERING_OFF_LS: return "POWERING_OFF_LS";
2934 case VMSTATE_FATAL_ERROR: return "FATAL_ERROR";
2935 case VMSTATE_FATAL_ERROR_LS: return "FATAL_ERROR_LS";
2936 case VMSTATE_GURU_MEDITATION: return "GURU_MEDITATION";
2937 case VMSTATE_GURU_MEDITATION_LS:return "GURU_MEDITATION_LS";
2938 case VMSTATE_LOAD_FAILURE: return "LOAD_FAILURE";
2939 case VMSTATE_OFF: return "OFF";
2940 case VMSTATE_OFF_LS: return "OFF_LS";
2941 case VMSTATE_DESTROYING: return "DESTROYING";
2942 case VMSTATE_TERMINATED: return "TERMINATED";
2943
2944 default:
2945 AssertMsgFailed(("Unknown state %d\n", enmState));
2946 return "Unknown!\n";
2947 }
2948}
2949
2950
2951/**
2952 * Validates the state transition in strict builds.
2953 *
2954 * @returns true if valid, false if not.
2955 *
2956 * @param enmStateOld The old (current) state.
2957 * @param enmStateNew The proposed new state.
2958 *
2959 * @remarks The reference for this is found in doc/vp/VMM.vpp, the VMSTATE
2960 * diagram (under State Machine Diagram).
2961 */
2962static bool vmR3ValidateStateTransition(VMSTATE enmStateOld, VMSTATE enmStateNew)
2963{
2964#ifndef VBOX_STRICT
2965 RT_NOREF2(enmStateOld, enmStateNew);
2966#else
2967 switch (enmStateOld)
2968 {
2969 case VMSTATE_CREATING:
2970 AssertMsgReturn(enmStateNew == VMSTATE_CREATED, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
2971 break;
2972
2973 case VMSTATE_CREATED:
2974 AssertMsgReturn( enmStateNew == VMSTATE_LOADING
2975 || enmStateNew == VMSTATE_POWERING_ON
2976 || enmStateNew == VMSTATE_POWERING_OFF
2977 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
2978 break;
2979
2980 case VMSTATE_LOADING:
2981 AssertMsgReturn( enmStateNew == VMSTATE_SUSPENDED
2982 || enmStateNew == VMSTATE_LOAD_FAILURE
2983 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
2984 break;
2985
2986 case VMSTATE_POWERING_ON:
2987 AssertMsgReturn( enmStateNew == VMSTATE_RUNNING
2988 /*|| enmStateNew == VMSTATE_FATAL_ERROR ?*/
2989 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
2990 break;
2991
2992 case VMSTATE_RESUMING:
2993 AssertMsgReturn( enmStateNew == VMSTATE_RUNNING
2994 /*|| enmStateNew == VMSTATE_FATAL_ERROR ?*/
2995 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
2996 break;
2997
2998 case VMSTATE_RUNNING:
2999 AssertMsgReturn( enmStateNew == VMSTATE_POWERING_OFF
3000 || enmStateNew == VMSTATE_SUSPENDING
3001 || enmStateNew == VMSTATE_RESETTING
3002 || enmStateNew == VMSTATE_SOFT_RESETTING
3003 || enmStateNew == VMSTATE_RUNNING_LS
3004 || enmStateNew == VMSTATE_DEBUGGING
3005 || enmStateNew == VMSTATE_FATAL_ERROR
3006 || enmStateNew == VMSTATE_GURU_MEDITATION
3007 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3008 break;
3009
3010 case VMSTATE_RUNNING_LS:
3011 AssertMsgReturn( enmStateNew == VMSTATE_POWERING_OFF_LS
3012 || enmStateNew == VMSTATE_SUSPENDING_LS
3013 || enmStateNew == VMSTATE_SUSPENDING_EXT_LS
3014 || enmStateNew == VMSTATE_RESETTING_LS
3015 || enmStateNew == VMSTATE_SOFT_RESETTING_LS
3016 || enmStateNew == VMSTATE_RUNNING
3017 || enmStateNew == VMSTATE_DEBUGGING_LS
3018 || enmStateNew == VMSTATE_FATAL_ERROR_LS
3019 || enmStateNew == VMSTATE_GURU_MEDITATION_LS
3020 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3021 break;
3022
3023 case VMSTATE_RESETTING:
3024 AssertMsgReturn(enmStateNew == VMSTATE_RUNNING, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3025 break;
3026
3027 case VMSTATE_SOFT_RESETTING:
3028 AssertMsgReturn(enmStateNew == VMSTATE_RUNNING, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3029 break;
3030
3031 case VMSTATE_RESETTING_LS:
3032 AssertMsgReturn( enmStateNew == VMSTATE_SUSPENDING_LS
3033 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3034 break;
3035
3036 case VMSTATE_SOFT_RESETTING_LS:
3037 AssertMsgReturn( enmStateNew == VMSTATE_RUNNING_LS
3038 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3039 break;
3040
3041 case VMSTATE_SUSPENDING:
3042 AssertMsgReturn(enmStateNew == VMSTATE_SUSPENDED, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3043 break;
3044
3045 case VMSTATE_SUSPENDING_LS:
3046 AssertMsgReturn( enmStateNew == VMSTATE_SUSPENDING
3047 || enmStateNew == VMSTATE_SUSPENDED_LS
3048 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3049 break;
3050
3051 case VMSTATE_SUSPENDING_EXT_LS:
3052 AssertMsgReturn( enmStateNew == VMSTATE_SUSPENDING
3053 || enmStateNew == VMSTATE_SUSPENDED_EXT_LS
3054 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3055 break;
3056
3057 case VMSTATE_SUSPENDED:
3058 AssertMsgReturn( enmStateNew == VMSTATE_POWERING_OFF
3059 || enmStateNew == VMSTATE_SAVING
3060 || enmStateNew == VMSTATE_RESETTING
3061 || enmStateNew == VMSTATE_SOFT_RESETTING
3062 || enmStateNew == VMSTATE_RESUMING
3063 || enmStateNew == VMSTATE_LOADING
3064 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3065 break;
3066
3067 case VMSTATE_SUSPENDED_LS:
3068 AssertMsgReturn( enmStateNew == VMSTATE_SUSPENDED
3069 || enmStateNew == VMSTATE_SAVING
3070 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3071 break;
3072
3073 case VMSTATE_SUSPENDED_EXT_LS:
3074 AssertMsgReturn( enmStateNew == VMSTATE_SUSPENDED
3075 || enmStateNew == VMSTATE_SAVING
3076 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3077 break;
3078
3079 case VMSTATE_SAVING:
3080 AssertMsgReturn(enmStateNew == VMSTATE_SUSPENDED, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3081 break;
3082
3083 case VMSTATE_DEBUGGING:
3084 AssertMsgReturn( enmStateNew == VMSTATE_RUNNING
3085 || enmStateNew == VMSTATE_POWERING_OFF
3086 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3087 break;
3088
3089 case VMSTATE_DEBUGGING_LS:
3090 AssertMsgReturn( enmStateNew == VMSTATE_DEBUGGING
3091 || enmStateNew == VMSTATE_RUNNING_LS
3092 || enmStateNew == VMSTATE_POWERING_OFF_LS
3093 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3094 break;
3095
3096 case VMSTATE_POWERING_OFF:
3097 AssertMsgReturn(enmStateNew == VMSTATE_OFF, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3098 break;
3099
3100 case VMSTATE_POWERING_OFF_LS:
3101 AssertMsgReturn( enmStateNew == VMSTATE_POWERING_OFF
3102 || enmStateNew == VMSTATE_OFF_LS
3103 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3104 break;
3105
3106 case VMSTATE_OFF:
3107 AssertMsgReturn(enmStateNew == VMSTATE_DESTROYING, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3108 break;
3109
3110 case VMSTATE_OFF_LS:
3111 AssertMsgReturn(enmStateNew == VMSTATE_OFF, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3112 break;
3113
3114 case VMSTATE_FATAL_ERROR:
3115 AssertMsgReturn(enmStateNew == VMSTATE_POWERING_OFF, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3116 break;
3117
3118 case VMSTATE_FATAL_ERROR_LS:
3119 AssertMsgReturn( enmStateNew == VMSTATE_FATAL_ERROR
3120 || enmStateNew == VMSTATE_POWERING_OFF_LS
3121 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3122 break;
3123
3124 case VMSTATE_GURU_MEDITATION:
3125 AssertMsgReturn( enmStateNew == VMSTATE_DEBUGGING
3126 || enmStateNew == VMSTATE_POWERING_OFF
3127 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3128 break;
3129
3130 case VMSTATE_GURU_MEDITATION_LS:
3131 AssertMsgReturn( enmStateNew == VMSTATE_GURU_MEDITATION
3132 || enmStateNew == VMSTATE_DEBUGGING_LS
3133 || enmStateNew == VMSTATE_POWERING_OFF_LS
3134 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3135 break;
3136
3137 case VMSTATE_LOAD_FAILURE:
3138 AssertMsgReturn(enmStateNew == VMSTATE_POWERING_OFF, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3139 break;
3140
3141 case VMSTATE_DESTROYING:
3142 AssertMsgReturn(enmStateNew == VMSTATE_TERMINATED, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3143 break;
3144
3145 case VMSTATE_TERMINATED:
3146 default:
3147 AssertMsgFailedReturn(("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3148 break;
3149 }
3150#endif /* VBOX_STRICT */
3151 return true;
3152}
3153
3154
3155/**
3156 * Does the state change callouts.
3157 *
3158 * The caller owns the AtStateCritSect.
3159 *
3160 * @param pVM The cross context VM structure.
3161 * @param pUVM The UVM handle.
3162 * @param enmStateNew The new state.
3163 * @param enmStateOld The old state.
3164 */
3165static void vmR3DoAtState(PVM pVM, PUVM pUVM, VMSTATE enmStateNew, VMSTATE enmStateOld)
3166{
3167 LogRel(("Changing the VM state from '%s' to '%s'\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)));
3168
3169 for (PVMATSTATE pCur = pUVM->vm.s.pAtState; pCur; pCur = pCur->pNext)
3170 {
3171 pCur->pfnAtState(pUVM, VMMR3GetVTable(), enmStateNew, enmStateOld, pCur->pvUser);
3172 if ( enmStateNew != VMSTATE_DESTROYING
3173 && pVM->enmVMState == VMSTATE_DESTROYING)
3174 break;
3175 AssertMsg(pVM->enmVMState == enmStateNew,
3176 ("You are not allowed to change the state while in the change callback, except "
3177 "from destroying the VM. There are restrictions in the way the state changes "
3178 "are propagated up to the EM execution loop and it makes the program flow very "
3179 "difficult to follow. (%s, expected %s, old %s)\n",
3180 VMR3GetStateName(pVM->enmVMState), VMR3GetStateName(enmStateNew),
3181 VMR3GetStateName(enmStateOld)));
3182 }
3183}
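
/*
 * Callback shape sketch (illustrative only): what a listener driven by the
 * loop above looks like, matching the pfnAtState invocation. Note the
 * restriction spelled out in the assertion - it must not change the state
 * itself except when destroying. The name myAtState is hypothetical.
 *
 *     static DECLCALLBACK(void) myAtState(PUVM pUVM, PCVMMR3VTABLE pVMM,
 *                                         VMSTATE enmState, VMSTATE enmOldState,
 *                                         void *pvUser)
 *     {
 *         LogRel(("myAtState: %s -> %s\n",
 *                 VMR3GetStateName(enmOldState), VMR3GetStateName(enmState)));
 *         RT_NOREF(pUVM, pVMM, pvUser);
 *     }
 */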
3184
3185
3186/**
3187 * Sets the current VM state, with the AtStateCritSect already entered.
3188 *
3189 * @param pVM The cross context VM structure.
3190 * @param pUVM The UVM handle.
3191 * @param enmStateNew The new state.
3192 * @param enmStateOld The old state.
3193 * @param fSetRatherThanClearFF The usual behavior is to clear the
3194 * VM_FF_CHECK_VM_STATE force flag, but for
3195 * some transitions (-> guru) we need to kick
3196 * the other EMTs to stop what they're doing.
3197 */
3198static void vmR3SetStateLocked(PVM pVM, PUVM pUVM, VMSTATE enmStateNew, VMSTATE enmStateOld, bool fSetRatherThanClearFF)
3199{
3200 vmR3ValidateStateTransition(enmStateOld, enmStateNew);
3201
3202 AssertMsg(pVM->enmVMState == enmStateOld,
3203 ("%s != %s\n", VMR3GetStateName(pVM->enmVMState), VMR3GetStateName(enmStateOld)));
3204
3205 pUVM->vm.s.enmPrevVMState = enmStateOld;
3206 pVM->enmVMState = enmStateNew;
3207
3208 if (!fSetRatherThanClearFF)
3209 VM_FF_CLEAR(pVM, VM_FF_CHECK_VM_STATE);
3210 else if (pVM->cCpus > 0)
3211 VM_FF_SET(pVM, VM_FF_CHECK_VM_STATE);
3212
3213 vmR3DoAtState(pVM, pUVM, enmStateNew, enmStateOld);
3214}
3215
3216
3217/**
3218 * Sets the current VM state.
3219 *
3220 * @param pVM The cross context VM structure.
3221 * @param enmStateNew The new state.
3222 * @param enmStateOld The old state (for asserting only).
3223 */
3224static void vmR3SetState(PVM pVM, VMSTATE enmStateNew, VMSTATE enmStateOld)
3225{
3226 PUVM pUVM = pVM->pUVM;
3227 RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
3228
3229 RT_NOREF_PV(enmStateOld);
3230 AssertMsg(pVM->enmVMState == enmStateOld,
3231 ("%s != %s\n", VMR3GetStateName(pVM->enmVMState), VMR3GetStateName(enmStateOld)));
3232 vmR3SetStateLocked(pVM, pUVM, enmStateNew, pVM->enmVMState, false /*fSetRatherThanClearFF*/);
3233
3234 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
3235}
3236
3237
3238/**
3239 * Tries to perform a state transition.
3240 *
3241 * @returns The 1-based ordinal of the succeeding transition.
3242 * VERR_VM_INVALID_VM_STATE and Assert+LogRel on failure.
3243 *
3244 * @param pVM The cross context VM structure.
3245 * @param pszWho Who is trying to change it.
3246 * @param cTransitions The number of transitions in the ellipsis.
3247 * @param ... Transition pairs; new, old.
3248 */
3249static int vmR3TrySetState(PVM pVM, const char *pszWho, unsigned cTransitions, ...)
3250{
3251 va_list va;
3252 VMSTATE enmStateNew = VMSTATE_CREATED;
3253 VMSTATE enmStateOld = VMSTATE_CREATED;
3254
3255#ifdef VBOX_STRICT
3256 /*
3257 * Validate the input first.
3258 */
3259 va_start(va, cTransitions);
3260 for (unsigned i = 0; i < cTransitions; i++)
3261 {
3262 enmStateNew = (VMSTATE)va_arg(va, /*VMSTATE*/int);
3263 enmStateOld = (VMSTATE)va_arg(va, /*VMSTATE*/int);
3264 vmR3ValidateStateTransition(enmStateOld, enmStateNew);
3265 }
3266 va_end(va);
3267#endif
3268
3269 /*
3270 * Grab the lock and see if any of the proposed transitions works out.
3271 */
3272 va_start(va, cTransitions);
3273 int rc = VERR_VM_INVALID_VM_STATE;
3274 PUVM pUVM = pVM->pUVM;
3275 RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
3276
3277 VMSTATE enmStateCur = pVM->enmVMState;
3278
3279 for (unsigned i = 0; i < cTransitions; i++)
3280 {
3281 enmStateNew = (VMSTATE)va_arg(va, /*VMSTATE*/int);
3282 enmStateOld = (VMSTATE)va_arg(va, /*VMSTATE*/int);
3283 if (enmStateCur == enmStateOld)
3284 {
3285 vmR3SetStateLocked(pVM, pUVM, enmStateNew, enmStateOld, false /*fSetRatherThanClearFF*/);
3286 rc = i + 1;
3287 break;
3288 }
3289 }
3290
3291 if (RT_FAILURE(rc))
3292 {
3293 /*
3294 * Complain about it.
3295 */
3296 const char * const pszStateCur = VMR3GetStateName(enmStateCur);
3297 if (cTransitions == 1)
3298 {
3299 LogRel(("%s: %s -> %s failed, because the VM state is actually %s!\n",
3300 pszWho, VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew), pszStateCur));
3301 VMSetError(pVM, VERR_VM_INVALID_VM_STATE, RT_SRC_POS, N_("%s failed because the VM state is %s instead of %s"),
3302 pszWho, pszStateCur, VMR3GetStateName(enmStateOld));
3303 AssertMsgFailed(("%s: %s -> %s failed, because the VM state is actually %s\n",
3304 pszWho, VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew), pszStateCur));
3305 }
3306 else
3307 {
3308 char szTransitions[4096];
3309 size_t cchTransitions = 0;
3310 szTransitions[0] = '\0';
3311 va_end(va);
3312 va_start(va, cTransitions);
3313 for (unsigned i = 0; i < cTransitions; i++)
3314 {
3315 enmStateNew = (VMSTATE)va_arg(va, /*VMSTATE*/int);
3316 enmStateOld = (VMSTATE)va_arg(va, /*VMSTATE*/int);
3317 const char * const pszStateNew = VMR3GetStateName(enmStateNew);
3318 const char * const pszStateOld = VMR3GetStateName(enmStateOld);
3319 LogRel(("%s%s -> %s", i ? ", " : " ", pszStateOld, pszStateNew));
3320 cchTransitions += RTStrPrintf(&szTransitions[cchTransitions], sizeof(szTransitions) - cchTransitions,
3321 "%s%s -> %s", i ? ", " : " ", pszStateOld, pszStateNew);
3322 }
3323 Assert(cchTransitions < sizeof(szTransitions) - 64);
3324
3325 LogRel(("%s: %s failed, because the VM state is actually %s!\n", pszWho, szTransitions, pszStateCur));
3326 VMSetError(pVM, VERR_VM_INVALID_VM_STATE, RT_SRC_POS,
3327 N_("%s failed because the current VM state, %s, was not found in the state transition table (%s)"),
3328 pszWho, pszStateCur, szTransitions);
3329 AssertMsgFailed(("%s - state=%s, transitions: %s. Check the cTransitions passed us.\n",
3330 pszWho, pszStateCur, szTransitions));
3331 }
3332 }
3333
3334 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
3335 va_end(va);
3336 Assert(rc > 0 || rc < 0);
3337 return rc;
3338}
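
/*
 * A minimal usage sketch for vmR3TrySetState (illustrative only, not one of
 * the call sites in this file): the variadic tail is read as cTransitions
 * (new, old) state pairs, and the 1-based index of the first pair whose old
 * state matches the current state is returned.  The "MyCaller" tag and the
 * states chosen below are examples.
 *
 * @code
 *      int rc = vmR3TrySetState(pVM, "MyCaller", 2,
 *                               VMSTATE_SUSPENDING,    VMSTATE_RUNNING,
 *                               VMSTATE_SUSPENDING_LS, VMSTATE_RUNNING_LS);
 *      if (RT_FAILURE(rc))
 *          return rc;              // The VM was in neither of the old states.
 *      if (rc == 2)                // The live-save pair matched.
 *          SSMR3Cancel(pVM->pUVM);
 * @endcode
 */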
3339
3340
3341/**
3342 * Interface used by EM to signal that it's entering the guru meditation state.
3343 *
3344 * This will notify other threads.
3345 *
3346 * @returns true if the state changed to Guru, false if no state change.
3347 * @param pVM The cross context VM structure.
3348 */
3349VMMR3_INT_DECL(bool) VMR3SetGuruMeditation(PVM pVM)
3350{
3351 PUVM pUVM = pVM->pUVM;
3352 RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
3353
3354 VMSTATE enmStateCur = pVM->enmVMState;
3355 bool fRc = true;
3356 if (enmStateCur == VMSTATE_RUNNING)
3357 vmR3SetStateLocked(pVM, pUVM, VMSTATE_GURU_MEDITATION, VMSTATE_RUNNING, true /*fSetRatherThanClearFF*/);
3358 else if (enmStateCur == VMSTATE_RUNNING_LS)
3359 {
3360 vmR3SetStateLocked(pVM, pUVM, VMSTATE_GURU_MEDITATION_LS, VMSTATE_RUNNING_LS, true /*fSetRatherThanClearFF*/);
3361 SSMR3Cancel(pUVM);
3362 }
3363 else
3364 fRc = false;
3365
3366 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
3367 return fRc;
3368}
3369
3370
3371/**
3372 * Called by vmR3EmulationThreadWithId just before the VM structure is freed.
3373 *
3374 * @param pVM The cross context VM structure.
3375 */
3376void vmR3SetTerminated(PVM pVM)
3377{
3378 vmR3SetState(pVM, VMSTATE_TERMINATED, VMSTATE_DESTROYING);
3379}
3380
3381
3382/**
3383 * Checks if the VM was teleported and hasn't been fully resumed yet.
3384 *
3385 * This applies to both sides of the teleportation since we may leave a working
3386 * clone behind and the user is allowed to resume this...
3387 *
3388 * @returns true / false.
3389 * @param pVM The cross context VM structure.
3390 * @thread Any thread.
3391 */
3392VMMR3_INT_DECL(bool) VMR3TeleportedAndNotFullyResumedYet(PVM pVM)
3393{
3394 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
3395 return pVM->vm.s.fTeleportedAndNotFullyResumedYet;
3396}
3397
3398
3399/**
3400 * Registers a VM state change callback.
3401 *
3402 * You are not allowed to call any function which changes the VM state from a
3403 * state callback.
3404 *
3405 * @returns VBox status code.
3406 * @param pUVM The VM handle.
3407 * @param pfnAtState Pointer to callback.
3408 * @param pvUser User argument.
3409 * @thread Any.
3410 */
3411VMMR3DECL(int) VMR3AtStateRegister(PUVM pUVM, PFNVMATSTATE pfnAtState, void *pvUser)
3412{
3413 LogFlow(("VMR3AtStateRegister: pfnAtState=%p pvUser=%p\n", pfnAtState, pvUser));
3414
3415 /*
3416 * Validate input.
3417 */
3418 AssertPtrReturn(pfnAtState, VERR_INVALID_PARAMETER);
3419 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
3420
3421 /*
3422 * Allocate a new record.
3423 */
3424 PVMATSTATE pNew = (PVMATSTATE)MMR3HeapAllocU(pUVM, MM_TAG_VM, sizeof(*pNew));
3425 if (!pNew)
3426 return VERR_NO_MEMORY;
3427
3428 /* fill */
3429 pNew->pfnAtState = pfnAtState;
3430 pNew->pvUser = pvUser;
3431
3432 /* insert */
3433 RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
3434 pNew->pNext = *pUVM->vm.s.ppAtStateNext;
3435 *pUVM->vm.s.ppAtStateNext = pNew;
3436 pUVM->vm.s.ppAtStateNext = &pNew->pNext;
3437 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
3438
3439 return VINF_SUCCESS;
3440}
3441
3442
3443/**
3444 * Deregisters a VM state change callback.
3445 *
3446 * @returns VBox status code.
3447 * @param pUVM The VM handle.
3448 * @param pfnAtState Pointer to callback.
3449 * @param pvUser User argument.
3450 * @thread Any.
3451 */
3452VMMR3DECL(int) VMR3AtStateDeregister(PUVM pUVM, PFNVMATSTATE pfnAtState, void *pvUser)
3453{
3454 LogFlow(("VMR3AtStateDeregister: pfnAtState=%p pvUser=%p\n", pfnAtState, pvUser));
3455
3456 /*
3457 * Validate input.
3458 */
3459 AssertPtrReturn(pfnAtState, VERR_INVALID_PARAMETER);
3460 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
3461
3462 RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
3463
3464 /*
3465 * Search the list for the entry.
3466 */
3467 PVMATSTATE pPrev = NULL;
3468 PVMATSTATE pCur = pUVM->vm.s.pAtState;
3469 while ( pCur
3470 && ( pCur->pfnAtState != pfnAtState
3471 || pCur->pvUser != pvUser))
3472 {
3473 pPrev = pCur;
3474 pCur = pCur->pNext;
3475 }
3476 if (!pCur)
3477 {
3478 AssertMsgFailed(("pfnAtState=%p was not found\n", pfnAtState));
3479 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
3480 return VERR_FILE_NOT_FOUND;
3481 }
3482
3483 /*
3484 * Unlink it.
3485 */
3486 if (pPrev)
3487 {
3488 pPrev->pNext = pCur->pNext;
3489 if (!pCur->pNext)
3490 pUVM->vm.s.ppAtStateNext = &pPrev->pNext;
3491 }
3492 else
3493 {
3494 pUVM->vm.s.pAtState = pCur->pNext;
3495 if (!pCur->pNext)
3496 pUVM->vm.s.ppAtStateNext = &pUVM->vm.s.pAtState;
3497 }
3498
3499 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
3500
3501 /*
3502 * Free it.
3503 */
3504 pCur->pfnAtState = NULL;
3505 pCur->pNext = NULL;
3506 MMR3HeapFree(pCur);
3507
3508 return VINF_SUCCESS;
3509}
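
/*
 * A short usage sketch for the register/deregister pair above, assuming the
 * FNVMATSTATE callback signature of this revision (pUVM, new state, old
 * state, user argument).  The callback name and log text are examples.
 *
 * @code
 *      static DECLCALLBACK(void) myAtState(PUVM pUVM, VMSTATE enmState, VMSTATE enmOldState, void *pvUser)
 *      {
 *          RT_NOREF(pUVM, pvUser);
 *          LogRel(("frontend: %s -> %s\n", VMR3GetStateName(enmOldState), VMR3GetStateName(enmState)));
 *      }
 *      ...
 *      int rc = VMR3AtStateRegister(pUVM, myAtState, NULL);
 *      AssertRCReturn(rc, rc);
 *      ...
 *      VMR3AtStateDeregister(pUVM, myAtState, NULL);  // Same pfn + pvUser pair as registered.
 * @endcode
 */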
3510
3511
3512/**
3513 * Registers a VM error callback.
3514 *
3515 * @returns VBox status code.
3516 * @param pUVM The VM handle.
3517 * @param pfnAtError Pointer to callback.
3518 * @param pvUser User argument.
3519 * @thread Any.
3520 */
3521VMMR3DECL(int) VMR3AtErrorRegister(PUVM pUVM, PFNVMATERROR pfnAtError, void *pvUser)
3522{
3523 LogFlow(("VMR3AtErrorRegister: pfnAtError=%p pvUser=%p\n", pfnAtError, pvUser));
3524
3525 /*
3526 * Validate input.
3527 */
3528 AssertPtrReturn(pfnAtError, VERR_INVALID_PARAMETER);
3529 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
3530
3531 /*
3532 * Allocate a new record.
3533 */
3534 PVMATERROR pNew = (PVMATERROR)MMR3HeapAllocU(pUVM, MM_TAG_VM, sizeof(*pNew));
3535 if (!pNew)
3536 return VERR_NO_MEMORY;
3537
3538 /* fill */
3539 pNew->pfnAtError = pfnAtError;
3540 pNew->pvUser = pvUser;
3541
3542 /* insert */
3543 RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
3544 pNew->pNext = *pUVM->vm.s.ppAtErrorNext;
3545 *pUVM->vm.s.ppAtErrorNext = pNew;
3546 pUVM->vm.s.ppAtErrorNext = &pNew->pNext;
3547 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
3548
3549 return VINF_SUCCESS;
3550}
3551
3552
3553/**
3554 * Deregisters a VM error callback.
3555 *
3556 * @returns VBox status code.
3557 * @param pUVM The VM handle.
3558 * @param pfnAtError Pointer to callback.
3559 * @param pvUser User argument.
3560 * @thread Any.
3561 */
3562VMMR3DECL(int) VMR3AtErrorDeregister(PUVM pUVM, PFNVMATERROR pfnAtError, void *pvUser)
3563{
3564 LogFlow(("VMR3AtErrorDeregister: pfnAtError=%p pvUser=%p\n", pfnAtError, pvUser));
3565
3566 /*
3567 * Validate input.
3568 */
3569 AssertPtrReturn(pfnAtError, VERR_INVALID_PARAMETER);
3570 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
3571
3572 RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
3573
3574 /*
3575 * Search the list for the entry.
3576 */
3577 PVMATERROR pPrev = NULL;
3578 PVMATERROR pCur = pUVM->vm.s.pAtError;
3579 while ( pCur
3580 && ( pCur->pfnAtError != pfnAtError
3581 || pCur->pvUser != pvUser))
3582 {
3583 pPrev = pCur;
3584 pCur = pCur->pNext;
3585 }
3586 if (!pCur)
3587 {
3588 AssertMsgFailed(("pfnAtError=%p was not found\n", pfnAtError));
3589 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
3590 return VERR_FILE_NOT_FOUND;
3591 }
3592
3593 /*
3594 * Unlink it.
3595 */
3596 if (pPrev)
3597 {
3598 pPrev->pNext = pCur->pNext;
3599 if (!pCur->pNext)
3600 pUVM->vm.s.ppAtErrorNext = &pPrev->pNext;
3601 }
3602 else
3603 {
3604 pUVM->vm.s.pAtError = pCur->pNext;
3605 if (!pCur->pNext)
3606 pUVM->vm.s.ppAtErrorNext = &pUVM->vm.s.pAtError;
3607 }
3608
3609 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
3610
3611 /*
3612 * Free it.
3613 */
3614 pCur->pfnAtError = NULL;
3615 pCur->pNext = NULL;
3616 MMR3HeapFree(pCur);
3617
3618 return VINF_SUCCESS;
3619}
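
/*
 * Sketch of an at-error callback matching the way vmR3SetErrorUV below
 * invokes it: (pUVM, pvUser, rc, RT_SRC_POS_DECL, format, va_list).  The
 * va_copy dance mirrors the %N usage elsewhere in this file; the callback
 * name is an example.
 *
 * @code
 *      static DECLCALLBACK(void) myAtError(PUVM pUVM, void *pvUser, int rc, RT_SRC_POS_DECL,
 *                                          const char *pszFormat, va_list va)
 *      {
 *          RT_NOREF(pUVM, pvUser);
 *          va_list vaCopy;
 *          va_copy(vaCopy, va);
 *          LogRel(("frontend error: rc=%Rrc at %s(%d) %s: %N\n",
 *                  rc, pszFile, iLine, pszFunction, pszFormat, &vaCopy));
 *          va_end(vaCopy);
 *      }
 * @endcode
 */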
3620
3621
3622/**
3623 * Ellipsis to va_list wrapper for calling pfnAtError.
3624 */
3625static void vmR3SetErrorWorkerDoCall(PVM pVM, PVMATERROR pCur, int rc, RT_SRC_POS_DECL, const char *pszFormat, ...)
3626{
3627 va_list va;
3628 va_start(va, pszFormat);
3629 pCur->pfnAtError(pVM->pUVM, pCur->pvUser, rc, RT_SRC_POS_ARGS, pszFormat, va);
3630 va_end(va);
3631}
3632
3633
3634/**
3635 * This is a worker function for GC and Ring-0 calls to VMSetError and VMSetErrorV.
3636 * The message is found in VMINT.
3637 *
3638 * @param pVM The cross context VM structure.
3639 * @thread EMT.
3640 */
3641VMMR3_INT_DECL(void) VMR3SetErrorWorker(PVM pVM)
3642{
3643 VM_ASSERT_EMT(pVM);
3644 AssertReleaseMsgFailed(("And we have a winner! You get to implement Ring-0 and GC VMSetErrorV! Congrats!\n"));
3645
3646 /*
3647 * Unpack the error (if we managed to format one).
3648 */
3649 PVMERROR pErr = pVM->vm.s.pErrorR3;
3650 const char *pszFile = NULL;
3651 const char *pszFunction = NULL;
3652 uint32_t iLine = 0;
3653 const char *pszMessage;
3654 int32_t rc = VERR_MM_HYPER_NO_MEMORY;
3655 if (pErr)
3656 {
3657 AssertCompile(sizeof(const char) == sizeof(uint8_t));
3658 if (pErr->offFile)
3659 pszFile = (const char *)pErr + pErr->offFile;
3660 iLine = pErr->iLine;
3661 if (pErr->offFunction)
3662 pszFunction = (const char *)pErr + pErr->offFunction;
3663 if (pErr->offMessage)
3664 pszMessage = (const char *)pErr + pErr->offMessage;
3665 else
3666 pszMessage = "No message!";
3667 }
3668 else
3669 pszMessage = "No message! (Failed to allocate memory to put the error message in!)";
3670
3671 /*
3672 * Call the at error callbacks.
3673 */
3674 PUVM pUVM = pVM->pUVM;
3675 RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
3676 ASMAtomicIncU32(&pUVM->vm.s.cErrors);
3677 for (PVMATERROR pCur = pUVM->vm.s.pAtError; pCur; pCur = pCur->pNext)
3678 vmR3SetErrorWorkerDoCall(pVM, pCur, rc, RT_SRC_POS_ARGS, "%s", pszMessage);
3679 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
3680}
3681
3682
3683/**
3684 * Gets the number of errors raised via VMSetError.
3685 *
3686 * This can be used to avoid double error messages.
3687 *
3688 * @returns The error count.
3689 * @param pUVM The VM handle.
3690 */
3691VMMR3_INT_DECL(uint32_t) VMR3GetErrorCount(PUVM pUVM)
3692{
3693 AssertPtrReturn(pUVM, 0);
3694 AssertReturn(pUVM->u32Magic == UVM_MAGIC, 0);
3695 return pUVM->vm.s.cErrors;
3696}
3697
3698
3699/**
3700 * Creation time wrapper for vmR3SetErrorUV.
3701 *
3702 * @returns rc.
3703 * @param pUVM Pointer to the user mode VM structure.
3704 * @param rc The VBox status code.
3705 * @param SRC_POS The source position of this error.
3706 * @param pszFormat Format string.
3707 * @param ... The arguments.
3708 * @thread Any thread.
3709 */
3710static int vmR3SetErrorU(PUVM pUVM, int rc, RT_SRC_POS_DECL, const char *pszFormat, ...)
3711{
3712 va_list va;
3713 va_start(va, pszFormat);
3714 vmR3SetErrorUV(pUVM, rc, pszFile, iLine, pszFunction, pszFormat, &va);
3715 va_end(va);
3716 return rc;
3717}
3718
3719
3720/**
3721 * Worker which calls everyone listening to the VM error messages.
3722 *
3723 * @param pUVM Pointer to the user mode VM structure.
3724 * @param rc The VBox status code.
3725 * @param SRC_POS The source position of this error.
3726 * @param pszFormat Format string.
3727 * @param pArgs Pointer to the format arguments.
3728 * @thread EMT
3729 */
3730DECLCALLBACK(void) vmR3SetErrorUV(PUVM pUVM, int rc, RT_SRC_POS_DECL, const char *pszFormat, va_list *pArgs)
3731{
3732 /*
3733 * Log the error.
3734 */
3735 va_list va3;
3736 va_copy(va3, *pArgs);
3737 RTLogRelPrintf("VMSetError: %s(%d) %s; rc=%Rrc\n"
3738 "VMSetError: %N\n",
3739 pszFile, iLine, pszFunction, rc,
3740 pszFormat, &va3);
3741 va_end(va3);
3742
3743#ifdef LOG_ENABLED
3744 va_copy(va3, *pArgs);
3745 RTLogPrintf("VMSetError: %s(%d) %s; rc=%Rrc\n"
3746 "%N\n",
3747 pszFile, iLine, pszFunction, rc,
3748 pszFormat, &va3);
3749 va_end(va3);
3750#endif
3751
3752 /*
3753 * Make a copy of the message.
3754 */
3755 if (pUVM->pVM)
3756 vmSetErrorCopy(pUVM->pVM, rc, RT_SRC_POS_ARGS, pszFormat, *pArgs);
3757
3758 /*
3759 * Call the at error callbacks.
3760 */
3761 bool fCalledSomeone = false;
3762 RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
3763 ASMAtomicIncU32(&pUVM->vm.s.cErrors);
3764 for (PVMATERROR pCur = pUVM->vm.s.pAtError; pCur; pCur = pCur->pNext)
3765 {
3766 va_list va2;
3767 va_copy(va2, *pArgs);
3768 pCur->pfnAtError(pUVM, pCur->pvUser, rc, RT_SRC_POS_ARGS, pszFormat, va2);
3769 va_end(va2);
3770 fCalledSomeone = true;
3771 }
3772 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
3773}
3774
3775
3776/**
3777 * Sets the error message.
3778 *
3779 * @returns rc. Meaning you can do:
3780 * @code
3781 * return VM_SET_ERROR_U(pUVM, VERR_OF_YOUR_CHOICE, "descriptive message");
3782 * @endcode
3783 * @param pUVM The user mode VM handle.
3784 * @param rc VBox status code.
3785 * @param SRC_POS Use RT_SRC_POS.
3786 * @param pszFormat Error message format string.
3787 * @param ... Error message arguments.
3788 * @thread Any
3789 */
3790VMMR3DECL(int) VMR3SetError(PUVM pUVM, int rc, RT_SRC_POS_DECL, const char *pszFormat, ...)
3791{
3792 va_list va;
3793 va_start(va, pszFormat);
3794 int rcRet = VMR3SetErrorV(pUVM, rc, pszFile, iLine, pszFunction, pszFormat, va);
3795 va_end(va);
3796 return rcRet;
3797}
3798
3799
3800/**
3801 * Sets the error message.
3802 *
3803 * @returns rc. Meaning you can do:
3804 * @code
3805 * return VM_SET_ERROR_U(pUVM, VERR_OF_YOUR_CHOICE, "descriptive message");
3806 * @endcode
3807 * @param pUVM The user mode VM handle.
3808 * @param rc VBox status code.
3809 * @param SRC_POS Use RT_SRC_POS.
3810 * @param pszFormat Error message format string.
3811 * @param va Error message arguments.
3812 * @thread Any
3813 */
3814VMMR3DECL(int) VMR3SetErrorV(PUVM pUVM, int rc, RT_SRC_POS_DECL, const char *pszFormat, va_list va)
3815{
3816 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
3817
3818 /* Take shortcut when called on EMT, skipping VM handle requirement + validation. */
3819 if (VMR3GetVMCPUThread(pUVM) != NIL_RTTHREAD)
3820 {
3821 va_list vaCopy;
3822 va_copy(vaCopy, va);
3823 vmR3SetErrorUV(pUVM, rc, RT_SRC_POS_ARGS, pszFormat, &vaCopy);
3824 va_end(vaCopy);
3825 return rc;
3826 }
3827
3828 VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE);
3829 return VMSetErrorV(pUVM->pVM, rc, pszFile, iLine, pszFunction, pszFormat, va);
3830}
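
/*
 * A minimal call-site sketch for the two setters above; the status code,
 * message and pszFilename variable are examples only.
 *
 * @code
 *      return VMR3SetError(pUVM, VERR_FILE_NOT_FOUND, RT_SRC_POS,
 *                          N_("Could not open '%s'"), pszFilename);
 * @endcode
 */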
3831
3832
3833
3834/**
3835 * Registers a VM runtime error callback.
3836 *
3837 * @returns VBox status code.
3838 * @param pUVM The user mode VM structure.
3839 * @param pfnAtRuntimeError Pointer to callback.
3840 * @param pvUser User argument.
3841 * @thread Any.
3842 */
3843VMMR3DECL(int) VMR3AtRuntimeErrorRegister(PUVM pUVM, PFNVMATRUNTIMEERROR pfnAtRuntimeError, void *pvUser)
3844{
3845 LogFlow(("VMR3AtRuntimeErrorRegister: pfnAtRuntimeError=%p pvUser=%p\n", pfnAtRuntimeError, pvUser));
3846
3847 /*
3848 * Validate input.
3849 */
3850 AssertPtrReturn(pfnAtRuntimeError, VERR_INVALID_PARAMETER);
3851 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
3852
3853 /*
3854 * Allocate a new record.
3855 */
3856 PVMATRUNTIMEERROR pNew = (PVMATRUNTIMEERROR)MMR3HeapAllocU(pUVM, MM_TAG_VM, sizeof(*pNew));
3857 if (!pNew)
3858 return VERR_NO_MEMORY;
3859
3860 /* fill */
3861 pNew->pfnAtRuntimeError = pfnAtRuntimeError;
3862 pNew->pvUser = pvUser;
3863
3864 /* insert */
3865 RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
3866 pNew->pNext = *pUVM->vm.s.ppAtRuntimeErrorNext;
3867 *pUVM->vm.s.ppAtRuntimeErrorNext = pNew;
3868 pUVM->vm.s.ppAtRuntimeErrorNext = &pNew->pNext;
3869 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
3870
3871 return VINF_SUCCESS;
3872}
3873
3874
3875/**
3876 * Deregisters a VM runtime error callback.
3877 *
3878 * @returns VBox status code.
3879 * @param pUVM The user mode VM handle.
3880 * @param pfnAtRuntimeError Pointer to callback.
3881 * @param pvUser User argument.
3882 * @thread Any.
3883 */
3884VMMR3DECL(int) VMR3AtRuntimeErrorDeregister(PUVM pUVM, PFNVMATRUNTIMEERROR pfnAtRuntimeError, void *pvUser)
3885{
3886 LogFlow(("VMR3AtRuntimeErrorDeregister: pfnAtRuntimeError=%p pvUser=%p\n", pfnAtRuntimeError, pvUser));
3887
3888 /*
3889 * Validate input.
3890 */
3891 AssertPtrReturn(pfnAtRuntimeError, VERR_INVALID_PARAMETER);
3892 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
3893
3894 RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
3895
3896 /*
3897 * Search the list for the entry.
3898 */
3899 PVMATRUNTIMEERROR pPrev = NULL;
3900 PVMATRUNTIMEERROR pCur = pUVM->vm.s.pAtRuntimeError;
3901 while ( pCur
3902 && ( pCur->pfnAtRuntimeError != pfnAtRuntimeError
3903 || pCur->pvUser != pvUser))
3904 {
3905 pPrev = pCur;
3906 pCur = pCur->pNext;
3907 }
3908 if (!pCur)
3909 {
3910 AssertMsgFailed(("pfnAtRuntimeError=%p was not found\n", pfnAtRuntimeError));
3911 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
3912 return VERR_FILE_NOT_FOUND;
3913 }
3914
3915 /*
3916 * Unlink it.
3917 */
3918 if (pPrev)
3919 {
3920 pPrev->pNext = pCur->pNext;
3921 if (!pCur->pNext)
3922 pUVM->vm.s.ppAtRuntimeErrorNext = &pPrev->pNext;
3923 }
3924 else
3925 {
3926 pUVM->vm.s.pAtRuntimeError = pCur->pNext;
3927 if (!pCur->pNext)
3928 pUVM->vm.s.ppAtRuntimeErrorNext = &pUVM->vm.s.pAtRuntimeError;
3929 }
3930
3931 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
3932
3933 /*
3934 * Free it.
3935 */
3936 pCur->pfnAtRuntimeError = NULL;
3937 pCur->pNext = NULL;
3938 MMR3HeapFree(pCur);
3939
3940 return VINF_SUCCESS;
3941}
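
/*
 * Sketch of an at-runtime-error callback matching the invocation in
 * vmR3SetRuntimeErrorCommon below: (pUVM, pvUser, fFlags, error ID, format,
 * va_list).  The callback name and log text are examples.
 *
 * @code
 *      static DECLCALLBACK(void) myAtRuntimeError(PUVM pUVM, void *pvUser, uint32_t fFlags,
 *                                                 const char *pszErrorId, const char *pszFormat, va_list va)
 *      {
 *          RT_NOREF(pUVM, pvUser);
 *          va_list vaCopy;
 *          va_copy(vaCopy, va);
 *          LogRel(("runtime error '%s' (fFlags=%#x): %N\n", pszErrorId, fFlags, pszFormat, &vaCopy));
 *          va_end(vaCopy);
 *      }
 * @endcode
 */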
3942
3943
3944/**
3945 * EMT rendezvous worker that vmR3SetRuntimeErrorCommon uses to safely change
3946 * the state to FatalError(LS).
3947 *
3948 * @returns VERR_VM_INVALID_VM_STATE or VINF_EM_SUSPEND. (This is a strict
3949 * return code, see FNVMMEMTRENDEZVOUS.)
3950 *
3951 * @param pVM The cross context VM structure.
3952 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3953 * @param pvUser Ignored.
3954 */
3955static DECLCALLBACK(VBOXSTRICTRC) vmR3SetRuntimeErrorChangeState(PVM pVM, PVMCPU pVCpu, void *pvUser)
3956{
3957 NOREF(pVCpu);
3958 Assert(!pvUser); NOREF(pvUser);
3959
3960 /*
3961 * The first EMT thru here changes the state.
3962 */
3963 if (pVCpu->idCpu == pVM->cCpus - 1)
3964 {
3965 int rc = vmR3TrySetState(pVM, "VMSetRuntimeError", 2,
3966 VMSTATE_FATAL_ERROR, VMSTATE_RUNNING,
3967 VMSTATE_FATAL_ERROR_LS, VMSTATE_RUNNING_LS);
3968 if (RT_FAILURE(rc))
3969 return rc;
3970 if (rc == 2)
3971 SSMR3Cancel(pVM->pUVM);
3972
3973 VM_FF_SET(pVM, VM_FF_CHECK_VM_STATE);
3974 }
3975
3976 /* This'll make sure we get out of wherever we are (e.g. REM). */
3977 return VINF_EM_SUSPEND;
3978}
3979
3980
3981/**
3982 * Worker for VMR3SetRuntimeErrorWorker and vmR3SetRuntimeErrorV.
3983 *
3984 * This does the common parts after the error has been saved / retrieved.
3985 *
3986 * @returns VBox status code with modifications, see VMSetRuntimeErrorV.
3987 *
3988 * @param pVM The cross context VM structure.
3989 * @param fFlags The error flags.
3990 * @param pszErrorId Error ID string.
3991 * @param pszFormat Format string.
3992 * @param pVa Pointer to the format arguments.
3993 */
3994static int vmR3SetRuntimeErrorCommon(PVM pVM, uint32_t fFlags, const char *pszErrorId, const char *pszFormat, va_list *pVa)
3995{
3996 LogRel(("VM: Raising runtime error '%s' (fFlags=%#x)\n", pszErrorId, fFlags));
3997 PUVM pUVM = pVM->pUVM;
3998
3999 /*
4000 * Take actions before the call.
4001 */
4002 int rc;
4003 if (fFlags & VMSETRTERR_FLAGS_FATAL)
4004 rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
4005 vmR3SetRuntimeErrorChangeState, NULL);
4006 else if (fFlags & VMSETRTERR_FLAGS_SUSPEND)
4007 rc = VMR3Suspend(pUVM, VMSUSPENDREASON_RUNTIME_ERROR);
4008 else
4009 rc = VINF_SUCCESS;
4010
4011 /*
4012 * Do the callback round.
4013 */
4014 RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
4015 ASMAtomicIncU32(&pUVM->vm.s.cRuntimeErrors);
4016 for (PVMATRUNTIMEERROR pCur = pUVM->vm.s.pAtRuntimeError; pCur; pCur = pCur->pNext)
4017 {
4018 va_list va;
4019 va_copy(va, *pVa);
4020 pCur->pfnAtRuntimeError(pUVM, pCur->pvUser, fFlags, pszErrorId, pszFormat, va);
4021 va_end(va);
4022 }
4023 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
4024
4025 return rc;
4026}
4027
4028
4029/**
4030 * Ellipsis to va_list wrapper for calling vmR3SetRuntimeErrorCommon.
4031 */
4032static int vmR3SetRuntimeErrorCommonF(PVM pVM, uint32_t fFlags, const char *pszErrorId, const char *pszFormat, ...)
4033{
4034 va_list va;
4035 va_start(va, pszFormat);
4036 int rc = vmR3SetRuntimeErrorCommon(pVM, fFlags, pszErrorId, pszFormat, &va);
4037 va_end(va);
4038 return rc;
4039}
4040
4041
4042/**
4043 * This is a worker function for RC and Ring-0 calls to VMSetRuntimeError and
4044 * VMSetRuntimeErrorV.
4045 *
4046 * The message is found in VMINT.
4047 *
4048 * @returns VBox status code, see VMSetRuntimeError.
4049 * @param pVM The cross context VM structure.
4050 * @thread EMT.
4051 */
4052VMMR3_INT_DECL(int) VMR3SetRuntimeErrorWorker(PVM pVM)
4053{
4054 VM_ASSERT_EMT(pVM);
4055 AssertReleaseMsgFailed(("And we have a winner! You get to implement Ring-0 and GC VMSetRuntimeErrorV! Congrats!\n"));
4056
4057 /*
4058 * Unpack the error (if we managed to format one).
4059 */
4060 const char *pszErrorId = "SetRuntimeError";
4061 const char *pszMessage = "No message!";
4062 uint32_t fFlags = VMSETRTERR_FLAGS_FATAL;
4063 PVMRUNTIMEERROR pErr = pVM->vm.s.pRuntimeErrorR3;
4064 if (pErr)
4065 {
4066 AssertCompile(sizeof(const char) == sizeof(uint8_t));
4067 if (pErr->offErrorId)
4068 pszErrorId = (const char *)pErr + pErr->offErrorId;
4069 if (pErr->offMessage)
4070 pszMessage = (const char *)pErr + pErr->offMessage;
4071 fFlags = pErr->fFlags;
4072 }
4073
4074 /*
4075 * Join paths with vmR3SetRuntimeErrorV.
4076 */
4077 return vmR3SetRuntimeErrorCommonF(pVM, fFlags, pszErrorId, "%s", pszMessage);
4078}
4079
4080
4081/**
4082 * Worker for VMSetRuntimeErrorV for doing the job on EMT in ring-3.
4083 *
4084 * @returns VBox status code with modifications, see VMSetRuntimeErrorV.
4085 *
4086 * @param pVM The cross context VM structure.
4087 * @param fFlags The error flags.
4088 * @param pszErrorId Error ID string.
4089 * @param pszMessage The error message residing in the MM heap.
4090 *
4091 * @thread EMT
4092 */
4093DECLCALLBACK(int) vmR3SetRuntimeError(PVM pVM, uint32_t fFlags, const char *pszErrorId, char *pszMessage)
4094{
4095#if 0 /** @todo make copy of the error msg. */
4096 /*
4097 * Make a copy of the message.
4098 */
4099 va_list va2;
4100 va_copy(va2, *pVa);
4101 vmSetRuntimeErrorCopy(pVM, fFlags, pszErrorId, pszFormat, va2);
4102 va_end(va2);
4103#endif
4104
4105 /*
4106 * Join paths with VMR3SetRuntimeErrorWorker.
4107 */
4108 int rc = vmR3SetRuntimeErrorCommonF(pVM, fFlags, pszErrorId, "%s", pszMessage);
4109 MMR3HeapFree(pszMessage);
4110 return rc;
4111}
4112
4113
4114/**
4115 * Worker for VMSetRuntimeErrorV for doing the job on EMT in ring-3.
4116 *
4117 * @returns VBox status code with modifications, see VMSetRuntimeErrorV.
4118 *
4119 * @param pVM The cross context VM structure.
4120 * @param fFlags The error flags.
4121 * @param pszErrorId Error ID string.
4122 * @param pszFormat Format string.
4123 * @param pVa Pointer to the format arguments.
4124 *
4125 * @thread EMT
4126 */
4127DECLCALLBACK(int) vmR3SetRuntimeErrorV(PVM pVM, uint32_t fFlags, const char *pszErrorId, const char *pszFormat, va_list *pVa)
4128{
4129 /*
4130 * Make a copy of the message.
4131 */
4132 va_list va2;
4133 va_copy(va2, *pVa);
4134 vmSetRuntimeErrorCopy(pVM, fFlags, pszErrorId, pszFormat, va2);
4135 va_end(va2);
4136
4137 /*
4138 * Join paths with VMR3SetRuntimeErrorWorker.
4139 */
4140 return vmR3SetRuntimeErrorCommon(pVM, fFlags, pszErrorId, pszFormat, pVa);
4141}
4142
4143
4144/**
4145 * Gets the number of runtime errors raised via VMR3SetRuntimeError.
4146 *
4147 * This can be used to avoid double error messages.
4148 *
4149 * @returns The runtime error count.
4150 * @param pUVM The user mode VM handle.
4151 */
4152VMMR3_INT_DECL(uint32_t) VMR3GetRuntimeErrorCount(PUVM pUVM)
4153{
4154 return pUVM->vm.s.cRuntimeErrors;
4155}
4156
4157
4158/**
4159 * Gets the ID of the virtual CPU associated with the calling thread.
4160 *
4161 * @returns The CPU ID. NIL_VMCPUID if the thread isn't an EMT.
4162 *
4163 * @param pVM The cross context VM structure.
4164 */
4165VMMR3_INT_DECL(RTCPUID) VMR3GetVMCPUId(PVM pVM)
4166{
4167 PUVMCPU pUVCpu = (PUVMCPU)RTTlsGet(pVM->pUVM->vm.s.idxTLS);
4168 return pUVCpu
4169 ? pUVCpu->idCpu
4170 : NIL_VMCPUID;
4171}
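
/*
 * Typical guard for code that must run on an EMT (a sketch; the status code
 * chosen is an example):
 *
 * @code
 *      VMCPUID idCpu = VMR3GetVMCPUId(pVM);
 *      if (idCpu == NIL_VMCPUID)
 *          return VERR_VM_THREAD_NOT_EMT;  // Not called on an EMT.
 * @endcode
 */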
4172
4173
4174/**
4175 * Checks if the VM is long-mode (64-bit) capable or not.
4176 *
4177 * @returns true if VM can operate in long-mode, false otherwise.
4178 * @param pVM The cross context VM structure.
4179 */
4180VMMR3_INT_DECL(bool) VMR3IsLongModeAllowed(PVM pVM)
4181{
4182 switch (pVM->bMainExecutionEngine)
4183 {
4184 case VM_EXEC_ENGINE_HW_VIRT:
4185 return HMIsLongModeAllowed(pVM);
4186
4187 case VM_EXEC_ENGINE_NATIVE_API:
4188 return NEMHCIsLongModeAllowed(pVM);
4189
4190 case VM_EXEC_ENGINE_NOT_SET:
4191 AssertFailed();
4192 RT_FALL_THRU();
4193 default:
4194 return false;
4195 }
4196}
4197
4198
4199/**
4200 * Returns the native ID of the current EMT VMCPU thread.
4201 *
4202 * @returns Handle if this is an EMT thread; NIL_RTNATIVETHREAD otherwise
4203 * @param pVM The cross context VM structure.
4204 * @thread EMT
4205 */
4206VMMR3DECL(RTNATIVETHREAD) VMR3GetVMCPUNativeThread(PVM pVM)
4207{
4208 PUVMCPU pUVCpu = (PUVMCPU)RTTlsGet(pVM->pUVM->vm.s.idxTLS);
4209
4210 if (!pUVCpu)
4211 return NIL_RTNATIVETHREAD;
4212
4213 return pUVCpu->vm.s.NativeThreadEMT;
4214}
4215
4216
4217/**
4218 * Returns the native ID of the current EMT VMCPU thread.
4219 *
4220 * @returns Handle if this is an EMT thread; NIL_RTNATIVETHREAD otherwise
4221 * @param pUVM The user mode VM structure.
4222 * @thread EMT
4223 */
4224VMMR3DECL(RTNATIVETHREAD) VMR3GetVMCPUNativeThreadU(PUVM pUVM)
4225{
4226 PUVMCPU pUVCpu = (PUVMCPU)RTTlsGet(pUVM->vm.s.idxTLS);
4227
4228 if (!pUVCpu)
4229 return NIL_RTNATIVETHREAD;
4230
4231 return pUVCpu->vm.s.NativeThreadEMT;
4232}
4233
4234
4235/**
4236 * Returns the handle of the current EMT VMCPU thread.
4237 *
4238 * @returns Handle if this is an EMT thread; NIL_RTTHREAD otherwise
4239 * @param pUVM The user mode VM handle.
4240 * @thread EMT
4241 */
4242VMMR3DECL(RTTHREAD) VMR3GetVMCPUThread(PUVM pUVM)
4243{
4244 PUVMCPU pUVCpu = (PUVMCPU)RTTlsGet(pUVM->vm.s.idxTLS);
4245
4246 if (!pUVCpu)
4247 return NIL_RTTHREAD;
4248
4249 return pUVCpu->vm.s.ThreadEMT;
4250}
4251
4252
4253/**
4254 * Returns the handle of the current EMT VMCPU thread.
4255 *
4256 * @returns The IPRT thread handle.
4257 * @param pUVCpu The user mode CPU handle.
4258 * @thread EMT
4259 */
4260VMMR3_INT_DECL(RTTHREAD) VMR3GetThreadHandle(PUVMCPU pUVCpu)
4261{
4262 return pUVCpu->vm.s.ThreadEMT;
4263}
4264
4265
4266/**
4267 * Return the package and core ID of a CPU.
4268 *
4269 * @returns VBox status code.
4270 * @param pUVM The user mode VM handle.
4271 * @param idCpu Virtual CPU to get the ID from.
4272 * @param pidCpuCore Where to store the core ID of the virtual CPU.
4273 * @param pidCpuPackage Where to store the package ID of the virtual CPU.
4274 *
4275 */
4276VMMR3DECL(int) VMR3GetCpuCoreAndPackageIdFromCpuId(PUVM pUVM, VMCPUID idCpu, uint32_t *pidCpuCore, uint32_t *pidCpuPackage)
4277{
4278 /*
4279 * Validate input.
4280 */
4281 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
4282 PVM pVM = pUVM->pVM;
4283 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
4284 AssertPtrReturn(pidCpuCore, VERR_INVALID_POINTER);
4285 AssertPtrReturn(pidCpuPackage, VERR_INVALID_POINTER);
4286 if (idCpu >= pVM->cCpus)
4287 return VERR_INVALID_CPU_ID;
4288
4289 /*
4290 * Set return values.
4291 */
4292#ifdef VBOX_WITH_MULTI_CORE
4293 *pidCpuCore = idCpu;
4294 *pidCpuPackage = 0;
4295#else
4296 *pidCpuCore = 0;
4297 *pidCpuPackage = idCpu;
4298#endif
4299
4300 return VINF_SUCCESS;
4301}
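
/*
 * Usage sketch: dump the guest CPU topology.  Iterating up to pVM->cCpus and
 * the log format are examples.
 *
 * @code
 *      for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
 *      {
 *          uint32_t idCore, idPackage;
 *          int rc = VMR3GetCpuCoreAndPackageIdFromCpuId(pUVM, idCpu, &idCore, &idPackage);
 *          if (RT_SUCCESS(rc))
 *              LogRel(("vCPU %u: core %u, package %u\n", idCpu, idCore, idPackage));
 *      }
 * @endcode
 */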
4302
4303
4304/**
4305 * Worker for VMR3HotUnplugCpu.
4306 *
4307 * @returns VINF_EM_WAIT_SIPI (strict status code).
4308 * @param pVM The cross context VM structure.
4309 * @param idCpu The current CPU.
4310 */
4311static DECLCALLBACK(int) vmR3HotUnplugCpu(PVM pVM, VMCPUID idCpu)
4312{
4313 PVMCPU pVCpu = VMMGetCpuById(pVM, idCpu);
4314 VMCPU_ASSERT_EMT(pVCpu);
4315
4316 /*
4317 * Reset per CPU resources.
4318 *
4319 * Actually only needed for VT-x because the CPU seems to be still in some
4320 * paged mode and startup fails after a new hot plug event. SVM works fine
4321 * even without this.
4322 */
4323 Log(("vmR3HotUnplugCpu for VCPU %u\n", idCpu));
4324 PGMR3ResetCpu(pVM, pVCpu);
4325 PDMR3ResetCpu(pVCpu);
4326 TRPMR3ResetCpu(pVCpu);
4327 CPUMR3ResetCpu(pVM, pVCpu);
4328 EMR3ResetCpu(pVCpu);
4329 HMR3ResetCpu(pVCpu);
4330 NEMR3ResetCpu(pVCpu, false /*fInitIpi*/);
4331 return VINF_EM_WAIT_SIPI;
4332}
4333
4334
4335/**
4336 * Hot-unplugs a CPU from the guest.
4337 *
4338 * @returns VBox status code.
4339 * @param pUVM The user mode VM handle.
4340 * @param idCpu Virtual CPU to perform the hot unplugging operation on.
4341 */
4342VMMR3DECL(int) VMR3HotUnplugCpu(PUVM pUVM, VMCPUID idCpu)
4343{
4344 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
4345 PVM pVM = pUVM->pVM;
4346 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
4347 AssertReturn(idCpu < pVM->cCpus, VERR_INVALID_CPU_ID);
4348
4349 /** @todo r=bird: Don't destroy the EMT, it'll break VMMR3EmtRendezvous and
4350 * broadcast requests. Just note down somewhere that the CPU is
4351 * offline and send it to SIPI wait. Maybe modify VMCPUSTATE and push
4352 * it out of the EM loops when offline. */
4353 return VMR3ReqCallNoWaitU(pUVM, idCpu, (PFNRT)vmR3HotUnplugCpu, 2, pVM, idCpu);
4354}
4355
4356
4357/**
4358 * Hot-plugs a CPU on the guest.
4359 *
4360 * @returns VBox status code.
4361 * @param pUVM The user mode VM handle.
4362 * @param idCpu Virtual CPU to perform the hot plugging operation on.
4363 */
4364VMMR3DECL(int) VMR3HotPlugCpu(PUVM pUVM, VMCPUID idCpu)
4365{
4366 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
4367 PVM pVM = pUVM->pVM;
4368 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
4369 AssertReturn(idCpu < pVM->cCpus, VERR_INVALID_CPU_ID);
4370
4371 /** @todo r=bird: Just mark it online and make sure it waits on SIPI. */
4372 return VINF_SUCCESS;
4373}
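
/*
 * Hot plug sketch (illustrative): take a vCPU offline and bring it back.
 * Both entry points validate idCpu against pVM->cCpus.
 *
 * @code
 *      int rc = VMR3HotUnplugCpu(pUVM, 1);  // vCPU 1 ends up in SIPI wait.
 *      ...
 *      rc = VMR3HotPlugCpu(pUVM, 1);        // See the todo note above: currently a validated no-op.
 * @endcode
 */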
4374
4375
4376/**
4377 * Changes the VMM execution cap.
4378 *
4379 * @returns VBox status code.
4380 * @param pUVM The user mode VM structure.
4381 * @param uCpuExecutionCap New CPU execution cap in percent, 1-100. Where
4382 * 100 is max performance (default).
4383 */
4384VMMR3DECL(int) VMR3SetCpuExecutionCap(PUVM pUVM, uint32_t uCpuExecutionCap)
4385{
4386 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
4387 PVM pVM = pUVM->pVM;
4388 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
4389 AssertReturn(uCpuExecutionCap > 0 && uCpuExecutionCap <= 100, VERR_INVALID_PARAMETER);
4390
4391 Log(("VMR3SetCpuExecutionCap: new priority = %d\n", uCpuExecutionCap));
4392 /* Note: not called from EMT. */
4393 pVM->uCpuExecutionCap = uCpuExecutionCap;
4394 return VINF_SUCCESS;
4395}
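
/*
 * Usage sketch: throttle the guest to roughly half of the host CPU time and
 * later restore full speed (the values are examples; the valid range is 1-100).
 *
 * @code
 *      int rc = VMR3SetCpuExecutionCap(pUVM, 50);
 *      ...
 *      rc = VMR3SetCpuExecutionCap(pUVM, 100);
 * @endcode
 */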
4396
4397
4398/**
4399 * Control whether the VM should power off when resetting.
4400 *
4401 * @returns VBox status code.
4402 * @param pUVM The user mode VM handle.
4403 * @param fPowerOffInsteadOfReset Flag whether the VM should power off when
4404 * resetting.
4405 */
4406VMMR3DECL(int) VMR3SetPowerOffInsteadOfReset(PUVM pUVM, bool fPowerOffInsteadOfReset)
4407{
4408 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
4409 PVM pVM = pUVM->pVM;
4410 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
4411
4412 /* Note: not called from EMT. */
4413 pVM->vm.s.fPowerOffInsteadOfReset = fPowerOffInsteadOfReset;
4414 return VINF_SUCCESS;
4415}
4416