VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/VM.cpp@86523

Last change on this file since 86523 was 86327, checked in by vboxsync, 4 years ago

Debugger: Allow for different I/O providers instead of only TCP

So far TCP was the only option for communicating remotely with the internal debugger; the
other option was to use the console from the GUI directly. This commit reworks the basic
I/O layer to allow for different providers, of which TCP is just one. The second provider
being introduced is an IPC provider using a local socket or a named pipe, depending on the
platform. This allows for Windows kernel debugging over a pipe using the KD stub in
VirtualBox with WinDbg running on the host (not tested yet).

Furthermore, this commit allows multiple stubs to listen for connections at the same time,
so one can have a GDB stub listening on one TCP port and the native VBox debugger listening
on another, or even on a different I/O provider. Only one session can be active at a time,
though, because sharing debugger state is impossible. To configure this, the following CFGM
keys need to be set for each listener:

"DBGC/<Some unique ID>/Provider" "tcp|ipc"
"DBGC/<Some unique ID>/StubType" "native|gdb|kd"
"DBGC/<Some unique ID>/Address" "<ip>|<local named pipe or socket path>"
"DBGC/<Some unique ID>/Port" "<port>" (for TCP only)

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 158.5 KB
1/* $Id: VM.cpp 86327 2020-09-28 16:20:50Z vboxsync $ */
2/** @file
3 * VM - Virtual Machine
4 */
5
6/*
7 * Copyright (C) 2006-2020 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/** @page pg_vm VM API
19 *
20 * This is the encapsulating bit. It provides the APIs that Main and VBoxBFE
21 * use to create a VMM instance for running a guest in. It also provides
22 * facilities for queuing requests for execution in EMT (serialization purposes
23 * mostly) and for reporting errors back to the VMM user (Main/VBoxBFE).
24 *
25 *
26 * @section sec_vm_design Design Critique / Things To Do
27 *
28 * In hindsight this component is a big design mistake; all this stuff really
29 * belongs in the VMM component. It just seemed like a kind of ok idea at a
30 * time when the VMM bit was kind of vague. 'VM' also happened to be the name
31 * of the per-VM instance structure (see vm.h), so it kind of made sense.
32 * However, as it turned out, VMM(.cpp) is almost empty; all it provides in
33 * ring-3 is some minor functionality and some "routing" services.
34 *
35 * Fixing this is just a matter of some more or less straightforward
36 * refactoring; the question is just when someone will get to it. Moving the EMT
37 * would be a good start.
38 *
39 */
40
41
42/*********************************************************************************************************************************
43* Header Files *
44*********************************************************************************************************************************/
45#define LOG_GROUP LOG_GROUP_VM
46#include <VBox/vmm/cfgm.h>
47#include <VBox/vmm/vmm.h>
48#include <VBox/vmm/gvmm.h>
49#include <VBox/vmm/mm.h>
50#include <VBox/vmm/cpum.h>
51#include <VBox/vmm/selm.h>
52#include <VBox/vmm/trpm.h>
53#include <VBox/vmm/dbgf.h>
54#include <VBox/vmm/pgm.h>
55#include <VBox/vmm/pdmapi.h>
56#include <VBox/vmm/pdmdev.h>
57#include <VBox/vmm/pdmcritsect.h>
58#include <VBox/vmm/em.h>
59#include <VBox/vmm/iem.h>
60#include <VBox/vmm/nem.h>
61#include <VBox/vmm/apic.h>
62#include <VBox/vmm/tm.h>
63#include <VBox/vmm/stam.h>
64#include <VBox/vmm/iom.h>
65#include <VBox/vmm/ssm.h>
66#include <VBox/vmm/hm.h>
67#include <VBox/vmm/gim.h>
68#include "VMInternal.h"
69#include <VBox/vmm/vmcc.h>
70
71#include <VBox/sup.h>
72#if defined(VBOX_WITH_DTRACE_R3) && !defined(VBOX_WITH_NATIVE_DTRACE)
73# include <VBox/VBoxTpG.h>
74#endif
75#include <VBox/dbg.h>
76#include <VBox/err.h>
77#include <VBox/param.h>
78#include <VBox/log.h>
79#include <iprt/assert.h>
80#include <iprt/alloca.h>
81#include <iprt/asm.h>
82#include <iprt/env.h>
83#include <iprt/mem.h>
84#include <iprt/semaphore.h>
85#include <iprt/string.h>
86#include <iprt/time.h>
87#include <iprt/thread.h>
88#include <iprt/uuid.h>
89
90
91/*********************************************************************************************************************************
92* Internal Functions *
93*********************************************************************************************************************************/
94static int vmR3CreateUVM(uint32_t cCpus, PCVMM2USERMETHODS pVmm2UserMethods, PUVM *ppUVM);
95static DECLCALLBACK(int) vmR3CreateU(PUVM pUVM, uint32_t cCpus, PFNCFGMCONSTRUCTOR pfnCFGMConstructor, void *pvUserCFGM);
96static int vmR3ReadBaseConfig(PVM pVM, PUVM pUVM, uint32_t cCpus);
97static int vmR3InitRing3(PVM pVM, PUVM pUVM);
98static int vmR3InitRing0(PVM pVM);
99static int vmR3InitDoCompleted(PVM pVM, VMINITCOMPLETED enmWhat);
100static void vmR3DestroyUVM(PUVM pUVM, uint32_t cMilliesEMTWait);
101static bool vmR3ValidateStateTransition(VMSTATE enmStateOld, VMSTATE enmStateNew);
102static void vmR3DoAtState(PVM pVM, PUVM pUVM, VMSTATE enmStateNew, VMSTATE enmStateOld);
103static int vmR3TrySetState(PVM pVM, const char *pszWho, unsigned cTransitions, ...);
104static void vmR3SetStateLocked(PVM pVM, PUVM pUVM, VMSTATE enmStateNew, VMSTATE enmStateOld, bool fSetRatherThanClearFF);
105static void vmR3SetState(PVM pVM, VMSTATE enmStateNew, VMSTATE enmStateOld);
106static int vmR3SetErrorU(PUVM pUVM, int rc, RT_SRC_POS_DECL, const char *pszFormat, ...) RT_IPRT_FORMAT_ATTR(6, 7);
107
108
109/**
110 * Creates a virtual machine by calling the supplied configuration constructor.
111 *
112 * On successful return the VM is created but not powered on, i.e.
113 * VMR3PowerOn() should be called to start execution.
114 *
115 * @returns 0 on success.
116 * @returns VBox error code on failure.
117 * @param cCpus Number of virtual CPUs for the new VM.
118 * @param pVmm2UserMethods An optional method table that the VMM can use
119 * to make the user perform various actions, like
120 * for instance state saving.
121 * @param pfnVMAtError Pointer to callback function for setting VM
122 * errors. This was added as an implicit call to
123 * VMR3AtErrorRegister() since there is no way the
124 * caller can get to the VM handle early enough to
125 * do this on its own.
126 * This is called in the context of an EMT.
127 * @param pvUserVM The user argument passed to pfnVMAtError.
128 * @param pfnCFGMConstructor Pointer to callback function for constructing the VM configuration tree.
129 * This is called in the context of EMT(0).
130 * @param pvUserCFGM The user argument passed to pfnCFGMConstructor.
131 * @param ppVM Where to optionally store the 'handle' of the
132 * created VM.
133 * @param ppUVM Where to optionally store the user 'handle' of
134 * the created VM, this includes one reference as
135 * if VMR3RetainUVM() was called. The caller
136 * *MUST* remember to pass the returned value to
137 * VMR3ReleaseUVM() once done with the handle.
138 */
139VMMR3DECL(int) VMR3Create(uint32_t cCpus, PCVMM2USERMETHODS pVmm2UserMethods,
140 PFNVMATERROR pfnVMAtError, void *pvUserVM,
141 PFNCFGMCONSTRUCTOR pfnCFGMConstructor, void *pvUserCFGM,
142 PVM *ppVM, PUVM *ppUVM)
143{
144 LogFlow(("VMR3Create: cCpus=%RU32 pVmm2UserMethods=%p pfnVMAtError=%p pvUserVM=%p pfnCFGMConstructor=%p pvUserCFGM=%p ppVM=%p ppUVM=%p\n",
145 cCpus, pVmm2UserMethods, pfnVMAtError, pvUserVM, pfnCFGMConstructor, pvUserCFGM, ppVM, ppUVM));
146
147 if (pVmm2UserMethods)
148 {
149 AssertPtrReturn(pVmm2UserMethods, VERR_INVALID_POINTER);
150 AssertReturn(pVmm2UserMethods->u32Magic == VMM2USERMETHODS_MAGIC, VERR_INVALID_PARAMETER);
151 AssertReturn(pVmm2UserMethods->u32Version == VMM2USERMETHODS_VERSION, VERR_INVALID_PARAMETER);
152 AssertPtrNullReturn(pVmm2UserMethods->pfnSaveState, VERR_INVALID_POINTER);
153 AssertPtrNullReturn(pVmm2UserMethods->pfnNotifyEmtInit, VERR_INVALID_POINTER);
154 AssertPtrNullReturn(pVmm2UserMethods->pfnNotifyEmtTerm, VERR_INVALID_POINTER);
155 AssertPtrNullReturn(pVmm2UserMethods->pfnNotifyPdmtInit, VERR_INVALID_POINTER);
156 AssertPtrNullReturn(pVmm2UserMethods->pfnNotifyPdmtTerm, VERR_INVALID_POINTER);
157 AssertPtrNullReturn(pVmm2UserMethods->pfnNotifyResetTurnedIntoPowerOff, VERR_INVALID_POINTER);
158 AssertReturn(pVmm2UserMethods->u32EndMagic == VMM2USERMETHODS_MAGIC, VERR_INVALID_PARAMETER);
159 }
160 AssertPtrNullReturn(pfnVMAtError, VERR_INVALID_POINTER);
161 AssertPtrNullReturn(pfnCFGMConstructor, VERR_INVALID_POINTER);
162 AssertPtrNullReturn(ppVM, VERR_INVALID_POINTER);
163 AssertPtrNullReturn(ppUVM, VERR_INVALID_POINTER);
164 AssertReturn(ppVM || ppUVM, VERR_INVALID_PARAMETER);
165
166 /*
167 * Validate input.
168 */
169 AssertLogRelMsgReturn(cCpus > 0 && cCpus <= VMM_MAX_CPU_COUNT, ("%RU32\n", cCpus), VERR_TOO_MANY_CPUS);
170
171 /*
172 * Create the UVM so we can register the at-error callback
173 * and consolidate a bit of cleanup code.
174 */
175 PUVM pUVM = NULL; /* shuts up gcc */
176 int rc = vmR3CreateUVM(cCpus, pVmm2UserMethods, &pUVM);
177 if (RT_FAILURE(rc))
178 return rc;
179 if (pfnVMAtError)
180 rc = VMR3AtErrorRegister(pUVM, pfnVMAtError, pvUserVM);
181 if (RT_SUCCESS(rc))
182 {
183 /*
184 * Initialize the support library creating the session for this VM.
185 */
186 rc = SUPR3Init(&pUVM->vm.s.pSession);
187 if (RT_SUCCESS(rc))
188 {
189#if defined(VBOX_WITH_DTRACE_R3) && !defined(VBOX_WITH_NATIVE_DTRACE)
190 /* Now that we've opened the device, we can register trace probes. */
191 static bool s_fRegisteredProbes = false;
192 if (ASMAtomicCmpXchgBool(&s_fRegisteredProbes, true, false))
193 SUPR3TracerRegisterModule(~(uintptr_t)0, "VBoxVMM", &g_VTGObjHeader, (uintptr_t)&g_VTGObjHeader,
194 SUP_TRACER_UMOD_FLAGS_SHARED);
195#endif
196
197 /*
198 * Call vmR3CreateU in the EMT thread and wait for it to finish.
199 *
200 * Note! VMCPUID_ANY is used here because VMR3ReqQueueU would have trouble
201 * submitting a request to a specific VCPU without a pVM. So, to make
202 * sure init is running on EMT(0), vmR3EmulationThreadWithId makes sure
203 * that only EMT(0) is servicing VMCPUID_ANY requests when pVM is NULL.
204 */
205 PVMREQ pReq;
206 rc = VMR3ReqCallU(pUVM, VMCPUID_ANY, &pReq, RT_INDEFINITE_WAIT, VMREQFLAGS_VBOX_STATUS,
207 (PFNRT)vmR3CreateU, 4, pUVM, cCpus, pfnCFGMConstructor, pvUserCFGM);
208 if (RT_SUCCESS(rc))
209 {
210 rc = pReq->iStatus;
211 VMR3ReqFree(pReq);
212 if (RT_SUCCESS(rc))
213 {
214 /*
215 * Success!
216 */
217 if (ppVM)
218 *ppVM = pUVM->pVM;
219 if (ppUVM)
220 {
221 VMR3RetainUVM(pUVM);
222 *ppUVM = pUVM;
223 }
224 LogFlow(("VMR3Create: returns VINF_SUCCESS (pVM=%p, pUVM=%p\n", pUVM->pVM, pUVM));
225 return VINF_SUCCESS;
226 }
227 }
228 else
229 AssertMsgFailed(("VMR3ReqCallU failed rc=%Rrc\n", rc));
230
231 /*
232 * An error occurred during VM creation. Set the error message directly
233 * using the initial callback, as the callback list might not exist yet.
234 */
235 const char *pszError;
236 switch (rc)
237 {
238 case VERR_VMX_IN_VMX_ROOT_MODE:
239#ifdef RT_OS_LINUX
240 pszError = N_("VirtualBox can't operate in VMX root mode. "
241 "Please disable the KVM kernel extension, recompile your kernel and reboot");
242#else
243 pszError = N_("VirtualBox can't operate in VMX root mode. Please close all other virtualization programs.");
244#endif
245 break;
246
247#ifndef RT_OS_DARWIN
248 case VERR_HM_CONFIG_MISMATCH:
249 pszError = N_("VT-x/AMD-V is either not available on your host or disabled. "
250 "This hardware extension is required by the VM configuration");
251 break;
252#endif
253
254 case VERR_SVM_IN_USE:
255#ifdef RT_OS_LINUX
256 pszError = N_("VirtualBox can't enable the AMD-V extension. "
257 "Please disable the KVM kernel extension, recompile your kernel and reboot");
258#else
259 pszError = N_("VirtualBox can't enable the AMD-V extension. Please close all other virtualization programs.");
260#endif
261 break;
262
263#ifdef RT_OS_LINUX
264 case VERR_SUPDRV_COMPONENT_NOT_FOUND:
265 pszError = N_("One of the kernel modules was not successfully loaded. Make sure "
266 "that VirtualBox is correctly installed, and if you are using EFI "
267 "Secure Boot that the modules are signed if necessary in the right "
268 "way for your host system. Then try to recompile and reload the "
269 "kernel modules by executing "
270 "'/sbin/vboxconfig' as root");
271 break;
272#endif
273
274 case VERR_RAW_MODE_INVALID_SMP:
275 pszError = N_("VT-x/AMD-V is either not available on your host or disabled. "
276 "VirtualBox requires this hardware extension to emulate more than one "
277 "guest CPU");
278 break;
279
280 case VERR_SUPDRV_KERNEL_TOO_OLD_FOR_VTX:
281#ifdef RT_OS_LINUX
282 pszError = N_("Because the host kernel is too old, VirtualBox cannot enable the VT-x "
283 "extension. Either upgrade your kernel to Linux 2.6.13 or later or disable "
284 "the VT-x extension in the VM settings. Note that without VT-x you have "
285 "to reduce the number of guest CPUs to one");
286#else
287 pszError = N_("Because the host kernel is too old, VirtualBox cannot enable the VT-x "
288 "extension. Either upgrade your kernel or disable the VT-x extension in the "
289 "VM settings. Note that without VT-x you have to reduce the number of guest "
290 "CPUs to one");
291#endif
292 break;
293
294 case VERR_PDM_DEVICE_NOT_FOUND:
295 pszError = N_("A virtual device is configured in the VM settings but the device "
296 "implementation is missing.\n"
297 "A possible reason for this error is a missing extension pack. Note "
298 "that as of VirtualBox 4.0, certain features (for example USB 2.0 "
299 "support and remote desktop) are only available from an 'extension "
300 "pack' which must be downloaded and installed separately");
301 break;
302
303 case VERR_PCI_PASSTHROUGH_NO_HM:
304 pszError = N_("PCI passthrough requires VT-x/AMD-V");
305 break;
306
307 case VERR_PCI_PASSTHROUGH_NO_NESTED_PAGING:
308 pszError = N_("PCI passthrough requires nested paging");
309 break;
310
311 default:
312 if (VMR3GetErrorCount(pUVM) == 0)
313 {
314 pszError = (char *)alloca(1024);
315 RTErrQueryMsgFull(rc, (char *)pszError, 1024, false /*fFailIfUnknown*/);
316 }
317 else
318 pszError = NULL; /* already set. */
319 break;
320 }
321 if (pszError)
322 vmR3SetErrorU(pUVM, rc, RT_SRC_POS, pszError, rc);
323 }
324 else
325 {
326 /*
327 * An error occurred at support library initialization time (before the
328 * VM could be created). Set the error message directly using the
329 * initial callback, as the callback list doesn't exist yet.
330 */
331 const char *pszError;
332 switch (rc)
333 {
334 case VERR_VM_DRIVER_LOAD_ERROR:
335#ifdef RT_OS_LINUX
336 pszError = N_("VirtualBox kernel driver not loaded. The vboxdrv kernel module "
337 "was either not loaded, /dev/vboxdrv is not set up properly, "
338 "or you are using EFI Secure Boot and the module is not signed "
339 "in the right way for your system. If necessary, try setting up "
340 "the kernel module again by executing "
341 "'/sbin/vboxconfig' as root");
342#else
343 pszError = N_("VirtualBox kernel driver not loaded");
344#endif
345 break;
346 case VERR_VM_DRIVER_OPEN_ERROR:
347 pszError = N_("VirtualBox kernel driver cannot be opened");
348 break;
349 case VERR_VM_DRIVER_NOT_ACCESSIBLE:
350#ifdef VBOX_WITH_HARDENING
351 /* This should only happen if the executable wasn't hardened - bad code/build. */
352 pszError = N_("VirtualBox kernel driver not accessible, permission problem. "
353 "Re-install VirtualBox. If you are building it yourself, you "
354 "should make sure it installed correctly and that the setuid "
355 "bit is set on the executables calling VMR3Create.");
356#else
357 /* This should only happen when mixing builds or with the usual /dev/vboxdrv access issues. */
358# if defined(RT_OS_DARWIN)
359 pszError = N_("VirtualBox KEXT is not accessible, permission problem. "
360 "If you have built VirtualBox yourself, make sure that you do not "
361 "have the vboxdrv KEXT from a different build or installation loaded.");
362# elif defined(RT_OS_LINUX)
363 pszError = N_("VirtualBox kernel driver is not accessible, permission problem. "
364 "If you have built VirtualBox yourself, make sure that you do "
365 "not have the vboxdrv kernel module from a different build or "
366 "installation loaded. Also, make sure the vboxdrv udev rule gives "
367 "you the permission you need to access the device.");
368# elif defined(RT_OS_WINDOWS)
369 pszError = N_("VirtualBox kernel driver is not accessible, permission problem.");
370# else /* solaris, freebsd, ++. */
371 pszError = N_("VirtualBox kernel module is not accessible, permission problem. "
372 "If you have built VirtualBox yourself, make sure that you do "
373 "not have the vboxdrv kernel module from a different install loaded.");
374# endif
375#endif
376 break;
377 case VERR_INVALID_HANDLE: /** @todo track down and fix this error. */
378 case VERR_VM_DRIVER_NOT_INSTALLED:
379#ifdef RT_OS_LINUX
380 pszError = N_("VirtualBox kernel driver not Installed. The vboxdrv kernel module "
381 "was either not loaded, /dev/vboxdrv is not set up properly, "
382 "or you are using EFI Secure Boot and the module is not signed "
383 "in the right way for your system. If necessary, try setting up "
384 "the kernel module again by executing "
385 "'/sbin/vboxconfig' as root");
386#else
387 pszError = N_("VirtualBox kernel driver not installed");
388#endif
389 break;
390 case VERR_NO_MEMORY:
391 pszError = N_("VirtualBox support library out of memory");
392 break;
393 case VERR_VERSION_MISMATCH:
394 case VERR_VM_DRIVER_VERSION_MISMATCH:
395 pszError = N_("The VirtualBox support driver which is running is from a different "
396 "version of VirtualBox. You can correct this by stopping all "
397 "running instances of VirtualBox and reinstalling the software.");
398 break;
399 default:
400 pszError = N_("Unknown error initializing kernel driver");
401 AssertMsgFailed(("Add error message for rc=%d (%Rrc)\n", rc, rc));
402 }
403 vmR3SetErrorU(pUVM, rc, RT_SRC_POS, pszError, rc);
404 }
405 }
406
407 /* cleanup */
408 vmR3DestroyUVM(pUVM, 2000);
409 LogFlow(("VMR3Create: returns %Rrc\n", rc));
410 return rc;
411}
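
/*
 * Illustrative sketch: how a VMM user (e.g. Main) might drive VMR3Create.
 * The configuration constructor is a hypothetical placeholder and the
 * PFNCFGMCONSTRUCTOR signature is assumed from this tree's CFGM API;
 * error handling is trimmed.
 */
#if 0
static DECLCALLBACK(int) exampleCfgmConstructor(PUVM pUVM, PVM pVM, void *pvUser)
{
    RT_NOREF(pUVM, pVM, pvUser);
    return VINF_SUCCESS; /* Accept the default configuration tree. */
}

static int exampleCreateAndStart(void)
{
    PUVM pUVM = NULL;
    int rc = VMR3Create(1 /*cCpus*/, NULL /*pVmm2UserMethods*/,
                        NULL /*pfnVMAtError*/, NULL /*pvUserVM*/,
                        exampleCfgmConstructor, NULL /*pvUserCFGM*/,
                        NULL /*ppVM*/, &pUVM);
    if (RT_SUCCESS(rc))
    {
        rc = VMR3PowerOn(pUVM); /* The VM stays in the 'Created' state until powered on. */
        VMR3ReleaseUVM(pUVM);   /* Drop the reference VMR3Create added, once done with the handle. */
    }
    return rc;
}
#endif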
412
413
414/**
415 * Creates the UVM.
416 *
417 * This will not initialize the support library, even though vmR3DestroyUVM
418 * will terminate it.
419 *
420 * @returns VBox status code.
421 * @param cCpus Number of virtual CPUs
422 * @param pVmm2UserMethods Pointer to the optional VMM -> User method
423 * table.
424 * @param ppUVM Where to store the UVM pointer.
425 */
426static int vmR3CreateUVM(uint32_t cCpus, PCVMM2USERMETHODS pVmm2UserMethods, PUVM *ppUVM)
427{
428 uint32_t i;
429
430 /*
431 * Create and initialize the UVM.
432 */
433 PUVM pUVM = (PUVM)RTMemPageAllocZ(RT_UOFFSETOF_DYN(UVM, aCpus[cCpus]));
434 AssertReturn(pUVM, VERR_NO_MEMORY);
435 pUVM->u32Magic = UVM_MAGIC;
436 pUVM->cCpus = cCpus;
437 pUVM->pVmm2UserMethods = pVmm2UserMethods;
438
439 AssertCompile(sizeof(pUVM->vm.s) <= sizeof(pUVM->vm.padding));
440
441 pUVM->vm.s.cUvmRefs = 1;
442 pUVM->vm.s.ppAtStateNext = &pUVM->vm.s.pAtState;
443 pUVM->vm.s.ppAtErrorNext = &pUVM->vm.s.pAtError;
444 pUVM->vm.s.ppAtRuntimeErrorNext = &pUVM->vm.s.pAtRuntimeError;
445
446 pUVM->vm.s.enmHaltMethod = VMHALTMETHOD_BOOTSTRAP;
447 RTUuidClear(&pUVM->vm.s.Uuid);
448
449 /* Initialize the VMCPU array in the UVM. */
450 for (i = 0; i < cCpus; i++)
451 {
452 pUVM->aCpus[i].pUVM = pUVM;
453 pUVM->aCpus[i].idCpu = i;
454 }
455
456 /* Allocate a TLS entry to store the VMINTUSERPERVMCPU pointer. */
457 int rc = RTTlsAllocEx(&pUVM->vm.s.idxTLS, NULL);
458 AssertRC(rc);
459 if (RT_SUCCESS(rc))
460 {
461 /* Allocate a halt method event semaphore for each VCPU. */
462 for (i = 0; i < cCpus; i++)
463 pUVM->aCpus[i].vm.s.EventSemWait = NIL_RTSEMEVENT;
464 for (i = 0; i < cCpus; i++)
465 {
466 rc = RTSemEventCreate(&pUVM->aCpus[i].vm.s.EventSemWait);
467 if (RT_FAILURE(rc))
468 break;
469 }
470 if (RT_SUCCESS(rc))
471 {
472 rc = RTCritSectInit(&pUVM->vm.s.AtStateCritSect);
473 if (RT_SUCCESS(rc))
474 {
475 rc = RTCritSectInit(&pUVM->vm.s.AtErrorCritSect);
476 if (RT_SUCCESS(rc))
477 {
478 /*
479 * Init fundamental (sub-)components - STAM, MMR3Heap and PDMLdr.
480 */
481 rc = PDMR3InitUVM(pUVM);
482 if (RT_SUCCESS(rc))
483 {
484 rc = STAMR3InitUVM(pUVM);
485 if (RT_SUCCESS(rc))
486 {
487 rc = MMR3InitUVM(pUVM);
488 if (RT_SUCCESS(rc))
489 {
490 /*
491 * Start the emulation threads for all VMCPUs.
492 */
493 for (i = 0; i < cCpus; i++)
494 {
495 rc = RTThreadCreateF(&pUVM->aCpus[i].vm.s.ThreadEMT, vmR3EmulationThread, &pUVM->aCpus[i],
496 _1M, RTTHREADTYPE_EMULATION, RTTHREADFLAGS_WAITABLE,
497 cCpus > 1 ? "EMT-%u" : "EMT", i);
498 if (RT_FAILURE(rc))
499 break;
500
501 pUVM->aCpus[i].vm.s.NativeThreadEMT = RTThreadGetNative(pUVM->aCpus[i].vm.s.ThreadEMT);
502 }
503
504 if (RT_SUCCESS(rc))
505 {
506 *ppUVM = pUVM;
507 return VINF_SUCCESS;
508 }
509
510 /* bail out. */
511 while (i-- > 0)
512 {
513 /** @todo rainy day: terminate the EMTs. */
514 }
515 MMR3TermUVM(pUVM);
516 }
517 STAMR3TermUVM(pUVM);
518 }
519 PDMR3TermUVM(pUVM);
520 }
521 RTCritSectDelete(&pUVM->vm.s.AtErrorCritSect);
522 }
523 RTCritSectDelete(&pUVM->vm.s.AtStateCritSect);
524 }
525 }
526 for (i = 0; i < cCpus; i++)
527 {
528 RTSemEventDestroy(pUVM->aCpus[i].vm.s.EventSemWait);
529 pUVM->aCpus[i].vm.s.EventSemWait = NIL_RTSEMEVENT;
530 }
531 RTTlsFree(pUVM->vm.s.idxTLS);
532 }
533 RTMemPageFree(pUVM, RT_UOFFSETOF_DYN(UVM, aCpus[pUVM->cCpus]));
534 return rc;
535}
536
537
538/**
539 * Creates and initializes the VM.
540 *
541 * @thread EMT
542 */
543static DECLCALLBACK(int) vmR3CreateU(PUVM pUVM, uint32_t cCpus, PFNCFGMCONSTRUCTOR pfnCFGMConstructor, void *pvUserCFGM)
544{
545#if (defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)) && !defined(VBOX_WITH_OLD_CPU_SUPPORT)
546 /*
547 * Require SSE2 to be present (already checked for in supdrv, so we
548 * shouldn't ever really get here).
549 */
550 if (!(ASMCpuId_EDX(1) & X86_CPUID_FEATURE_EDX_SSE2))
551 {
552 LogRel(("vboxdrv: Requires SSE2 (cpuid(0).EDX=%#x)\n", ASMCpuId_EDX(1)));
553 return VERR_UNSUPPORTED_CPU;
554 }
555#endif
556
557 /*
558 * Load the VMMR0.r0 module so that we can call GVMMR0CreateVM.
559 */
560 int rc = PDMR3LdrLoadVMMR0U(pUVM);
561 if (RT_FAILURE(rc))
562 {
563 /** @todo we need a cleaner solution for this (VERR_VMX_IN_VMX_ROOT_MODE).
564 * bird: what about moving the message down here? Main picks the first message, right? */
565 if (rc == VERR_VMX_IN_VMX_ROOT_MODE)
566 return rc; /* proper error message set later on */
567 return vmR3SetErrorU(pUVM, rc, RT_SRC_POS, N_("Failed to load VMMR0.r0"));
568 }
569
570 /*
571 * Request GVMM to create a new VM for us.
572 */
573 GVMMCREATEVMREQ CreateVMReq;
574 CreateVMReq.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
575 CreateVMReq.Hdr.cbReq = sizeof(CreateVMReq);
576 CreateVMReq.pSession = pUVM->vm.s.pSession;
577 CreateVMReq.pVMR0 = NIL_RTR0PTR;
578 CreateVMReq.pVMR3 = NULL;
579 CreateVMReq.cCpus = cCpus;
580 rc = SUPR3CallVMMR0Ex(NIL_RTR0PTR, NIL_VMCPUID, VMMR0_DO_GVMM_CREATE_VM, 0, &CreateVMReq.Hdr);
581 if (RT_SUCCESS(rc))
582 {
583 PVM pVM = pUVM->pVM = CreateVMReq.pVMR3;
584 AssertRelease(VALID_PTR(pVM));
585 AssertRelease(pVM->pVMR0ForCall == CreateVMReq.pVMR0);
586 AssertRelease(pVM->pSession == pUVM->vm.s.pSession);
587 AssertRelease(pVM->cCpus == cCpus);
588 AssertRelease(pVM->uCpuExecutionCap == 100);
589 AssertCompileMemberAlignment(VM, cpum, 64);
590 AssertCompileMemberAlignment(VM, tm, 64);
591
592 Log(("VMR3Create: Created pUVM=%p pVM=%p pVMR0=%p hSelf=%#x cCpus=%RU32\n",
593 pUVM, pVM, CreateVMReq.pVMR0, pVM->hSelf, pVM->cCpus));
594
595 /*
596 * Initialize the VM structure and our internal data (VMINT).
597 */
598 pVM->pUVM = pUVM;
599
600 for (VMCPUID i = 0; i < pVM->cCpus; i++)
601 {
602 PVMCPU pVCpu = pVM->apCpusR3[i];
603 pVCpu->pUVCpu = &pUVM->aCpus[i];
604 pVCpu->idCpu = i;
605 pVCpu->hNativeThread = pUVM->aCpus[i].vm.s.NativeThreadEMT;
606 Assert(pVCpu->hNativeThread != NIL_RTNATIVETHREAD);
607 /* hNativeThreadR0 is initialized on EMT registration. */
608 pUVM->aCpus[i].pVCpu = pVCpu;
609 pUVM->aCpus[i].pVM = pVM;
610 }
611
612
613 /*
614 * Init the configuration.
615 */
616 rc = CFGMR3Init(pVM, pfnCFGMConstructor, pvUserCFGM);
617 if (RT_SUCCESS(rc))
618 {
619 rc = vmR3ReadBaseConfig(pVM, pUVM, cCpus);
620 if (RT_SUCCESS(rc))
621 {
622 /*
623 * Init the ring-3 components and ring-3 per cpu data, finishing it off
624 * by a relocation round (intermediate context finalization will do this).
625 */
626 rc = vmR3InitRing3(pVM, pUVM);
627 if (RT_SUCCESS(rc))
628 {
629#ifndef PGM_WITHOUT_MAPPINGS
630 rc = PGMR3FinalizeMappings(pVM);
631 if (RT_SUCCESS(rc))
632#endif
633 {
634
635 LogFlow(("Ring-3 init succeeded\n"));
636
637 /*
638 * Init the Ring-0 components.
639 */
640 rc = vmR3InitRing0(pVM);
641 if (RT_SUCCESS(rc))
642 {
643 /* Relocate again, because some switcher fixups depends on R0 init results. */
644 VMR3Relocate(pVM, 0 /* offDelta */);
645
646#ifdef VBOX_WITH_DEBUGGER
647 /*
648 * Init the tcp debugger console if we're building
649 * with debugger support.
650 */
651 void *pvUser = NULL;
652 rc = DBGCIoCreate(pUVM, &pvUser);
653 if ( RT_SUCCESS(rc)
654 || rc == VERR_NET_ADDRESS_IN_USE)
655 {
656 pUVM->vm.s.pvDBGC = pvUser;
657#endif
658 /*
659 * Now we can safely set the VM halt method to default.
660 */
661 rc = vmR3SetHaltMethodU(pUVM, VMHALTMETHOD_DEFAULT);
662 if (RT_SUCCESS(rc))
663 {
664 /*
665 * Set the state and we're done.
666 */
667 vmR3SetState(pVM, VMSTATE_CREATED, VMSTATE_CREATING);
668 return VINF_SUCCESS;
669 }
670#ifdef VBOX_WITH_DEBUGGER
671 DBGCIoTerminate(pUVM, pUVM->vm.s.pvDBGC);
672 pUVM->vm.s.pvDBGC = NULL;
673 }
674#endif
675 //..
676 }
677 }
678 vmR3Destroy(pVM);
679 }
680 }
681 //..
682
683 /* Clean CFGM. */
684 int rc2 = CFGMR3Term(pVM);
685 AssertRC(rc2);
686 }
687
688 /*
689 * Do automatic cleanups while the VM structure is still alive and all
690 * references to it are still working.
691 */
692 PDMR3CritSectBothTerm(pVM);
693
694 /*
695 * Drop all references to VM and the VMCPU structures, then
696 * tell GVMM to destroy the VM.
697 */
698 pUVM->pVM = NULL;
699 for (VMCPUID i = 0; i < pUVM->cCpus; i++)
700 {
701 pUVM->aCpus[i].pVM = NULL;
702 pUVM->aCpus[i].pVCpu = NULL;
703 }
704 Assert(pUVM->vm.s.enmHaltMethod == VMHALTMETHOD_BOOTSTRAP);
705
706 if (pUVM->cCpus > 1)
707 {
708 /* Poke the other EMTs since they may have stale pVM and pVCpu references
709 on the stack (see VMR3WaitU for instance) if they've been awakened after
710 VM creation. */
711 for (VMCPUID i = 1; i < pUVM->cCpus; i++)
712 VMR3NotifyCpuFFU(&pUVM->aCpus[i], 0);
713 RTThreadSleep(RT_MIN(100 + 25 *(pUVM->cCpus - 1), 500)); /* very sophisticated */
714 }
715
716 int rc2 = SUPR3CallVMMR0Ex(CreateVMReq.pVMR0, 0 /*idCpu*/, VMMR0_DO_GVMM_DESTROY_VM, 0, NULL);
717 AssertRC(rc2);
718 }
719 else
720 vmR3SetErrorU(pUVM, rc, RT_SRC_POS, N_("VM creation failed (GVMM)"));
721
722 LogFlow(("vmR3CreateU: returns %Rrc\n", rc));
723 return rc;
724}
725
726
727/**
728 * Reads the base configuration from CFGM.
729 *
730 * @returns VBox status code.
731 * @param pVM The cross context VM structure.
732 * @param pUVM The user mode VM structure.
733 * @param cCpus The CPU count given to VMR3Create.
734 */
735static int vmR3ReadBaseConfig(PVM pVM, PUVM pUVM, uint32_t cCpus)
736{
737 int rc;
738 PCFGMNODE pRoot = CFGMR3GetRoot(pVM);
739
740 /*
741 * Base EM and HM config properties.
742 */
743 pVM->fHMEnabled = true;
744
745 /*
746 * Make sure the CPU count in the config data matches.
747 */
748 uint32_t cCPUsCfg;
749 rc = CFGMR3QueryU32Def(pRoot, "NumCPUs", &cCPUsCfg, 1);
750 AssertLogRelMsgRCReturn(rc, ("Configuration error: Querying \"NumCPUs\" as integer failed, rc=%Rrc\n", rc), rc);
751 AssertLogRelMsgReturn(cCPUsCfg == cCpus,
752 ("Configuration error: \"NumCPUs\"=%RU32 and VMR3Create::cCpus=%RU32 does not match!\n",
753 cCPUsCfg, cCpus),
754 VERR_INVALID_PARAMETER);
755
756 /*
757 * Get the CPU execution cap.
758 */
759 rc = CFGMR3QueryU32Def(pRoot, "CpuExecutionCap", &pVM->uCpuExecutionCap, 100);
760 AssertLogRelMsgRCReturn(rc, ("Configuration error: Querying \"CpuExecutionCap\" as integer failed, rc=%Rrc\n", rc), rc);
761
762 /*
763 * Get the VM name and UUID.
764 */
765 rc = CFGMR3QueryStringAllocDef(pRoot, "Name", &pUVM->vm.s.pszName, "<unknown>");
766 AssertLogRelMsgRCReturn(rc, ("Configuration error: Querying \"Name\" failed, rc=%Rrc\n", rc), rc);
767
768 rc = CFGMR3QueryBytes(pRoot, "UUID", &pUVM->vm.s.Uuid, sizeof(pUVM->vm.s.Uuid));
769 if (rc == VERR_CFGM_VALUE_NOT_FOUND)
770 rc = VINF_SUCCESS;
771 AssertLogRelMsgRCReturn(rc, ("Configuration error: Querying \"UUID\" failed, rc=%Rrc\n", rc), rc);
772
773 rc = CFGMR3QueryBoolDef(pRoot, "PowerOffInsteadOfReset", &pVM->vm.s.fPowerOffInsteadOfReset, false);
774 AssertLogRelMsgRCReturn(rc, ("Configuration error: Querying \"PowerOffInsteadOfReset\" failed, rc=%Rrc\n", rc), rc);
775
776 return VINF_SUCCESS;
777}
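
/*
 * Illustrative sketch: a minimal CFGM constructor supplying the base keys
 * queried above. The key names match the CFGMR3Query* calls; the values are
 * arbitrary example choices.
 */
#if 0
static DECLCALLBACK(int) exampleBaseConfigConstructor(PUVM pUVM, PVM pVM, void *pvUser)
{
    RT_NOREF(pUVM, pvUser);
    PCFGMNODE pRoot = CFGMR3GetRoot(pVM);
    int rc = CFGMR3InsertInteger(pRoot, "NumCPUs", 2);               /* Must match VMR3Create::cCpus. */
    if (RT_SUCCESS(rc))
        rc = CFGMR3InsertInteger(pRoot, "CpuExecutionCap", 100);     /* Percent of host CPU time. */
    if (RT_SUCCESS(rc))
        rc = CFGMR3InsertString(pRoot, "Name", "example-vm");
    if (RT_SUCCESS(rc))
        rc = CFGMR3InsertInteger(pRoot, "PowerOffInsteadOfReset", 0);
    return rc;
}
#endif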
778
779
780/**
781 * Register the calling EMT with GVM.
782 *
783 * @returns VBox status code.
784 * @param pVM The cross context VM structure.
785 * @param idCpu The Virtual CPU ID.
786 */
787static DECLCALLBACK(int) vmR3RegisterEMT(PVM pVM, VMCPUID idCpu)
788{
789 Assert(VMMGetCpuId(pVM) == idCpu);
790 int rc = SUPR3CallVMMR0Ex(VMCC_GET_VMR0_FOR_CALL(pVM), idCpu, VMMR0_DO_GVMM_REGISTER_VMCPU, 0, NULL);
791 if (RT_FAILURE(rc))
792 LogRel(("idCpu=%u rc=%Rrc\n", idCpu, rc));
793 return rc;
794}
795
796
797/**
798 * Initializes all R3 components of the VM.
799 */
800static int vmR3InitRing3(PVM pVM, PUVM pUVM)
801{
802 int rc;
803
804 /*
805 * Register the other EMTs with GVM.
806 */
807 for (VMCPUID idCpu = 1; idCpu < pVM->cCpus; idCpu++)
808 {
809 rc = VMR3ReqCallWait(pVM, idCpu, (PFNRT)vmR3RegisterEMT, 2, pVM, idCpu);
810 if (RT_FAILURE(rc))
811 return rc;
812 }
813
814 /*
815 * Register statistics.
816 */
817 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
818 {
819 rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltYield, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "Profiling halted state yielding.", "/PROF/CPU%d/VM/Halt/Yield", idCpu);
820 AssertRC(rc);
821 rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltBlock, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "Profiling halted state blocking.", "/PROF/CPU%d/VM/Halt/Block", idCpu);
822 AssertRC(rc);
823 rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltBlockOverslept, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "Time wasted by blocking too long.", "/PROF/CPU%d/VM/Halt/BlockOverslept", idCpu);
824 AssertRC(rc);
826 rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltBlockInsomnia, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "Time slept when returning too early.", "/PROF/CPU%d/VM/Halt/BlockInsomnia", idCpu);
826 AssertRC(rc);
827 rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltBlockOnTime, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "Time slept on time.", "/PROF/CPU%d/VM/Halt/BlockOnTime", idCpu);
828 AssertRC(rc);
829 rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltTimers, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "Profiling halted state timer tasks.", "/PROF/CPU%d/VM/Halt/Timers", idCpu);
830 AssertRC(rc);
831 }
832
833 STAM_REG(pVM, &pUVM->vm.s.StatReqAllocNew, STAMTYPE_COUNTER, "/VM/Req/AllocNew", STAMUNIT_OCCURENCES, "Number of VMR3ReqAlloc returning a new packet.");
834 STAM_REG(pVM, &pUVM->vm.s.StatReqAllocRaces, STAMTYPE_COUNTER, "/VM/Req/AllocRaces", STAMUNIT_OCCURENCES, "Number of VMR3ReqAlloc causing races.");
835 STAM_REG(pVM, &pUVM->vm.s.StatReqAllocRecycled, STAMTYPE_COUNTER, "/VM/Req/AllocRecycled", STAMUNIT_OCCURENCES, "Number of VMR3ReqAlloc returning a recycled packet.");
836 STAM_REG(pVM, &pUVM->vm.s.StatReqFree, STAMTYPE_COUNTER, "/VM/Req/Free", STAMUNIT_OCCURENCES, "Number of VMR3ReqFree calls.");
837 STAM_REG(pVM, &pUVM->vm.s.StatReqFreeOverflow, STAMTYPE_COUNTER, "/VM/Req/FreeOverflow", STAMUNIT_OCCURENCES, "Number of times the request was actually freed.");
838 STAM_REG(pVM, &pUVM->vm.s.StatReqProcessed, STAMTYPE_COUNTER, "/VM/Req/Processed", STAMUNIT_OCCURENCES, "Number of processed requests (any queue).");
839 STAM_REG(pVM, &pUVM->vm.s.StatReqMoreThan1, STAMTYPE_COUNTER, "/VM/Req/MoreThan1", STAMUNIT_OCCURENCES, "Number of times there are more than one request on the queue when processing it.");
840 STAM_REG(pVM, &pUVM->vm.s.StatReqPushBackRaces, STAMTYPE_COUNTER, "/VM/Req/PushBackRaces", STAMUNIT_OCCURENCES, "Number of push back races.");
841
842 /* Statistics for ring-0 components: */
843 STAM_REL_REG(pVM, &pVM->R0Stats.gmm.cChunkTlbHits, STAMTYPE_COUNTER, "/GMM/ChunkTlbHits", STAMUNIT_OCCURENCES, "GMMR0PageIdToVirt chunk TLB hits");
844 STAM_REL_REG(pVM, &pVM->R0Stats.gmm.cChunkTlbMisses, STAMTYPE_COUNTER, "/GMM/ChunkTlbMisses", STAMUNIT_OCCURENCES, "GMMR0PageIdToVirt chunk TLB misses");
845
846 /*
847 * Init all R3 components, the order here might be important.
848 * NEM and HM shall be initialized first!
849 */
850 Assert(pVM->bMainExecutionEngine == VM_EXEC_ENGINE_NOT_SET);
851 rc = NEMR3InitConfig(pVM);
852 if (RT_SUCCESS(rc))
853 rc = HMR3Init(pVM);
854 if (RT_SUCCESS(rc))
855 {
856 ASMCompilerBarrier(); /* HMR3Init will have modified bMainExecutionEngine */
857 Assert( pVM->bMainExecutionEngine == VM_EXEC_ENGINE_HW_VIRT
858 || pVM->bMainExecutionEngine == VM_EXEC_ENGINE_NATIVE_API);
859 rc = MMR3Init(pVM);
860 if (RT_SUCCESS(rc))
861 {
862 rc = CPUMR3Init(pVM);
863 if (RT_SUCCESS(rc))
864 {
865 rc = NEMR3InitAfterCPUM(pVM);
866 if (RT_SUCCESS(rc))
867 rc = PGMR3Init(pVM);
868 if (RT_SUCCESS(rc))
869 {
870 rc = MMR3InitPaging(pVM);
871 if (RT_SUCCESS(rc))
872 rc = TMR3Init(pVM);
873 if (RT_SUCCESS(rc))
874 {
875 rc = VMMR3Init(pVM);
876 if (RT_SUCCESS(rc))
877 {
878 rc = SELMR3Init(pVM);
879 if (RT_SUCCESS(rc))
880 {
881 rc = TRPMR3Init(pVM);
882 if (RT_SUCCESS(rc))
883 {
884 rc = SSMR3RegisterStub(pVM, "CSAM", 0);
885 if (RT_SUCCESS(rc))
886 {
887 rc = SSMR3RegisterStub(pVM, "PATM", 0);
888 if (RT_SUCCESS(rc))
889 {
890 rc = IOMR3Init(pVM);
891 if (RT_SUCCESS(rc))
892 {
893 rc = EMR3Init(pVM);
894 if (RT_SUCCESS(rc))
895 {
896 rc = IEMR3Init(pVM);
897 if (RT_SUCCESS(rc))
898 {
899 rc = DBGFR3Init(pVM);
900 if (RT_SUCCESS(rc))
901 {
902 /* GIM must be init'd before PDM, gimdevR3Construct()
903 requires GIM provider to be setup. */
904 rc = GIMR3Init(pVM);
905 if (RT_SUCCESS(rc))
906 {
907 rc = PDMR3Init(pVM);
908 if (RT_SUCCESS(rc))
909 {
910 rc = PGMR3InitDynMap(pVM);
911 if (RT_SUCCESS(rc))
912 rc = MMR3HyperInitFinalize(pVM);
913 if (RT_SUCCESS(rc))
914 rc = PGMR3InitFinalize(pVM);
915 if (RT_SUCCESS(rc))
916 rc = TMR3InitFinalize(pVM);
917 if (RT_SUCCESS(rc))
918 {
919 PGMR3MemSetup(pVM, false /*fAtReset*/);
920 PDMR3MemSetup(pVM, false /*fAtReset*/);
921 }
922 if (RT_SUCCESS(rc))
923 rc = vmR3InitDoCompleted(pVM, VMINITCOMPLETED_RING3);
924 if (RT_SUCCESS(rc))
925 {
926 LogFlow(("vmR3InitRing3: returns %Rrc\n", VINF_SUCCESS));
927 return VINF_SUCCESS;
928 }
929
930 int rc2 = PDMR3Term(pVM);
931 AssertRC(rc2);
932 }
933 int rc2 = GIMR3Term(pVM);
934 AssertRC(rc2);
935 }
936 int rc2 = DBGFR3Term(pVM);
937 AssertRC(rc2);
938 }
939 int rc2 = IEMR3Term(pVM);
940 AssertRC(rc2);
941 }
942 int rc2 = EMR3Term(pVM);
943 AssertRC(rc2);
944 }
945 int rc2 = IOMR3Term(pVM);
946 AssertRC(rc2);
947 }
948 }
949 }
950 int rc2 = TRPMR3Term(pVM);
951 AssertRC(rc2);
952 }
953 int rc2 = SELMR3Term(pVM);
954 AssertRC(rc2);
955 }
956 int rc2 = VMMR3Term(pVM);
957 AssertRC(rc2);
958 }
959 int rc2 = TMR3Term(pVM);
960 AssertRC(rc2);
961 }
962 int rc2 = PGMR3Term(pVM);
963 AssertRC(rc2);
964 }
965 //int rc2 = CPUMR3Term(pVM);
966 //AssertRC(rc2);
967 }
968 /* MMR3Term is not called here because it'll kill the heap. */
969 }
970 int rc2 = HMR3Term(pVM);
971 AssertRC(rc2);
972 }
973 NEMR3Term(pVM);
974
975 LogFlow(("vmR3InitRing3: returns %Rrc\n", rc));
976 return rc;
977}
978
979
980/**
981 * Initializes all R0 components of the VM.
982 */
983static int vmR3InitRing0(PVM pVM)
984{
985 LogFlow(("vmR3InitRing0:\n"));
986
987 /*
988 * Check for FAKE suplib mode.
989 */
990 int rc = VINF_SUCCESS;
991 const char *psz = RTEnvGet("VBOX_SUPLIB_FAKE");
992 if (!psz || strcmp(psz, "fake"))
993 {
994 /*
995 * Call the VMMR0 component and let it do the init.
996 */
997 rc = VMMR3InitR0(pVM);
998 }
999 else
1000 Log(("vmR3InitRing0: skipping because of VBOX_SUPLIB_FAKE=fake\n"));
1001
1002 /*
1003 * Do notifications and return.
1004 */
1005 if (RT_SUCCESS(rc))
1006 rc = vmR3InitDoCompleted(pVM, VMINITCOMPLETED_RING0);
1007 if (RT_SUCCESS(rc))
1008 rc = vmR3InitDoCompleted(pVM, VMINITCOMPLETED_HM);
1009
1010 LogFlow(("vmR3InitRing0: returns %Rrc\n", rc));
1011 return rc;
1012}
1013
1014
1015/**
1016 * Do init completed notifications.
1017 *
1018 * @returns VBox status code.
1019 * @param pVM The cross context VM structure.
1020 * @param enmWhat What's completed.
1021 */
1022static int vmR3InitDoCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
1023{
1024 int rc = VMMR3InitCompleted(pVM, enmWhat);
1025 if (RT_SUCCESS(rc))
1026 rc = HMR3InitCompleted(pVM, enmWhat);
1027 if (RT_SUCCESS(rc))
1028 rc = NEMR3InitCompleted(pVM, enmWhat);
1029 if (RT_SUCCESS(rc))
1030 rc = PGMR3InitCompleted(pVM, enmWhat);
1031 if (RT_SUCCESS(rc))
1032 rc = CPUMR3InitCompleted(pVM, enmWhat);
1033 if (RT_SUCCESS(rc))
1034 rc = EMR3InitCompleted(pVM, enmWhat);
1035 if (enmWhat == VMINITCOMPLETED_RING3)
1036 {
1037 if (RT_SUCCESS(rc))
1038 rc = SSMR3RegisterStub(pVM, "rem", 1);
1039 }
1040 if (RT_SUCCESS(rc))
1041 rc = PDMR3InitCompleted(pVM, enmWhat);
1042
1043 /* IOM *must* come after PDM, as devices (e.g. DevPcArch) may register some
1044 final handlers in their init completion methods. */
1045 if (RT_SUCCESS(rc))
1046 rc = IOMR3InitCompleted(pVM, enmWhat);
1047 return rc;
1048}
1049
1050
1051/**
1052 * Calls the relocation functions for all VMM components so they can update
1053 * any GC pointers. When this function is called all the basic VM members
1054 * have been updated and the actual memory relocation has been done
1055 * by the PGM/MM.
1056 *
1057 * This is used both on init and on runtime relocations.
1058 *
1059 * @param pVM The cross context VM structure.
1060 * @param offDelta Relocation delta relative to old location.
1061 */
1062VMMR3_INT_DECL(void) VMR3Relocate(PVM pVM, RTGCINTPTR offDelta)
1063{
1064 LogFlow(("VMR3Relocate: offDelta=%RGv\n", offDelta));
1065
1066 /*
1067 * The order here is very important!
1068 */
1069 PGMR3Relocate(pVM, offDelta);
1070 PDMR3LdrRelocateU(pVM->pUVM, offDelta);
1071 PGMR3Relocate(pVM, 0); /* Repeat after PDM relocation. */
1072 CPUMR3Relocate(pVM);
1073 HMR3Relocate(pVM);
1074 SELMR3Relocate(pVM);
1075 VMMR3Relocate(pVM, offDelta);
1076 SELMR3Relocate(pVM); /* !hack! fix stack! */
1077 TRPMR3Relocate(pVM, offDelta);
1078 IOMR3Relocate(pVM, offDelta);
1079 EMR3Relocate(pVM);
1080 TMR3Relocate(pVM, offDelta);
1081 IEMR3Relocate(pVM);
1082 DBGFR3Relocate(pVM, offDelta);
1083 PDMR3Relocate(pVM, offDelta);
1084 GIMR3Relocate(pVM, offDelta);
1085}
1086
1087
1088/**
1089 * EMT rendezvous worker for VMR3PowerOn.
1090 *
1091 * @returns VERR_VM_INVALID_VM_STATE or VINF_SUCCESS. (This is a strict return
1092 * code, see FNVMMEMTRENDEZVOUS.)
1093 *
1094 * @param pVM The cross context VM structure.
1095 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1096 * @param pvUser Ignored.
1097 */
1098static DECLCALLBACK(VBOXSTRICTRC) vmR3PowerOn(PVM pVM, PVMCPU pVCpu, void *pvUser)
1099{
1100 LogFlow(("vmR3PowerOn: pVM=%p pVCpu=%p/#%u\n", pVM, pVCpu, pVCpu->idCpu));
1101 Assert(!pvUser); NOREF(pvUser);
1102
1103 /*
1104 * The first thread thru here tries to change the state. We shouldn't be
1105 * called again if this fails.
1106 */
1107 if (pVCpu->idCpu == pVM->cCpus - 1)
1108 {
1109 int rc = vmR3TrySetState(pVM, "VMR3PowerOn", 1, VMSTATE_POWERING_ON, VMSTATE_CREATED);
1110 if (RT_FAILURE(rc))
1111 return rc;
1112 }
1113
1114 VMSTATE enmVMState = VMR3GetState(pVM);
1115 AssertMsgReturn(enmVMState == VMSTATE_POWERING_ON,
1116 ("%s\n", VMR3GetStateName(enmVMState)),
1117 VERR_VM_UNEXPECTED_UNSTABLE_STATE);
1118
1119 /*
1120 * All EMTs change their state to started.
1121 */
1122 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
1123
1124 /*
1125 * EMT(0) is last thru here and it will make the notification calls
1126 * and advance the state.
1127 */
1128 if (pVCpu->idCpu == 0)
1129 {
1130 PDMR3PowerOn(pVM);
1131 vmR3SetState(pVM, VMSTATE_RUNNING, VMSTATE_POWERING_ON);
1132 }
1133
1134 return VINF_SUCCESS;
1135}
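
/*
 * Illustrative sketch: the descending EMT rendezvous pattern used by
 * vmR3PowerOn above (and by vmR3Suspend/vmR3Resume below). The highest
 * CPU id arrives first and attempts the state change; EMT(0) arrives
 * last and performs the actual work.
 */
#if 0
static DECLCALLBACK(VBOXSTRICTRC) exampleRendezvousWorker(PVM pVM, PVMCPU pVCpu, void *pvUser)
{
    RT_NOREF(pvUser);
    if (pVCpu->idCpu == pVM->cCpus - 1)
    {
        /* First thread thru: try the VMSTATE transition; on failure no
           further calls will be made. */
    }

    /* ... per-CPU work goes here ... */

    if (pVCpu->idCpu == 0)
    {
        /* Last thread thru: do the device notifications and advance the state. */
    }
    return VINF_SUCCESS;
}
#endif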
1136
1137
1138/**
1139 * Powers on the virtual machine.
1140 *
1141 * @returns VBox status code.
1142 *
1143 * @param pUVM The VM to power on.
1144 *
1145 * @thread Any thread.
1146 * @vmstate Created
1147 * @vmstateto PoweringOn+Running
1148 */
1149VMMR3DECL(int) VMR3PowerOn(PUVM pUVM)
1150{
1151 LogFlow(("VMR3PowerOn: pUVM=%p\n", pUVM));
1152 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1153 PVM pVM = pUVM->pVM;
1154 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1155
1156 /*
1157 * Gather all the EMTs to reduce the init TSC drift and keep
1158 * the state changing APIs a bit uniform.
1159 */
1160 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
1161 vmR3PowerOn, NULL);
1162 LogFlow(("VMR3PowerOn: returns %Rrc\n", rc));
1163 return rc;
1164}
1165
1166
1167/**
1168 * Does the suspend notifications.
1169 *
1170 * @param pVM The cross context VM structure.
1171 * @thread EMT(0)
1172 */
1173static void vmR3SuspendDoWork(PVM pVM)
1174{
1175 PDMR3Suspend(pVM);
1176}
1177
1178
1179/**
1180 * EMT rendezvous worker for VMR3Suspend.
1181 *
1182 * @returns VERR_VM_INVALID_VM_STATE or VINF_EM_SUSPEND. (This is a strict
1183 * return code, see FNVMMEMTRENDEZVOUS.)
1184 *
1185 * @param pVM The cross context VM structure.
1186 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1187 * @param pvUser Ignored.
1188 */
1189static DECLCALLBACK(VBOXSTRICTRC) vmR3Suspend(PVM pVM, PVMCPU pVCpu, void *pvUser)
1190{
1191 VMSUSPENDREASON enmReason = (VMSUSPENDREASON)(uintptr_t)pvUser;
1192 LogFlow(("vmR3Suspend: pVM=%p pVCpu=%p/#%u enmReason=%d\n", pVM, pVCpu, pVCpu->idCpu, enmReason));
1193
1194 /*
1195 * The first EMT switches the state to suspending. If this fails because
1196 * something was racing us in one way or the other, there will be no more
1197 * calls and thus the state assertion below is not going to annoy anyone.
1198 */
1199 if (pVCpu->idCpu == pVM->cCpus - 1)
1200 {
1201 int rc = vmR3TrySetState(pVM, "VMR3Suspend", 2,
1202 VMSTATE_SUSPENDING, VMSTATE_RUNNING,
1203 VMSTATE_SUSPENDING_EXT_LS, VMSTATE_RUNNING_LS);
1204 if (RT_FAILURE(rc))
1205 return rc;
1206 pVM->pUVM->vm.s.enmSuspendReason = enmReason;
1207 }
1208
1209 VMSTATE enmVMState = VMR3GetState(pVM);
1210 AssertMsgReturn( enmVMState == VMSTATE_SUSPENDING
1211 || enmVMState == VMSTATE_SUSPENDING_EXT_LS,
1212 ("%s\n", VMR3GetStateName(enmVMState)),
1213 VERR_VM_UNEXPECTED_UNSTABLE_STATE);
1214
1215 /*
1216 * EMT(0) does the actual suspending *after* all the other CPUs have
1217 * been thru here.
1218 */
1219 if (pVCpu->idCpu == 0)
1220 {
1221 vmR3SuspendDoWork(pVM);
1222
1223 int rc = vmR3TrySetState(pVM, "VMR3Suspend", 2,
1224 VMSTATE_SUSPENDED, VMSTATE_SUSPENDING,
1225 VMSTATE_SUSPENDED_EXT_LS, VMSTATE_SUSPENDING_EXT_LS);
1226 if (RT_FAILURE(rc))
1227 return VERR_VM_UNEXPECTED_UNSTABLE_STATE;
1228 }
1229
1230 return VINF_EM_SUSPEND;
1231}
1232
1233
1234/**
1235 * Suspends a running VM.
1236 *
1237 * @returns VBox status code. When called on EMT, this will be a strict status
1238 * code that has to be propagated up the call stack.
1239 *
1240 * @param pUVM The VM to suspend.
1241 * @param enmReason The reason for suspending.
1242 *
1243 * @thread Any thread.
1244 * @vmstate Running or RunningLS
1245 * @vmstateto Suspending + Suspended or SuspendingExtLS + SuspendedExtLS
1246 */
1247VMMR3DECL(int) VMR3Suspend(PUVM pUVM, VMSUSPENDREASON enmReason)
1248{
1249 LogFlow(("VMR3Suspend: pUVM=%p\n", pUVM));
1250 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1251 AssertReturn(enmReason > VMSUSPENDREASON_INVALID && enmReason < VMSUSPENDREASON_END, VERR_INVALID_PARAMETER);
1252
1253 /*
1254 * Gather all the EMTs to make sure there are no races before
1255 * changing the VM state.
1256 */
1257 int rc = VMMR3EmtRendezvous(pUVM->pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
1258 vmR3Suspend, (void *)(uintptr_t)enmReason);
1259 LogFlow(("VMR3Suspend: returns %Rrc\n", rc));
1260 return rc;
1261}
1262
1263
1264/**
1265 * Retrieves the reason for the most recent suspend.
1266 *
1267 * @returns Suspend reason. VMSUSPENDREASON_INVALID if no suspend has been done
1268 * or the handle is invalid.
1269 * @param pUVM The user mode VM handle.
1270 */
1271VMMR3DECL(VMSUSPENDREASON) VMR3GetSuspendReason(PUVM pUVM)
1272{
1273 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VMSUSPENDREASON_INVALID);
1274 return pUVM->vm.s.enmSuspendReason;
1275}
1276
1277
1278/**
1279 * EMT rendezvous worker for VMR3Resume.
1280 *
1281 * @returns VERR_VM_INVALID_VM_STATE or VINF_EM_RESUME. (This is a strict
1282 * return code, see FNVMMEMTRENDEZVOUS.)
1283 *
1284 * @param pVM The cross context VM structure.
1285 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1286 * @param pvUser Reason.
1287 */
1288static DECLCALLBACK(VBOXSTRICTRC) vmR3Resume(PVM pVM, PVMCPU pVCpu, void *pvUser)
1289{
1290 VMRESUMEREASON enmReason = (VMRESUMEREASON)(uintptr_t)pvUser;
1291 LogFlow(("vmR3Resume: pVM=%p pVCpu=%p/#%u enmReason=%d\n", pVM, pVCpu, pVCpu->idCpu, enmReason));
1292
1293 /*
1294 * The first thread thru here tries to change the state. We shouldn't be
1295 * called again if this fails.
1296 */
1297 if (pVCpu->idCpu == pVM->cCpus - 1)
1298 {
1299 int rc = vmR3TrySetState(pVM, "VMR3Resume", 1, VMSTATE_RESUMING, VMSTATE_SUSPENDED);
1300 if (RT_FAILURE(rc))
1301 return rc;
1302 pVM->pUVM->vm.s.enmResumeReason = enmReason;
1303 }
1304
1305 VMSTATE enmVMState = VMR3GetState(pVM);
1306 AssertMsgReturn(enmVMState == VMSTATE_RESUMING,
1307 ("%s\n", VMR3GetStateName(enmVMState)),
1308 VERR_VM_UNEXPECTED_UNSTABLE_STATE);
1309
1310#if 0
1311 /*
1312 * All EMTs change their state to started.
1313 */
1314 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
1315#endif
1316
1317 /*
1318 * EMT(0) is last thru here and it will make the notification calls
1319 * and advance the state.
1320 */
1321 if (pVCpu->idCpu == 0)
1322 {
1323 PDMR3Resume(pVM);
1324 vmR3SetState(pVM, VMSTATE_RUNNING, VMSTATE_RESUMING);
1325 pVM->vm.s.fTeleportedAndNotFullyResumedYet = false;
1326 }
1327
1328 return VINF_EM_RESUME;
1329}
1330
1331
1332/**
1333 * Resume VM execution.
1334 *
1335 * @returns VBox status code. When called on EMT, this will be a strict status
1336 * code that has to be propagated up the call stack.
1337 *
1338 * @param pUVM The user mode VM handle.
1339 * @param enmReason The reason we're resuming.
1340 *
1341 * @thread Any thread.
1342 * @vmstate Suspended
1343 * @vmstateto Running
1344 */
1345VMMR3DECL(int) VMR3Resume(PUVM pUVM, VMRESUMEREASON enmReason)
1346{
1347 LogFlow(("VMR3Resume: pUVM=%p\n", pUVM));
1348 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1349 PVM pVM = pUVM->pVM;
1350 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1351 AssertReturn(enmReason > VMRESUMEREASON_INVALID && enmReason < VMRESUMEREASON_END, VERR_INVALID_PARAMETER);
1352
1353 /*
1354 * Gather all the EMTs to make sure there are no races before
1355 * changing the VM state.
1356 */
1357 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
1358 vmR3Resume, (void *)(uintptr_t)enmReason);
1359 LogFlow(("VMR3Resume: returns %Rrc\n", rc));
1360 return rc;
1361}
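
/*
 * Illustrative sketch: pausing and resuming a running VM from a non-EMT
 * thread with the two APIs above (the reason codes are example choices).
 */
#if 0
int rc = VMR3Suspend(pUVM, VMSUSPENDREASON_USER);
if (RT_SUCCESS(rc))
    rc = VMR3Resume(pUVM, VMRESUMEREASON_USER);
#endif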
1362
1363
1364/**
1365 * Retrieves the reason for the most recent resume.
1366 *
1367 * @returns Resume reason. VMRESUMEREASON_INVALID if no resume has been
1368 * done or the handle is invalid.
1369 * @param pUVM The user mode VM handle.
1370 */
1371VMMR3DECL(VMRESUMEREASON) VMR3GetResumeReason(PUVM pUVM)
1372{
1373 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VMRESUMEREASON_INVALID);
1374 return pUVM->vm.s.enmResumeReason;
1375}
1376
1377
1378/**
1379 * EMT rendezvous worker for VMR3Save and VMR3Teleport that suspends the VM
1380 * after the live step has been completed.
1381 *
1382 * @returns VERR_VM_INVALID_VM_STATE or VINF_EM_SUSPEND. (This is a strict
1383 * return code, see FNVMMEMTRENDEZVOUS.)
1384 *
1385 * @param pVM The cross context VM structure.
1386 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1387 * @param pvUser The pfSuspended argument of vmR3SaveTeleport.
1388 */
1389static DECLCALLBACK(VBOXSTRICTRC) vmR3LiveDoSuspend(PVM pVM, PVMCPU pVCpu, void *pvUser)
1390{
1391 LogFlow(("vmR3LiveDoSuspend: pVM=%p pVCpu=%p/#%u\n", pVM, pVCpu, pVCpu->idCpu));
1392 bool *pfSuspended = (bool *)pvUser;
1393
1394 /*
1395 * The first thread thru here tries to change the state. We shouldn't be
1396 * called again if this fails.
1397 */
1398 if (pVCpu->idCpu == pVM->cCpus - 1U)
1399 {
1400 PUVM pUVM = pVM->pUVM;
1401 int rc;
1402
1403 RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
1404 VMSTATE enmVMState = pVM->enmVMState;
1405 switch (enmVMState)
1406 {
1407 case VMSTATE_RUNNING_LS:
1408 vmR3SetStateLocked(pVM, pUVM, VMSTATE_SUSPENDING_LS, VMSTATE_RUNNING_LS, false /*fSetRatherThanClearFF*/);
1409 rc = VINF_SUCCESS;
1410 break;
1411
1412 case VMSTATE_SUSPENDED_EXT_LS:
1413 case VMSTATE_SUSPENDED_LS: /* (via reset) */
1414 rc = VINF_SUCCESS;
1415 break;
1416
1417 case VMSTATE_DEBUGGING_LS:
1418 rc = VERR_TRY_AGAIN;
1419 break;
1420
1421 case VMSTATE_OFF_LS:
1422 vmR3SetStateLocked(pVM, pUVM, VMSTATE_OFF, VMSTATE_OFF_LS, false /*fSetRatherThanClearFF*/);
1423 rc = VERR_SSM_LIVE_POWERED_OFF;
1424 break;
1425
1426 case VMSTATE_FATAL_ERROR_LS:
1427 vmR3SetStateLocked(pVM, pUVM, VMSTATE_FATAL_ERROR, VMSTATE_FATAL_ERROR_LS, false /*fSetRatherThanClearFF*/);
1428 rc = VERR_SSM_LIVE_FATAL_ERROR;
1429 break;
1430
1431 case VMSTATE_GURU_MEDITATION_LS:
1432 vmR3SetStateLocked(pVM, pUVM, VMSTATE_GURU_MEDITATION, VMSTATE_GURU_MEDITATION_LS, false /*fSetRatherThanClearFF*/);
1433 rc = VERR_SSM_LIVE_GURU_MEDITATION;
1434 break;
1435
1436 case VMSTATE_POWERING_OFF_LS:
1437 case VMSTATE_SUSPENDING_EXT_LS:
1438 case VMSTATE_RESETTING_LS:
1439 default:
1440 AssertMsgFailed(("%s\n", VMR3GetStateName(enmVMState)));
1441 rc = VERR_VM_UNEXPECTED_VM_STATE;
1442 break;
1443 }
1444 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
1445 if (RT_FAILURE(rc))
1446 {
1447 LogFlow(("vmR3LiveDoSuspend: returns %Rrc (state was %s)\n", rc, VMR3GetStateName(enmVMState)));
1448 return rc;
1449 }
1450 }
1451
1452 VMSTATE enmVMState = VMR3GetState(pVM);
1453 AssertMsgReturn(enmVMState == VMSTATE_SUSPENDING_LS,
1454 ("%s\n", VMR3GetStateName(enmVMState)),
1455 VERR_VM_UNEXPECTED_UNSTABLE_STATE);
1456
1457 /*
1458 * Only EMT(0) has work to do since it's last thru here.
1459 */
1460 if (pVCpu->idCpu == 0)
1461 {
1462 vmR3SuspendDoWork(pVM);
1463 int rc = vmR3TrySetState(pVM, "VMR3Suspend", 1,
1464 VMSTATE_SUSPENDED_LS, VMSTATE_SUSPENDING_LS);
1465 if (RT_FAILURE(rc))
1466 return VERR_VM_UNEXPECTED_UNSTABLE_STATE;
1467
1468 *pfSuspended = true;
1469 }
1470
1471 return VINF_EM_SUSPEND;
1472}
1473
1474
1475/**
1476 * EMT rendezvous worker that VMR3Save and VMR3Teleport use to clean up a
1477 * SSMR3LiveDoStep1 failure.
1478 *
1479 * Doing this as a rendezvous operation avoids all annoying transition
1480 * states.
1481 *
1482 * @returns VERR_VM_INVALID_VM_STATE, VINF_SUCCESS or some specific VERR_SSM_*
1483 * status code. (This is a strict return code, see FNVMMEMTRENDEZVOUS.)
1484 *
1485 * @param pVM The cross context VM structure.
1486 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1487 * @param pvUser The pfSuspended argument of vmR3SaveTeleport.
1488 */
1489static DECLCALLBACK(VBOXSTRICTRC) vmR3LiveDoStep1Cleanup(PVM pVM, PVMCPU pVCpu, void *pvUser)
1490{
1491 LogFlow(("vmR3LiveDoStep1Cleanup: pVM=%p pVCpu=%p/#%u\n", pVM, pVCpu, pVCpu->idCpu));
1492 bool *pfSuspended = (bool *)pvUser;
1493 NOREF(pVCpu);
1494
1495 int rc = vmR3TrySetState(pVM, "vmR3LiveDoStep1Cleanup", 8,
1496 VMSTATE_OFF, VMSTATE_OFF_LS, /* 1 */
1497 VMSTATE_FATAL_ERROR, VMSTATE_FATAL_ERROR_LS, /* 2 */
1498 VMSTATE_GURU_MEDITATION, VMSTATE_GURU_MEDITATION_LS, /* 3 */
1499 VMSTATE_SUSPENDED, VMSTATE_SUSPENDED_LS, /* 4 */
1500 VMSTATE_SUSPENDED, VMSTATE_SAVING,
1501 VMSTATE_SUSPENDED, VMSTATE_SUSPENDED_EXT_LS,
1502 VMSTATE_RUNNING, VMSTATE_RUNNING_LS,
1503 VMSTATE_DEBUGGING, VMSTATE_DEBUGGING_LS);
1504 if (rc == 1)
1505 rc = VERR_SSM_LIVE_POWERED_OFF;
1506 else if (rc == 2)
1507 rc = VERR_SSM_LIVE_FATAL_ERROR;
1508 else if (rc == 3)
1509 rc = VERR_SSM_LIVE_GURU_MEDITATION;
1510 else if (rc == 4)
1511 {
1512 *pfSuspended = true;
1513 rc = VINF_SUCCESS;
1514 }
1515 else if (rc > 0)
1516 rc = VINF_SUCCESS;
1517 return rc;
1518}
1519
1520
1521/**
1522 * EMT(0) worker for VMR3Save and VMR3Teleport that completes the live save.
1523 *
1524 * @returns VBox status code.
1525 * @retval VINF_SSM_LIVE_SUSPENDED if VMR3Suspend was called.
1526 *
1527 * @param pVM The cross context VM structure.
1528 * @param pSSM The handle of saved state operation.
1529 *
1530 * @thread EMT(0)
1531 */
1532static DECLCALLBACK(int) vmR3LiveDoStep2(PVM pVM, PSSMHANDLE pSSM)
1533{
1534 LogFlow(("vmR3LiveDoStep2: pVM=%p pSSM=%p\n", pVM, pSSM));
1535 VM_ASSERT_EMT0(pVM);
1536
1537 /*
1538 * Advance the state and mark if VMR3Suspend was called.
1539 */
1540 int rc = VINF_SUCCESS;
1541 VMSTATE enmVMState = VMR3GetState(pVM);
1542 if (enmVMState == VMSTATE_SUSPENDED_LS)
1543 vmR3SetState(pVM, VMSTATE_SAVING, VMSTATE_SUSPENDED_LS);
1544 else
1545 {
1546 if (enmVMState != VMSTATE_SAVING)
1547 vmR3SetState(pVM, VMSTATE_SAVING, VMSTATE_SUSPENDED_EXT_LS);
1548 rc = VINF_SSM_LIVE_SUSPENDED;
1549 }
1550
1551 /*
1552 * Finish up and release the handle. Careful with the status codes.
1553 */
1554 int rc2 = SSMR3LiveDoStep2(pSSM);
1555 if (rc == VINF_SUCCESS || (RT_FAILURE(rc2) && RT_SUCCESS(rc)))
1556 rc = rc2;
1557
1558 rc2 = SSMR3LiveDone(pSSM);
1559 if (rc == VINF_SUCCESS || (RT_FAILURE(rc2) && RT_SUCCESS(rc)))
1560 rc = rc2;
1561
1562 /*
1563 * Advance to the final state and return.
1564 */
1565 vmR3SetState(pVM, VMSTATE_SUSPENDED, VMSTATE_SAVING);
1566 Assert(rc > VINF_EM_LAST || rc < VINF_EM_FIRST);
1567 return rc;
1568}
1569
1570
1571/**
1572 * Worker for vmR3SaveTeleport that validates the state and calls SSMR3Save or
1573 * SSMR3LiveSave.
1574 *
1575 * @returns VBox status code.
1576 *
1577 * @param pVM The cross context VM structure.
1578 * @param cMsMaxDowntime The maximum downtime given as milliseconds.
1579 * @param pszFilename The name of the file. NULL if pStreamOps is used.
1580 * @param pStreamOps The stream methods. NULL if pszFilename is used.
1581 * @param pvStreamOpsUser The user argument to the stream methods.
1582 * @param enmAfter What to do afterwards.
1583 * @param pfnProgress Progress callback. Optional.
1584 * @param pvProgressUser User argument for the progress callback.
1585 * @param ppSSM Where to return the saved state handle in case of a
1586 * live snapshot scenario.
1587 *
1588 * @thread EMT
1589 */
1590static DECLCALLBACK(int) vmR3Save(PVM pVM, uint32_t cMsMaxDowntime, const char *pszFilename, PCSSMSTRMOPS pStreamOps, void *pvStreamOpsUser,
1591 SSMAFTER enmAfter, PFNVMPROGRESS pfnProgress, void *pvProgressUser, PSSMHANDLE *ppSSM)
1592{
1593 int rc = VINF_SUCCESS;
1594
1595 LogFlow(("vmR3Save: pVM=%p cMsMaxDowntime=%u pszFilename=%p:{%s} pStreamOps=%p pvStreamOpsUser=%p enmAfter=%d pfnProgress=%p pvProgressUser=%p ppSSM=%p\n",
1596 pVM, cMsMaxDowntime, pszFilename, pszFilename, pStreamOps, pvStreamOpsUser, enmAfter, pfnProgress, pvProgressUser, ppSSM));
1597
1598 /*
1599 * Validate input.
1600 */
1601 AssertPtrNull(pszFilename);
1602 AssertPtrNull(pStreamOps);
1603 AssertPtr(pVM);
1604 Assert( enmAfter == SSMAFTER_DESTROY
1605 || enmAfter == SSMAFTER_CONTINUE
1606 || enmAfter == SSMAFTER_TELEPORT);
1607 AssertPtr(ppSSM);
1608 *ppSSM = NULL;
1609
1610 /*
1611 * Change the state and perform/start the saving.
1612 */
1613 rc = vmR3TrySetState(pVM, "VMR3Save", 2,
1614 VMSTATE_SAVING, VMSTATE_SUSPENDED,
1615 VMSTATE_RUNNING_LS, VMSTATE_RUNNING);
1616 if (rc == 1 && enmAfter != SSMAFTER_TELEPORT)
1617 {
1618 rc = SSMR3Save(pVM, pszFilename, pStreamOps, pvStreamOpsUser, enmAfter, pfnProgress, pvProgressUser);
1619 vmR3SetState(pVM, VMSTATE_SUSPENDED, VMSTATE_SAVING);
1620 }
1621 else if (rc == 2 || enmAfter == SSMAFTER_TELEPORT)
1622 {
1623 if (enmAfter == SSMAFTER_TELEPORT)
1624 pVM->vm.s.fTeleportedAndNotFullyResumedYet = true;
1625 rc = SSMR3LiveSave(pVM, cMsMaxDowntime, pszFilename, pStreamOps, pvStreamOpsUser,
1626 enmAfter, pfnProgress, pvProgressUser, ppSSM);
1627 /* (We're not subject to cancellation just yet.) */
1628 }
1629 else
1630 Assert(RT_FAILURE(rc));
1631 return rc;
1632}
1633
1634
1635/**
1636 * Common worker for VMR3Save and VMR3Teleport.
1637 *
1638 * @returns VBox status code.
1639 *
1640 * @param pVM The cross context VM structure.
1641 * @param cMsMaxDowntime The maximum downtime given as milliseconds.
1642 * @param pszFilename The name of the file. NULL if pStreamOps is used.
1643 * @param pStreamOps The stream methods. NULL if pszFilename is used.
1644 * @param pvStreamOpsUser The user argument to the stream methods.
1645 * @param enmAfter What to do afterwards.
1646 * @param pfnProgress Progress callback. Optional.
1647 * @param pvProgressUser User argument for the progress callback.
1648 * @param pfSuspended Set if we suspended the VM.
1649 *
1650 * @thread Non-EMT
1651 */
1652static int vmR3SaveTeleport(PVM pVM, uint32_t cMsMaxDowntime,
1653 const char *pszFilename, PCSSMSTRMOPS pStreamOps, void *pvStreamOpsUser,
1654 SSMAFTER enmAfter, PFNVMPROGRESS pfnProgress, void *pvProgressUser, bool *pfSuspended)
1655{
1656 /*
1657 * Request the operation in EMT(0).
1658 */
1659 PSSMHANDLE pSSM;
1660 int rc = VMR3ReqCallWait(pVM, 0 /*idDstCpu*/,
1661 (PFNRT)vmR3Save, 9, pVM, cMsMaxDowntime, pszFilename, pStreamOps, pvStreamOpsUser,
1662 enmAfter, pfnProgress, pvProgressUser, &pSSM);
1663 if ( RT_SUCCESS(rc)
1664 && pSSM)
1665 {
1666 /*
1667 * Live snapshot.
1668 *
1669 * The state handling here is kind of tricky; doing it on EMT(0) helps
1670 * a bit. See the VMSTATE diagram for details.
1671 */
1672 rc = SSMR3LiveDoStep1(pSSM);
1673 if (RT_SUCCESS(rc))
1674 {
1675 if (VMR3GetState(pVM) != VMSTATE_SAVING)
1676 for (;;)
1677 {
1678 /* Try suspend the VM. */
1679 rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
1680 vmR3LiveDoSuspend, pfSuspended);
1681 if (rc != VERR_TRY_AGAIN)
1682 break;
1683
1684 /* Wait for the state to change. */
1685 RTThreadSleep(250); /** @todo Live Migration: fix this polling wait by some smart use of multiple release event semaphores.. */
1686 }
1687 if (RT_SUCCESS(rc))
1688 rc = VMR3ReqCallWait(pVM, 0 /*idDstCpu*/, (PFNRT)vmR3LiveDoStep2, 2, pVM, pSSM);
1689 else
1690 {
1691 int rc2 = VMR3ReqCallWait(pVM, 0 /*idDstCpu*/, (PFNRT)SSMR3LiveDone, 1, pSSM);
1692 AssertMsg(rc2 == rc, ("%Rrc != %Rrc\n", rc2, rc)); NOREF(rc2);
1693 }
1694 }
1695 else
1696 {
1697 int rc2 = VMR3ReqCallWait(pVM, 0 /*idDstCpu*/, (PFNRT)SSMR3LiveDone, 1, pSSM);
1698 AssertMsg(rc2 == rc, ("%Rrc != %Rrc\n", rc2, rc));
1699
1700 rc2 = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE, vmR3LiveDoStep1Cleanup, pfSuspended);
1701 if (RT_FAILURE(rc2) && rc == VERR_SSM_CANCELLED)
1702 rc = rc2;
1703 }
1704 }
1705
1706 return rc;
1707}
1708
1709
1710/**
1711 * Save current VM state.
1712 *
1713 * Can be used for both saving the state and creating snapshots.
1714 *
1715 * When called for a VM in the Running state, the saved state is created live
1716 * and the VM is only suspended when the final part of the saving is performed.
1717 * The VM state will not be restored to Running in this case and it's up to the
1718 * caller to call VMR3Resume if this is desirable. (The rationale is that the
1719 * caller probably wishes to reconfigure the disks before resuming the VM.)
1720 *
1721 * @returns VBox status code.
1722 *
1723 * @param pUVM The VM which state should be saved.
1724 * @param pszFilename The name of the save state file.
1725 * @param fContinueAfterwards Whether to continue execution afterwards or not.
1726 * When in doubt, set this to true.
1727 * @param pfnProgress Progress callback. Optional.
1728 * @param pvUser User argument for the progress callback.
1729 * @param pfSuspended Set if we suspended the VM.
1730 *
1731 * @thread Non-EMT.
1732 * @vmstate Suspended or Running
1733 * @vmstateto Saving+Suspended or
1734 * RunningLS+SuspendingLS+SuspendedLS+Saving+Suspended.
1735 */
1736VMMR3DECL(int) VMR3Save(PUVM pUVM, const char *pszFilename, bool fContinueAfterwards, PFNVMPROGRESS pfnProgress, void *pvUser,
1737 bool *pfSuspended)
1738{
1739 LogFlow(("VMR3Save: pUVM=%p pszFilename=%p:{%s} fContinueAfterwards=%RTbool pfnProgress=%p pvUser=%p pfSuspended=%p\n",
1740 pUVM, pszFilename, pszFilename, fContinueAfterwards, pfnProgress, pvUser, pfSuspended));
1741
1742 /*
1743 * Validate input.
1744 */
1745 AssertPtr(pfSuspended);
1746 *pfSuspended = false;
1747 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1748 PVM pVM = pUVM->pVM;
1749 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1750 VM_ASSERT_OTHER_THREAD(pVM);
1751 AssertReturn(VALID_PTR(pszFilename), VERR_INVALID_POINTER);
1752 AssertReturn(*pszFilename, VERR_INVALID_PARAMETER);
1753 AssertPtrNullReturn(pfnProgress, VERR_INVALID_POINTER);
1754
1755 /*
1756 * Join paths with VMR3Teleport.
1757 */
1758 SSMAFTER enmAfter = fContinueAfterwards ? SSMAFTER_CONTINUE : SSMAFTER_DESTROY;
1759 int rc = vmR3SaveTeleport(pVM, 250 /*cMsMaxDowntime*/,
1760 pszFilename, NULL /* pStreamOps */, NULL /* pvStreamOpsUser */,
1761 enmAfter, pfnProgress, pvUser, pfSuspended);
1762 LogFlow(("VMR3Save: returns %Rrc (*pfSuspended=%RTbool)\n", rc, *pfSuspended));
1763 return rc;
1764}
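/*
 * A minimal usage sketch of the above: saving a running VM to a file and
 * resuming it afterwards. The file name and the omitted progress callback
 * are illustrative; the VMR3Resume signature and the VMRESUMEREASON_USER
 * value are assumptions not confirmed by this file.
 */
#if 0
static int exampleSaveAndContinue(PUVM pUVM)
{
    bool fSuspended = false;
    int rc = VMR3Save(pUVM, "MyVM.sav", true /*fContinueAfterwards*/,
                      NULL /*pfnProgress*/, NULL /*pvUser*/, &fSuspended);
    if (RT_SUCCESS(rc) && fSuspended)
        rc = VMR3Resume(pUVM, VMRESUMEREASON_USER); /* assumed resume API */
    return rc;
}
#endif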
1765
1766
1767/**
1768 * Teleport the VM (aka live migration).
1769 *
1770 * @returns VBox status code.
1771 *
1772 * @param pUVM The VM which state should be saved.
1773 * @param cMsMaxDowntime The maximum downtime given as milliseconds.
1774 * @param pStreamOps The stream methods.
1775 * @param pvStreamOpsUser The user argument to the stream methods.
1776 * @param pfnProgress Progress callback. Optional.
1777 * @param pvProgressUser User argument for the progress callback.
1778 * @param pfSuspended Set if we suspended the VM.
1779 *
1780 * @thread Non-EMT.
1781 * @vmstate Suspended or Running
1782 * @vmstateto Saving+Suspended or
1783 * RunningLS+SuspendingLS+SuspendedLS+Saving+Suspended.
1784 */
1785VMMR3DECL(int) VMR3Teleport(PUVM pUVM, uint32_t cMsMaxDowntime, PCSSMSTRMOPS pStreamOps, void *pvStreamOpsUser,
1786 PFNVMPROGRESS pfnProgress, void *pvProgressUser, bool *pfSuspended)
1787{
1788 LogFlow(("VMR3Teleport: pUVM=%p cMsMaxDowntime=%u pStreamOps=%p pvStreamOps=%p pfnProgress=%p pvProgressUser=%p\n",
1789 pUVM, cMsMaxDowntime, pStreamOps, pvStreamOpsUser, pfnProgress, pvProgressUser));
1790
1791 /*
1792 * Validate input.
1793 */
1794 AssertPtr(pfSuspended);
1795 *pfSuspended = false;
1796 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1797 PVM pVM = pUVM->pVM;
1798 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1799 VM_ASSERT_OTHER_THREAD(pVM);
1800 AssertPtrReturn(pStreamOps, VERR_INVALID_POINTER);
1801 AssertPtrNullReturn(pfnProgress, VERR_INVALID_POINTER);
1802
1803 /*
1804 * Join paths with VMR3Save.
1805 */
1806 int rc = vmR3SaveTeleport(pVM, cMsMaxDowntime, NULL /*pszFilename*/, pStreamOps, pvStreamOpsUser,
1807 SSMAFTER_TELEPORT, pfnProgress, pvProgressUser, pfSuspended);
1808 LogFlow(("VMR3Teleport: returns %Rrc (*pfSuspended=%RTbool)\n", rc, *pfSuspended));
1809 return rc;
1810}
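/*
 * A minimal usage sketch: teleporting a VM over a caller-provided stream.
 * The SSMSTRMOPS table (typically wrapping a TCP connection) is assumed to
 * be implemented elsewhere; g_ExampleStreamOps, pvConn and the 250 ms
 * downtime budget are hypothetical placeholders.
 */
#if 0
static int exampleTeleport(PUVM pUVM, void *pvConn)
{
    extern const SSMSTRMOPS g_ExampleStreamOps; /* hypothetical stream methods */
    bool fSuspended = false;
    return VMR3Teleport(pUVM, 250 /*cMsMaxDowntime*/, &g_ExampleStreamOps, pvConn,
                        NULL /*pfnProgress*/, NULL /*pvProgressUser*/, &fSuspended);
}
#endif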
1811
1812
1813
1814/**
1815 * EMT(0) worker for VMR3LoadFromFile and VMR3LoadFromStream.
1816 *
1817 * @returns VBox status code.
1818 *
1819 * @param pUVM Pointer to the user mode VM structure.
1820 * @param pszFilename The name of the file. NULL if pStreamOps is used.
1821 * @param pStreamOps The stream methods. NULL if pszFilename is used.
1822 * @param pvStreamOpsUser The user argument to the stream methods.
1823 * @param pfnProgress Progress callback. Optional.
1824 * @param pvProgressUser User argument for the progress callback.
1825 * @param fTeleporting Indicates whether we're teleporting or not.
1826 *
1827 * @thread EMT.
1828 */
1829static DECLCALLBACK(int) vmR3Load(PUVM pUVM, const char *pszFilename, PCSSMSTRMOPS pStreamOps, void *pvStreamOpsUser,
1830 PFNVMPROGRESS pfnProgress, void *pvProgressUser, bool fTeleporting)
1831{
1832 LogFlow(("vmR3Load: pUVM=%p pszFilename=%p:{%s} pStreamOps=%p pvStreamOpsUser=%p pfnProgress=%p pvProgressUser=%p fTeleporting=%RTbool\n",
1833 pUVM, pszFilename, pszFilename, pStreamOps, pvStreamOpsUser, pfnProgress, pvProgressUser, fTeleporting));
1834
1835 /*
1836 * Validate input (paranoia).
1837 */
1838 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1839 PVM pVM = pUVM->pVM;
1840 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1841 AssertPtrNull(pszFilename);
1842 AssertPtrNull(pStreamOps);
1843 AssertPtrNull(pfnProgress);
1844
1845 /*
1846 * Change the state and perform the load.
1847 *
1848 * Always perform a relocation round afterwards to make sure hypervisor
1849 * selectors and such are correct.
1850 */
1851 int rc = vmR3TrySetState(pVM, "VMR3Load", 2,
1852 VMSTATE_LOADING, VMSTATE_CREATED,
1853 VMSTATE_LOADING, VMSTATE_SUSPENDED);
1854 if (RT_FAILURE(rc))
1855 return rc;
1856
1857 pVM->vm.s.fTeleportedAndNotFullyResumedYet = fTeleporting;
1858
1859 uint32_t cErrorsPriorToSave = VMR3GetErrorCount(pUVM);
1860 rc = SSMR3Load(pVM, pszFilename, pStreamOps, pvStreamOpsUser, SSMAFTER_RESUME, pfnProgress, pvProgressUser);
1861 if (RT_SUCCESS(rc))
1862 {
1863 VMR3Relocate(pVM, 0 /*offDelta*/);
1864 vmR3SetState(pVM, VMSTATE_SUSPENDED, VMSTATE_LOADING);
1865 }
1866 else
1867 {
1868 pVM->vm.s.fTeleportedAndNotFullyResumedYet = false;
1869 vmR3SetState(pVM, VMSTATE_LOAD_FAILURE, VMSTATE_LOADING);
1870
1871 if (cErrorsPriorToSave == VMR3GetErrorCount(pUVM))
1872 rc = VMSetError(pVM, rc, RT_SRC_POS,
1873 N_("Unable to restore the virtual machine's saved state from '%s'. "
1874 "It may be damaged or from an older version of VirtualBox. "
1875 "Please discard the saved state before starting the virtual machine"),
1876 pszFilename);
1877 }
1878
1879 return rc;
1880}
1881
1882
1883/**
1884 * Loads a VM state into a newly created VM or one that is suspended.
1885 *
1886 * To restore a saved state on VM startup, call this function and then resume
1887 * the VM instead of powering it on.
1888 *
1889 * @returns VBox status code.
1890 *
1891 * @param pUVM The user mode VM structure.
1892 * @param pszFilename The name of the save state file.
1893 * @param pfnProgress Progress callback. Optional.
1894 * @param pvUser User argument for the progress callback.
1895 *
1896 * @thread Any thread.
1897 * @vmstate Created, Suspended
1898 * @vmstateto Loading+Suspended
1899 */
1900VMMR3DECL(int) VMR3LoadFromFile(PUVM pUVM, const char *pszFilename, PFNVMPROGRESS pfnProgress, void *pvUser)
1901{
1902 LogFlow(("VMR3LoadFromFile: pUVM=%p pszFilename=%p:{%s} pfnProgress=%p pvUser=%p\n",
1903 pUVM, pszFilename, pszFilename, pfnProgress, pvUser));
1904
1905 /*
1906 * Validate input.
1907 */
1908 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1909 AssertPtrReturn(pszFilename, VERR_INVALID_POINTER);
1910
1911 /*
1912 * Forward the request to EMT(0). No need to set up a rendezvous here
1913 * since there is no execution taking place when this call is allowed.
1914 */
1915 int rc = VMR3ReqCallWaitU(pUVM, 0 /*idDstCpu*/, (PFNRT)vmR3Load, 7,
1916 pUVM, pszFilename, (uintptr_t)NULL /*pStreamOps*/, (uintptr_t)NULL /*pvStreamOpsUser*/,
1917 pfnProgress, pvUser, false /*fTeleporting*/);
1918 LogFlow(("VMR3LoadFromFile: returns %Rrc\n", rc));
1919 return rc;
1920}
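/*
 * A minimal usage sketch: restoring a saved state at startup. As the doc
 * comment above notes, the VM is resumed rather than powered on after a
 * successful load; the state file name is illustrative and the VMR3Resume
 * call with VMRESUMEREASON_STATE_RESTORED is an assumption.
 */
#if 0
static int exampleRestore(PUVM pUVM)
{
    int rc = VMR3LoadFromFile(pUVM, "MyVM.sav", NULL /*pfnProgress*/, NULL /*pvUser*/);
    if (RT_SUCCESS(rc))
        rc = VMR3Resume(pUVM, VMRESUMEREASON_STATE_RESTORED); /* assumed resume API */
    return rc;
}
#endif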
1921
1922
1923/**
1924 * VMR3LoadFromFile for arbitrary file streams.
1925 *
1926 * @returns VBox status code.
1927 *
1928 * @param pUVM Pointer to the user mode VM structure.
1929 * @param pStreamOps The stream methods.
1930 * @param pvStreamOpsUser The user argument to the stream methods.
1931 * @param pfnProgress Progress callback. Optional.
1932 * @param pvProgressUser User argument for the progress callback.
1933 *
1934 * @thread Any thread.
1935 * @vmstate Created, Suspended
1936 * @vmstateto Loading+Suspended
1937 */
1938VMMR3DECL(int) VMR3LoadFromStream(PUVM pUVM, PCSSMSTRMOPS pStreamOps, void *pvStreamOpsUser,
1939 PFNVMPROGRESS pfnProgress, void *pvProgressUser)
1940{
1941 LogFlow(("VMR3LoadFromStream: pUVM=%p pStreamOps=%p pvStreamOpsUser=%p pfnProgress=%p pvProgressUser=%p\n",
1942 pUVM, pStreamOps, pvStreamOpsUser, pfnProgress, pvProgressUser));
1943
1944 /*
1945 * Validate input.
1946 */
1947 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1948 AssertPtrReturn(pStreamOps, VERR_INVALID_POINTER);
1949
1950 /*
1951 * Forward the request to EMT(0). No need to set up a rendezvous here
1952 * since there is no execution taking place when this call is allowed.
1953 */
1954 int rc = VMR3ReqCallWaitU(pUVM, 0 /*idDstCpu*/, (PFNRT)vmR3Load, 7,
1955 pUVM, (uintptr_t)NULL /*pszFilename*/, pStreamOps, pvStreamOpsUser, pfnProgress,
1956 pvProgressUser, true /*fTeleporting*/);
1957 LogFlow(("VMR3LoadFromStream: returns %Rrc\n", rc));
1958 return rc;
1959}
1960
1961
1962/**
1963 * EMT rendezvous worker for VMR3PowerOff.
1964 *
1965 * @returns VERR_VM_INVALID_VM_STATE or VINF_EM_OFF. (This is a strict
1966 * return code, see FNVMMEMTRENDEZVOUS.)
1967 *
1968 * @param pVM The cross context VM structure.
1969 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1970 * @param pvUser Ignored.
1971 */
1972static DECLCALLBACK(VBOXSTRICTRC) vmR3PowerOff(PVM pVM, PVMCPU pVCpu, void *pvUser)
1973{
1974 LogFlow(("vmR3PowerOff: pVM=%p pVCpu=%p/#%u\n", pVM, pVCpu, pVCpu->idCpu));
1975 Assert(!pvUser); NOREF(pvUser);
1976
1977 /*
1978 * The first EMT thru here will change the state to PoweringOff.
1979 */
1980 if (pVCpu->idCpu == pVM->cCpus - 1)
1981 {
1982 int rc = vmR3TrySetState(pVM, "VMR3PowerOff", 11,
1983 VMSTATE_POWERING_OFF, VMSTATE_RUNNING, /* 1 */
1984 VMSTATE_POWERING_OFF, VMSTATE_SUSPENDED, /* 2 */
1985 VMSTATE_POWERING_OFF, VMSTATE_DEBUGGING, /* 3 */
1986 VMSTATE_POWERING_OFF, VMSTATE_LOAD_FAILURE, /* 4 */
1987 VMSTATE_POWERING_OFF, VMSTATE_GURU_MEDITATION, /* 5 */
1988 VMSTATE_POWERING_OFF, VMSTATE_FATAL_ERROR, /* 6 */
1989 VMSTATE_POWERING_OFF, VMSTATE_CREATED, /* 7 */ /** @todo update the diagram! */
1990 VMSTATE_POWERING_OFF_LS, VMSTATE_RUNNING_LS, /* 8 */
1991 VMSTATE_POWERING_OFF_LS, VMSTATE_DEBUGGING_LS, /* 9 */
1992 VMSTATE_POWERING_OFF_LS, VMSTATE_GURU_MEDITATION_LS,/* 10 */
1993 VMSTATE_POWERING_OFF_LS, VMSTATE_FATAL_ERROR_LS); /* 11 */
1994 if (RT_FAILURE(rc))
1995 return rc;
1996 if (rc >= 7)
1997 SSMR3Cancel(pVM->pUVM);
1998 }
1999
2000 /*
2001 * Check the state.
2002 */
2003 VMSTATE enmVMState = VMR3GetState(pVM);
2004 AssertMsgReturn( enmVMState == VMSTATE_POWERING_OFF
2005 || enmVMState == VMSTATE_POWERING_OFF_LS,
2006 ("%s\n", VMR3GetStateName(enmVMState)),
2007 VERR_VM_INVALID_VM_STATE);
2008
2009 /*
2010 * EMT(0) does the actual power off work here *after* all the other EMTs
2011 * have been thru and entered the STOPPED state.
2012 */
2013 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STOPPED);
2014 if (pVCpu->idCpu == 0)
2015 {
2016 /*
2017 * For debugging purposes, we will log a summary of the guest state at this point.
2018 */
2019 if (enmVMState != VMSTATE_GURU_MEDITATION)
2020 {
2021 /** @todo make the state dumping at VMR3PowerOff optional. */
2022 bool fOldBuffered = RTLogRelSetBuffering(true /*fBuffered*/);
2023 RTLogRelPrintf("****************** Guest state at power off for VCpu %u ******************\n", pVCpu->idCpu);
2024 DBGFR3InfoEx(pVM->pUVM, pVCpu->idCpu, "cpumguest", "verbose", DBGFR3InfoLogRelHlp());
2025 RTLogRelPrintf("***\n");
2026 DBGFR3InfoEx(pVM->pUVM, pVCpu->idCpu, "cpumguesthwvirt", "verbose", DBGFR3InfoLogRelHlp());
2027 RTLogRelPrintf("***\n");
2028 DBGFR3InfoEx(pVM->pUVM, pVCpu->idCpu, "mode", NULL, DBGFR3InfoLogRelHlp());
2029 RTLogRelPrintf("***\n");
2030 DBGFR3Info(pVM->pUVM, "activetimers", NULL, DBGFR3InfoLogRelHlp());
2031 RTLogRelPrintf("***\n");
2032 DBGFR3Info(pVM->pUVM, "gdt", NULL, DBGFR3InfoLogRelHlp());
2033 /** @todo dump guest call stack. */
2034 RTLogRelSetBuffering(fOldBuffered);
2035 RTLogRelPrintf("************** End of Guest state at power off ***************\n");
2036 }
2037
2038 /*
2039 * Perform the power off notifications and advance the state to
2040 * Off or OffLS.
2041 */
2042 PDMR3PowerOff(pVM);
2043 DBGFR3PowerOff(pVM);
2044
2045 PUVM pUVM = pVM->pUVM;
2046 RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
2047 enmVMState = pVM->enmVMState;
2048 if (enmVMState == VMSTATE_POWERING_OFF_LS)
2049 vmR3SetStateLocked(pVM, pUVM, VMSTATE_OFF_LS, VMSTATE_POWERING_OFF_LS, false /*fSetRatherThanClearFF*/);
2050 else
2051 vmR3SetStateLocked(pVM, pUVM, VMSTATE_OFF, VMSTATE_POWERING_OFF, false /*fSetRatherThanClearFF*/);
2052 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
2053 }
2054 else if (enmVMState != VMSTATE_GURU_MEDITATION)
2055 {
2056 /** @todo make the state dumping at VMR3PowerOff optional. */
2057 bool fOldBuffered = RTLogRelSetBuffering(true /*fBuffered*/);
2058 RTLogRelPrintf("****************** Guest state at power off for VCpu %u ******************\n", pVCpu->idCpu);
2059 DBGFR3InfoEx(pVM->pUVM, pVCpu->idCpu, "cpumguest", "verbose", DBGFR3InfoLogRelHlp());
2060 RTLogRelPrintf("***\n");
2061 DBGFR3InfoEx(pVM->pUVM, pVCpu->idCpu, "cpumguesthwvirt", "verbose", DBGFR3InfoLogRelHlp());
2062 RTLogRelPrintf("***\n");
2063 DBGFR3InfoEx(pVM->pUVM, pVCpu->idCpu, "mode", NULL, DBGFR3InfoLogRelHlp());
2064 RTLogRelPrintf("***\n");
2065 RTLogRelSetBuffering(fOldBuffered);
2066 RTLogRelPrintf("************** End of Guest state at power off for VCpu %u ***************\n", pVCpu->idCpu);
2067 }
2068
2069 return VINF_EM_OFF;
2070}
2071
2072
2073/**
2074 * Power off the VM.
2075 *
2076 * @returns VBox status code. When called on EMT, this will be a strict status
2077 * code that has to be propagated up the call stack.
2078 *
2079 * @param pUVM The handle of the VM to be powered off.
2080 *
2081 * @thread Any thread.
2082 * @vmstate Suspended, Running, Guru Meditation, Load Failure
2083 * @vmstateto Off or OffLS
2084 */
2085VMMR3DECL(int) VMR3PowerOff(PUVM pUVM)
2086{
2087 LogFlow(("VMR3PowerOff: pUVM=%p\n", pUVM));
2088 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
2089 PVM pVM = pUVM->pVM;
2090 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
2091
2092 /*
2093 * Gather all the EMTs to make sure there are no races before
2094 * changing the VM state.
2095 */
2096 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
2097 vmR3PowerOff, NULL);
2098 LogFlow(("VMR3PowerOff: returns %Rrc\n", rc));
2099 return rc;
2100}
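/*
 * A minimal usage sketch of the typical front-end teardown sequence,
 * chaining VMR3PowerOff, VMR3Destroy and the final handle release. Note
 * that VMR3Destroy must not be called on an EMT.
 */
#if 0
static int exampleTeardown(PUVM pUVM)
{
    int rc = VMR3PowerOff(pUVM);    /* Running/Suspended/... -> Off */
    if (RT_SUCCESS(rc))
        rc = VMR3Destroy(pUVM);     /* Off -> Destroying; EMTs terminate */
    VMR3ReleaseUVM(pUVM);           /* drop the caller's UVM reference */
    return rc;
}
#endif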
2101
2102
2103/**
2104 * Destroys the VM.
2105 *
2106 * The VM must be powered off (or never really powered on) to call this
2107 * function. The VM handle is destroyed and can no longer be used upon
2108 * successful return.
2109 *
2110 * @returns VBox status code.
2111 *
2112 * @param pUVM The user mode VM handle.
2113 *
2114 * @thread Any non-emulation thread.
2115 * @vmstate Off, Created
2116 * @vmstateto N/A
2117 */
2118VMMR3DECL(int) VMR3Destroy(PUVM pUVM)
2119{
2120 LogFlow(("VMR3Destroy: pUVM=%p\n", pUVM));
2121
2122 /*
2123 * Validate input.
2124 */
2125 if (!pUVM)
2126 return VERR_INVALID_VM_HANDLE;
2127 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
2128 PVM pVM = pUVM->pVM;
2129 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
2130 AssertLogRelReturn(!VM_IS_EMT(pVM), VERR_VM_THREAD_IS_EMT);
2131
2132 /*
2133 * Change VM state to destroying and call vmR3Destroy on each of the EMTs
2134 * ending with EMT(0) doing the bulk of the cleanup.
2135 */
2136 int rc = vmR3TrySetState(pVM, "VMR3Destroy", 1, VMSTATE_DESTROYING, VMSTATE_OFF);
2137 if (RT_FAILURE(rc))
2138 return rc;
2139
2140 rc = VMR3ReqCallWait(pVM, VMCPUID_ALL_REVERSE, (PFNRT)vmR3Destroy, 1, pVM);
2141 AssertLogRelRC(rc);
2142
2143 /*
2144 * Wait for EMTs to quit and destroy the UVM.
2145 */
2146 vmR3DestroyUVM(pUVM, 30000);
2147
2148 LogFlow(("VMR3Destroy: returns VINF_SUCCESS\n"));
2149 return VINF_SUCCESS;
2150}
2151
2152
2153/**
2154 * Internal destruction worker.
2155 *
2156 * This is either called from VMR3Destroy via VMR3ReqCallU or from
2157 * vmR3EmulationThreadWithId when EMT(0) terminates after having called
2158 * VMR3Destroy().
2159 *
2160 * When called on EMT(0), it will perform the great bulk of the destruction.
2161 * When called on the other EMTs, they will do nothing and the whole purpose is
2162 * to return VINF_EM_TERMINATE so they break out of their run loops.
2163 *
2164 * @returns VINF_EM_TERMINATE.
2165 * @param pVM The cross context VM structure.
2166 */
2167DECLCALLBACK(int) vmR3Destroy(PVM pVM)
2168{
2169 PUVM pUVM = pVM->pUVM;
2170 PVMCPU pVCpu = VMMGetCpu(pVM);
2171 Assert(pVCpu);
2172 LogFlow(("vmR3Destroy: pVM=%p pUVM=%p pVCpu=%p idCpu=%u\n", pVM, pUVM, pVCpu, pVCpu->idCpu));
2173
2174 /*
2175 * Only VCPU 0 does the full cleanup (last).
2176 */
2177 if (pVCpu->idCpu == 0)
2178 {
2179 /*
2180 * Dump statistics to the log.
2181 */
2182#if defined(VBOX_WITH_STATISTICS) || defined(LOG_ENABLED)
2183 RTLogFlags(NULL, "nodisabled nobuffered");
2184#endif
2185//#ifdef VBOX_WITH_STATISTICS
2186// STAMR3Dump(pUVM, "*");
2187//#else
2188 LogRel(("************************* Statistics *************************\n"));
2189 STAMR3DumpToReleaseLog(pUVM, "*");
2190 LogRel(("********************* End of statistics **********************\n"));
2191//#endif
2192
2193 /*
2194 * Destroy the VM components.
2195 */
2196 int rc = TMR3Term(pVM);
2197 AssertRC(rc);
2198#ifdef VBOX_WITH_DEBUGGER
2199 rc = DBGCIoTerminate(pUVM, pUVM->vm.s.pvDBGC);
2200 pUVM->vm.s.pvDBGC = NULL;
2201#endif
2202 AssertRC(rc);
2203 rc = PDMR3Term(pVM);
2204 AssertRC(rc);
2205 rc = GIMR3Term(pVM);
2206 AssertRC(rc);
2207 rc = DBGFR3Term(pVM);
2208 AssertRC(rc);
2209 rc = IEMR3Term(pVM);
2210 AssertRC(rc);
2211 rc = EMR3Term(pVM);
2212 AssertRC(rc);
2213 rc = IOMR3Term(pVM);
2214 AssertRC(rc);
2215 rc = TRPMR3Term(pVM);
2216 AssertRC(rc);
2217 rc = SELMR3Term(pVM);
2218 AssertRC(rc);
2219 rc = HMR3Term(pVM);
2220 AssertRC(rc);
2221 rc = NEMR3Term(pVM);
2222 AssertRC(rc);
2223 rc = PGMR3Term(pVM);
2224 AssertRC(rc);
2225 rc = VMMR3Term(pVM); /* Terminates the ring-0 code! */
2226 AssertRC(rc);
2227 rc = CPUMR3Term(pVM);
2228 AssertRC(rc);
2229 SSMR3Term(pVM);
2230 rc = PDMR3CritSectBothTerm(pVM);
2231 AssertRC(rc);
2232 rc = MMR3Term(pVM);
2233 AssertRC(rc);
2234
2235 /*
2236 * We're done, tell the other EMTs to quit.
2237 */
2238 ASMAtomicUoWriteBool(&pUVM->vm.s.fTerminateEMT, true);
2239 ASMAtomicWriteU32(&pVM->fGlobalForcedActions, VM_FF_CHECK_VM_STATE); /* Can't hurt... */
2240 LogFlow(("vmR3Destroy: returning %Rrc\n", VINF_EM_TERMINATE));
2241 }
2242
2243 /*
2244 * Decrement the active EMT count here.
2245 */
2246 PUVMCPU pUVCpu = &pUVM->aCpus[pVCpu->idCpu];
2247 if (!pUVCpu->vm.s.fBeenThruVmDestroy)
2248 {
2249 pUVCpu->vm.s.fBeenThruVmDestroy = true;
2250 ASMAtomicDecU32(&pUVM->vm.s.cActiveEmts);
2251 }
2252 else
2253 AssertFailed();
2254
2255 return VINF_EM_TERMINATE;
2256}
2257
2258
2259/**
2260 * Destroys the UVM portion.
2261 *
2262 * This is called as the final step in the VM destruction or as the cleanup
2263 * in case of a creation failure.
2264 *
2265 * @param pUVM The user mode VM structure.
2266 * @param cMilliesEMTWait The number of milliseconds to wait for the emulation
2267 * threads.
2268 */
2269static void vmR3DestroyUVM(PUVM pUVM, uint32_t cMilliesEMTWait)
2270{
2271 /*
2272 * Signal termination of each of the emulation threads and
2273 * wait for them to complete.
2274 */
2275 /* Signal them - in reverse order since EMT(0) waits for the others. */
2276 ASMAtomicUoWriteBool(&pUVM->vm.s.fTerminateEMT, true);
2277 if (pUVM->pVM)
2278 VM_FF_SET(pUVM->pVM, VM_FF_CHECK_VM_STATE); /* Can't hurt... */
2279 VMCPUID iCpu = pUVM->cCpus;
2280 while (iCpu-- > 0)
2281 {
2282 VMR3NotifyGlobalFFU(pUVM, VMNOTIFYFF_FLAGS_DONE_REM);
2283 RTSemEventSignal(pUVM->aCpus[iCpu].vm.s.EventSemWait);
2284 }
2285
2286 /* Wait for EMT(0), it in turn waits for the rest. */
2287 ASMAtomicUoWriteBool(&pUVM->vm.s.fTerminateEMT, true);
2288
2289 RTTHREAD const hSelf = RTThreadSelf();
2290 RTTHREAD hThread = pUVM->aCpus[0].vm.s.ThreadEMT;
2291 if ( hThread != NIL_RTTHREAD
2292 && hThread != hSelf)
2293 {
2294 int rc2 = RTThreadWait(hThread, RT_MAX(cMilliesEMTWait, 2000), NULL);
2295 if (rc2 == VERR_TIMEOUT) /* avoid the assertion when debugging. */
2296 rc2 = RTThreadWait(hThread, 1000, NULL);
2297 AssertLogRelMsgRC(rc2, ("iCpu=0 rc=%Rrc\n", rc2));
2298 if (RT_SUCCESS(rc2))
2299 pUVM->aCpus[0].vm.s.ThreadEMT = NIL_RTTHREAD;
2300 }
2301
2302 /* Just in case we're in a weird failure situation w/o EMT(0) to do the
2303 waiting, wait for the other EMTs too. */
2304 for (iCpu = 1; iCpu < pUVM->cCpus; iCpu++)
2305 {
2306 ASMAtomicXchgHandle(&pUVM->aCpus[iCpu].vm.s.ThreadEMT, NIL_RTTHREAD, &hThread);
2307 if (hThread != NIL_RTTHREAD)
2308 {
2309 if (hThread != hSelf)
2310 {
2311 int rc2 = RTThreadWait(hThread, 250 /*ms*/, NULL);
2312 AssertLogRelMsgRC(rc2, ("iCpu=%u rc=%Rrc\n", iCpu, rc2));
2313 if (RT_SUCCESS(rc2))
2314 continue;
2315 }
2316 pUVM->aCpus[iCpu].vm.s.ThreadEMT = hThread;
2317 }
2318 }
2319
2320 /* Cleanup the semaphores. */
2321 iCpu = pUVM->cCpus;
2322 while (iCpu-- > 0)
2323 {
2324 RTSemEventDestroy(pUVM->aCpus[iCpu].vm.s.EventSemWait);
2325 pUVM->aCpus[iCpu].vm.s.EventSemWait = NIL_RTSEMEVENT;
2326 }
2327
2328 /*
2329 * Free the event semaphores associated with the request packets.
2330 */
2331 unsigned cReqs = 0;
2332 for (unsigned i = 0; i < RT_ELEMENTS(pUVM->vm.s.apReqFree); i++)
2333 {
2334 PVMREQ pReq = pUVM->vm.s.apReqFree[i];
2335 pUVM->vm.s.apReqFree[i] = NULL;
2336 for (; pReq; pReq = pReq->pNext, cReqs++)
2337 {
2338 pReq->enmState = VMREQSTATE_INVALID;
2339 RTSemEventDestroy(pReq->EventSem);
2340 }
2341 }
2342 Assert(cReqs == pUVM->vm.s.cReqFree); NOREF(cReqs);
2343
2344 /*
2345 * Kill all queued requests. (There really shouldn't be any!)
2346 */
2347 for (unsigned i = 0; i < 10; i++)
2348 {
2349 PVMREQ pReqHead = ASMAtomicXchgPtrT(&pUVM->vm.s.pPriorityReqs, NULL, PVMREQ);
2350 if (!pReqHead)
2351 {
2352 pReqHead = ASMAtomicXchgPtrT(&pUVM->vm.s.pNormalReqs, NULL, PVMREQ);
2353 if (!pReqHead)
2354 break;
2355 }
2356 AssertLogRelMsgFailed(("Requests pending! VMR3Destroy caller has to serialize this.\n"));
2357
2358 for (PVMREQ pReq = pReqHead; pReq; pReq = pReq->pNext)
2359 {
2360 ASMAtomicUoWriteS32(&pReq->iStatus, VERR_VM_REQUEST_KILLED);
2361 ASMAtomicWriteSize(&pReq->enmState, VMREQSTATE_INVALID);
2362 RTSemEventSignal(pReq->EventSem);
2363 RTThreadSleep(2);
2364 RTSemEventDestroy(pReq->EventSem);
2365 }
2366 /* give them a chance to respond before we free the request memory. */
2367 RTThreadSleep(32);
2368 }
2369
2370 /*
2371 * Now all queued VCPU requests (again, there shouldn't be any).
2372 */
2373 for (VMCPUID idCpu = 0; idCpu < pUVM->cCpus; idCpu++)
2374 {
2375 PUVMCPU pUVCpu = &pUVM->aCpus[idCpu];
2376
2377 for (unsigned i = 0; i < 10; i++)
2378 {
2379 PVMREQ pReqHead = ASMAtomicXchgPtrT(&pUVCpu->vm.s.pPriorityReqs, NULL, PVMREQ);
2380 if (!pReqHead)
2381 {
2382 pReqHead = ASMAtomicXchgPtrT(&pUVCpu->vm.s.pNormalReqs, NULL, PVMREQ);
2383 if (!pReqHead)
2384 break;
2385 }
2386 AssertLogRelMsgFailed(("Requests pending! VMR3Destroy caller has to serialize this.\n"));
2387
2388 for (PVMREQ pReq = pReqHead; pReq; pReq = pReq->pNext)
2389 {
2390 ASMAtomicUoWriteS32(&pReq->iStatus, VERR_VM_REQUEST_KILLED);
2391 ASMAtomicWriteSize(&pReq->enmState, VMREQSTATE_INVALID);
2392 RTSemEventSignal(pReq->EventSem);
2393 RTThreadSleep(2);
2394 RTSemEventDestroy(pReq->EventSem);
2395 }
2396 /* give them a chance to respond before we free the request memory. */
2397 RTThreadSleep(32);
2398 }
2399 }
2400
2401 /*
2402 * Make sure the VMMR0.r0 module and whatever else is unloaded.
2403 */
2404 PDMR3TermUVM(pUVM);
2405
2406 RTCritSectDelete(&pUVM->vm.s.AtErrorCritSect);
2407 RTCritSectDelete(&pUVM->vm.s.AtStateCritSect);
2408
2409 /*
2410 * Terminate the support library if initialized.
2411 */
2412 if (pUVM->vm.s.pSession)
2413 {
2414 int rc = SUPR3Term(false /*fForced*/);
2415 AssertRC(rc);
2416 pUVM->vm.s.pSession = NIL_RTR0PTR;
2417 }
2418
2419 /*
2420 * Release the UVM structure reference.
2421 */
2422 VMR3ReleaseUVM(pUVM);
2423
2424 /*
2425 * Clean up and flush logs.
2426 */
2427 RTLogFlush(NULL);
2428}
2429
2430
2431/**
2432 * Worker which checks integrity of some internal structures.
2433 * This is yet another attempt to track down that AVL tree crash.
2434 */
2435static void vmR3CheckIntegrity(PVM pVM)
2436{
2437#ifdef VBOX_STRICT
2438 int rc = PGMR3CheckIntegrity(pVM);
2439 AssertReleaseRC(rc);
2440#else
2441 RT_NOREF_PV(pVM);
2442#endif
2443}
2444
2445
2446/**
2447 * EMT rendezvous worker for VMR3ResetFF for doing soft/warm reset.
2448 *
2449 * @returns VERR_VM_INVALID_VM_STATE, VINF_EM_RESCHEDULE.
2450 * (This is a strict return code, see FNVMMEMTRENDEZVOUS.)
2451 *
2452 * @param pVM The cross context VM structure.
2453 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2454 * @param pvUser The reset flags.
2455 */
2456static DECLCALLBACK(VBOXSTRICTRC) vmR3SoftReset(PVM pVM, PVMCPU pVCpu, void *pvUser)
2457{
2458 uint32_t fResetFlags = *(uint32_t *)pvUser;
2459
2460
2461 /*
2462 * The first EMT will try to change the state to resetting. If this fails,
2463 * we won't get called for the other EMTs.
2464 */
2465 if (pVCpu->idCpu == pVM->cCpus - 1)
2466 {
2467 int rc = vmR3TrySetState(pVM, "vmR3ResetSoft", 3,
2468 VMSTATE_SOFT_RESETTING, VMSTATE_RUNNING,
2469 VMSTATE_SOFT_RESETTING, VMSTATE_SUSPENDED,
2470 VMSTATE_SOFT_RESETTING_LS, VMSTATE_RUNNING_LS);
2471 if (RT_FAILURE(rc))
2472 return rc;
2473 pVM->vm.s.cResets++;
2474 pVM->vm.s.cSoftResets++;
2475 }
2476
2477 /*
2478 * Check the state.
2479 */
2480 VMSTATE enmVMState = VMR3GetState(pVM);
2481 AssertLogRelMsgReturn( enmVMState == VMSTATE_SOFT_RESETTING
2482 || enmVMState == VMSTATE_SOFT_RESETTING_LS,
2483 ("%s\n", VMR3GetStateName(enmVMState)),
2484 VERR_VM_UNEXPECTED_UNSTABLE_STATE);
2485
2486 /*
2487 * EMT(0) does the full cleanup *after* all the other EMTs have been
2488 * thru here and been told to enter the EMSTATE_WAIT_SIPI state.
2489 *
2490 * Because there are per-cpu reset routines and the order is important,
2491 * the following sequence looks a bit ugly...
2492 */
2493
2494 /* Reset the VCpu state. */
2495 VMCPU_ASSERT_STATE(pVCpu, VMCPUSTATE_STARTED);
2496
2497 /*
2498 * Soft reset the VM components.
2499 */
2500 if (pVCpu->idCpu == 0)
2501 {
2502 PDMR3SoftReset(pVM, fResetFlags);
2503 TRPMR3Reset(pVM);
2504 CPUMR3Reset(pVM); /* This must come *after* PDM (due to APIC base MSR caching). */
2505 EMR3Reset(pVM);
2506 HMR3Reset(pVM); /* This must come *after* PATM, CSAM, CPUM, SELM and TRPM. */
2507 NEMR3Reset(pVM);
2508
2509 /*
2510 * Since EMT(0) is the last to go thru here, it will advance the state.
2511 * (Unlike vmR3HardReset we won't be doing any suspending of live
2512 * migration VMs here since memory is unchanged.)
2513 */
2514 PUVM pUVM = pVM->pUVM;
2515 RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
2516 enmVMState = pVM->enmVMState;
2517 if (enmVMState == VMSTATE_SOFT_RESETTING)
2518 {
2519 if (pUVM->vm.s.enmPrevVMState == VMSTATE_SUSPENDED)
2520 vmR3SetStateLocked(pVM, pUVM, VMSTATE_SUSPENDED, VMSTATE_SOFT_RESETTING, false /*fSetRatherThanClearFF*/);
2521 else
2522 vmR3SetStateLocked(pVM, pUVM, VMSTATE_RUNNING, VMSTATE_SOFT_RESETTING, false /*fSetRatherThanClearFF*/);
2523 }
2524 else
2525 vmR3SetStateLocked(pVM, pUVM, VMSTATE_RUNNING_LS, VMSTATE_SOFT_RESETTING_LS, false /*fSetRatherThanClearFF*/);
2526 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
2527 }
2528
2529 return VINF_EM_RESCHEDULE;
2530}
2531
2532
2533/**
2534 * EMT rendezvous worker for VMR3Reset and VMR3ResetFF.
2535 *
2536 * This is called by the emulation threads as a response to the reset request
2537 * issued by VMR3Reset().
2538 *
2539 * @returns VERR_VM_INVALID_VM_STATE, VINF_EM_RESET or VINF_EM_SUSPEND. (This
2540 * is a strict return code, see FNVMMEMTRENDEZVOUS.)
2541 *
2542 * @param pVM The cross context VM structure.
2543 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2544 * @param pvUser Ignored.
2545 */
2546static DECLCALLBACK(VBOXSTRICTRC) vmR3HardReset(PVM pVM, PVMCPU pVCpu, void *pvUser)
2547{
2548 Assert(!pvUser); NOREF(pvUser);
2549
2550 /*
2551 * The first EMT will try to change the state to resetting. If this fails,
2552 * we won't get called for the other EMTs.
2553 */
2554 if (pVCpu->idCpu == pVM->cCpus - 1)
2555 {
2556 int rc = vmR3TrySetState(pVM, "vmR3HardReset", 3,
2557 VMSTATE_RESETTING, VMSTATE_RUNNING,
2558 VMSTATE_RESETTING, VMSTATE_SUSPENDED,
2559 VMSTATE_RESETTING_LS, VMSTATE_RUNNING_LS);
2560 if (RT_FAILURE(rc))
2561 return rc;
2562 pVM->vm.s.cResets++;
2563 pVM->vm.s.cHardResets++;
2564 }
2565
2566 /*
2567 * Check the state.
2568 */
2569 VMSTATE enmVMState = VMR3GetState(pVM);
2570 AssertLogRelMsgReturn( enmVMState == VMSTATE_RESETTING
2571 || enmVMState == VMSTATE_RESETTING_LS,
2572 ("%s\n", VMR3GetStateName(enmVMState)),
2573 VERR_VM_UNEXPECTED_UNSTABLE_STATE);
2574
2575 /*
2576 * EMT(0) does the full cleanup *after* all the other EMTs have been
2577 * thru here and been told to enter the EMSTATE_WAIT_SIPI state.
2578 *
2579 * Because there are per-cpu reset routines and the order is important,
2580 * the following sequence looks a bit ugly...
2581 */
2582 if (pVCpu->idCpu == 0)
2583 vmR3CheckIntegrity(pVM);
2584
2585 /* Reset the VCpu state. */
2586 VMCPU_ASSERT_STATE(pVCpu, VMCPUSTATE_STARTED);
2587
2588 /* Clear all pending forced actions. */
2589 VMCPU_FF_CLEAR_MASK(pVCpu, VMCPU_FF_ALL_MASK & ~VMCPU_FF_REQUEST);
2590
2591 /*
2592 * Reset the VM components.
2593 */
2594 if (pVCpu->idCpu == 0)
2595 {
2596 GIMR3Reset(pVM); /* This must come *before* PDM and TM. */
2597 PDMR3Reset(pVM);
2598 PGMR3Reset(pVM);
2599 SELMR3Reset(pVM);
2600 TRPMR3Reset(pVM);
2601 IOMR3Reset(pVM);
2602 CPUMR3Reset(pVM); /* This must come *after* PDM (due to APIC base MSR caching). */
2603 TMR3Reset(pVM);
2604 EMR3Reset(pVM);
2605 HMR3Reset(pVM); /* This must come *after* PATM, CSAM, CPUM, SELM and TRPM. */
2606 NEMR3Reset(pVM);
2607
2608 /*
2609 * Do memory setup.
2610 */
2611 PGMR3MemSetup(pVM, true /*fAtReset*/);
2612 PDMR3MemSetup(pVM, true /*fAtReset*/);
2613
2614 /*
2615 * Since EMT(0) is the last to go thru here, it will advance the state.
2616 * When a live save is active, we will move on to SuspendingLS but
2617 * leave it for VMR3Reset to do the actual suspending due to deadlock risks.
2618 */
2619 PUVM pUVM = pVM->pUVM;
2620 RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
2621 enmVMState = pVM->enmVMState;
2622 if (enmVMState == VMSTATE_RESETTING)
2623 {
2624 if (pUVM->vm.s.enmPrevVMState == VMSTATE_SUSPENDED)
2625 vmR3SetStateLocked(pVM, pUVM, VMSTATE_SUSPENDED, VMSTATE_RESETTING, false /*fSetRatherThanClearFF*/);
2626 else
2627 vmR3SetStateLocked(pVM, pUVM, VMSTATE_RUNNING, VMSTATE_RESETTING, false /*fSetRatherThanClearFF*/);
2628 }
2629 else
2630 vmR3SetStateLocked(pVM, pUVM, VMSTATE_SUSPENDING_LS, VMSTATE_RESETTING_LS, false /*fSetRatherThanClearFF*/);
2631 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
2632
2633 vmR3CheckIntegrity(pVM);
2634
2635 /*
2636 * Do the suspend bit as well.
2637 * It only requires some EMT(0) work at present.
2638 */
2639 if (enmVMState != VMSTATE_RESETTING)
2640 {
2641 vmR3SuspendDoWork(pVM);
2642 vmR3SetState(pVM, VMSTATE_SUSPENDED_LS, VMSTATE_SUSPENDING_LS);
2643 }
2644 }
2645
2646 return enmVMState == VMSTATE_RESETTING
2647 ? VINF_EM_RESET
2648 : VINF_EM_SUSPEND; /** @todo VINF_EM_SUSPEND has lower priority than VINF_EM_RESET, so fix races. Perhaps add a new code for this combined case. */
2649}
2650
2651
2652/**
2653 * Internal worker for VMR3Reset, VMR3ResetFF, VMR3TripleFault.
2654 *
2655 * @returns VBox status code.
2656 * @param pVM The cross context VM structure.
2657 * @param fHardReset Whether it's a hard reset or not.
2658 * @param fResetFlags The reset flags (PDMVMRESET_F_XXX).
2659 */
2660static VBOXSTRICTRC vmR3ResetCommon(PVM pVM, bool fHardReset, uint32_t fResetFlags)
2661{
2662 LogFlow(("vmR3ResetCommon: fHardReset=%RTbool fResetFlags=%#x\n", fHardReset, fResetFlags));
2663 int rc;
2664 if (fHardReset)
2665 {
2666 /*
2667 * Hard reset.
2668 */
2669 /* Check whether we're supposed to power off instead of resetting. */
2670 if (pVM->vm.s.fPowerOffInsteadOfReset)
2671 {
2672 PUVM pUVM = pVM->pUVM;
2673 if ( pUVM->pVmm2UserMethods
2674 && pUVM->pVmm2UserMethods->pfnNotifyResetTurnedIntoPowerOff)
2675 pUVM->pVmm2UserMethods->pfnNotifyResetTurnedIntoPowerOff(pUVM->pVmm2UserMethods, pUVM);
2676 return VMR3PowerOff(pUVM);
2677 }
2678
2679 /* Gather all the EMTs to make sure there are no races before changing
2680 the VM state. */
2681 rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
2682 vmR3HardReset, NULL);
2683 }
2684 else
2685 {
2686 /*
2687 * Soft reset. Since we only support this with a single CPU active,
2688 * we must be on EMT #0 here.
2689 */
2690 VM_ASSERT_EMT0(pVM);
2691 rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
2692 vmR3SoftReset, &fResetFlags);
2693 }
2694
2695 LogFlow(("vmR3ResetCommon: returns %Rrc\n", rc));
2696 return rc;
2697}
2698
2699
2700
2701/**
2702 * Reset the current VM.
2703 *
2704 * @returns VBox status code.
2705 * @param pUVM The VM to reset.
2706 */
2707VMMR3DECL(int) VMR3Reset(PUVM pUVM)
2708{
2709 LogFlow(("VMR3Reset:\n"));
2710 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
2711 PVM pVM = pUVM->pVM;
2712 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
2713
2714 return VBOXSTRICTRC_VAL(vmR3ResetCommon(pVM, true, 0));
2715}
2716
2717
2718/**
2719 * Handle the reset force flag or triple fault.
2720 *
2721 * This handles both soft and hard resets (see PDMVMRESET_F_XXX).
2722 *
2723 * @returns VBox status code.
2724 * @param pVM The cross context VM structure.
2725 * @thread EMT
2726 *
2727 * @remarks Caller is expected to clear the VM_FF_RESET force flag.
2728 */
2729VMMR3_INT_DECL(VBOXSTRICTRC) VMR3ResetFF(PVM pVM)
2730{
2731 LogFlow(("VMR3ResetFF:\n"));
2732
2733 /*
2734 * First consult the firmware on whether this is a hard or soft reset.
2735 */
2736 uint32_t fResetFlags;
2737 bool fHardReset = PDMR3GetResetInfo(pVM, 0 /*fOverride*/, &fResetFlags);
2738 return vmR3ResetCommon(pVM, fHardReset, fResetFlags);
2739}
2740
2741
2742/**
2743 * For handling a CPU reset on triple fault.
2744 *
2745 * According to one mainboard manual, a CPU triple fault causes the 286 CPU to
2746 * send a SHUTDOWN signal to the chipset. The chipset responds by sending a
2747 * RESET signal to the CPU. So, it should be very similar to a soft/warm reset.
2748 *
2749 * @returns VBox status code.
2750 * @param pVM The cross context VM structure.
2751 * @thread EMT
2752 */
2753VMMR3_INT_DECL(VBOXSTRICTRC) VMR3ResetTripleFault(PVM pVM)
2754{
2755 LogFlow(("VMR3ResetTripleFault:\n"));
2756
2757 /*
2758 * First consult the firmware on whether this is a hard or soft reset.
2759 */
2760 uint32_t fResetFlags;
2761 bool fHardReset = PDMR3GetResetInfo(pVM, PDMVMRESET_F_TRIPLE_FAULT, &fResetFlags);
2762 return vmR3ResetCommon(pVM, fHardReset, fResetFlags);
2763}
2764
2765
2766/**
2767 * Gets the user mode VM structure pointer given a pointer to the VM.
2768 *
2769 * @returns Pointer to the user mode VM structure on success. NULL if @a pVM is
2770 * invalid (asserted).
2771 * @param pVM The cross context VM structure.
2772 * @sa VMR3GetVM, VMR3RetainUVM
2773 */
2774VMMR3DECL(PUVM) VMR3GetUVM(PVM pVM)
2775{
2776 VM_ASSERT_VALID_EXT_RETURN(pVM, NULL);
2777 return pVM->pUVM;
2778}
2779
2780
2781/**
2782 * Gets the shared VM structure pointer given the pointer to the user mode VM
2783 * structure.
2784 *
2785 * @returns Pointer to the VM.
2786 * NULL if @a pUVM is invalid (asserted) or if no shared VM structure
2787 * is currently associated with it.
2788 * @param pUVM The user mode VM handle.
2789 * @sa VMR3GetUVM
2790 */
2791VMMR3DECL(PVM) VMR3GetVM(PUVM pUVM)
2792{
2793 UVM_ASSERT_VALID_EXT_RETURN(pUVM, NULL);
2794 return pUVM->pVM;
2795}
2796
2797
2798/**
2799 * Retain the user mode VM handle.
2800 *
2801 * @returns Reference count.
2802 * UINT32_MAX if @a pUVM is invalid.
2803 *
2804 * @param pUVM The user mode VM handle.
2805 * @sa VMR3ReleaseUVM
2806 */
2807VMMR3DECL(uint32_t) VMR3RetainUVM(PUVM pUVM)
2808{
2809 UVM_ASSERT_VALID_EXT_RETURN(pUVM, UINT32_MAX);
2810 uint32_t cRefs = ASMAtomicIncU32(&pUVM->vm.s.cUvmRefs);
2811 AssertMsg(cRefs > 0 && cRefs < _64K, ("%u\n", cRefs));
2812 return cRefs;
2813}
2814
2815
2816/**
2817 * Does the final release of the UVM structure.
2818 *
2819 * @param pUVM The user mode VM handle.
2820 */
2821static void vmR3DoReleaseUVM(PUVM pUVM)
2822{
2823 /*
2824 * Free the UVM.
2825 */
2826 Assert(!pUVM->pVM);
2827
2828 MMR3HeapFree(pUVM->vm.s.pszName);
2829 pUVM->vm.s.pszName = NULL;
2830
2831 MMR3TermUVM(pUVM);
2832 STAMR3TermUVM(pUVM);
2833
2834 ASMAtomicUoWriteU32(&pUVM->u32Magic, UINT32_MAX);
2835 RTTlsFree(pUVM->vm.s.idxTLS);
2836 RTMemPageFree(pUVM, RT_UOFFSETOF_DYN(UVM, aCpus[pUVM->cCpus]));
2837}
2838
2839
2840/**
2841 * Releases a reference to the user mode VM handle.
2842 *
2843 * @returns The new reference count, 0 if destroyed.
2844 * UINT32_MAX if @a pUVM is invalid.
2845 *
2846 * @param pUVM The user mode VM handle.
2847 * @sa VMR3RetainUVM
2848 */
2849VMMR3DECL(uint32_t) VMR3ReleaseUVM(PUVM pUVM)
2850{
2851 if (!pUVM)
2852 return 0;
2853 UVM_ASSERT_VALID_EXT_RETURN(pUVM, UINT32_MAX);
2854 uint32_t cRefs = ASMAtomicDecU32(&pUVM->vm.s.cUvmRefs);
2855 if (!cRefs)
2856 vmR3DoReleaseUVM(pUVM);
2857 else
2858 AssertMsg(cRefs < _64K, ("%u\n", cRefs));
2859 return cRefs;
2860}
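/*
 * A minimal usage sketch of the UVM reference counting: a worker thread
 * retains the handle for the duration of its access and releases it when
 * done, so the UVM structure cannot be freed underneath it.
 */
#if 0
static void exampleWorker(PUVM pUVM)
{
    if (VMR3RetainUVM(pUVM) != UINT32_MAX) /* UINT32_MAX means invalid handle */
    {
        LogRel(("VM name: %s\n", VMR3GetName(pUVM)));
        VMR3ReleaseUVM(pUVM);
    }
}
#endif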
2861
2862
2863/**
2864 * Gets the VM name.
2865 *
2866 * @returns Pointer to a read-only string containing the name. NULL if called
2867 * too early.
2868 * @param pUVM The user mode VM handle.
2869 */
2870VMMR3DECL(const char *) VMR3GetName(PUVM pUVM)
2871{
2872 UVM_ASSERT_VALID_EXT_RETURN(pUVM, NULL);
2873 return pUVM->vm.s.pszName;
2874}
2875
2876
2877/**
2878 * Gets the VM UUID.
2879 *
2880 * @returns pUuid on success, NULL on failure.
2881 * @param pUVM The user mode VM handle.
2882 * @param pUuid Where to store the UUID.
2883 */
2884VMMR3DECL(PRTUUID) VMR3GetUuid(PUVM pUVM, PRTUUID pUuid)
2885{
2886 UVM_ASSERT_VALID_EXT_RETURN(pUVM, NULL);
2887 AssertPtrReturn(pUuid, NULL);
2888
2889 *pUuid = pUVM->vm.s.Uuid;
2890 return pUuid;
2891}
2892
2893
2894/**
2895 * Gets the current VM state.
2896 *
2897 * @returns The current VM state.
2898 * @param pVM The cross context VM structure.
2899 * @thread Any
2900 */
2901VMMR3DECL(VMSTATE) VMR3GetState(PVM pVM)
2902{
2903 AssertMsgReturn(RT_VALID_ALIGNED_PTR(pVM, PAGE_SIZE), ("%p\n", pVM), VMSTATE_TERMINATED);
2904 VMSTATE enmVMState = pVM->enmVMState;
2905 return enmVMState >= VMSTATE_CREATING && enmVMState <= VMSTATE_TERMINATED ? enmVMState : VMSTATE_TERMINATED;
2906}
2907
2908
2909/**
2910 * Gets the current VM state.
2911 *
2912 * @returns The current VM state.
2913 * @param pUVM The user-mode VM handle.
2914 * @thread Any
2915 */
2916VMMR3DECL(VMSTATE) VMR3GetStateU(PUVM pUVM)
2917{
2918 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VMSTATE_TERMINATED);
2919 if (RT_UNLIKELY(!pUVM->pVM))
2920 return VMSTATE_TERMINATED;
2921 return pUVM->pVM->enmVMState;
2922}
2923
2924
2925/**
2926 * Gets the state name string for a VM state.
2927 *
2928 * @returns Pointer to the state name. (readonly)
2929 * @param enmState The state.
2930 */
2931VMMR3DECL(const char *) VMR3GetStateName(VMSTATE enmState)
2932{
2933 switch (enmState)
2934 {
2935 case VMSTATE_CREATING: return "CREATING";
2936 case VMSTATE_CREATED: return "CREATED";
2937 case VMSTATE_LOADING: return "LOADING";
2938 case VMSTATE_POWERING_ON: return "POWERING_ON";
2939 case VMSTATE_RESUMING: return "RESUMING";
2940 case VMSTATE_RUNNING: return "RUNNING";
2941 case VMSTATE_RUNNING_LS: return "RUNNING_LS";
2942 case VMSTATE_RESETTING: return "RESETTING";
2943 case VMSTATE_RESETTING_LS: return "RESETTING_LS";
2944 case VMSTATE_SOFT_RESETTING: return "SOFT_RESETTING";
2945 case VMSTATE_SOFT_RESETTING_LS: return "SOFT_RESETTING_LS";
2946 case VMSTATE_SUSPENDED: return "SUSPENDED";
2947 case VMSTATE_SUSPENDED_LS: return "SUSPENDED_LS";
2948 case VMSTATE_SUSPENDED_EXT_LS: return "SUSPENDED_EXT_LS";
2949 case VMSTATE_SUSPENDING: return "SUSPENDING";
2950 case VMSTATE_SUSPENDING_LS: return "SUSPENDING_LS";
2951 case VMSTATE_SUSPENDING_EXT_LS: return "SUSPENDING_EXT_LS";
2952 case VMSTATE_SAVING: return "SAVING";
2953 case VMSTATE_DEBUGGING: return "DEBUGGING";
2954 case VMSTATE_DEBUGGING_LS: return "DEBUGGING_LS";
2955 case VMSTATE_POWERING_OFF: return "POWERING_OFF";
2956 case VMSTATE_POWERING_OFF_LS: return "POWERING_OFF_LS";
2957 case VMSTATE_FATAL_ERROR: return "FATAL_ERROR";
2958 case VMSTATE_FATAL_ERROR_LS: return "FATAL_ERROR_LS";
2959 case VMSTATE_GURU_MEDITATION: return "GURU_MEDITATION";
2960 case VMSTATE_GURU_MEDITATION_LS:return "GURU_MEDITATION_LS";
2961 case VMSTATE_LOAD_FAILURE: return "LOAD_FAILURE";
2962 case VMSTATE_OFF: return "OFF";
2963 case VMSTATE_OFF_LS: return "OFF_LS";
2964 case VMSTATE_DESTROYING: return "DESTROYING";
2965 case VMSTATE_TERMINATED: return "TERMINATED";
2966
2967 default:
2968 AssertMsgFailed(("Unknown state %d\n", enmState));
2969 return "Unknown!\n";
2970 }
2971}
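/*
 * A minimal usage sketch: combining VMR3GetStateU and VMR3GetStateName to
 * log a human-readable state, the same pairing vmR3DoAtState uses for its
 * state change logging further below.
 */
#if 0
static void exampleLogState(PUVM pUVM)
{
    VMSTATE enmState = VMR3GetStateU(pUVM);
    LogRel(("Current VM state: %s\n", VMR3GetStateName(enmState)));
}
#endif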
2972
2973
2974/**
2975 * Validates the state transition in strict builds.
2976 *
2977 * @returns true if valid, false if not.
2978 *
2979 * @param enmStateOld The old (current) state.
2980 * @param enmStateNew The proposed new state.
2981 *
2982 * @remarks The reference for this is found in doc/vp/VMM.vpp, the VMSTATE
2983 * diagram (under State Machine Diagram).
2984 */
2985static bool vmR3ValidateStateTransition(VMSTATE enmStateOld, VMSTATE enmStateNew)
2986{
2987#ifndef VBOX_STRICT
2988 RT_NOREF2(enmStateOld, enmStateNew);
2989#else
2990 switch (enmStateOld)
2991 {
2992 case VMSTATE_CREATING:
2993 AssertMsgReturn(enmStateNew == VMSTATE_CREATED, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
2994 break;
2995
2996 case VMSTATE_CREATED:
2997 AssertMsgReturn( enmStateNew == VMSTATE_LOADING
2998 || enmStateNew == VMSTATE_POWERING_ON
2999 || enmStateNew == VMSTATE_POWERING_OFF
3000 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3001 break;
3002
3003 case VMSTATE_LOADING:
3004 AssertMsgReturn( enmStateNew == VMSTATE_SUSPENDED
3005 || enmStateNew == VMSTATE_LOAD_FAILURE
3006 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3007 break;
3008
3009 case VMSTATE_POWERING_ON:
3010 AssertMsgReturn( enmStateNew == VMSTATE_RUNNING
3011 /*|| enmStateNew == VMSTATE_FATAL_ERROR ?*/
3012 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3013 break;
3014
3015 case VMSTATE_RESUMING:
3016 AssertMsgReturn( enmStateNew == VMSTATE_RUNNING
3017 /*|| enmStateNew == VMSTATE_FATAL_ERROR ?*/
3018 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3019 break;
3020
3021 case VMSTATE_RUNNING:
3022 AssertMsgReturn( enmStateNew == VMSTATE_POWERING_OFF
3023 || enmStateNew == VMSTATE_SUSPENDING
3024 || enmStateNew == VMSTATE_RESETTING
3025 || enmStateNew == VMSTATE_SOFT_RESETTING
3026 || enmStateNew == VMSTATE_RUNNING_LS
3027 || enmStateNew == VMSTATE_DEBUGGING
3028 || enmStateNew == VMSTATE_FATAL_ERROR
3029 || enmStateNew == VMSTATE_GURU_MEDITATION
3030 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3031 break;
3032
3033 case VMSTATE_RUNNING_LS:
3034 AssertMsgReturn( enmStateNew == VMSTATE_POWERING_OFF_LS
3035 || enmStateNew == VMSTATE_SUSPENDING_LS
3036 || enmStateNew == VMSTATE_SUSPENDING_EXT_LS
3037 || enmStateNew == VMSTATE_RESETTING_LS
3038 || enmStateNew == VMSTATE_SOFT_RESETTING_LS
3039 || enmStateNew == VMSTATE_RUNNING
3040 || enmStateNew == VMSTATE_DEBUGGING_LS
3041 || enmStateNew == VMSTATE_FATAL_ERROR_LS
3042 || enmStateNew == VMSTATE_GURU_MEDITATION_LS
3043 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3044 break;
3045
3046 case VMSTATE_RESETTING:
3047 AssertMsgReturn(enmStateNew == VMSTATE_RUNNING, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3048 break;
3049
3050 case VMSTATE_SOFT_RESETTING:
3051 AssertMsgReturn(enmStateNew == VMSTATE_RUNNING, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3052 break;
3053
3054 case VMSTATE_RESETTING_LS:
3055 AssertMsgReturn( enmStateNew == VMSTATE_SUSPENDING_LS
3056 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3057 break;
3058
3059 case VMSTATE_SOFT_RESETTING_LS:
3060 AssertMsgReturn( enmStateNew == VMSTATE_RUNNING_LS
3061 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3062 break;
3063
3064 case VMSTATE_SUSPENDING:
3065 AssertMsgReturn(enmStateNew == VMSTATE_SUSPENDED, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3066 break;
3067
3068 case VMSTATE_SUSPENDING_LS:
3069 AssertMsgReturn( enmStateNew == VMSTATE_SUSPENDING
3070 || enmStateNew == VMSTATE_SUSPENDED_LS
3071 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3072 break;
3073
3074 case VMSTATE_SUSPENDING_EXT_LS:
3075 AssertMsgReturn( enmStateNew == VMSTATE_SUSPENDING
3076 || enmStateNew == VMSTATE_SUSPENDED_EXT_LS
3077 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3078 break;
3079
3080 case VMSTATE_SUSPENDED:
3081 AssertMsgReturn( enmStateNew == VMSTATE_POWERING_OFF
3082 || enmStateNew == VMSTATE_SAVING
3083 || enmStateNew == VMSTATE_RESETTING
3084 || enmStateNew == VMSTATE_SOFT_RESETTING
3085 || enmStateNew == VMSTATE_RESUMING
3086 || enmStateNew == VMSTATE_LOADING
3087 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3088 break;
3089
3090 case VMSTATE_SUSPENDED_LS:
3091 AssertMsgReturn( enmStateNew == VMSTATE_SUSPENDED
3092 || enmStateNew == VMSTATE_SAVING
3093 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3094 break;
3095
3096 case VMSTATE_SUSPENDED_EXT_LS:
3097 AssertMsgReturn( enmStateNew == VMSTATE_SUSPENDED
3098 || enmStateNew == VMSTATE_SAVING
3099 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3100 break;
3101
3102 case VMSTATE_SAVING:
3103 AssertMsgReturn(enmStateNew == VMSTATE_SUSPENDED, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3104 break;
3105
3106 case VMSTATE_DEBUGGING:
3107 AssertMsgReturn( enmStateNew == VMSTATE_RUNNING
3108 || enmStateNew == VMSTATE_POWERING_OFF
3109 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3110 break;
3111
3112 case VMSTATE_DEBUGGING_LS:
3113 AssertMsgReturn( enmStateNew == VMSTATE_DEBUGGING
3114 || enmStateNew == VMSTATE_RUNNING_LS
3115 || enmStateNew == VMSTATE_POWERING_OFF_LS
3116 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3117 break;
3118
3119 case VMSTATE_POWERING_OFF:
3120 AssertMsgReturn(enmStateNew == VMSTATE_OFF, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3121 break;
3122
3123 case VMSTATE_POWERING_OFF_LS:
3124 AssertMsgReturn( enmStateNew == VMSTATE_POWERING_OFF
3125 || enmStateNew == VMSTATE_OFF_LS
3126 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3127 break;
3128
3129 case VMSTATE_OFF:
3130 AssertMsgReturn(enmStateNew == VMSTATE_DESTROYING, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3131 break;
3132
3133 case VMSTATE_OFF_LS:
3134 AssertMsgReturn(enmStateNew == VMSTATE_OFF, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3135 break;
3136
3137 case VMSTATE_FATAL_ERROR:
3138 AssertMsgReturn(enmStateNew == VMSTATE_POWERING_OFF, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3139 break;
3140
3141 case VMSTATE_FATAL_ERROR_LS:
3142 AssertMsgReturn( enmStateNew == VMSTATE_FATAL_ERROR
3143 || enmStateNew == VMSTATE_POWERING_OFF_LS
3144 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3145 break;
3146
3147 case VMSTATE_GURU_MEDITATION:
3148 AssertMsgReturn( enmStateNew == VMSTATE_DEBUGGING
3149 || enmStateNew == VMSTATE_POWERING_OFF
3150 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3151 break;
3152
3153 case VMSTATE_GURU_MEDITATION_LS:
3154 AssertMsgReturn( enmStateNew == VMSTATE_GURU_MEDITATION
3155 || enmStateNew == VMSTATE_DEBUGGING_LS
3156 || enmStateNew == VMSTATE_POWERING_OFF_LS
3157 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3158 break;
3159
3160 case VMSTATE_LOAD_FAILURE:
3161 AssertMsgReturn(enmStateNew == VMSTATE_POWERING_OFF, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3162 break;
3163
3164 case VMSTATE_DESTROYING:
3165 AssertMsgReturn(enmStateNew == VMSTATE_TERMINATED, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3166 break;
3167
3168 case VMSTATE_TERMINATED:
3169 default:
3170 AssertMsgFailedReturn(("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3171 break;
3172 }
3173#endif /* VBOX_STRICT */
3174 return true;
3175}
3176
3177
3178/**
3179 * Does the state change callouts.
3180 *
3181 * The caller owns the AtStateCritSect.
3182 *
3183 * @param pVM The cross context VM structure.
3184 * @param pUVM The UVM handle.
3185 * @param enmStateNew The new state.
3186 * @param enmStateOld The old state.
3187 */
3188static void vmR3DoAtState(PVM pVM, PUVM pUVM, VMSTATE enmStateNew, VMSTATE enmStateOld)
3189{
3190 LogRel(("Changing the VM state from '%s' to '%s'\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)));
3191
3192 for (PVMATSTATE pCur = pUVM->vm.s.pAtState; pCur; pCur = pCur->pNext)
3193 {
3194 pCur->pfnAtState(pUVM, enmStateNew, enmStateOld, pCur->pvUser);
3195 if ( enmStateNew != VMSTATE_DESTROYING
3196 && pVM->enmVMState == VMSTATE_DESTROYING)
3197 break;
3198 AssertMsg(pVM->enmVMState == enmStateNew,
3199 ("You are not allowed to change the state while in the change callback, except "
3200 "from destroying the VM. There are restrictions in the way the state changes "
3201 "are propagated up to the EM execution loop and it makes the program flow very "
3202 "difficult to follow. (%s, expected %s, old %s)\n",
3203 VMR3GetStateName(pVM->enmVMState), VMR3GetStateName(enmStateNew),
3204 VMR3GetStateName(enmStateOld)));
3205 }
3206}
3207
3208
3209/**
3210 * Sets the current VM state, with the AtStateCritSect already entered.
3211 *
3212 * @param pVM The cross context VM structure.
3213 * @param pUVM The UVM handle.
3214 * @param enmStateNew The new state.
3215 * @param enmStateOld The old state.
3216 * @param fSetRatherThanClearFF The usual behavior is to clear the
3217 * VM_FF_CHECK_VM_STATE force flag, but for
3218 * some transitions (-> guru) we need to kick
3219 * the other EMTs to stop what they're doing.
3220 */
3221static void vmR3SetStateLocked(PVM pVM, PUVM pUVM, VMSTATE enmStateNew, VMSTATE enmStateOld, bool fSetRatherThanClearFF)
3222{
3223 vmR3ValidateStateTransition(enmStateOld, enmStateNew);
3224
3225 AssertMsg(pVM->enmVMState == enmStateOld,
3226 ("%s != %s\n", VMR3GetStateName(pVM->enmVMState), VMR3GetStateName(enmStateOld)));
3227
3228 pUVM->vm.s.enmPrevVMState = enmStateOld;
3229 pVM->enmVMState = enmStateNew;
3230
3231 if (!fSetRatherThanClearFF)
3232 VM_FF_CLEAR(pVM, VM_FF_CHECK_VM_STATE);
3233 else if (pVM->cCpus > 0)
3234 VM_FF_SET(pVM, VM_FF_CHECK_VM_STATE);
3235
3236 vmR3DoAtState(pVM, pUVM, enmStateNew, enmStateOld);
3237}
3238
3239
3240/**
3241 * Sets the current VM state.
3242 *
3243 * @param pVM The cross context VM structure.
3244 * @param enmStateNew The new state.
3245 * @param enmStateOld The old state (for asserting only).
3246 */
3247static void vmR3SetState(PVM pVM, VMSTATE enmStateNew, VMSTATE enmStateOld)
3248{
3249 PUVM pUVM = pVM->pUVM;
3250 RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
3251
3252 RT_NOREF_PV(enmStateOld);
3253 AssertMsg(pVM->enmVMState == enmStateOld,
3254 ("%s != %s\n", VMR3GetStateName(pVM->enmVMState), VMR3GetStateName(enmStateOld)));
3255 vmR3SetStateLocked(pVM, pUVM, enmStateNew, pVM->enmVMState, false /*fSetRatherThanClearFF*/);
3256
3257 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
3258}
3259
3260
3261/**
3262 * Tries to perform a state transition.
3263 *
3264 * @returns The 1-based ordinal of the transition that succeeded, or
3265 *          VERR_VM_INVALID_VM_STATE (with Assert + LogRel) on failure.
3266 *
3267 * @param pVM The cross context VM structure.
3268 * @param pszWho Who is trying to change it.
3269 * @param cTransitions The number of transitions in the ellipsis.
3270 * @param ... Transition pairs; new, old.
3271 */
3272static int vmR3TrySetState(PVM pVM, const char *pszWho, unsigned cTransitions, ...)
3273{
3274 va_list va;
3275 VMSTATE enmStateNew = VMSTATE_CREATED;
3276 VMSTATE enmStateOld = VMSTATE_CREATED;
3277
3278#ifdef VBOX_STRICT
3279 /*
3280 * Validate the input first.
3281 */
3282 va_start(va, cTransitions);
3283 for (unsigned i = 0; i < cTransitions; i++)
3284 {
3285 enmStateNew = (VMSTATE)va_arg(va, /*VMSTATE*/int);
3286 enmStateOld = (VMSTATE)va_arg(va, /*VMSTATE*/int);
3287 vmR3ValidateStateTransition(enmStateOld, enmStateNew);
3288 }
3289 va_end(va);
3290#endif
3291
3292 /*
3293 * Grab the lock and see if any of the proposed transitions works out.
3294 */
3295 va_start(va, cTransitions);
3296 int rc = VERR_VM_INVALID_VM_STATE;
3297 PUVM pUVM = pVM->pUVM;
3298 RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
3299
3300 VMSTATE enmStateCur = pVM->enmVMState;
3301
3302 for (unsigned i = 0; i < cTransitions; i++)
3303 {
3304 enmStateNew = (VMSTATE)va_arg(va, /*VMSTATE*/int);
3305 enmStateOld = (VMSTATE)va_arg(va, /*VMSTATE*/int);
3306 if (enmStateCur == enmStateOld)
3307 {
3308 vmR3SetStateLocked(pVM, pUVM, enmStateNew, enmStateOld, false /*fSetRatherThanClearFF*/);
3309 rc = i + 1;
3310 break;
3311 }
3312 }
3313
3314 if (RT_FAILURE(rc))
3315 {
3316 /*
3317 * Complain about it.
3318 */
3319 if (cTransitions == 1)
3320 {
3321 LogRel(("%s: %s -> %s failed, because the VM state is actually %s\n",
3322 pszWho, VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew), VMR3GetStateName(enmStateCur)));
3323 VMSetError(pVM, VERR_VM_INVALID_VM_STATE, RT_SRC_POS,
3324 N_("%s failed because the VM state is %s instead of %s"),
3325 pszWho, VMR3GetStateName(enmStateCur), VMR3GetStateName(enmStateOld));
3326 AssertMsgFailed(("%s: %s -> %s failed, because the VM state is actually %s\n",
3327 pszWho, VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew), VMR3GetStateName(enmStateCur)));
3328 }
3329 else
3330 {
3331 va_end(va);
3332 va_start(va, cTransitions);
3333 LogRel(("%s:\n", pszWho));
3334 for (unsigned i = 0; i < cTransitions; i++)
3335 {
3336 enmStateNew = (VMSTATE)va_arg(va, /*VMSTATE*/int);
3337 enmStateOld = (VMSTATE)va_arg(va, /*VMSTATE*/int);
3338 LogRel(("%s%s -> %s",
3339 i ? ", " : " ", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)));
3340 }
3341 LogRel((" failed, because the VM state is actually %s\n", VMR3GetStateName(enmStateCur)));
3342 VMSetError(pVM, VERR_VM_INVALID_VM_STATE, RT_SRC_POS,
3343 N_("%s failed because the current VM state, %s, was not found in the state transition table (old state %s)"),
3344 pszWho, VMR3GetStateName(enmStateCur), VMR3GetStateName(enmStateOld));
3345 AssertMsgFailed(("%s - state=%s, see release log for full details. Check the cTransitions passed us.\n",
3346 pszWho, VMR3GetStateName(enmStateCur)));
3347 }
3348 }
3349
3350 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
3351 va_end(va);
3352 Assert(rc > 0 || rc < 0);
3353 return rc;
3354}
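
/* Editor's note: an illustrative sketch (not part of the original source) of
 * how callers pass (new, old) transition pairs to vmR3TrySetState. The pairs
 * below mirror the ones used by vmR3SetRuntimeErrorChangeState further down;
 * the "Sample" caller name is hypothetical.
 * @code
 *      int rc = vmR3TrySetState(pVM, "Sample", 2,
 *                               VMSTATE_FATAL_ERROR,    VMSTATE_RUNNING,
 *                               VMSTATE_FATAL_ERROR_LS, VMSTATE_RUNNING_LS);
 *      if (RT_FAILURE(rc))
 *          return rc;
 *      if (rc == 2) // the 1-based ordinal: the live-save pair matched
 *          SSMR3Cancel(pVM->pUVM);
 * @endcode
 */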
3355
3356
3357/**
3358 * Interface used by EM to signal that it's entering the guru meditation state.
3359 *
3360 * This will notify the other threads.
3361 *
3362 * @returns true if the state changed to Guru, false if no state change.
3363 * @param pVM The cross context VM structure.
3364 */
3365VMMR3_INT_DECL(bool) VMR3SetGuruMeditation(PVM pVM)
3366{
3367 PUVM pUVM = pVM->pUVM;
3368 RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
3369
3370 VMSTATE enmStateCur = pVM->enmVMState;
3371 bool fRc = true;
3372 if (enmStateCur == VMSTATE_RUNNING)
3373 vmR3SetStateLocked(pVM, pUVM, VMSTATE_GURU_MEDITATION, VMSTATE_RUNNING, true /*fSetRatherThanClearFF*/);
3374 else if (enmStateCur == VMSTATE_RUNNING_LS)
3375 {
3376 vmR3SetStateLocked(pVM, pUVM, VMSTATE_GURU_MEDITATION_LS, VMSTATE_RUNNING_LS, true /*fSetRatherThanClearFF*/);
3377 SSMR3Cancel(pUVM);
3378 }
3379 else
3380 fRc = false;
3381
3382 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
3383 return fRc;
3384}
3385
3386
3387/**
3388 * Called by vmR3EmulationThreadWithId just before the VM structure is freed.
3389 *
3390 * @param pVM The cross context VM structure.
3391 */
3392void vmR3SetTerminated(PVM pVM)
3393{
3394 vmR3SetState(pVM, VMSTATE_TERMINATED, VMSTATE_DESTROYING);
3395}
3396
3397
3398/**
3399 * Checks if the VM was teleported and hasn't been fully resumed yet.
3400 *
3401 * This applies to both sides of the teleportation since we may leave a working
3402 * clone behind that the user is allowed to resume.
3403 *
3404 * @returns true / false.
3405 * @param pVM The cross context VM structure.
3406 * @thread Any thread.
3407 */
3408VMMR3_INT_DECL(bool) VMR3TeleportedAndNotFullyResumedYet(PVM pVM)
3409{
3410 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
3411 return pVM->vm.s.fTeleportedAndNotFullyResumedYet;
3412}
3413
3414
3415/**
3416 * Registers a VM state change callback.
3417 *
3418 * You are not allowed to call any function which changes the VM state from a
3419 * state callback.
3420 *
3421 * @returns VBox status code.
3422 * @param pUVM The VM handle.
3423 * @param pfnAtState Pointer to callback.
3424 * @param pvUser User argument.
3425 * @thread Any.
3426 */
3427VMMR3DECL(int) VMR3AtStateRegister(PUVM pUVM, PFNVMATSTATE pfnAtState, void *pvUser)
3428{
3429 LogFlow(("VMR3AtStateRegister: pfnAtState=%p pvUser=%p\n", pfnAtState, pvUser));
3430
3431 /*
3432 * Validate input.
3433 */
3434 AssertPtrReturn(pfnAtState, VERR_INVALID_PARAMETER);
3435 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
3436
3437 /*
3438 * Allocate a new record.
3439 */
3440 PVMATSTATE pNew = (PVMATSTATE)MMR3HeapAllocU(pUVM, MM_TAG_VM, sizeof(*pNew));
3441 if (!pNew)
3442 return VERR_NO_MEMORY;
3443
3444 /* fill */
3445 pNew->pfnAtState = pfnAtState;
3446 pNew->pvUser = pvUser;
3447
3448 /* insert */
3449 RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
3450 pNew->pNext = *pUVM->vm.s.ppAtStateNext;
3451 *pUVM->vm.s.ppAtStateNext = pNew;
3452 pUVM->vm.s.ppAtStateNext = &pNew->pNext;
3453 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
3454
3455 return VINF_SUCCESS;
3456}
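
/* Editor's note: a minimal sketch (not part of the original source) of a
 * state-change listener; the callback name and the NULL user argument are
 * hypothetical. The parameter list follows the pfnAtState invocation in
 * vmR3DoAtState above.
 * @code
 *      static DECLCALLBACK(void) sampleAtState(PUVM pUVM, VMSTATE enmState,
 *                                              VMSTATE enmOldState, void *pvUser)
 *      {
 *          RT_NOREF(pUVM, pvUser);
 *          LogRel(("sample: %s -> %s\n",
 *                  VMR3GetStateName(enmOldState), VMR3GetStateName(enmState)));
 *      }
 *      ...
 *      int rc = VMR3AtStateRegister(pUVM, sampleAtState, NULL);
 *      AssertRC(rc);
 * @endcode
 */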
3457
3458
3459/**
3460 * Deregisters a VM state change callback.
3461 *
3462 * @returns VBox status code.
3463 * @param pUVM The VM handle.
3464 * @param pfnAtState Pointer to callback.
3465 * @param pvUser User argument.
3466 * @thread Any.
3467 */
3468VMMR3DECL(int) VMR3AtStateDeregister(PUVM pUVM, PFNVMATSTATE pfnAtState, void *pvUser)
3469{
3470 LogFlow(("VMR3AtStateDeregister: pfnAtState=%p pvUser=%p\n", pfnAtState, pvUser));
3471
3472 /*
3473 * Validate input.
3474 */
3475 AssertPtrReturn(pfnAtState, VERR_INVALID_PARAMETER);
3476 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
3477
3478 RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
3479
3480 /*
3481 * Search the list for the entry.
3482 */
3483 PVMATSTATE pPrev = NULL;
3484 PVMATSTATE pCur = pUVM->vm.s.pAtState;
3485 while ( pCur
3486 && ( pCur->pfnAtState != pfnAtState
3487 || pCur->pvUser != pvUser))
3488 {
3489 pPrev = pCur;
3490 pCur = pCur->pNext;
3491 }
3492 if (!pCur)
3493 {
3494 AssertMsgFailed(("pfnAtState=%p was not found\n", pfnAtState));
3495 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
3496 return VERR_FILE_NOT_FOUND;
3497 }
3498
3499 /*
3500 * Unlink it.
3501 */
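    /* Note (editor's comment): ppAtStateNext always points at the tail link,
       i.e. the pNext of the last entry, or pAtState itself when the list is
       empty, so VMR3AtStateRegister can append in O(1); it is re-aimed below
       whenever the tail entry goes away. */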
3502 if (pPrev)
3503 {
3504 pPrev->pNext = pCur->pNext;
3505 if (!pCur->pNext)
3506 pUVM->vm.s.ppAtStateNext = &pPrev->pNext;
3507 }
3508 else
3509 {
3510 pUVM->vm.s.pAtState = pCur->pNext;
3511 if (!pCur->pNext)
3512 pUVM->vm.s.ppAtStateNext = &pUVM->vm.s.pAtState;
3513 }
3514
3515 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
3516
3517 /*
3518 * Free it.
3519 */
3520 pCur->pfnAtState = NULL;
3521 pCur->pNext = NULL;
3522 MMR3HeapFree(pCur);
3523
3524 return VINF_SUCCESS;
3525}
3526
3527
3528/**
3529 * Registers a VM error callback.
3530 *
3531 * @returns VBox status code.
3532 * @param pUVM The VM handle.
3533 * @param pfnAtError Pointer to callback.
3534 * @param pvUser User argument.
3535 * @thread Any.
3536 */
3537VMMR3DECL(int) VMR3AtErrorRegister(PUVM pUVM, PFNVMATERROR pfnAtError, void *pvUser)
3538{
3539 LogFlow(("VMR3AtErrorRegister: pfnAtError=%p pvUser=%p\n", pfnAtError, pvUser));
3540
3541 /*
3542 * Validate input.
3543 */
3544 AssertPtrReturn(pfnAtError, VERR_INVALID_PARAMETER);
3545 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
3546
3547 /*
3548 * Allocate a new record.
3549 */
3550 PVMATERROR pNew = (PVMATERROR)MMR3HeapAllocU(pUVM, MM_TAG_VM, sizeof(*pNew));
3551 if (!pNew)
3552 return VERR_NO_MEMORY;
3553
3554 /* fill */
3555 pNew->pfnAtError = pfnAtError;
3556 pNew->pvUser = pvUser;
3557
3558 /* insert */
3559 RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
3560 pNew->pNext = *pUVM->vm.s.ppAtErrorNext;
3561 *pUVM->vm.s.ppAtErrorNext = pNew;
3562 pUVM->vm.s.ppAtErrorNext = &pNew->pNext;
3563 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
3564
3565 return VINF_SUCCESS;
3566}
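
/* Editor's note: a minimal sketch (not part of the original source) of an
 * error listener; names are hypothetical. The parameter list mirrors the
 * pfnAtError invocation in vmR3SetErrorUV below, which hands the callback the
 * format string plus a va_list.
 * @code
 *      static DECLCALLBACK(void) sampleAtError(PUVM pUVM, void *pvUser, int rc,
 *                                              RT_SRC_POS_DECL,
 *                                              const char *pszFormat, va_list va)
 *      {
 *          RT_NOREF(pUVM, pvUser, pszFile, iLine, pszFunction);
 *          LogRel(("sample error rc=%Rrc: %N\n", rc, pszFormat, &va));
 *      }
 *      ...
 *      int rc = VMR3AtErrorRegister(pUVM, sampleAtError, NULL);
 * @endcode
 */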
3567
3568
3569/**
3570 * Deregisters a VM error callback.
3571 *
3572 * @returns VBox status code.
3573 * @param pUVM The VM handle.
3574 * @param pfnAtError Pointer to callback.
3575 * @param pvUser User argument.
3576 * @thread Any.
3577 */
3578VMMR3DECL(int) VMR3AtErrorDeregister(PUVM pUVM, PFNVMATERROR pfnAtError, void *pvUser)
3579{
3580 LogFlow(("VMR3AtErrorDeregister: pfnAtError=%p pvUser=%p\n", pfnAtError, pvUser));
3581
3582 /*
3583 * Validate input.
3584 */
3585 AssertPtrReturn(pfnAtError, VERR_INVALID_PARAMETER);
3586 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
3587
3588 RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
3589
3590 /*
3591 * Search the list for the entry.
3592 */
3593 PVMATERROR pPrev = NULL;
3594 PVMATERROR pCur = pUVM->vm.s.pAtError;
3595 while ( pCur
3596 && ( pCur->pfnAtError != pfnAtError
3597 || pCur->pvUser != pvUser))
3598 {
3599 pPrev = pCur;
3600 pCur = pCur->pNext;
3601 }
3602 if (!pCur)
3603 {
3604 AssertMsgFailed(("pfnAtError=%p was not found\n", pfnAtError));
3605 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
3606 return VERR_FILE_NOT_FOUND;
3607 }
3608
3609 /*
3610 * Unlink it.
3611 */
3612 if (pPrev)
3613 {
3614 pPrev->pNext = pCur->pNext;
3615 if (!pCur->pNext)
3616 pUVM->vm.s.ppAtErrorNext = &pPrev->pNext;
3617 }
3618 else
3619 {
3620 pUVM->vm.s.pAtError = pCur->pNext;
3621 if (!pCur->pNext)
3622 pUVM->vm.s.ppAtErrorNext = &pUVM->vm.s.pAtError;
3623 }
3624
3625 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
3626
3627 /*
3628 * Free it.
3629 */
3630 pCur->pfnAtError = NULL;
3631 pCur->pNext = NULL;
3632 MMR3HeapFree(pCur);
3633
3634 return VINF_SUCCESS;
3635}
3636
3637
3638/**
3639 * Ellipsis to va_list wrapper for calling pfnAtError.
3640 */
3641static void vmR3SetErrorWorkerDoCall(PVM pVM, PVMATERROR pCur, int rc, RT_SRC_POS_DECL, const char *pszFormat, ...)
3642{
3643 va_list va;
3644 va_start(va, pszFormat);
3645 pCur->pfnAtError(pVM->pUVM, pCur->pvUser, rc, RT_SRC_POS_ARGS, pszFormat, va);
3646 va_end(va);
3647}
3648
3649
3650/**
3651 * This is a worker function for GC and Ring-0 calls to VMSetError and VMSetErrorV.
3652 * The message is found in VMINT.
3653 *
3654 * @param pVM The cross context VM structure.
3655 * @thread EMT.
3656 */
3657VMMR3_INT_DECL(void) VMR3SetErrorWorker(PVM pVM)
3658{
3659 VM_ASSERT_EMT(pVM);
3660 AssertReleaseMsgFailed(("And we have a winner! You get to implement Ring-0 and GC VMSetErrorV! Congrats!\n"));
3661
3662 /*
3663 * Unpack the error (if we managed to format one).
3664 */
3665 PVMERROR pErr = pVM->vm.s.pErrorR3;
3666 const char *pszFile = NULL;
3667 const char *pszFunction = NULL;
3668 uint32_t iLine = 0;
3669 const char *pszMessage;
3670 int32_t rc = VERR_MM_HYPER_NO_MEMORY;
3671 if (pErr)
3672 {
3673 AssertCompile(sizeof(const char) == sizeof(uint8_t));
3674 if (pErr->offFile)
3675 pszFile = (const char *)pErr + pErr->offFile;
3676 iLine = pErr->iLine;
3677 if (pErr->offFunction)
3678 pszFunction = (const char *)pErr + pErr->offFunction;
3679 if (pErr->offMessage)
3680 pszMessage = (const char *)pErr + pErr->offMessage;
3681 else
3682 pszMessage = "No message!";
3683 }
3684 else
3685 pszMessage = "No message! (Failed to allocate memory to put the error message in!)";
3686
3687 /*
3688 * Call the at error callbacks.
3689 */
3690 PUVM pUVM = pVM->pUVM;
3691 RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
3692 ASMAtomicIncU32(&pUVM->vm.s.cRuntimeErrors);
3693 for (PVMATERROR pCur = pUVM->vm.s.pAtError; pCur; pCur = pCur->pNext)
3694 vmR3SetErrorWorkerDoCall(pVM, pCur, rc, RT_SRC_POS_ARGS, "%s", pszMessage);
3695 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
3696}
3697
3698
3699/**
3700 * Gets the number of errors raised via VMSetError.
3701 *
3702 * This can be used to avoid duplicate error messages.
3703 *
3704 * @returns The error count.
3705 * @param pUVM The VM handle.
3706 */
3707VMMR3_INT_DECL(uint32_t) VMR3GetErrorCount(PUVM pUVM)
3708{
3709 AssertPtrReturn(pUVM, 0);
3710 AssertReturn(pUVM->u32Magic == UVM_MAGIC, 0);
3711 return pUVM->vm.s.cErrors;
3712}
3713
3714
3715/**
3716 * Creation time wrapper for vmR3SetErrorUV.
3717 *
3718 * @returns rc.
3719 * @param pUVM Pointer to the user mode VM structure.
3720 * @param rc The VBox status code.
3721 * @param SRC_POS The source position of this error.
3722 * @param pszFormat Format string.
3723 * @param ... The arguments.
3724 * @thread Any thread.
3725 */
3726static int vmR3SetErrorU(PUVM pUVM, int rc, RT_SRC_POS_DECL, const char *pszFormat, ...)
3727{
3728 va_list va;
3729 va_start(va, pszFormat);
3730 vmR3SetErrorUV(pUVM, rc, pszFile, iLine, pszFunction, pszFormat, &va);
3731 va_end(va);
3732 return rc;
3733}
3734
3735
3736/**
3737 * Worker which calls everyone listening to the VM error messages.
3738 *
3739 * @param pUVM Pointer to the user mode VM structure.
3740 * @param rc The VBox status code.
3741 * @param SRC_POS The source position of this error.
3742 * @param pszFormat Format string.
3743 * @param pArgs Pointer to the format arguments.
3744 * @thread EMT
3745 */
3746DECLCALLBACK(void) vmR3SetErrorUV(PUVM pUVM, int rc, RT_SRC_POS_DECL, const char *pszFormat, va_list *pArgs)
3747{
3748 /*
3749 * Log the error.
3750 */
3751 va_list va3;
3752 va_copy(va3, *pArgs);
3753 RTLogRelPrintf("VMSetError: %s(%d) %s; rc=%Rrc\n"
3754 "VMSetError: %N\n",
3755 pszFile, iLine, pszFunction, rc,
3756 pszFormat, &va3);
3757 va_end(va3);
3758
3759#ifdef LOG_ENABLED
3760 va_copy(va3, *pArgs);
3761 RTLogPrintf("VMSetError: %s(%d) %s; rc=%Rrc\n"
3762 "%N\n",
3763 pszFile, iLine, pszFunction, rc,
3764 pszFormat, &va3);
3765 va_end(va3);
3766#endif
3767
3768 /*
3769 * Make a copy of the message.
3770 */
3771 if (pUVM->pVM)
3772 vmSetErrorCopy(pUVM->pVM, rc, RT_SRC_POS_ARGS, pszFormat, *pArgs);
3773
3774 /*
3775 * Call the at error callbacks.
3776 */
3777 bool fCalledSomeone = false;
3778 RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
3779 ASMAtomicIncU32(&pUVM->vm.s.cErrors);
3780 for (PVMATERROR pCur = pUVM->vm.s.pAtError; pCur; pCur = pCur->pNext)
3781 {
3782 va_list va2;
3783 va_copy(va2, *pArgs);
3784 pCur->pfnAtError(pUVM, pCur->pvUser, rc, RT_SRC_POS_ARGS, pszFormat, va2);
3785 va_end(va2);
3786 fCalledSomeone = true;
3787 }
3788 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
3789}
3790
3791
3792/**
3793 * Sets the error message.
3794 *
3795 * @returns rc. Meaning you can do:
3796 * @code
3797 * return VM_SET_ERROR_U(pUVM, VERR_OF_YOUR_CHOICE, "descriptive message");
3798 * @endcode
3799 * @param pUVM The user mode VM handle.
3800 * @param rc VBox status code.
3801 * @param SRC_POS Use RT_SRC_POS.
3802 * @param pszFormat Error message format string.
3803 * @param ... Error message arguments.
3804 * @thread Any
3805 */
3806VMMR3DECL(int) VMR3SetError(PUVM pUVM, int rc, RT_SRC_POS_DECL, const char *pszFormat, ...)
3807{
3808 va_list va;
3809 va_start(va, pszFormat);
3810 int rcRet = VMR3SetErrorV(pUVM, rc, pszFile, iLine, pszFunction, pszFormat, va);
3811 va_end(va);
3812 return rcRet;
3813}
3814
3815
3816/**
3817 * Sets the error message.
3818 *
3819 * @returns rc. Meaning you can do:
3820 * @code
3821 * return VM_SET_ERROR_U(pUVM, VERR_OF_YOUR_CHOICE, "descriptive message");
3822 * @endcode
3823 * @param pUVM The user mode VM handle.
3824 * @param rc VBox status code.
3825 * @param SRC_POS Use RT_SRC_POS.
3826 * @param pszFormat Error message format string.
3827 * @param va Error message arguments.
3828 * @thread Any
3829 */
3830VMMR3DECL(int) VMR3SetErrorV(PUVM pUVM, int rc, RT_SRC_POS_DECL, const char *pszFormat, va_list va)
3831{
3832 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
3833
3834 /* Take shortcut when called on EMT, skipping VM handle requirement + validation. */
3835 if (VMR3GetVMCPUThread(pUVM) != NIL_RTTHREAD)
3836 {
3837 va_list vaCopy;
3838 va_copy(vaCopy, va);
3839 vmR3SetErrorUV(pUVM, rc, RT_SRC_POS_ARGS, pszFormat, &vaCopy);
3840 va_end(vaCopy);
3841 return rc;
3842 }
3843
3844 VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE);
3845 return VMSetErrorV(pUVM->pVM, rc, pszFile, iLine, pszFunction, pszFormat, va);
3846}
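
/* Editor's note: an illustrative call (not part of the original source)
 * showing the RT_SRC_POS convention; the status code, message and the uMode
 * variable are hypothetical.
 * @code
 *      return VMR3SetError(pUVM, VERR_NOT_SUPPORTED, RT_SRC_POS,
 *                          N_("The sample device is not supported (mode %u)"), uMode);
 * @endcode
 */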
3847
3848
3849
3850/**
3851 * Registers a VM runtime error callback.
3852 *
3853 * @returns VBox status code.
3854 * @param pUVM The user mode VM structure.
3855 * @param pfnAtRuntimeError Pointer to callback.
3856 * @param pvUser User argument.
3857 * @thread Any.
3858 */
3859VMMR3DECL(int) VMR3AtRuntimeErrorRegister(PUVM pUVM, PFNVMATRUNTIMEERROR pfnAtRuntimeError, void *pvUser)
3860{
3861 LogFlow(("VMR3AtRuntimeErrorRegister: pfnAtRuntimeError=%p pvUser=%p\n", pfnAtRuntimeError, pvUser));
3862
3863 /*
3864 * Validate input.
3865 */
3866 AssertPtrReturn(pfnAtRuntimeError, VERR_INVALID_PARAMETER);
3867 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
3868
3869 /*
3870 * Allocate a new record.
3871 */
3872 PVMATRUNTIMEERROR pNew = (PVMATRUNTIMEERROR)MMR3HeapAllocU(pUVM, MM_TAG_VM, sizeof(*pNew));
3873 if (!pNew)
3874 return VERR_NO_MEMORY;
3875
3876 /* fill */
3877 pNew->pfnAtRuntimeError = pfnAtRuntimeError;
3878 pNew->pvUser = pvUser;
3879
3880 /* insert */
3881 RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
3882 pNew->pNext = *pUVM->vm.s.ppAtRuntimeErrorNext;
3883 *pUVM->vm.s.ppAtRuntimeErrorNext = pNew;
3884 pUVM->vm.s.ppAtRuntimeErrorNext = &pNew->pNext;
3885 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
3886
3887 return VINF_SUCCESS;
3888}
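
/* Editor's note: a minimal sketch (not part of the original source) of a
 * runtime-error listener; names are hypothetical. The parameter list mirrors
 * the pfnAtRuntimeError invocation in vmR3SetRuntimeErrorCommon below.
 * @code
 *      static DECLCALLBACK(void) sampleAtRuntimeError(PUVM pUVM, void *pvUser,
 *                                                     uint32_t fFlags,
 *                                                     const char *pszErrorId,
 *                                                     const char *pszFormat, va_list va)
 *      {
 *          RT_NOREF(pUVM, pvUser);
 *          LogRel(("sample runtime error '%s' (fFlags=%#x): %N\n",
 *                  pszErrorId, fFlags, pszFormat, &va));
 *      }
 *      ...
 *      int rc = VMR3AtRuntimeErrorRegister(pUVM, sampleAtRuntimeError, NULL);
 * @endcode
 */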
3889
3890
3891/**
3892 * Deregisters a VM runtime error callback.
3893 *
3894 * @returns VBox status code.
3895 * @param pUVM The user mode VM handle.
3896 * @param pfnAtRuntimeError Pointer to callback.
3897 * @param pvUser User argument.
3898 * @thread Any.
3899 */
3900VMMR3DECL(int) VMR3AtRuntimeErrorDeregister(PUVM pUVM, PFNVMATRUNTIMEERROR pfnAtRuntimeError, void *pvUser)
3901{
3902 LogFlow(("VMR3AtRuntimeErrorDeregister: pfnAtRuntimeError=%p pvUser=%p\n", pfnAtRuntimeError, pvUser));
3903
3904 /*
3905 * Validate input.
3906 */
3907 AssertPtrReturn(pfnAtRuntimeError, VERR_INVALID_PARAMETER);
3908 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
3909
3910 RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
3911
3912 /*
3913 * Search the list for the entry.
3914 */
3915 PVMATRUNTIMEERROR pPrev = NULL;
3916 PVMATRUNTIMEERROR pCur = pUVM->vm.s.pAtRuntimeError;
3917 while ( pCur
3918 && ( pCur->pfnAtRuntimeError != pfnAtRuntimeError
3919 || pCur->pvUser != pvUser))
3920 {
3921 pPrev = pCur;
3922 pCur = pCur->pNext;
3923 }
3924 if (!pCur)
3925 {
3926 AssertMsgFailed(("pfnAtRuntimeError=%p was not found\n", pfnAtRuntimeError));
3927 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
3928 return VERR_FILE_NOT_FOUND;
3929 }
3930
3931 /*
3932 * Unlink it.
3933 */
3934 if (pPrev)
3935 {
3936 pPrev->pNext = pCur->pNext;
3937 if (!pCur->pNext)
3938 pUVM->vm.s.ppAtRuntimeErrorNext = &pPrev->pNext;
3939 }
3940 else
3941 {
3942 pUVM->vm.s.pAtRuntimeError = pCur->pNext;
3943 if (!pCur->pNext)
3944 pUVM->vm.s.ppAtRuntimeErrorNext = &pUVM->vm.s.pAtRuntimeError;
3945 }
3946
3947 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
3948
3949 /*
3950 * Free it.
3951 */
3952 pCur->pfnAtRuntimeError = NULL;
3953 pCur->pNext = NULL;
3954 MMR3HeapFree(pCur);
3955
3956 return VINF_SUCCESS;
3957}
3958
3959
3960/**
3961 * EMT rendezvous worker that vmR3SetRuntimeErrorCommon uses to safely change
3962 * the state to FatalError(LS).
3963 *
3964 * @returns VERR_VM_INVALID_VM_STATE or VINF_EM_SUSPEND. (This is a strict
3965 * return code, see FNVMMEMTRENDEZVOUS.)
3966 *
3967 * @param pVM The cross context VM structure.
3968 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3969 * @param pvUser Ignored.
3970 */
3971static DECLCALLBACK(VBOXSTRICTRC) vmR3SetRuntimeErrorChangeState(PVM pVM, PVMCPU pVCpu, void *pvUser)
3972{
3973    /* pVCpu is used below; only pvUser is unreferenced. */
3974    Assert(!pvUser); NOREF(pvUser);
3975
3976 /*
3977     * The first EMT through here changes the state.
3978 */
3979 if (pVCpu->idCpu == pVM->cCpus - 1)
3980 {
3981 int rc = vmR3TrySetState(pVM, "VMSetRuntimeError", 2,
3982 VMSTATE_FATAL_ERROR, VMSTATE_RUNNING,
3983 VMSTATE_FATAL_ERROR_LS, VMSTATE_RUNNING_LS);
3984 if (RT_FAILURE(rc))
3985 return rc;
3986 if (rc == 2)
3987 SSMR3Cancel(pVM->pUVM);
3988
3989 VM_FF_SET(pVM, VM_FF_CHECK_VM_STATE);
3990 }
3991
3992    /* This'll make sure we get out of wherever we are (e.g. REM). */
3993 return VINF_EM_SUSPEND;
3994}
3995
3996
3997/**
3998 * Worker for VMR3SetRuntimeErrorWorker and vmR3SetRuntimeErrorV.
3999 *
4000 * This does the common parts after the error has been saved / retrieved.
4001 *
4002 * @returns VBox status code with modifications, see VMSetRuntimeErrorV.
4003 *
4004 * @param pVM The cross context VM structure.
4005 * @param fFlags The error flags.
4006 * @param pszErrorId Error ID string.
4007 * @param pszFormat Format string.
4008 * @param pVa Pointer to the format arguments.
4009 */
4010static int vmR3SetRuntimeErrorCommon(PVM pVM, uint32_t fFlags, const char *pszErrorId, const char *pszFormat, va_list *pVa)
4011{
4012 LogRel(("VM: Raising runtime error '%s' (fFlags=%#x)\n", pszErrorId, fFlags));
4013 PUVM pUVM = pVM->pUVM;
4014
4015 /*
4016 * Take actions before the call.
4017 */
4018 int rc;
4019 if (fFlags & VMSETRTERR_FLAGS_FATAL)
4020 rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
4021 vmR3SetRuntimeErrorChangeState, NULL);
4022 else if (fFlags & VMSETRTERR_FLAGS_SUSPEND)
4023 rc = VMR3Suspend(pUVM, VMSUSPENDREASON_RUNTIME_ERROR);
4024 else
4025 rc = VINF_SUCCESS;
4026
4027 /*
4028 * Do the callback round.
4029 */
4030 RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
4031 ASMAtomicIncU32(&pUVM->vm.s.cRuntimeErrors);
4032 for (PVMATRUNTIMEERROR pCur = pUVM->vm.s.pAtRuntimeError; pCur; pCur = pCur->pNext)
4033 {
4034 va_list va;
4035 va_copy(va, *pVa);
4036 pCur->pfnAtRuntimeError(pUVM, pCur->pvUser, fFlags, pszErrorId, pszFormat, va);
4037 va_end(va);
4038 }
4039 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
4040
4041 return rc;
4042}
4043
4044
4045/**
4046 * Ellipsis to va_list wrapper for calling vmR3SetRuntimeErrorCommon.
4047 */
4048static int vmR3SetRuntimeErrorCommonF(PVM pVM, uint32_t fFlags, const char *pszErrorId, const char *pszFormat, ...)
4049{
4050 va_list va;
4051 va_start(va, pszFormat);
4052 int rc = vmR3SetRuntimeErrorCommon(pVM, fFlags, pszErrorId, pszFormat, &va);
4053 va_end(va);
4054 return rc;
4055}
4056
4057
4058/**
4059 * This is a worker function for RC and Ring-0 calls to VMSetRuntimeError and
4060 * VMSetRuntimeErrorV.
4061 *
4062 * The message is found in VMINT.
4063 *
4064 * @returns VBox status code, see VMSetRuntimeError.
4065 * @param pVM The cross context VM structure.
4066 * @thread EMT.
4067 */
4068VMMR3_INT_DECL(int) VMR3SetRuntimeErrorWorker(PVM pVM)
4069{
4070 VM_ASSERT_EMT(pVM);
4071 AssertReleaseMsgFailed(("And we have a winner! You get to implement Ring-0 and GC VMSetRuntimeErrorV! Congrats!\n"));
4072
4073 /*
4074 * Unpack the error (if we managed to format one).
4075 */
4076 const char *pszErrorId = "SetRuntimeError";
4077 const char *pszMessage = "No message!";
4078 uint32_t fFlags = VMSETRTERR_FLAGS_FATAL;
4079 PVMRUNTIMEERROR pErr = pVM->vm.s.pRuntimeErrorR3;
4080 if (pErr)
4081 {
4082 AssertCompile(sizeof(const char) == sizeof(uint8_t));
4083 if (pErr->offErrorId)
4084 pszErrorId = (const char *)pErr + pErr->offErrorId;
4085 if (pErr->offMessage)
4086 pszMessage = (const char *)pErr + pErr->offMessage;
4087 fFlags = pErr->fFlags;
4088 }
4089
4090 /*
4091     * Join paths with vmR3SetRuntimeErrorV.
4092 */
4093 return vmR3SetRuntimeErrorCommonF(pVM, fFlags, pszErrorId, "%s", pszMessage);
4094}
4095
4096
4097/**
4098 * Worker for VMSetRuntimeErrorV for doing the job on EMT in ring-3.
4099 *
4100 * @returns VBox status code with modifications, see VMSetRuntimeErrorV.
4101 *
4102 * @param pVM The cross context VM structure.
4103 * @param fFlags The error flags.
4104 * @param pszErrorId Error ID string.
4105 * @param pszMessage The error message residing in the MM heap.
4106 *
4107 * @thread EMT
4108 */
4109DECLCALLBACK(int) vmR3SetRuntimeError(PVM pVM, uint32_t fFlags, const char *pszErrorId, char *pszMessage)
4110{
4111#if 0 /** @todo make copy of the error msg. */
4112 /*
4113 * Make a copy of the message.
4114 */
4115 va_list va2;
4116 va_copy(va2, *pVa);
4117 vmSetRuntimeErrorCopy(pVM, fFlags, pszErrorId, pszFormat, va2);
4118 va_end(va2);
4119#endif
4120
4121 /*
4122 * Join paths with VMR3SetRuntimeErrorWorker.
4123 */
4124 int rc = vmR3SetRuntimeErrorCommonF(pVM, fFlags, pszErrorId, "%s", pszMessage);
4125 MMR3HeapFree(pszMessage);
4126 return rc;
4127}
4128
4129
4130/**
4131 * Worker for VMSetRuntimeErrorV for doing the job on EMT in ring-3.
4132 *
4133 * @returns VBox status code with modifications, see VMSetRuntimeErrorV.
4134 *
4135 * @param pVM The cross context VM structure.
4136 * @param fFlags The error flags.
4137 * @param pszErrorId Error ID string.
4138 * @param pszFormat Format string.
4139 * @param pVa Pointer to the format arguments.
4140 *
4141 * @thread EMT
4142 */
4143DECLCALLBACK(int) vmR3SetRuntimeErrorV(PVM pVM, uint32_t fFlags, const char *pszErrorId, const char *pszFormat, va_list *pVa)
4144{
4145 /*
4146 * Make a copy of the message.
4147 */
4148 va_list va2;
4149 va_copy(va2, *pVa);
4150 vmSetRuntimeErrorCopy(pVM, fFlags, pszErrorId, pszFormat, va2);
4151 va_end(va2);
4152
4153 /*
4154 * Join paths with VMR3SetRuntimeErrorWorker.
4155 */
4156 return vmR3SetRuntimeErrorCommon(pVM, fFlags, pszErrorId, pszFormat, pVa);
4157}
4158
4159
4160/**
4161 * Gets the number of runtime errors raised via VMR3SetRuntimeError.
4162 *
4163 * This can be used to avoid duplicate error messages.
4164 *
4165 * @returns The runtime error count.
4166 * @param pUVM The user mode VM handle.
4167 */
4168VMMR3_INT_DECL(uint32_t) VMR3GetRuntimeErrorCount(PUVM pUVM)
4169{
4170 return pUVM->vm.s.cRuntimeErrors;
4171}
4172
4173
4174/**
4175 * Gets the ID of the virtual CPU associated with the calling thread.
4176 *
4177 * @returns The CPU ID. NIL_VMCPUID if the thread isn't an EMT.
4178 *
4179 * @param pVM The cross context VM structure.
4180 */
4181VMMR3_INT_DECL(RTCPUID) VMR3GetVMCPUId(PVM pVM)
4182{
4183 PUVMCPU pUVCpu = (PUVMCPU)RTTlsGet(pVM->pUVM->vm.s.idxTLS);
4184 return pUVCpu
4185 ? pUVCpu->idCpu
4186 : NIL_VMCPUID;
4187}
4188
4189
4190/**
4191 * Checks if the VM is long-mode (64-bit) capable or not.
4192 *
4193 * @returns true if VM can operate in long-mode, false otherwise.
4194 * @param pVM The cross context VM structure.
4195 */
4196VMMR3_INT_DECL(bool) VMR3IsLongModeAllowed(PVM pVM)
4197{
4198 switch (pVM->bMainExecutionEngine)
4199 {
4200 case VM_EXEC_ENGINE_HW_VIRT:
4201 return HMIsLongModeAllowed(pVM);
4202
4203 case VM_EXEC_ENGINE_NATIVE_API:
4204 return NEMHCIsLongModeAllowed(pVM);
4205
4206 case VM_EXEC_ENGINE_NOT_SET:
4207 AssertFailed();
4208 RT_FALL_THRU();
4209 default:
4210 return false;
4211 }
4212}
4213
4214
4215/**
4216 * Returns the native ID of the current EMT VMCPU thread.
4217 *
4218 * @returns Handle if this is an EMT thread; NIL_RTNATIVETHREAD otherwise
4219 * @param pVM The cross context VM structure.
4220 * @thread EMT
4221 */
4222VMMR3DECL(RTNATIVETHREAD) VMR3GetVMCPUNativeThread(PVM pVM)
4223{
4224 PUVMCPU pUVCpu = (PUVMCPU)RTTlsGet(pVM->pUVM->vm.s.idxTLS);
4225
4226 if (!pUVCpu)
4227 return NIL_RTNATIVETHREAD;
4228
4229 return pUVCpu->vm.s.NativeThreadEMT;
4230}
4231
4232
4233/**
4234 * Returns the native ID of the current EMT VMCPU thread.
4235 *
4236 * @returns Handle if this is an EMT thread; NIL_RTNATIVETHREAD otherwise
4237 * @param pUVM The user mode VM structure.
4238 * @thread EMT
4239 */
4240VMMR3DECL(RTNATIVETHREAD) VMR3GetVMCPUNativeThreadU(PUVM pUVM)
4241{
4242 PUVMCPU pUVCpu = (PUVMCPU)RTTlsGet(pUVM->vm.s.idxTLS);
4243
4244 if (!pUVCpu)
4245 return NIL_RTNATIVETHREAD;
4246
4247 return pUVCpu->vm.s.NativeThreadEMT;
4248}
4249
4250
4251/**
4252 * Returns the handle of the current EMT VMCPU thread.
4253 *
4254 * @returns Handle if this is an EMT thread; NIL_RTTHREAD otherwise
4255 * @param pUVM The user mode VM handle.
4256 * @thread EMT
4257 */
4258VMMR3DECL(RTTHREAD) VMR3GetVMCPUThread(PUVM pUVM)
4259{
4260 PUVMCPU pUVCpu = (PUVMCPU)RTTlsGet(pUVM->vm.s.idxTLS);
4261
4262 if (!pUVCpu)
4263 return NIL_RTTHREAD;
4264
4265 return pUVCpu->vm.s.ThreadEMT;
4266}
4267
4268
4269/**
4270 * Returns the handle of the current EMT VMCPU thread.
4271 *
4272 * @returns The IPRT thread handle.
4273 * @param pUVCpu The user mode CPU handle.
4274 * @thread EMT
4275 */
4276VMMR3_INT_DECL(RTTHREAD) VMR3GetThreadHandle(PUVMCPU pUVCpu)
4277{
4278 return pUVCpu->vm.s.ThreadEMT;
4279}
4280
4281
4282/**
4283 * Return the package and core ID of a CPU.
4284 *
4285 * @returns VBox status code.
4286 * @param pUVM The user mode VM handle.
4287 * @param idCpu Virtual CPU to get the ID from.
4288 * @param pidCpuCore Where to store the core ID of the virtual CPU.
4289 * @param pidCpuPackage Where to store the package ID of the virtual CPU.
4290 *
4291 */
4292VMMR3DECL(int) VMR3GetCpuCoreAndPackageIdFromCpuId(PUVM pUVM, VMCPUID idCpu, uint32_t *pidCpuCore, uint32_t *pidCpuPackage)
4293{
4294 /*
4295 * Validate input.
4296 */
4297 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
4298 PVM pVM = pUVM->pVM;
4299 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
4300 AssertPtrReturn(pidCpuCore, VERR_INVALID_POINTER);
4301 AssertPtrReturn(pidCpuPackage, VERR_INVALID_POINTER);
4302 if (idCpu >= pVM->cCpus)
4303 return VERR_INVALID_CPU_ID;
4304
4305 /*
4306 * Set return values.
4307 */
4308#ifdef VBOX_WITH_MULTI_CORE
4309 *pidCpuCore = idCpu;
4310 *pidCpuPackage = 0;
4311#else
4312 *pidCpuCore = 0;
4313 *pidCpuPackage = idCpu;
4314#endif
4315
4316 return VINF_SUCCESS;
4317}
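
/* Editor's note: an illustrative loop (not part of the original source) that
 * dumps the guest CPU topology; cCpus would come from the caller's own
 * configuration knowledge and is hypothetical here.
 * @code
 *      for (VMCPUID idCpu = 0; idCpu < cCpus; idCpu++)
 *      {
 *          uint32_t idCore, idPackage;
 *          int rc = VMR3GetCpuCoreAndPackageIdFromCpuId(pUVM, idCpu, &idCore, &idPackage);
 *          if (RT_SUCCESS(rc))
 *              LogRel(("vCPU %u: core %u, package %u\n", idCpu, idCore, idPackage));
 *      }
 * @endcode
 */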
4318
4319
4320/**
4321 * Worker for VMR3HotUnplugCpu.
4322 *
4323 * @returns VINF_EM_WAIT_SIPI (strict status code).
4324 * @param pVM The cross context VM structure.
4325 * @param idCpu The current CPU.
4326 */
4327static DECLCALLBACK(int) vmR3HotUnplugCpu(PVM pVM, VMCPUID idCpu)
4328{
4329 PVMCPU pVCpu = VMMGetCpuById(pVM, idCpu);
4330 VMCPU_ASSERT_EMT(pVCpu);
4331
4332 /*
4333 * Reset per CPU resources.
4334 *
4335     * Actually only needed for VT-x because the CPU seems to still be in some
4336     * paged mode and startup fails after a new hot-plug event. SVM works fine
4337 * even without this.
4338 */
4339 Log(("vmR3HotUnplugCpu for VCPU %u\n", idCpu));
4340 PGMR3ResetCpu(pVM, pVCpu);
4341 PDMR3ResetCpu(pVCpu);
4342 TRPMR3ResetCpu(pVCpu);
4343 CPUMR3ResetCpu(pVM, pVCpu);
4344 EMR3ResetCpu(pVCpu);
4345 HMR3ResetCpu(pVCpu);
4346 NEMR3ResetCpu(pVCpu, false /*fInitIpi*/);
4347 return VINF_EM_WAIT_SIPI;
4348}
4349
4350
4351/**
4352 * Hot-unplugs a CPU from the guest.
4353 *
4354 * @returns VBox status code.
4355 * @param pUVM The user mode VM handle.
4356 * @param idCpu Virtual CPU to perform the hot unplugging operation on.
4357 */
4358VMMR3DECL(int) VMR3HotUnplugCpu(PUVM pUVM, VMCPUID idCpu)
4359{
4360 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
4361 PVM pVM = pUVM->pVM;
4362 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
4363 AssertReturn(idCpu < pVM->cCpus, VERR_INVALID_CPU_ID);
4364
4365 /** @todo r=bird: Don't destroy the EMT, it'll break VMMR3EmtRendezvous and
4366 * broadcast requests. Just note down somewhere that the CPU is
4367     *          offline and send it to SIPI wait. Maybe modify VMCPUSTATE and push
4368 * it out of the EM loops when offline. */
4369 return VMR3ReqCallNoWaitU(pUVM, idCpu, (PFNRT)vmR3HotUnplugCpu, 2, pVM, idCpu);
4370}
4371
4372
4373/**
4374 * Hot-plugs a CPU on the guest.
4375 *
4376 * @returns VBox status code.
4377 * @param pUVM The user mode VM handle.
4378 * @param idCpu Virtual CPU to perform the hot plugging operation on.
4379 */
4380VMMR3DECL(int) VMR3HotPlugCpu(PUVM pUVM, VMCPUID idCpu)
4381{
4382 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
4383 PVM pVM = pUVM->pVM;
4384 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
4385 AssertReturn(idCpu < pVM->cCpus, VERR_INVALID_CPU_ID);
4386
4387    /** @todo r=bird: Just mark it online and make sure it waits on SIPI. */
4388 return VINF_SUCCESS;
4389}
4390
4391
4392/**
4393 * Changes the VMM execution cap.
4394 *
4395 * @returns VBox status code.
4396 * @param pUVM The user mode VM structure.
4397 * @param uCpuExecutionCap New CPU execution cap in percent, 1-100, where
4398 * 100 is max performance (default).
4399 */
4400VMMR3DECL(int) VMR3SetCpuExecutionCap(PUVM pUVM, uint32_t uCpuExecutionCap)
4401{
4402 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
4403 PVM pVM = pUVM->pVM;
4404 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
4405 AssertReturn(uCpuExecutionCap > 0 && uCpuExecutionCap <= 100, VERR_INVALID_PARAMETER);
4406
4407    Log(("VMR3SetCpuExecutionCap: new cap = %d\n", uCpuExecutionCap));
4408 /* Note: not called from EMT. */
4409 pVM->uCpuExecutionCap = uCpuExecutionCap;
4410 return VINF_SUCCESS;
4411}
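
/* Editor's note: an illustrative call (not part of the original source),
 * throttling the guest to roughly half of the host CPU time.
 * @code
 *      int rc = VMR3SetCpuExecutionCap(pUVM, 50);
 *      AssertRC(rc);
 * @endcode
 */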
4412
4413
4414/**
4415 * Control whether the VM should power off when resetting.
4416 *
4417 * @returns VBox status code.
4418 * @param pUVM The user mode VM handle.
4419 * @param fPowerOffInsteadOfReset Flag whether the VM should power off when
4420 * resetting.
4421 */
4422VMMR3DECL(int) VMR3SetPowerOffInsteadOfReset(PUVM pUVM, bool fPowerOffInsteadOfReset)
4423{
4424 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
4425 PVM pVM = pUVM->pVM;
4426 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
4427
4428 /* Note: not called from EMT. */
4429 pVM->vm.s.fPowerOffInsteadOfReset = fPowerOffInsteadOfReset;
4430 return VINF_SUCCESS;
4431}
4432