VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/VM.cpp@ 37354

Last change on this file since 37354 was 37211, checked in by vboxsync, 14 years ago

Some ASMAtomic*Size elimination.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 156.5 KB
Line 
1/* $Id: VM.cpp 37211 2011-05-25 11:37:52Z vboxsync $ */
2/** @file
3 * VM - Virtual Machine
4 */
5
6/*
7 * Copyright (C) 2006-2010 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/** @page pg_vm VM API
19 *
20 * This is the encapsulating bit. It provides the APIs that Main and VBoxBFE
21 * use to create a VMM instance for running a guest in. It also provides
22 * facilities for queuing request for execution in EMT (serialization purposes
23 * mostly) and for reporting error back to the VMM user (Main/VBoxBFE).
24 *
25 *
26 * @section sec_vm_design Design Critique / Things To Do
27 *
28 * In hindsight this component is a big design mistake, all this stuff really
29 * belongs in the VMM component. It just seemed like a kind of ok idea at a
30 * time when the VMM bit was kind of vague. 'VM' also happened to be the name
31 * of the per-VM instance structure (see vm.h), so it kind of made sense.
32 * However as it turned out, VMM(.cpp) is almost empty; all it provides in ring-3
33 * is some minor functionality and some "routing" services.
34 *
35 * Fixing this is just a matter of some more or less straightforward
36 * refactoring, the question is just when someone will get to it. Moving the EMT
37 * would be a good start.
38 *
39 */
40
41/*******************************************************************************
42* Header Files *
43*******************************************************************************/
44#define LOG_GROUP LOG_GROUP_VM
45#include <VBox/vmm/cfgm.h>
46#include <VBox/vmm/vmm.h>
47#include <VBox/vmm/gvmm.h>
48#include <VBox/vmm/mm.h>
49#include <VBox/vmm/cpum.h>
50#include <VBox/vmm/selm.h>
51#include <VBox/vmm/trpm.h>
52#include <VBox/vmm/dbgf.h>
53#include <VBox/vmm/pgm.h>
54#include <VBox/vmm/pdmapi.h>
55#include <VBox/vmm/pdmcritsect.h>
56#include <VBox/vmm/em.h>
57#include <VBox/vmm/iem.h>
58#include <VBox/vmm/rem.h>
59#include <VBox/vmm/tm.h>
60#include <VBox/vmm/stam.h>
61#include <VBox/vmm/patm.h>
62#include <VBox/vmm/csam.h>
63#include <VBox/vmm/iom.h>
64#include <VBox/vmm/ssm.h>
65#include <VBox/vmm/ftm.h>
66#include <VBox/vmm/hwaccm.h>
67#include "VMInternal.h"
68#include <VBox/vmm/vm.h>
69#include <VBox/vmm/uvm.h>
70
71#include <VBox/sup.h>
72#include <VBox/dbg.h>
73#include <VBox/err.h>
74#include <VBox/param.h>
75#include <VBox/log.h>
76#include <iprt/assert.h>
77#include <iprt/alloc.h>
78#include <iprt/asm.h>
79#include <iprt/env.h>
80#include <iprt/string.h>
81#include <iprt/time.h>
82#include <iprt/semaphore.h>
83#include <iprt/thread.h>
84#include <iprt/uuid.h>
85
86
87/*******************************************************************************
88* Structures and Typedefs *
89*******************************************************************************/
90/**
91 * VM destruction callback registration record.
92 */
93typedef struct VMATDTOR
94{
95 /** Pointer to the next record in the list. */
96 struct VMATDTOR *pNext;
97 /** Pointer to the callback function. */
98 PFNVMATDTOR pfnAtDtor;
99 /** The user argument. */
100 void *pvUser;
101} VMATDTOR;
102/** Pointer to a VM destruction callback registration record. */
103typedef VMATDTOR *PVMATDTOR;
104
105
106/*******************************************************************************
107* Global Variables *
108*******************************************************************************/
109/** Pointer to the list of VMs. */
110static PUVM g_pUVMsHead = NULL;
111
112/** Pointer to the list of at VM destruction callbacks. */
113static PVMATDTOR g_pVMAtDtorHead = NULL;
114/** Lock the g_pVMAtDtorHead list. */
115#define VM_ATDTOR_LOCK() do { } while (0)
116/** Unlock the g_pVMAtDtorHead list. */
117#define VM_ATDTOR_UNLOCK() do { } while (0)
118
119
120/*******************************************************************************
121* Internal Functions *
122*******************************************************************************/
123static int vmR3CreateUVM(uint32_t cCpus, PCVMM2USERMETHODS pVmm2UserMethods, PUVM *ppUVM);
124static int vmR3CreateU(PUVM pUVM, uint32_t cCpus, PFNCFGMCONSTRUCTOR pfnCFGMConstructor, void *pvUserCFGM);
125static int vmR3InitRing3(PVM pVM, PUVM pUVM);
126static int vmR3InitRing0(PVM pVM);
127static int vmR3InitGC(PVM pVM);
128static int vmR3InitDoCompleted(PVM pVM, VMINITCOMPLETED enmWhat);
129static DECLCALLBACK(size_t) vmR3LogPrefixCallback(PRTLOGGER pLogger, char *pchBuf, size_t cchBuf, void *pvUser);
130static void vmR3DestroyUVM(PUVM pUVM, uint32_t cMilliesEMTWait);
131static void vmR3AtDtor(PVM pVM);
132static bool vmR3ValidateStateTransition(VMSTATE enmStateOld, VMSTATE enmStateNew);
133static void vmR3DoAtState(PVM pVM, PUVM pUVM, VMSTATE enmStateNew, VMSTATE enmStateOld);
134static int vmR3TrySetState(PVM pVM, const char *pszWho, unsigned cTransitions, ...);
135static void vmR3SetStateLocked(PVM pVM, PUVM pUVM, VMSTATE enmStateNew, VMSTATE enmStateOld);
136static void vmR3SetState(PVM pVM, VMSTATE enmStateNew, VMSTATE enmStateOld);
137static int vmR3SetErrorU(PUVM pUVM, int rc, RT_SRC_POS_DECL, const char *pszFormat, ...);
138
139
140/**
141 * Do global VMM init.
142 *
143 * @returns VBox status code.
144 */
145VMMR3DECL(int) VMR3GlobalInit(void)
146{
147 /*
148 * Only once.
149 */
150 static bool volatile s_fDone = false;
151 if (s_fDone)
152 return VINF_SUCCESS;
153
154 /*
155 * We're done.
156 */
157 s_fDone = true;
158 return VINF_SUCCESS;
159}
160
161
162
/**
 * Creates a virtual machine by calling the supplied configuration constructor.
 *
 * On successful return the VM is powered, i.e. VMR3PowerOn() should be
 * called to start the execution.
 *
 * @returns 0 on success.
 * @returns VBox error code on failure.
 * @param   cCpus               Number of virtual CPUs for the new VM.
 * @param   pVmm2UserMethods    An optional method table that the VMM can use
 *                              to make the user perform various action, like
 *                              for instance state saving.
 * @param   pfnVMAtError        Pointer to callback function for setting VM
 *                              errors. This was added as an implicit call to
 *                              VMR3AtErrorRegister() since there is no way the
 *                              caller can get to the VM handle early enough to
 *                              do this on its own.
 *                              This is called in the context of an EMT.
 * @param   pvUserVM            The user argument passed to pfnVMAtError.
 * @param   pfnCFGMConstructor  Pointer to callback function for constructing the VM configuration tree.
 *                              This is called in the context of an EMT0.
 * @param   pvUserCFGM          The user argument passed to pfnCFGMConstructor.
 * @param   ppVM                Where to store the 'handle' of the created VM.
 */
VMMR3DECL(int) VMR3Create(uint32_t cCpus, PCVMM2USERMETHODS pVmm2UserMethods,
                          PFNVMATERROR pfnVMAtError, void *pvUserVM,
                          PFNCFGMCONSTRUCTOR pfnCFGMConstructor, void *pvUserCFGM,
                          PVM *ppVM)
{
    LogFlow(("VMR3Create: cCpus=%RU32 pVmm2UserMethods=%p pfnVMAtError=%p pvUserVM=%p pfnCFGMConstructor=%p pvUserCFGM=%p ppVM=%p\n",
             cCpus, pVmm2UserMethods, pfnVMAtError, pvUserVM, pfnCFGMConstructor, pvUserCFGM, ppVM));

    /*
     * Validate the optional VMM -> User method table (magic, version and each
     * optional callback pointer) before anything is allocated.
     */
    if (pVmm2UserMethods)
    {
        AssertPtrReturn(pVmm2UserMethods, VERR_INVALID_POINTER);
        AssertReturn(pVmm2UserMethods->u32Magic == VMM2USERMETHODS_MAGIC, VERR_INVALID_PARAMETER);
        AssertReturn(pVmm2UserMethods->u32Version == VMM2USERMETHODS_VERSION, VERR_INVALID_PARAMETER);
        AssertPtrNullReturn(pVmm2UserMethods->pfnSaveState, VERR_INVALID_POINTER);
        AssertPtrNullReturn(pVmm2UserMethods->pfnNotifyEmtInit, VERR_INVALID_POINTER);
        AssertPtrNullReturn(pVmm2UserMethods->pfnNotifyEmtTerm, VERR_INVALID_POINTER);
        AssertPtrNullReturn(pVmm2UserMethods->pfnNotifyPdmtInit, VERR_INVALID_POINTER);
        AssertPtrNullReturn(pVmm2UserMethods->pfnNotifyPdmtTerm, VERR_INVALID_POINTER);
        AssertReturn(pVmm2UserMethods->u32EndMagic == VMM2USERMETHODS_MAGIC, VERR_INVALID_PARAMETER);
    }
    AssertPtrNullReturn(pfnVMAtError, VERR_INVALID_POINTER);
    AssertPtrNullReturn(pfnCFGMConstructor, VERR_INVALID_POINTER);
    AssertPtrReturn(ppVM, VERR_INVALID_POINTER);

    /*
     * Because of the current hackiness of the applications
     * we'll have to initialize global stuff from here.
     * Later the applications will take care of this in a proper way.
     */
    static bool fGlobalInitDone = false;
    if (!fGlobalInitDone)
    {
        int rc = VMR3GlobalInit();
        if (RT_FAILURE(rc))
            return rc;
        fGlobalInitDone = true;
    }

    /*
     * Validate input.
     */
    AssertLogRelMsgReturn(cCpus > 0 && cCpus <= VMM_MAX_CPU_COUNT, ("%RU32\n", cCpus), VERR_TOO_MANY_CPUS);

    /*
     * Create the UVM so we can register the at-error callback
     * and consolidate a bit of cleanup code.
     */
    PUVM pUVM = NULL; /* shuts up gcc */
    int rc = vmR3CreateUVM(cCpus, pVmm2UserMethods, &pUVM);
    if (RT_FAILURE(rc))
        return rc;
    if (pfnVMAtError)
        rc = VMR3AtErrorRegisterU(pUVM, pfnVMAtError, pvUserVM);
    if (RT_SUCCESS(rc))
    {
        /*
         * Initialize the support library creating the session for this VM.
         */
        rc = SUPR3Init(&pUVM->vm.s.pSession);
        if (RT_SUCCESS(rc))
        {
            /*
             * Call vmR3CreateU in the EMT thread and wait for it to finish.
             *
             * Note! VMCPUID_ANY is used here because VMR3ReqQueueU would have trouble
             *       submitting a request to a specific VCPU without a pVM. So, to make
             *       sure init is running on EMT(0), vmR3EmulationThreadWithId makes sure
             *       that only EMT(0) is servicing VMCPUID_ANY requests when pVM is NULL.
             */
            PVMREQ pReq;
            rc = VMR3ReqCallU(pUVM, VMCPUID_ANY, &pReq, RT_INDEFINITE_WAIT, VMREQFLAGS_VBOX_STATUS,
                              (PFNRT)vmR3CreateU, 4, pUVM, cCpus, pfnCFGMConstructor, pvUserCFGM);
            if (RT_SUCCESS(rc))
            {
                /* The request succeeded; the actual creation status is in iStatus. */
                rc = pReq->iStatus;
                VMR3ReqFree(pReq);
                if (RT_SUCCESS(rc))
                {
                    /*
                     * Success!
                     */
                    *ppVM = pUVM->pVM;
                    LogFlow(("VMR3Create: returns VINF_SUCCESS *ppVM=%p\n", *ppVM));
                    return VINF_SUCCESS;
                }
            }
            else
                AssertMsgFailed(("VMR3ReqCallU failed rc=%Rrc\n", rc));

            /*
             * An error occurred during VM creation. Set the error message directly
             * using the initial callback, as the callback list might not exist yet.
             *
             * Well-known status codes are mapped to friendly, platform-specific
             * messages; anything else falls back to RTErrGetFull (unless a message
             * was already set via the at-error callback).
             */
            const char *pszError;
            switch (rc)
            {
                case VERR_VMX_IN_VMX_ROOT_MODE:
#ifdef RT_OS_LINUX
                    pszError = N_("VirtualBox can't operate in VMX root mode. "
                                  "Please disable the KVM kernel extension, recompile your kernel and reboot");
#else
                    pszError = N_("VirtualBox can't operate in VMX root mode. Please close all other virtualization programs.");
#endif
                    break;

#ifndef RT_OS_DARWIN
                case VERR_HWACCM_CONFIG_MISMATCH:
                    pszError = N_("VT-x/AMD-V is either not available on your host or disabled. "
                                  "This hardware extension is required by the VM configuration");
                    break;
#endif

                case VERR_SVM_IN_USE:
#ifdef RT_OS_LINUX
                    pszError = N_("VirtualBox can't enable the AMD-V extension. "
                                  "Please disable the KVM kernel extension, recompile your kernel and reboot");
#else
                    pszError = N_("VirtualBox can't enable the AMD-V extension. Please close all other virtualization programs.");
#endif
                    break;

#ifdef RT_OS_LINUX
                case VERR_SUPDRV_COMPONENT_NOT_FOUND:
                    pszError = N_("One of the kernel modules was not successfully loaded. Make sure "
                                  "that no kernel modules from an older version of VirtualBox exist. "
                                  "Then try to recompile and reload the kernel modules by executing "
                                  "'/etc/init.d/vboxdrv setup' as root");
                    break;
#endif

                case VERR_RAW_MODE_INVALID_SMP:
                    pszError = N_("VT-x/AMD-V is either not available on your host or disabled. "
                                  "VirtualBox requires this hardware extension to emulate more than one "
                                  "guest CPU");
                    break;

                case VERR_SUPDRV_KERNEL_TOO_OLD_FOR_VTX:
#ifdef RT_OS_LINUX
                    pszError = N_("Because the host kernel is too old, VirtualBox cannot enable the VT-x "
                                  "extension. Either upgrade your kernel to Linux 2.6.13 or later or disable "
                                  "the VT-x extension in the VM settings. Note that without VT-x you have "
                                  "to reduce the number of guest CPUs to one");
#else
                    pszError = N_("Because the host kernel is too old, VirtualBox cannot enable the VT-x "
                                  "extension. Either upgrade your kernel or disable the VT-x extension in the "
                                  "VM settings. Note that without VT-x you have to reduce the number of guest "
                                  "CPUs to one");
#endif
                    break;

                case VERR_PDM_DEVICE_NOT_FOUND:
                    pszError = N_("A virtual device is configured in the VM settings but the device "
                                  "implementation is missing.\n"
                                  "A possible reason for this error is a missing extension pack. Note "
                                  "that as of VirtualBox 4.0, certain features (for example USB 2.0 "
                                  "support and remote desktop) are only available from an 'extension "
                                  "pack' which must be downloaded and installed separately");
                    break;

                default:
                    if (VMR3GetErrorCountU(pUVM) == 0)
                        pszError = RTErrGetFull(rc);
                    else
                        pszError = NULL; /* already set. */
                    break;
            }
            if (pszError)
                /* The trailing rc is only consumed if pszError contains a format
                   specifier for it; excess printf arguments are ignored otherwise. */
                vmR3SetErrorU(pUVM, rc, RT_SRC_POS, pszError, rc);
        }
        else
        {
            /*
             * An error occurred at support library initialization time (before the
             * VM could be created). Set the error message directly using the
             * initial callback, as the callback list doesn't exist yet.
             */
            const char *pszError;
            switch (rc)
            {
                case VERR_VM_DRIVER_LOAD_ERROR:
#ifdef RT_OS_LINUX
                    pszError = N_("VirtualBox kernel driver not loaded. The vboxdrv kernel module "
                                  "was either not loaded or /dev/vboxdrv is not set up properly. "
                                  "Re-setup the kernel module by executing "
                                  "'/etc/init.d/vboxdrv setup' as root");
#else
                    pszError = N_("VirtualBox kernel driver not loaded");
#endif
                    break;
                case VERR_VM_DRIVER_OPEN_ERROR:
                    pszError = N_("VirtualBox kernel driver cannot be opened");
                    break;
                case VERR_VM_DRIVER_NOT_ACCESSIBLE:
#ifdef VBOX_WITH_HARDENING
                    /* This should only happen if the executable wasn't hardened - bad code/build. */
                    pszError = N_("VirtualBox kernel driver not accessible, permission problem. "
                                  "Re-install VirtualBox. If you are building it yourself, you "
                                  "should make sure it installed correctly and that the setuid "
                                  "bit is set on the executables calling VMR3Create.");
#else
                    /* This should only happen when mixing builds or with the usual /dev/vboxdrv access issues. */
# if defined(RT_OS_DARWIN)
                    pszError = N_("VirtualBox KEXT is not accessible, permission problem. "
                                  "If you have built VirtualBox yourself, make sure that you do not "
                                  "have the vboxdrv KEXT from a different build or installation loaded.");
# elif defined(RT_OS_LINUX)
                    pszError = N_("VirtualBox kernel driver is not accessible, permission problem. "
                                  "If you have built VirtualBox yourself, make sure that you do "
                                  "not have the vboxdrv kernel module from a different build or "
                                  "installation loaded. Also, make sure the vboxdrv udev rule gives "
                                  "you the permission you need to access the device.");
# elif defined(RT_OS_WINDOWS)
                    pszError = N_("VirtualBox kernel driver is not accessible, permission problem.");
# else /* solaris, freebsd, ++. */
                    pszError = N_("VirtualBox kernel module is not accessible, permission problem. "
                                  "If you have built VirtualBox yourself, make sure that you do "
                                  "not have the vboxdrv kernel module from a different install loaded.");
# endif
#endif
                    break;
                case VERR_INVALID_HANDLE: /** @todo track down and fix this error. */
                case VERR_VM_DRIVER_NOT_INSTALLED:
#ifdef RT_OS_LINUX
                    pszError = N_("VirtualBox kernel driver not installed. The vboxdrv kernel module "
                                  "was either not loaded or /dev/vboxdrv was not created for some "
                                  "reason. Re-setup the kernel module by executing "
                                  "'/etc/init.d/vboxdrv setup' as root");
#else
                    pszError = N_("VirtualBox kernel driver not installed");
#endif
                    break;
                case VERR_NO_MEMORY:
                    pszError = N_("VirtualBox support library out of memory");
                    break;
                case VERR_VERSION_MISMATCH:
                case VERR_VM_DRIVER_VERSION_MISMATCH:
                    pszError = N_("The VirtualBox support driver which is running is from a different "
                                  "version of VirtualBox. You can correct this by stopping all "
                                  "running instances of VirtualBox and reinstalling the software.");
                    break;
                default:
                    pszError = N_("Unknown error initializing kernel driver");
                    AssertMsgFailed(("Add error message for rc=%d (%Rrc)\n", rc, rc));
            }
            vmR3SetErrorU(pUVM, rc, RT_SRC_POS, pszError, rc);
        }
    }

    /* cleanup - tears down the UVM (and SUPLib session) created above. */
    vmR3DestroyUVM(pUVM, 2000);
    LogFlow(("VMR3Create: returns %Rrc\n", rc));
    return rc;
}
440
441
/**
 * Creates the UVM.
 *
 * This will not initialize the support library even if vmR3DestroyUVM
 * will terminate that.
 *
 * The function is structured as a success ladder: each resource acquired
 * (TLS slot, per-VCPU event semaphores, critical sections, STAM/MM/PDM UVM
 * state, EMT threads) is released in reverse order on the failure paths
 * below its acquisition point.
 *
 * @returns VBox status code.
 * @param   cCpus               Number of virtual CPUs
 * @param   pVmm2UserMethods    Pointer to the optional VMM -> User method
 *                              table.
 * @param   ppUVM               Where to store the UVM pointer; only written
 *                              on success.
 */
static int vmR3CreateUVM(uint32_t cCpus, PCVMM2USERMETHODS pVmm2UserMethods, PUVM *ppUVM)
{
    uint32_t i;

    /*
     * Create and initialize the UVM.
     */
    /* Zeroed page allocation sized for the trailing aCpus[] array. */
    PUVM pUVM = (PUVM)RTMemPageAllocZ(RT_OFFSETOF(UVM, aCpus[cCpus]));
    AssertReturn(pUVM, VERR_NO_MEMORY);
    pUVM->u32Magic = UVM_MAGIC;
    pUVM->cCpus = cCpus;
    pUVM->pVmm2UserMethods = pVmm2UserMethods;

    AssertCompile(sizeof(pUVM->vm.s) <= sizeof(pUVM->vm.padding));

    pUVM->vm.s.cUvmRefs = 1;
    /* Prime the append cursors for the three notification lists. */
    pUVM->vm.s.ppAtStateNext = &pUVM->vm.s.pAtState;
    pUVM->vm.s.ppAtErrorNext = &pUVM->vm.s.pAtError;
    pUVM->vm.s.ppAtRuntimeErrorNext = &pUVM->vm.s.pAtRuntimeError;

    pUVM->vm.s.enmHaltMethod = VMHALTMETHOD_BOOTSTRAP;
    RTUuidClear(&pUVM->vm.s.Uuid);

    /* Initialize the VMCPU array in the UVM. */
    for (i = 0; i < cCpus; i++)
    {
        pUVM->aCpus[i].pUVM = pUVM;
        pUVM->aCpus[i].idCpu = i;
    }

    /* Allocate a TLS entry to store the VMINTUSERPERVMCPU pointer. */
    int rc = RTTlsAllocEx(&pUVM->vm.s.idxTLS, NULL);
    AssertRC(rc);
    if (RT_SUCCESS(rc))
    {
        /* Allocate a halt method event semaphore for each VCPU.
           (Pre-set all handles to NIL so the cleanup loop below is safe even
           when semaphore creation fails part way through.) */
        for (i = 0; i < cCpus; i++)
            pUVM->aCpus[i].vm.s.EventSemWait = NIL_RTSEMEVENT;
        for (i = 0; i < cCpus; i++)
        {
            rc = RTSemEventCreate(&pUVM->aCpus[i].vm.s.EventSemWait);
            if (RT_FAILURE(rc))
                break;
        }
        if (RT_SUCCESS(rc))
        {
            rc = RTCritSectInit(&pUVM->vm.s.AtStateCritSect);
            if (RT_SUCCESS(rc))
            {
                rc = RTCritSectInit(&pUVM->vm.s.AtErrorCritSect);
                if (RT_SUCCESS(rc))
                {
                    /*
                     * Init fundamental (sub-)components - STAM, MMR3Heap and PDMLdr.
                     */
                    rc = STAMR3InitUVM(pUVM);
                    if (RT_SUCCESS(rc))
                    {
                        rc = MMR3InitUVM(pUVM);
                        if (RT_SUCCESS(rc))
                        {
                            rc = PDMR3InitUVM(pUVM);
                            if (RT_SUCCESS(rc))
                            {
                                /*
                                 * Start the emulation threads for all VMCPUs.
                                 */
                                for (i = 0; i < cCpus; i++)
                                {
                                    rc = RTThreadCreateF(&pUVM->aCpus[i].vm.s.ThreadEMT, vmR3EmulationThread, &pUVM->aCpus[i], _1M,
                                                         RTTHREADTYPE_EMULATION, RTTHREADFLAGS_WAITABLE,
                                                         cCpus > 1 ? "EMT-%u" : "EMT", i);
                                    if (RT_FAILURE(rc))
                                        break;

                                    pUVM->aCpus[i].vm.s.NativeThreadEMT = RTThreadGetNative(pUVM->aCpus[i].vm.s.ThreadEMT);
                                }

                                if (RT_SUCCESS(rc))
                                {
                                    *ppUVM = pUVM;
                                    return VINF_SUCCESS;
                                }

                                /* bail out. */
                                while (i-- > 0)
                                {
                                    /** @todo rainy day: terminate the EMTs. */
                                }
                                PDMR3TermUVM(pUVM);
                            }
                            MMR3TermUVM(pUVM);
                        }
                        STAMR3TermUVM(pUVM);
                    }
                    RTCritSectDelete(&pUVM->vm.s.AtErrorCritSect);
                }
                RTCritSectDelete(&pUVM->vm.s.AtStateCritSect);
            }
        }
        /* Destroy whichever semaphores were created (NIL handles are skipped). */
        for (i = 0; i < cCpus; i++)
        {
            RTSemEventDestroy(pUVM->aCpus[i].vm.s.EventSemWait);
            pUVM->aCpus[i].vm.s.EventSemWait = NIL_RTSEMEVENT;
        }
        RTTlsFree(pUVM->vm.s.idxTLS);
    }
    /* Failure: everything acquired above has been unwound; free the UVM itself. */
    RTMemPageFree(pUVM, RT_OFFSETOF(UVM, aCpus[pUVM->cCpus]));
    return rc;
}
564
565
/**
 * Creates and initializes the VM.
 *
 * Runs as a request on EMT(0) (queued by VMR3Create with VMCPUID_ANY while
 * pVM is still NULL).  Loads VMMR0.r0, asks GVMM in ring-0 to allocate the
 * shared VM structure, cross-links it with the UVM, then performs CFGM
 * construction followed by the ring-3, ring-0 and GC init rounds.  On
 * failure everything created here is torn down again, ending with a
 * VMMR0_DO_GVMM_DESTROY_VM call.
 *
 * @returns VBox status code.
 * @param   pUVM                The user mode VM structure.
 * @param   cCpus               Number of virtual CPUs; must match the
 *                              "NumCPUs" configuration value.
 * @param   pfnCFGMConstructor  Optional configuration tree constructor
 *                              callback (passed to CFGMR3Init).
 * @param   pvUserCFGM          User argument for pfnCFGMConstructor.
 * @thread EMT
 */
static int vmR3CreateU(PUVM pUVM, uint32_t cCpus, PFNCFGMCONSTRUCTOR pfnCFGMConstructor, void *pvUserCFGM)
{
    /*
     * Load the VMMR0.r0 module so that we can call GVMMR0CreateVM.
     */
    int rc = PDMR3LdrLoadVMMR0U(pUVM);
    if (RT_FAILURE(rc))
    {
        /** @todo we need a cleaner solution for this (VERR_VMX_IN_VMX_ROOT_MODE).
         * bird: what about moving the message down here? Main picks the first message, right? */
        if (rc == VERR_VMX_IN_VMX_ROOT_MODE)
            return rc; /* proper error message set later on */
        return vmR3SetErrorU(pUVM, rc, RT_SRC_POS, N_("Failed to load VMMR0.r0"));
    }

    /*
     * Request GVMM to create a new VM for us.
     */
    GVMMCREATEVMREQ CreateVMReq;
    CreateVMReq.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
    CreateVMReq.Hdr.cbReq = sizeof(CreateVMReq);
    CreateVMReq.pSession = pUVM->vm.s.pSession;
    CreateVMReq.pVMR0 = NIL_RTR0PTR;
    CreateVMReq.pVMR3 = NULL;
    CreateVMReq.cCpus = cCpus;
    rc = SUPR3CallVMMR0Ex(NIL_RTR0PTR, NIL_VMCPUID, VMMR0_DO_GVMM_CREATE_VM, 0, &CreateVMReq.Hdr);
    if (RT_SUCCESS(rc))
    {
        /* Sanity check the structure GVMM handed back before touching it. */
        PVM pVM = pUVM->pVM = CreateVMReq.pVMR3;
        AssertRelease(VALID_PTR(pVM));
        AssertRelease(pVM->pVMR0 == CreateVMReq.pVMR0);
        AssertRelease(pVM->pSession == pUVM->vm.s.pSession);
        AssertRelease(pVM->cCpus == cCpus);
        AssertRelease(pVM->uCpuExecutionCap == 100);
        AssertRelease(pVM->offVMCPU == RT_UOFFSETOF(VM, aCpus));

        Log(("VMR3Create: Created pUVM=%p pVM=%p pVMR0=%p hSelf=%#x cCpus=%RU32\n",
             pUVM, pVM, pVM->pVMR0, pVM->hSelf, pVM->cCpus));

        /*
         * Initialize the VM structure and our internal data (VMINT).
         */
        pVM->pUVM = pUVM;

        /* Cross-link the per-CPU VM and UVM structures. */
        for (VMCPUID i = 0; i < pVM->cCpus; i++)
        {
            pVM->aCpus[i].pUVCpu = &pUVM->aCpus[i];
            pVM->aCpus[i].idCpu = i;
            pVM->aCpus[i].hNativeThread = pUVM->aCpus[i].vm.s.NativeThreadEMT;
            Assert(pVM->aCpus[i].hNativeThread != NIL_RTNATIVETHREAD);
            /* hNativeThreadR0 is initialized on EMT registration. */
            pUVM->aCpus[i].pVCpu = &pVM->aCpus[i];
            pUVM->aCpus[i].pVM = pVM;
        }


        /*
         * Init the configuration.
         */
        rc = CFGMR3Init(pVM, pfnCFGMConstructor, pvUserCFGM);
        if (RT_SUCCESS(rc))
        {
            PCFGMNODE pRoot = CFGMR3GetRoot(pVM);
            rc = CFGMR3QueryBoolDef(pRoot, "HwVirtExtForced", &pVM->fHwVirtExtForced, false);
            if (RT_SUCCESS(rc) && pVM->fHwVirtExtForced)
                pVM->fHWACCMEnabled = true;

            /*
             * If executing in fake suplib mode disable RR3 and RR0 in the config.
             */
            const char *psz = RTEnvGet("VBOX_SUPLIB_FAKE");
            if (psz && !strcmp(psz, "fake"))
            {
                CFGMR3RemoveValue(pRoot, "RawR3Enabled");
                CFGMR3InsertInteger(pRoot, "RawR3Enabled", 0);
                CFGMR3RemoveValue(pRoot, "RawR0Enabled");
                CFGMR3InsertInteger(pRoot, "RawR0Enabled", 0);
            }

            /*
             * Make sure the CPU count in the config data matches.
             */
            if (RT_SUCCESS(rc))
            {
                uint32_t cCPUsCfg;
                rc = CFGMR3QueryU32Def(pRoot, "NumCPUs", &cCPUsCfg, 1);
                AssertLogRelMsgRC(rc, ("Configuration error: Querying \"NumCPUs\" as integer failed, rc=%Rrc\n", rc));
                if (RT_SUCCESS(rc) && cCPUsCfg != cCpus)
                {
                    AssertLogRelMsgFailed(("Configuration error: \"NumCPUs\"=%RU32 and VMR3CreateVM::cCpus=%RU32 does not match!\n",
                                           cCPUsCfg, cCpus));
                    rc = VERR_INVALID_PARAMETER;
                }
            }

            /*
             * Get the CPU execution cap.
             */
            if (RT_SUCCESS(rc))
            {
                rc = CFGMR3QueryU32Def(pRoot, "CpuExecutionCap", &pVM->uCpuExecutionCap, 100);
                AssertLogRelMsgRC(rc, ("Configuration error: Querying \"CpuExecutionCap\" as integer failed, rc=%Rrc\n", rc));
            }

            /*
             * Get the VM name and UUID.
             */
            if (RT_SUCCESS(rc))
            {
                rc = CFGMR3QueryStringAllocDef(pRoot, "Name", &pUVM->vm.s.pszName, "<unknown>");
                AssertLogRelMsg(RT_SUCCESS(rc) && rc != VERR_CFGM_VALUE_NOT_FOUND, ("Configuration error: Querying \"Name\" failed, rc=%Rrc\n", rc));
            }

            if (RT_SUCCESS(rc))
            {
                rc = CFGMR3QueryBytes(pRoot, "UUID", &pUVM->vm.s.Uuid, sizeof(pUVM->vm.s.Uuid));
                AssertLogRelMsg(RT_SUCCESS(rc) && rc != VERR_CFGM_VALUE_NOT_FOUND, ("Configuration error: Querying \"UUID\" failed, rc=%Rrc\n", rc));
            }

            if (RT_SUCCESS(rc))
            {
                /*
                 * Init the ring-3 components and ring-3 per cpu data, finishing it off
                 * by a relocation round (intermediate context finalization will do this).
                 */
                rc = vmR3InitRing3(pVM, pUVM);
                if (RT_SUCCESS(rc))
                {
                    rc = PGMR3FinalizeMappings(pVM);
                    if (RT_SUCCESS(rc))
                    {

                        LogFlow(("Ring-3 init succeeded\n"));

                        /*
                         * Init the Ring-0 components.
                         */
                        rc = vmR3InitRing0(pVM);
                        if (RT_SUCCESS(rc))
                        {
                            /* Relocate again, because some switcher fixups depends on R0 init results. */
                            VMR3Relocate(pVM, 0);

#ifdef VBOX_WITH_DEBUGGER
                            /*
                             * Init the tcp debugger console if we're building
                             * with debugger support.
                             */
                            void *pvUser = NULL;
                            rc = DBGCTcpCreate(pVM, &pvUser);
                            /* An occupied debugger port is not fatal. */
                            if (    RT_SUCCESS(rc)
                                ||  rc == VERR_NET_ADDRESS_IN_USE)
                            {
                                pUVM->vm.s.pvDBGC = pvUser;
#endif
                                /*
                                 * Init the Guest Context components.
                                 */
                                rc = vmR3InitGC(pVM);
                                if (RT_SUCCESS(rc))
                                {
                                    /*
                                     * Now we can safely set the VM halt method to default.
                                     */
                                    rc = vmR3SetHaltMethodU(pUVM, VMHALTMETHOD_DEFAULT);
                                    if (RT_SUCCESS(rc))
                                    {
                                        /*
                                         * Set the state and link into the global list.
                                         */
                                        vmR3SetState(pVM, VMSTATE_CREATED, VMSTATE_CREATING);
                                        pUVM->pNext = g_pUVMsHead;
                                        g_pUVMsHead = pUVM;

#ifdef LOG_ENABLED
                                        RTLogSetCustomPrefixCallback(NULL, vmR3LogPrefixCallback, pUVM);
#endif
                                        return VINF_SUCCESS;
                                    }
                                }
#ifdef VBOX_WITH_DEBUGGER
                                DBGCTcpTerminate(pVM, pUVM->vm.s.pvDBGC);
                                pUVM->vm.s.pvDBGC = NULL;
                            }
#endif
                            //..
                        }
                    }
                    vmR3Destroy(pVM);
                }
            }
            //..

            /* Clean CFGM. */
            int rc2 = CFGMR3Term(pVM);
            AssertRC(rc2);
        }

        /*
         * Do automatic cleanups while the VM structure is still alive and all
         * references to it are still working.
         */
        PDMR3CritSectTerm(pVM);

        /*
         * Drop all references to VM and the VMCPU structures, then
         * tell GVMM to destroy the VM.
         */
        pUVM->pVM = NULL;
        for (VMCPUID i = 0; i < pUVM->cCpus; i++)
        {
            pUVM->aCpus[i].pVM = NULL;
            pUVM->aCpus[i].pVCpu = NULL;
        }
        Assert(pUVM->vm.s.enmHaltMethod == VMHALTMETHOD_BOOTSTRAP);

        if (pUVM->cCpus > 1)
        {
            /* Poke the other EMTs since they may have stale pVM and pVCpu references
               on the stack (see VMR3WaitU for instance) if they've been awakened after
               VM creation. */
            for (VMCPUID i = 1; i < pUVM->cCpus; i++)
                VMR3NotifyCpuFFU(&pUVM->aCpus[i], 0);
            RTThreadSleep(RT_MIN(100 + 25 *(pUVM->cCpus - 1), 500)); /* very sophisticated */
        }

        int rc2 = SUPR3CallVMMR0Ex(CreateVMReq.pVMR0, 0 /*idCpu*/, VMMR0_DO_GVMM_DESTROY_VM, 0, NULL);
        AssertRC(rc2);
    }
    else
        vmR3SetErrorU(pUVM, rc, RT_SRC_POS, N_("VM creation failed (GVMM)"));

    LogFlow(("vmR3CreateU: returns %Rrc\n", rc));
    return rc;
}
806
807
808/**
809 * Register the calling EMT with GVM.
810 *
811 * @returns VBox status code.
812 * @param pVM The VM handle.
813 * @param idCpu The Virtual CPU ID.
814 */
815static DECLCALLBACK(int) vmR3RegisterEMT(PVM pVM, VMCPUID idCpu)
816{
817 Assert(VMMGetCpuId(pVM) == idCpu);
818 int rc = SUPR3CallVMMR0Ex(pVM->pVMR0, idCpu, VMMR0_DO_GVMM_REGISTER_VMCPU, 0, NULL);
819 if (RT_FAILURE(rc))
820 LogRel(("idCpu=%u rc=%Rrc\n", idCpu, rc));
821 return rc;
822}
823
824
825/**
826 * Initializes all R3 components of the VM
827 */
828static int vmR3InitRing3(PVM pVM, PUVM pUVM)
829{
830 int rc;
831
832 /*
833 * Register the other EMTs with GVM.
834 */
835 for (VMCPUID idCpu = 1; idCpu < pVM->cCpus; idCpu++)
836 {
837 rc = VMR3ReqCallWaitU(pUVM, idCpu, (PFNRT)vmR3RegisterEMT, 2, pVM, idCpu);
838 if (RT_FAILURE(rc))
839 return rc;
840 }
841
842 /*
843 * Init all R3 components, the order here might be important.
844 */
845 rc = MMR3Init(pVM);
846 if (RT_SUCCESS(rc))
847 {
848 STAM_REG(pVM, &pVM->StatTotalInGC, STAMTYPE_PROFILE_ADV, "/PROF/VM/InGC", STAMUNIT_TICKS_PER_CALL, "Profiling the total time spent in GC.");
849 STAM_REG(pVM, &pVM->StatSwitcherToGC, STAMTYPE_PROFILE_ADV, "/PROF/VM/SwitchToGC", STAMUNIT_TICKS_PER_CALL, "Profiling switching to GC.");
850 STAM_REG(pVM, &pVM->StatSwitcherToHC, STAMTYPE_PROFILE_ADV, "/PROF/VM/SwitchToHC", STAMUNIT_TICKS_PER_CALL, "Profiling switching to HC.");
851 STAM_REG(pVM, &pVM->StatSwitcherSaveRegs, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/SaveRegs", STAMUNIT_TICKS_PER_CALL,"Profiling switching to GC.");
852 STAM_REG(pVM, &pVM->StatSwitcherSysEnter, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/SysEnter", STAMUNIT_TICKS_PER_CALL,"Profiling switching to GC.");
853 STAM_REG(pVM, &pVM->StatSwitcherDebug, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/Debug", STAMUNIT_TICKS_PER_CALL,"Profiling switching to GC.");
854 STAM_REG(pVM, &pVM->StatSwitcherCR0, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/CR0", STAMUNIT_TICKS_PER_CALL, "Profiling switching to GC.");
855 STAM_REG(pVM, &pVM->StatSwitcherCR4, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/CR4", STAMUNIT_TICKS_PER_CALL, "Profiling switching to GC.");
856 STAM_REG(pVM, &pVM->StatSwitcherLgdt, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/Lgdt", STAMUNIT_TICKS_PER_CALL, "Profiling switching to GC.");
857 STAM_REG(pVM, &pVM->StatSwitcherLidt, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/Lidt", STAMUNIT_TICKS_PER_CALL, "Profiling switching to GC.");
858 STAM_REG(pVM, &pVM->StatSwitcherLldt, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/Lldt", STAMUNIT_TICKS_PER_CALL, "Profiling switching to GC.");
859 STAM_REG(pVM, &pVM->StatSwitcherTSS, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/TSS", STAMUNIT_TICKS_PER_CALL, "Profiling switching to GC.");
860 STAM_REG(pVM, &pVM->StatSwitcherJmpCR3, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/JmpCR3", STAMUNIT_TICKS_PER_CALL,"Profiling switching to GC.");
861 STAM_REG(pVM, &pVM->StatSwitcherRstrRegs, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/RstrRegs", STAMUNIT_TICKS_PER_CALL,"Profiling switching to GC.");
862
863 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
864 {
865 rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltYield, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "Profiling halted state yielding.", "/PROF/VM/CPU%d/Halt/Yield", idCpu);
866 AssertRC(rc);
867 rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltBlock, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "Profiling halted state blocking.", "/PROF/VM/CPU%d/Halt/Block", idCpu);
868 AssertRC(rc);
869 rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltBlockOverslept, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "Time wasted by blocking too long.", "/PROF/VM/CPU%d/Halt/BlockOverslept", idCpu);
870 AssertRC(rc);
871 rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltBlockInsomnia, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "Time slept when returning to early.","/PROF/VM/CPU%d/Halt/BlockInsomnia", idCpu);
872 AssertRC(rc);
873 rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltBlockOnTime, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "Time slept on time.", "/PROF/VM/CPU%d/Halt/BlockOnTime", idCpu);
874 AssertRC(rc);
875 rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltTimers, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "Profiling halted state timer tasks.", "/PROF/VM/CPU%d/Halt/Timers", idCpu);
876 AssertRC(rc);
877 }
878
879 STAM_REG(pVM, &pUVM->vm.s.StatReqAllocNew, STAMTYPE_COUNTER, "/VM/Req/AllocNew", STAMUNIT_OCCURENCES, "Number of VMR3ReqAlloc returning a new packet.");
880 STAM_REG(pVM, &pUVM->vm.s.StatReqAllocRaces, STAMTYPE_COUNTER, "/VM/Req/AllocRaces", STAMUNIT_OCCURENCES, "Number of VMR3ReqAlloc causing races.");
881 STAM_REG(pVM, &pUVM->vm.s.StatReqAllocRecycled, STAMTYPE_COUNTER, "/VM/Req/AllocRecycled", STAMUNIT_OCCURENCES, "Number of VMR3ReqAlloc returning a recycled packet.");
882 STAM_REG(pVM, &pUVM->vm.s.StatReqFree, STAMTYPE_COUNTER, "/VM/Req/Free", STAMUNIT_OCCURENCES, "Number of VMR3ReqFree calls.");
883 STAM_REG(pVM, &pUVM->vm.s.StatReqFreeOverflow, STAMTYPE_COUNTER, "/VM/Req/FreeOverflow", STAMUNIT_OCCURENCES, "Number of times the request was actually freed.");
884 STAM_REG(pVM, &pUVM->vm.s.StatReqProcessed, STAMTYPE_COUNTER, "/VM/Req/Processed", STAMUNIT_OCCURENCES, "Number of processed requests (any queue).");
885 STAM_REG(pVM, &pUVM->vm.s.StatReqMoreThan1, STAMTYPE_COUNTER, "/VM/Req/MoreThan1", STAMUNIT_OCCURENCES, "Number of times there are more than one request on the queue when processing it.");
886 STAM_REG(pVM, &pUVM->vm.s.StatReqPushBackRaces, STAMTYPE_COUNTER, "/VM/Req/PushBackRaces", STAMUNIT_OCCURENCES, "Number of push back races.");
887
888 rc = CPUMR3Init(pVM);
889 if (RT_SUCCESS(rc))
890 {
891 rc = HWACCMR3Init(pVM);
892 if (RT_SUCCESS(rc))
893 {
894 rc = PGMR3Init(pVM);
895 if (RT_SUCCESS(rc))
896 {
897 rc = REMR3Init(pVM);
898 if (RT_SUCCESS(rc))
899 {
900 rc = MMR3InitPaging(pVM);
901 if (RT_SUCCESS(rc))
902 rc = TMR3Init(pVM);
903 if (RT_SUCCESS(rc))
904 {
905 rc = FTMR3Init(pVM);
906 if (RT_SUCCESS(rc))
907 {
908 rc = VMMR3Init(pVM);
909 if (RT_SUCCESS(rc))
910 {
911 rc = SELMR3Init(pVM);
912 if (RT_SUCCESS(rc))
913 {
914 rc = TRPMR3Init(pVM);
915 if (RT_SUCCESS(rc))
916 {
917 rc = CSAMR3Init(pVM);
918 if (RT_SUCCESS(rc))
919 {
920 rc = PATMR3Init(pVM);
921 if (RT_SUCCESS(rc))
922 {
923 rc = IOMR3Init(pVM);
924 if (RT_SUCCESS(rc))
925 {
926 rc = EMR3Init(pVM);
927 if (RT_SUCCESS(rc))
928 {
929 rc = IEMR3Init(pVM);
930 if (RT_SUCCESS(rc))
931 {
932 rc = DBGFR3Init(pVM);
933 if (RT_SUCCESS(rc))
934 {
935 rc = PDMR3Init(pVM);
936 if (RT_SUCCESS(rc))
937 {
938 rc = PGMR3InitDynMap(pVM);
939 if (RT_SUCCESS(rc))
940 rc = MMR3HyperInitFinalize(pVM);
941 if (RT_SUCCESS(rc))
942 rc = PATMR3InitFinalize(pVM);
943 if (RT_SUCCESS(rc))
944 rc = PGMR3InitFinalize(pVM);
945 if (RT_SUCCESS(rc))
946 rc = SELMR3InitFinalize(pVM);
947 if (RT_SUCCESS(rc))
948 rc = TMR3InitFinalize(pVM);
949 if (RT_SUCCESS(rc))
950 rc = REMR3InitFinalize(pVM);
951 if (RT_SUCCESS(rc))
952 rc = vmR3InitDoCompleted(pVM, VMINITCOMPLETED_RING3);
953 if (RT_SUCCESS(rc))
954 {
955 LogFlow(("vmR3InitRing3: returns %Rrc\n", VINF_SUCCESS));
956 return VINF_SUCCESS;
957 }
958
959 int rc2 = PDMR3Term(pVM);
960 AssertRC(rc2);
961 }
962 int rc2 = DBGFR3Term(pVM);
963 AssertRC(rc2);
964 }
965 int rc2 = IEMR3Term(pVM);
966 AssertRC(rc2);
967 }
968 int rc2 = EMR3Term(pVM);
969 AssertRC(rc2);
970 }
971 int rc2 = IOMR3Term(pVM);
972 AssertRC(rc2);
973 }
974 int rc2 = PATMR3Term(pVM);
975 AssertRC(rc2);
976 }
977 int rc2 = CSAMR3Term(pVM);
978 AssertRC(rc2);
979 }
980 int rc2 = TRPMR3Term(pVM);
981 AssertRC(rc2);
982 }
983 int rc2 = SELMR3Term(pVM);
984 AssertRC(rc2);
985 }
986 int rc2 = VMMR3Term(pVM);
987 AssertRC(rc2);
988 }
989 int rc2 = FTMR3Term(pVM);
990 AssertRC(rc2);
991 }
992 int rc2 = TMR3Term(pVM);
993 AssertRC(rc2);
994 }
995 int rc2 = REMR3Term(pVM);
996 AssertRC(rc2);
997 }
998 int rc2 = PGMR3Term(pVM);
999 AssertRC(rc2);
1000 }
1001 int rc2 = HWACCMR3Term(pVM);
1002 AssertRC(rc2);
1003 }
1004 //int rc2 = CPUMR3Term(pVM);
1005 //AssertRC(rc2);
1006 }
1007 /* MMR3Term is not called here because it'll kill the heap. */
1008 }
1009
1010 LogFlow(("vmR3InitRing3: returns %Rrc\n", rc));
1011 return rc;
1012}
1013
1014
1015/**
1016 * Initializes all R0 components of the VM
1017 */
1018static int vmR3InitRing0(PVM pVM)
1019{
1020 LogFlow(("vmR3InitRing0:\n"));
1021
1022 /*
1023 * Check for FAKE suplib mode.
1024 */
1025 int rc = VINF_SUCCESS;
1026 const char *psz = RTEnvGet("VBOX_SUPLIB_FAKE");
1027 if (!psz || strcmp(psz, "fake"))
1028 {
1029 /*
1030 * Call the VMMR0 component and let it do the init.
1031 */
1032 rc = VMMR3InitR0(pVM);
1033 }
1034 else
1035 Log(("vmR3InitRing0: skipping because of VBOX_SUPLIB_FAKE=fake\n"));
1036
1037 /*
1038 * Do notifications and return.
1039 */
1040 if (RT_SUCCESS(rc))
1041 rc = vmR3InitDoCompleted(pVM, VMINITCOMPLETED_RING0);
1042 if (RT_SUCCESS(rc))
1043 rc = vmR3InitDoCompleted(pVM, VMINITCOMPLETED_HWACCM);
1044
1045 /** @todo Move this to the VMINITCOMPLETED_HWACCM notification handler. */
1046 if (RT_SUCCESS(rc))
1047 CPUMR3SetHWVirtEx(pVM, HWACCMIsEnabled(pVM));
1048
1049 LogFlow(("vmR3InitRing0: returns %Rrc\n", rc));
1050 return rc;
1051}
1052
1053
1054/**
1055 * Initializes all GC components of the VM
1056 */
1057static int vmR3InitGC(PVM pVM)
1058{
1059 LogFlow(("vmR3InitGC:\n"));
1060
1061 /*
1062 * Check for FAKE suplib mode.
1063 */
1064 int rc = VINF_SUCCESS;
1065 const char *psz = RTEnvGet("VBOX_SUPLIB_FAKE");
1066 if (!psz || strcmp(psz, "fake"))
1067 {
1068 /*
1069 * Call the VMMR0 component and let it do the init.
1070 */
1071 rc = VMMR3InitRC(pVM);
1072 }
1073 else
1074 Log(("vmR3InitGC: skipping because of VBOX_SUPLIB_FAKE=fake\n"));
1075
1076 /*
1077 * Do notifications and return.
1078 */
1079 if (RT_SUCCESS(rc))
1080 rc = vmR3InitDoCompleted(pVM, VMINITCOMPLETED_GC);
1081 LogFlow(("vmR3InitGC: returns %Rrc\n", rc));
1082 return rc;
1083}
1084
1085
1086/**
1087 * Do init completed notifications.
1088 *
1089 * @returns VBox status code.
1090 * @param pVM The VM handle.
1091 * @param enmWhat What's completed.
1092 */
1093static int vmR3InitDoCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
1094{
1095 int rc = VMMR3InitCompleted(pVM, enmWhat);
1096 if (RT_SUCCESS(rc))
1097 rc = HWACCMR3InitCompleted(pVM, enmWhat);
1098 if (RT_SUCCESS(rc))
1099 rc = PGMR3InitCompleted(pVM, enmWhat);
1100 return rc;
1101}
1102
1103
1104/**
1105 * Logger callback for inserting a custom prefix.
1106 *
1107 * @returns Number of chars written.
1108 * @param pLogger The logger.
1109 * @param pchBuf The output buffer.
1110 * @param cchBuf The output buffer size.
1111 * @param pvUser Pointer to the UVM structure.
1112 */
1113static DECLCALLBACK(size_t) vmR3LogPrefixCallback(PRTLOGGER pLogger, char *pchBuf, size_t cchBuf, void *pvUser)
1114{
1115 AssertReturn(cchBuf >= 2, 0);
1116 PUVM pUVM = (PUVM)pvUser;
1117 PUVMCPU pUVCpu = (PUVMCPU)RTTlsGet(pUVM->vm.s.idxTLS);
1118 if (pUVCpu)
1119 {
1120 static const char s_szHex[17] = "0123456789abcdef";
1121 VMCPUID const idCpu = pUVCpu->idCpu;
1122 pchBuf[1] = s_szHex[ idCpu & 15];
1123 pchBuf[0] = s_szHex[(idCpu >> 4) & 15];
1124 }
1125 else
1126 {
1127 pchBuf[0] = 'x';
1128 pchBuf[1] = 'y';
1129 }
1130
1131 return 2;
1132}
1133
1134
/**
 * Calls the relocation functions for all VMM components so they can update
 * any GC pointers. When this function is called all the basic VM members
 * have been updated and the actual memory relocation have been done
 * by the PGM/MM.
 *
 * This is used both on init and on runtime relocations.
 *
 * @param   pVM         VM handle.
 * @param   offDelta    Relocation delta relative to old location.
 */
VMMR3DECL(void) VMR3Relocate(PVM pVM, RTGCINTPTR offDelta)
{
    LogFlow(("VMR3Relocate: offDelta=%RGv\n", offDelta));

    /*
     * The order here is very important!  Do not reorder these calls without
     * understanding the dependencies between the components.
     */
    PGMR3Relocate(pVM, offDelta);
    PDMR3LdrRelocateU(pVM->pUVM, offDelta);
    PGMR3Relocate(pVM, 0);              /* Repeat after PDM relocation. */
    CPUMR3Relocate(pVM);
    HWACCMR3Relocate(pVM);
    SELMR3Relocate(pVM);
    VMMR3Relocate(pVM, offDelta);
    SELMR3Relocate(pVM);                /* !hack! fix stack! */
    TRPMR3Relocate(pVM, offDelta);
    PATMR3Relocate(pVM);
    CSAMR3Relocate(pVM, offDelta);
    IOMR3Relocate(pVM, offDelta);
    EMR3Relocate(pVM);
    TMR3Relocate(pVM, offDelta);
    IEMR3Relocate(pVM);
    DBGFR3Relocate(pVM, offDelta);
    PDMR3Relocate(pVM, offDelta);       /* PDM (devices/drivers) goes last. */
}
1171
1172
1173/**
1174 * EMT rendezvous worker for VMR3PowerOn.
1175 *
1176 * @returns VERR_VM_INVALID_VM_STATE or VINF_SUCCESS. (This is a strict return
1177 * code, see FNVMMEMTRENDEZVOUS.)
1178 *
1179 * @param pVM The VM handle.
1180 * @param pVCpu The VMCPU handle of the EMT.
1181 * @param pvUser Ignored.
1182 */
1183static DECLCALLBACK(VBOXSTRICTRC) vmR3PowerOn(PVM pVM, PVMCPU pVCpu, void *pvUser)
1184{
1185 LogFlow(("vmR3PowerOn: pVM=%p pVCpu=%p/#%u\n", pVM, pVCpu, pVCpu->idCpu));
1186 Assert(!pvUser); NOREF(pvUser);
1187
1188 /*
1189 * The first thread thru here tries to change the state. We shouldn't be
1190 * called again if this fails.
1191 */
1192 if (pVCpu->idCpu == pVM->cCpus - 1)
1193 {
1194 int rc = vmR3TrySetState(pVM, "VMR3PowerOn", 1, VMSTATE_POWERING_ON, VMSTATE_CREATED);
1195 if (RT_FAILURE(rc))
1196 return rc;
1197 }
1198
1199 VMSTATE enmVMState = VMR3GetState(pVM);
1200 AssertMsgReturn(enmVMState == VMSTATE_POWERING_ON,
1201 ("%s\n", VMR3GetStateName(enmVMState)),
1202 VERR_INTERNAL_ERROR_4);
1203
1204 /*
1205 * All EMTs changes their state to started.
1206 */
1207 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
1208
1209 /*
1210 * EMT(0) is last thru here and it will make the notification calls
1211 * and advance the state.
1212 */
1213 if (pVCpu->idCpu == 0)
1214 {
1215 PDMR3PowerOn(pVM);
1216 vmR3SetState(pVM, VMSTATE_RUNNING, VMSTATE_POWERING_ON);
1217 }
1218
1219 return VINF_SUCCESS;
1220}
1221
1222
1223/**
1224 * Powers on the virtual machine.
1225 *
1226 * @returns VBox status code.
1227 *
1228 * @param pVM The VM to power on.
1229 *
1230 * @thread Any thread.
1231 * @vmstate Created
1232 * @vmstateto PoweringOn+Running
1233 */
1234VMMR3DECL(int) VMR3PowerOn(PVM pVM)
1235{
1236 LogFlow(("VMR3PowerOn: pVM=%p\n", pVM));
1237 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1238
1239 /*
1240 * Gather all the EMTs to reduce the init TSC drift and keep
1241 * the state changing APIs a bit uniform.
1242 */
1243 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
1244 vmR3PowerOn, NULL);
1245 LogFlow(("VMR3PowerOn: returns %Rrc\n", rc));
1246 return rc;
1247}
1248
1249
1250/**
1251 * Does the suspend notifications.
1252 *
1253 * @param pVM The VM handle.
1254 * @thread EMT(0)
1255 */
1256static void vmR3SuspendDoWork(PVM pVM)
1257{
1258 PDMR3Suspend(pVM);
1259}
1260
1261
1262/**
1263 * EMT rendezvous worker for VMR3Suspend.
1264 *
1265 * @returns VERR_VM_INVALID_VM_STATE or VINF_EM_SUSPEND. (This is a strict
1266 * return code, see FNVMMEMTRENDEZVOUS.)
1267 *
1268 * @param pVM The VM handle.
1269 * @param pVCpu The VMCPU handle of the EMT.
1270 * @param pvUser Ignored.
1271 */
1272static DECLCALLBACK(VBOXSTRICTRC) vmR3Suspend(PVM pVM, PVMCPU pVCpu, void *pvUser)
1273{
1274 LogFlow(("vmR3Suspend: pVM=%p pVCpu=%p/#%u\n", pVM, pVCpu, pVCpu->idCpu));
1275 Assert(!pvUser); NOREF(pvUser);
1276
1277 /*
1278 * The first EMT switches the state to suspending. If this fails because
1279 * something was racing us in one way or the other, there will be no more
1280 * calls and thus the state assertion below is not going to annoy anyone.
1281 */
1282 if (pVCpu->idCpu == pVM->cCpus - 1)
1283 {
1284 int rc = vmR3TrySetState(pVM, "VMR3Suspend", 2,
1285 VMSTATE_SUSPENDING, VMSTATE_RUNNING,
1286 VMSTATE_SUSPENDING_EXT_LS, VMSTATE_RUNNING_LS);
1287 if (RT_FAILURE(rc))
1288 return rc;
1289 }
1290
1291 VMSTATE enmVMState = VMR3GetState(pVM);
1292 AssertMsgReturn( enmVMState == VMSTATE_SUSPENDING
1293 || enmVMState == VMSTATE_SUSPENDING_EXT_LS,
1294 ("%s\n", VMR3GetStateName(enmVMState)),
1295 VERR_INTERNAL_ERROR_4);
1296
1297 /*
1298 * EMT(0) does the actually suspending *after* all the other CPUs have
1299 * been thru here.
1300 */
1301 if (pVCpu->idCpu == 0)
1302 {
1303 vmR3SuspendDoWork(pVM);
1304
1305 int rc = vmR3TrySetState(pVM, "VMR3Suspend", 2,
1306 VMSTATE_SUSPENDED, VMSTATE_SUSPENDING,
1307 VMSTATE_SUSPENDED_EXT_LS, VMSTATE_SUSPENDING_EXT_LS);
1308 if (RT_FAILURE(rc))
1309 return VERR_INTERNAL_ERROR_3;
1310 }
1311
1312 return VINF_EM_SUSPEND;
1313}
1314
1315
1316/**
1317 * Suspends a running VM.
1318 *
1319 * @returns VBox status code. When called on EMT, this will be a strict status
1320 * code that has to be propagated up the call stack.
1321 *
1322 * @param pVM The VM to suspend.
1323 *
1324 * @thread Any thread.
1325 * @vmstate Running or RunningLS
1326 * @vmstateto Suspending + Suspended or SuspendingExtLS + SuspendedExtLS
1327 */
1328VMMR3DECL(int) VMR3Suspend(PVM pVM)
1329{
1330 LogFlow(("VMR3Suspend: pVM=%p\n", pVM));
1331 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1332
1333 /*
1334 * Gather all the EMTs to make sure there are no races before
1335 * changing the VM state.
1336 */
1337 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
1338 vmR3Suspend, NULL);
1339 LogFlow(("VMR3Suspend: returns %Rrc\n", rc));
1340 return rc;
1341}
1342
1343
1344/**
1345 * EMT rendezvous worker for VMR3Resume.
1346 *
1347 * @returns VERR_VM_INVALID_VM_STATE or VINF_EM_RESUME. (This is a strict
1348 * return code, see FNVMMEMTRENDEZVOUS.)
1349 *
1350 * @param pVM The VM handle.
1351 * @param pVCpu The VMCPU handle of the EMT.
1352 * @param pvUser Ignored.
1353 */
1354static DECLCALLBACK(VBOXSTRICTRC) vmR3Resume(PVM pVM, PVMCPU pVCpu, void *pvUser)
1355{
1356 LogFlow(("vmR3Resume: pVM=%p pVCpu=%p/#%u\n", pVM, pVCpu, pVCpu->idCpu));
1357 Assert(!pvUser); NOREF(pvUser);
1358
1359 /*
1360 * The first thread thru here tries to change the state. We shouldn't be
1361 * called again if this fails.
1362 */
1363 if (pVCpu->idCpu == pVM->cCpus - 1)
1364 {
1365 int rc = vmR3TrySetState(pVM, "VMR3Resume", 1, VMSTATE_RESUMING, VMSTATE_SUSPENDED);
1366 if (RT_FAILURE(rc))
1367 return rc;
1368 }
1369
1370 VMSTATE enmVMState = VMR3GetState(pVM);
1371 AssertMsgReturn(enmVMState == VMSTATE_RESUMING,
1372 ("%s\n", VMR3GetStateName(enmVMState)),
1373 VERR_INTERNAL_ERROR_4);
1374
1375#if 0
1376 /*
1377 * All EMTs changes their state to started.
1378 */
1379 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
1380#endif
1381
1382 /*
1383 * EMT(0) is last thru here and it will make the notification calls
1384 * and advance the state.
1385 */
1386 if (pVCpu->idCpu == 0)
1387 {
1388 PDMR3Resume(pVM);
1389 vmR3SetState(pVM, VMSTATE_RUNNING, VMSTATE_RESUMING);
1390 pVM->vm.s.fTeleportedAndNotFullyResumedYet = false;
1391 }
1392
1393 return VINF_EM_RESUME;
1394}
1395
1396
1397/**
1398 * Resume VM execution.
1399 *
1400 * @returns VBox status code. When called on EMT, this will be a strict status
1401 * code that has to be propagated up the call stack.
1402 *
1403 * @param pVM The VM to resume.
1404 *
1405 * @thread Any thread.
1406 * @vmstate Suspended
1407 * @vmstateto Running
1408 */
1409VMMR3DECL(int) VMR3Resume(PVM pVM)
1410{
1411 LogFlow(("VMR3Resume: pVM=%p\n", pVM));
1412 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1413
1414 /*
1415 * Gather all the EMTs to make sure there are no races before
1416 * changing the VM state.
1417 */
1418 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
1419 vmR3Resume, NULL);
1420 LogFlow(("VMR3Resume: returns %Rrc\n", rc));
1421 return rc;
1422}
1423
1424
/**
 * EMT rendezvous worker for VMR3Save and VMR3Teleport that suspends the VM
 * after the live step has been completed.
 *
 * Runs as a descending rendezvous: the highest numbered EMT arrives first
 * and performs the state transition, EMT(0) arrives last and does the
 * actual suspend work.
 *
 * @returns VERR_VM_INVALID_VM_STATE or VINF_EM_SUSPEND. (This is a strict
 *          return code, see FNVMMEMTRENDEZVOUS.)
 *
 * @param   pVM     The VM handle.
 * @param   pVCpu   The VMCPU handle of the EMT.
 * @param   pvUser  The pfSuspended argument of vmR3SaveTeleport.
 */
static DECLCALLBACK(VBOXSTRICTRC) vmR3LiveDoSuspend(PVM pVM, PVMCPU pVCpu, void *pvUser)
{
    LogFlow(("vmR3LiveDoSuspend: pVM=%p pVCpu=%p/#%u\n", pVM, pVCpu, pVCpu->idCpu));
    bool *pfSuspended = (bool *)pvUser;

    /*
     * The first thread thru here tries to change the state. We shouldn't be
     * called again if this fails.
     */
    if (pVCpu->idCpu == pVM->cCpus - 1U)
    {
        PUVM pUVM = pVM->pUVM;
        int rc;

        /* Inspect and change the state under the at-state critical section so
           nothing can race us between the read and the write. */
        RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
        VMSTATE enmVMState = pVM->enmVMState;
        switch (enmVMState)
        {
            case VMSTATE_RUNNING_LS:
                /* Normal live-save case: begin suspending. */
                vmR3SetStateLocked(pVM, pUVM, VMSTATE_SUSPENDING_LS, VMSTATE_RUNNING_LS);
                rc = VINF_SUCCESS;
                break;

            case VMSTATE_SUSPENDED_EXT_LS:
            case VMSTATE_SUSPENDED_LS: /* (via reset) */
                /* Already in a suspended live-save state; no transition needed. */
                rc = VINF_SUCCESS;
                break;

            case VMSTATE_DEBUGGING_LS:
                /* A debug session is in progress; the caller retries later. */
                rc = VERR_TRY_AGAIN;
                break;

            case VMSTATE_OFF_LS:
                /* Powered off during the live save: drop the -LS suffix and fail the save. */
                vmR3SetStateLocked(pVM, pUVM, VMSTATE_OFF, VMSTATE_OFF_LS);
                rc = VERR_SSM_LIVE_POWERED_OFF;
                break;

            case VMSTATE_FATAL_ERROR_LS:
                /* Fatal error during the live save: leave the -LS state and fail. */
                vmR3SetStateLocked(pVM, pUVM, VMSTATE_FATAL_ERROR, VMSTATE_FATAL_ERROR_LS);
                rc = VERR_SSM_LIVE_FATAL_ERROR;
                break;

            case VMSTATE_GURU_MEDITATION_LS:
                /* Guru meditation during the live save: leave the -LS state and fail. */
                vmR3SetStateLocked(pVM, pUVM, VMSTATE_GURU_MEDITATION, VMSTATE_GURU_MEDITATION_LS);
                rc = VERR_SSM_LIVE_GURU_MEDITATION;
                break;

            case VMSTATE_POWERING_OFF_LS:
            case VMSTATE_SUSPENDING_EXT_LS:
            case VMSTATE_RESETTING_LS:
            default:
                /* Transitional or unexpected states should not be observed here. */
                AssertMsgFailed(("%s\n", VMR3GetStateName(enmVMState)));
                rc = VERR_INTERNAL_ERROR_3;
                break;
        }
        RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
        if (RT_FAILURE(rc))
        {
            LogFlow(("vmR3LiveDoSuspend: returns %Rrc (state was %s)\n", rc, VMR3GetStateName(enmVMState)));
            return rc;
        }
    }

    /* NOTE(review): this insists on SuspendingLS even though the switch above
       accepts SuspendedLS/SuspendedExtLS with VINF_SUCCESS — confirm those
       paths cannot reach this point in practice. */
    VMSTATE enmVMState = VMR3GetState(pVM);
    AssertMsgReturn(enmVMState == VMSTATE_SUSPENDING_LS,
                    ("%s\n", VMR3GetStateName(enmVMState)),
                    VERR_INTERNAL_ERROR_4);

    /*
     * Only EMT(0) have work to do since it's last thru here.
     */
    if (pVCpu->idCpu == 0)
    {
        vmR3SuspendDoWork(pVM);
        int rc = vmR3TrySetState(pVM, "VMR3Suspend", 1,
                                 VMSTATE_SUSPENDED_LS, VMSTATE_SUSPENDING_LS);
        if (RT_FAILURE(rc))
            return VERR_INTERNAL_ERROR_3;

        /* Tell the caller we suspended the VM. */
        *pfSuspended = true;
    }

    return VINF_EM_SUSPEND;
}
1520
1521
1522/**
1523 * EMT rendezvous worker that VMR3Save and VMR3Teleport uses to clean up a
1524 * SSMR3LiveDoStep1 failure.
1525 *
1526 * Doing this as a rendezvous operation avoids all annoying transition
1527 * states.
1528 *
1529 * @returns VERR_VM_INVALID_VM_STATE, VINF_SUCCESS or some specific VERR_SSM_*
1530 * status code. (This is a strict return code, see FNVMMEMTRENDEZVOUS.)
1531 *
1532 * @param pVM The VM handle.
1533 * @param pVCpu The VMCPU handle of the EMT.
1534 * @param pvUser The pfSuspended argument of vmR3SaveTeleport.
1535 */
1536static DECLCALLBACK(VBOXSTRICTRC) vmR3LiveDoStep1Cleanup(PVM pVM, PVMCPU pVCpu, void *pvUser)
1537{
1538 LogFlow(("vmR3LiveDoStep1Cleanup: pVM=%p pVCpu=%p/#%u\n", pVM, pVCpu, pVCpu->idCpu));
1539 bool *pfSuspended = (bool *)pvUser;
1540 NOREF(pVCpu);
1541
1542 int rc = vmR3TrySetState(pVM, "vmR3LiveDoStep1Cleanup", 8,
1543 VMSTATE_OFF, VMSTATE_OFF_LS, /* 1 */
1544 VMSTATE_FATAL_ERROR, VMSTATE_FATAL_ERROR_LS, /* 2 */
1545 VMSTATE_GURU_MEDITATION, VMSTATE_GURU_MEDITATION_LS, /* 3 */
1546 VMSTATE_SUSPENDED, VMSTATE_SUSPENDED_LS, /* 4 */
1547 VMSTATE_SUSPENDED, VMSTATE_SAVING,
1548 VMSTATE_SUSPENDED, VMSTATE_SUSPENDED_EXT_LS,
1549 VMSTATE_RUNNING, VMSTATE_RUNNING_LS,
1550 VMSTATE_DEBUGGING, VMSTATE_DEBUGGING_LS);
1551 if (rc == 1)
1552 rc = VERR_SSM_LIVE_POWERED_OFF;
1553 else if (rc == 2)
1554 rc = VERR_SSM_LIVE_FATAL_ERROR;
1555 else if (rc == 3)
1556 rc = VERR_SSM_LIVE_GURU_MEDITATION;
1557 else if (rc == 4)
1558 {
1559 *pfSuspended = true;
1560 rc = VINF_SUCCESS;
1561 }
1562 else if (rc > 0)
1563 rc = VINF_SUCCESS;
1564 return rc;
1565}
1566
1567
/**
 * EMT(0) worker for VMR3Save and VMR3Teleport that completes the live save.
 *
 * @returns VBox status code.
 * @retval  VINF_SSM_LIVE_SUSPENDED if VMR3Suspend was called.
 *
 * @param   pVM     The VM handle.
 * @param   pSSM    The handle of saved state operation.
 *
 * @thread  EMT(0)
 */
static DECLCALLBACK(int) vmR3LiveDoStep2(PVM pVM, PSSMHANDLE pSSM)
{
    LogFlow(("vmR3LiveDoStep2: pVM=%p pSSM=%p\n", pVM, pSSM));
    VM_ASSERT_EMT0(pVM);

    /*
     * Advance the state and mark if VMR3Suspend was called.
     */
    int rc = VINF_SUCCESS;
    VMSTATE enmVMState = VMR3GetState(pVM);
    if (enmVMState == VMSTATE_SUSPENDED_LS)
        /* We suspended the VM ourselves during the live step. */
        vmR3SetState(pVM, VMSTATE_SAVING, VMSTATE_SUSPENDED_LS);
    else
    {
        /* Someone else suspended the VM (SuspendedExtLS) or it is already
           Saving; signal this to the caller via VINF_SSM_LIVE_SUSPENDED. */
        if (enmVMState != VMSTATE_SAVING)
            vmR3SetState(pVM, VMSTATE_SAVING, VMSTATE_SUSPENDED_EXT_LS);
        rc = VINF_SSM_LIVE_SUSPENDED;
    }

    /*
     * Finish up and release the handle. Careful with the status codes:
     * a failure from step 2 / done overrides our informational status,
     * but never the other way around.
     */
    int rc2 = SSMR3LiveDoStep2(pSSM);
    if (rc == VINF_SUCCESS || (RT_FAILURE(rc2) && RT_SUCCESS(rc)))
        rc = rc2;

    rc2 = SSMR3LiveDone(pSSM);
    if (rc == VINF_SUCCESS || (RT_FAILURE(rc2) && RT_SUCCESS(rc)))
        rc = rc2;

    /*
     * Advance to the final state and return.
     */
    vmR3SetState(pVM, VMSTATE_SUSPENDED, VMSTATE_SAVING);
    /* The return value must not collide with the VINF_EM_* strict codes. */
    Assert(rc > VINF_EM_LAST || rc < VINF_EM_FIRST);
    return rc;
}
1616
1617
/**
 * Worker for vmR3SaveTeleport that validates the state and calls SSMR3Save or
 * SSMR3LiveSave.
 *
 * @returns VBox status code.
 *
 * @param   pVM                 The VM handle.
 * @param   cMsMaxDowntime      The maximum downtime given as milliseconds.
 * @param   pszFilename         The name of the file.  NULL if pStreamOps is used.
 * @param   pStreamOps          The stream methods.  NULL if pszFilename is used.
 * @param   pvStreamOpsUser     The user argument to the stream methods.
 * @param   enmAfter            What to do afterwards.
 * @param   pfnProgress         Progress callback. Optional.
 * @param   pvProgressUser      User argument for the progress callback.
 * @param   ppSSM               Where to return the saved state handle in case of a
 *                              live snapshot scenario.
 * @param   fSkipStateChanges   Set if we're supposed to skip state changes (FTM delta case)
 *
 * @thread  EMT
 */
static DECLCALLBACK(int) vmR3Save(PVM pVM, uint32_t cMsMaxDowntime, const char *pszFilename, PCSSMSTRMOPS pStreamOps, void *pvStreamOpsUser,
                                  SSMAFTER enmAfter, PFNVMPROGRESS pfnProgress, void *pvProgressUser, PSSMHANDLE *ppSSM,
                                  bool fSkipStateChanges)
{
    int rc = VINF_SUCCESS;

    LogFlow(("vmR3Save: pVM=%p cMsMaxDowntime=%u pszFilename=%p:{%s} pStreamOps=%p pvStreamOpsUser=%p enmAfter=%d pfnProgress=%p pvProgressUser=%p ppSSM=%p\n",
             pVM, cMsMaxDowntime, pszFilename, pszFilename, pStreamOps, pvStreamOpsUser, enmAfter, pfnProgress, pvProgressUser, ppSSM));

    /*
     * Validate input.
     */
    AssertPtrNull(pszFilename);
    AssertPtrNull(pStreamOps);
    AssertPtr(pVM);
    Assert(   enmAfter == SSMAFTER_DESTROY
           || enmAfter == SSMAFTER_CONTINUE
           || enmAfter == SSMAFTER_TELEPORT);
    AssertPtr(ppSSM);
    *ppSSM = NULL;

    /*
     * Change the state and perform/start the saving.
     *
     * On success rc is the 1-based index of the state pair that matched:
     * 1 = Suspended -> Saving (synchronous save), 2 = Running -> RunningLS
     * (live save) — presumed vmR3TrySetState contract based on the branches
     * below; confirm against its definition.
     */
    if (!fSkipStateChanges)
    {
        rc = vmR3TrySetState(pVM, "VMR3Save", 2,
                             VMSTATE_SAVING, VMSTATE_SUSPENDED,
                             VMSTATE_RUNNING_LS, VMSTATE_RUNNING);
    }
    else
    {
        /* FTM delta case: pretend the synchronous-save path matched. */
        Assert(enmAfter != SSMAFTER_TELEPORT);
        rc = 1;
    }

    if (rc == 1 && enmAfter != SSMAFTER_TELEPORT)
    {
        /* Synchronous save of an already suspended VM. */
        rc = SSMR3Save(pVM, pszFilename, pStreamOps, pvStreamOpsUser, enmAfter, pfnProgress, pvProgressUser);
        if (!fSkipStateChanges)
            vmR3SetState(pVM, VMSTATE_SUSPENDED, VMSTATE_SAVING);
    }
    else if (rc == 2 || enmAfter == SSMAFTER_TELEPORT)
    {
        /* Live save/teleport: only starts the operation; the caller drives
           the remaining steps through the returned *ppSSM handle. */
        Assert(!fSkipStateChanges);
        if (enmAfter == SSMAFTER_TELEPORT)
            pVM->vm.s.fTeleportedAndNotFullyResumedYet = true;
        rc = SSMR3LiveSave(pVM, cMsMaxDowntime, pszFilename, pStreamOps, pvStreamOpsUser,
                           enmAfter, pfnProgress, pvProgressUser, ppSSM);
        /* (We're not subject to cancellation just yet.) */
    }
    else
        Assert(RT_FAILURE(rc));
    return rc;
}
1693
1694
/**
 * Common worker for VMR3Save and VMR3Teleport.
 *
 * @returns VBox status code.
 *
 * @param   pVM                 The VM handle.
 * @param   cMsMaxDowntime      The maximum downtime given as milliseconds.
 * @param   pszFilename         The name of the file.  NULL if pStreamOps is used.
 * @param   pStreamOps          The stream methods.  NULL if pszFilename is used.
 * @param   pvStreamOpsUser     The user argument to the stream methods.
 * @param   enmAfter            What to do afterwards.
 * @param   pfnProgress         Progress callback. Optional.
 * @param   pvProgressUser      User argument for the progress callback.
 * @param   pfSuspended         Set if we suspended the VM.
 * @param   fSkipStateChanges   Set if we're supposed to skip state changes (FTM delta case)
 *
 * @thread  Non-EMT
 */
static int vmR3SaveTeleport(PVM pVM, uint32_t cMsMaxDowntime,
                            const char *pszFilename, PCSSMSTRMOPS pStreamOps, void *pvStreamOpsUser,
                            SSMAFTER enmAfter, PFNVMPROGRESS pfnProgress, void *pvProgressUser, bool *pfSuspended,
                            bool fSkipStateChanges)
{
    /*
     * Request the operation in EMT(0).  A non-NULL pSSM on return means
     * vmR3Save started a live operation that we must complete here.
     */
    PSSMHANDLE pSSM;
    int rc = VMR3ReqCallWaitU(pVM->pUVM, 0 /*idDstCpu*/,
                              (PFNRT)vmR3Save, 10, pVM, cMsMaxDowntime, pszFilename, pStreamOps, pvStreamOpsUser,
                              enmAfter, pfnProgress, pvProgressUser, &pSSM, fSkipStateChanges);
    if (   RT_SUCCESS(rc)
        && pSSM)
    {
        Assert(!fSkipStateChanges);

        /*
         * Live snapshot.
         *
         * The state handling here is kind of tricky, doing it on EMT(0) helps
         * a bit. See the VMSTATE diagram for details.
         */
        rc = SSMR3LiveDoStep1(pSSM);
        if (RT_SUCCESS(rc))
        {
            if (VMR3GetState(pVM) != VMSTATE_SAVING)
                for (;;)
                {
                    /* Try suspend the VM. */
                    rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
                                            vmR3LiveDoSuspend, pfSuspended);
                    if (rc != VERR_TRY_AGAIN)
                        break;

                    /* Wait for the state to change. */
                    RTThreadSleep(250); /** @todo Live Migration: fix this polling wait by some smart use of multiple release event semaphores.. */
                }
            if (RT_SUCCESS(rc))
                /* Finish the save on EMT(0). */
                rc = VMR3ReqCallWaitU(pVM->pUVM, 0 /*idDstCpu*/, (PFNRT)vmR3LiveDoStep2, 2, pVM, pSSM);
            else
            {
                /* Suspend failed: just release the saved state handle. */
                int rc2 = VMR3ReqCallWaitU(pVM->pUVM, 0 /*idDstCpu*/, (PFNRT)SSMR3LiveDone, 1, pSSM);
                AssertMsg(rc2 == rc, ("%Rrc != %Rrc\n", rc2, rc));
            }
        }
        else
        {
            /* Step 1 failed: release the handle, then clean up whatever
               live-save state the VM was left in. */
            int rc2 = VMR3ReqCallWaitU(pVM->pUVM, 0 /*idDstCpu*/, (PFNRT)SSMR3LiveDone, 1, pSSM);
            AssertMsg(rc2 == rc, ("%Rrc != %Rrc\n", rc2, rc));

            rc2 = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE, vmR3LiveDoStep1Cleanup, pfSuspended);
            if (RT_FAILURE(rc2) && rc == VERR_SSM_CANCELLED)
                rc = rc2;
        }
    }

    return rc;
}
1772
1773
1774/**
1775 * Save current VM state.
1776 *
1777 * Can be used for both saving the state and creating snapshots.
1778 *
1779 * When called for a VM in the Running state, the saved state is created live
1780 * and the VM is only suspended when the final part of the saving is preformed.
1781 * The VM state will not be restored to Running in this case and it's up to the
1782 * caller to call VMR3Resume if this is desirable. (The rational is that the
1783 * caller probably wish to reconfigure the disks before resuming the VM.)
1784 *
1785 * @returns VBox status code.
1786 *
1787 * @param pVM The VM which state should be saved.
1788 * @param pszFilename The name of the save state file.
1789 * @param pStreamOps The stream methods.
1790 * @param pvStreamOpsUser The user argument to the stream methods.
1791 * @param fContinueAfterwards Whether continue execution afterwards or not.
1792 * When in doubt, set this to true.
1793 * @param pfnProgress Progress callback. Optional.
1794 * @param pvUser User argument for the progress callback.
1795 * @param pfSuspended Set if we suspended the VM.
1796 *
1797 * @thread Non-EMT.
1798 * @vmstate Suspended or Running
1799 * @vmstateto Saving+Suspended or
1800 * RunningLS+SuspendingLS+SuspendedLS+Saving+Suspended.
1801 */
1802VMMR3DECL(int) VMR3Save(PVM pVM, const char *pszFilename, bool fContinueAfterwards, PFNVMPROGRESS pfnProgress, void *pvUser, bool *pfSuspended)
1803{
1804 LogFlow(("VMR3Save: pVM=%p pszFilename=%p:{%s} fContinueAfterwards=%RTbool pfnProgress=%p pvUser=%p pfSuspended=%p\n",
1805 pVM, pszFilename, pszFilename, fContinueAfterwards, pfnProgress, pvUser, pfSuspended));
1806
1807 /*
1808 * Validate input.
1809 */
1810 AssertPtr(pfSuspended);
1811 *pfSuspended = false;
1812 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1813 VM_ASSERT_OTHER_THREAD(pVM);
1814 AssertReturn(VALID_PTR(pszFilename), VERR_INVALID_POINTER);
1815 AssertReturn(*pszFilename, VERR_INVALID_PARAMETER);
1816 AssertPtrNullReturn(pfnProgress, VERR_INVALID_POINTER);
1817
1818 /*
1819 * Join paths with VMR3Teleport.
1820 */
1821 SSMAFTER enmAfter = fContinueAfterwards ? SSMAFTER_CONTINUE : SSMAFTER_DESTROY;
1822 int rc = vmR3SaveTeleport(pVM, 250 /*cMsMaxDowntime*/,
1823 pszFilename, NULL /* pStreamOps */, NULL /* pvStreamOpsUser */,
1824 enmAfter, pfnProgress, pvUser, pfSuspended,
1825 false /* fSkipStateChanges */);
1826 LogFlow(("VMR3Save: returns %Rrc (*pfSuspended=%RTbool)\n", rc, *pfSuspended));
1827 return rc;
1828}
1829
1830/**
1831 * Save current VM state (used by FTM)
1832 *
1833 * Can be used for both saving the state and creating snapshots.
1834 *
1835 * When called for a VM in the Running state, the saved state is created live
1836 * and the VM is only suspended when the final part of the saving is preformed.
1837 * The VM state will not be restored to Running in this case and it's up to the
1838 * caller to call VMR3Resume if this is desirable. (The rational is that the
1839 * caller probably wish to reconfigure the disks before resuming the VM.)
1840 *
1841 * @returns VBox status code.
1842 *
1843 * @param pVM The VM which state should be saved.
1844 * @param pStreamOps The stream methods.
1845 * @param pvStreamOpsUser The user argument to the stream methods.
1846 * @param pfSuspended Set if we suspended the VM.
1847 * @param fSkipStateChanges Set if we're supposed to skip state changes (FTM delta case)
1848 *
1849 * @thread Any
1850 * @vmstate Suspended or Running
1851 * @vmstateto Saving+Suspended or
1852 * RunningLS+SuspendingLS+SuspendedLS+Saving+Suspended.
1853 */
1854VMMR3DECL(int) VMR3SaveFT(PVM pVM, PCSSMSTRMOPS pStreamOps, void *pvStreamOpsUser, bool *pfSuspended,
1855 bool fSkipStateChanges)
1856{
1857 LogFlow(("VMR3SaveFT: pVM=%p pStreamOps=%p pvSteamOpsUser=%p pfSuspended=%p\n",
1858 pVM, pStreamOps, pvStreamOpsUser, pfSuspended));
1859
1860 /*
1861 * Validate input.
1862 */
1863 AssertPtr(pfSuspended);
1864 *pfSuspended = false;
1865 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1866 AssertReturn(pStreamOps, VERR_INVALID_PARAMETER);
1867
1868 /*
1869 * Join paths with VMR3Teleport.
1870 */
1871 int rc = vmR3SaveTeleport(pVM, 250 /*cMsMaxDowntime*/,
1872 NULL, pStreamOps, pvStreamOpsUser,
1873 SSMAFTER_CONTINUE, NULL, NULL, pfSuspended,
1874 fSkipStateChanges);
1875 LogFlow(("VMR3SaveFT: returns %Rrc (*pfSuspended=%RTbool)\n", rc, *pfSuspended));
1876 return rc;
1877}
1878
1879
1880/**
1881 * Teleport the VM (aka live migration).
1882 *
1883 * @returns VBox status code.
1884 *
1885 * @param pVM The VM which state should be saved.
1886 * @param cMsMaxDowntime The maximum downtime given as milliseconds.
1887 * @param pStreamOps The stream methods.
1888 * @param pvStreamOpsUser The user argument to the stream methods.
1889 * @param pfnProgress Progress callback. Optional.
1890 * @param pvProgressUser User argument for the progress callback.
1891 * @param pfSuspended Set if we suspended the VM.
1892 *
1893 * @thread Non-EMT.
1894 * @vmstate Suspended or Running
1895 * @vmstateto Saving+Suspended or
1896 * RunningLS+SuspendingLS+SuspendedLS+Saving+Suspended.
1897 */
1898VMMR3DECL(int) VMR3Teleport(PVM pVM, uint32_t cMsMaxDowntime, PCSSMSTRMOPS pStreamOps, void *pvStreamOpsUser,
1899 PFNVMPROGRESS pfnProgress, void *pvProgressUser, bool *pfSuspended)
1900{
1901 LogFlow(("VMR3Teleport: pVM=%p cMsMaxDowntime=%u pStreamOps=%p pvStreamOps=%p pfnProgress=%p pvProgressUser=%p\n",
1902 pVM, cMsMaxDowntime, pStreamOps, pvStreamOpsUser, pfnProgress, pvProgressUser));
1903
1904 /*
1905 * Validate input.
1906 */
1907 AssertPtr(pfSuspended);
1908 *pfSuspended = false;
1909 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1910 VM_ASSERT_OTHER_THREAD(pVM);
1911 AssertPtrReturn(pStreamOps, VERR_INVALID_POINTER);
1912 AssertPtrNullReturn(pfnProgress, VERR_INVALID_POINTER);
1913
1914 /*
1915 * Join paths with VMR3Save.
1916 */
1917 int rc = vmR3SaveTeleport(pVM, cMsMaxDowntime,
1918 NULL /*pszFilename*/, pStreamOps, pvStreamOpsUser,
1919 SSMAFTER_TELEPORT, pfnProgress, pvProgressUser, pfSuspended,
1920 false /* fSkipStateChanges */);
1921 LogFlow(("VMR3Teleport: returns %Rrc (*pfSuspended=%RTbool)\n", rc, *pfSuspended));
1922 return rc;
1923}
1924
1925
1926
1927/**
1928 * EMT(0) worker for VMR3LoadFromFile and VMR3LoadFromStream.
1929 *
1930 * @returns VBox status code.
1931 *
1932 * @param pVM The VM handle.
1933 * @param pszFilename The name of the file. NULL if pStreamOps is used.
1934 * @param pStreamOps The stream methods. NULL if pszFilename is used.
1935 * @param pvStreamOpsUser The user argument to the stream methods.
1936 * @param pfnProgress Progress callback. Optional.
1937 * @param pvUser User argument for the progress callback.
1938 * @param fTeleporting Indicates whether we're teleporting or not.
1939 * @param fSkipStateChanges Set if we're supposed to skip state changes (FTM delta case)
1940 *
1941 * @thread EMT.
1942 */
1943static DECLCALLBACK(int) vmR3Load(PVM pVM, const char *pszFilename, PCSSMSTRMOPS pStreamOps, void *pvStreamOpsUser,
1944 PFNVMPROGRESS pfnProgress, void *pvProgressUser, bool fTeleporting,
1945 bool fSkipStateChanges)
1946{
1947 int rc = VINF_SUCCESS;
1948
1949 LogFlow(("vmR3Load: pVM=%p pszFilename=%p:{%s} pStreamOps=%p pvStreamOpsUser=%p pfnProgress=%p pvProgressUser=%p fTeleporting=%RTbool\n",
1950 pVM, pszFilename, pszFilename, pStreamOps, pvStreamOpsUser, pfnProgress, pvProgressUser, fTeleporting));
1951
1952 /*
1953 * Validate input (paranoia).
1954 */
1955 AssertPtr(pVM);
1956 AssertPtrNull(pszFilename);
1957 AssertPtrNull(pStreamOps);
1958 AssertPtrNull(pfnProgress);
1959
1960 if (!fSkipStateChanges)
1961 {
1962 /*
1963 * Change the state and perform the load.
1964 *
1965 * Always perform a relocation round afterwards to make sure hypervisor
1966 * selectors and such are correct.
1967 */
1968 rc = vmR3TrySetState(pVM, "VMR3Load", 2,
1969 VMSTATE_LOADING, VMSTATE_CREATED,
1970 VMSTATE_LOADING, VMSTATE_SUSPENDED);
1971 if (RT_FAILURE(rc))
1972 return rc;
1973 }
1974 pVM->vm.s.fTeleportedAndNotFullyResumedYet = fTeleporting;
1975
1976 uint32_t cErrorsPriorToSave = VMR3GetErrorCount(pVM);
1977 rc = SSMR3Load(pVM, pszFilename, pStreamOps, pvStreamOpsUser, SSMAFTER_RESUME, pfnProgress, pvProgressUser);
1978 if (RT_SUCCESS(rc))
1979 {
1980 VMR3Relocate(pVM, 0 /*offDelta*/);
1981 if (!fSkipStateChanges)
1982 vmR3SetState(pVM, VMSTATE_SUSPENDED, VMSTATE_LOADING);
1983 }
1984 else
1985 {
1986 pVM->vm.s.fTeleportedAndNotFullyResumedYet = false;
1987 if (!fSkipStateChanges)
1988 vmR3SetState(pVM, VMSTATE_LOAD_FAILURE, VMSTATE_LOADING);
1989
1990 if (cErrorsPriorToSave == VMR3GetErrorCount(pVM))
1991 rc = VMSetError(pVM, rc, RT_SRC_POS,
1992 N_("Unable to restore the virtual machine's saved state from '%s'. "
1993 "It may be damaged or from an older version of VirtualBox. "
1994 "Please discard the saved state before starting the virtual machine"),
1995 pszFilename);
1996 }
1997
1998 return rc;
1999}
2000
2001
2002/**
2003 * Loads a VM state into a newly created VM or a one that is suspended.
2004 *
2005 * To restore a saved state on VM startup, call this function and then resume
2006 * the VM instead of powering it on.
2007 *
2008 * @returns VBox status code.
2009 *
2010 * @param pVM The VM handle.
2011 * @param pszFilename The name of the save state file.
2012 * @param pfnProgress Progress callback. Optional.
2013 * @param pvUser User argument for the progress callback.
2014 *
2015 * @thread Any thread.
2016 * @vmstate Created, Suspended
2017 * @vmstateto Loading+Suspended
2018 */
2019VMMR3DECL(int) VMR3LoadFromFile(PVM pVM, const char *pszFilename, PFNVMPROGRESS pfnProgress, void *pvUser)
2020{
2021 LogFlow(("VMR3LoadFromFile: pVM=%p pszFilename=%p:{%s} pfnProgress=%p pvUser=%p\n",
2022 pVM, pszFilename, pszFilename, pfnProgress, pvUser));
2023
2024 /*
2025 * Validate input.
2026 */
2027 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
2028 AssertPtrReturn(pszFilename, VERR_INVALID_POINTER);
2029
2030 /*
2031 * Forward the request to EMT(0). No need to setup a rendezvous here
2032 * since there is no execution taking place when this call is allowed.
2033 */
2034 int rc = VMR3ReqCallWaitU(pVM->pUVM, 0 /*idDstCpu*/, (PFNRT)vmR3Load, 8,
2035 pVM, pszFilename, (uintptr_t)NULL /*pStreamOps*/, (uintptr_t)NULL /*pvStreamOpsUser*/, pfnProgress, pvUser,
2036 false /*fTeleporting*/, false /* fSkipStateChanges */);
2037 LogFlow(("VMR3LoadFromFile: returns %Rrc\n", rc));
2038 return rc;
2039}
2040
2041
2042/**
2043 * VMR3LoadFromFile for arbitrary file streams.
2044 *
2045 * @returns VBox status code.
2046 *
2047 * @param pVM The VM handle.
2048 * @param pStreamOps The stream methods.
2049 * @param pvStreamOpsUser The user argument to the stream methods.
2050 * @param pfnProgress Progress callback. Optional.
2051 * @param pvProgressUser User argument for the progress callback.
2052 *
2053 * @thread Any thread.
2054 * @vmstate Created, Suspended
2055 * @vmstateto Loading+Suspended
2056 */
2057VMMR3DECL(int) VMR3LoadFromStream(PVM pVM, PCSSMSTRMOPS pStreamOps, void *pvStreamOpsUser,
2058 PFNVMPROGRESS pfnProgress, void *pvProgressUser)
2059{
2060 LogFlow(("VMR3LoadFromStream: pVM=%p pStreamOps=%p pvStreamOpsUser=%p pfnProgress=%p pvProgressUser=%p\n",
2061 pVM, pStreamOps, pvStreamOpsUser, pfnProgress, pvProgressUser));
2062
2063 /*
2064 * Validate input.
2065 */
2066 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
2067 AssertPtrReturn(pStreamOps, VERR_INVALID_POINTER);
2068
2069 /*
2070 * Forward the request to EMT(0). No need to setup a rendezvous here
2071 * since there is no execution taking place when this call is allowed.
2072 */
2073 int rc = VMR3ReqCallWaitU(pVM->pUVM, 0 /*idDstCpu*/, (PFNRT)vmR3Load, 8,
2074 pVM, (uintptr_t)NULL /*pszFilename*/, pStreamOps, pvStreamOpsUser, pfnProgress, pvProgressUser,
2075 true /*fTeleporting*/, false /* fSkipStateChanges */);
2076 LogFlow(("VMR3LoadFromStream: returns %Rrc\n", rc));
2077 return rc;
2078}
2079
2080
2081/**
2082 * VMR3LoadFromFileFT for arbitrary file streams.
2083 *
2084 * @returns VBox status code.
2085 *
2086 * @param pVM The VM handle.
2087 * @param pStreamOps The stream methods.
2088 * @param pvStreamOpsUser The user argument to the stream methods.
2089 * @param pfnProgress Progress callback. Optional.
2090 * @param pvProgressUser User argument for the progress callback.
2091 *
2092 * @thread Any thread.
2093 * @vmstate Created, Suspended
2094 * @vmstateto Loading+Suspended
2095 */
2096VMMR3DECL(int) VMR3LoadFromStreamFT(PVM pVM, PCSSMSTRMOPS pStreamOps, void *pvStreamOpsUser)
2097{
2098 LogFlow(("VMR3LoadFromStreamFT: pVM=%p pStreamOps=%p pvStreamOpsUser=%p\n",
2099 pVM, pStreamOps, pvStreamOpsUser));
2100
2101 /*
2102 * Validate input.
2103 */
2104 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
2105 AssertPtrReturn(pStreamOps, VERR_INVALID_POINTER);
2106
2107 /*
2108 * Forward the request to EMT(0). No need to setup a rendezvous here
2109 * since there is no execution taking place when this call is allowed.
2110 */
2111 int rc = VMR3ReqCallWaitU(pVM->pUVM, 0 /*idDstCpu*/, (PFNRT)vmR3Load, 8,
2112 pVM, (uintptr_t)NULL /*pszFilename*/, pStreamOps, pvStreamOpsUser, NULL, NULL,
2113 true /*fTeleporting*/, true /* fSkipStateChanges */);
2114 LogFlow(("VMR3LoadFromStream: returns %Rrc\n", rc));
2115 return rc;
2116}
2117
2118/**
2119 * EMT rendezvous worker for VMR3PowerOff.
2120 *
2121 * @returns VERR_VM_INVALID_VM_STATE or VINF_EM_OFF. (This is a strict
2122 * return code, see FNVMMEMTRENDEZVOUS.)
2123 *
2124 * @param pVM The VM handle.
2125 * @param pVCpu The VMCPU handle of the EMT.
2126 * @param pvUser Ignored.
2127 */
2128static DECLCALLBACK(VBOXSTRICTRC) vmR3PowerOff(PVM pVM, PVMCPU pVCpu, void *pvUser)
2129{
2130 LogFlow(("vmR3PowerOff: pVM=%p pVCpu=%p/#%u\n", pVM, pVCpu, pVCpu->idCpu));
2131 Assert(!pvUser); NOREF(pvUser);
2132
2133 /*
2134 * The first EMT thru here will change the state to PoweringOff.
2135 */
2136 if (pVCpu->idCpu == pVM->cCpus - 1)
2137 {
2138 int rc = vmR3TrySetState(pVM, "VMR3PowerOff", 11,
2139 VMSTATE_POWERING_OFF, VMSTATE_RUNNING, /* 1 */
2140 VMSTATE_POWERING_OFF, VMSTATE_SUSPENDED, /* 2 */
2141 VMSTATE_POWERING_OFF, VMSTATE_DEBUGGING, /* 3 */
2142 VMSTATE_POWERING_OFF, VMSTATE_LOAD_FAILURE, /* 4 */
2143 VMSTATE_POWERING_OFF, VMSTATE_GURU_MEDITATION, /* 5 */
2144 VMSTATE_POWERING_OFF, VMSTATE_FATAL_ERROR, /* 6 */
2145 VMSTATE_POWERING_OFF, VMSTATE_CREATED, /* 7 */ /** @todo update the diagram! */
2146 VMSTATE_POWERING_OFF_LS, VMSTATE_RUNNING_LS, /* 8 */
2147 VMSTATE_POWERING_OFF_LS, VMSTATE_DEBUGGING_LS, /* 9 */
2148 VMSTATE_POWERING_OFF_LS, VMSTATE_GURU_MEDITATION_LS,/* 10 */
2149 VMSTATE_POWERING_OFF_LS, VMSTATE_FATAL_ERROR_LS); /* 11 */
2150 if (RT_FAILURE(rc))
2151 return rc;
2152 if (rc >= 7)
2153 SSMR3Cancel(pVM);
2154 }
2155
2156 /*
2157 * Check the state.
2158 */
2159 VMSTATE enmVMState = VMR3GetState(pVM);
2160 AssertMsgReturn( enmVMState == VMSTATE_POWERING_OFF
2161 || enmVMState == VMSTATE_POWERING_OFF_LS,
2162 ("%s\n", VMR3GetStateName(enmVMState)),
2163 VERR_VM_INVALID_VM_STATE);
2164
2165 /*
2166 * EMT(0) does the actual power off work here *after* all the other EMTs
2167 * have been thru and entered the STOPPED state.
2168 */
2169 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STOPPED);
2170 if (pVCpu->idCpu == 0)
2171 {
2172 /*
2173 * For debugging purposes, we will log a summary of the guest state at this point.
2174 */
2175 if (enmVMState != VMSTATE_GURU_MEDITATION)
2176 {
2177 /** @todo SMP support? */
2178 /** @todo make the state dumping at VMR3PowerOff optional. */
2179 bool fOldBuffered = RTLogRelSetBuffering(true /*fBuffered*/);
2180 RTLogRelPrintf("****************** Guest state at power off ******************\n");
2181 DBGFR3Info(pVM, "cpumguest", "verbose", DBGFR3InfoLogRelHlp());
2182 RTLogRelPrintf("***\n");
2183 DBGFR3Info(pVM, "mode", NULL, DBGFR3InfoLogRelHlp());
2184 RTLogRelPrintf("***\n");
2185 DBGFR3Info(pVM, "activetimers", NULL, DBGFR3InfoLogRelHlp());
2186 RTLogRelPrintf("***\n");
2187 DBGFR3Info(pVM, "gdt", NULL, DBGFR3InfoLogRelHlp());
2188 /** @todo dump guest call stack. */
2189#if 1 // "temporary" while debugging #1589
2190 RTLogRelPrintf("***\n");
2191 uint32_t esp = CPUMGetGuestESP(pVCpu);
2192 if ( CPUMGetGuestSS(pVCpu) == 0
2193 && esp < _64K)
2194 {
2195 uint8_t abBuf[PAGE_SIZE];
2196 RTLogRelPrintf("***\n"
2197 "ss:sp=0000:%04x ", esp);
2198 uint32_t Start = esp & ~(uint32_t)63;
2199 int rc = PGMPhysSimpleReadGCPhys(pVM, abBuf, Start, 0x100);
2200 if (RT_SUCCESS(rc))
2201 RTLogRelPrintf("0000:%04x TO 0000:%04x:\n"
2202 "%.*Rhxd\n",
2203 Start, Start + 0x100 - 1,
2204 0x100, abBuf);
2205 else
2206 RTLogRelPrintf("rc=%Rrc\n", rc);
2207
2208 /* grub ... */
2209 if (esp < 0x2000 && esp > 0x1fc0)
2210 {
2211 rc = PGMPhysSimpleReadGCPhys(pVM, abBuf, 0x8000, 0x800);
2212 if (RT_SUCCESS(rc))
2213 RTLogRelPrintf("0000:8000 TO 0000:87ff:\n"
2214 "%.*Rhxd\n",
2215 0x800, abBuf);
2216 }
2217 /* microsoft cdrom hang ... */
2218 if (true)
2219 {
2220 rc = PGMPhysSimpleReadGCPhys(pVM, abBuf, 0x8000, 0x200);
2221 if (RT_SUCCESS(rc))
2222 RTLogRelPrintf("2000:0000 TO 2000:01ff:\n"
2223 "%.*Rhxd\n",
2224 0x200, abBuf);
2225 }
2226 }
2227#endif
2228 RTLogRelSetBuffering(fOldBuffered);
2229 RTLogRelPrintf("************** End of Guest state at power off ***************\n");
2230 }
2231
2232 /*
2233 * Perform the power off notifications and advance the state to
2234 * Off or OffLS.
2235 */
2236 PDMR3PowerOff(pVM);
2237
2238 PUVM pUVM = pVM->pUVM;
2239 RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
2240 enmVMState = pVM->enmVMState;
2241 if (enmVMState == VMSTATE_POWERING_OFF_LS)
2242 vmR3SetStateLocked(pVM, pUVM, VMSTATE_OFF_LS, VMSTATE_POWERING_OFF_LS);
2243 else
2244 vmR3SetStateLocked(pVM, pUVM, VMSTATE_OFF, VMSTATE_POWERING_OFF);
2245 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
2246 }
2247 return VINF_EM_OFF;
2248}
2249
2250
2251/**
2252 * Power off the VM.
2253 *
2254 * @returns VBox status code. When called on EMT, this will be a strict status
2255 * code that has to be propagated up the call stack.
2256 *
2257 * @param pVM The handle of the VM to be powered off.
2258 *
2259 * @thread Any thread.
2260 * @vmstate Suspended, Running, Guru Meditation, Load Failure
2261 * @vmstateto Off or OffLS
2262 */
2263VMMR3DECL(int) VMR3PowerOff(PVM pVM)
2264{
2265 LogFlow(("VMR3PowerOff: pVM=%p\n", pVM));
2266 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
2267
2268 /*
2269 * Gather all the EMTs to make sure there are no races before
2270 * changing the VM state.
2271 */
2272 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
2273 vmR3PowerOff, NULL);
2274 LogFlow(("VMR3PowerOff: returns %Rrc\n", rc));
2275 return rc;
2276}
2277
2278
2279/**
2280 * Destroys the VM.
2281 *
2282 * The VM must be powered off (or never really powered on) to call this
2283 * function. The VM handle is destroyed and can no longer be used up successful
2284 * return.
2285 *
2286 * @returns VBox status code.
2287 *
2288 * @param pVM The handle of the VM which should be destroyed.
2289 *
2290 * @thread Any none emulation thread.
2291 * @vmstate Off, Created
2292 * @vmstateto N/A
2293 */
2294VMMR3DECL(int) VMR3Destroy(PVM pVM)
2295{
2296 LogFlow(("VMR3Destroy: pVM=%p\n", pVM));
2297
2298 /*
2299 * Validate input.
2300 */
2301 if (!pVM)
2302 return VERR_INVALID_VM_HANDLE;
2303 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
2304 AssertLogRelReturn(!VM_IS_EMT(pVM), VERR_VM_THREAD_IS_EMT);
2305
2306 /*
2307 * Change VM state to destroying and unlink the VM.
2308 */
2309 int rc = vmR3TrySetState(pVM, "VMR3Destroy", 1, VMSTATE_DESTROYING, VMSTATE_OFF);
2310 if (RT_FAILURE(rc))
2311 return rc;
2312
2313 /** @todo lock this when we start having multiple machines in a process... */
2314 PUVM pUVM = pVM->pUVM; AssertPtr(pUVM);
2315 if (g_pUVMsHead == pUVM)
2316 g_pUVMsHead = pUVM->pNext;
2317 else
2318 {
2319 PUVM pPrev = g_pUVMsHead;
2320 while (pPrev && pPrev->pNext != pUVM)
2321 pPrev = pPrev->pNext;
2322 AssertMsgReturn(pPrev, ("pUVM=%p / pVM=%p is INVALID!\n", pUVM, pVM), VERR_INVALID_PARAMETER);
2323
2324 pPrev->pNext = pUVM->pNext;
2325 }
2326 pUVM->pNext = NULL;
2327
2328 /*
2329 * Notify registered at destruction listeners.
2330 */
2331 vmR3AtDtor(pVM);
2332
2333 /*
2334 * Call vmR3Destroy on each of the EMTs ending with EMT(0) doing the bulk
2335 * of the cleanup.
2336 */
2337 /* vmR3Destroy on all EMTs, ending with EMT(0). */
2338 rc = VMR3ReqCallWaitU(pUVM, VMCPUID_ALL_REVERSE, (PFNRT)vmR3Destroy, 1, pVM);
2339 AssertLogRelRC(rc);
2340
2341 /* Wait for EMTs and destroy the UVM. */
2342 vmR3DestroyUVM(pUVM, 30000);
2343
2344 LogFlow(("VMR3Destroy: returns VINF_SUCCESS\n"));
2345 return VINF_SUCCESS;
2346}
2347
2348
2349/**
2350 * Internal destruction worker.
2351 *
2352 * This is either called from VMR3Destroy via VMR3ReqCallU or from
2353 * vmR3EmulationThreadWithId when EMT(0) terminates after having called
2354 * VMR3Destroy().
2355 *
2356 * When called on EMT(0), it will performed the great bulk of the destruction.
2357 * When called on the other EMTs, they will do nothing and the whole purpose is
2358 * to return VINF_EM_TERMINATE so they break out of their run loops.
2359 *
2360 * @returns VINF_EM_TERMINATE.
2361 * @param pVM The VM handle.
2362 */
2363DECLCALLBACK(int) vmR3Destroy(PVM pVM)
2364{
2365 PUVM pUVM = pVM->pUVM;
2366 PVMCPU pVCpu = VMMGetCpu(pVM);
2367 Assert(pVCpu);
2368 LogFlow(("vmR3Destroy: pVM=%p pUVM=%p pVCpu=%p idCpu=%u\n", pVM, pUVM, pVCpu, pVCpu->idCpu));
2369
2370 /*
2371 * Only VCPU 0 does the full cleanup (last).
2372 */
2373 if (pVCpu->idCpu == 0)
2374 {
2375 /*
2376 * Dump statistics to the log.
2377 */
2378#if defined(VBOX_WITH_STATISTICS) || defined(LOG_ENABLED)
2379 RTLogFlags(NULL, "nodisabled nobuffered");
2380#endif
2381#ifdef VBOX_WITH_STATISTICS
2382 STAMR3Dump(pVM, "*");
2383#else
2384 LogRel(("************************* Statistics *************************\n"));
2385 STAMR3DumpToReleaseLog(pVM, "*");
2386 LogRel(("********************* End of statistics **********************\n"));
2387#endif
2388
2389 /*
2390 * Destroy the VM components.
2391 */
2392 int rc = TMR3Term(pVM);
2393 AssertRC(rc);
2394#ifdef VBOX_WITH_DEBUGGER
2395 rc = DBGCTcpTerminate(pVM, pUVM->vm.s.pvDBGC);
2396 pUVM->vm.s.pvDBGC = NULL;
2397#endif
2398 AssertRC(rc);
2399 rc = FTMR3Term(pVM);
2400 AssertRC(rc);
2401 rc = DBGFR3Term(pVM);
2402 AssertRC(rc);
2403 rc = PDMR3Term(pVM);
2404 AssertRC(rc);
2405 rc = IEMR3Term(pVM);
2406 AssertRC(rc);
2407 rc = EMR3Term(pVM);
2408 AssertRC(rc);
2409 rc = IOMR3Term(pVM);
2410 AssertRC(rc);
2411 rc = CSAMR3Term(pVM);
2412 AssertRC(rc);
2413 rc = PATMR3Term(pVM);
2414 AssertRC(rc);
2415 rc = TRPMR3Term(pVM);
2416 AssertRC(rc);
2417 rc = SELMR3Term(pVM);
2418 AssertRC(rc);
2419 rc = REMR3Term(pVM);
2420 AssertRC(rc);
2421 rc = HWACCMR3Term(pVM);
2422 AssertRC(rc);
2423 rc = PGMR3Term(pVM);
2424 AssertRC(rc);
2425 rc = VMMR3Term(pVM); /* Terminates the ring-0 code! */
2426 AssertRC(rc);
2427 rc = CPUMR3Term(pVM);
2428 AssertRC(rc);
2429 SSMR3Term(pVM);
2430 rc = PDMR3CritSectTerm(pVM);
2431 AssertRC(rc);
2432 rc = MMR3Term(pVM);
2433 AssertRC(rc);
2434
2435 /*
2436 * We're done, tell the other EMTs to quit.
2437 */
2438 ASMAtomicUoWriteBool(&pUVM->vm.s.fTerminateEMT, true);
2439 ASMAtomicWriteU32(&pVM->fGlobalForcedActions, VM_FF_CHECK_VM_STATE); /* Can't hurt... */
2440 LogFlow(("vmR3Destroy: returning %Rrc\n", VINF_EM_TERMINATE));
2441 }
2442 return VINF_EM_TERMINATE;
2443}
2444
2445
2446/**
2447 * Destroys the UVM portion.
2448 *
2449 * This is called as the final step in the VM destruction or as the cleanup
2450 * in case of a creation failure.
2451 *
2452 * @param pVM VM Handle.
2453 * @param cMilliesEMTWait The number of milliseconds to wait for the emulation
2454 * threads.
2455 */
2456static void vmR3DestroyUVM(PUVM pUVM, uint32_t cMilliesEMTWait)
2457{
2458 /*
2459 * Signal termination of each the emulation threads and
2460 * wait for them to complete.
2461 */
2462 /* Signal them. */
2463 ASMAtomicUoWriteBool(&pUVM->vm.s.fTerminateEMT, true);
2464 if (pUVM->pVM)
2465 VM_FF_SET(pUVM->pVM, VM_FF_CHECK_VM_STATE); /* Can't hurt... */
2466 for (VMCPUID i = 0; i < pUVM->cCpus; i++)
2467 {
2468 VMR3NotifyGlobalFFU(pUVM, VMNOTIFYFF_FLAGS_DONE_REM);
2469 RTSemEventSignal(pUVM->aCpus[i].vm.s.EventSemWait);
2470 }
2471
2472 /* Wait for them. */
2473 uint64_t NanoTS = RTTimeNanoTS();
2474 RTTHREAD hSelf = RTThreadSelf();
2475 ASMAtomicUoWriteBool(&pUVM->vm.s.fTerminateEMT, true);
2476 for (VMCPUID i = 0; i < pUVM->cCpus; i++)
2477 {
2478 RTTHREAD hThread = pUVM->aCpus[i].vm.s.ThreadEMT;
2479 if ( hThread != NIL_RTTHREAD
2480 && hThread != hSelf)
2481 {
2482 uint64_t cMilliesElapsed = (RTTimeNanoTS() - NanoTS) / 1000000;
2483 int rc2 = RTThreadWait(hThread,
2484 cMilliesElapsed < cMilliesEMTWait
2485 ? RT_MAX(cMilliesEMTWait - cMilliesElapsed, 2000)
2486 : 2000,
2487 NULL);
2488 if (rc2 == VERR_TIMEOUT) /* avoid the assertion when debugging. */
2489 rc2 = RTThreadWait(hThread, 1000, NULL);
2490 AssertLogRelMsgRC(rc2, ("i=%u rc=%Rrc\n", i, rc2));
2491 if (RT_SUCCESS(rc2))
2492 pUVM->aCpus[0].vm.s.ThreadEMT = NIL_RTTHREAD;
2493 }
2494 }
2495
2496 /* Cleanup the semaphores. */
2497 for (VMCPUID i = 0; i < pUVM->cCpus; i++)
2498 {
2499 RTSemEventDestroy(pUVM->aCpus[i].vm.s.EventSemWait);
2500 pUVM->aCpus[i].vm.s.EventSemWait = NIL_RTSEMEVENT;
2501 }
2502
2503 /*
2504 * Free the event semaphores associated with the request packets.
2505 */
2506 unsigned cReqs = 0;
2507 for (unsigned i = 0; i < RT_ELEMENTS(pUVM->vm.s.apReqFree); i++)
2508 {
2509 PVMREQ pReq = pUVM->vm.s.apReqFree[i];
2510 pUVM->vm.s.apReqFree[i] = NULL;
2511 for (; pReq; pReq = pReq->pNext, cReqs++)
2512 {
2513 pReq->enmState = VMREQSTATE_INVALID;
2514 RTSemEventDestroy(pReq->EventSem);
2515 }
2516 }
2517 Assert(cReqs == pUVM->vm.s.cReqFree); NOREF(cReqs);
2518
2519 /*
2520 * Kill all queued requests. (There really shouldn't be any!)
2521 */
2522 for (unsigned i = 0; i < 10; i++)
2523 {
2524 PVMREQ pReqHead = ASMAtomicXchgPtrT(&pUVM->vm.s.pReqs, NULL, PVMREQ);
2525 AssertMsg(!pReqHead, ("This isn't supposed to happen! VMR3Destroy caller has to serialize this.\n"));
2526 if (!pReqHead)
2527 break;
2528 for (PVMREQ pReq = pReqHead; pReq; pReq = pReq->pNext)
2529 {
2530 ASMAtomicUoWriteS32(&pReq->iStatus, VERR_INTERNAL_ERROR);
2531 ASMAtomicWriteSize(&pReq->enmState, VMREQSTATE_INVALID);
2532 RTSemEventSignal(pReq->EventSem);
2533 RTThreadSleep(2);
2534 RTSemEventDestroy(pReq->EventSem);
2535 }
2536 /* give them a chance to respond before we free the request memory. */
2537 RTThreadSleep(32);
2538 }
2539
2540 /*
2541 * Now all queued VCPU requests (again, there shouldn't be any).
2542 */
2543 for (VMCPUID idCpu = 0; idCpu < pUVM->cCpus; idCpu++)
2544 {
2545 PUVMCPU pUVCpu = &pUVM->aCpus[idCpu];
2546
2547 for (unsigned i = 0; i < 10; i++)
2548 {
2549 PVMREQ pReqHead = ASMAtomicXchgPtrT(&pUVCpu->vm.s.pReqs, NULL, PVMREQ);
2550 AssertMsg(!pReqHead, ("This isn't supposed to happen! VMR3Destroy caller has to serialize this.\n"));
2551 if (!pReqHead)
2552 break;
2553 for (PVMREQ pReq = pReqHead; pReq; pReq = pReq->pNext)
2554 {
2555 ASMAtomicUoWriteS32(&pReq->iStatus, VERR_INTERNAL_ERROR);
2556 ASMAtomicWriteSize(&pReq->enmState, VMREQSTATE_INVALID);
2557 RTSemEventSignal(pReq->EventSem);
2558 RTThreadSleep(2);
2559 RTSemEventDestroy(pReq->EventSem);
2560 }
2561 /* give them a chance to respond before we free the request memory. */
2562 RTThreadSleep(32);
2563 }
2564 }
2565
2566 /*
2567 * Make sure the VMMR0.r0 module and whatever else is unloaded.
2568 */
2569 PDMR3TermUVM(pUVM);
2570
2571 /*
2572 * Terminate the support library if initialized.
2573 */
2574 if (pUVM->vm.s.pSession)
2575 {
2576 int rc = SUPR3Term(false /*fForced*/);
2577 AssertRC(rc);
2578 pUVM->vm.s.pSession = NIL_RTR0PTR;
2579 }
2580
2581 /*
2582 * Release the UVM structure reference.
2583 */
2584 VMR3ReleaseUVM(pUVM);
2585
2586 /*
2587 * Clean up and flush logs.
2588 */
2589#ifdef LOG_ENABLED
2590 RTLogSetCustomPrefixCallback(NULL, NULL, NULL);
2591#endif
2592 RTLogFlush(NULL);
2593}
2594
2595
2596/**
2597 * Enumerates the VMs in this process.
2598 *
2599 * @returns Pointer to the next VM.
2600 * @returns NULL when no more VMs.
2601 * @param pVMPrev The previous VM
2602 * Use NULL to start the enumeration.
2603 */
2604VMMR3DECL(PVM) VMR3EnumVMs(PVM pVMPrev)
2605{
2606 /*
2607 * This is quick and dirty. It has issues with VM being
2608 * destroyed during the enumeration.
2609 */
2610 PUVM pNext;
2611 if (pVMPrev)
2612 pNext = pVMPrev->pUVM->pNext;
2613 else
2614 pNext = g_pUVMsHead;
2615 return pNext ? pNext->pVM : NULL;
2616}
2617
2618
2619/**
2620 * Registers an at VM destruction callback.
2621 *
2622 * @returns VBox status code.
2623 * @param pfnAtDtor Pointer to callback.
2624 * @param pvUser User argument.
2625 */
2626VMMR3DECL(int) VMR3AtDtorRegister(PFNVMATDTOR pfnAtDtor, void *pvUser)
2627{
2628 /*
2629 * Check if already registered.
2630 */
2631 VM_ATDTOR_LOCK();
2632 PVMATDTOR pCur = g_pVMAtDtorHead;
2633 while (pCur)
2634 {
2635 if (pfnAtDtor == pCur->pfnAtDtor)
2636 {
2637 VM_ATDTOR_UNLOCK();
2638 AssertMsgFailed(("Already registered at destruction callback %p!\n", pfnAtDtor));
2639 return VERR_INVALID_PARAMETER;
2640 }
2641
2642 /* next */
2643 pCur = pCur->pNext;
2644 }
2645 VM_ATDTOR_UNLOCK();
2646
2647 /*
2648 * Allocate new entry.
2649 */
2650 PVMATDTOR pVMAtDtor = (PVMATDTOR)RTMemAlloc(sizeof(*pVMAtDtor));
2651 if (!pVMAtDtor)
2652 return VERR_NO_MEMORY;
2653
2654 VM_ATDTOR_LOCK();
2655 pVMAtDtor->pfnAtDtor = pfnAtDtor;
2656 pVMAtDtor->pvUser = pvUser;
2657 pVMAtDtor->pNext = g_pVMAtDtorHead;
2658 g_pVMAtDtorHead = pVMAtDtor;
2659 VM_ATDTOR_UNLOCK();
2660
2661 return VINF_SUCCESS;
2662}
2663
2664
2665/**
2666 * Deregisters an at VM destruction callback.
2667 *
2668 * @returns VBox status code.
2669 * @param pfnAtDtor Pointer to callback.
2670 */
2671VMMR3DECL(int) VMR3AtDtorDeregister(PFNVMATDTOR pfnAtDtor)
2672{
2673 /*
2674 * Find it, unlink it and free it.
2675 */
2676 VM_ATDTOR_LOCK();
2677 PVMATDTOR pPrev = NULL;
2678 PVMATDTOR pCur = g_pVMAtDtorHead;
2679 while (pCur)
2680 {
2681 if (pfnAtDtor == pCur->pfnAtDtor)
2682 {
2683 if (pPrev)
2684 pPrev->pNext = pCur->pNext;
2685 else
2686 g_pVMAtDtorHead = pCur->pNext;
2687 pCur->pNext = NULL;
2688 VM_ATDTOR_UNLOCK();
2689
2690 RTMemFree(pCur);
2691 return VINF_SUCCESS;
2692 }
2693
2694 /* next */
2695 pPrev = pCur;
2696 pCur = pCur->pNext;
2697 }
2698 VM_ATDTOR_UNLOCK();
2699
2700 return VERR_INVALID_PARAMETER;
2701}
2702
2703
2704/**
2705 * Walks the list of at VM destructor callbacks.
2706 * @param pVM The VM which is about to be destroyed.
2707 */
2708static void vmR3AtDtor(PVM pVM)
2709{
2710 /*
2711 * Find it, unlink it and free it.
2712 */
2713 VM_ATDTOR_LOCK();
2714 for (PVMATDTOR pCur = g_pVMAtDtorHead; pCur; pCur = pCur->pNext)
2715 pCur->pfnAtDtor(pVM, pCur->pvUser);
2716 VM_ATDTOR_UNLOCK();
2717}
2718
2719
2720/**
2721 * Worker which checks integrity of some internal structures.
2722 * This is yet another attempt to track down that AVL tree crash.
2723 */
2724static void vmR3CheckIntegrity(PVM pVM)
2725{
2726#ifdef VBOX_STRICT
2727 int rc = PGMR3CheckIntegrity(pVM);
2728 AssertReleaseRC(rc);
2729#endif
2730}
2731
2732
2733/**
2734 * EMT rendezvous worker for VMR3Reset.
2735 *
2736 * This is called by the emulation threads as a response to the reset request
2737 * issued by VMR3Reset().
2738 *
2739 * @returns VERR_VM_INVALID_VM_STATE, VINF_EM_RESET or VINF_EM_SUSPEND. (This
2740 * is a strict return code, see FNVMMEMTRENDEZVOUS.)
2741 *
2742 * @param pVM The VM handle.
2743 * @param pVCpu The VMCPU handle of the EMT.
2744 * @param pvUser Ignored.
2745 */
2746static DECLCALLBACK(VBOXSTRICTRC) vmR3Reset(PVM pVM, PVMCPU pVCpu, void *pvUser)
2747{
2748 Assert(!pvUser); NOREF(pvUser);
2749
2750 /*
2751 * The first EMT will try change the state to resetting. If this fails,
2752 * we won't get called for the other EMTs.
2753 */
2754 if (pVCpu->idCpu == pVM->cCpus - 1)
2755 {
2756 int rc = vmR3TrySetState(pVM, "VMR3Reset", 3,
2757 VMSTATE_RESETTING, VMSTATE_RUNNING,
2758 VMSTATE_RESETTING, VMSTATE_SUSPENDED,
2759 VMSTATE_RESETTING_LS, VMSTATE_RUNNING_LS);
2760 if (RT_FAILURE(rc))
2761 return rc;
2762 }
2763
2764 /*
2765 * Check the state.
2766 */
2767 VMSTATE enmVMState = VMR3GetState(pVM);
2768 AssertLogRelMsgReturn( enmVMState == VMSTATE_RESETTING
2769 || enmVMState == VMSTATE_RESETTING_LS,
2770 ("%s\n", VMR3GetStateName(enmVMState)),
2771 VERR_INTERNAL_ERROR_4);
2772
2773 /*
2774 * EMT(0) does the full cleanup *after* all the other EMTs has been
2775 * thru here and been told to enter the EMSTATE_WAIT_SIPI state.
2776 *
2777 * Because there are per-cpu reset routines and order may/is important,
2778 * the following sequence looks a bit ugly...
2779 */
2780 if (pVCpu->idCpu == 0)
2781 vmR3CheckIntegrity(pVM);
2782
2783 /* Reset the VCpu state. */
2784 VMCPU_ASSERT_STATE(pVCpu, VMCPUSTATE_STARTED);
2785
2786 /* Clear all pending forced actions. */
2787 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_ALL_MASK & ~VMCPU_FF_REQUEST);
2788
2789 /*
2790 * Reset the VM components.
2791 */
2792 if (pVCpu->idCpu == 0)
2793 {
2794 PATMR3Reset(pVM);
2795 CSAMR3Reset(pVM);
2796 PGMR3Reset(pVM); /* We clear VM RAM in PGMR3Reset. It's vital PDMR3Reset is executed
2797 * _afterwards_. E.g. ACPI sets up RAM tables during init/reset. */
2798/** @todo PGMR3Reset should be called after PDMR3Reset really, because we'll trash OS <-> hardware
2799 * communication structures residing in RAM when done in the other order. I.e. the device must be
2800 * quiesced first, then we clear the memory and plan tables. Probably have to make these things
2801 * explicit in some way, some memory setup pass or something.
2802 * (Example: DevAHCI may assert if memory is zeroed before it has read the FIS.)
2803 *
2804 * @bugref{4467}
2805 */
2806 MMR3Reset(pVM);
2807 PDMR3Reset(pVM);
2808 SELMR3Reset(pVM);
2809 TRPMR3Reset(pVM);
2810 REMR3Reset(pVM);
2811 IOMR3Reset(pVM);
2812 CPUMR3Reset(pVM);
2813 }
2814 CPUMR3ResetCpu(pVCpu);
2815 if (pVCpu->idCpu == 0)
2816 {
2817 TMR3Reset(pVM);
2818 EMR3Reset(pVM);
2819 HWACCMR3Reset(pVM); /* This must come *after* PATM, CSAM, CPUM, SELM and TRPM. */
2820
2821#ifdef LOG_ENABLED
2822 /*
2823 * Debug logging.
2824 */
2825 RTLogPrintf("\n\nThe VM was reset:\n");
2826 DBGFR3Info(pVM, "cpum", "verbose", NULL);
2827#endif
2828
2829 /*
2830 * Since EMT(0) is the last to go thru here, it will advance the state.
2831 * When a live save is active, we will move on to SuspendingLS but
2832 * leave it for VMR3Reset to do the actual suspending due to deadlock risks.
2833 */
2834 PUVM pUVM = pVM->pUVM;
2835 RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
2836 enmVMState = pVM->enmVMState;
2837 if (enmVMState == VMSTATE_RESETTING)
2838 {
2839 if (pUVM->vm.s.enmPrevVMState == VMSTATE_SUSPENDED)
2840 vmR3SetStateLocked(pVM, pUVM, VMSTATE_SUSPENDED, VMSTATE_RESETTING);
2841 else
2842 vmR3SetStateLocked(pVM, pUVM, VMSTATE_RUNNING, VMSTATE_RESETTING);
2843 }
2844 else
2845 vmR3SetStateLocked(pVM, pUVM, VMSTATE_SUSPENDING_LS, VMSTATE_RESETTING_LS);
2846 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
2847
2848 vmR3CheckIntegrity(pVM);
2849
2850 /*
2851 * Do the suspend bit as well.
2852 * It only requires some EMT(0) work at present.
2853 */
2854 if (enmVMState != VMSTATE_RESETTING)
2855 {
2856 vmR3SuspendDoWork(pVM);
2857 vmR3SetState(pVM, VMSTATE_SUSPENDED_LS, VMSTATE_SUSPENDING_LS);
2858 }
2859 }
2860
2861 return enmVMState == VMSTATE_RESETTING
2862 ? VINF_EM_RESET
2863 : VINF_EM_SUSPEND; /** @todo VINF_EM_SUSPEND has lower priority than VINF_EM_RESET, so fix races. Perhaps add a new code for this combined case. */
2864}
2865
2866
2867/**
2868 * Reset the current VM.
2869 *
2870 * @returns VBox status code.
2871 * @param pVM VM to reset.
2872 */
2873VMMR3DECL(int) VMR3Reset(PVM pVM)
2874{
2875 LogFlow(("VMR3Reset:\n"));
2876 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
2877
2878 /*
2879 * Gather all the EMTs to make sure there are no races before
2880 * changing the VM state.
2881 */
2882 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
2883 vmR3Reset, NULL);
2884 LogFlow(("VMR3Reset: returns %Rrc\n", rc));
2885 return rc;
2886}
2887
2888
2889/**
2890 * Gets the user mode VM structure pointer given the VM handle.
2891 *
2892 * @returns Pointer to the user mode VM structure on success. NULL if @a pVM is
2893 * invalid (asserted).
2894 * @param pVM The VM handle.
2895 * @sa VMR3GetVM, VMR3RetainUVM
2896 */
2897VMMR3DECL(PUVM) VMR3GetUVM(PVM pVM)
2898{
2899 VM_ASSERT_VALID_EXT_RETURN(pVM, NULL);
2900 return pVM->pUVM;
2901}
2902
2903
2904/**
2905 * Gets the shared VM structure pointer given the pointer to the user mode VM
2906 * structure.
2907 *
2908 * @returns Pointer to the shared VM structure.
2909 * NULL if @a pUVM is invalid (asserted) or if no shared VM structure
2910 * is currently associated with it.
2911 * @param pUVM The user mode VM handle.
2912 * @sa VMR3GetUVM
2913 */
2914VMMR3DECL(PVM) VMR3GetVM(PUVM pUVM)
2915{
2916 UVM_ASSERT_VALID_EXT_RETURN(pUVM, NULL);
2917 return pUVM->pVM;
2918}
2919
2920
2921/**
2922 * Retain the user mode VM handle.
2923 *
2924 * @returns Reference count.
2925 * UINT32_MAX if @a pUVM is invalid.
2926 *
2927 * @param pUVM The user mode VM handle.
2928 * @sa VMR3ReleaseUVM
2929 */
2930VMMR3DECL(uint32_t) VMR3RetainUVM(PUVM pUVM)
2931{
2932 UVM_ASSERT_VALID_EXT_RETURN(pUVM, UINT32_MAX);
2933 uint32_t cRefs = ASMAtomicIncU32(&pUVM->vm.s.cUvmRefs);
2934 AssertMsg(cRefs > 0 && cRefs < _64K, ("%u\n", cRefs));
2935 return cRefs;
2936}
2937
2938
2939/**
2940 * Does the final release of the UVM structure.
2941 *
2942 * @param pUVM The user mode VM handle.
2943 */
2944static void vmR3DoReleaseUVM(PUVM pUVM)
2945{
2946 /*
2947 * Free the UVM.
2948 */
2949 Assert(!pUVM->pVM);
2950
2951 MMR3TermUVM(pUVM);
2952 STAMR3TermUVM(pUVM);
2953
2954 ASMAtomicUoWriteU32(&pUVM->u32Magic, UINT32_MAX);
2955 RTTlsFree(pUVM->vm.s.idxTLS);
2956 RTMemPageFree(pUVM, RT_OFFSETOF(UVM, aCpus[pUVM->cCpus]));
2957}
2958
2959
2960/**
2961 * Releases a refernece to the mode VM handle.
2962 *
2963 * @returns The new reference count, 0 if destroyed.
2964 * UINT32_MAX if @a pUVM is invalid.
2965 *
2966 * @param pUVM The user mode VM handle.
2967 * @sa VMR3RetainUVM
2968 */
2969VMMR3DECL(uint32_t) VMR3ReleaseUVM(PUVM pUVM)
2970{
2971 if (!pUVM)
2972 return 0;
2973 UVM_ASSERT_VALID_EXT_RETURN(pUVM, UINT32_MAX);
2974 uint32_t cRefs = ASMAtomicDecU32(&pUVM->vm.s.cUvmRefs);
2975 if (!cRefs)
2976 vmR3DoReleaseUVM(pUVM);
2977 else
2978 AssertMsg(cRefs < _64K, ("%u\n", cRefs));
2979 return cRefs;
2980}
2981
2982
2983/**
2984 * Gets the VM name.
2985 *
2986 * @returns Pointer to a read-only string containing the name. NULL if called
2987 * too early.
2988 * @param pUVM The user mode VM handle.
2989 */
2990VMMR3DECL(const char *) VMR3GetName(PUVM pUVM)
2991{
2992 UVM_ASSERT_VALID_EXT_RETURN(pUVM, NULL);
2993 return pUVM->vm.s.pszName;
2994}
2995
2996
2997/**
2998 * Gets the VM UUID.
2999 *
3000 * @returns pUuid on success, NULL on failure.
3001 * @param pUVM The user mode VM handle.
3002 * @param pUuid Where to store the UUID.
3003 */
3004VMMR3DECL(PRTUUID) VMR3GetUuid(PUVM pUVM, PRTUUID pUuid)
3005{
3006 UVM_ASSERT_VALID_EXT_RETURN(pUVM, NULL);
3007 AssertPtrReturn(pUuid, NULL);
3008
3009 *pUuid = pUVM->vm.s.Uuid;
3010 return pUuid;
3011}
3012
3013
3014/**
3015 * Gets the current VM state.
3016 *
3017 * @returns The current VM state.
3018 * @param pVM VM handle.
3019 * @thread Any
3020 */
3021VMMR3DECL(VMSTATE) VMR3GetState(PVM pVM)
3022{
3023 VM_ASSERT_VALID_EXT_RETURN(pVM, VMSTATE_TERMINATED);
3024 return pVM->enmVMState;
3025}
3026
3027
3028/**
3029 * Gets the current VM state.
3030 *
3031 * @returns The current VM state.
3032 * @param pUVM The user-mode VM handle.
3033 * @thread Any
3034 */
3035VMMR3DECL(VMSTATE) VMR3GetStateU(PUVM pUVM)
3036{
3037 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VMSTATE_TERMINATED);
3038 if (RT_UNLIKELY(!pUVM->pVM))
3039 return VMSTATE_TERMINATED;
3040 return pUVM->pVM->enmVMState;
3041}
3042
3043
3044/**
3045 * Gets the state name string for a VM state.
3046 *
3047 * @returns Pointer to the state name. (readonly)
3048 * @param enmState The state.
3049 */
3050VMMR3DECL(const char *) VMR3GetStateName(VMSTATE enmState)
3051{
3052 switch (enmState)
3053 {
3054 case VMSTATE_CREATING: return "CREATING";
3055 case VMSTATE_CREATED: return "CREATED";
3056 case VMSTATE_LOADING: return "LOADING";
3057 case VMSTATE_POWERING_ON: return "POWERING_ON";
3058 case VMSTATE_RESUMING: return "RESUMING";
3059 case VMSTATE_RUNNING: return "RUNNING";
3060 case VMSTATE_RUNNING_LS: return "RUNNING_LS";
3061 case VMSTATE_RUNNING_FT: return "RUNNING_FT";
3062 case VMSTATE_RESETTING: return "RESETTING";
3063 case VMSTATE_RESETTING_LS: return "RESETTING_LS";
3064 case VMSTATE_SUSPENDED: return "SUSPENDED";
3065 case VMSTATE_SUSPENDED_LS: return "SUSPENDED_LS";
3066 case VMSTATE_SUSPENDED_EXT_LS: return "SUSPENDED_EXT_LS";
3067 case VMSTATE_SUSPENDING: return "SUSPENDING";
3068 case VMSTATE_SUSPENDING_LS: return "SUSPENDING_LS";
3069 case VMSTATE_SUSPENDING_EXT_LS: return "SUSPENDING_EXT_LS";
3070 case VMSTATE_SAVING: return "SAVING";
3071 case VMSTATE_DEBUGGING: return "DEBUGGING";
3072 case VMSTATE_DEBUGGING_LS: return "DEBUGGING_LS";
3073 case VMSTATE_POWERING_OFF: return "POWERING_OFF";
3074 case VMSTATE_POWERING_OFF_LS: return "POWERING_OFF_LS";
3075 case VMSTATE_FATAL_ERROR: return "FATAL_ERROR";
3076 case VMSTATE_FATAL_ERROR_LS: return "FATAL_ERROR_LS";
3077 case VMSTATE_GURU_MEDITATION: return "GURU_MEDITATION";
3078 case VMSTATE_GURU_MEDITATION_LS:return "GURU_MEDITATION_LS";
3079 case VMSTATE_LOAD_FAILURE: return "LOAD_FAILURE";
3080 case VMSTATE_OFF: return "OFF";
3081 case VMSTATE_OFF_LS: return "OFF_LS";
3082 case VMSTATE_DESTROYING: return "DESTROYING";
3083 case VMSTATE_TERMINATED: return "TERMINATED";
3084
3085 default:
3086 AssertMsgFailed(("Unknown state %d\n", enmState));
3087 return "Unknown!\n";
3088 }
3089}
3090
3091
/**
 * Validates the state transition in strict builds.
 *
 * @returns true if valid, false if not.
 *
 * @param   enmStateOld         The old (current) state.
 * @param   enmStateNew         The proposed new state.
 *
 * @remarks The reference for this is found in doc/vp/VMM.vpp, the VMSTATE
 *          diagram (under State Machine Diagram).
 *
 * @remarks In non-strict builds the whole check is compiled out and every
 *          transition is reported as valid.
 */
static bool vmR3ValidateStateTransition(VMSTATE enmStateOld, VMSTATE enmStateNew)
{
#ifdef VBOX_STRICT
    /* One case per source state; each asserts the set of legal destination
       states.  The _LS variants are the live-save/teleportation flavours. */
    switch (enmStateOld)
    {
        case VMSTATE_CREATING:
            AssertMsgReturn(enmStateNew == VMSTATE_CREATED, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_CREATED:
            AssertMsgReturn(   enmStateNew == VMSTATE_LOADING
                            || enmStateNew == VMSTATE_POWERING_ON
                            || enmStateNew == VMSTATE_POWERING_OFF
                            , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_LOADING:
            AssertMsgReturn(   enmStateNew == VMSTATE_SUSPENDED
                            || enmStateNew == VMSTATE_LOAD_FAILURE
                            , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_POWERING_ON:
            AssertMsgReturn(   enmStateNew == VMSTATE_RUNNING
                            /*|| enmStateNew == VMSTATE_FATAL_ERROR ?*/
                            , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_RESUMING:
            AssertMsgReturn(   enmStateNew == VMSTATE_RUNNING
                            /*|| enmStateNew == VMSTATE_FATAL_ERROR ?*/
                            , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_RUNNING:
            AssertMsgReturn(   enmStateNew == VMSTATE_POWERING_OFF
                            || enmStateNew == VMSTATE_SUSPENDING
                            || enmStateNew == VMSTATE_RESETTING
                            || enmStateNew == VMSTATE_RUNNING_LS
                            || enmStateNew == VMSTATE_RUNNING_FT
                            || enmStateNew == VMSTATE_DEBUGGING
                            || enmStateNew == VMSTATE_FATAL_ERROR
                            || enmStateNew == VMSTATE_GURU_MEDITATION
                            , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_RUNNING_LS:
            AssertMsgReturn(   enmStateNew == VMSTATE_POWERING_OFF_LS
                            || enmStateNew == VMSTATE_SUSPENDING_LS
                            || enmStateNew == VMSTATE_SUSPENDING_EXT_LS
                            || enmStateNew == VMSTATE_RESETTING_LS
                            || enmStateNew == VMSTATE_RUNNING
                            || enmStateNew == VMSTATE_DEBUGGING_LS
                            || enmStateNew == VMSTATE_FATAL_ERROR_LS
                            || enmStateNew == VMSTATE_GURU_MEDITATION_LS
                            , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_RUNNING_FT:
            AssertMsgReturn(   enmStateNew == VMSTATE_POWERING_OFF
                            || enmStateNew == VMSTATE_FATAL_ERROR
                            || enmStateNew == VMSTATE_GURU_MEDITATION
                            , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_RESETTING:
            AssertMsgReturn(enmStateNew == VMSTATE_RUNNING, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_RESETTING_LS:
            AssertMsgReturn(   enmStateNew == VMSTATE_SUSPENDING_LS
                            , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_SUSPENDING:
            AssertMsgReturn(enmStateNew == VMSTATE_SUSPENDED, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_SUSPENDING_LS:
            AssertMsgReturn(   enmStateNew == VMSTATE_SUSPENDING
                            || enmStateNew == VMSTATE_SUSPENDED_LS
                            , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_SUSPENDING_EXT_LS:
            AssertMsgReturn(   enmStateNew == VMSTATE_SUSPENDING
                            || enmStateNew == VMSTATE_SUSPENDED_EXT_LS
                            , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_SUSPENDED:
            AssertMsgReturn(   enmStateNew == VMSTATE_POWERING_OFF
                            || enmStateNew == VMSTATE_SAVING
                            || enmStateNew == VMSTATE_RESETTING
                            || enmStateNew == VMSTATE_RESUMING
                            || enmStateNew == VMSTATE_LOADING
                            , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_SUSPENDED_LS:
            AssertMsgReturn(   enmStateNew == VMSTATE_SUSPENDED
                            || enmStateNew == VMSTATE_SAVING
                            , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_SUSPENDED_EXT_LS:
            AssertMsgReturn(   enmStateNew == VMSTATE_SUSPENDED
                            || enmStateNew == VMSTATE_SAVING
                            , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_SAVING:
            AssertMsgReturn(enmStateNew == VMSTATE_SUSPENDED, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_DEBUGGING:
            AssertMsgReturn(   enmStateNew == VMSTATE_RUNNING
                            || enmStateNew == VMSTATE_POWERING_OFF
                            , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_DEBUGGING_LS:
            AssertMsgReturn(   enmStateNew == VMSTATE_DEBUGGING
                            || enmStateNew == VMSTATE_RUNNING_LS
                            || enmStateNew == VMSTATE_POWERING_OFF_LS
                            , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_POWERING_OFF:
            AssertMsgReturn(enmStateNew == VMSTATE_OFF, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_POWERING_OFF_LS:
            AssertMsgReturn(   enmStateNew == VMSTATE_POWERING_OFF
                            || enmStateNew == VMSTATE_OFF_LS
                            , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_OFF:
            AssertMsgReturn(enmStateNew == VMSTATE_DESTROYING, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_OFF_LS:
            AssertMsgReturn(enmStateNew == VMSTATE_OFF, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_FATAL_ERROR:
            AssertMsgReturn(enmStateNew == VMSTATE_POWERING_OFF, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_FATAL_ERROR_LS:
            AssertMsgReturn(   enmStateNew == VMSTATE_FATAL_ERROR
                            || enmStateNew == VMSTATE_POWERING_OFF_LS
                            , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_GURU_MEDITATION:
            AssertMsgReturn(   enmStateNew == VMSTATE_DEBUGGING
                            || enmStateNew == VMSTATE_POWERING_OFF
                            , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_GURU_MEDITATION_LS:
            AssertMsgReturn(   enmStateNew == VMSTATE_GURU_MEDITATION
                            || enmStateNew == VMSTATE_DEBUGGING_LS
                            || enmStateNew == VMSTATE_POWERING_OFF_LS
                            , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_LOAD_FAILURE:
            AssertMsgReturn(enmStateNew == VMSTATE_POWERING_OFF, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_DESTROYING:
            AssertMsgReturn(enmStateNew == VMSTATE_TERMINATED, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        /* TERMINATED is final - no transitions out of it are legal. */
        case VMSTATE_TERMINATED:
        default:
            AssertMsgFailedReturn(("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;
    }
#endif /* VBOX_STRICT */
    return true;
}
3288
3289
3290/**
3291 * Does the state change callouts.
3292 *
3293 * The caller owns the AtStateCritSect.
3294 *
3295 * @param pVM The VM handle.
3296 * @param pUVM The UVM handle.
3297 * @param enmStateNew The New state.
3298 * @param enmStateOld The old state.
3299 */
3300static void vmR3DoAtState(PVM pVM, PUVM pUVM, VMSTATE enmStateNew, VMSTATE enmStateOld)
3301{
3302 LogRel(("Changing the VM state from '%s' to '%s'.\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)));
3303
3304 for (PVMATSTATE pCur = pUVM->vm.s.pAtState; pCur; pCur = pCur->pNext)
3305 {
3306 pCur->pfnAtState(pVM, enmStateNew, enmStateOld, pCur->pvUser);
3307 if ( enmStateNew != VMSTATE_DESTROYING
3308 && pVM->enmVMState == VMSTATE_DESTROYING)
3309 break;
3310 AssertMsg(pVM->enmVMState == enmStateNew,
3311 ("You are not allowed to change the state while in the change callback, except "
3312 "from destroying the VM. There are restrictions in the way the state changes "
3313 "are propagated up to the EM execution loop and it makes the program flow very "
3314 "difficult to follow. (%s, expected %s, old %s)\n",
3315 VMR3GetStateName(pVM->enmVMState), VMR3GetStateName(enmStateNew),
3316 VMR3GetStateName(enmStateOld)));
3317 }
3318}
3319
3320
3321/**
3322 * Sets the current VM state, with the AtStatCritSect already entered.
3323 *
3324 * @param pVM The VM handle.
3325 * @param pUVM The UVM handle.
3326 * @param enmStateNew The new state.
3327 * @param enmStateOld The old state.
3328 */
3329static void vmR3SetStateLocked(PVM pVM, PUVM pUVM, VMSTATE enmStateNew, VMSTATE enmStateOld)
3330{
3331 vmR3ValidateStateTransition(enmStateOld, enmStateNew);
3332
3333 AssertMsg(pVM->enmVMState == enmStateOld,
3334 ("%s != %s\n", VMR3GetStateName(pVM->enmVMState), VMR3GetStateName(enmStateOld)));
3335 pUVM->vm.s.enmPrevVMState = enmStateOld;
3336 pVM->enmVMState = enmStateNew;
3337 VM_FF_CLEAR(pVM, VM_FF_CHECK_VM_STATE);
3338
3339 vmR3DoAtState(pVM, pUVM, enmStateNew, enmStateOld);
3340}
3341
3342
3343/**
3344 * Sets the current VM state.
3345 *
3346 * @param pVM VM handle.
3347 * @param enmStateNew The new state.
3348 * @param enmStateOld The old state (for asserting only).
3349 */
3350static void vmR3SetState(PVM pVM, VMSTATE enmStateNew, VMSTATE enmStateOld)
3351{
3352 PUVM pUVM = pVM->pUVM;
3353 RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
3354
3355 AssertMsg(pVM->enmVMState == enmStateOld,
3356 ("%s != %s\n", VMR3GetStateName(pVM->enmVMState), VMR3GetStateName(enmStateOld)));
3357 vmR3SetStateLocked(pVM, pUVM, enmStateNew, pVM->enmVMState);
3358
3359 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
3360}
3361
3362
3363/**
3364 * Tries to perform a state transition.
3365 *
3366 * @returns The 1-based ordinal of the succeeding transition.
3367 * VERR_VM_INVALID_VM_STATE and Assert+LogRel on failure.
3368 *
3369 * @param pVM The VM handle.
3370 * @param pszWho Who is trying to change it.
3371 * @param cTransitions The number of transitions in the ellipsis.
3372 * @param ... Transition pairs; new, old.
3373 */
3374static int vmR3TrySetState(PVM pVM, const char *pszWho, unsigned cTransitions, ...)
3375{
3376 va_list va;
3377 VMSTATE enmStateNew = VMSTATE_CREATED;
3378 VMSTATE enmStateOld = VMSTATE_CREATED;
3379
3380#ifdef VBOX_STRICT
3381 /*
3382 * Validate the input first.
3383 */
3384 va_start(va, cTransitions);
3385 for (unsigned i = 0; i < cTransitions; i++)
3386 {
3387 enmStateNew = (VMSTATE)va_arg(va, /*VMSTATE*/int);
3388 enmStateOld = (VMSTATE)va_arg(va, /*VMSTATE*/int);
3389 vmR3ValidateStateTransition(enmStateOld, enmStateNew);
3390 }
3391 va_end(va);
3392#endif
3393
3394 /*
3395 * Grab the lock and see if any of the proposed transitions works out.
3396 */
3397 va_start(va, cTransitions);
3398 int rc = VERR_VM_INVALID_VM_STATE;
3399 PUVM pUVM = pVM->pUVM;
3400 RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
3401
3402 VMSTATE enmStateCur = pVM->enmVMState;
3403
3404 for (unsigned i = 0; i < cTransitions; i++)
3405 {
3406 enmStateNew = (VMSTATE)va_arg(va, /*VMSTATE*/int);
3407 enmStateOld = (VMSTATE)va_arg(va, /*VMSTATE*/int);
3408 if (enmStateCur == enmStateOld)
3409 {
3410 vmR3SetStateLocked(pVM, pUVM, enmStateNew, enmStateOld);
3411 rc = i + 1;
3412 break;
3413 }
3414 }
3415
3416 if (RT_FAILURE(rc))
3417 {
3418 /*
3419 * Complain about it.
3420 */
3421 if (cTransitions == 1)
3422 {
3423 LogRel(("%s: %s -> %s failed, because the VM state is actually %s\n",
3424 pszWho, VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew), VMR3GetStateName(enmStateCur)));
3425 VMSetError(pVM, VERR_VM_INVALID_VM_STATE, RT_SRC_POS,
3426 N_("%s failed because the VM state is %s instead of %s"),
3427 pszWho, VMR3GetStateName(enmStateCur), VMR3GetStateName(enmStateOld));
3428 AssertMsgFailed(("%s: %s -> %s failed, because the VM state is actually %s\n",
3429 pszWho, VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew), VMR3GetStateName(enmStateCur)));
3430 }
3431 else
3432 {
3433 va_end(va);
3434 va_start(va, cTransitions);
3435 LogRel(("%s:\n", pszWho));
3436 for (unsigned i = 0; i < cTransitions; i++)
3437 {
3438 enmStateNew = (VMSTATE)va_arg(va, /*VMSTATE*/int);
3439 enmStateOld = (VMSTATE)va_arg(va, /*VMSTATE*/int);
3440 LogRel(("%s%s -> %s",
3441 i ? ", " : " ", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)));
3442 }
3443 LogRel((" failed, because the VM state is actually %s\n", VMR3GetStateName(enmStateCur)));
3444 VMSetError(pVM, VERR_VM_INVALID_VM_STATE, RT_SRC_POS,
3445 N_("%s failed because the current VM state, %s, was not found in the state transition table"),
3446 pszWho, VMR3GetStateName(enmStateCur), VMR3GetStateName(enmStateOld));
3447 AssertMsgFailed(("%s - state=%s, see release log for full details. Check the cTransitions passed us.\n",
3448 pszWho, VMR3GetStateName(enmStateCur)));
3449 }
3450 }
3451
3452 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
3453 va_end(va);
3454 Assert(rc > 0 || rc < 0);
3455 return rc;
3456}
3457
3458
3459/**
3460 * Flag a guru meditation ... a hack.
3461 *
3462 * @param pVM The VM handle
3463 *
3464 * @todo Rewrite this part. The guru meditation should be flagged
3465 * immediately by the VMM and not by VMEmt.cpp when it's all over.
3466 */
3467void vmR3SetGuruMeditation(PVM pVM)
3468{
3469 PUVM pUVM = pVM->pUVM;
3470 RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
3471
3472 VMSTATE enmStateCur = pVM->enmVMState;
3473 if (enmStateCur == VMSTATE_RUNNING)
3474 vmR3SetStateLocked(pVM, pUVM, VMSTATE_GURU_MEDITATION, VMSTATE_RUNNING);
3475 else if (enmStateCur == VMSTATE_RUNNING_LS)
3476 {
3477 vmR3SetStateLocked(pVM, pUVM, VMSTATE_GURU_MEDITATION_LS, VMSTATE_RUNNING_LS);
3478 SSMR3Cancel(pVM);
3479 }
3480
3481 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
3482}
3483
3484
3485/**
3486 * Called by vmR3EmulationThreadWithId just before the VM structure is freed.
3487 *
3488 * @param pVM The VM handle.
3489 */
3490void vmR3SetTerminated(PVM pVM)
3491{
3492 vmR3SetState(pVM, VMSTATE_TERMINATED, VMSTATE_DESTROYING);
3493}
3494
3495
3496/**
3497 * Checks if the VM was teleported and hasn't been fully resumed yet.
3498 *
3499 * This applies to both sides of the teleportation since we may leave a working
3500 * clone behind and the user is allowed to resume this...
3501 *
3502 * @returns true / false.
3503 * @param pVM The VM handle.
3504 * @thread Any thread.
3505 */
3506VMMR3DECL(bool) VMR3TeleportedAndNotFullyResumedYet(PVM pVM)
3507{
3508 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
3509 return pVM->vm.s.fTeleportedAndNotFullyResumedYet;
3510}
3511
3512
3513/**
3514 * Registers a VM state change callback.
3515 *
3516 * You are not allowed to call any function which changes the VM state from a
3517 * state callback.
3518 *
3519 * @returns VBox status code.
3520 * @param pVM VM handle.
3521 * @param pfnAtState Pointer to callback.
3522 * @param pvUser User argument.
3523 * @thread Any.
3524 */
3525VMMR3DECL(int) VMR3AtStateRegister(PVM pVM, PFNVMATSTATE pfnAtState, void *pvUser)
3526{
3527 LogFlow(("VMR3AtStateRegister: pfnAtState=%p pvUser=%p\n", pfnAtState, pvUser));
3528
3529 /*
3530 * Validate input.
3531 */
3532 AssertPtrReturn(pfnAtState, VERR_INVALID_PARAMETER);
3533 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
3534
3535 /*
3536 * Allocate a new record.
3537 */
3538 PUVM pUVM = pVM->pUVM;
3539 PVMATSTATE pNew = (PVMATSTATE)MMR3HeapAllocU(pUVM, MM_TAG_VM, sizeof(*pNew));
3540 if (!pNew)
3541 return VERR_NO_MEMORY;
3542
3543 /* fill */
3544 pNew->pfnAtState = pfnAtState;
3545 pNew->pvUser = pvUser;
3546
3547 /* insert */
3548 RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
3549 pNew->pNext = *pUVM->vm.s.ppAtStateNext;
3550 *pUVM->vm.s.ppAtStateNext = pNew;
3551 pUVM->vm.s.ppAtStateNext = &pNew->pNext;
3552 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
3553
3554 return VINF_SUCCESS;
3555}
3556
3557
3558/**
3559 * Deregisters a VM state change callback.
3560 *
3561 * @returns VBox status code.
3562 * @param pVM VM handle.
3563 * @param pfnAtState Pointer to callback.
3564 * @param pvUser User argument.
3565 * @thread Any.
3566 */
3567VMMR3DECL(int) VMR3AtStateDeregister(PVM pVM, PFNVMATSTATE pfnAtState, void *pvUser)
3568{
3569 LogFlow(("VMR3AtStateDeregister: pfnAtState=%p pvUser=%p\n", pfnAtState, pvUser));
3570
3571 /*
3572 * Validate input.
3573 */
3574 AssertPtrReturn(pfnAtState, VERR_INVALID_PARAMETER);
3575 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
3576
3577 PUVM pUVM = pVM->pUVM;
3578 RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
3579
3580 /*
3581 * Search the list for the entry.
3582 */
3583 PVMATSTATE pPrev = NULL;
3584 PVMATSTATE pCur = pUVM->vm.s.pAtState;
3585 while ( pCur
3586 && ( pCur->pfnAtState != pfnAtState
3587 || pCur->pvUser != pvUser))
3588 {
3589 pPrev = pCur;
3590 pCur = pCur->pNext;
3591 }
3592 if (!pCur)
3593 {
3594 AssertMsgFailed(("pfnAtState=%p was not found\n", pfnAtState));
3595 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
3596 return VERR_FILE_NOT_FOUND;
3597 }
3598
3599 /*
3600 * Unlink it.
3601 */
3602 if (pPrev)
3603 {
3604 pPrev->pNext = pCur->pNext;
3605 if (!pCur->pNext)
3606 pUVM->vm.s.ppAtStateNext = &pPrev->pNext;
3607 }
3608 else
3609 {
3610 pUVM->vm.s.pAtState = pCur->pNext;
3611 if (!pCur->pNext)
3612 pUVM->vm.s.ppAtStateNext = &pUVM->vm.s.pAtState;
3613 }
3614
3615 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
3616
3617 /*
3618 * Free it.
3619 */
3620 pCur->pfnAtState = NULL;
3621 pCur->pNext = NULL;
3622 MMR3HeapFree(pCur);
3623
3624 return VINF_SUCCESS;
3625}
3626
3627
3628/**
3629 * Registers a VM error callback.
3630 *
3631 * @returns VBox status code.
3632 * @param pVM The VM handle.
3633 * @param pfnAtError Pointer to callback.
3634 * @param pvUser User argument.
3635 * @thread Any.
3636 */
3637VMMR3DECL(int) VMR3AtErrorRegister(PVM pVM, PFNVMATERROR pfnAtError, void *pvUser)
3638{
3639 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
3640 return VMR3AtErrorRegisterU(pVM->pUVM, pfnAtError, pvUser);
3641}
3642
3643
3644/**
3645 * Registers a VM error callback.
3646 *
3647 * @returns VBox status code.
3648 * @param pUVM The VM handle.
3649 * @param pfnAtError Pointer to callback.
3650 * @param pvUser User argument.
3651 * @thread Any.
3652 */
3653VMMR3DECL(int) VMR3AtErrorRegisterU(PUVM pUVM, PFNVMATERROR pfnAtError, void *pvUser)
3654{
3655 LogFlow(("VMR3AtErrorRegister: pfnAtError=%p pvUser=%p\n", pfnAtError, pvUser));
3656
3657 /*
3658 * Validate input.
3659 */
3660 AssertPtrReturn(pfnAtError, VERR_INVALID_PARAMETER);
3661 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
3662
3663 /*
3664 * Allocate a new record.
3665 */
3666 PVMATERROR pNew = (PVMATERROR)MMR3HeapAllocU(pUVM, MM_TAG_VM, sizeof(*pNew));
3667 if (!pNew)
3668 return VERR_NO_MEMORY;
3669
3670 /* fill */
3671 pNew->pfnAtError = pfnAtError;
3672 pNew->pvUser = pvUser;
3673
3674 /* insert */
3675 RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
3676 pNew->pNext = *pUVM->vm.s.ppAtErrorNext;
3677 *pUVM->vm.s.ppAtErrorNext = pNew;
3678 pUVM->vm.s.ppAtErrorNext = &pNew->pNext;
3679 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
3680
3681 return VINF_SUCCESS;
3682}
3683
3684
3685/**
3686 * Deregisters a VM error callback.
3687 *
3688 * @returns VBox status code.
3689 * @param pVM The VM handle.
3690 * @param pfnAtError Pointer to callback.
3691 * @param pvUser User argument.
3692 * @thread Any.
3693 */
3694VMMR3DECL(int) VMR3AtErrorDeregister(PVM pVM, PFNVMATERROR pfnAtError, void *pvUser)
3695{
3696 LogFlow(("VMR3AtErrorDeregister: pfnAtError=%p pvUser=%p\n", pfnAtError, pvUser));
3697
3698 /*
3699 * Validate input.
3700 */
3701 AssertPtrReturn(pfnAtError, VERR_INVALID_PARAMETER);
3702 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
3703
3704 PUVM pUVM = pVM->pUVM;
3705 RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
3706
3707 /*
3708 * Search the list for the entry.
3709 */
3710 PVMATERROR pPrev = NULL;
3711 PVMATERROR pCur = pUVM->vm.s.pAtError;
3712 while ( pCur
3713 && ( pCur->pfnAtError != pfnAtError
3714 || pCur->pvUser != pvUser))
3715 {
3716 pPrev = pCur;
3717 pCur = pCur->pNext;
3718 }
3719 if (!pCur)
3720 {
3721 AssertMsgFailed(("pfnAtError=%p was not found\n", pfnAtError));
3722 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
3723 return VERR_FILE_NOT_FOUND;
3724 }
3725
3726 /*
3727 * Unlink it.
3728 */
3729 if (pPrev)
3730 {
3731 pPrev->pNext = pCur->pNext;
3732 if (!pCur->pNext)
3733 pUVM->vm.s.ppAtErrorNext = &pPrev->pNext;
3734 }
3735 else
3736 {
3737 pUVM->vm.s.pAtError = pCur->pNext;
3738 if (!pCur->pNext)
3739 pUVM->vm.s.ppAtErrorNext = &pUVM->vm.s.pAtError;
3740 }
3741
3742 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
3743
3744 /*
3745 * Free it.
3746 */
3747 pCur->pfnAtError = NULL;
3748 pCur->pNext = NULL;
3749 MMR3HeapFree(pCur);
3750
3751 return VINF_SUCCESS;
3752}
3753
3754
3755/**
3756 * Ellipsis to va_list wrapper for calling pfnAtError.
3757 */
3758static void vmR3SetErrorWorkerDoCall(PVM pVM, PVMATERROR pCur, int rc, RT_SRC_POS_DECL, const char *pszFormat, ...)
3759{
3760 va_list va;
3761 va_start(va, pszFormat);
3762 pCur->pfnAtError(pVM, pCur->pvUser, rc, RT_SRC_POS_ARGS, pszFormat, va);
3763 va_end(va);
3764}
3765
3766
3767/**
3768 * This is a worker function for GC and Ring-0 calls to VMSetError and VMSetErrorV.
3769 * The message is found in VMINT.
3770 *
3771 * @param pVM The VM handle.
3772 * @thread EMT.
3773 */
3774VMMR3DECL(void) VMR3SetErrorWorker(PVM pVM)
3775{
3776 VM_ASSERT_EMT(pVM);
3777 AssertReleaseMsgFailed(("And we have a winner! You get to implement Ring-0 and GC VMSetErrorV! Contracts!\n"));
3778
3779 /*
3780 * Unpack the error (if we managed to format one).
3781 */
3782 PVMERROR pErr = pVM->vm.s.pErrorR3;
3783 const char *pszFile = NULL;
3784 const char *pszFunction = NULL;
3785 uint32_t iLine = 0;
3786 const char *pszMessage;
3787 int32_t rc = VERR_MM_HYPER_NO_MEMORY;
3788 if (pErr)
3789 {
3790 AssertCompile(sizeof(const char) == sizeof(uint8_t));
3791 if (pErr->offFile)
3792 pszFile = (const char *)pErr + pErr->offFile;
3793 iLine = pErr->iLine;
3794 if (pErr->offFunction)
3795 pszFunction = (const char *)pErr + pErr->offFunction;
3796 if (pErr->offMessage)
3797 pszMessage = (const char *)pErr + pErr->offMessage;
3798 else
3799 pszMessage = "No message!";
3800 }
3801 else
3802 pszMessage = "No message! (Failed to allocate memory to put the error message in!)";
3803
3804 /*
3805 * Call the at error callbacks.
3806 */
3807 PUVM pUVM = pVM->pUVM;
3808 RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
3809 ASMAtomicIncU32(&pUVM->vm.s.cRuntimeErrors);
3810 for (PVMATERROR pCur = pUVM->vm.s.pAtError; pCur; pCur = pCur->pNext)
3811 vmR3SetErrorWorkerDoCall(pVM, pCur, rc, RT_SRC_POS_ARGS, "%s", pszMessage);
3812 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
3813}
3814
3815
3816/**
3817 * Gets the number of errors raised via VMSetError.
3818 *
3819 * This can be used avoid double error messages.
3820 *
3821 * @returns The error count.
3822 * @param pVM The VM handle.
3823 */
3824VMMR3DECL(uint32_t) VMR3GetErrorCount(PVM pVM)
3825{
3826 AssertPtrReturn(pVM, 0);
3827 return VMR3GetErrorCountU(pVM->pUVM);
3828}
3829
3830
3831/**
3832 * Gets the number of errors raised via VMSetError.
3833 *
3834 * This can be used avoid double error messages.
3835 *
3836 * @returns The error count.
3837 * @param pVM The VM handle.
3838 */
3839VMMR3DECL(uint32_t) VMR3GetErrorCountU(PUVM pUVM)
3840{
3841 AssertPtrReturn(pUVM, 0);
3842 AssertReturn(pUVM->u32Magic == UVM_MAGIC, 0);
3843 return pUVM->vm.s.cErrors;
3844}
3845
3846
3847/**
3848 * Creation time wrapper for vmR3SetErrorUV.
3849 *
3850 * @returns rc.
3851 * @param pUVM Pointer to the user mode VM structure.
3852 * @param rc The VBox status code.
3853 * @param RT_SRC_POS_DECL The source position of this error.
3854 * @param pszFormat Format string.
3855 * @param ... The arguments.
3856 * @thread Any thread.
3857 */
3858static int vmR3SetErrorU(PUVM pUVM, int rc, RT_SRC_POS_DECL, const char *pszFormat, ...)
3859{
3860 va_list va;
3861 va_start(va, pszFormat);
3862 vmR3SetErrorUV(pUVM, rc, pszFile, iLine, pszFunction, pszFormat, &va);
3863 va_end(va);
3864 return rc;
3865}
3866
3867
3868/**
3869 * Worker which calls everyone listening to the VM error messages.
3870 *
3871 * @param pUVM Pointer to the user mode VM structure.
3872 * @param rc The VBox status code.
3873 * @param RT_SRC_POS_DECL The source position of this error.
3874 * @param pszFormat Format string.
3875 * @param pArgs Pointer to the format arguments.
3876 * @thread EMT
3877 */
3878DECLCALLBACK(void) vmR3SetErrorUV(PUVM pUVM, int rc, RT_SRC_POS_DECL, const char *pszFormat, va_list *pArgs)
3879{
3880 /*
3881 * Log the error.
3882 */
3883 va_list va3;
3884 va_copy(va3, *pArgs);
3885 RTLogRelPrintf("VMSetError: %s(%d) %s; rc=%Rrc\n"
3886 "VMSetError: %N\n",
3887 pszFile, iLine, pszFunction, rc,
3888 pszFormat, &va3);
3889 va_end(va3);
3890
3891#ifdef LOG_ENABLED
3892 va_copy(va3, *pArgs);
3893 RTLogPrintf("VMSetError: %s(%d) %s; rc=%Rrc\n"
3894 "%N\n",
3895 pszFile, iLine, pszFunction, rc,
3896 pszFormat, &va3);
3897 va_end(va3);
3898#endif
3899
3900 /*
3901 * Make a copy of the message.
3902 */
3903 if (pUVM->pVM)
3904 vmSetErrorCopy(pUVM->pVM, rc, RT_SRC_POS_ARGS, pszFormat, *pArgs);
3905
3906 /*
3907 * Call the at error callbacks.
3908 */
3909 bool fCalledSomeone = false;
3910 RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
3911 ASMAtomicIncU32(&pUVM->vm.s.cErrors);
3912 for (PVMATERROR pCur = pUVM->vm.s.pAtError; pCur; pCur = pCur->pNext)
3913 {
3914 va_list va2;
3915 va_copy(va2, *pArgs);
3916 pCur->pfnAtError(pUVM->pVM, pCur->pvUser, rc, RT_SRC_POS_ARGS, pszFormat, va2);
3917 va_end(va2);
3918 fCalledSomeone = true;
3919 }
3920 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
3921}
3922
3923
3924/**
3925 * Registers a VM runtime error callback.
3926 *
3927 * @returns VBox status code.
3928 * @param pVM The VM handle.
3929 * @param pfnAtRuntimeError Pointer to callback.
3930 * @param pvUser User argument.
3931 * @thread Any.
3932 */
3933VMMR3DECL(int) VMR3AtRuntimeErrorRegister(PVM pVM, PFNVMATRUNTIMEERROR pfnAtRuntimeError, void *pvUser)
3934{
3935 LogFlow(("VMR3AtRuntimeErrorRegister: pfnAtRuntimeError=%p pvUser=%p\n", pfnAtRuntimeError, pvUser));
3936
3937 /*
3938 * Validate input.
3939 */
3940 AssertPtrReturn(pfnAtRuntimeError, VERR_INVALID_PARAMETER);
3941 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
3942
3943 /*
3944 * Allocate a new record.
3945 */
3946 PUVM pUVM = pVM->pUVM;
3947 PVMATRUNTIMEERROR pNew = (PVMATRUNTIMEERROR)MMR3HeapAllocU(pUVM, MM_TAG_VM, sizeof(*pNew));
3948 if (!pNew)
3949 return VERR_NO_MEMORY;
3950
3951 /* fill */
3952 pNew->pfnAtRuntimeError = pfnAtRuntimeError;
3953 pNew->pvUser = pvUser;
3954
3955 /* insert */
3956 RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
3957 pNew->pNext = *pUVM->vm.s.ppAtRuntimeErrorNext;
3958 *pUVM->vm.s.ppAtRuntimeErrorNext = pNew;
3959 pUVM->vm.s.ppAtRuntimeErrorNext = &pNew->pNext;
3960 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
3961
3962 return VINF_SUCCESS;
3963}
3964
3965
3966/**
3967 * Deregisters a VM runtime error callback.
3968 *
3969 * @returns VBox status code.
3970 * @param pVM The VM handle.
3971 * @param pfnAtRuntimeError Pointer to callback.
3972 * @param pvUser User argument.
3973 * @thread Any.
3974 */
3975VMMR3DECL(int) VMR3AtRuntimeErrorDeregister(PVM pVM, PFNVMATRUNTIMEERROR pfnAtRuntimeError, void *pvUser)
3976{
3977 LogFlow(("VMR3AtRuntimeErrorDeregister: pfnAtRuntimeError=%p pvUser=%p\n", pfnAtRuntimeError, pvUser));
3978
3979 /*
3980 * Validate input.
3981 */
3982 AssertPtrReturn(pfnAtRuntimeError, VERR_INVALID_PARAMETER);
3983 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
3984
3985 PUVM pUVM = pVM->pUVM;
3986 RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
3987
3988 /*
3989 * Search the list for the entry.
3990 */
3991 PVMATRUNTIMEERROR pPrev = NULL;
3992 PVMATRUNTIMEERROR pCur = pUVM->vm.s.pAtRuntimeError;
3993 while ( pCur
3994 && ( pCur->pfnAtRuntimeError != pfnAtRuntimeError
3995 || pCur->pvUser != pvUser))
3996 {
3997 pPrev = pCur;
3998 pCur = pCur->pNext;
3999 }
4000 if (!pCur)
4001 {
4002 AssertMsgFailed(("pfnAtRuntimeError=%p was not found\n", pfnAtRuntimeError));
4003 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
4004 return VERR_FILE_NOT_FOUND;
4005 }
4006
4007 /*
4008 * Unlink it.
4009 */
4010 if (pPrev)
4011 {
4012 pPrev->pNext = pCur->pNext;
4013 if (!pCur->pNext)
4014 pUVM->vm.s.ppAtRuntimeErrorNext = &pPrev->pNext;
4015 }
4016 else
4017 {
4018 pUVM->vm.s.pAtRuntimeError = pCur->pNext;
4019 if (!pCur->pNext)
4020 pUVM->vm.s.ppAtRuntimeErrorNext = &pUVM->vm.s.pAtRuntimeError;
4021 }
4022
4023 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
4024
4025 /*
4026 * Free it.
4027 */
4028 pCur->pfnAtRuntimeError = NULL;
4029 pCur->pNext = NULL;
4030 MMR3HeapFree(pCur);
4031
4032 return VINF_SUCCESS;
4033}
4034
4035
4036/**
4037 * EMT rendezvous worker that vmR3SetRuntimeErrorCommon uses to safely change
4038 * the state to FatalError(LS).
4039 *
4040 * @returns VERR_VM_INVALID_VM_STATE or VINF_EM_SUSPEND. (This is a strict
4041 * return code, see FNVMMEMTRENDEZVOUS.)
4042 *
4043 * @param pVM The VM handle.
4044 * @param pVCpu The VMCPU handle of the EMT.
4045 * @param pvUser Ignored.
4046 */
4047static DECLCALLBACK(VBOXSTRICTRC) vmR3SetRuntimeErrorChangeState(PVM pVM, PVMCPU pVCpu, void *pvUser)
4048{
4049 NOREF(pVCpu);
4050 Assert(!pvUser); NOREF(pvUser);
4051
4052 /*
4053 * The first EMT thru here changes the state.
4054 */
4055 if (pVCpu->idCpu == pVM->cCpus - 1)
4056 {
4057 int rc = vmR3TrySetState(pVM, "VMSetRuntimeError", 2,
4058 VMSTATE_FATAL_ERROR, VMSTATE_RUNNING,
4059 VMSTATE_FATAL_ERROR_LS, VMSTATE_RUNNING_LS);
4060 if (RT_FAILURE(rc))
4061 return rc;
4062 if (rc == 2)
4063 SSMR3Cancel(pVM);
4064
4065 VM_FF_SET(pVM, VM_FF_CHECK_VM_STATE);
4066 }
4067
4068 /* This'll make sure we get out of whereever we are (e.g. REM). */
4069 return VINF_EM_SUSPEND;
4070}
4071
4072
4073/**
4074 * Worker for VMR3SetRuntimeErrorWorker and vmR3SetRuntimeErrorV.
4075 *
4076 * This does the common parts after the error has been saved / retrieved.
4077 *
4078 * @returns VBox status code with modifications, see VMSetRuntimeErrorV.
4079 *
4080 * @param pVM The VM handle.
4081 * @param fFlags The error flags.
4082 * @param pszErrorId Error ID string.
4083 * @param pszFormat Format string.
4084 * @param pVa Pointer to the format arguments.
4085 */
4086static int vmR3SetRuntimeErrorCommon(PVM pVM, uint32_t fFlags, const char *pszErrorId, const char *pszFormat, va_list *pVa)
4087{
4088 LogRel(("VM: Raising runtime error '%s' (fFlags=%#x)\n", pszErrorId, fFlags));
4089
4090 /*
4091 * Take actions before the call.
4092 */
4093 int rc;
4094 if (fFlags & VMSETRTERR_FLAGS_FATAL)
4095 rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
4096 vmR3SetRuntimeErrorChangeState, NULL);
4097 else if (fFlags & VMSETRTERR_FLAGS_SUSPEND)
4098 rc = VMR3Suspend(pVM);
4099 else
4100 rc = VINF_SUCCESS;
4101
4102 /*
4103 * Do the callback round.
4104 */
4105 PUVM pUVM = pVM->pUVM;
4106 RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
4107 ASMAtomicIncU32(&pUVM->vm.s.cRuntimeErrors);
4108 for (PVMATRUNTIMEERROR pCur = pUVM->vm.s.pAtRuntimeError; pCur; pCur = pCur->pNext)
4109 {
4110 va_list va;
4111 va_copy(va, *pVa);
4112 pCur->pfnAtRuntimeError(pVM, pCur->pvUser, fFlags, pszErrorId, pszFormat, va);
4113 va_end(va);
4114 }
4115 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
4116
4117 return rc;
4118}
4119
4120
4121/**
4122 * Ellipsis to va_list wrapper for calling vmR3SetRuntimeErrorCommon.
4123 */
4124static int vmR3SetRuntimeErrorCommonF(PVM pVM, uint32_t fFlags, const char *pszErrorId, const char *pszFormat, ...)
4125{
4126 va_list va;
4127 va_start(va, pszFormat);
4128 int rc = vmR3SetRuntimeErrorCommon(pVM, fFlags, pszErrorId, pszFormat, &va);
4129 va_end(va);
4130 return rc;
4131}
4132
4133
4134/**
4135 * This is a worker function for RC and Ring-0 calls to VMSetError and
4136 * VMSetErrorV.
4137 *
4138 * The message is found in VMINT.
4139 *
4140 * @returns VBox status code, see VMSetRuntimeError.
4141 * @param pVM The VM handle.
4142 * @thread EMT.
4143 */
4144VMMR3DECL(int) VMR3SetRuntimeErrorWorker(PVM pVM)
4145{
4146 VM_ASSERT_EMT(pVM);
4147 AssertReleaseMsgFailed(("And we have a winner! You get to implement Ring-0 and GC VMSetRuntimeErrorV! Congrats!\n"));
4148
4149 /*
4150 * Unpack the error (if we managed to format one).
4151 */
4152 const char *pszErrorId = "SetRuntimeError";
4153 const char *pszMessage = "No message!";
4154 uint32_t fFlags = VMSETRTERR_FLAGS_FATAL;
4155 PVMRUNTIMEERROR pErr = pVM->vm.s.pRuntimeErrorR3;
4156 if (pErr)
4157 {
4158 AssertCompile(sizeof(const char) == sizeof(uint8_t));
4159 if (pErr->offErrorId)
4160 pszErrorId = (const char *)pErr + pErr->offErrorId;
4161 if (pErr->offMessage)
4162 pszMessage = (const char *)pErr + pErr->offMessage;
4163 fFlags = pErr->fFlags;
4164 }
4165
4166 /*
4167 * Join cause with vmR3SetRuntimeErrorV.
4168 */
4169 return vmR3SetRuntimeErrorCommonF(pVM, fFlags, pszErrorId, "%s", pszMessage);
4170}
4171
4172
4173/**
4174 * Worker for VMSetRuntimeErrorV for doing the job on EMT in ring-3.
4175 *
4176 * @returns VBox status code with modifications, see VMSetRuntimeErrorV.
4177 *
4178 * @param pVM The VM handle.
4179 * @param fFlags The error flags.
4180 * @param pszErrorId Error ID string.
4181 * @param pszMessage The error message residing the MM heap.
4182 *
4183 * @thread EMT
4184 */
4185DECLCALLBACK(int) vmR3SetRuntimeError(PVM pVM, uint32_t fFlags, const char *pszErrorId, char *pszMessage)
4186{
4187#if 0 /** @todo make copy of the error msg. */
4188 /*
4189 * Make a copy of the message.
4190 */
4191 va_list va2;
4192 va_copy(va2, *pVa);
4193 vmSetRuntimeErrorCopy(pVM, fFlags, pszErrorId, pszFormat, va2);
4194 va_end(va2);
4195#endif
4196
4197 /*
4198 * Join paths with VMR3SetRuntimeErrorWorker.
4199 */
4200 int rc = vmR3SetRuntimeErrorCommonF(pVM, fFlags, pszErrorId, "%s", pszMessage);
4201 MMR3HeapFree(pszMessage);
4202 return rc;
4203}
4204
4205
4206/**
4207 * Worker for VMSetRuntimeErrorV for doing the job on EMT in ring-3.
4208 *
4209 * @returns VBox status code with modifications, see VMSetRuntimeErrorV.
4210 *
4211 * @param pVM The VM handle.
4212 * @param fFlags The error flags.
4213 * @param pszErrorId Error ID string.
4214 * @param pszFormat Format string.
4215 * @param pVa Pointer to the format arguments.
4216 *
4217 * @thread EMT
4218 */
4219DECLCALLBACK(int) vmR3SetRuntimeErrorV(PVM pVM, uint32_t fFlags, const char *pszErrorId, const char *pszFormat, va_list *pVa)
4220{
4221 /*
4222 * Make a copy of the message.
4223 */
4224 va_list va2;
4225 va_copy(va2, *pVa);
4226 vmSetRuntimeErrorCopy(pVM, fFlags, pszErrorId, pszFormat, va2);
4227 va_end(va2);
4228
4229 /*
4230 * Join paths with VMR3SetRuntimeErrorWorker.
4231 */
4232 return vmR3SetRuntimeErrorCommon(pVM, fFlags, pszErrorId, pszFormat, pVa);
4233}
4234
4235
4236/**
4237 * Gets the number of runtime errors raised via VMR3SetRuntimeError.
4238 *
4239 * This can be used avoid double error messages.
4240 *
4241 * @returns The runtime error count.
4242 * @param pVM The VM handle.
4243 */
4244VMMR3DECL(uint32_t) VMR3GetRuntimeErrorCount(PVM pVM)
4245{
4246 return pVM->pUVM->vm.s.cRuntimeErrors;
4247}
4248
4249
4250/**
4251 * Gets the ID virtual of the virtual CPU associated with the calling thread.
4252 *
4253 * @returns The CPU ID. NIL_VMCPUID if the thread isn't an EMT.
4254 *
4255 * @param pVM The VM handle.
4256 */
4257VMMR3DECL(RTCPUID) VMR3GetVMCPUId(PVM pVM)
4258{
4259 PUVMCPU pUVCpu = (PUVMCPU)RTTlsGet(pVM->pUVM->vm.s.idxTLS);
4260 return pUVCpu
4261 ? pUVCpu->idCpu
4262 : NIL_VMCPUID;
4263}
4264
4265
4266/**
4267 * Returns the native handle of the current EMT VMCPU thread.
4268 *
4269 * @returns Handle if this is an EMT thread; NIL_RTNATIVETHREAD otherwise
4270 * @param pVM The VM handle.
4271 * @thread EMT
4272 */
4273VMMR3DECL(RTNATIVETHREAD) VMR3GetVMCPUNativeThread(PVM pVM)
4274{
4275 PUVMCPU pUVCpu = (PUVMCPU)RTTlsGet(pVM->pUVM->vm.s.idxTLS);
4276
4277 if (!pUVCpu)
4278 return NIL_RTNATIVETHREAD;
4279
4280 return pUVCpu->vm.s.NativeThreadEMT;
4281}
4282
4283
4284/**
4285 * Returns the native handle of the current EMT VMCPU thread.
4286 *
4287 * @returns Handle if this is an EMT thread; NIL_RTNATIVETHREAD otherwise
4288 * @param pVM The VM handle.
4289 * @thread EMT
4290 */
4291VMMR3DECL(RTNATIVETHREAD) VMR3GetVMCPUNativeThreadU(PUVM pUVM)
4292{
4293 PUVMCPU pUVCpu = (PUVMCPU)RTTlsGet(pUVM->vm.s.idxTLS);
4294
4295 if (!pUVCpu)
4296 return NIL_RTNATIVETHREAD;
4297
4298 return pUVCpu->vm.s.NativeThreadEMT;
4299}
4300
4301
4302/**
4303 * Returns the handle of the current EMT VMCPU thread.
4304 *
4305 * @returns Handle if this is an EMT thread; NIL_RTNATIVETHREAD otherwise
4306 * @param pVM The VM handle.
4307 * @thread EMT
4308 */
4309VMMR3DECL(RTTHREAD) VMR3GetVMCPUThread(PVM pVM)
4310{
4311 PUVMCPU pUVCpu = (PUVMCPU)RTTlsGet(pVM->pUVM->vm.s.idxTLS);
4312
4313 if (!pUVCpu)
4314 return NIL_RTTHREAD;
4315
4316 return pUVCpu->vm.s.ThreadEMT;
4317}
4318
4319
4320/**
4321 * Returns the handle of the current EMT VMCPU thread.
4322 *
4323 * @returns Handle if this is an EMT thread; NIL_RTNATIVETHREAD otherwise
4324 * @param pVM The VM handle.
4325 * @thread EMT
4326 */
4327VMMR3DECL(RTTHREAD) VMR3GetVMCPUThreadU(PUVM pUVM)
4328{
4329 PUVMCPU pUVCpu = (PUVMCPU)RTTlsGet(pUVM->vm.s.idxTLS);
4330
4331 if (!pUVCpu)
4332 return NIL_RTTHREAD;
4333
4334 return pUVCpu->vm.s.ThreadEMT;
4335}
4336
4337
4338/**
4339 * Return the package and core id of a CPU.
4340 *
4341 * @returns VBOX status code.
4342 * @param pVM The VM to operate on.
4343 * @param idCpu Virtual CPU to get the ID from.
4344 * @param pidCpuCore Where to store the core ID of the virtual CPU.
4345 * @param pidCpuPackage Where to store the package ID of the virtual CPU.
4346 *
4347 */
4348VMMR3DECL(int) VMR3GetCpuCoreAndPackageIdFromCpuId(PVM pVM, VMCPUID idCpu, uint32_t *pidCpuCore, uint32_t *pidCpuPackage)
4349{
4350 /*
4351 * Validate input.
4352 */
4353 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
4354 AssertPtrReturn(pidCpuCore, VERR_INVALID_POINTER);
4355 AssertPtrReturn(pidCpuPackage, VERR_INVALID_POINTER);
4356 if (idCpu >= pVM->cCpus)
4357 return VERR_INVALID_CPU_ID;
4358
4359 /*
4360 * Set return values.
4361 */
4362#ifdef VBOX_WITH_MULTI_CORE
4363 *pidCpuCore = idCpu;
4364 *pidCpuPackage = 0;
4365#else
4366 *pidCpuCore = 0;
4367 *pidCpuPackage = idCpu;
4368#endif
4369
4370 return VINF_SUCCESS;
4371}
4372
4373
4374/**
4375 * Worker for VMR3HotUnplugCpu.
4376 *
4377 * @returns VINF_EM_WAIT_SPIP (strict status code).
4378 * @param pVM The VM handle.
4379 * @param idCpu The current CPU.
4380 */
4381static DECLCALLBACK(int) vmR3HotUnplugCpu(PVM pVM, VMCPUID idCpu)
4382{
4383 PVMCPU pVCpu = VMMGetCpuById(pVM, idCpu);
4384 VMCPU_ASSERT_EMT(pVCpu);
4385
4386 /*
4387 * Reset per CPU resources.
4388 *
4389 * Actually only needed for VT-x because the CPU seems to be still in some
4390 * paged mode and startup fails after a new hot plug event. SVM works fine
4391 * even without this.
4392 */
4393 Log(("vmR3HotUnplugCpu for VCPU %u\n", idCpu));
4394 PGMR3ResetUnpluggedCpu(pVM, pVCpu);
4395 PDMR3ResetCpu(pVCpu);
4396 TRPMR3ResetCpu(pVCpu);
4397 CPUMR3ResetCpu(pVCpu);
4398 EMR3ResetCpu(pVCpu);
4399 HWACCMR3ResetCpu(pVCpu);
4400 return VINF_EM_WAIT_SIPI;
4401}
4402
4403
4404/**
4405 * Hot-unplugs a CPU from the guest.
4406 *
4407 * @returns VBox status code.
4408 * @param pVM The VM to operate on.
4409 * @param idCpu Virtual CPU to perform the hot unplugging operation on.
4410 */
4411VMMR3DECL(int) VMR3HotUnplugCpu(PVM pVM, VMCPUID idCpu)
4412{
4413 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
4414 AssertReturn(idCpu < pVM->cCpus, VERR_INVALID_CPU_ID);
4415
4416 /** @todo r=bird: Don't destroy the EMT, it'll break VMMR3EmtRendezvous and
4417 * broadcast requests. Just note down somewhere that the CPU is
4418 * offline and send it to SPIP wait. Maybe modify VMCPUSTATE and push
4419 * it out of the EM loops when offline. */
4420 return VMR3ReqCallNoWaitU(pVM->pUVM, idCpu, (PFNRT)vmR3HotUnplugCpu, 2, pVM, idCpu);
4421}
4422
4423
4424/**
4425 * Hot-plugs a CPU on the guest.
4426 *
4427 * @returns VBox status code.
4428 * @param pVM The VM to operate on.
4429 * @param idCpu Virtual CPU to perform the hot plugging operation on.
4430 */
4431VMMR3DECL(int) VMR3HotPlugCpu(PVM pVM, VMCPUID idCpu)
4432{
4433 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
4434 AssertReturn(idCpu < pVM->cCpus, VERR_INVALID_CPU_ID);
4435
4436 /** @todo r-bird: Just mark it online and make sure it waits on SPIP. */
4437 return VINF_SUCCESS;
4438}
4439
4440
4441/**
4442 * Changes the VMM execution cap.
4443 *
4444 * @returns VBox status code.
4445 * @param pVM The VM to operate on.
4446 * @param uCpuExecutionCap New CPU execution cap in precent, 1-100. Where
4447 * 100 is max performance (default).
4448 */
4449VMMR3DECL(int) VMR3SetCpuExecutionCap(PVM pVM, uint32_t uCpuExecutionCap)
4450{
4451 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
4452 AssertReturn(uCpuExecutionCap > 0 && uCpuExecutionCap <= 100, VERR_INVALID_PARAMETER);
4453
4454 Log(("VMR3SetCpuExecutionCap: new priority = %d\n", uCpuExecutionCap));
4455 /* Note: not called from EMT. */
4456 pVM->uCpuExecutionCap = uCpuExecutionCap;
4457 return VINF_SUCCESS;
4458}
4459
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette