VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/VM.cpp@ 37466

Last change on this file since 37466 was 37465, checked in by vboxsync, 13 years ago

build fix

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 156.7 KB
Line 
1/* $Id: VM.cpp 37465 2011-06-15 10:07:58Z vboxsync $ */
2/** @file
3 * VM - Virtual Machine
4 */
5
6/*
7 * Copyright (C) 2006-2010 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/** @page pg_vm VM API
19 *
20 * This is the encapsulating bit. It provides the APIs that Main and VBoxBFE
21 * use to create a VMM instance for running a guest in. It also provides
 22 * facilities for queuing requests for execution in EMT (serialization purposes
 23 * mostly) and for reporting errors back to the VMM user (Main/VBoxBFE).
24 *
25 *
26 * @section sec_vm_design Design Critique / Things To Do
27 *
28 * In hindsight this component is a big design mistake, all this stuff really
29 * belongs in the VMM component. It just seemed like a kind of ok idea at a
 30 * time when the VMM bit was still kind of vague. 'VM' also happened to be the name
31 * of the per-VM instance structure (see vm.h), so it kind of made sense.
 32 * However, as it turned out, VMM(.cpp) is almost empty; all it provides in ring-3
 33 * is some minor functionality and some "routing" services.
34 *
35 * Fixing this is just a matter of some more or less straight forward
36 * refactoring, the question is just when someone will get to it. Moving the EMT
37 * would be a good start.
38 *
39 */
40
41/*******************************************************************************
42* Header Files *
43*******************************************************************************/
44#define LOG_GROUP LOG_GROUP_VM
45#include <VBox/vmm/cfgm.h>
46#include <VBox/vmm/vmm.h>
47#include <VBox/vmm/gvmm.h>
48#include <VBox/vmm/mm.h>
49#include <VBox/vmm/cpum.h>
50#include <VBox/vmm/selm.h>
51#include <VBox/vmm/trpm.h>
52#include <VBox/vmm/dbgf.h>
53#include <VBox/vmm/pgm.h>
54#include <VBox/vmm/pdmapi.h>
55#include <VBox/vmm/pdmcritsect.h>
56#include <VBox/vmm/em.h>
57#include <VBox/vmm/iem.h>
58#include <VBox/vmm/rem.h>
59#include <VBox/vmm/tm.h>
60#include <VBox/vmm/stam.h>
61#include <VBox/vmm/patm.h>
62#include <VBox/vmm/csam.h>
63#include <VBox/vmm/iom.h>
64#include <VBox/vmm/ssm.h>
65#include <VBox/vmm/ftm.h>
66#include <VBox/vmm/hwaccm.h>
67#include "VMInternal.h"
68#include <VBox/vmm/vm.h>
69#include <VBox/vmm/uvm.h>
70
71#include <VBox/sup.h>
72#include <VBox/dbg.h>
73#include <VBox/err.h>
74#include <VBox/param.h>
75#include <VBox/log.h>
76#include <iprt/assert.h>
77#include <iprt/alloc.h>
78#include <iprt/asm.h>
79#include <iprt/env.h>
80#include <iprt/string.h>
81#include <iprt/time.h>
82#include <iprt/semaphore.h>
83#include <iprt/thread.h>
84#include <iprt/uuid.h>
85
86
87/*******************************************************************************
88* Structures and Typedefs *
89*******************************************************************************/
/**
 * VM destruction callback registration record.
 *
 * These records form the singly linked g_pVMAtDtorHead list below; each one
 * holds a callback (plus its user argument) to invoke when a VM is destroyed.
 */
typedef struct VMATDTOR
{
    /** Pointer to the next record in the list. */
    struct VMATDTOR    *pNext;
    /** Pointer to the callback function. */
    PFNVMATDTOR         pfnAtDtor;
    /** The user argument. */
    void               *pvUser;
} VMATDTOR;
/** Pointer to a VM destruction callback registration record. */
typedef VMATDTOR *PVMATDTOR;
104
105
106/*******************************************************************************
107* Global Variables *
108*******************************************************************************/
/** Pointer to the list of VMs. */
static PUVM         g_pUVMsHead = NULL;

/** Pointer to the list of at VM destruction callbacks. */
static PVMATDTOR    g_pVMAtDtorHead = NULL;
/** Lock the g_pVMAtDtorHead list.
 * @note Currently expands to nothing, so list access is effectively
 *       unserialized here — presumably serialized by the callers. */
#define VM_ATDTOR_LOCK() do { } while (0)
/** Unlock the g_pVMAtDtorHead list.
 * @note No-op counterpart of VM_ATDTOR_LOCK. */
#define VM_ATDTOR_UNLOCK() do { } while (0)
118
119
120/*******************************************************************************
121* Internal Functions *
122*******************************************************************************/
/* Forward declarations of internal workers; needed because VMR3Create() and
   friends above call workers that are defined further down in this file. */
static int                  vmR3CreateUVM(uint32_t cCpus, PCVMM2USERMETHODS pVmm2UserMethods, PUVM *ppUVM);
static int                  vmR3CreateU(PUVM pUVM, uint32_t cCpus, PFNCFGMCONSTRUCTOR pfnCFGMConstructor, void *pvUserCFGM);
static int                  vmR3InitRing3(PVM pVM, PUVM pUVM);
static int                  vmR3InitRing0(PVM pVM);
static int                  vmR3InitGC(PVM pVM);
static int                  vmR3InitDoCompleted(PVM pVM, VMINITCOMPLETED enmWhat);
static DECLCALLBACK(size_t) vmR3LogPrefixCallback(PRTLOGGER pLogger, char *pchBuf, size_t cchBuf, void *pvUser);
static void                 vmR3DestroyUVM(PUVM pUVM, uint32_t cMilliesEMTWait);
static void                 vmR3AtDtor(PVM pVM);
static bool                 vmR3ValidateStateTransition(VMSTATE enmStateOld, VMSTATE enmStateNew);
static void                 vmR3DoAtState(PVM pVM, PUVM pUVM, VMSTATE enmStateNew, VMSTATE enmStateOld);
static int                  vmR3TrySetState(PVM pVM, const char *pszWho, unsigned cTransitions, ...);
static void                 vmR3SetStateLocked(PVM pVM, PUVM pUVM, VMSTATE enmStateNew, VMSTATE enmStateOld);
static void                 vmR3SetState(PVM pVM, VMSTATE enmStateNew, VMSTATE enmStateOld);
static int                  vmR3SetErrorU(PUVM pUVM, int rc, RT_SRC_POS_DECL, const char *pszFormat, ...);
138
139
140/**
141 * Do global VMM init.
142 *
143 * @returns VBox status code.
144 */
145VMMR3DECL(int) VMR3GlobalInit(void)
146{
147 /*
148 * Only once.
149 */
150 static bool volatile s_fDone = false;
151 if (s_fDone)
152 return VINF_SUCCESS;
153
154 /*
155 * We're done.
156 */
157 s_fDone = true;
158 return VINF_SUCCESS;
159}
160
161
162
/**
 * Creates a virtual machine by calling the supplied configuration constructor.
 *
 * On successful returned the VM is powered, i.e. VMR3PowerOn() should be
 * called to start the execution.
 *
 * @returns 0 on success.
 * @returns VBox error code on failure.
 * @param   cCpus               Number of virtual CPUs for the new VM.
 * @param   pVmm2UserMethods    An optional method table that the VMM can use
 *                              to make the user perform various action, like
 *                              for instance state saving.
 * @param   pfnVMAtError        Pointer to callback function for setting VM
 *                              errors. This was added as an implicit call to
 *                              VMR3AtErrorRegister() since there is no way the
 *                              caller can get to the VM handle early enough to
 *                              do this on its own.
 *                              This is called in the context of an EMT.
 * @param   pvUserVM            The user argument passed to pfnVMAtError.
 * @param   pfnCFGMConstructor  Pointer to callback function for constructing the VM configuration tree.
 *                              This is called in the context of an EMT0.
 * @param   pvUserCFGM          The user argument passed to pfnCFGMConstructor.
 * @param   ppVM                Where to store the 'handle' of the created VM.
 */
VMMR3DECL(int) VMR3Create(uint32_t cCpus, PCVMM2USERMETHODS pVmm2UserMethods,
                          PFNVMATERROR pfnVMAtError, void *pvUserVM,
                          PFNCFGMCONSTRUCTOR pfnCFGMConstructor, void *pvUserCFGM,
                          PVM *ppVM)
{
    LogFlow(("VMR3Create: cCpus=%RU32 pVmm2UserMethods=%p pfnVMAtError=%p pvUserVM=%p pfnCFGMConstructor=%p pvUserCFGM=%p ppVM=%p\n",
             cCpus, pVmm2UserMethods, pfnVMAtError, pvUserVM, pfnCFGMConstructor, pvUserCFGM, ppVM));

    /*
     * Validate the optional VMM -> user method table: magic markers at both
     * ends plus a version check, and each (optional) callback pointer.
     */
    if (pVmm2UserMethods)
    {
        AssertPtrReturn(pVmm2UserMethods, VERR_INVALID_POINTER);
        AssertReturn(pVmm2UserMethods->u32Magic    == VMM2USERMETHODS_MAGIC,   VERR_INVALID_PARAMETER);
        AssertReturn(pVmm2UserMethods->u32Version  == VMM2USERMETHODS_VERSION, VERR_INVALID_PARAMETER);
        AssertPtrNullReturn(pVmm2UserMethods->pfnSaveState, VERR_INVALID_POINTER);
        AssertPtrNullReturn(pVmm2UserMethods->pfnNotifyEmtInit, VERR_INVALID_POINTER);
        AssertPtrNullReturn(pVmm2UserMethods->pfnNotifyEmtTerm, VERR_INVALID_POINTER);
        AssertPtrNullReturn(pVmm2UserMethods->pfnNotifyPdmtInit, VERR_INVALID_POINTER);
        AssertPtrNullReturn(pVmm2UserMethods->pfnNotifyPdmtTerm, VERR_INVALID_POINTER);
        AssertReturn(pVmm2UserMethods->u32EndMagic == VMM2USERMETHODS_MAGIC, VERR_INVALID_PARAMETER);
    }
    AssertPtrNullReturn(pfnVMAtError, VERR_INVALID_POINTER);
    AssertPtrNullReturn(pfnCFGMConstructor, VERR_INVALID_POINTER);
    AssertPtrReturn(ppVM, VERR_INVALID_POINTER);

    /*
     * Because of the current hackiness of the applications
     * we'll have to initialize global stuff from here.
     * Later the applications will take care of this in a proper way.
     */
    static bool fGlobalInitDone = false;
    if (!fGlobalInitDone)
    {
        int rc = VMR3GlobalInit();
        if (RT_FAILURE(rc))
            return rc;
        fGlobalInitDone = true;
    }

    /*
     * Validate input.
     */
    AssertLogRelMsgReturn(cCpus > 0 && cCpus <= VMM_MAX_CPU_COUNT, ("%RU32\n", cCpus), VERR_TOO_MANY_CPUS);

    /*
     * Create the UVM so we can register the at-error callback
     * and consolidate a bit of cleanup code.
     */
    PUVM pUVM = NULL;                   /* shuts up gcc */
    int rc = vmR3CreateUVM(cCpus, pVmm2UserMethods, &pUVM);
    if (RT_FAILURE(rc))
        return rc;
    if (pfnVMAtError)
        rc = VMR3AtErrorRegisterU(pUVM, pfnVMAtError, pvUserVM);
    if (RT_SUCCESS(rc))
    {
        /*
         * Initialize the support library creating the session for this VM.
         */
        rc = SUPR3Init(&pUVM->vm.s.pSession);
        if (RT_SUCCESS(rc))
        {
            /*
             * Call vmR3CreateU in the EMT thread and wait for it to finish.
             *
             * Note! VMCPUID_ANY is used here because VMR3ReqQueueU would have trouble
             *       submitting a request to a specific VCPU without a pVM. So, to make
             *       sure init is running on EMT(0), vmR3EmulationThreadWithId makes sure
             *       that only EMT(0) is servicing VMCPUID_ANY requests when pVM is NULL.
             */
            PVMREQ pReq;
            rc = VMR3ReqCallU(pUVM, VMCPUID_ANY, &pReq, RT_INDEFINITE_WAIT, VMREQFLAGS_VBOX_STATUS,
                              (PFNRT)vmR3CreateU, 4, pUVM, cCpus, pfnCFGMConstructor, pvUserCFGM);
            if (RT_SUCCESS(rc))
            {
                /* The request itself succeeded; the actual creation status is
                   carried in the request packet. */
                rc = pReq->iStatus;
                VMR3ReqFree(pReq);
                if (RT_SUCCESS(rc))
                {
                    /*
                     * Success!
                     */
                    *ppVM = pUVM->pVM;
                    LogFlow(("VMR3Create: returns VINF_SUCCESS *ppVM=%p\n", *ppVM));
                    return VINF_SUCCESS;
                }
            }
            else
                AssertMsgFailed(("VMR3ReqCallU failed rc=%Rrc\n", rc));

            /*
             * An error occurred during VM creation. Set the error message directly
             * using the initial callback, as the callback list might not exist yet.
             */
            const char *pszError;
            switch (rc)
            {
                case VERR_VMX_IN_VMX_ROOT_MODE:
#ifdef RT_OS_LINUX
                    pszError = N_("VirtualBox can't operate in VMX root mode. "
                                  "Please disable the KVM kernel extension, recompile your kernel and reboot");
#else
                    pszError = N_("VirtualBox can't operate in VMX root mode. Please close all other virtualization programs.");
#endif
                    break;

#ifndef RT_OS_DARWIN
                case VERR_HWACCM_CONFIG_MISMATCH:
                    pszError = N_("VT-x/AMD-V is either not available on your host or disabled. "
                                  "This hardware extension is required by the VM configuration");
                    break;
#endif

                case VERR_SVM_IN_USE:
#ifdef RT_OS_LINUX
                    pszError = N_("VirtualBox can't enable the AMD-V extension. "
                                  "Please disable the KVM kernel extension, recompile your kernel and reboot");
#else
                    pszError = N_("VirtualBox can't enable the AMD-V extension. Please close all other virtualization programs.");
#endif
                    break;

#ifdef RT_OS_LINUX
                case VERR_SUPDRV_COMPONENT_NOT_FOUND:
                    pszError = N_("One of the kernel modules was not successfully loaded. Make sure "
                                  "that no kernel modules from an older version of VirtualBox exist. "
                                  "Then try to recompile and reload the kernel modules by executing "
                                  "'/etc/init.d/vboxdrv setup' as root");
                    break;
#endif

                case VERR_RAW_MODE_INVALID_SMP:
                    pszError = N_("VT-x/AMD-V is either not available on your host or disabled. "
                                  "VirtualBox requires this hardware extension to emulate more than one "
                                  "guest CPU");
                    break;

                case VERR_SUPDRV_KERNEL_TOO_OLD_FOR_VTX:
#ifdef RT_OS_LINUX
                    pszError = N_("Because the host kernel is too old, VirtualBox cannot enable the VT-x "
                                  "extension. Either upgrade your kernel to Linux 2.6.13 or later or disable "
                                  "the VT-x extension in the VM settings. Note that without VT-x you have "
                                  "to reduce the number of guest CPUs to one");
#else
                    pszError = N_("Because the host kernel is too old, VirtualBox cannot enable the VT-x "
                                  "extension. Either upgrade your kernel or disable the VT-x extension in the "
                                  "VM settings. Note that without VT-x you have to reduce the number of guest "
                                  "CPUs to one");
#endif
                    break;

                case VERR_PDM_DEVICE_NOT_FOUND:
                    pszError = N_("A virtual device is configured in the VM settings but the device "
                                  "implementation is missing.\n"
                                  "A possible reason for this error is a missing extension pack. Note "
                                  "that as of VirtualBox 4.0, certain features (for example USB 2.0 "
                                  "support and remote desktop) are only available from an 'extension "
                                  "pack' which must be downloaded and installed separately");
                    break;

                default:
                    /* Only synthesize a message if no error callback already
                       recorded one; otherwise leave the existing message alone. */
                    if (VMR3GetErrorCountU(pUVM) == 0)
                        pszError = RTErrGetFull(rc);
                    else
                        pszError = NULL; /* already set. */
                    break;
            }
            if (pszError)
                vmR3SetErrorU(pUVM, rc, RT_SRC_POS, pszError, rc);
        }
        else
        {
            /*
             * An error occurred at support library initialization time (before the
             * VM could be created). Set the error message directly using the
             * initial callback, as the callback list doesn't exist yet.
             */
            const char *pszError;
            switch (rc)
            {
                case VERR_VM_DRIVER_LOAD_ERROR:
#ifdef RT_OS_LINUX
                    pszError = N_("VirtualBox kernel driver not loaded. The vboxdrv kernel module "
                                  "was either not loaded or /dev/vboxdrv is not set up properly. "
                                  "Re-setup the kernel module by executing "
                                  "'/etc/init.d/vboxdrv setup' as root");
#else
                    pszError = N_("VirtualBox kernel driver not loaded");
#endif
                    break;
                case VERR_VM_DRIVER_OPEN_ERROR:
                    pszError = N_("VirtualBox kernel driver cannot be opened");
                    break;
                case VERR_VM_DRIVER_NOT_ACCESSIBLE:
#ifdef VBOX_WITH_HARDENING
                    /* This should only happen if the executable wasn't hardened - bad code/build. */
                    pszError = N_("VirtualBox kernel driver not accessible, permission problem. "
                                  "Re-install VirtualBox. If you are building it yourself, you "
                                  "should make sure it installed correctly and that the setuid "
                                  "bit is set on the executables calling VMR3Create.");
#else
                    /* This should only happen when mixing builds or with the usual /dev/vboxdrv access issues. */
# if defined(RT_OS_DARWIN)
                    pszError = N_("VirtualBox KEXT is not accessible, permission problem. "
                                  "If you have built VirtualBox yourself, make sure that you do not "
                                  "have the vboxdrv KEXT from a different build or installation loaded.");
# elif defined(RT_OS_LINUX)
                    pszError = N_("VirtualBox kernel driver is not accessible, permission problem. "
                                  "If you have built VirtualBox yourself, make sure that you do "
                                  "not have the vboxdrv kernel module from a different build or "
                                  "installation loaded. Also, make sure the vboxdrv udev rule gives "
                                  "you the permission you need to access the device.");
# elif defined(RT_OS_WINDOWS)
                    pszError = N_("VirtualBox kernel driver is not accessible, permission problem.");
# else /* solaris, freebsd, ++. */
                    pszError = N_("VirtualBox kernel module is not accessible, permission problem. "
                                  "If you have built VirtualBox yourself, make sure that you do "
                                  "not have the vboxdrv kernel module from a different install loaded.");
# endif
#endif
                    break;
                case VERR_INVALID_HANDLE: /** @todo track down and fix this error. */
                case VERR_VM_DRIVER_NOT_INSTALLED:
#ifdef RT_OS_LINUX
                    pszError = N_("VirtualBox kernel driver not installed. The vboxdrv kernel module "
                                  "was either not loaded or /dev/vboxdrv was not created for some "
                                  "reason. Re-setup the kernel module by executing "
                                  "'/etc/init.d/vboxdrv setup' as root");
#else
                    pszError = N_("VirtualBox kernel driver not installed");
#endif
                    break;
                case VERR_NO_MEMORY:
                    pszError = N_("VirtualBox support library out of memory");
                    break;
                case VERR_VERSION_MISMATCH:
                case VERR_VM_DRIVER_VERSION_MISMATCH:
                    pszError = N_("The VirtualBox support driver which is running is from a different "
                                  "version of VirtualBox. You can correct this by stopping all "
                                  "running instances of VirtualBox and reinstalling the software.");
                    break;
                default:
                    pszError = N_("Unknown error initializing kernel driver");
                    AssertMsgFailed(("Add error message for rc=%d (%Rrc)\n", rc, rc));
            }
            vmR3SetErrorU(pUVM, rc, RT_SRC_POS, pszError, rc);
        }
    }

    /* cleanup: tear down the UVM (waits up to 2s for the EMTs). */
    vmR3DestroyUVM(pUVM, 2000);
    LogFlow(("VMR3Create: returns %Rrc\n", rc));
    return rc;
}
440
441
/**
 * Creates the UVM.
 *
 * This will not initialize the support library even if vmR3DestroyUVM
 * will terminate that.
 *
 * The function uses a nested if-ladder so that on any failure the already
 * acquired resources are released in exact reverse order of acquisition.
 *
 * @returns VBox status code.
 * @param   cCpus               Number of virtual CPUs
 * @param   pVmm2UserMethods    Pointer to the optional VMM -> User method
 *                              table.
 * @param   ppUVM               Where to store the UVM pointer.
 */
static int vmR3CreateUVM(uint32_t cCpus, PCVMM2USERMETHODS pVmm2UserMethods, PUVM *ppUVM)
{
    uint32_t i;

    /*
     * Create and initialize the UVM.
     * The allocation is page based and zero initialized; the aCpus array is
     * sized for cCpus entries via the flexible-array-style offset.
     */
    PUVM pUVM = (PUVM)RTMemPageAllocZ(RT_OFFSETOF(UVM, aCpus[cCpus]));
    AssertReturn(pUVM, VERR_NO_MEMORY);
    pUVM->u32Magic          = UVM_MAGIC;
    pUVM->cCpus             = cCpus;
    pUVM->pVmm2UserMethods  = pVmm2UserMethods;

    AssertCompile(sizeof(pUVM->vm.s) <= sizeof(pUVM->vm.padding));

    pUVM->vm.s.cUvmRefs     = 1;
    /* The at-state/at-error/at-runtime-error lists start empty; point the
       'next' insertion pointers at the list heads. */
    pUVM->vm.s.ppAtStateNext = &pUVM->vm.s.pAtState;
    pUVM->vm.s.ppAtErrorNext = &pUVM->vm.s.pAtError;
    pUVM->vm.s.ppAtRuntimeErrorNext = &pUVM->vm.s.pAtRuntimeError;

    pUVM->vm.s.enmHaltMethod = VMHALTMETHOD_BOOTSTRAP;
    RTUuidClear(&pUVM->vm.s.Uuid);

    /* Initialize the VMCPU array in the UVM. */
    for (i = 0; i < cCpus; i++)
    {
        pUVM->aCpus[i].pUVM   = pUVM;
        pUVM->aCpus[i].idCpu  = i;
    }

    /* Allocate a TLS entry to store the VMINTUSERPERVMCPU pointer. */
    int rc = RTTlsAllocEx(&pUVM->vm.s.idxTLS, NULL);
    AssertRC(rc);
    if (RT_SUCCESS(rc))
    {
        /* Allocate a halt method event semaphore for each VCPU.
           First mark all handles NIL so the cleanup loop below is safe even
           when creation fails part way through. */
        for (i = 0; i < cCpus; i++)
            pUVM->aCpus[i].vm.s.EventSemWait = NIL_RTSEMEVENT;
        for (i = 0; i < cCpus; i++)
        {
            rc = RTSemEventCreate(&pUVM->aCpus[i].vm.s.EventSemWait);
            if (RT_FAILURE(rc))
                break;
        }
        if (RT_SUCCESS(rc))
        {
            rc = RTCritSectInit(&pUVM->vm.s.AtStateCritSect);
            if (RT_SUCCESS(rc))
            {
                rc = RTCritSectInit(&pUVM->vm.s.AtErrorCritSect);
                if (RT_SUCCESS(rc))
                {
                    /*
                     * Init fundamental (sub-)components - STAM, MMR3Heap and PDMLdr.
                     */
                    rc = STAMR3InitUVM(pUVM);
                    if (RT_SUCCESS(rc))
                    {
                        rc = MMR3InitUVM(pUVM);
                        if (RT_SUCCESS(rc))
                        {
                            rc = PDMR3InitUVM(pUVM);
                            if (RT_SUCCESS(rc))
                            {
                                /*
                                 * Start the emulation threads for all VMCPUs.
                                 */
                                for (i = 0; i < cCpus; i++)
                                {
                                    rc = RTThreadCreateF(&pUVM->aCpus[i].vm.s.ThreadEMT, vmR3EmulationThread, &pUVM->aCpus[i], _1M,
                                                         RTTHREADTYPE_EMULATION, RTTHREADFLAGS_WAITABLE,
                                                         cCpus > 1 ? "EMT-%u" : "EMT", i);
                                    if (RT_FAILURE(rc))
                                        break;

                                    pUVM->aCpus[i].vm.s.NativeThreadEMT = RTThreadGetNative(pUVM->aCpus[i].vm.s.ThreadEMT);
                                }

                                if (RT_SUCCESS(rc))
                                {
                                    *ppUVM = pUVM;
                                    return VINF_SUCCESS;
                                }

                                /* bail out. */
                                while (i-- > 0)
                                {
                                    /** @todo rainy day: terminate the EMTs. */
                                }
                                PDMR3TermUVM(pUVM);
                            }
                            MMR3TermUVM(pUVM);
                        }
                        STAMR3TermUVM(pUVM);
                    }
                    RTCritSectDelete(&pUVM->vm.s.AtErrorCritSect);
                }
                RTCritSectDelete(&pUVM->vm.s.AtStateCritSect);
            }
        }
        /* Destroying a NIL_RTSEMEVENT handle is safe, so this covers both the
           partially-created and the fully-created semaphore cases. */
        for (i = 0; i < cCpus; i++)
        {
            RTSemEventDestroy(pUVM->aCpus[i].vm.s.EventSemWait);
            pUVM->aCpus[i].vm.s.EventSemWait = NIL_RTSEMEVENT;
        }
        RTTlsFree(pUVM->vm.s.idxTLS);
    }
    RTMemPageFree(pUVM, RT_OFFSETOF(UVM, aCpus[pUVM->cCpus]));
    return rc;
}
564
565
/**
 * Creates and initializes the VM.
 *
 * Runs on EMT(0) (dispatched there by VMR3Create).  Loads VMMR0, asks GVMM
 * (ring-0) to create the shared VM structure, wires it up to the UVM, reads
 * the fundamental CFGM values and then drives the ring-3 / ring-0 / GC init
 * phases.  On failure everything created here is torn down again before
 * returning.
 *
 * @returns VBox status code.
 * @param   pUVM                The user mode VM structure.
 * @param   cCpus               Number of virtual CPUs; must match the
 *                              "NumCPUs" CFGM value.
 * @param   pfnCFGMConstructor  Configuration tree constructor callback.
 * @param   pvUserCFGM          User argument for @a pfnCFGMConstructor.
 *
 * @thread EMT
 */
static int vmR3CreateU(PUVM pUVM, uint32_t cCpus, PFNCFGMCONSTRUCTOR pfnCFGMConstructor, void *pvUserCFGM)
{
    /*
     * Load the VMMR0.r0 module so that we can call GVMMR0CreateVM.
     */
    int rc = PDMR3LdrLoadVMMR0U(pUVM);
    if (RT_FAILURE(rc))
    {
        /** @todo we need a cleaner solution for this (VERR_VMX_IN_VMX_ROOT_MODE).
         * bird: what about moving the message down here? Main picks the first message, right? */
        if (rc == VERR_VMX_IN_VMX_ROOT_MODE)
            return rc;  /* proper error message set later on */
        return vmR3SetErrorU(pUVM, rc, RT_SRC_POS, N_("Failed to load VMMR0.r0"));
    }

    /*
     * Request GVMM to create a new VM for us.
     */
    GVMMCREATEVMREQ CreateVMReq;
    CreateVMReq.Hdr.u32Magic    = SUPVMMR0REQHDR_MAGIC;
    CreateVMReq.Hdr.cbReq       = sizeof(CreateVMReq);
    CreateVMReq.pSession        = pUVM->vm.s.pSession;
    CreateVMReq.pVMR0           = NIL_RTR0PTR;
    CreateVMReq.pVMR3           = NULL;
    CreateVMReq.cCpus           = cCpus;
    rc = SUPR3CallVMMR0Ex(NIL_RTR0PTR, NIL_VMCPUID, VMMR0_DO_GVMM_CREATE_VM, 0, &CreateVMReq.Hdr);
    if (RT_SUCCESS(rc))
    {
        /* Sanity-check the VM structure ring-0 just handed back to us. */
        PVM pVM = pUVM->pVM = CreateVMReq.pVMR3;
        AssertRelease(VALID_PTR(pVM));
        AssertRelease(pVM->pVMR0 == CreateVMReq.pVMR0);
        AssertRelease(pVM->pSession == pUVM->vm.s.pSession);
        AssertRelease(pVM->cCpus == cCpus);
        AssertRelease(pVM->uCpuExecutionCap == 100);
        AssertRelease(pVM->offVMCPU == RT_UOFFSETOF(VM, aCpus));
        AssertCompileMemberAlignment(VM, cpum, 64);
        AssertCompileMemberAlignment(VM, tm, 64);
        AssertCompileMemberAlignment(VM, aCpus, PAGE_SIZE);

        Log(("VMR3Create: Created pUVM=%p pVM=%p pVMR0=%p hSelf=%#x cCpus=%RU32\n",
             pUVM, pVM, pVM->pVMR0, pVM->hSelf, pVM->cCpus));

        /*
         * Initialize the VM structure and our internal data (VMINT).
         * Cross-link each VMCPU with its UVMCPU counterpart.
         */
        pVM->pUVM = pUVM;

        for (VMCPUID i = 0; i < pVM->cCpus; i++)
        {
            pVM->aCpus[i].pUVCpu        = &pUVM->aCpus[i];
            pVM->aCpus[i].idCpu         = i;
            pVM->aCpus[i].hNativeThread = pUVM->aCpus[i].vm.s.NativeThreadEMT;
            Assert(pVM->aCpus[i].hNativeThread != NIL_RTNATIVETHREAD);
            /* hNativeThreadR0 is initialized on EMT registration. */
            pUVM->aCpus[i].pVCpu        = &pVM->aCpus[i];
            pUVM->aCpus[i].pVM          = pVM;
        }


        /*
         * Init the configuration.
         */
        rc = CFGMR3Init(pVM, pfnCFGMConstructor, pvUserCFGM);
        if (RT_SUCCESS(rc))
        {
            PCFGMNODE pRoot = CFGMR3GetRoot(pVM);
            rc = CFGMR3QueryBoolDef(pRoot, "HwVirtExtForced", &pVM->fHwVirtExtForced, false);
            if (RT_SUCCESS(rc) && pVM->fHwVirtExtForced)
                pVM->fHWACCMEnabled = true;

            /*
             * If executing in fake suplib mode disable RR3 and RR0 in the config.
             */
            const char *psz = RTEnvGet("VBOX_SUPLIB_FAKE");
            if (psz && !strcmp(psz, "fake"))
            {
                CFGMR3RemoveValue(pRoot, "RawR3Enabled");
                CFGMR3InsertInteger(pRoot, "RawR3Enabled", 0);
                CFGMR3RemoveValue(pRoot, "RawR0Enabled");
                CFGMR3InsertInteger(pRoot, "RawR0Enabled", 0);
            }

            /*
             * Make sure the CPU count in the config data matches.
             */
            if (RT_SUCCESS(rc))
            {
                uint32_t cCPUsCfg;
                rc = CFGMR3QueryU32Def(pRoot, "NumCPUs", &cCPUsCfg, 1);
                AssertLogRelMsgRC(rc, ("Configuration error: Querying \"NumCPUs\" as integer failed, rc=%Rrc\n", rc));
                if (RT_SUCCESS(rc) && cCPUsCfg != cCpus)
                {
                    AssertLogRelMsgFailed(("Configuration error: \"NumCPUs\"=%RU32 and VMR3CreateVM::cCpus=%RU32 does not match!\n",
                                           cCPUsCfg, cCpus));
                    rc = VERR_INVALID_PARAMETER;
                }
            }

            /*
             * Get the CPU execution cap.
             */
            if (RT_SUCCESS(rc))
            {
                rc = CFGMR3QueryU32Def(pRoot, "CpuExecutionCap", &pVM->uCpuExecutionCap, 100);
                AssertLogRelMsgRC(rc, ("Configuration error: Querying \"CpuExecutionCap\" as integer failed, rc=%Rrc\n", rc));
            }

            /*
             * Get the VM name and UUID.
             */
            if (RT_SUCCESS(rc))
            {
                rc = CFGMR3QueryStringAllocDef(pRoot, "Name", &pUVM->vm.s.pszName, "<unknown>");
                AssertLogRelMsg(RT_SUCCESS(rc) && rc != VERR_CFGM_VALUE_NOT_FOUND, ("Configuration error: Querying \"Name\" failed, rc=%Rrc\n", rc));
            }

            if (RT_SUCCESS(rc))
            {
                rc = CFGMR3QueryBytes(pRoot, "UUID", &pUVM->vm.s.Uuid, sizeof(pUVM->vm.s.Uuid));
                AssertLogRelMsg(RT_SUCCESS(rc) && rc != VERR_CFGM_VALUE_NOT_FOUND, ("Configuration error: Querying \"UUID\" failed, rc=%Rrc\n", rc));
            }

            if (RT_SUCCESS(rc))
            {
                /*
                 * Init the ring-3 components and ring-3 per cpu data, finishing it off
                 * by a relocation round (intermediate context finalization will do this).
                 */
                rc = vmR3InitRing3(pVM, pUVM);
                if (RT_SUCCESS(rc))
                {
                    rc = PGMR3FinalizeMappings(pVM);
                    if (RT_SUCCESS(rc))
                    {

                        LogFlow(("Ring-3 init succeeded\n"));

                        /*
                         * Init the Ring-0 components.
                         */
                        rc = vmR3InitRing0(pVM);
                        if (RT_SUCCESS(rc))
                        {
                            /* Relocate again, because some switcher fixups depends on R0 init results. */
                            VMR3Relocate(pVM, 0);

#ifdef VBOX_WITH_DEBUGGER
                            /*
                             * Init the tcp debugger console if we're building
                             * with debugger support.
                             *
                             * Note! The open brace of this if is closed after the
                             *       matching #ifdef block further down, so the GC
                             *       init below runs inside it when the debugger is
                             *       compiled in and unconditionally otherwise.
                             */
                            void *pvUser = NULL;
                            rc = DBGCTcpCreate(pVM, &pvUser);
                            if (    RT_SUCCESS(rc)
                                ||  rc == VERR_NET_ADDRESS_IN_USE)
                            {
                                pUVM->vm.s.pvDBGC = pvUser;
#endif
                                /*
                                 * Init the Guest Context components.
                                 */
                                rc = vmR3InitGC(pVM);
                                if (RT_SUCCESS(rc))
                                {
                                    /*
                                     * Now we can safely set the VM halt method to default.
                                     */
                                    rc = vmR3SetHaltMethodU(pUVM, VMHALTMETHOD_DEFAULT);
                                    if (RT_SUCCESS(rc))
                                    {
                                        /*
                                         * Set the state and link into the global list.
                                         */
                                        vmR3SetState(pVM, VMSTATE_CREATED, VMSTATE_CREATING);
                                        pUVM->pNext = g_pUVMsHead;
                                        g_pUVMsHead = pUVM;

#ifdef LOG_ENABLED
                                        RTLogSetCustomPrefixCallback(NULL, vmR3LogPrefixCallback, pUVM);
#endif
                                        return VINF_SUCCESS;
                                    }
                                }
#ifdef VBOX_WITH_DEBUGGER
                                DBGCTcpTerminate(pVM, pUVM->vm.s.pvDBGC);
                                pUVM->vm.s.pvDBGC = NULL;
                            }
#endif
                            //..
                        }
                    }
                    vmR3Destroy(pVM);
                }
            }
            //..

            /* Clean CFGM. */
            int rc2 = CFGMR3Term(pVM);
            AssertRC(rc2);
        }

        /*
         * Do automatic cleanups while the VM structure is still alive and all
         * references to it are still working.
         */
        PDMR3CritSectTerm(pVM);

        /*
         * Drop all references to VM and the VMCPU structures, then
         * tell GVMM to destroy the VM.
         */
        pUVM->pVM = NULL;
        for (VMCPUID i = 0; i < pUVM->cCpus; i++)
        {
            pUVM->aCpus[i].pVM = NULL;
            pUVM->aCpus[i].pVCpu = NULL;
        }
        Assert(pUVM->vm.s.enmHaltMethod == VMHALTMETHOD_BOOTSTRAP);

        if (pUVM->cCpus > 1)
        {
            /* Poke the other EMTs since they may have stale pVM and pVCpu references
               on the stack (see VMR3WaitU for instance) if they've been awakened after
               VM creation. */
            for (VMCPUID i = 1; i < pUVM->cCpus; i++)
                VMR3NotifyCpuFFU(&pUVM->aCpus[i], 0);
            RTThreadSleep(RT_MIN(100 + 25 *(pUVM->cCpus - 1),  500)); /* very sophisticated */
        }

        int rc2 = SUPR3CallVMMR0Ex(CreateVMReq.pVMR0, 0 /*idCpu*/, VMMR0_DO_GVMM_DESTROY_VM, 0, NULL);
        AssertRC(rc2);
    }
    else
        vmR3SetErrorU(pUVM, rc, RT_SRC_POS, N_("VM creation failed (GVMM)"));

    LogFlow(("vmR3CreateU: returns %Rrc\n", rc));
    return rc;
}
809
810
811/**
812 * Register the calling EMT with GVM.
813 *
814 * @returns VBox status code.
815 * @param pVM The VM handle.
816 * @param idCpu The Virtual CPU ID.
817 */
818static DECLCALLBACK(int) vmR3RegisterEMT(PVM pVM, VMCPUID idCpu)
819{
820 Assert(VMMGetCpuId(pVM) == idCpu);
821 int rc = SUPR3CallVMMR0Ex(pVM->pVMR0, idCpu, VMMR0_DO_GVMM_REGISTER_VMCPU, 0, NULL);
822 if (RT_FAILURE(rc))
823 LogRel(("idCpu=%u rc=%Rrc\n", idCpu, rc));
824 return rc;
825}
826
827
828/**
829 * Initializes all R3 components of the VM
830 */
831static int vmR3InitRing3(PVM pVM, PUVM pUVM)
832{
833 int rc;
834
835 /*
836 * Register the other EMTs with GVM.
837 */
838 for (VMCPUID idCpu = 1; idCpu < pVM->cCpus; idCpu++)
839 {
840 rc = VMR3ReqCallWaitU(pUVM, idCpu, (PFNRT)vmR3RegisterEMT, 2, pVM, idCpu);
841 if (RT_FAILURE(rc))
842 return rc;
843 }
844
845 /*
846 * Init all R3 components, the order here might be important.
847 */
848 rc = MMR3Init(pVM);
849 if (RT_SUCCESS(rc))
850 {
851 STAM_REG(pVM, &pVM->StatTotalInGC, STAMTYPE_PROFILE_ADV, "/PROF/VM/InGC", STAMUNIT_TICKS_PER_CALL, "Profiling the total time spent in GC.");
852 STAM_REG(pVM, &pVM->StatSwitcherToGC, STAMTYPE_PROFILE_ADV, "/PROF/VM/SwitchToGC", STAMUNIT_TICKS_PER_CALL, "Profiling switching to GC.");
853 STAM_REG(pVM, &pVM->StatSwitcherToHC, STAMTYPE_PROFILE_ADV, "/PROF/VM/SwitchToHC", STAMUNIT_TICKS_PER_CALL, "Profiling switching to HC.");
854 STAM_REG(pVM, &pVM->StatSwitcherSaveRegs, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/SaveRegs", STAMUNIT_TICKS_PER_CALL,"Profiling switching to GC.");
855 STAM_REG(pVM, &pVM->StatSwitcherSysEnter, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/SysEnter", STAMUNIT_TICKS_PER_CALL,"Profiling switching to GC.");
856 STAM_REG(pVM, &pVM->StatSwitcherDebug, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/Debug", STAMUNIT_TICKS_PER_CALL,"Profiling switching to GC.");
857 STAM_REG(pVM, &pVM->StatSwitcherCR0, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/CR0", STAMUNIT_TICKS_PER_CALL, "Profiling switching to GC.");
858 STAM_REG(pVM, &pVM->StatSwitcherCR4, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/CR4", STAMUNIT_TICKS_PER_CALL, "Profiling switching to GC.");
859 STAM_REG(pVM, &pVM->StatSwitcherLgdt, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/Lgdt", STAMUNIT_TICKS_PER_CALL, "Profiling switching to GC.");
860 STAM_REG(pVM, &pVM->StatSwitcherLidt, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/Lidt", STAMUNIT_TICKS_PER_CALL, "Profiling switching to GC.");
861 STAM_REG(pVM, &pVM->StatSwitcherLldt, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/Lldt", STAMUNIT_TICKS_PER_CALL, "Profiling switching to GC.");
862 STAM_REG(pVM, &pVM->StatSwitcherTSS, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/TSS", STAMUNIT_TICKS_PER_CALL, "Profiling switching to GC.");
863 STAM_REG(pVM, &pVM->StatSwitcherJmpCR3, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/JmpCR3", STAMUNIT_TICKS_PER_CALL,"Profiling switching to GC.");
864 STAM_REG(pVM, &pVM->StatSwitcherRstrRegs, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/RstrRegs", STAMUNIT_TICKS_PER_CALL,"Profiling switching to GC.");
865
866 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
867 {
868 rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltYield, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "Profiling halted state yielding.", "/PROF/VM/CPU%d/Halt/Yield", idCpu);
869 AssertRC(rc);
870 rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltBlock, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "Profiling halted state blocking.", "/PROF/VM/CPU%d/Halt/Block", idCpu);
871 AssertRC(rc);
872 rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltBlockOverslept, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "Time wasted by blocking too long.", "/PROF/VM/CPU%d/Halt/BlockOverslept", idCpu);
873 AssertRC(rc);
874 rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltBlockInsomnia, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "Time slept when returning to early.","/PROF/VM/CPU%d/Halt/BlockInsomnia", idCpu);
875 AssertRC(rc);
876 rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltBlockOnTime, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "Time slept on time.", "/PROF/VM/CPU%d/Halt/BlockOnTime", idCpu);
877 AssertRC(rc);
878 rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltTimers, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "Profiling halted state timer tasks.", "/PROF/VM/CPU%d/Halt/Timers", idCpu);
879 AssertRC(rc);
880 }
881
882 STAM_REG(pVM, &pUVM->vm.s.StatReqAllocNew, STAMTYPE_COUNTER, "/VM/Req/AllocNew", STAMUNIT_OCCURENCES, "Number of VMR3ReqAlloc returning a new packet.");
883 STAM_REG(pVM, &pUVM->vm.s.StatReqAllocRaces, STAMTYPE_COUNTER, "/VM/Req/AllocRaces", STAMUNIT_OCCURENCES, "Number of VMR3ReqAlloc causing races.");
884 STAM_REG(pVM, &pUVM->vm.s.StatReqAllocRecycled, STAMTYPE_COUNTER, "/VM/Req/AllocRecycled", STAMUNIT_OCCURENCES, "Number of VMR3ReqAlloc returning a recycled packet.");
885 STAM_REG(pVM, &pUVM->vm.s.StatReqFree, STAMTYPE_COUNTER, "/VM/Req/Free", STAMUNIT_OCCURENCES, "Number of VMR3ReqFree calls.");
886 STAM_REG(pVM, &pUVM->vm.s.StatReqFreeOverflow, STAMTYPE_COUNTER, "/VM/Req/FreeOverflow", STAMUNIT_OCCURENCES, "Number of times the request was actually freed.");
887 STAM_REG(pVM, &pUVM->vm.s.StatReqProcessed, STAMTYPE_COUNTER, "/VM/Req/Processed", STAMUNIT_OCCURENCES, "Number of processed requests (any queue).");
888 STAM_REG(pVM, &pUVM->vm.s.StatReqMoreThan1, STAMTYPE_COUNTER, "/VM/Req/MoreThan1", STAMUNIT_OCCURENCES, "Number of times there are more than one request on the queue when processing it.");
889 STAM_REG(pVM, &pUVM->vm.s.StatReqPushBackRaces, STAMTYPE_COUNTER, "/VM/Req/PushBackRaces", STAMUNIT_OCCURENCES, "Number of push back races.");
890
891 rc = CPUMR3Init(pVM);
892 if (RT_SUCCESS(rc))
893 {
894 rc = HWACCMR3Init(pVM);
895 if (RT_SUCCESS(rc))
896 {
897 rc = PGMR3Init(pVM);
898 if (RT_SUCCESS(rc))
899 {
900 rc = REMR3Init(pVM);
901 if (RT_SUCCESS(rc))
902 {
903 rc = MMR3InitPaging(pVM);
904 if (RT_SUCCESS(rc))
905 rc = TMR3Init(pVM);
906 if (RT_SUCCESS(rc))
907 {
908 rc = FTMR3Init(pVM);
909 if (RT_SUCCESS(rc))
910 {
911 rc = VMMR3Init(pVM);
912 if (RT_SUCCESS(rc))
913 {
914 rc = SELMR3Init(pVM);
915 if (RT_SUCCESS(rc))
916 {
917 rc = TRPMR3Init(pVM);
918 if (RT_SUCCESS(rc))
919 {
920 rc = CSAMR3Init(pVM);
921 if (RT_SUCCESS(rc))
922 {
923 rc = PATMR3Init(pVM);
924 if (RT_SUCCESS(rc))
925 {
926 rc = IOMR3Init(pVM);
927 if (RT_SUCCESS(rc))
928 {
929 rc = EMR3Init(pVM);
930 if (RT_SUCCESS(rc))
931 {
932 rc = IEMR3Init(pVM);
933 if (RT_SUCCESS(rc))
934 {
935 rc = DBGFR3Init(pVM);
936 if (RT_SUCCESS(rc))
937 {
938 rc = PDMR3Init(pVM);
939 if (RT_SUCCESS(rc))
940 {
941 rc = PGMR3InitDynMap(pVM);
942 if (RT_SUCCESS(rc))
943 rc = MMR3HyperInitFinalize(pVM);
944 if (RT_SUCCESS(rc))
945 rc = PATMR3InitFinalize(pVM);
946 if (RT_SUCCESS(rc))
947 rc = PGMR3InitFinalize(pVM);
948 if (RT_SUCCESS(rc))
949 rc = SELMR3InitFinalize(pVM);
950 if (RT_SUCCESS(rc))
951 rc = TMR3InitFinalize(pVM);
952 if (RT_SUCCESS(rc))
953 rc = REMR3InitFinalize(pVM);
954 if (RT_SUCCESS(rc))
955 rc = vmR3InitDoCompleted(pVM, VMINITCOMPLETED_RING3);
956 if (RT_SUCCESS(rc))
957 {
958 LogFlow(("vmR3InitRing3: returns %Rrc\n", VINF_SUCCESS));
959 return VINF_SUCCESS;
960 }
961
962 int rc2 = PDMR3Term(pVM);
963 AssertRC(rc2);
964 }
965 int rc2 = DBGFR3Term(pVM);
966 AssertRC(rc2);
967 }
968 int rc2 = IEMR3Term(pVM);
969 AssertRC(rc2);
970 }
971 int rc2 = EMR3Term(pVM);
972 AssertRC(rc2);
973 }
974 int rc2 = IOMR3Term(pVM);
975 AssertRC(rc2);
976 }
977 int rc2 = PATMR3Term(pVM);
978 AssertRC(rc2);
979 }
980 int rc2 = CSAMR3Term(pVM);
981 AssertRC(rc2);
982 }
983 int rc2 = TRPMR3Term(pVM);
984 AssertRC(rc2);
985 }
986 int rc2 = SELMR3Term(pVM);
987 AssertRC(rc2);
988 }
989 int rc2 = VMMR3Term(pVM);
990 AssertRC(rc2);
991 }
992 int rc2 = FTMR3Term(pVM);
993 AssertRC(rc2);
994 }
995 int rc2 = TMR3Term(pVM);
996 AssertRC(rc2);
997 }
998 int rc2 = REMR3Term(pVM);
999 AssertRC(rc2);
1000 }
1001 int rc2 = PGMR3Term(pVM);
1002 AssertRC(rc2);
1003 }
1004 int rc2 = HWACCMR3Term(pVM);
1005 AssertRC(rc2);
1006 }
1007 //int rc2 = CPUMR3Term(pVM);
1008 //AssertRC(rc2);
1009 }
1010 /* MMR3Term is not called here because it'll kill the heap. */
1011 }
1012
1013 LogFlow(("vmR3InitRing3: returns %Rrc\n", rc));
1014 return rc;
1015}
1016
1017
1018/**
1019 * Initializes all R0 components of the VM
1020 */
1021static int vmR3InitRing0(PVM pVM)
1022{
1023 LogFlow(("vmR3InitRing0:\n"));
1024
1025 /*
1026 * Check for FAKE suplib mode.
1027 */
1028 int rc = VINF_SUCCESS;
1029 const char *psz = RTEnvGet("VBOX_SUPLIB_FAKE");
1030 if (!psz || strcmp(psz, "fake"))
1031 {
1032 /*
1033 * Call the VMMR0 component and let it do the init.
1034 */
1035 rc = VMMR3InitR0(pVM);
1036 }
1037 else
1038 Log(("vmR3InitRing0: skipping because of VBOX_SUPLIB_FAKE=fake\n"));
1039
1040 /*
1041 * Do notifications and return.
1042 */
1043 if (RT_SUCCESS(rc))
1044 rc = vmR3InitDoCompleted(pVM, VMINITCOMPLETED_RING0);
1045 if (RT_SUCCESS(rc))
1046 rc = vmR3InitDoCompleted(pVM, VMINITCOMPLETED_HWACCM);
1047
1048 /** @todo Move this to the VMINITCOMPLETED_HWACCM notification handler. */
1049 if (RT_SUCCESS(rc))
1050 CPUMR3SetHWVirtEx(pVM, HWACCMIsEnabled(pVM));
1051
1052 LogFlow(("vmR3InitRing0: returns %Rrc\n", rc));
1053 return rc;
1054}
1055
1056
1057/**
1058 * Initializes all GC components of the VM
1059 */
1060static int vmR3InitGC(PVM pVM)
1061{
1062 LogFlow(("vmR3InitGC:\n"));
1063
1064 /*
1065 * Check for FAKE suplib mode.
1066 */
1067 int rc = VINF_SUCCESS;
1068 const char *psz = RTEnvGet("VBOX_SUPLIB_FAKE");
1069 if (!psz || strcmp(psz, "fake"))
1070 {
1071 /*
1072 * Call the VMMR0 component and let it do the init.
1073 */
1074 rc = VMMR3InitRC(pVM);
1075 }
1076 else
1077 Log(("vmR3InitGC: skipping because of VBOX_SUPLIB_FAKE=fake\n"));
1078
1079 /*
1080 * Do notifications and return.
1081 */
1082 if (RT_SUCCESS(rc))
1083 rc = vmR3InitDoCompleted(pVM, VMINITCOMPLETED_GC);
1084 LogFlow(("vmR3InitGC: returns %Rrc\n", rc));
1085 return rc;
1086}
1087
1088
1089/**
1090 * Do init completed notifications.
1091 *
1092 * @returns VBox status code.
1093 * @param pVM The VM handle.
1094 * @param enmWhat What's completed.
1095 */
1096static int vmR3InitDoCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
1097{
1098 int rc = VMMR3InitCompleted(pVM, enmWhat);
1099 if (RT_SUCCESS(rc))
1100 rc = HWACCMR3InitCompleted(pVM, enmWhat);
1101 if (RT_SUCCESS(rc))
1102 rc = PGMR3InitCompleted(pVM, enmWhat);
1103 return rc;
1104}
1105
1106
1107/**
1108 * Logger callback for inserting a custom prefix.
1109 *
1110 * @returns Number of chars written.
1111 * @param pLogger The logger.
1112 * @param pchBuf The output buffer.
1113 * @param cchBuf The output buffer size.
1114 * @param pvUser Pointer to the UVM structure.
1115 */
1116static DECLCALLBACK(size_t) vmR3LogPrefixCallback(PRTLOGGER pLogger, char *pchBuf, size_t cchBuf, void *pvUser)
1117{
1118 AssertReturn(cchBuf >= 2, 0);
1119 PUVM pUVM = (PUVM)pvUser;
1120 PUVMCPU pUVCpu = (PUVMCPU)RTTlsGet(pUVM->vm.s.idxTLS);
1121 if (pUVCpu)
1122 {
1123 static const char s_szHex[17] = "0123456789abcdef";
1124 VMCPUID const idCpu = pUVCpu->idCpu;
1125 pchBuf[1] = s_szHex[ idCpu & 15];
1126 pchBuf[0] = s_szHex[(idCpu >> 4) & 15];
1127 }
1128 else
1129 {
1130 pchBuf[0] = 'x';
1131 pchBuf[1] = 'y';
1132 }
1133
1134 return 2;
1135}
1136
1137
1138/**
1139 * Calls the relocation functions for all VMM components so they can update
1140 * any GC pointers. When this function is called all the basic VM members
1141 * have been updated and the actual memory relocation have been done
1142 * by the PGM/MM.
1143 *
1144 * This is used both on init and on runtime relocations.
1145 *
1146 * @param pVM VM handle.
1147 * @param offDelta Relocation delta relative to old location.
1148 */
1149VMMR3DECL(void) VMR3Relocate(PVM pVM, RTGCINTPTR offDelta)
1150{
1151 LogFlow(("VMR3Relocate: offDelta=%RGv\n", offDelta));
1152
1153 /*
1154 * The order here is very important!
1155 */
1156 PGMR3Relocate(pVM, offDelta);
1157 PDMR3LdrRelocateU(pVM->pUVM, offDelta);
1158 PGMR3Relocate(pVM, 0); /* Repeat after PDM relocation. */
1159 CPUMR3Relocate(pVM);
1160 HWACCMR3Relocate(pVM);
1161 SELMR3Relocate(pVM);
1162 VMMR3Relocate(pVM, offDelta);
1163 SELMR3Relocate(pVM); /* !hack! fix stack! */
1164 TRPMR3Relocate(pVM, offDelta);
1165 PATMR3Relocate(pVM);
1166 CSAMR3Relocate(pVM, offDelta);
1167 IOMR3Relocate(pVM, offDelta);
1168 EMR3Relocate(pVM);
1169 TMR3Relocate(pVM, offDelta);
1170 IEMR3Relocate(pVM);
1171 DBGFR3Relocate(pVM, offDelta);
1172 PDMR3Relocate(pVM, offDelta);
1173}
1174
1175
1176/**
1177 * EMT rendezvous worker for VMR3PowerOn.
1178 *
1179 * @returns VERR_VM_INVALID_VM_STATE or VINF_SUCCESS. (This is a strict return
1180 * code, see FNVMMEMTRENDEZVOUS.)
1181 *
1182 * @param pVM The VM handle.
1183 * @param pVCpu The VMCPU handle of the EMT.
1184 * @param pvUser Ignored.
1185 */
1186static DECLCALLBACK(VBOXSTRICTRC) vmR3PowerOn(PVM pVM, PVMCPU pVCpu, void *pvUser)
1187{
1188 LogFlow(("vmR3PowerOn: pVM=%p pVCpu=%p/#%u\n", pVM, pVCpu, pVCpu->idCpu));
1189 Assert(!pvUser); NOREF(pvUser);
1190
1191 /*
1192 * The first thread thru here tries to change the state. We shouldn't be
1193 * called again if this fails.
1194 */
1195 if (pVCpu->idCpu == pVM->cCpus - 1)
1196 {
1197 int rc = vmR3TrySetState(pVM, "VMR3PowerOn", 1, VMSTATE_POWERING_ON, VMSTATE_CREATED);
1198 if (RT_FAILURE(rc))
1199 return rc;
1200 }
1201
1202 VMSTATE enmVMState = VMR3GetState(pVM);
1203 AssertMsgReturn(enmVMState == VMSTATE_POWERING_ON,
1204 ("%s\n", VMR3GetStateName(enmVMState)),
1205 VERR_INTERNAL_ERROR_4);
1206
1207 /*
1208 * All EMTs changes their state to started.
1209 */
1210 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
1211
1212 /*
1213 * EMT(0) is last thru here and it will make the notification calls
1214 * and advance the state.
1215 */
1216 if (pVCpu->idCpu == 0)
1217 {
1218 PDMR3PowerOn(pVM);
1219 vmR3SetState(pVM, VMSTATE_RUNNING, VMSTATE_POWERING_ON);
1220 }
1221
1222 return VINF_SUCCESS;
1223}
1224
1225
1226/**
1227 * Powers on the virtual machine.
1228 *
1229 * @returns VBox status code.
1230 *
1231 * @param pVM The VM to power on.
1232 *
1233 * @thread Any thread.
1234 * @vmstate Created
1235 * @vmstateto PoweringOn+Running
1236 */
1237VMMR3DECL(int) VMR3PowerOn(PVM pVM)
1238{
1239 LogFlow(("VMR3PowerOn: pVM=%p\n", pVM));
1240 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1241
1242 /*
1243 * Gather all the EMTs to reduce the init TSC drift and keep
1244 * the state changing APIs a bit uniform.
1245 */
1246 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
1247 vmR3PowerOn, NULL);
1248 LogFlow(("VMR3PowerOn: returns %Rrc\n", rc));
1249 return rc;
1250}
1251
1252
1253/**
1254 * Does the suspend notifications.
1255 *
1256 * @param pVM The VM handle.
1257 * @thread EMT(0)
1258 */
1259static void vmR3SuspendDoWork(PVM pVM)
1260{
1261 PDMR3Suspend(pVM);
1262}
1263
1264
1265/**
1266 * EMT rendezvous worker for VMR3Suspend.
1267 *
1268 * @returns VERR_VM_INVALID_VM_STATE or VINF_EM_SUSPEND. (This is a strict
1269 * return code, see FNVMMEMTRENDEZVOUS.)
1270 *
1271 * @param pVM The VM handle.
1272 * @param pVCpu The VMCPU handle of the EMT.
1273 * @param pvUser Ignored.
1274 */
1275static DECLCALLBACK(VBOXSTRICTRC) vmR3Suspend(PVM pVM, PVMCPU pVCpu, void *pvUser)
1276{
1277 LogFlow(("vmR3Suspend: pVM=%p pVCpu=%p/#%u\n", pVM, pVCpu, pVCpu->idCpu));
1278 Assert(!pvUser); NOREF(pvUser);
1279
1280 /*
1281 * The first EMT switches the state to suspending. If this fails because
1282 * something was racing us in one way or the other, there will be no more
1283 * calls and thus the state assertion below is not going to annoy anyone.
1284 */
1285 if (pVCpu->idCpu == pVM->cCpus - 1)
1286 {
1287 int rc = vmR3TrySetState(pVM, "VMR3Suspend", 2,
1288 VMSTATE_SUSPENDING, VMSTATE_RUNNING,
1289 VMSTATE_SUSPENDING_EXT_LS, VMSTATE_RUNNING_LS);
1290 if (RT_FAILURE(rc))
1291 return rc;
1292 }
1293
1294 VMSTATE enmVMState = VMR3GetState(pVM);
1295 AssertMsgReturn( enmVMState == VMSTATE_SUSPENDING
1296 || enmVMState == VMSTATE_SUSPENDING_EXT_LS,
1297 ("%s\n", VMR3GetStateName(enmVMState)),
1298 VERR_INTERNAL_ERROR_4);
1299
1300 /*
1301 * EMT(0) does the actually suspending *after* all the other CPUs have
1302 * been thru here.
1303 */
1304 if (pVCpu->idCpu == 0)
1305 {
1306 vmR3SuspendDoWork(pVM);
1307
1308 int rc = vmR3TrySetState(pVM, "VMR3Suspend", 2,
1309 VMSTATE_SUSPENDED, VMSTATE_SUSPENDING,
1310 VMSTATE_SUSPENDED_EXT_LS, VMSTATE_SUSPENDING_EXT_LS);
1311 if (RT_FAILURE(rc))
1312 return VERR_INTERNAL_ERROR_3;
1313 }
1314
1315 return VINF_EM_SUSPEND;
1316}
1317
1318
1319/**
1320 * Suspends a running VM.
1321 *
1322 * @returns VBox status code. When called on EMT, this will be a strict status
1323 * code that has to be propagated up the call stack.
1324 *
1325 * @param pVM The VM to suspend.
1326 *
1327 * @thread Any thread.
1328 * @vmstate Running or RunningLS
1329 * @vmstateto Suspending + Suspended or SuspendingExtLS + SuspendedExtLS
1330 */
1331VMMR3DECL(int) VMR3Suspend(PVM pVM)
1332{
1333 LogFlow(("VMR3Suspend: pVM=%p\n", pVM));
1334 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1335
1336 /*
1337 * Gather all the EMTs to make sure there are no races before
1338 * changing the VM state.
1339 */
1340 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
1341 vmR3Suspend, NULL);
1342 LogFlow(("VMR3Suspend: returns %Rrc\n", rc));
1343 return rc;
1344}
1345
1346
1347/**
1348 * EMT rendezvous worker for VMR3Resume.
1349 *
1350 * @returns VERR_VM_INVALID_VM_STATE or VINF_EM_RESUME. (This is a strict
1351 * return code, see FNVMMEMTRENDEZVOUS.)
1352 *
1353 * @param pVM The VM handle.
1354 * @param pVCpu The VMCPU handle of the EMT.
1355 * @param pvUser Ignored.
1356 */
1357static DECLCALLBACK(VBOXSTRICTRC) vmR3Resume(PVM pVM, PVMCPU pVCpu, void *pvUser)
1358{
1359 LogFlow(("vmR3Resume: pVM=%p pVCpu=%p/#%u\n", pVM, pVCpu, pVCpu->idCpu));
1360 Assert(!pvUser); NOREF(pvUser);
1361
1362 /*
1363 * The first thread thru here tries to change the state. We shouldn't be
1364 * called again if this fails.
1365 */
1366 if (pVCpu->idCpu == pVM->cCpus - 1)
1367 {
1368 int rc = vmR3TrySetState(pVM, "VMR3Resume", 1, VMSTATE_RESUMING, VMSTATE_SUSPENDED);
1369 if (RT_FAILURE(rc))
1370 return rc;
1371 }
1372
1373 VMSTATE enmVMState = VMR3GetState(pVM);
1374 AssertMsgReturn(enmVMState == VMSTATE_RESUMING,
1375 ("%s\n", VMR3GetStateName(enmVMState)),
1376 VERR_INTERNAL_ERROR_4);
1377
1378#if 0
1379 /*
1380 * All EMTs changes their state to started.
1381 */
1382 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
1383#endif
1384
1385 /*
1386 * EMT(0) is last thru here and it will make the notification calls
1387 * and advance the state.
1388 */
1389 if (pVCpu->idCpu == 0)
1390 {
1391 PDMR3Resume(pVM);
1392 vmR3SetState(pVM, VMSTATE_RUNNING, VMSTATE_RESUMING);
1393 pVM->vm.s.fTeleportedAndNotFullyResumedYet = false;
1394 }
1395
1396 return VINF_EM_RESUME;
1397}
1398
1399
1400/**
1401 * Resume VM execution.
1402 *
1403 * @returns VBox status code. When called on EMT, this will be a strict status
1404 * code that has to be propagated up the call stack.
1405 *
1406 * @param pVM The VM to resume.
1407 *
1408 * @thread Any thread.
1409 * @vmstate Suspended
1410 * @vmstateto Running
1411 */
1412VMMR3DECL(int) VMR3Resume(PVM pVM)
1413{
1414 LogFlow(("VMR3Resume: pVM=%p\n", pVM));
1415 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1416
1417 /*
1418 * Gather all the EMTs to make sure there are no races before
1419 * changing the VM state.
1420 */
1421 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
1422 vmR3Resume, NULL);
1423 LogFlow(("VMR3Resume: returns %Rrc\n", rc));
1424 return rc;
1425}
1426
1427
1428/**
1429 * EMT rendezvous worker for VMR3Save and VMR3Teleport that suspends the VM
1430 * after the live step has been completed.
1431 *
1432 * @returns VERR_VM_INVALID_VM_STATE or VINF_EM_RESUME. (This is a strict
1433 * return code, see FNVMMEMTRENDEZVOUS.)
1434 *
1435 * @param pVM The VM handle.
1436 * @param pVCpu The VMCPU handle of the EMT.
1437 * @param pvUser The pfSuspended argument of vmR3SaveTeleport.
1438 */
1439static DECLCALLBACK(VBOXSTRICTRC) vmR3LiveDoSuspend(PVM pVM, PVMCPU pVCpu, void *pvUser)
1440{
1441 LogFlow(("vmR3LiveDoSuspend: pVM=%p pVCpu=%p/#%u\n", pVM, pVCpu, pVCpu->idCpu));
1442 bool *pfSuspended = (bool *)pvUser;
1443
1444 /*
1445 * The first thread thru here tries to change the state. We shouldn't be
1446 * called again if this fails.
1447 */
1448 if (pVCpu->idCpu == pVM->cCpus - 1U)
1449 {
1450 PUVM pUVM = pVM->pUVM;
1451 int rc;
1452
1453 RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
1454 VMSTATE enmVMState = pVM->enmVMState;
1455 switch (enmVMState)
1456 {
1457 case VMSTATE_RUNNING_LS:
1458 vmR3SetStateLocked(pVM, pUVM, VMSTATE_SUSPENDING_LS, VMSTATE_RUNNING_LS);
1459 rc = VINF_SUCCESS;
1460 break;
1461
1462 case VMSTATE_SUSPENDED_EXT_LS:
1463 case VMSTATE_SUSPENDED_LS: /* (via reset) */
1464 rc = VINF_SUCCESS;
1465 break;
1466
1467 case VMSTATE_DEBUGGING_LS:
1468 rc = VERR_TRY_AGAIN;
1469 break;
1470
1471 case VMSTATE_OFF_LS:
1472 vmR3SetStateLocked(pVM, pUVM, VMSTATE_OFF, VMSTATE_OFF_LS);
1473 rc = VERR_SSM_LIVE_POWERED_OFF;
1474 break;
1475
1476 case VMSTATE_FATAL_ERROR_LS:
1477 vmR3SetStateLocked(pVM, pUVM, VMSTATE_FATAL_ERROR, VMSTATE_FATAL_ERROR_LS);
1478 rc = VERR_SSM_LIVE_FATAL_ERROR;
1479 break;
1480
1481 case VMSTATE_GURU_MEDITATION_LS:
1482 vmR3SetStateLocked(pVM, pUVM, VMSTATE_GURU_MEDITATION, VMSTATE_GURU_MEDITATION_LS);
1483 rc = VERR_SSM_LIVE_GURU_MEDITATION;
1484 break;
1485
1486 case VMSTATE_POWERING_OFF_LS:
1487 case VMSTATE_SUSPENDING_EXT_LS:
1488 case VMSTATE_RESETTING_LS:
1489 default:
1490 AssertMsgFailed(("%s\n", VMR3GetStateName(enmVMState)));
1491 rc = VERR_INTERNAL_ERROR_3;
1492 break;
1493 }
1494 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
1495 if (RT_FAILURE(rc))
1496 {
1497 LogFlow(("vmR3LiveDoSuspend: returns %Rrc (state was %s)\n", rc, VMR3GetStateName(enmVMState)));
1498 return rc;
1499 }
1500 }
1501
1502 VMSTATE enmVMState = VMR3GetState(pVM);
1503 AssertMsgReturn(enmVMState == VMSTATE_SUSPENDING_LS,
1504 ("%s\n", VMR3GetStateName(enmVMState)),
1505 VERR_INTERNAL_ERROR_4);
1506
1507 /*
1508 * Only EMT(0) have work to do since it's last thru here.
1509 */
1510 if (pVCpu->idCpu == 0)
1511 {
1512 vmR3SuspendDoWork(pVM);
1513 int rc = vmR3TrySetState(pVM, "VMR3Suspend", 1,
1514 VMSTATE_SUSPENDED_LS, VMSTATE_SUSPENDING_LS);
1515 if (RT_FAILURE(rc))
1516 return VERR_INTERNAL_ERROR_3;
1517
1518 *pfSuspended = true;
1519 }
1520
1521 return VINF_EM_SUSPEND;
1522}
1523
1524
1525/**
1526 * EMT rendezvous worker that VMR3Save and VMR3Teleport uses to clean up a
1527 * SSMR3LiveDoStep1 failure.
1528 *
1529 * Doing this as a rendezvous operation avoids all annoying transition
1530 * states.
1531 *
1532 * @returns VERR_VM_INVALID_VM_STATE, VINF_SUCCESS or some specific VERR_SSM_*
1533 * status code. (This is a strict return code, see FNVMMEMTRENDEZVOUS.)
1534 *
1535 * @param pVM The VM handle.
1536 * @param pVCpu The VMCPU handle of the EMT.
1537 * @param pvUser The pfSuspended argument of vmR3SaveTeleport.
1538 */
1539static DECLCALLBACK(VBOXSTRICTRC) vmR3LiveDoStep1Cleanup(PVM pVM, PVMCPU pVCpu, void *pvUser)
1540{
1541 LogFlow(("vmR3LiveDoStep1Cleanup: pVM=%p pVCpu=%p/#%u\n", pVM, pVCpu, pVCpu->idCpu));
1542 bool *pfSuspended = (bool *)pvUser;
1543 NOREF(pVCpu);
1544
1545 int rc = vmR3TrySetState(pVM, "vmR3LiveDoStep1Cleanup", 8,
1546 VMSTATE_OFF, VMSTATE_OFF_LS, /* 1 */
1547 VMSTATE_FATAL_ERROR, VMSTATE_FATAL_ERROR_LS, /* 2 */
1548 VMSTATE_GURU_MEDITATION, VMSTATE_GURU_MEDITATION_LS, /* 3 */
1549 VMSTATE_SUSPENDED, VMSTATE_SUSPENDED_LS, /* 4 */
1550 VMSTATE_SUSPENDED, VMSTATE_SAVING,
1551 VMSTATE_SUSPENDED, VMSTATE_SUSPENDED_EXT_LS,
1552 VMSTATE_RUNNING, VMSTATE_RUNNING_LS,
1553 VMSTATE_DEBUGGING, VMSTATE_DEBUGGING_LS);
1554 if (rc == 1)
1555 rc = VERR_SSM_LIVE_POWERED_OFF;
1556 else if (rc == 2)
1557 rc = VERR_SSM_LIVE_FATAL_ERROR;
1558 else if (rc == 3)
1559 rc = VERR_SSM_LIVE_GURU_MEDITATION;
1560 else if (rc == 4)
1561 {
1562 *pfSuspended = true;
1563 rc = VINF_SUCCESS;
1564 }
1565 else if (rc > 0)
1566 rc = VINF_SUCCESS;
1567 return rc;
1568}
1569
1570
1571/**
1572 * EMT(0) worker for VMR3Save and VMR3Teleport that completes the live save.
1573 *
1574 * @returns VBox status code.
1575 * @retval VINF_SSM_LIVE_SUSPENDED if VMR3Suspend was called.
1576 *
1577 * @param pVM The VM handle.
1578 * @param pSSM The handle of saved state operation.
1579 *
1580 * @thread EMT(0)
1581 */
1582static DECLCALLBACK(int) vmR3LiveDoStep2(PVM pVM, PSSMHANDLE pSSM)
1583{
1584 LogFlow(("vmR3LiveDoStep2: pVM=%p pSSM=%p\n", pVM, pSSM));
1585 VM_ASSERT_EMT0(pVM);
1586
1587 /*
1588 * Advance the state and mark if VMR3Suspend was called.
1589 */
1590 int rc = VINF_SUCCESS;
1591 VMSTATE enmVMState = VMR3GetState(pVM);
1592 if (enmVMState == VMSTATE_SUSPENDED_LS)
1593 vmR3SetState(pVM, VMSTATE_SAVING, VMSTATE_SUSPENDED_LS);
1594 else
1595 {
1596 if (enmVMState != VMSTATE_SAVING)
1597 vmR3SetState(pVM, VMSTATE_SAVING, VMSTATE_SUSPENDED_EXT_LS);
1598 rc = VINF_SSM_LIVE_SUSPENDED;
1599 }
1600
1601 /*
1602 * Finish up and release the handle. Careful with the status codes.
1603 */
1604 int rc2 = SSMR3LiveDoStep2(pSSM);
1605 if (rc == VINF_SUCCESS || (RT_FAILURE(rc2) && RT_SUCCESS(rc)))
1606 rc = rc2;
1607
1608 rc2 = SSMR3LiveDone(pSSM);
1609 if (rc == VINF_SUCCESS || (RT_FAILURE(rc2) && RT_SUCCESS(rc)))
1610 rc = rc2;
1611
1612 /*
1613 * Advance to the final state and return.
1614 */
1615 vmR3SetState(pVM, VMSTATE_SUSPENDED, VMSTATE_SAVING);
1616 Assert(rc > VINF_EM_LAST || rc < VINF_EM_FIRST);
1617 return rc;
1618}
1619
1620
1621/**
1622 * Worker for vmR3SaveTeleport that validates the state and calls SSMR3Save or
1623 * SSMR3LiveSave.
1624 *
1625 * @returns VBox status code.
1626 *
1627 * @param pVM The VM handle.
1628 * @param cMsMaxDowntime The maximum downtime given as milliseconds.
1629 * @param pszFilename The name of the file. NULL if pStreamOps is used.
1630 * @param pStreamOps The stream methods. NULL if pszFilename is used.
1631 * @param pvStreamOpsUser The user argument to the stream methods.
1632 * @param enmAfter What to do afterwards.
1633 * @param pfnProgress Progress callback. Optional.
1634 * @param pvProgressUser User argument for the progress callback.
1635 * @param ppSSM Where to return the saved state handle in case of a
1636 * live snapshot scenario.
1637 * @param fSkipStateChanges Set if we're supposed to skip state changes (FTM delta case)
1638 *
1639 * @thread EMT
1640 */
1641static DECLCALLBACK(int) vmR3Save(PVM pVM, uint32_t cMsMaxDowntime, const char *pszFilename, PCSSMSTRMOPS pStreamOps, void *pvStreamOpsUser,
1642 SSMAFTER enmAfter, PFNVMPROGRESS pfnProgress, void *pvProgressUser, PSSMHANDLE *ppSSM,
1643 bool fSkipStateChanges)
1644{
1645 int rc = VINF_SUCCESS;
1646
1647 LogFlow(("vmR3Save: pVM=%p cMsMaxDowntime=%u pszFilename=%p:{%s} pStreamOps=%p pvStreamOpsUser=%p enmAfter=%d pfnProgress=%p pvProgressUser=%p ppSSM=%p\n",
1648 pVM, cMsMaxDowntime, pszFilename, pszFilename, pStreamOps, pvStreamOpsUser, enmAfter, pfnProgress, pvProgressUser, ppSSM));
1649
1650 /*
1651 * Validate input.
1652 */
1653 AssertPtrNull(pszFilename);
1654 AssertPtrNull(pStreamOps);
1655 AssertPtr(pVM);
1656 Assert( enmAfter == SSMAFTER_DESTROY
1657 || enmAfter == SSMAFTER_CONTINUE
1658 || enmAfter == SSMAFTER_TELEPORT);
1659 AssertPtr(ppSSM);
1660 *ppSSM = NULL;
1661
1662 /*
1663 * Change the state and perform/start the saving.
1664 */
1665 if (!fSkipStateChanges)
1666 {
1667 rc = vmR3TrySetState(pVM, "VMR3Save", 2,
1668 VMSTATE_SAVING, VMSTATE_SUSPENDED,
1669 VMSTATE_RUNNING_LS, VMSTATE_RUNNING);
1670 }
1671 else
1672 {
1673 Assert(enmAfter != SSMAFTER_TELEPORT);
1674 rc = 1;
1675 }
1676
1677 if (rc == 1 && enmAfter != SSMAFTER_TELEPORT)
1678 {
1679 rc = SSMR3Save(pVM, pszFilename, pStreamOps, pvStreamOpsUser, enmAfter, pfnProgress, pvProgressUser);
1680 if (!fSkipStateChanges)
1681 vmR3SetState(pVM, VMSTATE_SUSPENDED, VMSTATE_SAVING);
1682 }
1683 else if (rc == 2 || enmAfter == SSMAFTER_TELEPORT)
1684 {
1685 Assert(!fSkipStateChanges);
1686 if (enmAfter == SSMAFTER_TELEPORT)
1687 pVM->vm.s.fTeleportedAndNotFullyResumedYet = true;
1688 rc = SSMR3LiveSave(pVM, cMsMaxDowntime, pszFilename, pStreamOps, pvStreamOpsUser,
1689 enmAfter, pfnProgress, pvProgressUser, ppSSM);
1690 /* (We're not subject to cancellation just yet.) */
1691 }
1692 else
1693 Assert(RT_FAILURE(rc));
1694 return rc;
1695}
1696
1697
1698/**
1699 * Common worker for VMR3Save and VMR3Teleport.
1700 *
1701 * @returns VBox status code.
1702 *
1703 * @param pVM The VM handle.
1704 * @param cMsMaxDowntime The maximum downtime given as milliseconds.
1705 * @param pszFilename The name of the file. NULL if pStreamOps is used.
1706 * @param pStreamOps The stream methods. NULL if pszFilename is used.
1707 * @param pvStreamOpsUser The user argument to the stream methods.
1708 * @param enmAfter What to do afterwards.
1709 * @param pfnProgress Progress callback. Optional.
1710 * @param pvProgressUser User argument for the progress callback.
1711 * @param pfSuspended Set if we suspended the VM.
1712 * @param fSkipStateChanges Set if we're supposed to skip state changes (FTM delta case)
1713 *
1714 * @thread Non-EMT
1715 */
1716static int vmR3SaveTeleport(PVM pVM, uint32_t cMsMaxDowntime,
1717 const char *pszFilename, PCSSMSTRMOPS pStreamOps, void *pvStreamOpsUser,
1718 SSMAFTER enmAfter, PFNVMPROGRESS pfnProgress, void *pvProgressUser, bool *pfSuspended,
1719 bool fSkipStateChanges)
1720{
1721 /*
1722 * Request the operation in EMT(0).
1723 */
1724 PSSMHANDLE pSSM;
1725 int rc = VMR3ReqCallWaitU(pVM->pUVM, 0 /*idDstCpu*/,
1726 (PFNRT)vmR3Save, 10, pVM, cMsMaxDowntime, pszFilename, pStreamOps, pvStreamOpsUser,
1727 enmAfter, pfnProgress, pvProgressUser, &pSSM, fSkipStateChanges);
1728 if ( RT_SUCCESS(rc)
1729 && pSSM)
1730 {
1731 Assert(!fSkipStateChanges);
1732
1733 /*
1734 * Live snapshot.
1735 *
1736 * The state handling here is kind of tricky, doing it on EMT(0) helps
1737 * a bit. See the VMSTATE diagram for details.
1738 */
1739 rc = SSMR3LiveDoStep1(pSSM);
1740 if (RT_SUCCESS(rc))
1741 {
1742 if (VMR3GetState(pVM) != VMSTATE_SAVING)
1743 for (;;)
1744 {
1745 /* Try suspend the VM. */
1746 rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
1747 vmR3LiveDoSuspend, pfSuspended);
1748 if (rc != VERR_TRY_AGAIN)
1749 break;
1750
1751 /* Wait for the state to change. */
1752 RTThreadSleep(250); /** @todo Live Migration: fix this polling wait by some smart use of multiple release event semaphores.. */
1753 }
1754 if (RT_SUCCESS(rc))
1755 rc = VMR3ReqCallWaitU(pVM->pUVM, 0 /*idDstCpu*/, (PFNRT)vmR3LiveDoStep2, 2, pVM, pSSM);
1756 else
1757 {
1758 int rc2 = VMR3ReqCallWaitU(pVM->pUVM, 0 /*idDstCpu*/, (PFNRT)SSMR3LiveDone, 1, pSSM);
1759 AssertMsg(rc2 == rc, ("%Rrc != %Rrc\n", rc2, rc));
1760 }
1761 }
1762 else
1763 {
1764 int rc2 = VMR3ReqCallWaitU(pVM->pUVM, 0 /*idDstCpu*/, (PFNRT)SSMR3LiveDone, 1, pSSM);
1765 AssertMsg(rc2 == rc, ("%Rrc != %Rrc\n", rc2, rc));
1766
1767 rc2 = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE, vmR3LiveDoStep1Cleanup, pfSuspended);
1768 if (RT_FAILURE(rc2) && rc == VERR_SSM_CANCELLED)
1769 rc = rc2;
1770 }
1771 }
1772
1773 return rc;
1774}
1775
1776
1777/**
1778 * Save current VM state.
1779 *
1780 * Can be used for both saving the state and creating snapshots.
1781 *
1782 * When called for a VM in the Running state, the saved state is created live
1783 * and the VM is only suspended when the final part of the saving is preformed.
1784 * The VM state will not be restored to Running in this case and it's up to the
1785 * caller to call VMR3Resume if this is desirable. (The rational is that the
1786 * caller probably wish to reconfigure the disks before resuming the VM.)
1787 *
1788 * @returns VBox status code.
1789 *
1790 * @param pVM The VM which state should be saved.
1791 * @param pszFilename The name of the save state file.
1792 * @param pStreamOps The stream methods.
1793 * @param pvStreamOpsUser The user argument to the stream methods.
1794 * @param fContinueAfterwards Whether continue execution afterwards or not.
1795 * When in doubt, set this to true.
1796 * @param pfnProgress Progress callback. Optional.
1797 * @param pvUser User argument for the progress callback.
1798 * @param pfSuspended Set if we suspended the VM.
1799 *
1800 * @thread Non-EMT.
1801 * @vmstate Suspended or Running
1802 * @vmstateto Saving+Suspended or
1803 * RunningLS+SuspendingLS+SuspendedLS+Saving+Suspended.
1804 */
1805VMMR3DECL(int) VMR3Save(PVM pVM, const char *pszFilename, bool fContinueAfterwards, PFNVMPROGRESS pfnProgress, void *pvUser, bool *pfSuspended)
1806{
1807 LogFlow(("VMR3Save: pVM=%p pszFilename=%p:{%s} fContinueAfterwards=%RTbool pfnProgress=%p pvUser=%p pfSuspended=%p\n",
1808 pVM, pszFilename, pszFilename, fContinueAfterwards, pfnProgress, pvUser, pfSuspended));
1809
1810 /*
1811 * Validate input.
1812 */
1813 AssertPtr(pfSuspended);
1814 *pfSuspended = false;
1815 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1816 VM_ASSERT_OTHER_THREAD(pVM);
1817 AssertReturn(VALID_PTR(pszFilename), VERR_INVALID_POINTER);
1818 AssertReturn(*pszFilename, VERR_INVALID_PARAMETER);
1819 AssertPtrNullReturn(pfnProgress, VERR_INVALID_POINTER);
1820
1821 /*
1822 * Join paths with VMR3Teleport.
1823 */
1824 SSMAFTER enmAfter = fContinueAfterwards ? SSMAFTER_CONTINUE : SSMAFTER_DESTROY;
1825 int rc = vmR3SaveTeleport(pVM, 250 /*cMsMaxDowntime*/,
1826 pszFilename, NULL /* pStreamOps */, NULL /* pvStreamOpsUser */,
1827 enmAfter, pfnProgress, pvUser, pfSuspended,
1828 false /* fSkipStateChanges */);
1829 LogFlow(("VMR3Save: returns %Rrc (*pfSuspended=%RTbool)\n", rc, *pfSuspended));
1830 return rc;
1831}
1832
1833/**
1834 * Save current VM state (used by FTM)
1835 *
1836 * Can be used for both saving the state and creating snapshots.
1837 *
1838 * When called for a VM in the Running state, the saved state is created live
1839 * and the VM is only suspended when the final part of the saving is preformed.
1840 * The VM state will not be restored to Running in this case and it's up to the
1841 * caller to call VMR3Resume if this is desirable. (The rational is that the
1842 * caller probably wish to reconfigure the disks before resuming the VM.)
1843 *
1844 * @returns VBox status code.
1845 *
1846 * @param pVM The VM which state should be saved.
1847 * @param pStreamOps The stream methods.
1848 * @param pvStreamOpsUser The user argument to the stream methods.
1849 * @param pfSuspended Set if we suspended the VM.
1850 * @param fSkipStateChanges Set if we're supposed to skip state changes (FTM delta case)
1851 *
1852 * @thread Any
1853 * @vmstate Suspended or Running
1854 * @vmstateto Saving+Suspended or
1855 * RunningLS+SuspendingLS+SuspendedLS+Saving+Suspended.
1856 */
1857VMMR3DECL(int) VMR3SaveFT(PVM pVM, PCSSMSTRMOPS pStreamOps, void *pvStreamOpsUser, bool *pfSuspended,
1858 bool fSkipStateChanges)
1859{
1860 LogFlow(("VMR3SaveFT: pVM=%p pStreamOps=%p pvSteamOpsUser=%p pfSuspended=%p\n",
1861 pVM, pStreamOps, pvStreamOpsUser, pfSuspended));
1862
1863 /*
1864 * Validate input.
1865 */
1866 AssertPtr(pfSuspended);
1867 *pfSuspended = false;
1868 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1869 AssertReturn(pStreamOps, VERR_INVALID_PARAMETER);
1870
1871 /*
1872 * Join paths with VMR3Teleport.
1873 */
1874 int rc = vmR3SaveTeleport(pVM, 250 /*cMsMaxDowntime*/,
1875 NULL, pStreamOps, pvStreamOpsUser,
1876 SSMAFTER_CONTINUE, NULL, NULL, pfSuspended,
1877 fSkipStateChanges);
1878 LogFlow(("VMR3SaveFT: returns %Rrc (*pfSuspended=%RTbool)\n", rc, *pfSuspended));
1879 return rc;
1880}
1881
1882
1883/**
1884 * Teleport the VM (aka live migration).
1885 *
1886 * @returns VBox status code.
1887 *
1888 * @param pVM The VM which state should be saved.
1889 * @param cMsMaxDowntime The maximum downtime given as milliseconds.
1890 * @param pStreamOps The stream methods.
1891 * @param pvStreamOpsUser The user argument to the stream methods.
1892 * @param pfnProgress Progress callback. Optional.
1893 * @param pvProgressUser User argument for the progress callback.
1894 * @param pfSuspended Set if we suspended the VM.
1895 *
1896 * @thread Non-EMT.
1897 * @vmstate Suspended or Running
1898 * @vmstateto Saving+Suspended or
1899 * RunningLS+SuspendingLS+SuspendedLS+Saving+Suspended.
1900 */
1901VMMR3DECL(int) VMR3Teleport(PVM pVM, uint32_t cMsMaxDowntime, PCSSMSTRMOPS pStreamOps, void *pvStreamOpsUser,
1902 PFNVMPROGRESS pfnProgress, void *pvProgressUser, bool *pfSuspended)
1903{
1904 LogFlow(("VMR3Teleport: pVM=%p cMsMaxDowntime=%u pStreamOps=%p pvStreamOps=%p pfnProgress=%p pvProgressUser=%p\n",
1905 pVM, cMsMaxDowntime, pStreamOps, pvStreamOpsUser, pfnProgress, pvProgressUser));
1906
1907 /*
1908 * Validate input.
1909 */
1910 AssertPtr(pfSuspended);
1911 *pfSuspended = false;
1912 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1913 VM_ASSERT_OTHER_THREAD(pVM);
1914 AssertPtrReturn(pStreamOps, VERR_INVALID_POINTER);
1915 AssertPtrNullReturn(pfnProgress, VERR_INVALID_POINTER);
1916
1917 /*
1918 * Join paths with VMR3Save.
1919 */
1920 int rc = vmR3SaveTeleport(pVM, cMsMaxDowntime,
1921 NULL /*pszFilename*/, pStreamOps, pvStreamOpsUser,
1922 SSMAFTER_TELEPORT, pfnProgress, pvProgressUser, pfSuspended,
1923 false /* fSkipStateChanges */);
1924 LogFlow(("VMR3Teleport: returns %Rrc (*pfSuspended=%RTbool)\n", rc, *pfSuspended));
1925 return rc;
1926}
1927
1928
1929
/**
 * EMT(0) worker for VMR3LoadFromFile and VMR3LoadFromStream.
 *
 * @returns VBox status code.
 *
 * @param   pVM                 The VM handle.
 * @param   pszFilename         The name of the file.  NULL if pStreamOps is used.
 * @param   pStreamOps          The stream methods.  NULL if pszFilename is used.
 * @param   pvStreamOpsUser     The user argument to the stream methods.
 * @param   pfnProgress         Progress callback.  Optional.
 * @param   pvProgressUser      User argument for the progress callback.
 * @param   fTeleporting        Indicates whether we're teleporting or not.
 * @param   fSkipStateChanges   Set if we're supposed to skip state changes (FTM delta case)
 *
 * @thread  EMT.
 */
static DECLCALLBACK(int) vmR3Load(PVM pVM, const char *pszFilename, PCSSMSTRMOPS pStreamOps, void *pvStreamOpsUser,
                                  PFNVMPROGRESS pfnProgress, void *pvProgressUser, bool fTeleporting,
                                  bool fSkipStateChanges)
{
    int rc = VINF_SUCCESS;

    LogFlow(("vmR3Load: pVM=%p pszFilename=%p:{%s} pStreamOps=%p pvStreamOpsUser=%p pfnProgress=%p pvProgressUser=%p fTeleporting=%RTbool\n",
             pVM, pszFilename, pszFilename, pStreamOps, pvStreamOpsUser, pfnProgress, pvProgressUser, fTeleporting));

    /*
     * Validate input (paranoia).
     */
    AssertPtr(pVM);
    AssertPtrNull(pszFilename);
    AssertPtrNull(pStreamOps);
    AssertPtrNull(pfnProgress);

    if (!fSkipStateChanges)
    {
        /*
         * Change the state and perform the load.
         *
         * Always perform a relocation round afterwards to make sure hypervisor
         * selectors and such are correct.
         */
        rc = vmR3TrySetState(pVM, "VMR3Load", 2,
                             VMSTATE_LOADING, VMSTATE_CREATED,
                             VMSTATE_LOADING, VMSTATE_SUSPENDED);
        if (RT_FAILURE(rc))
            return rc;
    }
    /* Remember that the state originates from a teleportation; cleared again
       in the failure path below. */
    pVM->vm.s.fTeleportedAndNotFullyResumedYet = fTeleporting;

    /* Snapshot the error count so we can tell below whether SSMR3Load already
       set a meaningful error message or whether we must provide a generic one. */
    uint32_t cErrorsPriorToSave = VMR3GetErrorCount(pVM);
    rc = SSMR3Load(pVM, pszFilename, pStreamOps, pvStreamOpsUser, SSMAFTER_RESUME, pfnProgress, pvProgressUser);
    if (RT_SUCCESS(rc))
    {
        VMR3Relocate(pVM, 0 /*offDelta*/);
        if (!fSkipStateChanges)
            vmR3SetState(pVM, VMSTATE_SUSPENDED, VMSTATE_LOADING);
    }
    else
    {
        pVM->vm.s.fTeleportedAndNotFullyResumedYet = false;
        if (!fSkipStateChanges)
            vmR3SetState(pVM, VMSTATE_LOAD_FAILURE, VMSTATE_LOADING);

        if (cErrorsPriorToSave == VMR3GetErrorCount(pVM))
            rc = VMSetError(pVM, rc, RT_SRC_POS,
                            N_("Unable to restore the virtual machine's saved state from '%s'. "
                               "It may be damaged or from an older version of VirtualBox. "
                               "Please discard the saved state before starting the virtual machine"),
                            pszFilename);
    }

    return rc;
}
2003
2004
/**
 * Loads a VM state into a newly created VM or a one that is suspended.
 *
 * To restore a saved state on VM startup, call this function and then resume
 * the VM instead of powering it on.
 *
 * @returns VBox status code.
 *
 * @param   pVM             The VM handle.
 * @param   pszFilename     The name of the save state file.
 * @param   pfnProgress     Progress callback.  Optional.
 * @param   pvUser          User argument for the progress callback.
 *
 * @thread      Any thread.
 * @vmstate     Created, Suspended
 * @vmstateto   Loading+Suspended
 */
VMMR3DECL(int) VMR3LoadFromFile(PVM pVM, const char *pszFilename, PFNVMPROGRESS pfnProgress, void *pvUser)
{
    LogFlow(("VMR3LoadFromFile: pVM=%p pszFilename=%p:{%s} pfnProgress=%p pvUser=%p\n",
             pVM, pszFilename, pszFilename, pfnProgress, pvUser));

    /*
     * Validate input.
     */
    VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
    AssertPtrReturn(pszFilename, VERR_INVALID_POINTER);

    /*
     * Forward the request to EMT(0).  No need to setup a rendezvous here
     * since there is no execution taking place when this call is allowed.
     *
     * The NULL pointers are cast to uintptr_t so they are passed with full
     * pointer width through the untyped vararg call.
     */
    int rc = VMR3ReqCallWaitU(pVM->pUVM, 0 /*idDstCpu*/, (PFNRT)vmR3Load, 8,
                              pVM, pszFilename, (uintptr_t)NULL /*pStreamOps*/, (uintptr_t)NULL /*pvStreamOpsUser*/, pfnProgress, pvUser,
                              false /*fTeleporting*/, false /* fSkipStateChanges */);
    LogFlow(("VMR3LoadFromFile: returns %Rrc\n", rc));
    return rc;
}
2043
2044
/**
 * VMR3LoadFromFile for arbitrary file streams.
 *
 * @returns VBox status code.
 *
 * @param   pVM                 The VM handle.
 * @param   pStreamOps          The stream methods.
 * @param   pvStreamOpsUser     The user argument to the stream methods.
 * @param   pfnProgress         Progress callback.  Optional.
 * @param   pvProgressUser      User argument for the progress callback.
 *
 * @thread      Any thread.
 * @vmstate     Created, Suspended
 * @vmstateto   Loading+Suspended
 */
VMMR3DECL(int) VMR3LoadFromStream(PVM pVM, PCSSMSTRMOPS pStreamOps, void *pvStreamOpsUser,
                                  PFNVMPROGRESS pfnProgress, void *pvProgressUser)
{
    LogFlow(("VMR3LoadFromStream: pVM=%p pStreamOps=%p pvStreamOpsUser=%p pfnProgress=%p pvProgressUser=%p\n",
             pVM, pStreamOps, pvStreamOpsUser, pfnProgress, pvProgressUser));

    /*
     * Validate input.
     */
    VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
    AssertPtrReturn(pStreamOps, VERR_INVALID_POINTER);

    /*
     * Forward the request to EMT(0).  No need to setup a rendezvous here
     * since there is no execution taking place when this call is allowed.
     *
     * NOTE(review): fTeleporting is hardcoded to true for every stream load,
     * unlike the file case above - confirm this is intentional for callers
     * that stream a plain saved state rather than teleporting.
     */
    int rc = VMR3ReqCallWaitU(pVM->pUVM, 0 /*idDstCpu*/, (PFNRT)vmR3Load, 8,
                              pVM, (uintptr_t)NULL /*pszFilename*/, pStreamOps, pvStreamOpsUser, pfnProgress, pvProgressUser,
                              true /*fTeleporting*/, false /* fSkipStateChanges */);
    LogFlow(("VMR3LoadFromStream: returns %Rrc\n", rc));
    return rc;
}
2082
2083
2084/**
2085 * VMR3LoadFromFileFT for arbitrary file streams.
2086 *
2087 * @returns VBox status code.
2088 *
2089 * @param pVM The VM handle.
2090 * @param pStreamOps The stream methods.
2091 * @param pvStreamOpsUser The user argument to the stream methods.
2092 * @param pfnProgress Progress callback. Optional.
2093 * @param pvProgressUser User argument for the progress callback.
2094 *
2095 * @thread Any thread.
2096 * @vmstate Created, Suspended
2097 * @vmstateto Loading+Suspended
2098 */
2099VMMR3DECL(int) VMR3LoadFromStreamFT(PVM pVM, PCSSMSTRMOPS pStreamOps, void *pvStreamOpsUser)
2100{
2101 LogFlow(("VMR3LoadFromStreamFT: pVM=%p pStreamOps=%p pvStreamOpsUser=%p\n",
2102 pVM, pStreamOps, pvStreamOpsUser));
2103
2104 /*
2105 * Validate input.
2106 */
2107 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
2108 AssertPtrReturn(pStreamOps, VERR_INVALID_POINTER);
2109
2110 /*
2111 * Forward the request to EMT(0). No need to setup a rendezvous here
2112 * since there is no execution taking place when this call is allowed.
2113 */
2114 int rc = VMR3ReqCallWaitU(pVM->pUVM, 0 /*idDstCpu*/, (PFNRT)vmR3Load, 8,
2115 pVM, (uintptr_t)NULL /*pszFilename*/, pStreamOps, pvStreamOpsUser, NULL, NULL,
2116 true /*fTeleporting*/, true /* fSkipStateChanges */);
2117 LogFlow(("VMR3LoadFromStream: returns %Rrc\n", rc));
2118 return rc;
2119}
2120
/**
 * EMT rendezvous worker for VMR3PowerOff.
 *
 * @returns VERR_VM_INVALID_VM_STATE or VINF_EM_OFF.  (This is a strict
 *          return code, see FNVMMEMTRENDEZVOUS.)
 *
 * @param   pVM     The VM handle.
 * @param   pVCpu   The VMCPU handle of the EMT.
 * @param   pvUser  Ignored.
 */
static DECLCALLBACK(VBOXSTRICTRC) vmR3PowerOff(PVM pVM, PVMCPU pVCpu, void *pvUser)
{
    LogFlow(("vmR3PowerOff: pVM=%p pVCpu=%p/#%u\n", pVM, pVCpu, pVCpu->idCpu));
    Assert(!pvUser); NOREF(pvUser);

    /*
     * The first EMT thru here will change the state to PoweringOff.
     * (The rendezvous is descending, so the highest CPU id arrives first.)
     */
    if (pVCpu->idCpu == pVM->cCpus - 1)
    {
        int rc = vmR3TrySetState(pVM, "VMR3PowerOff", 11,
                                 VMSTATE_POWERING_OFF,    VMSTATE_RUNNING,            /* 1 */
                                 VMSTATE_POWERING_OFF,    VMSTATE_SUSPENDED,          /* 2 */
                                 VMSTATE_POWERING_OFF,    VMSTATE_DEBUGGING,          /* 3 */
                                 VMSTATE_POWERING_OFF,    VMSTATE_LOAD_FAILURE,       /* 4 */
                                 VMSTATE_POWERING_OFF,    VMSTATE_GURU_MEDITATION,    /* 5 */
                                 VMSTATE_POWERING_OFF,    VMSTATE_FATAL_ERROR,        /* 6 */
                                 VMSTATE_POWERING_OFF,    VMSTATE_CREATED,            /* 7 */   /** @todo update the diagram! */
                                 VMSTATE_POWERING_OFF_LS, VMSTATE_RUNNING_LS,         /* 8 */
                                 VMSTATE_POWERING_OFF_LS, VMSTATE_DEBUGGING_LS,       /* 9 */
                                 VMSTATE_POWERING_OFF_LS, VMSTATE_GURU_MEDITATION_LS, /* 10 */
                                 VMSTATE_POWERING_OFF_LS, VMSTATE_FATAL_ERROR_LS);    /* 11 */
        if (RT_FAILURE(rc))
            return rc;
        /* rc presumably is the 1-based index of the transition taken above;
           for index 7 and up a save/teleport operation may be in flight and is
           cancelled here.  NOTE(review): index 7 is the Created case - confirm
           SSMR3Cancel is intended there and not only for the LS states. */
        if (rc >= 7)
            SSMR3Cancel(pVM);
    }

    /*
     * Check the state.
     */
    VMSTATE enmVMState = VMR3GetState(pVM);
    AssertMsgReturn(   enmVMState == VMSTATE_POWERING_OFF
                    || enmVMState == VMSTATE_POWERING_OFF_LS,
                    ("%s\n", VMR3GetStateName(enmVMState)),
                    VERR_VM_INVALID_VM_STATE);

    /*
     * EMT(0) does the actual power off work here *after* all the other EMTs
     * have been thru and entered the STOPPED state.
     */
    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STOPPED);
    if (pVCpu->idCpu == 0)
    {
        /*
         * For debugging purposes, we will log a summary of the guest state at this point.
         */
        if (enmVMState != VMSTATE_GURU_MEDITATION)
        {
            /** @todo SMP support? */
            /** @todo make the state dumping at VMR3PowerOff optional. */
            /* Buffer the release log so the multi-line dump comes out in one piece. */
            bool fOldBuffered = RTLogRelSetBuffering(true /*fBuffered*/);
            RTLogRelPrintf("****************** Guest state at power off ******************\n");
            DBGFR3Info(pVM, "cpumguest", "verbose", DBGFR3InfoLogRelHlp());
            RTLogRelPrintf("***\n");
            DBGFR3Info(pVM, "mode", NULL, DBGFR3InfoLogRelHlp());
            RTLogRelPrintf("***\n");
            DBGFR3Info(pVM, "activetimers", NULL, DBGFR3InfoLogRelHlp());
            RTLogRelPrintf("***\n");
            DBGFR3Info(pVM, "gdt", NULL, DBGFR3InfoLogRelHlp());
            /** @todo dump guest call stack. */
#if 1 // "temporary" while debugging #1589
            RTLogRelPrintf("***\n");
            uint32_t esp = CPUMGetGuestESP(pVCpu);
            /* Real-mode-ish stack dump: only when SS=0 and the stack is in the first 64K. */
            if (    CPUMGetGuestSS(pVCpu) == 0
                &&  esp < _64K)
            {
                uint8_t abBuf[PAGE_SIZE];
                RTLogRelPrintf("***\n"
                               "ss:sp=0000:%04x ", esp);
                uint32_t Start = esp & ~(uint32_t)63;
                int rc = PGMPhysSimpleReadGCPhys(pVM, abBuf, Start, 0x100);
                if (RT_SUCCESS(rc))
                    RTLogRelPrintf("0000:%04x TO 0000:%04x:\n"
                                   "%.*Rhxd\n",
                                   Start, Start + 0x100 - 1,
                                   0x100, abBuf);
                else
                    RTLogRelPrintf("rc=%Rrc\n", rc);

                /* grub ... */
                if (esp < 0x2000 && esp > 0x1fc0)
                {
                    rc = PGMPhysSimpleReadGCPhys(pVM, abBuf, 0x8000, 0x800);
                    if (RT_SUCCESS(rc))
                        RTLogRelPrintf("0000:8000 TO 0000:87ff:\n"
                                       "%.*Rhxd\n",
                                       0x800, abBuf);
                }
                /* microsoft cdrom hang ... */
                if (true)
                {
                    rc = PGMPhysSimpleReadGCPhys(pVM, abBuf, 0x8000, 0x200);
                    if (RT_SUCCESS(rc))
                        RTLogRelPrintf("2000:0000 TO 2000:01ff:\n"
                                       "%.*Rhxd\n",
                                       0x200, abBuf);
                }
            }
#endif
            RTLogRelSetBuffering(fOldBuffered);
            RTLogRelPrintf("************** End of Guest state at power off ***************\n");
        }

        /*
         * Perform the power off notifications and advance the state to
         * Off or OffLS.
         */
        PDMR3PowerOff(pVM);

        /* Re-read the state under the lock; another thread may have raced us. */
        PUVM pUVM = pVM->pUVM;
        RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
        enmVMState = pVM->enmVMState;
        if (enmVMState == VMSTATE_POWERING_OFF_LS)
            vmR3SetStateLocked(pVM, pUVM, VMSTATE_OFF_LS, VMSTATE_POWERING_OFF_LS);
        else
            vmR3SetStateLocked(pVM, pUVM, VMSTATE_OFF,    VMSTATE_POWERING_OFF);
        RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
    }
    return VINF_EM_OFF;
}
2252
2253
2254/**
2255 * Power off the VM.
2256 *
2257 * @returns VBox status code. When called on EMT, this will be a strict status
2258 * code that has to be propagated up the call stack.
2259 *
2260 * @param pVM The handle of the VM to be powered off.
2261 *
2262 * @thread Any thread.
2263 * @vmstate Suspended, Running, Guru Meditation, Load Failure
2264 * @vmstateto Off or OffLS
2265 */
2266VMMR3DECL(int) VMR3PowerOff(PVM pVM)
2267{
2268 LogFlow(("VMR3PowerOff: pVM=%p\n", pVM));
2269 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
2270
2271 /*
2272 * Gather all the EMTs to make sure there are no races before
2273 * changing the VM state.
2274 */
2275 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
2276 vmR3PowerOff, NULL);
2277 LogFlow(("VMR3PowerOff: returns %Rrc\n", rc));
2278 return rc;
2279}
2280
2281
/**
 * Destroys the VM.
 *
 * The VM must be powered off (or never really powered on) to call this
 * function.  The VM handle is destroyed and can no longer be used upon
 * successful return.
 *
 * @returns VBox status code.
 *
 * @param   pVM     The handle of the VM which should be destroyed.
 *
 * @thread      Any none emulation thread.
 * @vmstate     Off, Created
 * @vmstateto   N/A
 */
VMMR3DECL(int) VMR3Destroy(PVM pVM)
{
    LogFlow(("VMR3Destroy: pVM=%p\n", pVM));

    /*
     * Validate input.
     */
    if (!pVM)
        return VERR_INVALID_VM_HANDLE;
    VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
    AssertLogRelReturn(!VM_IS_EMT(pVM), VERR_VM_THREAD_IS_EMT);

    /*
     * Change VM state to destroying and unlink the VM.
     */
    int rc = vmR3TrySetState(pVM, "VMR3Destroy", 1, VMSTATE_DESTROYING, VMSTATE_OFF);
    if (RT_FAILURE(rc))
        return rc;

    /** @todo lock this when we start having multiple machines in a process... */
    /* Unlink the UVM from the global singly linked list. */
    PUVM pUVM = pVM->pUVM; AssertPtr(pUVM);
    if (g_pUVMsHead == pUVM)
        g_pUVMsHead = pUVM->pNext;
    else
    {
        PUVM pPrev = g_pUVMsHead;
        while (pPrev && pPrev->pNext != pUVM)
            pPrev = pPrev->pNext;
        AssertMsgReturn(pPrev, ("pUVM=%p / pVM=%p is INVALID!\n", pUVM, pVM), VERR_INVALID_PARAMETER);

        pPrev->pNext = pUVM->pNext;
    }
    pUVM->pNext = NULL;

    /*
     * Notify registered at destruction listeners.
     */
    vmR3AtDtor(pVM);

    /*
     * Call vmR3Destroy on each of the EMTs ending with EMT(0) doing the bulk
     * of the cleanup.
     */
    /* vmR3Destroy on all EMTs, ending with EMT(0). */
    rc = VMR3ReqCallWaitU(pUVM, VMCPUID_ALL_REVERSE, (PFNRT)vmR3Destroy, 1, pVM);
    AssertLogRelRC(rc);

    /* Wait for EMTs and destroy the UVM. */
    vmR3DestroyUVM(pUVM, 30000);

    LogFlow(("VMR3Destroy: returns VINF_SUCCESS\n"));
    return VINF_SUCCESS;
}
2350
2351
/**
 * Internal destruction worker.
 *
 * This is either called from VMR3Destroy via VMR3ReqCallU or from
 * vmR3EmulationThreadWithId when EMT(0) terminates after having called
 * VMR3Destroy().
 *
 * When called on EMT(0), it will perform the great bulk of the destruction.
 * When called on the other EMTs, they will do nothing and the whole purpose is
 * to return VINF_EM_TERMINATE so they break out of their run loops.
 *
 * @returns VINF_EM_TERMINATE.
 * @param   pVM     The VM handle.
 */
DECLCALLBACK(int) vmR3Destroy(PVM pVM)
{
    PUVM   pUVM  = pVM->pUVM;
    PVMCPU pVCpu = VMMGetCpu(pVM);
    Assert(pVCpu);
    LogFlow(("vmR3Destroy: pVM=%p pUVM=%p pVCpu=%p idCpu=%u\n", pVM, pUVM, pVCpu, pVCpu->idCpu));

    /*
     * Only VCPU 0 does the full cleanup (last).
     */
    if (pVCpu->idCpu == 0)
    {
        /*
         * Dump statistics to the log.
         */
#if defined(VBOX_WITH_STATISTICS) || defined(LOG_ENABLED)
        RTLogFlags(NULL, "nodisabled nobuffered");
#endif
#ifdef VBOX_WITH_STATISTICS
        STAMR3Dump(pVM, "*");
#else
        LogRel(("************************* Statistics *************************\n"));
        STAMR3DumpToReleaseLog(pVM, "*");
        LogRel(("********************* End of statistics **********************\n"));
#endif

        /*
         * Destroy the VM components in reverse-ish init order.  The order of
         * these calls matters; do not reorder without good reason.
         */
        int rc = TMR3Term(pVM);
        AssertRC(rc);
#ifdef VBOX_WITH_DEBUGGER
        rc = DBGCTcpTerminate(pVM, pUVM->vm.s.pvDBGC);
        pUVM->vm.s.pvDBGC = NULL;
#endif
        /* NOTE(review): when VBOX_WITH_DEBUGGER is undefined this AssertRC
           re-checks the TMR3Term status - harmless, but consider moving it
           inside the #ifdef. */
        AssertRC(rc);
        rc = FTMR3Term(pVM);
        AssertRC(rc);
        rc = DBGFR3Term(pVM);
        AssertRC(rc);
        rc = PDMR3Term(pVM);
        AssertRC(rc);
        rc = IEMR3Term(pVM);
        AssertRC(rc);
        rc = EMR3Term(pVM);
        AssertRC(rc);
        rc = IOMR3Term(pVM);
        AssertRC(rc);
        rc = CSAMR3Term(pVM);
        AssertRC(rc);
        rc = PATMR3Term(pVM);
        AssertRC(rc);
        rc = TRPMR3Term(pVM);
        AssertRC(rc);
        rc = SELMR3Term(pVM);
        AssertRC(rc);
        rc = REMR3Term(pVM);
        AssertRC(rc);
        rc = HWACCMR3Term(pVM);
        AssertRC(rc);
        rc = PGMR3Term(pVM);
        AssertRC(rc);
        rc = VMMR3Term(pVM); /* Terminates the ring-0 code! */
        AssertRC(rc);
        rc = CPUMR3Term(pVM);
        AssertRC(rc);
        SSMR3Term(pVM);
        rc = PDMR3CritSectTerm(pVM);
        AssertRC(rc);
        rc = MMR3Term(pVM);
        AssertRC(rc);

        /*
         * We're done, tell the other EMTs to quit.
         */
        ASMAtomicUoWriteBool(&pUVM->vm.s.fTerminateEMT, true);
        ASMAtomicWriteU32(&pVM->fGlobalForcedActions, VM_FF_CHECK_VM_STATE); /* Can't hurt... */
        LogFlow(("vmR3Destroy: returning %Rrc\n", VINF_EM_TERMINATE));
    }
    return VINF_EM_TERMINATE;
}
2447
2448
2449/**
2450 * Destroys the UVM portion.
2451 *
2452 * This is called as the final step in the VM destruction or as the cleanup
2453 * in case of a creation failure.
2454 *
2455 * @param pVM VM Handle.
2456 * @param cMilliesEMTWait The number of milliseconds to wait for the emulation
2457 * threads.
2458 */
2459static void vmR3DestroyUVM(PUVM pUVM, uint32_t cMilliesEMTWait)
2460{
2461 /*
2462 * Signal termination of each the emulation threads and
2463 * wait for them to complete.
2464 */
2465 /* Signal them. */
2466 ASMAtomicUoWriteBool(&pUVM->vm.s.fTerminateEMT, true);
2467 if (pUVM->pVM)
2468 VM_FF_SET(pUVM->pVM, VM_FF_CHECK_VM_STATE); /* Can't hurt... */
2469 for (VMCPUID i = 0; i < pUVM->cCpus; i++)
2470 {
2471 VMR3NotifyGlobalFFU(pUVM, VMNOTIFYFF_FLAGS_DONE_REM);
2472 RTSemEventSignal(pUVM->aCpus[i].vm.s.EventSemWait);
2473 }
2474
2475 /* Wait for them. */
2476 uint64_t NanoTS = RTTimeNanoTS();
2477 RTTHREAD hSelf = RTThreadSelf();
2478 ASMAtomicUoWriteBool(&pUVM->vm.s.fTerminateEMT, true);
2479 for (VMCPUID i = 0; i < pUVM->cCpus; i++)
2480 {
2481 RTTHREAD hThread = pUVM->aCpus[i].vm.s.ThreadEMT;
2482 if ( hThread != NIL_RTTHREAD
2483 && hThread != hSelf)
2484 {
2485 uint64_t cMilliesElapsed = (RTTimeNanoTS() - NanoTS) / 1000000;
2486 int rc2 = RTThreadWait(hThread,
2487 cMilliesElapsed < cMilliesEMTWait
2488 ? RT_MAX(cMilliesEMTWait - cMilliesElapsed, 2000)
2489 : 2000,
2490 NULL);
2491 if (rc2 == VERR_TIMEOUT) /* avoid the assertion when debugging. */
2492 rc2 = RTThreadWait(hThread, 1000, NULL);
2493 AssertLogRelMsgRC(rc2, ("i=%u rc=%Rrc\n", i, rc2));
2494 if (RT_SUCCESS(rc2))
2495 pUVM->aCpus[0].vm.s.ThreadEMT = NIL_RTTHREAD;
2496 }
2497 }
2498
2499 /* Cleanup the semaphores. */
2500 for (VMCPUID i = 0; i < pUVM->cCpus; i++)
2501 {
2502 RTSemEventDestroy(pUVM->aCpus[i].vm.s.EventSemWait);
2503 pUVM->aCpus[i].vm.s.EventSemWait = NIL_RTSEMEVENT;
2504 }
2505
2506 /*
2507 * Free the event semaphores associated with the request packets.
2508 */
2509 unsigned cReqs = 0;
2510 for (unsigned i = 0; i < RT_ELEMENTS(pUVM->vm.s.apReqFree); i++)
2511 {
2512 PVMREQ pReq = pUVM->vm.s.apReqFree[i];
2513 pUVM->vm.s.apReqFree[i] = NULL;
2514 for (; pReq; pReq = pReq->pNext, cReqs++)
2515 {
2516 pReq->enmState = VMREQSTATE_INVALID;
2517 RTSemEventDestroy(pReq->EventSem);
2518 }
2519 }
2520 Assert(cReqs == pUVM->vm.s.cReqFree); NOREF(cReqs);
2521
2522 /*
2523 * Kill all queued requests. (There really shouldn't be any!)
2524 */
2525 for (unsigned i = 0; i < 10; i++)
2526 {
2527 PVMREQ pReqHead = ASMAtomicXchgPtrT(&pUVM->vm.s.pReqs, NULL, PVMREQ);
2528 AssertMsg(!pReqHead, ("This isn't supposed to happen! VMR3Destroy caller has to serialize this.\n"));
2529 if (!pReqHead)
2530 break;
2531 for (PVMREQ pReq = pReqHead; pReq; pReq = pReq->pNext)
2532 {
2533 ASMAtomicUoWriteS32(&pReq->iStatus, VERR_INTERNAL_ERROR);
2534 ASMAtomicWriteSize(&pReq->enmState, VMREQSTATE_INVALID);
2535 RTSemEventSignal(pReq->EventSem);
2536 RTThreadSleep(2);
2537 RTSemEventDestroy(pReq->EventSem);
2538 }
2539 /* give them a chance to respond before we free the request memory. */
2540 RTThreadSleep(32);
2541 }
2542
2543 /*
2544 * Now all queued VCPU requests (again, there shouldn't be any).
2545 */
2546 for (VMCPUID idCpu = 0; idCpu < pUVM->cCpus; idCpu++)
2547 {
2548 PUVMCPU pUVCpu = &pUVM->aCpus[idCpu];
2549
2550 for (unsigned i = 0; i < 10; i++)
2551 {
2552 PVMREQ pReqHead = ASMAtomicXchgPtrT(&pUVCpu->vm.s.pReqs, NULL, PVMREQ);
2553 AssertMsg(!pReqHead, ("This isn't supposed to happen! VMR3Destroy caller has to serialize this.\n"));
2554 if (!pReqHead)
2555 break;
2556 for (PVMREQ pReq = pReqHead; pReq; pReq = pReq->pNext)
2557 {
2558 ASMAtomicUoWriteS32(&pReq->iStatus, VERR_INTERNAL_ERROR);
2559 ASMAtomicWriteSize(&pReq->enmState, VMREQSTATE_INVALID);
2560 RTSemEventSignal(pReq->EventSem);
2561 RTThreadSleep(2);
2562 RTSemEventDestroy(pReq->EventSem);
2563 }
2564 /* give them a chance to respond before we free the request memory. */
2565 RTThreadSleep(32);
2566 }
2567 }
2568
2569 /*
2570 * Make sure the VMMR0.r0 module and whatever else is unloaded.
2571 */
2572 PDMR3TermUVM(pUVM);
2573
2574 /*
2575 * Terminate the support library if initialized.
2576 */
2577 if (pUVM->vm.s.pSession)
2578 {
2579 int rc = SUPR3Term(false /*fForced*/);
2580 AssertRC(rc);
2581 pUVM->vm.s.pSession = NIL_RTR0PTR;
2582 }
2583
2584 /*
2585 * Release the UVM structure reference.
2586 */
2587 VMR3ReleaseUVM(pUVM);
2588
2589 /*
2590 * Clean up and flush logs.
2591 */
2592#ifdef LOG_ENABLED
2593 RTLogSetCustomPrefixCallback(NULL, NULL, NULL);
2594#endif
2595 RTLogFlush(NULL);
2596}
2597
2598
2599/**
2600 * Enumerates the VMs in this process.
2601 *
2602 * @returns Pointer to the next VM.
2603 * @returns NULL when no more VMs.
2604 * @param pVMPrev The previous VM
2605 * Use NULL to start the enumeration.
2606 */
2607VMMR3DECL(PVM) VMR3EnumVMs(PVM pVMPrev)
2608{
2609 /*
2610 * This is quick and dirty. It has issues with VM being
2611 * destroyed during the enumeration.
2612 */
2613 PUVM pNext;
2614 if (pVMPrev)
2615 pNext = pVMPrev->pUVM->pNext;
2616 else
2617 pNext = g_pUVMsHead;
2618 return pNext ? pNext->pVM : NULL;
2619}
2620
2621
2622/**
2623 * Registers an at VM destruction callback.
2624 *
2625 * @returns VBox status code.
2626 * @param pfnAtDtor Pointer to callback.
2627 * @param pvUser User argument.
2628 */
2629VMMR3DECL(int) VMR3AtDtorRegister(PFNVMATDTOR pfnAtDtor, void *pvUser)
2630{
2631 /*
2632 * Check if already registered.
2633 */
2634 VM_ATDTOR_LOCK();
2635 PVMATDTOR pCur = g_pVMAtDtorHead;
2636 while (pCur)
2637 {
2638 if (pfnAtDtor == pCur->pfnAtDtor)
2639 {
2640 VM_ATDTOR_UNLOCK();
2641 AssertMsgFailed(("Already registered at destruction callback %p!\n", pfnAtDtor));
2642 return VERR_INVALID_PARAMETER;
2643 }
2644
2645 /* next */
2646 pCur = pCur->pNext;
2647 }
2648 VM_ATDTOR_UNLOCK();
2649
2650 /*
2651 * Allocate new entry.
2652 */
2653 PVMATDTOR pVMAtDtor = (PVMATDTOR)RTMemAlloc(sizeof(*pVMAtDtor));
2654 if (!pVMAtDtor)
2655 return VERR_NO_MEMORY;
2656
2657 VM_ATDTOR_LOCK();
2658 pVMAtDtor->pfnAtDtor = pfnAtDtor;
2659 pVMAtDtor->pvUser = pvUser;
2660 pVMAtDtor->pNext = g_pVMAtDtorHead;
2661 g_pVMAtDtorHead = pVMAtDtor;
2662 VM_ATDTOR_UNLOCK();
2663
2664 return VINF_SUCCESS;
2665}
2666
2667
2668/**
2669 * Deregisters an at VM destruction callback.
2670 *
2671 * @returns VBox status code.
2672 * @param pfnAtDtor Pointer to callback.
2673 */
2674VMMR3DECL(int) VMR3AtDtorDeregister(PFNVMATDTOR pfnAtDtor)
2675{
2676 /*
2677 * Find it, unlink it and free it.
2678 */
2679 VM_ATDTOR_LOCK();
2680 PVMATDTOR pPrev = NULL;
2681 PVMATDTOR pCur = g_pVMAtDtorHead;
2682 while (pCur)
2683 {
2684 if (pfnAtDtor == pCur->pfnAtDtor)
2685 {
2686 if (pPrev)
2687 pPrev->pNext = pCur->pNext;
2688 else
2689 g_pVMAtDtorHead = pCur->pNext;
2690 pCur->pNext = NULL;
2691 VM_ATDTOR_UNLOCK();
2692
2693 RTMemFree(pCur);
2694 return VINF_SUCCESS;
2695 }
2696
2697 /* next */
2698 pPrev = pCur;
2699 pCur = pCur->pNext;
2700 }
2701 VM_ATDTOR_UNLOCK();
2702
2703 return VERR_INVALID_PARAMETER;
2704}
2705
2706
2707/**
2708 * Walks the list of at VM destructor callbacks.
2709 * @param pVM The VM which is about to be destroyed.
2710 */
2711static void vmR3AtDtor(PVM pVM)
2712{
2713 /*
2714 * Find it, unlink it and free it.
2715 */
2716 VM_ATDTOR_LOCK();
2717 for (PVMATDTOR pCur = g_pVMAtDtorHead; pCur; pCur = pCur->pNext)
2718 pCur->pfnAtDtor(pVM, pCur->pvUser);
2719 VM_ATDTOR_UNLOCK();
2720}
2721
2722
/**
 * Worker which checks integrity of some internal structures.
 * This is yet another attempt to track down that AVL tree crash.
 *
 * No-op in non-strict builds.
 */
static void vmR3CheckIntegrity(PVM pVM)
{
#ifdef VBOX_STRICT
    /* A failed PGM integrity check takes the process down hard (release assertion). */
    int rc = PGMR3CheckIntegrity(pVM);
    AssertReleaseRC(rc);
#endif
}
2734
2735
/**
 * EMT rendezvous worker for VMR3Reset.
 *
 * This is called by the emulation threads as a response to the reset request
 * issued by VMR3Reset().
 *
 * @returns VERR_VM_INVALID_VM_STATE, VINF_EM_RESET or VINF_EM_SUSPEND.  (This
 *          is a strict return code, see FNVMMEMTRENDEZVOUS.)
 *
 * @param   pVM     The VM handle.
 * @param   pVCpu   The VMCPU handle of the EMT.
 * @param   pvUser  Ignored.
 */
static DECLCALLBACK(VBOXSTRICTRC) vmR3Reset(PVM pVM, PVMCPU pVCpu, void *pvUser)
{
    Assert(!pvUser); NOREF(pvUser);

    /*
     * The first EMT will try change the state to resetting.  If this fails,
     * we won't get called for the other EMTs.
     * (Descending rendezvous order, so the highest CPU id arrives first.)
     */
    if (pVCpu->idCpu == pVM->cCpus - 1)
    {
        int rc = vmR3TrySetState(pVM, "VMR3Reset", 3,
                                 VMSTATE_RESETTING,    VMSTATE_RUNNING,
                                 VMSTATE_RESETTING,    VMSTATE_SUSPENDED,
                                 VMSTATE_RESETTING_LS, VMSTATE_RUNNING_LS);
        if (RT_FAILURE(rc))
            return rc;
    }

    /*
     * Check the state.
     */
    VMSTATE enmVMState = VMR3GetState(pVM);
    AssertLogRelMsgReturn(   enmVMState == VMSTATE_RESETTING
                          || enmVMState == VMSTATE_RESETTING_LS,
                          ("%s\n", VMR3GetStateName(enmVMState)),
                          VERR_INTERNAL_ERROR_4);

    /*
     * EMT(0) does the full cleanup *after* all the other EMTs has been
     * thru here and been told to enter the EMSTATE_WAIT_SIPI state.
     *
     * Because there are per-cpu reset routines and order may/is important,
     * the following sequence looks a bit ugly...
     */
    if (pVCpu->idCpu == 0)
        vmR3CheckIntegrity(pVM);

    /* Reset the VCpu state. */
    VMCPU_ASSERT_STATE(pVCpu, VMCPUSTATE_STARTED);

    /* Clear all pending forced actions. */
    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_ALL_MASK & ~VMCPU_FF_REQUEST);

    /*
     * Reset the VM components.
     */
    if (pVCpu->idCpu == 0)
    {
        PATMR3Reset(pVM);
        CSAMR3Reset(pVM);
        PGMR3Reset(pVM);                    /* We clear VM RAM in PGMR3Reset. It's vital PDMR3Reset is executed
                                             * _afterwards_. E.g. ACPI sets up RAM tables during init/reset. */
/** @todo PGMR3Reset should be called after PDMR3Reset really, because we'll trash OS <-> hardware
 * communication structures residing in RAM when done in the other order.  I.e. the device must be
 * quiesced first, then we clear the memory and plan tables. Probably have to make these things
 * explicit in some way, some memory setup pass or something.
 * (Example: DevAHCI may assert if memory is zeroed before it has read the FIS.)
 *
 * @bugref{4467}
 */
        MMR3Reset(pVM);
        PDMR3Reset(pVM);
        SELMR3Reset(pVM);
        TRPMR3Reset(pVM);
        REMR3Reset(pVM);
        IOMR3Reset(pVM);
        CPUMR3Reset(pVM);
    }
    /* Per-CPU reset; runs on every EMT, unlike the VM-wide resets above. */
    CPUMR3ResetCpu(pVCpu);
    if (pVCpu->idCpu == 0)
    {
        TMR3Reset(pVM);
        EMR3Reset(pVM);
        HWACCMR3Reset(pVM);                 /* This must come *after* PATM, CSAM, CPUM, SELM and TRPM. */

#ifdef LOG_ENABLED
        /*
         * Debug logging.
         */
        RTLogPrintf("\n\nThe VM was reset:\n");
        DBGFR3Info(pVM, "cpum", "verbose", NULL);
#endif

        /*
         * Since EMT(0) is the last to go thru here, it will advance the state.
         * When a live save is active, we will move on to SuspendingLS but
         * leave it for VMR3Reset to do the actual suspending due to deadlock risks.
         */
        PUVM pUVM = pVM->pUVM;
        RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
        enmVMState = pVM->enmVMState;
        if (enmVMState == VMSTATE_RESETTING)
        {
            if (pUVM->vm.s.enmPrevVMState == VMSTATE_SUSPENDED)
                vmR3SetStateLocked(pVM, pUVM, VMSTATE_SUSPENDED, VMSTATE_RESETTING);
            else
                vmR3SetStateLocked(pVM, pUVM, VMSTATE_RUNNING,   VMSTATE_RESETTING);
        }
        else
            vmR3SetStateLocked(pVM, pUVM, VMSTATE_SUSPENDING_LS, VMSTATE_RESETTING_LS);
        RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);

        vmR3CheckIntegrity(pVM);

        /*
         * Do the suspend bit as well.
         * It only requires some EMT(0) work at present.
         */
        if (enmVMState != VMSTATE_RESETTING)
        {
            vmR3SuspendDoWork(pVM);
            vmR3SetState(pVM, VMSTATE_SUSPENDED_LS, VMSTATE_SUSPENDING_LS);
        }
    }

    return enmVMState == VMSTATE_RESETTING
         ? VINF_EM_RESET
         : VINF_EM_SUSPEND; /** @todo VINF_EM_SUSPEND has lower priority than VINF_EM_RESET, so fix races. Perhaps add a new code for this combined case. */
}
2868
2869
2870/**
2871 * Reset the current VM.
2872 *
2873 * @returns VBox status code.
2874 * @param pVM VM to reset.
2875 */
2876VMMR3DECL(int) VMR3Reset(PVM pVM)
2877{
2878 LogFlow(("VMR3Reset:\n"));
2879 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
2880
2881 /*
2882 * Gather all the EMTs to make sure there are no races before
2883 * changing the VM state.
2884 */
2885 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
2886 vmR3Reset, NULL);
2887 LogFlow(("VMR3Reset: returns %Rrc\n", rc));
2888 return rc;
2889}
2890
2891
2892/**
2893 * Gets the user mode VM structure pointer given the VM handle.
2894 *
2895 * @returns Pointer to the user mode VM structure on success. NULL if @a pVM is
2896 * invalid (asserted).
2897 * @param pVM The VM handle.
2898 * @sa VMR3GetVM, VMR3RetainUVM
2899 */
2900VMMR3DECL(PUVM) VMR3GetUVM(PVM pVM)
2901{
2902 VM_ASSERT_VALID_EXT_RETURN(pVM, NULL);
2903 return pVM->pUVM;
2904}
2905
2906
2907/**
2908 * Gets the shared VM structure pointer given the pointer to the user mode VM
2909 * structure.
2910 *
2911 * @returns Pointer to the shared VM structure.
2912 * NULL if @a pUVM is invalid (asserted) or if no shared VM structure
2913 * is currently associated with it.
2914 * @param pUVM The user mode VM handle.
2915 * @sa VMR3GetUVM
2916 */
2917VMMR3DECL(PVM) VMR3GetVM(PUVM pUVM)
2918{
2919 UVM_ASSERT_VALID_EXT_RETURN(pUVM, NULL);
2920 return pUVM->pVM;
2921}
2922
2923
2924/**
2925 * Retain the user mode VM handle.
2926 *
2927 * @returns Reference count.
2928 * UINT32_MAX if @a pUVM is invalid.
2929 *
2930 * @param pUVM The user mode VM handle.
2931 * @sa VMR3ReleaseUVM
2932 */
2933VMMR3DECL(uint32_t) VMR3RetainUVM(PUVM pUVM)
2934{
2935 UVM_ASSERT_VALID_EXT_RETURN(pUVM, UINT32_MAX);
2936 uint32_t cRefs = ASMAtomicIncU32(&pUVM->vm.s.cUvmRefs);
2937 AssertMsg(cRefs > 0 && cRefs < _64K, ("%u\n", cRefs));
2938 return cRefs;
2939}
2940
2941
/**
 * Does the final release of the UVM structure.
 *
 * Called by VMR3ReleaseUVM when the reference count hits zero.  Tears down
 * the UVM-side heaps/statistics and frees the structure itself.
 *
 * @param   pUVM    The user mode VM handle.
 */
static void vmR3DoReleaseUVM(PUVM pUVM)
{
    /*
     * Free the UVM.
     */
    Assert(!pUVM->pVM);   /* the shared VM structure must already be gone */

    MMR3TermUVM(pUVM);
    STAMR3TermUVM(pUVM);

    /* Invalidate the magic before freeing so stale handles assert cleanly. */
    ASMAtomicUoWriteU32(&pUVM->u32Magic, UINT32_MAX);
    RTTlsFree(pUVM->vm.s.idxTLS);
    RTMemPageFree(pUVM, RT_OFFSETOF(UVM, aCpus[pUVM->cCpus]));
}
2961
2962
/**
 * Releases a reference to the user mode VM handle.
 *
 * @returns The new reference count, 0 if destroyed.
 *          UINT32_MAX if @a pUVM is invalid.
 *
 * @param   pUVM    The user mode VM handle.  NULL is quietly ignored.
 * @sa      VMR3RetainUVM
 */
VMMR3DECL(uint32_t) VMR3ReleaseUVM(PUVM pUVM)
{
    if (!pUVM)
        return 0;
    UVM_ASSERT_VALID_EXT_RETURN(pUVM, UINT32_MAX);
    uint32_t cRefs = ASMAtomicDecU32(&pUVM->vm.s.cUvmRefs);
    if (!cRefs)
        vmR3DoReleaseUVM(pUVM);   /* last reference: free the structure */
    else
        AssertMsg(cRefs < _64K, ("%u\n", cRefs));   /* catches underflow wrap */
    return cRefs;
}
2984
2985
2986/**
2987 * Gets the VM name.
2988 *
2989 * @returns Pointer to a read-only string containing the name. NULL if called
2990 * too early.
2991 * @param pUVM The user mode VM handle.
2992 */
2993VMMR3DECL(const char *) VMR3GetName(PUVM pUVM)
2994{
2995 UVM_ASSERT_VALID_EXT_RETURN(pUVM, NULL);
2996 return pUVM->vm.s.pszName;
2997}
2998
2999
3000/**
3001 * Gets the VM UUID.
3002 *
3003 * @returns pUuid on success, NULL on failure.
3004 * @param pUVM The user mode VM handle.
3005 * @param pUuid Where to store the UUID.
3006 */
3007VMMR3DECL(PRTUUID) VMR3GetUuid(PUVM pUVM, PRTUUID pUuid)
3008{
3009 UVM_ASSERT_VALID_EXT_RETURN(pUVM, NULL);
3010 AssertPtrReturn(pUuid, NULL);
3011
3012 *pUuid = pUVM->vm.s.Uuid;
3013 return pUuid;
3014}
3015
3016
3017/**
3018 * Gets the current VM state.
3019 *
3020 * @returns The current VM state.
3021 * @param pVM VM handle.
3022 * @thread Any
3023 */
3024VMMR3DECL(VMSTATE) VMR3GetState(PVM pVM)
3025{
3026 VM_ASSERT_VALID_EXT_RETURN(pVM, VMSTATE_TERMINATED);
3027 return pVM->enmVMState;
3028}
3029
3030
3031/**
3032 * Gets the current VM state.
3033 *
3034 * @returns The current VM state.
3035 * @param pUVM The user-mode VM handle.
3036 * @thread Any
3037 */
3038VMMR3DECL(VMSTATE) VMR3GetStateU(PUVM pUVM)
3039{
3040 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VMSTATE_TERMINATED);
3041 if (RT_UNLIKELY(!pUVM->pVM))
3042 return VMSTATE_TERMINATED;
3043 return pUVM->pVM->enmVMState;
3044}
3045
3046
/**
 * Gets the state name string for a VM state.
 *
 * @returns Pointer to the state name. (readonly)
 * @param   enmState        The state.  Unknown values are asserted and yield
 *                          the string "Unknown!\n".
 */
VMMR3DECL(const char *) VMR3GetStateName(VMSTATE enmState)
{
    switch (enmState)
    {
        case VMSTATE_CREATING:          return "CREATING";
        case VMSTATE_CREATED:           return "CREATED";
        case VMSTATE_LOADING:           return "LOADING";
        case VMSTATE_POWERING_ON:       return "POWERING_ON";
        case VMSTATE_RESUMING:          return "RESUMING";
        case VMSTATE_RUNNING:           return "RUNNING";
        case VMSTATE_RUNNING_LS:        return "RUNNING_LS";
        case VMSTATE_RUNNING_FT:        return "RUNNING_FT";
        case VMSTATE_RESETTING:         return "RESETTING";
        case VMSTATE_RESETTING_LS:      return "RESETTING_LS";
        case VMSTATE_SUSPENDED:         return "SUSPENDED";
        case VMSTATE_SUSPENDED_LS:      return "SUSPENDED_LS";
        case VMSTATE_SUSPENDED_EXT_LS:  return "SUSPENDED_EXT_LS";
        case VMSTATE_SUSPENDING:        return "SUSPENDING";
        case VMSTATE_SUSPENDING_LS:     return "SUSPENDING_LS";
        case VMSTATE_SUSPENDING_EXT_LS: return "SUSPENDING_EXT_LS";
        case VMSTATE_SAVING:            return "SAVING";
        case VMSTATE_DEBUGGING:         return "DEBUGGING";
        case VMSTATE_DEBUGGING_LS:      return "DEBUGGING_LS";
        case VMSTATE_POWERING_OFF:      return "POWERING_OFF";
        case VMSTATE_POWERING_OFF_LS:   return "POWERING_OFF_LS";
        case VMSTATE_FATAL_ERROR:       return "FATAL_ERROR";
        case VMSTATE_FATAL_ERROR_LS:    return "FATAL_ERROR_LS";
        case VMSTATE_GURU_MEDITATION:   return "GURU_MEDITATION";
        case VMSTATE_GURU_MEDITATION_LS:return "GURU_MEDITATION_LS";
        case VMSTATE_LOAD_FAILURE:      return "LOAD_FAILURE";
        case VMSTATE_OFF:               return "OFF";
        case VMSTATE_OFF_LS:            return "OFF_LS";
        case VMSTATE_DESTROYING:        return "DESTROYING";
        case VMSTATE_TERMINATED:        return "TERMINATED";

        default:
            AssertMsgFailed(("Unknown state %d\n", enmState));
            return "Unknown!\n";
    }
}
3093
3094
/**
 * Validates the state transition in strict builds.
 *
 * Each case lists the complete set of states legally reachable from the old
 * state; anything else triggers AssertMsgReturn(false).  In non-strict builds
 * this compiles to a function that always returns true.
 *
 * @returns true if valid, false if not.
 *
 * @param   enmStateOld         The old (current) state.
 * @param   enmStateNew         The proposed new state.
 *
 * @remarks The reference for this is found in doc/vp/VMM.vpp, the VMSTATE
 *          diagram (under State Machine Diagram).
 */
static bool vmR3ValidateStateTransition(VMSTATE enmStateOld, VMSTATE enmStateNew)
{
#ifdef VBOX_STRICT
    switch (enmStateOld)
    {
        case VMSTATE_CREATING:
            AssertMsgReturn(enmStateNew == VMSTATE_CREATED, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_CREATED:
            AssertMsgReturn(   enmStateNew == VMSTATE_LOADING
                            || enmStateNew == VMSTATE_POWERING_ON
                            || enmStateNew == VMSTATE_POWERING_OFF
                            , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_LOADING:
            AssertMsgReturn(   enmStateNew == VMSTATE_SUSPENDED
                            || enmStateNew == VMSTATE_LOAD_FAILURE
                            , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_POWERING_ON:
            AssertMsgReturn(   enmStateNew == VMSTATE_RUNNING
                            /*|| enmStateNew == VMSTATE_FATAL_ERROR ?*/
                            , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_RESUMING:
            AssertMsgReturn(   enmStateNew == VMSTATE_RUNNING
                            /*|| enmStateNew == VMSTATE_FATAL_ERROR ?*/
                            , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_RUNNING:
            AssertMsgReturn(   enmStateNew == VMSTATE_POWERING_OFF
                            || enmStateNew == VMSTATE_SUSPENDING
                            || enmStateNew == VMSTATE_RESETTING
                            || enmStateNew == VMSTATE_RUNNING_LS
                            || enmStateNew == VMSTATE_RUNNING_FT
                            || enmStateNew == VMSTATE_DEBUGGING
                            || enmStateNew == VMSTATE_FATAL_ERROR
                            || enmStateNew == VMSTATE_GURU_MEDITATION
                            , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_RUNNING_LS:
            AssertMsgReturn(   enmStateNew == VMSTATE_POWERING_OFF_LS
                            || enmStateNew == VMSTATE_SUSPENDING_LS
                            || enmStateNew == VMSTATE_SUSPENDING_EXT_LS
                            || enmStateNew == VMSTATE_RESETTING_LS
                            || enmStateNew == VMSTATE_RUNNING
                            || enmStateNew == VMSTATE_DEBUGGING_LS
                            || enmStateNew == VMSTATE_FATAL_ERROR_LS
                            || enmStateNew == VMSTATE_GURU_MEDITATION_LS
                            , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_RUNNING_FT:
            AssertMsgReturn(   enmStateNew == VMSTATE_POWERING_OFF
                            || enmStateNew == VMSTATE_FATAL_ERROR
                            || enmStateNew == VMSTATE_GURU_MEDITATION
                            , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_RESETTING:
            AssertMsgReturn(enmStateNew == VMSTATE_RUNNING, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_RESETTING_LS:
            AssertMsgReturn(   enmStateNew == VMSTATE_SUSPENDING_LS
                            , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_SUSPENDING:
            AssertMsgReturn(enmStateNew == VMSTATE_SUSPENDED, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_SUSPENDING_LS:
            AssertMsgReturn(   enmStateNew == VMSTATE_SUSPENDING
                            || enmStateNew == VMSTATE_SUSPENDED_LS
                            , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_SUSPENDING_EXT_LS:
            AssertMsgReturn(   enmStateNew == VMSTATE_SUSPENDING
                            || enmStateNew == VMSTATE_SUSPENDED_EXT_LS
                            , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_SUSPENDED:
            AssertMsgReturn(   enmStateNew == VMSTATE_POWERING_OFF
                            || enmStateNew == VMSTATE_SAVING
                            || enmStateNew == VMSTATE_RESETTING
                            || enmStateNew == VMSTATE_RESUMING
                            || enmStateNew == VMSTATE_LOADING
                            , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_SUSPENDED_LS:
            AssertMsgReturn(   enmStateNew == VMSTATE_SUSPENDED
                            || enmStateNew == VMSTATE_SAVING
                            , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_SUSPENDED_EXT_LS:
            AssertMsgReturn(   enmStateNew == VMSTATE_SUSPENDED
                            || enmStateNew == VMSTATE_SAVING
                            , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_SAVING:
            AssertMsgReturn(enmStateNew == VMSTATE_SUSPENDED, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_DEBUGGING:
            AssertMsgReturn(   enmStateNew == VMSTATE_RUNNING
                            || enmStateNew == VMSTATE_POWERING_OFF
                            , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_DEBUGGING_LS:
            AssertMsgReturn(   enmStateNew == VMSTATE_DEBUGGING
                            || enmStateNew == VMSTATE_RUNNING_LS
                            || enmStateNew == VMSTATE_POWERING_OFF_LS
                            , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_POWERING_OFF:
            AssertMsgReturn(enmStateNew == VMSTATE_OFF, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_POWERING_OFF_LS:
            AssertMsgReturn(   enmStateNew == VMSTATE_POWERING_OFF
                            || enmStateNew == VMSTATE_OFF_LS
                            , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_OFF:
            AssertMsgReturn(enmStateNew == VMSTATE_DESTROYING, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_OFF_LS:
            AssertMsgReturn(enmStateNew == VMSTATE_OFF, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_FATAL_ERROR:
            AssertMsgReturn(enmStateNew == VMSTATE_POWERING_OFF, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_FATAL_ERROR_LS:
            AssertMsgReturn(   enmStateNew == VMSTATE_FATAL_ERROR
                            || enmStateNew == VMSTATE_POWERING_OFF_LS
                            , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_GURU_MEDITATION:
            AssertMsgReturn(   enmStateNew == VMSTATE_DEBUGGING
                            || enmStateNew == VMSTATE_POWERING_OFF
                            , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_GURU_MEDITATION_LS:
            AssertMsgReturn(   enmStateNew == VMSTATE_GURU_MEDITATION
                            || enmStateNew == VMSTATE_DEBUGGING_LS
                            || enmStateNew == VMSTATE_POWERING_OFF_LS
                            , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_LOAD_FAILURE:
            AssertMsgReturn(enmStateNew == VMSTATE_POWERING_OFF, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_DESTROYING:
            AssertMsgReturn(enmStateNew == VMSTATE_TERMINATED, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_TERMINATED: /* terminal state: no transitions out of it */
        default:
            AssertMsgFailedReturn(("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;
    }
#endif /* VBOX_STRICT */
    return true;
}
3291
3292
/**
 * Does the state change callouts.
 *
 * The caller owns the AtStateCritSect.
 *
 * @param   pVM                 The VM handle.
 * @param   pUVM                The UVM handle.
 * @param   enmStateNew         The New state.
 * @param   enmStateOld         The old state.
 */
static void vmR3DoAtState(PVM pVM, PUVM pUVM, VMSTATE enmStateNew, VMSTATE enmStateOld)
{
    LogRel(("Changing the VM state from '%s' to '%s'.\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)));

    for (PVMATSTATE pCur = pUVM->vm.s.pAtState; pCur; pCur = pCur->pNext)
    {
        pCur->pfnAtState(pVM, enmStateNew, enmStateOld, pCur->pvUser);
        /* A callback is allowed to push the VM into DESTROYING; stop walking
           the list then instead of asserting below. */
        if (    enmStateNew != VMSTATE_DESTROYING
            &&  pVM->enmVMState == VMSTATE_DESTROYING)
            break;
        AssertMsg(pVM->enmVMState == enmStateNew,
                  ("You are not allowed to change the state while in the change callback, except "
                   "from destroying the VM. There are restrictions in the way the state changes "
                   "are propagated up to the EM execution loop and it makes the program flow very "
                   "difficult to follow. (%s, expected %s, old %s)\n",
                   VMR3GetStateName(pVM->enmVMState), VMR3GetStateName(enmStateNew),
                   VMR3GetStateName(enmStateOld)));
    }
}
3322
3323
/**
 * Sets the current VM state, with the AtStatCritSect already entered.
 *
 * Validates the transition (strict builds), records the previous state,
 * updates pVM->enmVMState and notifies the at-state callbacks.
 *
 * @param   pVM                 The VM handle.
 * @param   pUVM                The UVM handle.
 * @param   enmStateNew         The new state.
 * @param   enmStateOld         The old state.
 */
static void vmR3SetStateLocked(PVM pVM, PUVM pUVM, VMSTATE enmStateNew, VMSTATE enmStateOld)
{
    vmR3ValidateStateTransition(enmStateOld, enmStateNew);   /* asserts in strict builds only */

    AssertMsg(pVM->enmVMState == enmStateOld,
              ("%s != %s\n", VMR3GetStateName(pVM->enmVMState), VMR3GetStateName(enmStateOld)));
    pUVM->vm.s.enmPrevVMState = enmStateOld;   /* consulted by vmR3Reset when leaving RESETTING */
    pVM->enmVMState = enmStateNew;
    VM_FF_CLEAR(pVM, VM_FF_CHECK_VM_STATE);

    vmR3DoAtState(pVM, pUVM, enmStateNew, enmStateOld);
}
3344
3345
/**
 * Sets the current VM state.
 *
 * @param   pVM             VM handle.
 * @param   enmStateNew     The new state.
 * @param   enmStateOld     The old state (for asserting only).
 */
static void vmR3SetState(PVM pVM, VMSTATE enmStateNew, VMSTATE enmStateOld)
{
    PUVM pUVM = pVM->pUVM;
    RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);

    /* enmStateOld is only a sanity check; the transition is done from
       whatever the current state actually is at this point. */
    AssertMsg(pVM->enmVMState == enmStateOld,
              ("%s != %s\n", VMR3GetStateName(pVM->enmVMState), VMR3GetStateName(enmStateOld)));
    vmR3SetStateLocked(pVM, pUVM, enmStateNew, pVM->enmVMState);

    RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
}
3364
3365
3366/**
3367 * Tries to perform a state transition.
3368 *
3369 * @returns The 1-based ordinal of the succeeding transition.
3370 * VERR_VM_INVALID_VM_STATE and Assert+LogRel on failure.
3371 *
3372 * @param pVM The VM handle.
3373 * @param pszWho Who is trying to change it.
3374 * @param cTransitions The number of transitions in the ellipsis.
3375 * @param ... Transition pairs; new, old.
3376 */
3377static int vmR3TrySetState(PVM pVM, const char *pszWho, unsigned cTransitions, ...)
3378{
3379 va_list va;
3380 VMSTATE enmStateNew = VMSTATE_CREATED;
3381 VMSTATE enmStateOld = VMSTATE_CREATED;
3382
3383#ifdef VBOX_STRICT
3384 /*
3385 * Validate the input first.
3386 */
3387 va_start(va, cTransitions);
3388 for (unsigned i = 0; i < cTransitions; i++)
3389 {
3390 enmStateNew = (VMSTATE)va_arg(va, /*VMSTATE*/int);
3391 enmStateOld = (VMSTATE)va_arg(va, /*VMSTATE*/int);
3392 vmR3ValidateStateTransition(enmStateOld, enmStateNew);
3393 }
3394 va_end(va);
3395#endif
3396
3397 /*
3398 * Grab the lock and see if any of the proposed transitions works out.
3399 */
3400 va_start(va, cTransitions);
3401 int rc = VERR_VM_INVALID_VM_STATE;
3402 PUVM pUVM = pVM->pUVM;
3403 RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
3404
3405 VMSTATE enmStateCur = pVM->enmVMState;
3406
3407 for (unsigned i = 0; i < cTransitions; i++)
3408 {
3409 enmStateNew = (VMSTATE)va_arg(va, /*VMSTATE*/int);
3410 enmStateOld = (VMSTATE)va_arg(va, /*VMSTATE*/int);
3411 if (enmStateCur == enmStateOld)
3412 {
3413 vmR3SetStateLocked(pVM, pUVM, enmStateNew, enmStateOld);
3414 rc = i + 1;
3415 break;
3416 }
3417 }
3418
3419 if (RT_FAILURE(rc))
3420 {
3421 /*
3422 * Complain about it.
3423 */
3424 if (cTransitions == 1)
3425 {
3426 LogRel(("%s: %s -> %s failed, because the VM state is actually %s\n",
3427 pszWho, VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew), VMR3GetStateName(enmStateCur)));
3428 VMSetError(pVM, VERR_VM_INVALID_VM_STATE, RT_SRC_POS,
3429 N_("%s failed because the VM state is %s instead of %s"),
3430 pszWho, VMR3GetStateName(enmStateCur), VMR3GetStateName(enmStateOld));
3431 AssertMsgFailed(("%s: %s -> %s failed, because the VM state is actually %s\n",
3432 pszWho, VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew), VMR3GetStateName(enmStateCur)));
3433 }
3434 else
3435 {
3436 va_end(va);
3437 va_start(va, cTransitions);
3438 LogRel(("%s:\n", pszWho));
3439 for (unsigned i = 0; i < cTransitions; i++)
3440 {
3441 enmStateNew = (VMSTATE)va_arg(va, /*VMSTATE*/int);
3442 enmStateOld = (VMSTATE)va_arg(va, /*VMSTATE*/int);
3443 LogRel(("%s%s -> %s",
3444 i ? ", " : " ", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)));
3445 }
3446 LogRel((" failed, because the VM state is actually %s\n", VMR3GetStateName(enmStateCur)));
3447 VMSetError(pVM, VERR_VM_INVALID_VM_STATE, RT_SRC_POS,
3448 N_("%s failed because the current VM state, %s, was not found in the state transition table"),
3449 pszWho, VMR3GetStateName(enmStateCur), VMR3GetStateName(enmStateOld));
3450 AssertMsgFailed(("%s - state=%s, see release log for full details. Check the cTransitions passed us.\n",
3451 pszWho, VMR3GetStateName(enmStateCur)));
3452 }
3453 }
3454
3455 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
3456 va_end(va);
3457 Assert(rc > 0 || rc < 0);
3458 return rc;
3459}
3460
3461
3462/**
3463 * Flag a guru meditation ... a hack.
3464 *
3465 * @param pVM The VM handle
3466 *
3467 * @todo Rewrite this part. The guru meditation should be flagged
3468 * immediately by the VMM and not by VMEmt.cpp when it's all over.
3469 */
3470void vmR3SetGuruMeditation(PVM pVM)
3471{
3472 PUVM pUVM = pVM->pUVM;
3473 RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
3474
3475 VMSTATE enmStateCur = pVM->enmVMState;
3476 if (enmStateCur == VMSTATE_RUNNING)
3477 vmR3SetStateLocked(pVM, pUVM, VMSTATE_GURU_MEDITATION, VMSTATE_RUNNING);
3478 else if (enmStateCur == VMSTATE_RUNNING_LS)
3479 {
3480 vmR3SetStateLocked(pVM, pUVM, VMSTATE_GURU_MEDITATION_LS, VMSTATE_RUNNING_LS);
3481 SSMR3Cancel(pVM);
3482 }
3483
3484 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
3485}
3486
3487
3488/**
3489 * Called by vmR3EmulationThreadWithId just before the VM structure is freed.
3490 *
3491 * @param pVM The VM handle.
3492 */
3493void vmR3SetTerminated(PVM pVM)
3494{
3495 vmR3SetState(pVM, VMSTATE_TERMINATED, VMSTATE_DESTROYING);
3496}
3497
3498
3499/**
3500 * Checks if the VM was teleported and hasn't been fully resumed yet.
3501 *
3502 * This applies to both sides of the teleportation since we may leave a working
3503 * clone behind and the user is allowed to resume this...
3504 *
3505 * @returns true / false.
3506 * @param pVM The VM handle.
3507 * @thread Any thread.
3508 */
3509VMMR3DECL(bool) VMR3TeleportedAndNotFullyResumedYet(PVM pVM)
3510{
3511 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
3512 return pVM->vm.s.fTeleportedAndNotFullyResumedYet;
3513}
3514
3515
3516/**
3517 * Registers a VM state change callback.
3518 *
3519 * You are not allowed to call any function which changes the VM state from a
3520 * state callback.
3521 *
3522 * @returns VBox status code.
3523 * @param pVM VM handle.
3524 * @param pfnAtState Pointer to callback.
3525 * @param pvUser User argument.
3526 * @thread Any.
3527 */
3528VMMR3DECL(int) VMR3AtStateRegister(PVM pVM, PFNVMATSTATE pfnAtState, void *pvUser)
3529{
3530 LogFlow(("VMR3AtStateRegister: pfnAtState=%p pvUser=%p\n", pfnAtState, pvUser));
3531
3532 /*
3533 * Validate input.
3534 */
3535 AssertPtrReturn(pfnAtState, VERR_INVALID_PARAMETER);
3536 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
3537
3538 /*
3539 * Allocate a new record.
3540 */
3541 PUVM pUVM = pVM->pUVM;
3542 PVMATSTATE pNew = (PVMATSTATE)MMR3HeapAllocU(pUVM, MM_TAG_VM, sizeof(*pNew));
3543 if (!pNew)
3544 return VERR_NO_MEMORY;
3545
3546 /* fill */
3547 pNew->pfnAtState = pfnAtState;
3548 pNew->pvUser = pvUser;
3549
3550 /* insert */
3551 RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
3552 pNew->pNext = *pUVM->vm.s.ppAtStateNext;
3553 *pUVM->vm.s.ppAtStateNext = pNew;
3554 pUVM->vm.s.ppAtStateNext = &pNew->pNext;
3555 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
3556
3557 return VINF_SUCCESS;
3558}
3559
3560
/**
 * Deregisters a VM state change callback.
 *
 * Both the callback pointer and the user argument must match the values
 * passed to VMR3AtStateRegister.
 *
 * @returns VBox status code.  VERR_FILE_NOT_FOUND if no matching
 *          registration exists (asserted).
 * @param   pVM             VM handle.
 * @param   pfnAtState      Pointer to callback.
 * @param   pvUser          User argument.
 * @thread  Any.
 */
VMMR3DECL(int) VMR3AtStateDeregister(PVM pVM, PFNVMATSTATE pfnAtState, void *pvUser)
{
    LogFlow(("VMR3AtStateDeregister: pfnAtState=%p pvUser=%p\n", pfnAtState, pvUser));

    /*
     * Validate input.
     */
    AssertPtrReturn(pfnAtState, VERR_INVALID_PARAMETER);
    VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);

    PUVM pUVM = pVM->pUVM;
    RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);

    /*
     * Search the list for the entry.
     */
    PVMATSTATE pPrev = NULL;
    PVMATSTATE pCur = pUVM->vm.s.pAtState;
    while (     pCur
           &&   (   pCur->pfnAtState != pfnAtState
                 || pCur->pvUser != pvUser))
    {
        pPrev = pCur;
        pCur = pCur->pNext;
    }
    if (!pCur)
    {
        AssertMsgFailed(("pfnAtState=%p was not found\n", pfnAtState));
        RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
        return VERR_FILE_NOT_FOUND;
    }

    /*
     * Unlink it.  ppAtStateNext caches the tail link for O(1) append, so it
     * must be repointed whenever the last entry is removed.
     */
    if (pPrev)
    {
        pPrev->pNext = pCur->pNext;
        if (!pCur->pNext)
            pUVM->vm.s.ppAtStateNext = &pPrev->pNext;
    }
    else
    {
        pUVM->vm.s.pAtState = pCur->pNext;
        if (!pCur->pNext)
            pUVM->vm.s.ppAtStateNext = &pUVM->vm.s.pAtState;
    }

    RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);

    /*
     * Free it.
     */
    pCur->pfnAtState = NULL;   /* poison the record before freeing */
    pCur->pNext = NULL;
    MMR3HeapFree(pCur);

    return VINF_SUCCESS;
}
3629
3630
3631/**
3632 * Registers a VM error callback.
3633 *
3634 * @returns VBox status code.
3635 * @param pVM The VM handle.
3636 * @param pfnAtError Pointer to callback.
3637 * @param pvUser User argument.
3638 * @thread Any.
3639 */
3640VMMR3DECL(int) VMR3AtErrorRegister(PVM pVM, PFNVMATERROR pfnAtError, void *pvUser)
3641{
3642 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
3643 return VMR3AtErrorRegisterU(pVM->pUVM, pfnAtError, pvUser);
3644}
3645
3646
3647/**
3648 * Registers a VM error callback.
3649 *
3650 * @returns VBox status code.
3651 * @param pUVM The VM handle.
3652 * @param pfnAtError Pointer to callback.
3653 * @param pvUser User argument.
3654 * @thread Any.
3655 */
3656VMMR3DECL(int) VMR3AtErrorRegisterU(PUVM pUVM, PFNVMATERROR pfnAtError, void *pvUser)
3657{
3658 LogFlow(("VMR3AtErrorRegister: pfnAtError=%p pvUser=%p\n", pfnAtError, pvUser));
3659
3660 /*
3661 * Validate input.
3662 */
3663 AssertPtrReturn(pfnAtError, VERR_INVALID_PARAMETER);
3664 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
3665
3666 /*
3667 * Allocate a new record.
3668 */
3669 PVMATERROR pNew = (PVMATERROR)MMR3HeapAllocU(pUVM, MM_TAG_VM, sizeof(*pNew));
3670 if (!pNew)
3671 return VERR_NO_MEMORY;
3672
3673 /* fill */
3674 pNew->pfnAtError = pfnAtError;
3675 pNew->pvUser = pvUser;
3676
3677 /* insert */
3678 RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
3679 pNew->pNext = *pUVM->vm.s.ppAtErrorNext;
3680 *pUVM->vm.s.ppAtErrorNext = pNew;
3681 pUVM->vm.s.ppAtErrorNext = &pNew->pNext;
3682 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
3683
3684 return VINF_SUCCESS;
3685}
3686
3687
/**
 * Deregisters a VM error callback.
 *
 * Both the callback pointer and the user argument must match the values
 * passed to VMR3AtErrorRegister / VMR3AtErrorRegisterU.
 *
 * @returns VBox status code.  VERR_FILE_NOT_FOUND if no matching
 *          registration exists (asserted).
 * @param   pVM             The VM handle.
 * @param   pfnAtError      Pointer to callback.
 * @param   pvUser          User argument.
 * @thread  Any.
 */
VMMR3DECL(int) VMR3AtErrorDeregister(PVM pVM, PFNVMATERROR pfnAtError, void *pvUser)
{
    LogFlow(("VMR3AtErrorDeregister: pfnAtError=%p pvUser=%p\n", pfnAtError, pvUser));

    /*
     * Validate input.
     */
    AssertPtrReturn(pfnAtError, VERR_INVALID_PARAMETER);
    VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);

    PUVM pUVM = pVM->pUVM;
    RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);

    /*
     * Search the list for the entry.
     */
    PVMATERROR pPrev = NULL;
    PVMATERROR pCur = pUVM->vm.s.pAtError;
    while (     pCur
           &&   (   pCur->pfnAtError != pfnAtError
                 || pCur->pvUser != pvUser))
    {
        pPrev = pCur;
        pCur = pCur->pNext;
    }
    if (!pCur)
    {
        AssertMsgFailed(("pfnAtError=%p was not found\n", pfnAtError));
        RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
        return VERR_FILE_NOT_FOUND;
    }

    /*
     * Unlink it.  ppAtErrorNext caches the tail link for O(1) append, so it
     * must be repointed whenever the last entry is removed.
     */
    if (pPrev)
    {
        pPrev->pNext = pCur->pNext;
        if (!pCur->pNext)
            pUVM->vm.s.ppAtErrorNext = &pPrev->pNext;
    }
    else
    {
        pUVM->vm.s.pAtError = pCur->pNext;
        if (!pCur->pNext)
            pUVM->vm.s.ppAtErrorNext = &pUVM->vm.s.pAtError;
    }

    RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);

    /*
     * Free it.
     */
    pCur->pfnAtError = NULL;   /* poison the record before freeing */
    pCur->pNext = NULL;
    MMR3HeapFree(pCur);

    return VINF_SUCCESS;
}
3756
3757
/**
 * Ellipsis to va_list wrapper for calling pfnAtError.
 *
 * @param   pVM         The VM handle.
 * @param   pCur        The registration record whose callback is invoked.
 * @param   rc          The VBox status code to report.
 * @param   RT_SRC_POS_DECL  The source position of the error.
 * @param   pszFormat   Format string; the callback receives it together with
 *                      the packaged va_list.
 * @param   ...         Format arguments.
 */
static void vmR3SetErrorWorkerDoCall(PVM pVM, PVMATERROR pCur, int rc, RT_SRC_POS_DECL, const char *pszFormat, ...)
{
    va_list va;
    va_start(va, pszFormat);
    pCur->pfnAtError(pVM, pCur->pvUser, rc, RT_SRC_POS_ARGS, pszFormat, va);
    va_end(va);
}
3768
3769
3770/**
3771 * This is a worker function for GC and Ring-0 calls to VMSetError and VMSetErrorV.
3772 * The message is found in VMINT.
3773 *
3774 * @param pVM The VM handle.
3775 * @thread EMT.
3776 */
3777VMMR3DECL(void) VMR3SetErrorWorker(PVM pVM)
3778{
3779 VM_ASSERT_EMT(pVM);
3780 AssertReleaseMsgFailed(("And we have a winner! You get to implement Ring-0 and GC VMSetErrorV! Contracts!\n"));
3781
3782 /*
3783 * Unpack the error (if we managed to format one).
3784 */
3785 PVMERROR pErr = pVM->vm.s.pErrorR3;
3786 const char *pszFile = NULL;
3787 const char *pszFunction = NULL;
3788 uint32_t iLine = 0;
3789 const char *pszMessage;
3790 int32_t rc = VERR_MM_HYPER_NO_MEMORY;
3791 if (pErr)
3792 {
3793 AssertCompile(sizeof(const char) == sizeof(uint8_t));
3794 if (pErr->offFile)
3795 pszFile = (const char *)pErr + pErr->offFile;
3796 iLine = pErr->iLine;
3797 if (pErr->offFunction)
3798 pszFunction = (const char *)pErr + pErr->offFunction;
3799 if (pErr->offMessage)
3800 pszMessage = (const char *)pErr + pErr->offMessage;
3801 else
3802 pszMessage = "No message!";
3803 }
3804 else
3805 pszMessage = "No message! (Failed to allocate memory to put the error message in!)";
3806
3807 /*
3808 * Call the at error callbacks.
3809 */
3810 PUVM pUVM = pVM->pUVM;
3811 RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
3812 ASMAtomicIncU32(&pUVM->vm.s.cRuntimeErrors);
3813 for (PVMATERROR pCur = pUVM->vm.s.pAtError; pCur; pCur = pCur->pNext)
3814 vmR3SetErrorWorkerDoCall(pVM, pCur, rc, RT_SRC_POS_ARGS, "%s", pszMessage);
3815 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
3816}
3817
3818
3819/**
3820 * Gets the number of errors raised via VMSetError.
3821 *
3822 * This can be used avoid double error messages.
3823 *
3824 * @returns The error count.
3825 * @param pVM The VM handle.
3826 */
3827VMMR3DECL(uint32_t) VMR3GetErrorCount(PVM pVM)
3828{
3829 AssertPtrReturn(pVM, 0);
3830 return VMR3GetErrorCountU(pVM->pUVM);
3831}
3832
3833
/**
 * Gets the number of errors raised via VMSetError.
 *
 * This can be used avoid double error messages.
 *
 * @returns The error count.
 * @param   pUVM            The user mode VM handle.
 */
VMMR3DECL(uint32_t) VMR3GetErrorCountU(PUVM pUVM)
{
    AssertPtrReturn(pUVM, 0);
    AssertReturn(pUVM->u32Magic == UVM_MAGIC, 0);
    return pUVM->vm.s.cErrors;
}
3848
3849
/**
 * Creation time wrapper for vmR3SetErrorUV.
 *
 * @returns rc.
 * @param   pUVM            Pointer to the user mode VM structure.
 * @param   rc              The VBox status code.
 * @param   RT_SRC_POS_DECL The source position of this error.
 * @param   pszFormat       Format string.
 * @param   ...             The arguments.
 * @thread  Any thread.
 */
static int vmR3SetErrorU(PUVM pUVM, int rc, RT_SRC_POS_DECL, const char *pszFormat, ...)
{
    va_list va;
    va_start(va, pszFormat);
    /* RT_SRC_POS_DECL expands to pszFile, iLine and pszFunction, which is
       why those names are in scope here. */
    vmR3SetErrorUV(pUVM, rc, pszFile, iLine, pszFunction, pszFormat, &va);
    va_end(va);
    return rc;
}
3869
3870
/**
 * Worker which calls everyone listening to the VM error messages.
 *
 * Logs the error (release log, and debug log when enabled), copies the
 * message into the shared VM structure when one exists, and then invokes
 * every registered at-error callback with its own va_list copy.
 *
 * @param   pUVM            Pointer to the user mode VM structure.
 * @param   rc              The VBox status code.
 * @param   RT_SRC_POS_DECL The source position of this error.
 * @param   pszFormat       Format string.
 * @param   pArgs           Pointer to the format arguments.
 * @thread  EMT
 */
DECLCALLBACK(void) vmR3SetErrorUV(PUVM pUVM, int rc, RT_SRC_POS_DECL, const char *pszFormat, va_list *pArgs)
{
    /*
     * Log the error.
     */
    va_list va3;
    va_copy(va3, *pArgs);   /* each consumer needs a private copy of the args */
    RTLogRelPrintf("VMSetError: %s(%d) %s; rc=%Rrc\n"
                   "VMSetError: %N\n",
                   pszFile, iLine, pszFunction, rc,
                   pszFormat, &va3);
    va_end(va3);

#ifdef LOG_ENABLED
    va_copy(va3, *pArgs);
    RTLogPrintf("VMSetError: %s(%d) %s; rc=%Rrc\n"
                "%N\n",
                pszFile, iLine, pszFunction, rc,
                pszFormat, &va3);
    va_end(va3);
#endif

    /*
     * Make a copy of the message.
     */
    if (pUVM->pVM)
        vmSetErrorCopy(pUVM->pVM, rc, RT_SRC_POS_ARGS, pszFormat, *pArgs);

    /*
     * Call the at error callbacks.
     */
    bool fCalledSomeone = false;
    RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
    ASMAtomicIncU32(&pUVM->vm.s.cErrors);
    for (PVMATERROR pCur = pUVM->vm.s.pAtError; pCur; pCur = pCur->pNext)
    {
        va_list va2;
        va_copy(va2, *pArgs);   /* fresh copy per callback; they may consume it */
        pCur->pfnAtError(pUVM->pVM, pCur->pvUser, rc, RT_SRC_POS_ARGS, pszFormat, va2);
        va_end(va2);
        fCalledSomeone = true;
    }
    RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
}
3925
3926
3927/**
3928 * Registers a VM runtime error callback.
3929 *
3930 * @returns VBox status code.
3931 * @param pVM The VM handle.
3932 * @param pfnAtRuntimeError Pointer to callback.
3933 * @param pvUser User argument.
3934 * @thread Any.
3935 */
3936VMMR3DECL(int) VMR3AtRuntimeErrorRegister(PVM pVM, PFNVMATRUNTIMEERROR pfnAtRuntimeError, void *pvUser)
3937{
3938 LogFlow(("VMR3AtRuntimeErrorRegister: pfnAtRuntimeError=%p pvUser=%p\n", pfnAtRuntimeError, pvUser));
3939
3940 /*
3941 * Validate input.
3942 */
3943 AssertPtrReturn(pfnAtRuntimeError, VERR_INVALID_PARAMETER);
3944 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
3945
3946 /*
3947 * Allocate a new record.
3948 */
3949 PUVM pUVM = pVM->pUVM;
3950 PVMATRUNTIMEERROR pNew = (PVMATRUNTIMEERROR)MMR3HeapAllocU(pUVM, MM_TAG_VM, sizeof(*pNew));
3951 if (!pNew)
3952 return VERR_NO_MEMORY;
3953
3954 /* fill */
3955 pNew->pfnAtRuntimeError = pfnAtRuntimeError;
3956 pNew->pvUser = pvUser;
3957
3958 /* insert */
3959 RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
3960 pNew->pNext = *pUVM->vm.s.ppAtRuntimeErrorNext;
3961 *pUVM->vm.s.ppAtRuntimeErrorNext = pNew;
3962 pUVM->vm.s.ppAtRuntimeErrorNext = &pNew->pNext;
3963 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
3964
3965 return VINF_SUCCESS;
3966}
3967
3968
3969/**
3970 * Deregisters a VM runtime error callback.
3971 *
3972 * @returns VBox status code.
3973 * @param pVM The VM handle.
3974 * @param pfnAtRuntimeError Pointer to callback.
3975 * @param pvUser User argument.
3976 * @thread Any.
3977 */
3978VMMR3DECL(int) VMR3AtRuntimeErrorDeregister(PVM pVM, PFNVMATRUNTIMEERROR pfnAtRuntimeError, void *pvUser)
3979{
3980 LogFlow(("VMR3AtRuntimeErrorDeregister: pfnAtRuntimeError=%p pvUser=%p\n", pfnAtRuntimeError, pvUser));
3981
3982 /*
3983 * Validate input.
3984 */
3985 AssertPtrReturn(pfnAtRuntimeError, VERR_INVALID_PARAMETER);
3986 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
3987
3988 PUVM pUVM = pVM->pUVM;
3989 RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
3990
3991 /*
3992 * Search the list for the entry.
3993 */
3994 PVMATRUNTIMEERROR pPrev = NULL;
3995 PVMATRUNTIMEERROR pCur = pUVM->vm.s.pAtRuntimeError;
3996 while ( pCur
3997 && ( pCur->pfnAtRuntimeError != pfnAtRuntimeError
3998 || pCur->pvUser != pvUser))
3999 {
4000 pPrev = pCur;
4001 pCur = pCur->pNext;
4002 }
4003 if (!pCur)
4004 {
4005 AssertMsgFailed(("pfnAtRuntimeError=%p was not found\n", pfnAtRuntimeError));
4006 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
4007 return VERR_FILE_NOT_FOUND;
4008 }
4009
4010 /*
4011 * Unlink it.
4012 */
4013 if (pPrev)
4014 {
4015 pPrev->pNext = pCur->pNext;
4016 if (!pCur->pNext)
4017 pUVM->vm.s.ppAtRuntimeErrorNext = &pPrev->pNext;
4018 }
4019 else
4020 {
4021 pUVM->vm.s.pAtRuntimeError = pCur->pNext;
4022 if (!pCur->pNext)
4023 pUVM->vm.s.ppAtRuntimeErrorNext = &pUVM->vm.s.pAtRuntimeError;
4024 }
4025
4026 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
4027
4028 /*
4029 * Free it.
4030 */
4031 pCur->pfnAtRuntimeError = NULL;
4032 pCur->pNext = NULL;
4033 MMR3HeapFree(pCur);
4034
4035 return VINF_SUCCESS;
4036}
4037
4038
4039/**
4040 * EMT rendezvous worker that vmR3SetRuntimeErrorCommon uses to safely change
4041 * the state to FatalError(LS).
4042 *
4043 * @returns VERR_VM_INVALID_VM_STATE or VINF_EM_SUSPEND. (This is a strict
4044 * return code, see FNVMMEMTRENDEZVOUS.)
4045 *
4046 * @param pVM The VM handle.
4047 * @param pVCpu The VMCPU handle of the EMT.
4048 * @param pvUser Ignored.
4049 */
4050static DECLCALLBACK(VBOXSTRICTRC) vmR3SetRuntimeErrorChangeState(PVM pVM, PVMCPU pVCpu, void *pvUser)
4051{
4052 NOREF(pVCpu);
4053 Assert(!pvUser); NOREF(pvUser);
4054
4055 /*
4056 * The first EMT thru here changes the state.
4057 */
4058 if (pVCpu->idCpu == pVM->cCpus - 1)
4059 {
4060 int rc = vmR3TrySetState(pVM, "VMSetRuntimeError", 2,
4061 VMSTATE_FATAL_ERROR, VMSTATE_RUNNING,
4062 VMSTATE_FATAL_ERROR_LS, VMSTATE_RUNNING_LS);
4063 if (RT_FAILURE(rc))
4064 return rc;
4065 if (rc == 2)
4066 SSMR3Cancel(pVM);
4067
4068 VM_FF_SET(pVM, VM_FF_CHECK_VM_STATE);
4069 }
4070
4071 /* This'll make sure we get out of whereever we are (e.g. REM). */
4072 return VINF_EM_SUSPEND;
4073}
4074
4075
4076/**
4077 * Worker for VMR3SetRuntimeErrorWorker and vmR3SetRuntimeErrorV.
4078 *
4079 * This does the common parts after the error has been saved / retrieved.
4080 *
4081 * @returns VBox status code with modifications, see VMSetRuntimeErrorV.
4082 *
4083 * @param pVM The VM handle.
4084 * @param fFlags The error flags.
4085 * @param pszErrorId Error ID string.
4086 * @param pszFormat Format string.
4087 * @param pVa Pointer to the format arguments.
4088 */
4089static int vmR3SetRuntimeErrorCommon(PVM pVM, uint32_t fFlags, const char *pszErrorId, const char *pszFormat, va_list *pVa)
4090{
4091 LogRel(("VM: Raising runtime error '%s' (fFlags=%#x)\n", pszErrorId, fFlags));
4092
4093 /*
4094 * Take actions before the call.
4095 */
4096 int rc;
4097 if (fFlags & VMSETRTERR_FLAGS_FATAL)
4098 rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
4099 vmR3SetRuntimeErrorChangeState, NULL);
4100 else if (fFlags & VMSETRTERR_FLAGS_SUSPEND)
4101 rc = VMR3Suspend(pVM);
4102 else
4103 rc = VINF_SUCCESS;
4104
4105 /*
4106 * Do the callback round.
4107 */
4108 PUVM pUVM = pVM->pUVM;
4109 RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
4110 ASMAtomicIncU32(&pUVM->vm.s.cRuntimeErrors);
4111 for (PVMATRUNTIMEERROR pCur = pUVM->vm.s.pAtRuntimeError; pCur; pCur = pCur->pNext)
4112 {
4113 va_list va;
4114 va_copy(va, *pVa);
4115 pCur->pfnAtRuntimeError(pVM, pCur->pvUser, fFlags, pszErrorId, pszFormat, va);
4116 va_end(va);
4117 }
4118 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
4119
4120 return rc;
4121}
4122
4123
4124/**
4125 * Ellipsis to va_list wrapper for calling vmR3SetRuntimeErrorCommon.
4126 */
4127static int vmR3SetRuntimeErrorCommonF(PVM pVM, uint32_t fFlags, const char *pszErrorId, const char *pszFormat, ...)
4128{
4129 va_list va;
4130 va_start(va, pszFormat);
4131 int rc = vmR3SetRuntimeErrorCommon(pVM, fFlags, pszErrorId, pszFormat, &va);
4132 va_end(va);
4133 return rc;
4134}
4135
4136
4137/**
4138 * This is a worker function for RC and Ring-0 calls to VMSetError and
4139 * VMSetErrorV.
4140 *
4141 * The message is found in VMINT.
4142 *
4143 * @returns VBox status code, see VMSetRuntimeError.
4144 * @param pVM The VM handle.
4145 * @thread EMT.
4146 */
4147VMMR3DECL(int) VMR3SetRuntimeErrorWorker(PVM pVM)
4148{
4149 VM_ASSERT_EMT(pVM);
4150 AssertReleaseMsgFailed(("And we have a winner! You get to implement Ring-0 and GC VMSetRuntimeErrorV! Congrats!\n"));
4151
4152 /*
4153 * Unpack the error (if we managed to format one).
4154 */
4155 const char *pszErrorId = "SetRuntimeError";
4156 const char *pszMessage = "No message!";
4157 uint32_t fFlags = VMSETRTERR_FLAGS_FATAL;
4158 PVMRUNTIMEERROR pErr = pVM->vm.s.pRuntimeErrorR3;
4159 if (pErr)
4160 {
4161 AssertCompile(sizeof(const char) == sizeof(uint8_t));
4162 if (pErr->offErrorId)
4163 pszErrorId = (const char *)pErr + pErr->offErrorId;
4164 if (pErr->offMessage)
4165 pszMessage = (const char *)pErr + pErr->offMessage;
4166 fFlags = pErr->fFlags;
4167 }
4168
4169 /*
4170 * Join cause with vmR3SetRuntimeErrorV.
4171 */
4172 return vmR3SetRuntimeErrorCommonF(pVM, fFlags, pszErrorId, "%s", pszMessage);
4173}
4174
4175
4176/**
4177 * Worker for VMSetRuntimeErrorV for doing the job on EMT in ring-3.
4178 *
4179 * @returns VBox status code with modifications, see VMSetRuntimeErrorV.
4180 *
4181 * @param pVM The VM handle.
4182 * @param fFlags The error flags.
4183 * @param pszErrorId Error ID string.
4184 * @param pszMessage The error message residing the MM heap.
4185 *
4186 * @thread EMT
4187 */
4188DECLCALLBACK(int) vmR3SetRuntimeError(PVM pVM, uint32_t fFlags, const char *pszErrorId, char *pszMessage)
4189{
4190#if 0 /** @todo make copy of the error msg. */
4191 /*
4192 * Make a copy of the message.
4193 */
4194 va_list va2;
4195 va_copy(va2, *pVa);
4196 vmSetRuntimeErrorCopy(pVM, fFlags, pszErrorId, pszFormat, va2);
4197 va_end(va2);
4198#endif
4199
4200 /*
4201 * Join paths with VMR3SetRuntimeErrorWorker.
4202 */
4203 int rc = vmR3SetRuntimeErrorCommonF(pVM, fFlags, pszErrorId, "%s", pszMessage);
4204 MMR3HeapFree(pszMessage);
4205 return rc;
4206}
4207
4208
4209/**
4210 * Worker for VMSetRuntimeErrorV for doing the job on EMT in ring-3.
4211 *
4212 * @returns VBox status code with modifications, see VMSetRuntimeErrorV.
4213 *
4214 * @param pVM The VM handle.
4215 * @param fFlags The error flags.
4216 * @param pszErrorId Error ID string.
4217 * @param pszFormat Format string.
4218 * @param pVa Pointer to the format arguments.
4219 *
4220 * @thread EMT
4221 */
4222DECLCALLBACK(int) vmR3SetRuntimeErrorV(PVM pVM, uint32_t fFlags, const char *pszErrorId, const char *pszFormat, va_list *pVa)
4223{
4224 /*
4225 * Make a copy of the message.
4226 */
4227 va_list va2;
4228 va_copy(va2, *pVa);
4229 vmSetRuntimeErrorCopy(pVM, fFlags, pszErrorId, pszFormat, va2);
4230 va_end(va2);
4231
4232 /*
4233 * Join paths with VMR3SetRuntimeErrorWorker.
4234 */
4235 return vmR3SetRuntimeErrorCommon(pVM, fFlags, pszErrorId, pszFormat, pVa);
4236}
4237
4238
4239/**
4240 * Gets the number of runtime errors raised via VMR3SetRuntimeError.
4241 *
4242 * This can be used avoid double error messages.
4243 *
4244 * @returns The runtime error count.
4245 * @param pVM The VM handle.
4246 */
4247VMMR3DECL(uint32_t) VMR3GetRuntimeErrorCount(PVM pVM)
4248{
4249 return pVM->pUVM->vm.s.cRuntimeErrors;
4250}
4251
4252
4253/**
4254 * Gets the ID virtual of the virtual CPU associated with the calling thread.
4255 *
4256 * @returns The CPU ID. NIL_VMCPUID if the thread isn't an EMT.
4257 *
4258 * @param pVM The VM handle.
4259 */
4260VMMR3DECL(RTCPUID) VMR3GetVMCPUId(PVM pVM)
4261{
4262 PUVMCPU pUVCpu = (PUVMCPU)RTTlsGet(pVM->pUVM->vm.s.idxTLS);
4263 return pUVCpu
4264 ? pUVCpu->idCpu
4265 : NIL_VMCPUID;
4266}
4267
4268
4269/**
4270 * Returns the native handle of the current EMT VMCPU thread.
4271 *
4272 * @returns Handle if this is an EMT thread; NIL_RTNATIVETHREAD otherwise
4273 * @param pVM The VM handle.
4274 * @thread EMT
4275 */
4276VMMR3DECL(RTNATIVETHREAD) VMR3GetVMCPUNativeThread(PVM pVM)
4277{
4278 PUVMCPU pUVCpu = (PUVMCPU)RTTlsGet(pVM->pUVM->vm.s.idxTLS);
4279
4280 if (!pUVCpu)
4281 return NIL_RTNATIVETHREAD;
4282
4283 return pUVCpu->vm.s.NativeThreadEMT;
4284}
4285
4286
/**
 * Returns the native handle of the current EMT VMCPU thread.
 *
 * @returns Handle if this is an EMT thread; NIL_RTNATIVETHREAD otherwise.
 * @param   pUVM            Pointer to the user mode VM structure.
 * @thread  EMT
 */
VMMR3DECL(RTNATIVETHREAD) VMR3GetVMCPUNativeThreadU(PUVM pUVM)
{
    /* Only EMTs have a UVMCPU pointer stored in the TLS slot. */
    PUVMCPU pUVCpu = (PUVMCPU)RTTlsGet(pUVM->vm.s.idxTLS);

    if (!pUVCpu)
        return NIL_RTNATIVETHREAD;

    return pUVCpu->vm.s.NativeThreadEMT;
}
4303
4304
4305/**
4306 * Returns the handle of the current EMT VMCPU thread.
4307 *
4308 * @returns Handle if this is an EMT thread; NIL_RTNATIVETHREAD otherwise
4309 * @param pVM The VM handle.
4310 * @thread EMT
4311 */
4312VMMR3DECL(RTTHREAD) VMR3GetVMCPUThread(PVM pVM)
4313{
4314 PUVMCPU pUVCpu = (PUVMCPU)RTTlsGet(pVM->pUVM->vm.s.idxTLS);
4315
4316 if (!pUVCpu)
4317 return NIL_RTTHREAD;
4318
4319 return pUVCpu->vm.s.ThreadEMT;
4320}
4321
4322
/**
 * Returns the handle of the current EMT VMCPU thread.
 *
 * @returns Handle if this is an EMT thread; NIL_RTTHREAD otherwise.
 * @param   pUVM            Pointer to the user mode VM structure.
 * @thread  EMT
 */
VMMR3DECL(RTTHREAD) VMR3GetVMCPUThreadU(PUVM pUVM)
{
    /* Only EMTs have a UVMCPU pointer stored in the TLS slot. */
    PUVMCPU pUVCpu = (PUVMCPU)RTTlsGet(pUVM->vm.s.idxTLS);

    if (!pUVCpu)
        return NIL_RTTHREAD;

    return pUVCpu->vm.s.ThreadEMT;
}
4339
4340
4341/**
4342 * Return the package and core id of a CPU.
4343 *
4344 * @returns VBOX status code.
4345 * @param pVM The VM to operate on.
4346 * @param idCpu Virtual CPU to get the ID from.
4347 * @param pidCpuCore Where to store the core ID of the virtual CPU.
4348 * @param pidCpuPackage Where to store the package ID of the virtual CPU.
4349 *
4350 */
4351VMMR3DECL(int) VMR3GetCpuCoreAndPackageIdFromCpuId(PVM pVM, VMCPUID idCpu, uint32_t *pidCpuCore, uint32_t *pidCpuPackage)
4352{
4353 /*
4354 * Validate input.
4355 */
4356 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
4357 AssertPtrReturn(pidCpuCore, VERR_INVALID_POINTER);
4358 AssertPtrReturn(pidCpuPackage, VERR_INVALID_POINTER);
4359 if (idCpu >= pVM->cCpus)
4360 return VERR_INVALID_CPU_ID;
4361
4362 /*
4363 * Set return values.
4364 */
4365#ifdef VBOX_WITH_MULTI_CORE
4366 *pidCpuCore = idCpu;
4367 *pidCpuPackage = 0;
4368#else
4369 *pidCpuCore = 0;
4370 *pidCpuPackage = idCpu;
4371#endif
4372
4373 return VINF_SUCCESS;
4374}
4375
4376
/**
 * Worker for VMR3HotUnplugCpu.
 *
 * @returns VINF_EM_WAIT_SIPI (strict status code).
 * @param   pVM     The VM handle.
 * @param   idCpu   The current CPU.
 */
static DECLCALLBACK(int) vmR3HotUnplugCpu(PVM pVM, VMCPUID idCpu)
{
    PVMCPU pVCpu = VMMGetCpuById(pVM, idCpu);
    VMCPU_ASSERT_EMT(pVCpu);

    /*
     * Reset per CPU resources.
     *
     * Actually only needed for VT-x because the CPU seems to be still in some
     * paged mode and startup fails after a new hot plug event. SVM works fine
     * even without this.
     */
    Log(("vmR3HotUnplugCpu for VCPU %u\n", idCpu));
    PGMR3ResetUnpluggedCpu(pVM, pVCpu);
    PDMR3ResetCpu(pVCpu);
    TRPMR3ResetCpu(pVCpu);
    CPUMR3ResetCpu(pVCpu);
    EMR3ResetCpu(pVCpu);
    HWACCMR3ResetCpu(pVCpu);
    /* Park this EMT in wait-for-SIPI state until the CPU is plugged again. */
    return VINF_EM_WAIT_SIPI;
}
4405
4406
4407/**
4408 * Hot-unplugs a CPU from the guest.
4409 *
4410 * @returns VBox status code.
4411 * @param pVM The VM to operate on.
4412 * @param idCpu Virtual CPU to perform the hot unplugging operation on.
4413 */
4414VMMR3DECL(int) VMR3HotUnplugCpu(PVM pVM, VMCPUID idCpu)
4415{
4416 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
4417 AssertReturn(idCpu < pVM->cCpus, VERR_INVALID_CPU_ID);
4418
4419 /** @todo r=bird: Don't destroy the EMT, it'll break VMMR3EmtRendezvous and
4420 * broadcast requests. Just note down somewhere that the CPU is
4421 * offline and send it to SPIP wait. Maybe modify VMCPUSTATE and push
4422 * it out of the EM loops when offline. */
4423 return VMR3ReqCallNoWaitU(pVM->pUVM, idCpu, (PFNRT)vmR3HotUnplugCpu, 2, pVM, idCpu);
4424}
4425
4426
/**
 * Hot-plugs a CPU on the guest.
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 * @param   idCpu   Virtual CPU to perform the hot plugging operation on.
 */
VMMR3DECL(int) VMR3HotPlugCpu(PVM pVM, VMCPUID idCpu)
{
    VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
    AssertReturn(idCpu < pVM->cCpus, VERR_INVALID_CPU_ID);

    /* Currently a stub: only validates the arguments, the actual plug work is
       still outstanding per the todo below. */
    /** @todo r-bird: Just mark it online and make sure it waits on SPIP. */
    return VINF_SUCCESS;
}
4442
4443
/**
 * Changes the VMM execution cap.
 *
 * @returns VBox status code.
 * @param   pVM                 The VM to operate on.
 * @param   uCpuExecutionCap    New CPU execution cap in percent, 1-100. Where
 *                              100 is max performance (default).
 */
VMMR3DECL(int) VMR3SetCpuExecutionCap(PVM pVM, uint32_t uCpuExecutionCap)
{
    VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
    /* The cap must be in the range 1-100 (0 would stall the VM completely). */
    AssertReturn(uCpuExecutionCap > 0 && uCpuExecutionCap <= 100, VERR_INVALID_PARAMETER);

    Log(("VMR3SetCpuExecutionCap: new priority = %d\n", uCpuExecutionCap));
    /* Note: not called from EMT. */
    pVM->uCpuExecutionCap = uCpuExecutionCap;
    return VINF_SUCCESS;
}
4462
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette