VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/VM.cpp@ 36054

Last change on this file since 36054 was 36041, checked in by vboxsync, 14 years ago

Main/VMM: Use UVM w/ refcounting - part 1.

1/* $Id: VM.cpp 36041 2011-02-21 16:04:53Z vboxsync $ */
2/** @file
3 * VM - Virtual Machine
4 */
5
6/*
7 * Copyright (C) 2006-2010 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/** @page pg_vm VM API
19 *
20 * This is the encapsulating bit. It provides the APIs that Main and VBoxBFE
21 * use to create a VMM instance for running a guest in. It also provides
22 * facilities for queuing requests for execution in EMT (serialization purposes
23 * mostly) and for reporting errors back to the VMM user (Main/VBoxBFE).
24 *
25 *
26 * @section sec_vm_design Design Critique / Things To Do
27 *
28 * In hindsight this component is a big design mistake, all this stuff really
29 * belongs in the VMM component. It just seemed like a kind of ok idea at a
30 * time when the VMM bit was kind of vague. 'VM' also happened to be the name
31 * of the per-VM instance structure (see vm.h), so it kind of made sense.
32 * However, as it turned out, VMM(.cpp) is almost empty; all it provides in ring-3
33 * is some minor functionality and some "routing" services.
34 *
35 * Fixing this is just a matter of some more or less straightforward
36 * refactoring; the question is just when someone will get to it. Moving the EMT
37 * would be a good start.
38 *
39 */
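/* (Illustrative sketch, not part of the original file.)  Roughly how a
 * frontend might drive this API; error handling is elided and
 * MyCfgmConstructor/pvMyUser are hypothetical caller-side names:
 *
 *      PVM pVM;
 *      int rc = VMR3Create(1,                // one virtual CPU
 *                          NULL,             // no VMM2USERMETHODS table
 *                          NULL, NULL,       // no at-error callback / user arg
 *                          MyCfgmConstructor, pvMyUser,
 *                          &pVM);
 *      if (RT_SUCCESS(rc))
 *      {
 *          rc = VMR3PowerOn(pVM);
 *          // ... guest runs; VMR3Suspend/VMR3Resume as needed ...
 *          VMR3PowerOff(pVM);
 *          VMR3Destroy(pVM);
 *      }
 */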
40
41/*******************************************************************************
42* Header Files *
43*******************************************************************************/
44#define LOG_GROUP LOG_GROUP_VM
45#include <VBox/vmm/cfgm.h>
46#include <VBox/vmm/vmm.h>
47#include <VBox/vmm/gvmm.h>
48#include <VBox/vmm/mm.h>
49#include <VBox/vmm/cpum.h>
50#include <VBox/vmm/selm.h>
51#include <VBox/vmm/trpm.h>
52#include <VBox/vmm/dbgf.h>
53#include <VBox/vmm/pgm.h>
54#include <VBox/vmm/pdmapi.h>
55#include <VBox/vmm/pdmcritsect.h>
56#include <VBox/vmm/em.h>
57#include <VBox/vmm/rem.h>
58#include <VBox/vmm/tm.h>
59#include <VBox/vmm/stam.h>
60#include <VBox/vmm/patm.h>
61#include <VBox/vmm/csam.h>
62#include <VBox/vmm/iom.h>
63#include <VBox/vmm/ssm.h>
64#include <VBox/vmm/ftm.h>
65#include <VBox/vmm/hwaccm.h>
66#include "VMInternal.h"
67#include <VBox/vmm/vm.h>
68#include <VBox/vmm/uvm.h>
69
70#include <VBox/sup.h>
71#include <VBox/dbg.h>
72#include <VBox/err.h>
73#include <VBox/param.h>
74#include <VBox/log.h>
75#include <iprt/assert.h>
76#include <iprt/alloc.h>
77#include <iprt/asm.h>
78#include <iprt/env.h>
79#include <iprt/string.h>
80#include <iprt/time.h>
81#include <iprt/semaphore.h>
82#include <iprt/thread.h>
83#include <iprt/uuid.h>
84
85
86/*******************************************************************************
87* Structures and Typedefs *
88*******************************************************************************/
89/**
90 * VM destruction callback registration record.
91 */
92typedef struct VMATDTOR
93{
94 /** Pointer to the next record in the list. */
95 struct VMATDTOR *pNext;
96 /** Pointer to the callback function. */
97 PFNVMATDTOR pfnAtDtor;
98 /** The user argument. */
99 void *pvUser;
100} VMATDTOR;
101/** Pointer to a VM destruction callback registration record. */
102typedef VMATDTOR *PVMATDTOR;
103
104
105/*******************************************************************************
106* Global Variables *
107*******************************************************************************/
108/** Pointer to the list of VMs. */
109static PUVM g_pUVMsHead = NULL;
110
111/** Pointer to the list of at VM destruction callbacks. */
112static PVMATDTOR g_pVMAtDtorHead = NULL;
113/** Lock the g_pVMAtDtorHead list. */
114#define VM_ATDTOR_LOCK() do { } while (0)
115/** Unlock the g_pVMAtDtorHead list. */
116#define VM_ATDTOR_UNLOCK() do { } while (0)
117
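/* (Illustrative sketch, not part of the original file.)  Records of the
 * VMATDTOR type above are kept in the g_pVMAtDtorHead list; at VM destruction
 * the list is simply walked and each callback invoked with its user argument.
 * The callback signature (PVM, void *) is an assumption here:
 *
 *      for (PVMATDTOR pCur = g_pVMAtDtorHead; pCur; pCur = pCur->pNext)
 *          pCur->pfnAtDtor(pVM, pCur->pvUser);
 */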
118
119/*******************************************************************************
120* Internal Functions *
121*******************************************************************************/
122static int vmR3CreateUVM(uint32_t cCpus, PCVMM2USERMETHODS pVmm2UserMethods, PUVM *ppUVM);
123static int vmR3CreateU(PUVM pUVM, uint32_t cCpus, PFNCFGMCONSTRUCTOR pfnCFGMConstructor, void *pvUserCFGM);
124static int vmR3InitRing3(PVM pVM, PUVM pUVM);
125static int vmR3InitRing0(PVM pVM);
126static int vmR3InitGC(PVM pVM);
127static int vmR3InitDoCompleted(PVM pVM, VMINITCOMPLETED enmWhat);
128static DECLCALLBACK(size_t) vmR3LogPrefixCallback(PRTLOGGER pLogger, char *pchBuf, size_t cchBuf, void *pvUser);
129static void vmR3DestroyUVM(PUVM pUVM, uint32_t cMilliesEMTWait);
130static void vmR3AtDtor(PVM pVM);
131static bool vmR3ValidateStateTransition(VMSTATE enmStateOld, VMSTATE enmStateNew);
132static void vmR3DoAtState(PVM pVM, PUVM pUVM, VMSTATE enmStateNew, VMSTATE enmStateOld);
133static int vmR3TrySetState(PVM pVM, const char *pszWho, unsigned cTransitions, ...);
134static void vmR3SetStateLocked(PVM pVM, PUVM pUVM, VMSTATE enmStateNew, VMSTATE enmStateOld);
135static void vmR3SetState(PVM pVM, VMSTATE enmStateNew, VMSTATE enmStateOld);
136static int vmR3SetErrorU(PUVM pUVM, int rc, RT_SRC_POS_DECL, const char *pszFormat, ...);
137
138
139/**
140 * Do global VMM init.
141 *
142 * @returns VBox status code.
143 */
144VMMR3DECL(int) VMR3GlobalInit(void)
145{
146 /*
147 * Only once.
148 */
149 static bool volatile s_fDone = false;
150 if (s_fDone)
151 return VINF_SUCCESS;
152
153 /*
154 * We're done.
155 */
156 s_fDone = true;
157 return VINF_SUCCESS;
158}
159
160
161
162/**
163 * Creates a virtual machine by calling the supplied configuration constructor.
164 *
165 * On successful return the VM is created and powered off; VMR3PowerOn() should be
166 * called to start the execution.
167 *
168 * @returns 0 on success.
169 * @returns VBox error code on failure.
170 * @param cCpus Number of virtual CPUs for the new VM.
171 * @param pVmm2UserMethods An optional method table that the VMM can use to
172 * make the user perform various actions, like for
173 * instance state saving.
174 * @param pfnVMAtError Pointer to callback function for setting VM
175 * errors. This was added as an implicit call to
176 * VMR3AtErrorRegister() since there is no way the
177 * caller can get to the VM handle early enough to
178 * do this on its own.
179 * This is called in the context of an EMT.
180 * @param pvUserVM The user argument passed to pfnVMAtError.
181 * @param pfnCFGMConstructor Pointer to callback function for constructing the VM configuration tree.
182 * This is called in the context of an EMT0.
183 * @param pvUserCFGM The user argument passed to pfnCFGMConstructor.
184 * @param ppVM Where to store the 'handle' of the created VM.
185 */
186VMMR3DECL(int) VMR3Create(uint32_t cCpus, PCVMM2USERMETHODS pVmm2UserMethods,
187 PFNVMATERROR pfnVMAtError, void *pvUserVM,
188 PFNCFGMCONSTRUCTOR pfnCFGMConstructor, void *pvUserCFGM,
189 PVM *ppVM)
190{
191 LogFlow(("VMR3Create: cCpus=%RU32 pVmm2UserMethods=%p pfnVMAtError=%p pvUserVM=%p pfnCFGMConstructor=%p pvUserCFGM=%p ppVM=%p\n",
192 cCpus, pVmm2UserMethods, pfnVMAtError, pvUserVM, pfnCFGMConstructor, pvUserCFGM, ppVM));
193
194 if (pVmm2UserMethods)
195 {
196 AssertPtrReturn(pVmm2UserMethods, VERR_INVALID_POINTER);
197 AssertReturn(pVmm2UserMethods->u32Magic == VMM2USERMETHODS_MAGIC, VERR_INVALID_PARAMETER);
198 AssertReturn(pVmm2UserMethods->u32Version == VMM2USERMETHODS_VERSION, VERR_INVALID_PARAMETER);
199 AssertPtrReturn(pVmm2UserMethods->pfnSaveState, VERR_INVALID_POINTER);
200 AssertReturn(pVmm2UserMethods->u32EndMagic == VMM2USERMETHODS_MAGIC, VERR_INVALID_PARAMETER);
201 }
202 AssertPtrNullReturn(pfnVMAtError, VERR_INVALID_POINTER);
203 AssertPtrNullReturn(pfnCFGMConstructor, VERR_INVALID_POINTER);
204 AssertPtrReturn(ppVM, VERR_INVALID_POINTER);
205
206 /*
207 * Because of the current hackiness of the applications
208 * we'll have to initialize global stuff from here.
209 * Later the applications will take care of this in a proper way.
210 */
211 static bool fGlobalInitDone = false;
212 if (!fGlobalInitDone)
213 {
214 int rc = VMR3GlobalInit();
215 if (RT_FAILURE(rc))
216 return rc;
217 fGlobalInitDone = true;
218 }
219
220 /*
221 * Validate input.
222 */
223 AssertLogRelMsgReturn(cCpus > 0 && cCpus <= VMM_MAX_CPU_COUNT, ("%RU32\n", cCpus), VERR_TOO_MANY_CPUS);
224
225 /*
226 * Create the UVM so we can register the at-error callback
227 * and consolidate a bit of cleanup code.
228 */
229 PUVM pUVM = NULL; /* shuts up gcc */
230 int rc = vmR3CreateUVM(cCpus, pVmm2UserMethods, &pUVM);
231 if (RT_FAILURE(rc))
232 return rc;
233 if (pfnVMAtError)
234 rc = VMR3AtErrorRegisterU(pUVM, pfnVMAtError, pvUserVM);
235 if (RT_SUCCESS(rc))
236 {
237 /*
238 * Initialize the support library creating the session for this VM.
239 */
240 rc = SUPR3Init(&pUVM->vm.s.pSession);
241 if (RT_SUCCESS(rc))
242 {
243 /*
244 * Call vmR3CreateU in the EMT thread and wait for it to finish.
245 *
246 * Note! VMCPUID_ANY is used here because VMR3ReqQueueU would have trouble
247 * submitting a request to a specific VCPU without a pVM. So, to make
248 * sure init is running on EMT(0), vmR3EmulationThreadWithId makes sure
249 * that only EMT(0) is servicing VMCPUID_ANY requests when pVM is NULL.
250 */
251 PVMREQ pReq;
252 rc = VMR3ReqCallU(pUVM, VMCPUID_ANY, &pReq, RT_INDEFINITE_WAIT, VMREQFLAGS_VBOX_STATUS,
253 (PFNRT)vmR3CreateU, 4, pUVM, cCpus, pfnCFGMConstructor, pvUserCFGM);
254 if (RT_SUCCESS(rc))
255 {
256 rc = pReq->iStatus;
257 VMR3ReqFree(pReq);
258 if (RT_SUCCESS(rc))
259 {
260 /*
261 * Success!
262 */
263 *ppVM = pUVM->pVM;
264 LogFlow(("VMR3Create: returns VINF_SUCCESS *ppVM=%p\n", *ppVM));
265 return VINF_SUCCESS;
266 }
267 }
268 else
269 AssertMsgFailed(("VMR3ReqCallU failed rc=%Rrc\n", rc));
270
271 /*
272 * An error occurred during VM creation. Set the error message directly
273 * using the initial callback, as the callback list might not exist yet.
274 */
275 const char *pszError;
276 switch (rc)
277 {
278 case VERR_VMX_IN_VMX_ROOT_MODE:
279#ifdef RT_OS_LINUX
280 pszError = N_("VirtualBox can't operate in VMX root mode. "
281 "Please disable the KVM kernel extension, recompile your kernel and reboot");
282#else
283 pszError = N_("VirtualBox can't operate in VMX root mode. Please close all other virtualization programs.");
284#endif
285 break;
286
287#ifndef RT_OS_DARWIN
288 case VERR_HWACCM_CONFIG_MISMATCH:
289 pszError = N_("VT-x/AMD-V is either not available on your host or disabled. "
290 "This hardware extension is required by the VM configuration");
291 break;
292#endif
293
294 case VERR_SVM_IN_USE:
295#ifdef RT_OS_LINUX
296 pszError = N_("VirtualBox can't enable the AMD-V extension. "
297 "Please disable the KVM kernel extension, recompile your kernel and reboot");
298#else
299 pszError = N_("VirtualBox can't enable the AMD-V extension. Please close all other virtualization programs.");
300#endif
301 break;
302
303#ifdef RT_OS_LINUX
304 case VERR_SUPDRV_COMPONENT_NOT_FOUND:
305 pszError = N_("One of the kernel modules was not successfully loaded. Make sure "
306 "that no kernel modules from an older version of VirtualBox exist. "
307 "Then try to recompile and reload the kernel modules by executing "
308 "'/etc/init.d/vboxdrv setup' as root");
309 break;
310#endif
311
312 case VERR_RAW_MODE_INVALID_SMP:
313 pszError = N_("VT-x/AMD-V is either not available on your host or disabled. "
314 "VirtualBox requires this hardware extension to emulate more than one "
315 "guest CPU");
316 break;
317
318 case VERR_SUPDRV_KERNEL_TOO_OLD_FOR_VTX:
319#ifdef RT_OS_LINUX
320 pszError = N_("Because the host kernel is too old, VirtualBox cannot enable the VT-x "
321 "extension. Either upgrade your kernel to Linux 2.6.13 or later or disable "
322 "the VT-x extension in the VM settings. Note that without VT-x you have "
323 "to reduce the number of guest CPUs to one");
324#else
325 pszError = N_("Because the host kernel is too old, VirtualBox cannot enable the VT-x "
326 "extension. Either upgrade your kernel or disable the VT-x extension in the "
327 "VM settings. Note that without VT-x you have to reduce the number of guest "
328 "CPUs to one");
329#endif
330 break;
331
332 case VERR_PDM_DEVICE_NOT_FOUND:
333 pszError = N_("A virtual device is configured in the VM settings but the device "
334 "implementation is missing.\n"
335 "A possible reason for this error is a missing extension pack. Note "
336 "that as of VirtualBox 4.0, certain features (for example USB 2.0 "
337 "support and remote desktop) are only available from an 'extension "
338 "pack' which must be downloaded and installed separately");
339 break;
340
341 default:
342 if (VMR3GetErrorCountU(pUVM) == 0)
343 pszError = RTErrGetFull(rc);
344 else
345 pszError = NULL; /* already set. */
346 break;
347 }
348 if (pszError)
349 vmR3SetErrorU(pUVM, rc, RT_SRC_POS, pszError, rc);
350 }
351 else
352 {
353 /*
354 * An error occurred at support library initialization time (before the
355 * VM could be created). Set the error message directly using the
356 * initial callback, as the callback list doesn't exist yet.
357 */
358 const char *pszError;
359 switch (rc)
360 {
361 case VERR_VM_DRIVER_LOAD_ERROR:
362#ifdef RT_OS_LINUX
363 pszError = N_("VirtualBox kernel driver not loaded. The vboxdrv kernel module "
364 "was either not loaded or /dev/vboxdrv is not set up properly. "
365 "Re-setup the kernel module by executing "
366 "'/etc/init.d/vboxdrv setup' as root");
367#else
368 pszError = N_("VirtualBox kernel driver not loaded");
369#endif
370 break;
371 case VERR_VM_DRIVER_OPEN_ERROR:
372 pszError = N_("VirtualBox kernel driver cannot be opened");
373 break;
374 case VERR_VM_DRIVER_NOT_ACCESSIBLE:
375#ifdef VBOX_WITH_HARDENING
376 /* This should only happen if the executable wasn't hardened - bad code/build. */
377 pszError = N_("VirtualBox kernel driver not accessible, permission problem. "
378 "Re-install VirtualBox. If you are building it yourself, you "
379 "should make sure it installed correctly and that the setuid "
380 "bit is set on the executables calling VMR3Create.");
381#else
382 /* This should only happen when mixing builds or with the usual /dev/vboxdrv access issues. */
383# if defined(RT_OS_DARWIN)
384 pszError = N_("VirtualBox KEXT is not accessible, permission problem. "
385 "If you have built VirtualBox yourself, make sure that you do not "
386 "have the vboxdrv KEXT from a different build or installation loaded.");
387# elif defined(RT_OS_LINUX)
388 pszError = N_("VirtualBox kernel driver is not accessible, permission problem. "
389 "If you have built VirtualBox yourself, make sure that you do "
390 "not have the vboxdrv kernel module from a different build or "
391 "installation loaded. Also, make sure the vboxdrv udev rule gives "
392 "you the permission you need to access the device.");
393# elif defined(RT_OS_WINDOWS)
394 pszError = N_("VirtualBox kernel driver is not accessible, permission problem.");
395# else /* solaris, freebsd, ++. */
396 pszError = N_("VirtualBox kernel module is not accessible, permission problem. "
397 "If you have built VirtualBox yourself, make sure that you do "
398 "not have the vboxdrv kernel module from a different install loaded.");
399# endif
400#endif
401 break;
402 case VERR_INVALID_HANDLE: /** @todo track down and fix this error. */
403 case VERR_VM_DRIVER_NOT_INSTALLED:
404#ifdef RT_OS_LINUX
405 pszError = N_("VirtualBox kernel driver not installed. The vboxdrv kernel module "
406 "was either not loaded or /dev/vboxdrv was not created for some "
407 "reason. Re-setup the kernel module by executing "
408 "'/etc/init.d/vboxdrv setup' as root");
409#else
410 pszError = N_("VirtualBox kernel driver not installed");
411#endif
412 break;
413 case VERR_NO_MEMORY:
414 pszError = N_("VirtualBox support library out of memory");
415 break;
416 case VERR_VERSION_MISMATCH:
417 case VERR_VM_DRIVER_VERSION_MISMATCH:
418 pszError = N_("The VirtualBox support driver which is running is from a different "
419 "version of VirtualBox. You can correct this by stopping all "
420 "running instances of VirtualBox and reinstalling the software.");
421 break;
422 default:
423 pszError = N_("Unknown error initializing kernel driver");
424 AssertMsgFailed(("Add error message for rc=%d (%Rrc)\n", rc, rc));
425 }
426 vmR3SetErrorU(pUVM, rc, RT_SRC_POS, pszError, rc);
427 }
428 }
429
430 /* cleanup */
431 vmR3DestroyUVM(pUVM, 2000);
432 LogFlow(("VMR3Create: returns %Rrc\n", rc));
433 return rc;
434}
435
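/* (Illustrative sketch, not part of the original file.)  Roughly what a
 * pfnCFGMConstructor callback passed to VMR3Create might look like.  The
 * callback signature and the configuration keys are assumptions; real
 * frontends build a considerably larger tree:
 *
 *      static DECLCALLBACK(int) myCfgmConstructor(PVM pVM, void *pvUser)
 *      {
 *          PCFGMNODE pRoot = CFGMR3GetRoot(pVM);
 *          int rc = CFGMR3InsertInteger(pRoot, "RamSize", 128 * _1M);
 *          if (RT_SUCCESS(rc))
 *              rc = CFGMR3InsertInteger(pRoot, "NumCPUs", 1);
 *          return rc;
 *      }
 */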
436
437/**
438 * Creates the UVM.
439 *
440 * This will not initialize the support library, even though vmR3DestroyUVM
441 * will terminate it.
442 *
443 * @returns VBox status code.
444 * @param cCpus Number of virtual CPUs
445 * @param pVmm2UserMethods Pointer to the optional VMM -> User method
446 * table.
447 * @param ppUVM Where to store the UVM pointer.
448 */
449static int vmR3CreateUVM(uint32_t cCpus, PCVMM2USERMETHODS pVmm2UserMethods, PUVM *ppUVM)
450{
451 uint32_t i;
452
453 /*
454 * Create and initialize the UVM.
455 */
456 PUVM pUVM = (PUVM)RTMemPageAllocZ(RT_OFFSETOF(UVM, aCpus[cCpus]));
457 AssertReturn(pUVM, VERR_NO_MEMORY);
458 pUVM->u32Magic = UVM_MAGIC;
459 pUVM->cCpus = cCpus;
460 pUVM->pVmm2UserMethods = pVmm2UserMethods;
461
462 AssertCompile(sizeof(pUVM->vm.s) <= sizeof(pUVM->vm.padding));
463
464 pUVM->vm.s.cUvmRefs = 1;
465 pUVM->vm.s.ppAtStateNext = &pUVM->vm.s.pAtState;
466 pUVM->vm.s.ppAtErrorNext = &pUVM->vm.s.pAtError;
467 pUVM->vm.s.ppAtRuntimeErrorNext = &pUVM->vm.s.pAtRuntimeError;
468
469 pUVM->vm.s.enmHaltMethod = VMHALTMETHOD_BOOTSTRAP;
470 RTUuidClear(&pUVM->vm.s.Uuid);
471
472 /* Initialize the VMCPU array in the UVM. */
473 for (i = 0; i < cCpus; i++)
474 {
475 pUVM->aCpus[i].pUVM = pUVM;
476 pUVM->aCpus[i].idCpu = i;
477 }
478
479 /* Allocate a TLS entry to store the VMINTUSERPERVMCPU pointer. */
480 int rc = RTTlsAllocEx(&pUVM->vm.s.idxTLS, NULL);
481 AssertRC(rc);
482 if (RT_SUCCESS(rc))
483 {
484 /* Allocate a halt method event semaphore for each VCPU. */
485 for (i = 0; i < cCpus; i++)
486 pUVM->aCpus[i].vm.s.EventSemWait = NIL_RTSEMEVENT;
487 for (i = 0; i < cCpus; i++)
488 {
489 rc = RTSemEventCreate(&pUVM->aCpus[i].vm.s.EventSemWait);
490 if (RT_FAILURE(rc))
491 break;
492 }
493 if (RT_SUCCESS(rc))
494 {
495 rc = RTCritSectInit(&pUVM->vm.s.AtStateCritSect);
496 if (RT_SUCCESS(rc))
497 {
498 rc = RTCritSectInit(&pUVM->vm.s.AtErrorCritSect);
499 if (RT_SUCCESS(rc))
500 {
501 /*
502 * Init fundamental (sub-)components - STAM, MMR3Heap and PDMLdr.
503 */
504 rc = STAMR3InitUVM(pUVM);
505 if (RT_SUCCESS(rc))
506 {
507 rc = MMR3InitUVM(pUVM);
508 if (RT_SUCCESS(rc))
509 {
510 rc = PDMR3InitUVM(pUVM);
511 if (RT_SUCCESS(rc))
512 {
513 /*
514 * Start the emulation threads for all VMCPUs.
515 */
516 for (i = 0; i < cCpus; i++)
517 {
518 rc = RTThreadCreateF(&pUVM->aCpus[i].vm.s.ThreadEMT, vmR3EmulationThread, &pUVM->aCpus[i], _1M,
519 RTTHREADTYPE_EMULATION, RTTHREADFLAGS_WAITABLE,
520 cCpus > 1 ? "EMT-%u" : "EMT", i);
521 if (RT_FAILURE(rc))
522 break;
523
524 pUVM->aCpus[i].vm.s.NativeThreadEMT = RTThreadGetNative(pUVM->aCpus[i].vm.s.ThreadEMT);
525 }
526
527 if (RT_SUCCESS(rc))
528 {
529 *ppUVM = pUVM;
530 return VINF_SUCCESS;
531 }
532
533 /* bail out. */
534 while (i-- > 0)
535 {
536 /** @todo rainy day: terminate the EMTs. */
537 }
538 PDMR3TermUVM(pUVM);
539 }
540 MMR3TermUVM(pUVM);
541 }
542 STAMR3TermUVM(pUVM);
543 }
544 RTCritSectDelete(&pUVM->vm.s.AtErrorCritSect);
545 }
546 RTCritSectDelete(&pUVM->vm.s.AtStateCritSect);
547 }
548 }
549 for (i = 0; i < cCpus; i++)
550 {
551 RTSemEventDestroy(pUVM->aCpus[i].vm.s.EventSemWait);
552 pUVM->aCpus[i].vm.s.EventSemWait = NIL_RTSEMEVENT;
553 }
554 RTTlsFree(pUVM->vm.s.idxTLS);
555 }
556 RTMemPageFree(pUVM, RT_OFFSETOF(UVM, aCpus[pUVM->cCpus]));
557 return rc;
558}
559
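/* (Illustrative note, not part of the original file.)  The
 * RT_OFFSETOF(UVM, aCpus[cCpus]) sizing used above is the usual IPRT idiom for
 * allocating a structure with a variable-length trailing array.  A minimal
 * sketch with a hypothetical structure:
 *
 *      typedef struct EXAMPLE
 *      {
 *          uint32_t cItems;
 *          int      aItems[1];     // allocated to hold cItems entries
 *      } EXAMPLE;
 *
 *      EXAMPLE *pEx = (EXAMPLE *)RTMemAllocZ(RT_OFFSETOF(EXAMPLE, aItems[cItems]));
 */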
560
561/**
562 * Creates and initializes the VM.
563 *
564 * @thread EMT
565 */
566static int vmR3CreateU(PUVM pUVM, uint32_t cCpus, PFNCFGMCONSTRUCTOR pfnCFGMConstructor, void *pvUserCFGM)
567{
568 /*
569 * Load the VMMR0.r0 module so that we can call GVMMR0CreateVM.
570 */
571 int rc = PDMR3LdrLoadVMMR0U(pUVM);
572 if (RT_FAILURE(rc))
573 {
574 /** @todo we need a cleaner solution for this (VERR_VMX_IN_VMX_ROOT_MODE).
575 * bird: what about moving the message down here? Main picks the first message, right? */
576 if (rc == VERR_VMX_IN_VMX_ROOT_MODE)
577 return rc; /* proper error message set later on */
578 return vmR3SetErrorU(pUVM, rc, RT_SRC_POS, N_("Failed to load VMMR0.r0"));
579 }
580
581 /*
582 * Request GVMM to create a new VM for us.
583 */
584 GVMMCREATEVMREQ CreateVMReq;
585 CreateVMReq.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
586 CreateVMReq.Hdr.cbReq = sizeof(CreateVMReq);
587 CreateVMReq.pSession = pUVM->vm.s.pSession;
588 CreateVMReq.pVMR0 = NIL_RTR0PTR;
589 CreateVMReq.pVMR3 = NULL;
590 CreateVMReq.cCpus = cCpus;
591 rc = SUPR3CallVMMR0Ex(NIL_RTR0PTR, NIL_VMCPUID, VMMR0_DO_GVMM_CREATE_VM, 0, &CreateVMReq.Hdr);
592 if (RT_SUCCESS(rc))
593 {
594 PVM pVM = pUVM->pVM = CreateVMReq.pVMR3;
595 AssertRelease(VALID_PTR(pVM));
596 AssertRelease(pVM->pVMR0 == CreateVMReq.pVMR0);
597 AssertRelease(pVM->pSession == pUVM->vm.s.pSession);
598 AssertRelease(pVM->cCpus == cCpus);
599 AssertRelease(pVM->uCpuExecutionCap == 100);
600 AssertRelease(pVM->offVMCPU == RT_UOFFSETOF(VM, aCpus));
601
602 Log(("VMR3Create: Created pUVM=%p pVM=%p pVMR0=%p hSelf=%#x cCpus=%RU32\n",
603 pUVM, pVM, pVM->pVMR0, pVM->hSelf, pVM->cCpus));
604
605 /*
606 * Initialize the VM structure and our internal data (VMINT).
607 */
608 pVM->pUVM = pUVM;
609
610 for (VMCPUID i = 0; i < pVM->cCpus; i++)
611 {
612 pVM->aCpus[i].pUVCpu = &pUVM->aCpus[i];
613 pVM->aCpus[i].idCpu = i;
614 pVM->aCpus[i].hNativeThread = pUVM->aCpus[i].vm.s.NativeThreadEMT;
615 Assert(pVM->aCpus[i].hNativeThread != NIL_RTNATIVETHREAD);
616 /* hNativeThreadR0 is initialized on EMT registration. */
617 pUVM->aCpus[i].pVCpu = &pVM->aCpus[i];
618 pUVM->aCpus[i].pVM = pVM;
619 }
620
621
622 /*
623 * Init the configuration.
624 */
625 rc = CFGMR3Init(pVM, pfnCFGMConstructor, pvUserCFGM);
626 if (RT_SUCCESS(rc))
627 {
628 PCFGMNODE pRoot = CFGMR3GetRoot(pVM);
629 rc = CFGMR3QueryBoolDef(pRoot, "HwVirtExtForced", &pVM->fHwVirtExtForced, false);
630 if (RT_SUCCESS(rc) && pVM->fHwVirtExtForced)
631 pVM->fHWACCMEnabled = true;
632
633 /*
634 * If executing in fake suplib mode disable RR3 and RR0 in the config.
635 */
636 const char *psz = RTEnvGet("VBOX_SUPLIB_FAKE");
637 if (psz && !strcmp(psz, "fake"))
638 {
639 CFGMR3RemoveValue(pRoot, "RawR3Enabled");
640 CFGMR3InsertInteger(pRoot, "RawR3Enabled", 0);
641 CFGMR3RemoveValue(pRoot, "RawR0Enabled");
642 CFGMR3InsertInteger(pRoot, "RawR0Enabled", 0);
643 }
644
645 /*
646 * Make sure the CPU count in the config data matches.
647 */
648 if (RT_SUCCESS(rc))
649 {
650 uint32_t cCPUsCfg;
651 rc = CFGMR3QueryU32Def(pRoot, "NumCPUs", &cCPUsCfg, 1);
652 AssertLogRelMsgRC(rc, ("Configuration error: Querying \"NumCPUs\" as integer failed, rc=%Rrc\n", rc));
653 if (RT_SUCCESS(rc) && cCPUsCfg != cCpus)
654 {
655 AssertLogRelMsgFailed(("Configuration error: \"NumCPUs\"=%RU32 and VMR3CreateVM::cCpus=%RU32 does not match!\n",
656 cCPUsCfg, cCpus));
657 rc = VERR_INVALID_PARAMETER;
658 }
659 }
660
661 /*
662 * Get the CPU execution cap.
663 */
664 if (RT_SUCCESS(rc))
665 {
666 rc = CFGMR3QueryU32Def(pRoot, "CpuExecutionCap", &pVM->uCpuExecutionCap, 100);
667 AssertLogRelMsgRC(rc, ("Configuration error: Querying \"CpuExecutionCap\" as integer failed, rc=%Rrc\n", rc));
668 }
669
670 /*
671 * Get the VM name and UUID.
672 */
673 if (RT_SUCCESS(rc))
674 {
675 rc = CFGMR3QueryStringAllocDef(pRoot, "Name", &pUVM->vm.s.pszName, "<unknown>");
676 AssertLogRelMsg(RT_SUCCESS(rc) && rc != VERR_CFGM_VALUE_NOT_FOUND, ("Configuration error: Querying \"Name\" failed, rc=%Rrc\n", rc));
677 }
678
679 if (RT_SUCCESS(rc))
680 {
681 rc = CFGMR3QueryBytes(pRoot, "UUID", &pUVM->vm.s.Uuid, sizeof(pUVM->vm.s.Uuid));
682 AssertLogRelMsg(RT_SUCCESS(rc) && rc != VERR_CFGM_VALUE_NOT_FOUND, ("Configuration error: Querying \"UUID\" failed, rc=%Rrc\n", rc));
683 }
684
685 if (RT_SUCCESS(rc))
686 {
687 /*
688 * Init the ring-3 components and ring-3 per cpu data, finishing it off
689 * by a relocation round (intermediate context finalization will do this).
690 */
691 rc = vmR3InitRing3(pVM, pUVM);
692 if (RT_SUCCESS(rc))
693 {
694 rc = PGMR3FinalizeMappings(pVM);
695 if (RT_SUCCESS(rc))
696 {
697
698 LogFlow(("Ring-3 init succeeded\n"));
699
700 /*
701 * Init the Ring-0 components.
702 */
703 rc = vmR3InitRing0(pVM);
704 if (RT_SUCCESS(rc))
705 {
706 /* Relocate again, because some switcher fixups depend on R0 init results. */
707 VMR3Relocate(pVM, 0);
708
709#ifdef VBOX_WITH_DEBUGGER
710 /*
711 * Init the tcp debugger console if we're building
712 * with debugger support.
713 */
714 void *pvUser = NULL;
715 rc = DBGCTcpCreate(pVM, &pvUser);
716 if ( RT_SUCCESS(rc)
717 || rc == VERR_NET_ADDRESS_IN_USE)
718 {
719 pUVM->vm.s.pvDBGC = pvUser;
720#endif
721 /*
722 * Init the Guest Context components.
723 */
724 rc = vmR3InitGC(pVM);
725 if (RT_SUCCESS(rc))
726 {
727 /*
728 * Now we can safely set the VM halt method to default.
729 */
730 rc = vmR3SetHaltMethodU(pUVM, VMHALTMETHOD_DEFAULT);
731 if (RT_SUCCESS(rc))
732 {
733 /*
734 * Set the state and link into the global list.
735 */
736 vmR3SetState(pVM, VMSTATE_CREATED, VMSTATE_CREATING);
737 pUVM->pNext = g_pUVMsHead;
738 g_pUVMsHead = pUVM;
739
740#ifdef LOG_ENABLED
741 RTLogSetCustomPrefixCallback(NULL, vmR3LogPrefixCallback, pUVM);
742#endif
743 return VINF_SUCCESS;
744 }
745 }
746#ifdef VBOX_WITH_DEBUGGER
747 DBGCTcpTerminate(pVM, pUVM->vm.s.pvDBGC);
748 pUVM->vm.s.pvDBGC = NULL;
749 }
750#endif
751 //..
752 }
753 }
754 vmR3Destroy(pVM);
755 }
756 }
757 //..
758
759 /* Clean CFGM. */
760 int rc2 = CFGMR3Term(pVM);
761 AssertRC(rc2);
762 }
763
764 /*
765 * Do automatic cleanups while the VM structure is still alive and all
766 * references to it are still working.
767 */
768 PDMR3CritSectTerm(pVM);
769
770 /*
771 * Drop all references to VM and the VMCPU structures, then
772 * tell GVMM to destroy the VM.
773 */
774 pUVM->pVM = NULL;
775 for (VMCPUID i = 0; i < pUVM->cCpus; i++)
776 {
777 pUVM->aCpus[i].pVM = NULL;
778 pUVM->aCpus[i].pVCpu = NULL;
779 }
780 Assert(pUVM->vm.s.enmHaltMethod == VMHALTMETHOD_BOOTSTRAP);
781
782 if (pUVM->cCpus > 1)
783 {
784 /* Poke the other EMTs since they may have stale pVM and pVCpu references
785 on the stack (see VMR3WaitU for instance) if they've been awakened after
786 VM creation. */
787 for (VMCPUID i = 1; i < pUVM->cCpus; i++)
788 VMR3NotifyCpuFFU(&pUVM->aCpus[i], 0);
789 RTThreadSleep(RT_MIN(100 + 25 *(pUVM->cCpus - 1), 500)); /* very sophisticated */
790 }
791
792 int rc2 = SUPR3CallVMMR0Ex(CreateVMReq.pVMR0, 0 /*idCpu*/, VMMR0_DO_GVMM_DESTROY_VM, 0, NULL);
793 AssertRC(rc2);
794 }
795 else
796 vmR3SetErrorU(pUVM, rc, RT_SRC_POS, N_("VM creation failed (GVMM)"));
797
798 LogFlow(("vmR3CreateU: returns %Rrc\n", rc));
799 return rc;
800}
801
802
803/**
804 * Register the calling EMT with GVM.
805 *
806 * @returns VBox status code.
807 * @param pVM The VM handle.
808 * @param idCpu The Virtual CPU ID.
809 */
810static DECLCALLBACK(int) vmR3RegisterEMT(PVM pVM, VMCPUID idCpu)
811{
812 Assert(VMMGetCpuId(pVM) == idCpu);
813 int rc = SUPR3CallVMMR0Ex(pVM->pVMR0, idCpu, VMMR0_DO_GVMM_REGISTER_VMCPU, 0, NULL);
814 if (RT_FAILURE(rc))
815 LogRel(("idCpu=%u rc=%Rrc\n", idCpu, rc));
816 return rc;
817}
818
819
820/**
821 * Initializes all R3 components of the VM
822 */
823static int vmR3InitRing3(PVM pVM, PUVM pUVM)
824{
825 int rc;
826
827 /*
828 * Register the other EMTs with GVM.
829 */
830 for (VMCPUID idCpu = 1; idCpu < pVM->cCpus; idCpu++)
831 {
832 rc = VMR3ReqCallWaitU(pUVM, idCpu, (PFNRT)vmR3RegisterEMT, 2, pVM, idCpu);
833 if (RT_FAILURE(rc))
834 return rc;
835 }
836
837 /*
838 * Init all R3 components, the order here might be important.
839 */
840 rc = MMR3Init(pVM);
841 if (RT_SUCCESS(rc))
842 {
843 STAM_REG(pVM, &pVM->StatTotalInGC, STAMTYPE_PROFILE_ADV, "/PROF/VM/InGC", STAMUNIT_TICKS_PER_CALL, "Profiling the total time spent in GC.");
844 STAM_REG(pVM, &pVM->StatSwitcherToGC, STAMTYPE_PROFILE_ADV, "/PROF/VM/SwitchToGC", STAMUNIT_TICKS_PER_CALL, "Profiling switching to GC.");
845 STAM_REG(pVM, &pVM->StatSwitcherToHC, STAMTYPE_PROFILE_ADV, "/PROF/VM/SwitchToHC", STAMUNIT_TICKS_PER_CALL, "Profiling switching to HC.");
846 STAM_REG(pVM, &pVM->StatSwitcherSaveRegs, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/SaveRegs", STAMUNIT_TICKS_PER_CALL,"Profiling switching to GC.");
847 STAM_REG(pVM, &pVM->StatSwitcherSysEnter, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/SysEnter", STAMUNIT_TICKS_PER_CALL,"Profiling switching to GC.");
848 STAM_REG(pVM, &pVM->StatSwitcherDebug, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/Debug", STAMUNIT_TICKS_PER_CALL,"Profiling switching to GC.");
849 STAM_REG(pVM, &pVM->StatSwitcherCR0, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/CR0", STAMUNIT_TICKS_PER_CALL, "Profiling switching to GC.");
850 STAM_REG(pVM, &pVM->StatSwitcherCR4, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/CR4", STAMUNIT_TICKS_PER_CALL, "Profiling switching to GC.");
851 STAM_REG(pVM, &pVM->StatSwitcherLgdt, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/Lgdt", STAMUNIT_TICKS_PER_CALL, "Profiling switching to GC.");
852 STAM_REG(pVM, &pVM->StatSwitcherLidt, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/Lidt", STAMUNIT_TICKS_PER_CALL, "Profiling switching to GC.");
853 STAM_REG(pVM, &pVM->StatSwitcherLldt, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/Lldt", STAMUNIT_TICKS_PER_CALL, "Profiling switching to GC.");
854 STAM_REG(pVM, &pVM->StatSwitcherTSS, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/TSS", STAMUNIT_TICKS_PER_CALL, "Profiling switching to GC.");
855 STAM_REG(pVM, &pVM->StatSwitcherJmpCR3, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/JmpCR3", STAMUNIT_TICKS_PER_CALL,"Profiling switching to GC.");
856 STAM_REG(pVM, &pVM->StatSwitcherRstrRegs, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/RstrRegs", STAMUNIT_TICKS_PER_CALL,"Profiling switching to GC.");
857
858 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
859 {
860 rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltYield, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "Profiling halted state yielding.", "/PROF/VM/CPU%d/Halt/Yield", idCpu);
861 AssertRC(rc);
862 rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltBlock, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "Profiling halted state blocking.", "/PROF/VM/CPU%d/Halt/Block", idCpu);
863 AssertRC(rc);
864 rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltBlockOverslept, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "Time wasted by blocking too long.", "/PROF/VM/CPU%d/Halt/BlockOverslept", idCpu);
865 AssertRC(rc);
866 rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltBlockInsomnia, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "Time slept when returning too early.","/PROF/VM/CPU%d/Halt/BlockInsomnia", idCpu);
867 AssertRC(rc);
868 rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltBlockOnTime, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "Time slept on time.", "/PROF/VM/CPU%d/Halt/BlockOnTime", idCpu);
869 AssertRC(rc);
870 rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltTimers, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "Profiling halted state timer tasks.", "/PROF/VM/CPU%d/Halt/Timers", idCpu);
871 AssertRC(rc);
872 }
873
874 STAM_REG(pVM, &pUVM->vm.s.StatReqAllocNew, STAMTYPE_COUNTER, "/VM/Req/AllocNew", STAMUNIT_OCCURENCES, "Number of VMR3ReqAlloc returning a new packet.");
875 STAM_REG(pVM, &pUVM->vm.s.StatReqAllocRaces, STAMTYPE_COUNTER, "/VM/Req/AllocRaces", STAMUNIT_OCCURENCES, "Number of VMR3ReqAlloc causing races.");
876 STAM_REG(pVM, &pUVM->vm.s.StatReqAllocRecycled, STAMTYPE_COUNTER, "/VM/Req/AllocRecycled", STAMUNIT_OCCURENCES, "Number of VMR3ReqAlloc returning a recycled packet.");
877 STAM_REG(pVM, &pUVM->vm.s.StatReqFree, STAMTYPE_COUNTER, "/VM/Req/Free", STAMUNIT_OCCURENCES, "Number of VMR3ReqFree calls.");
878 STAM_REG(pVM, &pUVM->vm.s.StatReqFreeOverflow, STAMTYPE_COUNTER, "/VM/Req/FreeOverflow", STAMUNIT_OCCURENCES, "Number of times the request was actually freed.");
879 STAM_REG(pVM, &pUVM->vm.s.StatReqProcessed, STAMTYPE_COUNTER, "/VM/Req/Processed", STAMUNIT_OCCURENCES, "Number of processed requests (any queue).");
880 STAM_REG(pVM, &pUVM->vm.s.StatReqMoreThan1, STAMTYPE_COUNTER, "/VM/Req/MoreThan1", STAMUNIT_OCCURENCES, "Number of times there are more than one request on the queue when processing it.");
881 STAM_REG(pVM, &pUVM->vm.s.StatReqPushBackRaces, STAMTYPE_COUNTER, "/VM/Req/PushBackRaces", STAMUNIT_OCCURENCES, "Number of push back races.");
882
883 rc = CPUMR3Init(pVM);
884 if (RT_SUCCESS(rc))
885 {
886 rc = HWACCMR3Init(pVM);
887 if (RT_SUCCESS(rc))
888 {
889 rc = PGMR3Init(pVM);
890 if (RT_SUCCESS(rc))
891 {
892 rc = REMR3Init(pVM);
893 if (RT_SUCCESS(rc))
894 {
895 rc = MMR3InitPaging(pVM);
896 if (RT_SUCCESS(rc))
897 rc = TMR3Init(pVM);
898 if (RT_SUCCESS(rc))
899 {
900 rc = FTMR3Init(pVM);
901 if (RT_SUCCESS(rc))
902 {
903 rc = VMMR3Init(pVM);
904 if (RT_SUCCESS(rc))
905 {
906 rc = SELMR3Init(pVM);
907 if (RT_SUCCESS(rc))
908 {
909 rc = TRPMR3Init(pVM);
910 if (RT_SUCCESS(rc))
911 {
912 rc = CSAMR3Init(pVM);
913 if (RT_SUCCESS(rc))
914 {
915 rc = PATMR3Init(pVM);
916 if (RT_SUCCESS(rc))
917 {
918 rc = IOMR3Init(pVM);
919 if (RT_SUCCESS(rc))
920 {
921 rc = EMR3Init(pVM);
922 if (RT_SUCCESS(rc))
923 {
924 rc = DBGFR3Init(pVM);
925 if (RT_SUCCESS(rc))
926 {
927 rc = PDMR3Init(pVM);
928 if (RT_SUCCESS(rc))
929 {
930 rc = PGMR3InitDynMap(pVM);
931 if (RT_SUCCESS(rc))
932 rc = MMR3HyperInitFinalize(pVM);
933 if (RT_SUCCESS(rc))
934 rc = PATMR3InitFinalize(pVM);
935 if (RT_SUCCESS(rc))
936 rc = PGMR3InitFinalize(pVM);
937 if (RT_SUCCESS(rc))
938 rc = SELMR3InitFinalize(pVM);
939 if (RT_SUCCESS(rc))
940 rc = TMR3InitFinalize(pVM);
941 if (RT_SUCCESS(rc))
942 rc = REMR3InitFinalize(pVM);
943 if (RT_SUCCESS(rc))
944 rc = vmR3InitDoCompleted(pVM, VMINITCOMPLETED_RING3);
945 if (RT_SUCCESS(rc))
946 {
947 LogFlow(("vmR3InitRing3: returns %Rrc\n", VINF_SUCCESS));
948 return VINF_SUCCESS;
949 }
950 int rc2 = PDMR3Term(pVM);
951 AssertRC(rc2);
952 }
953 int rc2 = DBGFR3Term(pVM);
954 AssertRC(rc2);
955 }
956 int rc2 = EMR3Term(pVM);
957 AssertRC(rc2);
958 }
959 int rc2 = IOMR3Term(pVM);
960 AssertRC(rc2);
961 }
962 int rc2 = PATMR3Term(pVM);
963 AssertRC(rc2);
964 }
965 int rc2 = CSAMR3Term(pVM);
966 AssertRC(rc2);
967 }
968 int rc2 = TRPMR3Term(pVM);
969 AssertRC(rc2);
970 }
971 int rc2 = SELMR3Term(pVM);
972 AssertRC(rc2);
973 }
974 int rc2 = VMMR3Term(pVM);
975 AssertRC(rc2);
976 }
977 int rc2 = FTMR3Term(pVM);
978 AssertRC(rc2);
979 }
980 int rc2 = TMR3Term(pVM);
981 AssertRC(rc2);
982 }
983 int rc2 = REMR3Term(pVM);
984 AssertRC(rc2);
985 }
986 int rc2 = PGMR3Term(pVM);
987 AssertRC(rc2);
988 }
989 int rc2 = HWACCMR3Term(pVM);
990 AssertRC(rc2);
991 }
992 //int rc2 = CPUMR3Term(pVM);
993 //AssertRC(rc2);
994 }
995 /* MMR3Term is not called here because it'll kill the heap. */
996 }
997
998 LogFlow(("vmR3InitRing3: returns %Rrc\n", rc));
999 return rc;
1000}
1001
1002
1003/**
1004 * Initializes all R0 components of the VM
1005 */
1006static int vmR3InitRing0(PVM pVM)
1007{
1008 LogFlow(("vmR3InitRing0:\n"));
1009
1010 /*
1011 * Check for FAKE suplib mode.
1012 */
1013 int rc = VINF_SUCCESS;
1014 const char *psz = RTEnvGet("VBOX_SUPLIB_FAKE");
1015 if (!psz || strcmp(psz, "fake"))
1016 {
1017 /*
1018 * Call the VMMR0 component and let it do the init.
1019 */
1020 rc = VMMR3InitR0(pVM);
1021 }
1022 else
1023 Log(("vmR3InitRing0: skipping because of VBOX_SUPLIB_FAKE=fake\n"));
1024
1025 /*
1026 * Do notifications and return.
1027 */
1028 if (RT_SUCCESS(rc))
1029 rc = vmR3InitDoCompleted(pVM, VMINITCOMPLETED_RING0);
1030
1031 /** @todo Move this to the VMINITCOMPLETED_RING0 notification handler. */
1032 if (RT_SUCCESS(rc))
1033 {
1034 rc = HWACCMR3InitFinalizeR0(pVM);
1035 CPUMR3SetHWVirtEx(pVM, HWACCMIsEnabled(pVM));
1036 }
1037
1038 LogFlow(("vmR3InitRing0: returns %Rrc\n", rc));
1039 return rc;
1040}
1041
1042
1043/**
1044 * Initializes all GC components of the VM
1045 */
1046static int vmR3InitGC(PVM pVM)
1047{
1048 LogFlow(("vmR3InitGC:\n"));
1049
1050 /*
1051 * Check for FAKE suplib mode.
1052 */
1053 int rc = VINF_SUCCESS;
1054 const char *psz = RTEnvGet("VBOX_SUPLIB_FAKE");
1055 if (!psz || strcmp(psz, "fake"))
1056 {
1057 /*
1058 * Call the VMMR0 component and let it do the init.
1059 */
1060 rc = VMMR3InitRC(pVM);
1061 }
1062 else
1063 Log(("vmR3InitGC: skipping because of VBOX_SUPLIB_FAKE=fake\n"));
1064
1065 /*
1066 * Do notifications and return.
1067 */
1068 if (RT_SUCCESS(rc))
1069 rc = vmR3InitDoCompleted(pVM, VMINITCOMPLETED_GC);
1070 LogFlow(("vmR3InitGC: returns %Rrc\n", rc));
1071 return rc;
1072}
1073
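/* (Illustrative note, not part of the original file.)  Both vmR3InitRing0 and
 * vmR3InitGC above skip their work when the VBOX_SUPLIB_FAKE environment
 * variable equals "fake", which appears to be a developer aid for running
 * without the real support driver.  A test harness could arrange that before
 * creating the VM, e.g.:
 *
 *      RTEnvSet("VBOX_SUPLIB_FAKE", "fake");   // before VMR3Create()
 */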
1074
1075/**
1076 * Do init completed notifications.
1077 *
1078 * @returns VBox status code.
1079 * @param pVM The VM handle.
1080 * @param enmWhat What's completed.
1081 */
1082static int vmR3InitDoCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
1083{
1084 int rc = VMMR3InitCompleted(pVM, enmWhat);
1085 if (RT_SUCCESS(rc))
1086 rc = HWACCMR3InitCompleted(pVM, enmWhat);
1087 return rc;
1088}
1089
1090
1091/**
1092 * Logger callback for inserting a custom prefix.
1093 *
1094 * @returns Number of chars written.
1095 * @param pLogger The logger.
1096 * @param pchBuf The output buffer.
1097 * @param cchBuf The output buffer size.
1098 * @param pvUser Pointer to the UVM structure.
1099 */
1100static DECLCALLBACK(size_t) vmR3LogPrefixCallback(PRTLOGGER pLogger, char *pchBuf, size_t cchBuf, void *pvUser)
1101{
1102 AssertReturn(cchBuf >= 2, 0);
1103 PUVM pUVM = (PUVM)pvUser;
1104 PUVMCPU pUVCpu = (PUVMCPU)RTTlsGet(pUVM->vm.s.idxTLS);
1105 if (pUVCpu)
1106 {
1107 static const char s_szHex[17] = "0123456789abcdef";
1108 VMCPUID const idCpu = pUVCpu->idCpu;
1109 pchBuf[1] = s_szHex[ idCpu & 15];
1110 pchBuf[0] = s_szHex[(idCpu >> 4) & 15];
1111 }
1112 else
1113 {
1114 pchBuf[0] = 'x';
1115 pchBuf[1] = 'y';
1116 }
1117
1118 return 2;
1119}
1120
1121
1122/**
1123 * Calls the relocation functions for all VMM components so they can update
1124 * any GC pointers. When this function is called all the basic VM members
1125 * have been updated and the actual memory relocation has been done
1126 * by the PGM/MM.
1127 *
1128 * This is used both on init and on runtime relocations.
1129 *
1130 * @param pVM VM handle.
1131 * @param offDelta Relocation delta relative to old location.
1132 */
1133VMMR3DECL(void) VMR3Relocate(PVM pVM, RTGCINTPTR offDelta)
1134{
1135 LogFlow(("VMR3Relocate: offDelta=%RGv\n", offDelta));
1136
1137 /*
1138 * The order here is very important!
1139 */
1140 PGMR3Relocate(pVM, offDelta);
1141 PDMR3LdrRelocateU(pVM->pUVM, offDelta);
1142 PGMR3Relocate(pVM, 0); /* Repeat after PDM relocation. */
1143 CPUMR3Relocate(pVM);
1144 HWACCMR3Relocate(pVM);
1145 SELMR3Relocate(pVM);
1146 VMMR3Relocate(pVM, offDelta);
1147 SELMR3Relocate(pVM); /* !hack! fix stack! */
1148 TRPMR3Relocate(pVM, offDelta);
1149 PATMR3Relocate(pVM);
1150 CSAMR3Relocate(pVM, offDelta);
1151 IOMR3Relocate(pVM, offDelta);
1152 EMR3Relocate(pVM);
1153 TMR3Relocate(pVM, offDelta);
1154 DBGFR3Relocate(pVM, offDelta);
1155 PDMR3Relocate(pVM, offDelta);
1156}
1157
1158
1159/**
1160 * EMT rendezvous worker for VMR3PowerOn.
1161 *
1162 * @returns VERR_VM_INVALID_VM_STATE or VINF_SUCCESS. (This is a strict return
1163 * code, see FNVMMEMTRENDEZVOUS.)
1164 *
1165 * @param pVM The VM handle.
1166 * @param pVCpu The VMCPU handle of the EMT.
1167 * @param pvUser Ignored.
1168 */
1169static DECLCALLBACK(VBOXSTRICTRC) vmR3PowerOn(PVM pVM, PVMCPU pVCpu, void *pvUser)
1170{
1171 LogFlow(("vmR3PowerOn: pVM=%p pVCpu=%p/#%u\n", pVM, pVCpu, pVCpu->idCpu));
1172 Assert(!pvUser); NOREF(pvUser);
1173
1174 /*
1175 * The first thread thru here tries to change the state. We shouldn't be
1176 * called again if this fails.
1177 */
1178 if (pVCpu->idCpu == pVM->cCpus - 1)
1179 {
1180 int rc = vmR3TrySetState(pVM, "VMR3PowerOn", 1, VMSTATE_POWERING_ON, VMSTATE_CREATED);
1181 if (RT_FAILURE(rc))
1182 return rc;
1183 }
1184
1185 VMSTATE enmVMState = VMR3GetState(pVM);
1186 AssertMsgReturn(enmVMState == VMSTATE_POWERING_ON,
1187 ("%s\n", VMR3GetStateName(enmVMState)),
1188 VERR_INTERNAL_ERROR_4);
1189
1190 /*
1191 * All EMTs change their state to started.
1192 */
1193 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
1194
1195 /*
1196 * EMT(0) is last thru here and it will make the notification calls
1197 * and advance the state.
1198 */
1199 if (pVCpu->idCpu == 0)
1200 {
1201 PDMR3PowerOn(pVM);
1202 vmR3SetState(pVM, VMSTATE_RUNNING, VMSTATE_POWERING_ON);
1203 }
1204
1205 return VINF_SUCCESS;
1206}
1207
1208
1209/**
1210 * Powers on the virtual machine.
1211 *
1212 * @returns VBox status code.
1213 *
1214 * @param pVM The VM to power on.
1215 *
1216 * @thread Any thread.
1217 * @vmstate Created
1218 * @vmstateto PoweringOn+Running
1219 */
1220VMMR3DECL(int) VMR3PowerOn(PVM pVM)
1221{
1222 LogFlow(("VMR3PowerOn: pVM=%p\n", pVM));
1223 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1224
1225 /*
1226 * Gather all the EMTs to reduce the init TSC drift and keep
1227 * the state changing APIs a bit uniform.
1228 */
1229 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
1230 vmR3PowerOn, NULL);
1231 LogFlow(("VMR3PowerOn: returns %Rrc\n", rc));
1232 return rc;
1233}
1234
1235
1236/**
1237 * Does the suspend notifications.
1238 *
1239 * @param pVM The VM handle.
1240 * @thread EMT(0)
1241 */
1242static void vmR3SuspendDoWork(PVM pVM)
1243{
1244 PDMR3Suspend(pVM);
1245}
1246
1247
1248/**
1249 * EMT rendezvous worker for VMR3Suspend.
1250 *
1251 * @returns VERR_VM_INVALID_VM_STATE or VINF_EM_SUSPEND. (This is a strict
1252 * return code, see FNVMMEMTRENDEZVOUS.)
1253 *
1254 * @param pVM The VM handle.
1255 * @param pVCpu The VMCPU handle of the EMT.
1256 * @param pvUser Ignored.
1257 */
1258static DECLCALLBACK(VBOXSTRICTRC) vmR3Suspend(PVM pVM, PVMCPU pVCpu, void *pvUser)
1259{
1260 LogFlow(("vmR3Suspend: pVM=%p pVCpu=%p/#%u\n", pVM, pVCpu, pVCpu->idCpu));
1261 Assert(!pvUser); NOREF(pvUser);
1262
1263 /*
1264 * The first EMT switches the state to suspending. If this fails because
1265 * something was racing us in one way or the other, there will be no more
1266 * calls and thus the state assertion below is not going to annoy anyone.
1267 */
1268 if (pVCpu->idCpu == pVM->cCpus - 1)
1269 {
1270 int rc = vmR3TrySetState(pVM, "VMR3Suspend", 2,
1271 VMSTATE_SUSPENDING, VMSTATE_RUNNING,
1272 VMSTATE_SUSPENDING_EXT_LS, VMSTATE_RUNNING_LS);
1273 if (RT_FAILURE(rc))
1274 return rc;
1275 }
1276
1277 VMSTATE enmVMState = VMR3GetState(pVM);
1278 AssertMsgReturn( enmVMState == VMSTATE_SUSPENDING
1279 || enmVMState == VMSTATE_SUSPENDING_EXT_LS,
1280 ("%s\n", VMR3GetStateName(enmVMState)),
1281 VERR_INTERNAL_ERROR_4);
1282
1283 /*
1284 * EMT(0) does the actual suspending *after* all the other CPUs have
1285 * been thru here.
1286 */
1287 if (pVCpu->idCpu == 0)
1288 {
1289 vmR3SuspendDoWork(pVM);
1290
1291 int rc = vmR3TrySetState(pVM, "VMR3Suspend", 2,
1292 VMSTATE_SUSPENDED, VMSTATE_SUSPENDING,
1293 VMSTATE_SUSPENDED_EXT_LS, VMSTATE_SUSPENDING_EXT_LS);
1294 if (RT_FAILURE(rc))
1295 return VERR_INTERNAL_ERROR_3;
1296 }
1297
1298 return VINF_EM_SUSPEND;
1299}
1300
1301
1302/**
1303 * Suspends a running VM.
1304 *
1305 * @returns VBox status code. When called on EMT, this will be a strict status
1306 * code that has to be propagated up the call stack.
1307 *
1308 * @param pVM The VM to suspend.
1309 *
1310 * @thread Any thread.
1311 * @vmstate Running or RunningLS
1312 * @vmstateto Suspending + Suspended or SuspendingExtLS + SuspendedExtLS
1313 */
1314VMMR3DECL(int) VMR3Suspend(PVM pVM)
1315{
1316 LogFlow(("VMR3Suspend: pVM=%p\n", pVM));
1317 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1318
1319 /*
1320 * Gather all the EMTs to make sure there are no races before
1321 * changing the VM state.
1322 */
1323 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
1324 vmR3Suspend, NULL);
1325 LogFlow(("VMR3Suspend: returns %Rrc\n", rc));
1326 return rc;
1327}
1328
1329
1330/**
1331 * EMT rendezvous worker for VMR3Resume.
1332 *
1333 * @returns VERR_VM_INVALID_VM_STATE or VINF_EM_RESUME. (This is a strict
1334 * return code, see FNVMMEMTRENDEZVOUS.)
1335 *
1336 * @param pVM The VM handle.
1337 * @param pVCpu The VMCPU handle of the EMT.
1338 * @param pvUser Ignored.
1339 */
1340static DECLCALLBACK(VBOXSTRICTRC) vmR3Resume(PVM pVM, PVMCPU pVCpu, void *pvUser)
1341{
1342 LogFlow(("vmR3Resume: pVM=%p pVCpu=%p/#%u\n", pVM, pVCpu, pVCpu->idCpu));
1343 Assert(!pvUser); NOREF(pvUser);
1344
1345 /*
1346 * The first thread thru here tries to change the state. We shouldn't be
1347 * called again if this fails.
1348 */
1349 if (pVCpu->idCpu == pVM->cCpus - 1)
1350 {
1351 int rc = vmR3TrySetState(pVM, "VMR3Resume", 1, VMSTATE_RESUMING, VMSTATE_SUSPENDED);
1352 if (RT_FAILURE(rc))
1353 return rc;
1354 }
1355
1356 VMSTATE enmVMState = VMR3GetState(pVM);
1357 AssertMsgReturn(enmVMState == VMSTATE_RESUMING,
1358 ("%s\n", VMR3GetStateName(enmVMState)),
1359 VERR_INTERNAL_ERROR_4);
1360
1361#if 0
1362 /*
1363 * All EMTs change their state to started.
1364 */
1365 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
1366#endif
1367
1368 /*
1369 * EMT(0) is last thru here and it will make the notification calls
1370 * and advance the state.
1371 */
1372 if (pVCpu->idCpu == 0)
1373 {
1374 PDMR3Resume(pVM);
1375 vmR3SetState(pVM, VMSTATE_RUNNING, VMSTATE_RESUMING);
1376 pVM->vm.s.fTeleportedAndNotFullyResumedYet = false;
1377 }
1378
1379 return VINF_EM_RESUME;
1380}
1381
1382
1383/**
1384 * Resume VM execution.
1385 *
1386 * @returns VBox status code. When called on EMT, this will be a strict status
1387 * code that has to be propagated up the call stack.
1388 *
1389 * @param pVM The VM to resume.
1390 *
1391 * @thread Any thread.
1392 * @vmstate Suspended
1393 * @vmstateto Running
1394 */
1395VMMR3DECL(int) VMR3Resume(PVM pVM)
1396{
1397 LogFlow(("VMR3Resume: pVM=%p\n", pVM));
1398 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1399
1400 /*
1401 * Gather all the EMTs to make sure there are no races before
1402 * changing the VM state.
1403 */
1404 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
1405 vmR3Resume, NULL);
1406 LogFlow(("VMR3Resume: returns %Rrc\n", rc));
1407 return rc;
1408}
1409
1410
1411/**
1412 * EMT rendezvous worker for VMR3Save and VMR3Teleport that suspends the VM
1413 * after the live step has been completed.
1414 *
1415 * @returns VERR_VM_INVALID_VM_STATE or VINF_EM_RESUME. (This is a strict
1416 * return code, see FNVMMEMTRENDEZVOUS.)
1417 *
1418 * @param pVM The VM handle.
1419 * @param pVCpu The VMCPU handle of the EMT.
1420 * @param pvUser The pfSuspended argument of vmR3SaveTeleport.
1421 */
1422static DECLCALLBACK(VBOXSTRICTRC) vmR3LiveDoSuspend(PVM pVM, PVMCPU pVCpu, void *pvUser)
1423{
1424 LogFlow(("vmR3LiveDoSuspend: pVM=%p pVCpu=%p/#%u\n", pVM, pVCpu, pVCpu->idCpu));
1425 bool *pfSuspended = (bool *)pvUser;
1426
1427 /*
1428 * The first thread thru here tries to change the state. We shouldn't be
1429 * called again if this fails.
1430 */
1431 if (pVCpu->idCpu == pVM->cCpus - 1U)
1432 {
1433 PUVM pUVM = pVM->pUVM;
1434 int rc;
1435
1436 RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
1437 VMSTATE enmVMState = pVM->enmVMState;
1438 switch (enmVMState)
1439 {
1440 case VMSTATE_RUNNING_LS:
1441 vmR3SetStateLocked(pVM, pUVM, VMSTATE_SUSPENDING_LS, VMSTATE_RUNNING_LS);
1442 rc = VINF_SUCCESS;
1443 break;
1444
1445 case VMSTATE_SUSPENDED_EXT_LS:
1446 case VMSTATE_SUSPENDED_LS: /* (via reset) */
1447 rc = VINF_SUCCESS;
1448 break;
1449
1450 case VMSTATE_DEBUGGING_LS:
1451 rc = VERR_TRY_AGAIN;
1452 break;
1453
1454 case VMSTATE_OFF_LS:
1455 vmR3SetStateLocked(pVM, pUVM, VMSTATE_OFF, VMSTATE_OFF_LS);
1456 rc = VERR_SSM_LIVE_POWERED_OFF;
1457 break;
1458
1459 case VMSTATE_FATAL_ERROR_LS:
1460 vmR3SetStateLocked(pVM, pUVM, VMSTATE_FATAL_ERROR, VMSTATE_FATAL_ERROR_LS);
1461 rc = VERR_SSM_LIVE_FATAL_ERROR;
1462 break;
1463
1464 case VMSTATE_GURU_MEDITATION_LS:
1465 vmR3SetStateLocked(pVM, pUVM, VMSTATE_GURU_MEDITATION, VMSTATE_GURU_MEDITATION_LS);
1466 rc = VERR_SSM_LIVE_GURU_MEDITATION;
1467 break;
1468
1469 case VMSTATE_POWERING_OFF_LS:
1470 case VMSTATE_SUSPENDING_EXT_LS:
1471 case VMSTATE_RESETTING_LS:
1472 default:
1473 AssertMsgFailed(("%s\n", VMR3GetStateName(enmVMState)));
1474 rc = VERR_INTERNAL_ERROR_3;
1475 break;
1476 }
1477 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
1478 if (RT_FAILURE(rc))
1479 {
1480 LogFlow(("vmR3LiveDoSuspend: returns %Rrc (state was %s)\n", rc, VMR3GetStateName(enmVMState)));
1481 return rc;
1482 }
1483 }
1484
1485 VMSTATE enmVMState = VMR3GetState(pVM);
1486 AssertMsgReturn(enmVMState == VMSTATE_SUSPENDING_LS,
1487 ("%s\n", VMR3GetStateName(enmVMState)),
1488 VERR_INTERNAL_ERROR_4);
1489
1490 /*
1491 * Only EMT(0) has work to do since it's last thru here.
1492 */
1493 if (pVCpu->idCpu == 0)
1494 {
1495 vmR3SuspendDoWork(pVM);
1496 int rc = vmR3TrySetState(pVM, "VMR3Suspend", 1,
1497 VMSTATE_SUSPENDED_LS, VMSTATE_SUSPENDING_LS);
1498 if (RT_FAILURE(rc))
1499 return VERR_INTERNAL_ERROR_3;
1500
1501 *pfSuspended = true;
1502 }
1503
1504 return VINF_EM_SUSPEND;
1505}
1506
1507
1508/**
1509 * EMT rendezvous worker that VMR3Save and VMR3Teleport uses to clean up a
1510 * SSMR3LiveDoStep1 failure.
1511 *
1512 * Doing this as a rendezvous operation avoids all annoying transition
1513 * states.
1514 *
1515 * @returns VERR_VM_INVALID_VM_STATE, VINF_SUCCESS or some specific VERR_SSM_*
1516 * status code. (This is a strict return code, see FNVMMEMTRENDEZVOUS.)
1517 *
1518 * @param pVM The VM handle.
1519 * @param pVCpu The VMCPU handle of the EMT.
1520 * @param pvUser The pfSuspended argument of vmR3SaveTeleport.
1521 */
1522static DECLCALLBACK(VBOXSTRICTRC) vmR3LiveDoStep1Cleanup(PVM pVM, PVMCPU pVCpu, void *pvUser)
1523{
1524 LogFlow(("vmR3LiveDoStep1Cleanup: pVM=%p pVCpu=%p/#%u\n", pVM, pVCpu, pVCpu->idCpu));
1525 bool *pfSuspended = (bool *)pvUser;
1526 NOREF(pVCpu);
1527
1528 int rc = vmR3TrySetState(pVM, "vmR3LiveDoStep1Cleanup", 8,
1529 VMSTATE_OFF, VMSTATE_OFF_LS, /* 1 */
1530 VMSTATE_FATAL_ERROR, VMSTATE_FATAL_ERROR_LS, /* 2 */
1531 VMSTATE_GURU_MEDITATION, VMSTATE_GURU_MEDITATION_LS, /* 3 */
1532 VMSTATE_SUSPENDED, VMSTATE_SUSPENDED_LS, /* 4 */
1533 VMSTATE_SUSPENDED, VMSTATE_SAVING,
1534 VMSTATE_SUSPENDED, VMSTATE_SUSPENDED_EXT_LS,
1535 VMSTATE_RUNNING, VMSTATE_RUNNING_LS,
1536 VMSTATE_DEBUGGING, VMSTATE_DEBUGGING_LS);
1537 if (rc == 1)
1538 rc = VERR_SSM_LIVE_POWERED_OFF;
1539 else if (rc == 2)
1540 rc = VERR_SSM_LIVE_FATAL_ERROR;
1541 else if (rc == 3)
1542 rc = VERR_SSM_LIVE_GURU_MEDITATION;
1543 else if (rc == 4)
1544 {
1545 *pfSuspended = true;
1546 rc = VINF_SUCCESS;
1547 }
1548 else if (rc > 0)
1549 rc = VINF_SUCCESS;
1550 return rc;
1551}
1552
1553
1554/**
1555 * EMT(0) worker for VMR3Save and VMR3Teleport that completes the live save.
1556 *
1557 * @returns VBox status code.
1558 * @retval VINF_SSM_LIVE_SUSPENDED if VMR3Suspend was called.
1559 *
1560 * @param pVM The VM handle.
1561 * @param pSSM The handle of saved state operation.
1562 *
1563 * @thread EMT(0)
1564 */
1565static DECLCALLBACK(int) vmR3LiveDoStep2(PVM pVM, PSSMHANDLE pSSM)
1566{
1567 LogFlow(("vmR3LiveDoStep2: pVM=%p pSSM=%p\n", pVM, pSSM));
1568 VM_ASSERT_EMT0(pVM);
1569
1570 /*
1571 * Advance the state and mark if VMR3Suspend was called.
1572 */
1573 int rc = VINF_SUCCESS;
1574 VMSTATE enmVMState = VMR3GetState(pVM);
1575 if (enmVMState == VMSTATE_SUSPENDED_LS)
1576 vmR3SetState(pVM, VMSTATE_SAVING, VMSTATE_SUSPENDED_LS);
1577 else
1578 {
1579 if (enmVMState != VMSTATE_SAVING)
1580 vmR3SetState(pVM, VMSTATE_SAVING, VMSTATE_SUSPENDED_EXT_LS);
1581 rc = VINF_SSM_LIVE_SUSPENDED;
1582 }
1583
1584 /*
1585 * Finish up and release the handle. Careful with the status codes.
1586 */
1587 int rc2 = SSMR3LiveDoStep2(pSSM);
1588 if (rc == VINF_SUCCESS || (RT_FAILURE(rc2) && RT_SUCCESS(rc)))
1589 rc = rc2;
1590
1591 rc2 = SSMR3LiveDone(pSSM);
1592 if (rc == VINF_SUCCESS || (RT_FAILURE(rc2) && RT_SUCCESS(rc)))
1593 rc = rc2;
1594
1595 /*
1596 * Advance to the final state and return.
1597 */
1598 vmR3SetState(pVM, VMSTATE_SUSPENDED, VMSTATE_SAVING);
1599 Assert(rc > VINF_EM_LAST || rc < VINF_EM_FIRST);
1600 return rc;
1601}
1602
1603
1604/**
1605 * Worker for vmR3SaveTeleport that validates the state and calls SSMR3Save or
1606 * SSMR3LiveSave.
1607 *
1608 * @returns VBox status code.
1609 *
1610 * @param pVM The VM handle.
1611 * @param cMsMaxDowntime The maximum downtime given as milliseconds.
1612 * @param pszFilename The name of the file. NULL if pStreamOps is used.
1613 * @param pStreamOps The stream methods. NULL if pszFilename is used.
1614 * @param pvStreamOpsUser The user argument to the stream methods.
1615 * @param enmAfter What to do afterwards.
1616 * @param pfnProgress Progress callback. Optional.
1617 * @param pvProgressUser User argument for the progress callback.
1618 * @param ppSSM Where to return the saved state handle in case of a
1619 * live snapshot scenario.
1620 * @param fSkipStateChanges Set if we're supposed to skip state changes (FTM delta case)
1621 *
1622 * @thread EMT
1623 */
1624static DECLCALLBACK(int) vmR3Save(PVM pVM, uint32_t cMsMaxDowntime, const char *pszFilename, PCSSMSTRMOPS pStreamOps, void *pvStreamOpsUser,
1625 SSMAFTER enmAfter, PFNVMPROGRESS pfnProgress, void *pvProgressUser, PSSMHANDLE *ppSSM,
1626 bool fSkipStateChanges)
1627{
1628 int rc = VINF_SUCCESS;
1629
1630 LogFlow(("vmR3Save: pVM=%p cMsMaxDowntime=%u pszFilename=%p:{%s} pStreamOps=%p pvStreamOpsUser=%p enmAfter=%d pfnProgress=%p pvProgressUser=%p ppSSM=%p\n",
1631 pVM, cMsMaxDowntime, pszFilename, pszFilename, pStreamOps, pvStreamOpsUser, enmAfter, pfnProgress, pvProgressUser, ppSSM));
1632
1633 /*
1634 * Validate input.
1635 */
1636 AssertPtrNull(pszFilename);
1637 AssertPtrNull(pStreamOps);
1638 AssertPtr(pVM);
1639 Assert( enmAfter == SSMAFTER_DESTROY
1640 || enmAfter == SSMAFTER_CONTINUE
1641 || enmAfter == SSMAFTER_TELEPORT);
1642 AssertPtr(ppSSM);
1643 *ppSSM = NULL;
1644
1645 /*
1646 * Change the state and perform/start the saving.
1647 */
1648 if (!fSkipStateChanges)
1649 {
1650 rc = vmR3TrySetState(pVM, "VMR3Save", 2,
1651 VMSTATE_SAVING, VMSTATE_SUSPENDED,
1652 VMSTATE_RUNNING_LS, VMSTATE_RUNNING);
1653 }
1654 else
1655 {
1656 Assert(enmAfter != SSMAFTER_TELEPORT);
1657 rc = 1;
1658 }
1659
1660 if (rc == 1 && enmAfter != SSMAFTER_TELEPORT)
1661 {
1662 rc = SSMR3Save(pVM, pszFilename, pStreamOps, pvStreamOpsUser, enmAfter, pfnProgress, pvProgressUser);
1663 if (!fSkipStateChanges)
1664 vmR3SetState(pVM, VMSTATE_SUSPENDED, VMSTATE_SAVING);
1665 }
1666 else if (rc == 2 || enmAfter == SSMAFTER_TELEPORT)
1667 {
1668 Assert(!fSkipStateChanges);
1669 if (enmAfter == SSMAFTER_TELEPORT)
1670 pVM->vm.s.fTeleportedAndNotFullyResumedYet = true;
1671 rc = SSMR3LiveSave(pVM, cMsMaxDowntime, pszFilename, pStreamOps, pvStreamOpsUser,
1672 enmAfter, pfnProgress, pvProgressUser, ppSSM);
1673 /* (We're not subject to cancellation just yet.) */
1674 }
1675 else
1676 Assert(RT_FAILURE(rc));
1677 return rc;
1678}
1679
1680
1681/**
1682 * Common worker for VMR3Save and VMR3Teleport.
1683 *
1684 * @returns VBox status code.
1685 *
1686 * @param pVM The VM handle.
1687 * @param cMsMaxDowntime The maximum downtime given as milliseconds.
1688 * @param pszFilename The name of the file. NULL if pStreamOps is used.
1689 * @param pStreamOps The stream methods. NULL if pszFilename is used.
1690 * @param pvStreamOpsUser The user argument to the stream methods.
1691 * @param enmAfter What to do afterwards.
1692 * @param pfnProgress Progress callback. Optional.
1693 * @param pvProgressUser User argument for the progress callback.
1694 * @param pfSuspended Set if we suspended the VM.
1695 * @param fSkipStateChanges Set if we're supposed to skip state changes (FTM delta case)
1696 *
1697 * @thread Non-EMT
1698 */
1699static int vmR3SaveTeleport(PVM pVM, uint32_t cMsMaxDowntime,
1700 const char *pszFilename, PCSSMSTRMOPS pStreamOps, void *pvStreamOpsUser,
1701 SSMAFTER enmAfter, PFNVMPROGRESS pfnProgress, void *pvProgressUser, bool *pfSuspended,
1702 bool fSkipStateChanges)
1703{
1704 /*
1705 * Request the operation in EMT(0).
1706 */
1707 PSSMHANDLE pSSM;
1708 int rc = VMR3ReqCallWaitU(pVM->pUVM, 0 /*idDstCpu*/,
1709 (PFNRT)vmR3Save, 10, pVM, cMsMaxDowntime, pszFilename, pStreamOps, pvStreamOpsUser,
1710 enmAfter, pfnProgress, pvProgressUser, &pSSM, fSkipStateChanges);
1711 if ( RT_SUCCESS(rc)
1712 && pSSM)
1713 {
1714 Assert(!fSkipStateChanges);
1715
1716 /*
1717 * Live snapshot.
1718 *
1719 * The state handling here is kind of tricky, doing it on EMT(0) helps
1720 * a bit. See the VMSTATE diagram for details.
1721 */
1722 rc = SSMR3LiveDoStep1(pSSM);
1723 if (RT_SUCCESS(rc))
1724 {
1725 if (VMR3GetState(pVM) != VMSTATE_SAVING)
1726 for (;;)
1727 {
1728                    /* Try to suspend the VM. */
1729 rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
1730 vmR3LiveDoSuspend, pfSuspended);
1731 if (rc != VERR_TRY_AGAIN)
1732 break;
1733
1734 /* Wait for the state to change. */
1735 RTThreadSleep(250); /** @todo Live Migration: fix this polling wait by some smart use of multiple release event semaphores.. */
1736 }
1737 if (RT_SUCCESS(rc))
1738 rc = VMR3ReqCallWaitU(pVM->pUVM, 0 /*idDstCpu*/, (PFNRT)vmR3LiveDoStep2, 2, pVM, pSSM);
1739 else
1740 {
1741 int rc2 = VMR3ReqCallWaitU(pVM->pUVM, 0 /*idDstCpu*/, (PFNRT)SSMR3LiveDone, 1, pSSM);
1742 AssertMsg(rc2 == rc, ("%Rrc != %Rrc\n", rc2, rc));
1743 }
1744 }
1745 else
1746 {
1747 int rc2 = VMR3ReqCallWaitU(pVM->pUVM, 0 /*idDstCpu*/, (PFNRT)SSMR3LiveDone, 1, pSSM);
1748 AssertMsg(rc2 == rc, ("%Rrc != %Rrc\n", rc2, rc));
1749
1750 rc2 = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE, vmR3LiveDoStep1Cleanup, pfSuspended);
1751 if (RT_FAILURE(rc2) && rc == VERR_SSM_CANCELLED)
1752 rc = rc2;
1753 }
1754 }
1755
1756 return rc;
1757}
1758
1759
1760/**
1761 * Save current VM state.
1762 *
1763 * Can be used for both saving the state and creating snapshots.
1764 *
1765 * When called for a VM in the Running state, the saved state is created live
1766 * and the VM is only suspended when the final part of the saving is performed.
1767 * The VM state will not be restored to Running in this case and it's up to the
1768 * caller to call VMR3Resume if this is desirable. (The rationale is that the
1769 * caller probably wishes to reconfigure the disks before resuming the VM.)
1770 *
1771 * @returns VBox status code.
1772 *
1773 * @param pVM The VM which state should be saved.
1774 * @param pszFilename The name of the save state file.
1777 * @param fContinueAfterwards Whether to continue execution afterwards or not.
1778 * When in doubt, set this to true.
1779 * @param pfnProgress Progress callback. Optional.
1780 * @param pvUser User argument for the progress callback.
1781 * @param pfSuspended Set if we suspended the VM.
1782 *
1783 * @thread Non-EMT.
1784 * @vmstate Suspended or Running
1785 * @vmstateto Saving+Suspended or
1786 * RunningLS+SuspendingLS+SuspendedLS+Saving+Suspended.
1787 */
1788VMMR3DECL(int) VMR3Save(PVM pVM, const char *pszFilename, bool fContinueAfterwards, PFNVMPROGRESS pfnProgress, void *pvUser, bool *pfSuspended)
1789{
1790 LogFlow(("VMR3Save: pVM=%p pszFilename=%p:{%s} fContinueAfterwards=%RTbool pfnProgress=%p pvUser=%p pfSuspended=%p\n",
1791 pVM, pszFilename, pszFilename, fContinueAfterwards, pfnProgress, pvUser, pfSuspended));
1792
1793 /*
1794 * Validate input.
1795 */
1796 AssertPtr(pfSuspended);
1797 *pfSuspended = false;
1798 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1799 VM_ASSERT_OTHER_THREAD(pVM);
1800 AssertReturn(VALID_PTR(pszFilename), VERR_INVALID_POINTER);
1801 AssertReturn(*pszFilename, VERR_INVALID_PARAMETER);
1802 AssertPtrNullReturn(pfnProgress, VERR_INVALID_POINTER);
1803
1804 /*
1805 * Join paths with VMR3Teleport.
1806 */
1807 SSMAFTER enmAfter = fContinueAfterwards ? SSMAFTER_CONTINUE : SSMAFTER_DESTROY;
1808 int rc = vmR3SaveTeleport(pVM, 250 /*cMsMaxDowntime*/,
1809 pszFilename, NULL /* pStreamOps */, NULL /* pvStreamOpsUser */,
1810 enmAfter, pfnProgress, pvUser, pfSuspended,
1811 false /* fSkipStateChanges */);
1812 LogFlow(("VMR3Save: returns %Rrc (*pfSuspended=%RTbool)\n", rc, *pfSuspended));
1813 return rc;
1814}
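
/* Illustrative usage sketch, not part of the original file: a minimal
 * save-to-file call sequence as a VMM user (e.g. Main) might issue it from a
 * non-EMT thread.  pVM is assumed to be a valid handle and the file name is a
 * placeholder; the VMR3Resume call is only needed when a live save suspended a
 * running VM and the caller wants it running again. */
#if 0
    bool fSuspended = false;
    int rc = VMR3Save(pVM, "MyVM.sav", true /*fContinueAfterwards*/,
                      NULL /*pfnProgress*/, NULL /*pvUser*/, &fSuspended);
    if (RT_SUCCESS(rc) && fSuspended)
        rc = VMR3Resume(pVM);
#endif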
1815
1816/**
1817 * Save current VM state (used by FTM)
1818 *
1819 * Can be used for both saving the state and creating snapshots.
1820 *
1821 * When called for a VM in the Running state, the saved state is created live
1822 * and the VM is only suspended when the final part of the saving is performed.
1823 * The VM state will not be restored to Running in this case and it's up to the
1824 * caller to call VMR3Resume if this is desirable. (The rationale is that the
1825 * caller probably wishes to reconfigure the disks before resuming the VM.)
1826 *
1827 * @returns VBox status code.
1828 *
1829 * @param pVM The VM which state should be saved.
1830 * @param pStreamOps The stream methods.
1831 * @param pvStreamOpsUser The user argument to the stream methods.
1832 * @param pfSuspended Set if we suspended the VM.
1833 * @param fSkipStateChanges Set if we're supposed to skip state changes (FTM delta case)
1834 *
1835 * @thread Any
1836 * @vmstate Suspended or Running
1837 * @vmstateto Saving+Suspended or
1838 * RunningLS+SuspendingLS+SuspendedLS+Saving+Suspended.
1839 */
1840VMMR3DECL(int) VMR3SaveFT(PVM pVM, PCSSMSTRMOPS pStreamOps, void *pvStreamOpsUser, bool *pfSuspended,
1841 bool fSkipStateChanges)
1842{
1843    LogFlow(("VMR3SaveFT: pVM=%p pStreamOps=%p pvStreamOpsUser=%p pfSuspended=%p\n",
1844 pVM, pStreamOps, pvStreamOpsUser, pfSuspended));
1845
1846 /*
1847 * Validate input.
1848 */
1849 AssertPtr(pfSuspended);
1850 *pfSuspended = false;
1851 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1852 AssertReturn(pStreamOps, VERR_INVALID_PARAMETER);
1853
1854 /*
1855 * Join paths with VMR3Teleport.
1856 */
1857 int rc = vmR3SaveTeleport(pVM, 250 /*cMsMaxDowntime*/,
1858 NULL, pStreamOps, pvStreamOpsUser,
1859 SSMAFTER_CONTINUE, NULL, NULL, pfSuspended,
1860 fSkipStateChanges);
1861 LogFlow(("VMR3SaveFT: returns %Rrc (*pfSuspended=%RTbool)\n", rc, *pfSuspended));
1862 return rc;
1863}
1864
1865
1866/**
1867 * Teleport the VM (aka live migration).
1868 *
1869 * @returns VBox status code.
1870 *
1871 * @param pVM The VM which state should be saved.
1872 * @param cMsMaxDowntime The maximum downtime given as milliseconds.
1873 * @param pStreamOps The stream methods.
1874 * @param pvStreamOpsUser The user argument to the stream methods.
1875 * @param pfnProgress Progress callback. Optional.
1876 * @param pvProgressUser User argument for the progress callback.
1877 * @param pfSuspended Set if we suspended the VM.
1878 *
1879 * @thread Non-EMT.
1880 * @vmstate Suspended or Running
1881 * @vmstateto Saving+Suspended or
1882 * RunningLS+SuspendingLS+SuspendedLS+Saving+Suspended.
1883 */
1884VMMR3DECL(int) VMR3Teleport(PVM pVM, uint32_t cMsMaxDowntime, PCSSMSTRMOPS pStreamOps, void *pvStreamOpsUser,
1885 PFNVMPROGRESS pfnProgress, void *pvProgressUser, bool *pfSuspended)
1886{
1887    LogFlow(("VMR3Teleport: pVM=%p cMsMaxDowntime=%u pStreamOps=%p pvStreamOpsUser=%p pfnProgress=%p pvProgressUser=%p\n",
1888 pVM, cMsMaxDowntime, pStreamOps, pvStreamOpsUser, pfnProgress, pvProgressUser));
1889
1890 /*
1891 * Validate input.
1892 */
1893 AssertPtr(pfSuspended);
1894 *pfSuspended = false;
1895 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1896 VM_ASSERT_OTHER_THREAD(pVM);
1897 AssertPtrReturn(pStreamOps, VERR_INVALID_POINTER);
1898 AssertPtrNullReturn(pfnProgress, VERR_INVALID_POINTER);
1899
1900 /*
1901 * Join paths with VMR3Save.
1902 */
1903 int rc = vmR3SaveTeleport(pVM, cMsMaxDowntime,
1904 NULL /*pszFilename*/, pStreamOps, pvStreamOpsUser,
1905 SSMAFTER_TELEPORT, pfnProgress, pvProgressUser, pfSuspended,
1906 false /* fSkipStateChanges */);
1907 LogFlow(("VMR3Teleport: returns %Rrc (*pfSuspended=%RTbool)\n", rc, *pfSuspended));
1908 return rc;
1909}
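
/* Illustrative usage sketch, not part of the original file: teleporting a VM
 * over a caller supplied stream.  g_TeleportStreamOps and pvConnection are
 * assumptions standing in for an SSMSTRMOPS instance and its user data that
 * the teleportation code would have set up for the network connection. */
#if 0
    bool fSuspended = false;
    int rc = VMR3Teleport(pVM, 250 /*cMsMaxDowntime*/,
                          &g_TeleportStreamOps, pvConnection,
                          NULL /*pfnProgress*/, NULL /*pvProgressUser*/, &fSuspended);
    if (RT_SUCCESS(rc))
        LogRel(("Teleportation succeeded (fSuspended=%RTbool)\n", fSuspended));
#endif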
1910
1911
1912
1913/**
1914 * EMT(0) worker for VMR3LoadFromFile and VMR3LoadFromStream.
1915 *
1916 * @returns VBox status code.
1917 *
1918 * @param pVM The VM handle.
1919 * @param pszFilename The name of the file. NULL if pStreamOps is used.
1920 * @param pStreamOps The stream methods. NULL if pszFilename is used.
1921 * @param pvStreamOpsUser The user argument to the stream methods.
1922 * @param pfnProgress Progress callback. Optional.
1923 * @param pvProgressUser   User argument for the progress callback.
1924 * @param fTeleporting Indicates whether we're teleporting or not.
1925 * @param fSkipStateChanges Set if we're supposed to skip state changes (FTM delta case)
1926 *
1927 * @thread EMT.
1928 */
1929static DECLCALLBACK(int) vmR3Load(PVM pVM, const char *pszFilename, PCSSMSTRMOPS pStreamOps, void *pvStreamOpsUser,
1930 PFNVMPROGRESS pfnProgress, void *pvProgressUser, bool fTeleporting,
1931 bool fSkipStateChanges)
1932{
1933 int rc = VINF_SUCCESS;
1934
1935 LogFlow(("vmR3Load: pVM=%p pszFilename=%p:{%s} pStreamOps=%p pvStreamOpsUser=%p pfnProgress=%p pvProgressUser=%p fTeleporting=%RTbool\n",
1936 pVM, pszFilename, pszFilename, pStreamOps, pvStreamOpsUser, pfnProgress, pvProgressUser, fTeleporting));
1937
1938 /*
1939 * Validate input (paranoia).
1940 */
1941 AssertPtr(pVM);
1942 AssertPtrNull(pszFilename);
1943 AssertPtrNull(pStreamOps);
1944 AssertPtrNull(pfnProgress);
1945
1946 if (!fSkipStateChanges)
1947 {
1948 /*
1949 * Change the state and perform the load.
1950 *
1951 * Always perform a relocation round afterwards to make sure hypervisor
1952 * selectors and such are correct.
1953 */
1954 rc = vmR3TrySetState(pVM, "VMR3Load", 2,
1955 VMSTATE_LOADING, VMSTATE_CREATED,
1956 VMSTATE_LOADING, VMSTATE_SUSPENDED);
1957 if (RT_FAILURE(rc))
1958 return rc;
1959 }
1960 pVM->vm.s.fTeleportedAndNotFullyResumedYet = fTeleporting;
1961
1962 uint32_t cErrorsPriorToSave = VMR3GetErrorCount(pVM);
1963 rc = SSMR3Load(pVM, pszFilename, pStreamOps, pvStreamOpsUser, SSMAFTER_RESUME, pfnProgress, pvProgressUser);
1964 if (RT_SUCCESS(rc))
1965 {
1966 VMR3Relocate(pVM, 0 /*offDelta*/);
1967 if (!fSkipStateChanges)
1968 vmR3SetState(pVM, VMSTATE_SUSPENDED, VMSTATE_LOADING);
1969 }
1970 else
1971 {
1972 pVM->vm.s.fTeleportedAndNotFullyResumedYet = false;
1973 if (!fSkipStateChanges)
1974 vmR3SetState(pVM, VMSTATE_LOAD_FAILURE, VMSTATE_LOADING);
1975
1976 if (cErrorsPriorToSave == VMR3GetErrorCount(pVM))
1977 rc = VMSetError(pVM, rc, RT_SRC_POS,
1978 N_("Unable to restore the virtual machine's saved state from '%s'. "
1979 "It may be damaged or from an older version of VirtualBox. "
1980 "Please discard the saved state before starting the virtual machine"),
1981 pszFilename);
1982 }
1983
1984 return rc;
1985}
1986
1987
1988/**
1989 * Loads a VM state into a newly created VM or one that is suspended.
1990 *
1991 * To restore a saved state on VM startup, call this function and then resume
1992 * the VM instead of powering it on.
1993 *
1994 * @returns VBox status code.
1995 *
1996 * @param pVM The VM handle.
1997 * @param pszFilename The name of the save state file.
1998 * @param pfnProgress Progress callback. Optional.
1999 * @param pvUser User argument for the progress callback.
2000 *
2001 * @thread Any thread.
2002 * @vmstate Created, Suspended
2003 * @vmstateto Loading+Suspended
2004 */
2005VMMR3DECL(int) VMR3LoadFromFile(PVM pVM, const char *pszFilename, PFNVMPROGRESS pfnProgress, void *pvUser)
2006{
2007 LogFlow(("VMR3LoadFromFile: pVM=%p pszFilename=%p:{%s} pfnProgress=%p pvUser=%p\n",
2008 pVM, pszFilename, pszFilename, pfnProgress, pvUser));
2009
2010 /*
2011 * Validate input.
2012 */
2013 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
2014 AssertPtrReturn(pszFilename, VERR_INVALID_POINTER);
2015
2016 /*
2017 * Forward the request to EMT(0). No need to setup a rendezvous here
2018 * since there is no execution taking place when this call is allowed.
2019 */
2020 int rc = VMR3ReqCallWaitU(pVM->pUVM, 0 /*idDstCpu*/, (PFNRT)vmR3Load, 8,
2021 pVM, pszFilename, (uintptr_t)NULL /*pStreamOps*/, (uintptr_t)NULL /*pvStreamOpsUser*/, pfnProgress, pvUser,
2022 false /*fTeleporting*/, false /* fSkipStateChanges */);
2023 LogFlow(("VMR3LoadFromFile: returns %Rrc\n", rc));
2024 return rc;
2025}
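
/* Illustrative usage sketch, not part of the original file: restoring a saved
 * state at startup.  As described above, the state is loaded into a freshly
 * created (or suspended) VM and the VM is then resumed instead of being
 * powered on; the file name is a placeholder. */
#if 0
    int rc = VMR3LoadFromFile(pVM, "MyVM.sav", NULL /*pfnProgress*/, NULL /*pvUser*/);
    if (RT_SUCCESS(rc))
        rc = VMR3Resume(pVM);
#endif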
2026
2027
2028/**
2029 * VMR3LoadFromFile for arbitrary file streams.
2030 *
2031 * @returns VBox status code.
2032 *
2033 * @param pVM The VM handle.
2034 * @param pStreamOps The stream methods.
2035 * @param pvStreamOpsUser The user argument to the stream methods.
2036 * @param pfnProgress Progress callback. Optional.
2037 * @param pvProgressUser User argument for the progress callback.
2038 *
2039 * @thread Any thread.
2040 * @vmstate Created, Suspended
2041 * @vmstateto Loading+Suspended
2042 */
2043VMMR3DECL(int) VMR3LoadFromStream(PVM pVM, PCSSMSTRMOPS pStreamOps, void *pvStreamOpsUser,
2044 PFNVMPROGRESS pfnProgress, void *pvProgressUser)
2045{
2046 LogFlow(("VMR3LoadFromStream: pVM=%p pStreamOps=%p pvStreamOpsUser=%p pfnProgress=%p pvProgressUser=%p\n",
2047 pVM, pStreamOps, pvStreamOpsUser, pfnProgress, pvProgressUser));
2048
2049 /*
2050 * Validate input.
2051 */
2052 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
2053 AssertPtrReturn(pStreamOps, VERR_INVALID_POINTER);
2054
2055 /*
2056 * Forward the request to EMT(0). No need to setup a rendezvous here
2057 * since there is no execution taking place when this call is allowed.
2058 */
2059 int rc = VMR3ReqCallWaitU(pVM->pUVM, 0 /*idDstCpu*/, (PFNRT)vmR3Load, 8,
2060 pVM, (uintptr_t)NULL /*pszFilename*/, pStreamOps, pvStreamOpsUser, pfnProgress, pvProgressUser,
2061 true /*fTeleporting*/, false /* fSkipStateChanges */);
2062 LogFlow(("VMR3LoadFromStream: returns %Rrc\n", rc));
2063 return rc;
2064}
2065
2066
2067/**
2068 * VMR3LoadFromStream variant used by FTM; skips state changes and progress reporting.
2069 *
2070 * @returns VBox status code.
2071 *
2072 * @param pVM The VM handle.
2073 * @param pStreamOps The stream methods.
2074 * @param pvStreamOpsUser The user argument to the stream methods.
2077 *
2078 * @thread Any thread.
2079 * @vmstate Created, Suspended
2080 * @vmstateto Loading+Suspended
2081 */
2082VMMR3DECL(int) VMR3LoadFromStreamFT(PVM pVM, PCSSMSTRMOPS pStreamOps, void *pvStreamOpsUser)
2083{
2084 LogFlow(("VMR3LoadFromStreamFT: pVM=%p pStreamOps=%p pvStreamOpsUser=%p\n",
2085 pVM, pStreamOps, pvStreamOpsUser));
2086
2087 /*
2088 * Validate input.
2089 */
2090 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
2091 AssertPtrReturn(pStreamOps, VERR_INVALID_POINTER);
2092
2093 /*
2094 * Forward the request to EMT(0). No need to setup a rendezvous here
2095 * since there is no execution taking place when this call is allowed.
2096 */
2097 int rc = VMR3ReqCallWaitU(pVM->pUVM, 0 /*idDstCpu*/, (PFNRT)vmR3Load, 8,
2098 pVM, (uintptr_t)NULL /*pszFilename*/, pStreamOps, pvStreamOpsUser, NULL, NULL,
2099 true /*fTeleporting*/, true /* fSkipStateChanges */);
2100    LogFlow(("VMR3LoadFromStreamFT: returns %Rrc\n", rc));
2101 return rc;
2102}
2103
2104/**
2105 * EMT rendezvous worker for VMR3PowerOff.
2106 *
2107 * @returns VERR_VM_INVALID_VM_STATE or VINF_EM_OFF. (This is a strict
2108 * return code, see FNVMMEMTRENDEZVOUS.)
2109 *
2110 * @param pVM The VM handle.
2111 * @param pVCpu The VMCPU handle of the EMT.
2112 * @param pvUser Ignored.
2113 */
2114static DECLCALLBACK(VBOXSTRICTRC) vmR3PowerOff(PVM pVM, PVMCPU pVCpu, void *pvUser)
2115{
2116 LogFlow(("vmR3PowerOff: pVM=%p pVCpu=%p/#%u\n", pVM, pVCpu, pVCpu->idCpu));
2117 Assert(!pvUser); NOREF(pvUser);
2118
2119 /*
2120 * The first EMT thru here will change the state to PoweringOff.
2121 */
2122 if (pVCpu->idCpu == pVM->cCpus - 1)
2123 {
2124 int rc = vmR3TrySetState(pVM, "VMR3PowerOff", 11,
2125 VMSTATE_POWERING_OFF, VMSTATE_RUNNING, /* 1 */
2126 VMSTATE_POWERING_OFF, VMSTATE_SUSPENDED, /* 2 */
2127 VMSTATE_POWERING_OFF, VMSTATE_DEBUGGING, /* 3 */
2128 VMSTATE_POWERING_OFF, VMSTATE_LOAD_FAILURE, /* 4 */
2129 VMSTATE_POWERING_OFF, VMSTATE_GURU_MEDITATION, /* 5 */
2130 VMSTATE_POWERING_OFF, VMSTATE_FATAL_ERROR, /* 6 */
2131 VMSTATE_POWERING_OFF, VMSTATE_CREATED, /* 7 */ /** @todo update the diagram! */
2132 VMSTATE_POWERING_OFF_LS, VMSTATE_RUNNING_LS, /* 8 */
2133 VMSTATE_POWERING_OFF_LS, VMSTATE_DEBUGGING_LS, /* 9 */
2134 VMSTATE_POWERING_OFF_LS, VMSTATE_GURU_MEDITATION_LS,/* 10 */
2135 VMSTATE_POWERING_OFF_LS, VMSTATE_FATAL_ERROR_LS); /* 11 */
2136 if (RT_FAILURE(rc))
2137 return rc;
2138 if (rc >= 7)
2139 SSMR3Cancel(pVM);
2140 }
2141
2142 /*
2143 * Check the state.
2144 */
2145 VMSTATE enmVMState = VMR3GetState(pVM);
2146 AssertMsgReturn( enmVMState == VMSTATE_POWERING_OFF
2147 || enmVMState == VMSTATE_POWERING_OFF_LS,
2148 ("%s\n", VMR3GetStateName(enmVMState)),
2149 VERR_VM_INVALID_VM_STATE);
2150
2151 /*
2152 * EMT(0) does the actual power off work here *after* all the other EMTs
2153 * have been thru and entered the STOPPED state.
2154 */
2155 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STOPPED);
2156 if (pVCpu->idCpu == 0)
2157 {
2158 /*
2159 * For debugging purposes, we will log a summary of the guest state at this point.
2160 */
2161 if (enmVMState != VMSTATE_GURU_MEDITATION)
2162 {
2163 /** @todo SMP support? */
2164 /** @todo make the state dumping at VMR3PowerOff optional. */
2165 bool fOldBuffered = RTLogRelSetBuffering(true /*fBuffered*/);
2166 RTLogRelPrintf("****************** Guest state at power off ******************\n");
2167 DBGFR3Info(pVM, "cpumguest", "verbose", DBGFR3InfoLogRelHlp());
2168 RTLogRelPrintf("***\n");
2169 DBGFR3Info(pVM, "mode", NULL, DBGFR3InfoLogRelHlp());
2170 RTLogRelPrintf("***\n");
2171 DBGFR3Info(pVM, "activetimers", NULL, DBGFR3InfoLogRelHlp());
2172 RTLogRelPrintf("***\n");
2173 DBGFR3Info(pVM, "gdt", NULL, DBGFR3InfoLogRelHlp());
2174 /** @todo dump guest call stack. */
2175#if 1 // "temporary" while debugging #1589
2176 RTLogRelPrintf("***\n");
2177 uint32_t esp = CPUMGetGuestESP(pVCpu);
2178 if ( CPUMGetGuestSS(pVCpu) == 0
2179 && esp < _64K)
2180 {
2181 uint8_t abBuf[PAGE_SIZE];
2182 RTLogRelPrintf("***\n"
2183 "ss:sp=0000:%04x ", esp);
2184 uint32_t Start = esp & ~(uint32_t)63;
2185 int rc = PGMPhysSimpleReadGCPhys(pVM, abBuf, Start, 0x100);
2186 if (RT_SUCCESS(rc))
2187 RTLogRelPrintf("0000:%04x TO 0000:%04x:\n"
2188 "%.*Rhxd\n",
2189 Start, Start + 0x100 - 1,
2190 0x100, abBuf);
2191 else
2192 RTLogRelPrintf("rc=%Rrc\n", rc);
2193
2194 /* grub ... */
2195 if (esp < 0x2000 && esp > 0x1fc0)
2196 {
2197 rc = PGMPhysSimpleReadGCPhys(pVM, abBuf, 0x8000, 0x800);
2198 if (RT_SUCCESS(rc))
2199 RTLogRelPrintf("0000:8000 TO 0000:87ff:\n"
2200 "%.*Rhxd\n",
2201 0x800, abBuf);
2202 }
2203 /* microsoft cdrom hang ... */
2204 if (true)
2205 {
2206 rc = PGMPhysSimpleReadGCPhys(pVM, abBuf, 0x8000, 0x200);
2207 if (RT_SUCCESS(rc))
2208 RTLogRelPrintf("2000:0000 TO 2000:01ff:\n"
2209 "%.*Rhxd\n",
2210 0x200, abBuf);
2211 }
2212 }
2213#endif
2214 RTLogRelSetBuffering(fOldBuffered);
2215 RTLogRelPrintf("************** End of Guest state at power off ***************\n");
2216 }
2217
2218 /*
2219 * Perform the power off notifications and advance the state to
2220 * Off or OffLS.
2221 */
2222 PDMR3PowerOff(pVM);
2223
2224 PUVM pUVM = pVM->pUVM;
2225 RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
2226 enmVMState = pVM->enmVMState;
2227 if (enmVMState == VMSTATE_POWERING_OFF_LS)
2228 vmR3SetStateLocked(pVM, pUVM, VMSTATE_OFF_LS, VMSTATE_POWERING_OFF_LS);
2229 else
2230 vmR3SetStateLocked(pVM, pUVM, VMSTATE_OFF, VMSTATE_POWERING_OFF);
2231 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
2232 }
2233 return VINF_EM_OFF;
2234}
2235
2236
2237/**
2238 * Power off the VM.
2239 *
2240 * @returns VBox status code. When called on EMT, this will be a strict status
2241 * code that has to be propagated up the call stack.
2242 *
2243 * @param pVM The handle of the VM to be powered off.
2244 *
2245 * @thread Any thread.
2246 * @vmstate Suspended, Running, Guru Meditation, Load Failure
2247 * @vmstateto Off or OffLS
2248 */
2249VMMR3DECL(int) VMR3PowerOff(PVM pVM)
2250{
2251 LogFlow(("VMR3PowerOff: pVM=%p\n", pVM));
2252 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
2253
2254 /*
2255 * Gather all the EMTs to make sure there are no races before
2256 * changing the VM state.
2257 */
2258 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
2259 vmR3PowerOff, NULL);
2260 LogFlow(("VMR3PowerOff: returns %Rrc\n", rc));
2261 return rc;
2262}
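
/* Illustrative usage sketch, not part of the original file: the usual shutdown
 * sequence a VMM user follows from a non-EMT thread - power the VM off and
 * then destroy the handle. */
#if 0
    int rc = VMR3PowerOff(pVM);
    AssertLogRelRC(rc);
    rc = VMR3Destroy(pVM);
    AssertLogRelRC(rc);
#endif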
2263
2264
2265/**
2266 * Destroys the VM.
2267 *
2268 * The VM must be powered off (or never really powered on) to call this
2269 * function. The VM handle is destroyed and can no longer be used upon successful
2270 * return.
2271 *
2272 * @returns VBox status code.
2273 *
2274 * @param pVM The handle of the VM which should be destroyed.
2275 *
2276 * @thread Any non-emulation thread.
2277 * @vmstate Off, Created
2278 * @vmstateto N/A
2279 */
2280VMMR3DECL(int) VMR3Destroy(PVM pVM)
2281{
2282 LogFlow(("VMR3Destroy: pVM=%p\n", pVM));
2283
2284 /*
2285 * Validate input.
2286 */
2287 if (!pVM)
2288 return VERR_INVALID_VM_HANDLE;
2289 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
2290 AssertLogRelReturn(!VM_IS_EMT(pVM), VERR_VM_THREAD_IS_EMT);
2291
2292 /*
2293 * Change VM state to destroying and unlink the VM.
2294 */
2295 int rc = vmR3TrySetState(pVM, "VMR3Destroy", 1, VMSTATE_DESTROYING, VMSTATE_OFF);
2296 if (RT_FAILURE(rc))
2297 return rc;
2298
2299 /** @todo lock this when we start having multiple machines in a process... */
2300 PUVM pUVM = pVM->pUVM; AssertPtr(pUVM);
2301 if (g_pUVMsHead == pUVM)
2302 g_pUVMsHead = pUVM->pNext;
2303 else
2304 {
2305 PUVM pPrev = g_pUVMsHead;
2306 while (pPrev && pPrev->pNext != pUVM)
2307 pPrev = pPrev->pNext;
2308 AssertMsgReturn(pPrev, ("pUVM=%p / pVM=%p is INVALID!\n", pUVM, pVM), VERR_INVALID_PARAMETER);
2309
2310 pPrev->pNext = pUVM->pNext;
2311 }
2312 pUVM->pNext = NULL;
2313
2314 /*
2315 * Notify the registered at-destruction listeners.
2316 */
2317 vmR3AtDtor(pVM);
2318
2319 /*
2320 * Call vmR3Destroy on each of the EMTs ending with EMT(0) doing the bulk
2321 * of the cleanup.
2322 */
2323 /* vmR3Destroy on all EMTs, ending with EMT(0). */
2324 rc = VMR3ReqCallWaitU(pUVM, VMCPUID_ALL_REVERSE, (PFNRT)vmR3Destroy, 1, pVM);
2325 AssertLogRelRC(rc);
2326
2327 /* Wait for EMTs and destroy the UVM. */
2328 vmR3DestroyUVM(pUVM, 30000);
2329
2330 LogFlow(("VMR3Destroy: returns VINF_SUCCESS\n"));
2331 return VINF_SUCCESS;
2332}
2333
2334
2335/**
2336 * Internal destruction worker.
2337 *
2338 * This is either called from VMR3Destroy via VMR3ReqCallU or from
2339 * vmR3EmulationThreadWithId when EMT(0) terminates after having called
2340 * VMR3Destroy().
2341 *
2342 * When called on EMT(0), it will perform the great bulk of the destruction.
2343 * When called on the other EMTs, they will do nothing and the whole purpose is
2344 * to return VINF_EM_TERMINATE so they break out of their run loops.
2345 *
2346 * @returns VINF_EM_TERMINATE.
2347 * @param pVM The VM handle.
2348 */
2349DECLCALLBACK(int) vmR3Destroy(PVM pVM)
2350{
2351 PUVM pUVM = pVM->pUVM;
2352 PVMCPU pVCpu = VMMGetCpu(pVM);
2353 Assert(pVCpu);
2354 LogFlow(("vmR3Destroy: pVM=%p pUVM=%p pVCpu=%p idCpu=%u\n", pVM, pUVM, pVCpu, pVCpu->idCpu));
2355
2356 /*
2357 * Only VCPU 0 does the full cleanup (last).
2358 */
2359 if (pVCpu->idCpu == 0)
2360 {
2361 /*
2362 * Dump statistics to the log.
2363 */
2364#if defined(VBOX_WITH_STATISTICS) || defined(LOG_ENABLED)
2365 RTLogFlags(NULL, "nodisabled nobuffered");
2366#endif
2367#ifdef VBOX_WITH_STATISTICS
2368 STAMR3Dump(pVM, "*");
2369#else
2370 LogRel(("************************* Statistics *************************\n"));
2371 STAMR3DumpToReleaseLog(pVM, "*");
2372 LogRel(("********************* End of statistics **********************\n"));
2373#endif
2374
2375 /*
2376 * Destroy the VM components.
2377 */
2378 int rc = TMR3Term(pVM);
2379 AssertRC(rc);
2380#ifdef VBOX_WITH_DEBUGGER
2381 rc = DBGCTcpTerminate(pVM, pUVM->vm.s.pvDBGC);
2382 pUVM->vm.s.pvDBGC = NULL;
2383#endif
2384 AssertRC(rc);
2385 rc = FTMR3Term(pVM);
2386 AssertRC(rc);
2387 rc = DBGFR3Term(pVM);
2388 AssertRC(rc);
2389 rc = PDMR3Term(pVM);
2390 AssertRC(rc);
2391 rc = EMR3Term(pVM);
2392 AssertRC(rc);
2393 rc = IOMR3Term(pVM);
2394 AssertRC(rc);
2395 rc = CSAMR3Term(pVM);
2396 AssertRC(rc);
2397 rc = PATMR3Term(pVM);
2398 AssertRC(rc);
2399 rc = TRPMR3Term(pVM);
2400 AssertRC(rc);
2401 rc = SELMR3Term(pVM);
2402 AssertRC(rc);
2403 rc = REMR3Term(pVM);
2404 AssertRC(rc);
2405 rc = HWACCMR3Term(pVM);
2406 AssertRC(rc);
2407 rc = PGMR3Term(pVM);
2408 AssertRC(rc);
2409 rc = VMMR3Term(pVM); /* Terminates the ring-0 code! */
2410 AssertRC(rc);
2411 rc = CPUMR3Term(pVM);
2412 AssertRC(rc);
2413 SSMR3Term(pVM);
2414 rc = PDMR3CritSectTerm(pVM);
2415 AssertRC(rc);
2416 rc = MMR3Term(pVM);
2417 AssertRC(rc);
2418
2419 /*
2420 * We're done, tell the other EMTs to quit.
2421 */
2422 ASMAtomicUoWriteBool(&pUVM->vm.s.fTerminateEMT, true);
2423 ASMAtomicWriteU32(&pVM->fGlobalForcedActions, VM_FF_CHECK_VM_STATE); /* Can't hurt... */
2424 LogFlow(("vmR3Destroy: returning %Rrc\n", VINF_EM_TERMINATE));
2425 }
2426 return VINF_EM_TERMINATE;
2427}
2428
2429
2430/**
2431 * Destroys the UVM portion.
2432 *
2433 * This is called as the final step in the VM destruction or as the cleanup
2434 * in case of a creation failure.
2435 *
2436 * @param pUVM              The user mode VM structure to destroy.
2437 * @param cMilliesEMTWait The number of milliseconds to wait for the emulation
2438 * threads.
2439 */
2440static void vmR3DestroyUVM(PUVM pUVM, uint32_t cMilliesEMTWait)
2441{
2442 /*
2443     * Signal termination of each of the emulation threads and
2444 * wait for them to complete.
2445 */
2446 /* Signal them. */
2447 ASMAtomicUoWriteBool(&pUVM->vm.s.fTerminateEMT, true);
2448 if (pUVM->pVM)
2449 VM_FF_SET(pUVM->pVM, VM_FF_CHECK_VM_STATE); /* Can't hurt... */
2450 for (VMCPUID i = 0; i < pUVM->cCpus; i++)
2451 {
2452 VMR3NotifyGlobalFFU(pUVM, VMNOTIFYFF_FLAGS_DONE_REM);
2453 RTSemEventSignal(pUVM->aCpus[i].vm.s.EventSemWait);
2454 }
2455
2456 /* Wait for them. */
2457 uint64_t NanoTS = RTTimeNanoTS();
2458 RTTHREAD hSelf = RTThreadSelf();
2459 ASMAtomicUoWriteBool(&pUVM->vm.s.fTerminateEMT, true);
2460 for (VMCPUID i = 0; i < pUVM->cCpus; i++)
2461 {
2462 RTTHREAD hThread = pUVM->aCpus[i].vm.s.ThreadEMT;
2463 if ( hThread != NIL_RTTHREAD
2464 && hThread != hSelf)
2465 {
2466 uint64_t cMilliesElapsed = (RTTimeNanoTS() - NanoTS) / 1000000;
2467 int rc2 = RTThreadWait(hThread,
2468 cMilliesElapsed < cMilliesEMTWait
2469 ? RT_MAX(cMilliesEMTWait - cMilliesElapsed, 2000)
2470 : 2000,
2471 NULL);
2472 if (rc2 == VERR_TIMEOUT) /* avoid the assertion when debugging. */
2473 rc2 = RTThreadWait(hThread, 1000, NULL);
2474 AssertLogRelMsgRC(rc2, ("i=%u rc=%Rrc\n", i, rc2));
2475 if (RT_SUCCESS(rc2))
2476                pUVM->aCpus[i].vm.s.ThreadEMT = NIL_RTTHREAD;
2477 }
2478 }
2479
2480 /* Cleanup the semaphores. */
2481 for (VMCPUID i = 0; i < pUVM->cCpus; i++)
2482 {
2483 RTSemEventDestroy(pUVM->aCpus[i].vm.s.EventSemWait);
2484 pUVM->aCpus[i].vm.s.EventSemWait = NIL_RTSEMEVENT;
2485 }
2486
2487 /*
2488 * Free the event semaphores associated with the request packets.
2489 */
2490 unsigned cReqs = 0;
2491 for (unsigned i = 0; i < RT_ELEMENTS(pUVM->vm.s.apReqFree); i++)
2492 {
2493 PVMREQ pReq = pUVM->vm.s.apReqFree[i];
2494 pUVM->vm.s.apReqFree[i] = NULL;
2495 for (; pReq; pReq = pReq->pNext, cReqs++)
2496 {
2497 pReq->enmState = VMREQSTATE_INVALID;
2498 RTSemEventDestroy(pReq->EventSem);
2499 }
2500 }
2501 Assert(cReqs == pUVM->vm.s.cReqFree); NOREF(cReqs);
2502
2503 /*
2504 * Kill all queued requests. (There really shouldn't be any!)
2505 */
2506 for (unsigned i = 0; i < 10; i++)
2507 {
2508 PVMREQ pReqHead = ASMAtomicXchgPtrT(&pUVM->vm.s.pReqs, NULL, PVMREQ);
2509 AssertMsg(!pReqHead, ("This isn't supposed to happen! VMR3Destroy caller has to serialize this.\n"));
2510 if (!pReqHead)
2511 break;
2512 for (PVMREQ pReq = pReqHead; pReq; pReq = pReq->pNext)
2513 {
2514 ASMAtomicUoWriteSize(&pReq->iStatus, VERR_INTERNAL_ERROR);
2515 ASMAtomicWriteSize(&pReq->enmState, VMREQSTATE_INVALID);
2516 RTSemEventSignal(pReq->EventSem);
2517 RTThreadSleep(2);
2518 RTSemEventDestroy(pReq->EventSem);
2519 }
2520 /* give them a chance to respond before we free the request memory. */
2521 RTThreadSleep(32);
2522 }
2523
2524 /*
2525 * Now all queued VCPU requests (again, there shouldn't be any).
2526 */
2527 for (VMCPUID idCpu = 0; idCpu < pUVM->cCpus; idCpu++)
2528 {
2529 PUVMCPU pUVCpu = &pUVM->aCpus[idCpu];
2530
2531 for (unsigned i = 0; i < 10; i++)
2532 {
2533 PVMREQ pReqHead = ASMAtomicXchgPtrT(&pUVCpu->vm.s.pReqs, NULL, PVMREQ);
2534 AssertMsg(!pReqHead, ("This isn't supposed to happen! VMR3Destroy caller has to serialize this.\n"));
2535 if (!pReqHead)
2536 break;
2537 for (PVMREQ pReq = pReqHead; pReq; pReq = pReq->pNext)
2538 {
2539 ASMAtomicUoWriteSize(&pReq->iStatus, VERR_INTERNAL_ERROR);
2540 ASMAtomicWriteSize(&pReq->enmState, VMREQSTATE_INVALID);
2541 RTSemEventSignal(pReq->EventSem);
2542 RTThreadSleep(2);
2543 RTSemEventDestroy(pReq->EventSem);
2544 }
2545 /* give them a chance to respond before we free the request memory. */
2546 RTThreadSleep(32);
2547 }
2548 }
2549
2550 /*
2551 * Make sure the VMMR0.r0 module and whatever else is unloaded.
2552 */
2553 PDMR3TermUVM(pUVM);
2554
2555 /*
2556 * Terminate the support library if initialized.
2557 */
2558 if (pUVM->vm.s.pSession)
2559 {
2560 int rc = SUPR3Term(false /*fForced*/);
2561 AssertRC(rc);
2562 pUVM->vm.s.pSession = NIL_RTR0PTR;
2563 }
2564
2565 /*
2566 * Release the UVM structure reference.
2567 */
2568 VMR3ReleaseUVM(pUVM);
2569
2570 /*
2571 * Clean up and flush logs.
2572 */
2573#ifdef LOG_ENABLED
2574 RTLogSetCustomPrefixCallback(NULL, NULL, NULL);
2575#endif
2576 RTLogFlush(NULL);
2577}
2578
2579
2580/**
2581 * Enumerates the VMs in this process.
2582 *
2583 * @returns Pointer to the next VM.
2584 * @returns NULL when no more VMs.
2585 * @param pVMPrev The previous VM
2586 * Use NULL to start the enumeration.
2587 */
2588VMMR3DECL(PVM) VMR3EnumVMs(PVM pVMPrev)
2589{
2590 /*
2591     * This is quick and dirty. It has issues with VMs being
2592 * destroyed during the enumeration.
2593 */
2594 PUVM pNext;
2595 if (pVMPrev)
2596 pNext = pVMPrev->pUVM->pNext;
2597 else
2598 pNext = g_pUVMsHead;
2599 return pNext ? pNext->pVM : NULL;
2600}
2601
2602
2603/**
2604 * Registers an at VM destruction callback.
2605 *
2606 * @returns VBox status code.
2607 * @param pfnAtDtor Pointer to callback.
2608 * @param pvUser User argument.
2609 */
2610VMMR3DECL(int) VMR3AtDtorRegister(PFNVMATDTOR pfnAtDtor, void *pvUser)
2611{
2612 /*
2613 * Check if already registered.
2614 */
2615 VM_ATDTOR_LOCK();
2616 PVMATDTOR pCur = g_pVMAtDtorHead;
2617 while (pCur)
2618 {
2619 if (pfnAtDtor == pCur->pfnAtDtor)
2620 {
2621 VM_ATDTOR_UNLOCK();
2622 AssertMsgFailed(("Already registered at destruction callback %p!\n", pfnAtDtor));
2623 return VERR_INVALID_PARAMETER;
2624 }
2625
2626 /* next */
2627 pCur = pCur->pNext;
2628 }
2629 VM_ATDTOR_UNLOCK();
2630
2631 /*
2632 * Allocate new entry.
2633 */
2634 PVMATDTOR pVMAtDtor = (PVMATDTOR)RTMemAlloc(sizeof(*pVMAtDtor));
2635 if (!pVMAtDtor)
2636 return VERR_NO_MEMORY;
2637
2638 VM_ATDTOR_LOCK();
2639 pVMAtDtor->pfnAtDtor = pfnAtDtor;
2640 pVMAtDtor->pvUser = pvUser;
2641 pVMAtDtor->pNext = g_pVMAtDtorHead;
2642 g_pVMAtDtorHead = pVMAtDtor;
2643 VM_ATDTOR_UNLOCK();
2644
2645 return VINF_SUCCESS;
2646}
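
/* Illustrative usage sketch, not part of the original file: registering a
 * callback that vmR3AtDtor() will invoke when a VM is destroyed.  The callback
 * signature mirrors how it is called below (pVM plus the user argument);
 * myAtDtorCleanup and pMyCtx are made-up names. */
#if 0
static DECLCALLBACK(void) myAtDtorCleanup(PVM pVM, void *pvUser)
{
    NOREF(pVM);
    /* Release whatever per-VM resources pvUser refers to. */
    RTMemFree(pvUser);
}

    /* At registration time (pMyCtx is allocated by the caller): */
    int rc = VMR3AtDtorRegister(myAtDtorCleanup, pMyCtx);
    AssertRC(rc);
#endif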
2647
2648
2649/**
2650 * Deregisters an at VM destruction callback.
2651 *
2652 * @returns VBox status code.
2653 * @param pfnAtDtor Pointer to callback.
2654 */
2655VMMR3DECL(int) VMR3AtDtorDeregister(PFNVMATDTOR pfnAtDtor)
2656{
2657 /*
2658 * Find it, unlink it and free it.
2659 */
2660 VM_ATDTOR_LOCK();
2661 PVMATDTOR pPrev = NULL;
2662 PVMATDTOR pCur = g_pVMAtDtorHead;
2663 while (pCur)
2664 {
2665 if (pfnAtDtor == pCur->pfnAtDtor)
2666 {
2667 if (pPrev)
2668 pPrev->pNext = pCur->pNext;
2669 else
2670 g_pVMAtDtorHead = pCur->pNext;
2671 pCur->pNext = NULL;
2672 VM_ATDTOR_UNLOCK();
2673
2674 RTMemFree(pCur);
2675 return VINF_SUCCESS;
2676 }
2677
2678 /* next */
2679 pPrev = pCur;
2680 pCur = pCur->pNext;
2681 }
2682 VM_ATDTOR_UNLOCK();
2683
2684 return VERR_INVALID_PARAMETER;
2685}
2686
2687
2688/**
2689 * Walks the list of at VM destructor callbacks.
2690 * @param pVM The VM which is about to be destroyed.
2691 */
2692static void vmR3AtDtor(PVM pVM)
2693{
2694 /*
2695     * Walk the list and call each registered callback.
2696 */
2697 VM_ATDTOR_LOCK();
2698 for (PVMATDTOR pCur = g_pVMAtDtorHead; pCur; pCur = pCur->pNext)
2699 pCur->pfnAtDtor(pVM, pCur->pvUser);
2700 VM_ATDTOR_UNLOCK();
2701}
2702
2703
2704/**
2705 * Worker which checks integrity of some internal structures.
2706 * This is yet another attempt to track down that AVL tree crash.
2707 */
2708static void vmR3CheckIntegrity(PVM pVM)
2709{
2710#ifdef VBOX_STRICT
2711 int rc = PGMR3CheckIntegrity(pVM);
2712 AssertReleaseRC(rc);
2713#endif
2714}
2715
2716
2717/**
2718 * EMT rendezvous worker for VMR3Reset.
2719 *
2720 * This is called by the emulation threads as a response to the reset request
2721 * issued by VMR3Reset().
2722 *
2723 * @returns VERR_VM_INVALID_VM_STATE, VINF_EM_RESET or VINF_EM_SUSPEND. (This
2724 * is a strict return code, see FNVMMEMTRENDEZVOUS.)
2725 *
2726 * @param pVM The VM handle.
2727 * @param pVCpu The VMCPU handle of the EMT.
2728 * @param pvUser Ignored.
2729 */
2730static DECLCALLBACK(VBOXSTRICTRC) vmR3Reset(PVM pVM, PVMCPU pVCpu, void *pvUser)
2731{
2732 Assert(!pvUser); NOREF(pvUser);
2733
2734 /*
2735     * The first EMT will try to change the state to resetting. If this fails,
2736 * we won't get called for the other EMTs.
2737 */
2738 if (pVCpu->idCpu == pVM->cCpus - 1)
2739 {
2740 int rc = vmR3TrySetState(pVM, "VMR3Reset", 3,
2741 VMSTATE_RESETTING, VMSTATE_RUNNING,
2742 VMSTATE_RESETTING, VMSTATE_SUSPENDED,
2743 VMSTATE_RESETTING_LS, VMSTATE_RUNNING_LS);
2744 if (RT_FAILURE(rc))
2745 return rc;
2746 }
2747
2748 /*
2749 * Check the state.
2750 */
2751 VMSTATE enmVMState = VMR3GetState(pVM);
2752 AssertLogRelMsgReturn( enmVMState == VMSTATE_RESETTING
2753 || enmVMState == VMSTATE_RESETTING_LS,
2754 ("%s\n", VMR3GetStateName(enmVMState)),
2755 VERR_INTERNAL_ERROR_4);
2756
2757 /*
2758     * EMT(0) does the full cleanup *after* all the other EMTs have been
2759 * thru here and been told to enter the EMSTATE_WAIT_SIPI state.
2760 *
2761 * Because there are per-cpu reset routines and order may/is important,
2762 * the following sequence looks a bit ugly...
2763 */
2764 if (pVCpu->idCpu == 0)
2765 vmR3CheckIntegrity(pVM);
2766
2767 /* Reset the VCpu state. */
2768 VMCPU_ASSERT_STATE(pVCpu, VMCPUSTATE_STARTED);
2769
2770 /* Clear all pending forced actions. */
2771 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_ALL_MASK & ~VMCPU_FF_REQUEST);
2772
2773 /*
2774 * Reset the VM components.
2775 */
2776 if (pVCpu->idCpu == 0)
2777 {
2778 PATMR3Reset(pVM);
2779 CSAMR3Reset(pVM);
2780 PGMR3Reset(pVM); /* We clear VM RAM in PGMR3Reset. It's vital PDMR3Reset is executed
2781 * _afterwards_. E.g. ACPI sets up RAM tables during init/reset. */
2782/** @todo PGMR3Reset should be called after PDMR3Reset really, because we'll trash OS <-> hardware
2783 * communication structures residing in RAM when done in the other order. I.e. the device must be
2784 * quiesced first, then we clear the memory and plan tables. Probably have to make these things
2785 * explicit in some way, some memory setup pass or something.
2786 * (Example: DevAHCI may assert if memory is zeroed before it has read the FIS.)
2787 *
2788 * @bugref{4467}
2789 */
2790 MMR3Reset(pVM);
2791 PDMR3Reset(pVM);
2792 SELMR3Reset(pVM);
2793 TRPMR3Reset(pVM);
2794 REMR3Reset(pVM);
2795 IOMR3Reset(pVM);
2796 CPUMR3Reset(pVM);
2797 }
2798 CPUMR3ResetCpu(pVCpu);
2799 if (pVCpu->idCpu == 0)
2800 {
2801 TMR3Reset(pVM);
2802 EMR3Reset(pVM);
2803 HWACCMR3Reset(pVM); /* This must come *after* PATM, CSAM, CPUM, SELM and TRPM. */
2804
2805#ifdef LOG_ENABLED
2806 /*
2807 * Debug logging.
2808 */
2809 RTLogPrintf("\n\nThe VM was reset:\n");
2810 DBGFR3Info(pVM, "cpum", "verbose", NULL);
2811#endif
2812
2813 /*
2814 * Since EMT(0) is the last to go thru here, it will advance the state.
2815 * When a live save is active, we will move on to SuspendingLS but
2816 * leave it for VMR3Reset to do the actual suspending due to deadlock risks.
2817 */
2818 PUVM pUVM = pVM->pUVM;
2819 RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
2820 enmVMState = pVM->enmVMState;
2821 if (enmVMState == VMSTATE_RESETTING)
2822 {
2823 if (pUVM->vm.s.enmPrevVMState == VMSTATE_SUSPENDED)
2824 vmR3SetStateLocked(pVM, pUVM, VMSTATE_SUSPENDED, VMSTATE_RESETTING);
2825 else
2826 vmR3SetStateLocked(pVM, pUVM, VMSTATE_RUNNING, VMSTATE_RESETTING);
2827 }
2828 else
2829 vmR3SetStateLocked(pVM, pUVM, VMSTATE_SUSPENDING_LS, VMSTATE_RESETTING_LS);
2830 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
2831
2832 vmR3CheckIntegrity(pVM);
2833
2834 /*
2835 * Do the suspend bit as well.
2836 * It only requires some EMT(0) work at present.
2837 */
2838 if (enmVMState != VMSTATE_RESETTING)
2839 {
2840 vmR3SuspendDoWork(pVM);
2841 vmR3SetState(pVM, VMSTATE_SUSPENDED_LS, VMSTATE_SUSPENDING_LS);
2842 }
2843 }
2844
2845 return enmVMState == VMSTATE_RESETTING
2846 ? VINF_EM_RESET
2847 : VINF_EM_SUSPEND; /** @todo VINF_EM_SUSPEND has lower priority than VINF_EM_RESET, so fix races. Perhaps add a new code for this combined case. */
2848}
2849
2850
2851/**
2852 * Reset the current VM.
2853 *
2854 * @returns VBox status code.
2855 * @param pVM VM to reset.
2856 */
2857VMMR3DECL(int) VMR3Reset(PVM pVM)
2858{
2859 LogFlow(("VMR3Reset:\n"));
2860 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
2861
2862 /*
2863 * Gather all the EMTs to make sure there are no races before
2864 * changing the VM state.
2865 */
2866 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
2867 vmR3Reset, NULL);
2868 LogFlow(("VMR3Reset: returns %Rrc\n", rc));
2869 return rc;
2870}
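
/* Illustrative usage sketch, not part of the original file: requesting a reset
 * from a non-EMT thread, e.g. in response to a user action.  pVM is assumed to
 * be valid and in one of the states accepted by vmR3Reset above. */
#if 0
    int rc = VMR3Reset(pVM);
    if (RT_FAILURE(rc))
        LogRel(("VM reset request failed: %Rrc\n", rc));
#endif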
2871
2872
2873/**
2874 * Gets the user mode VM structure pointer given the VM handle.
2875 *
2876 * @returns Pointer to the user mode VM structure on success. NULL if @a pVM is
2877 * invalid (asserted).
2878 * @param pVM The VM handle.
2879 * @sa VMR3GetVM, VMR3RetainUVM
2880 */
2881VMMR3DECL(PUVM) VMR3GetUVM(PVM pVM)
2882{
2883 VM_ASSERT_VALID_EXT_RETURN(pVM, NULL);
2884 return pVM->pUVM;
2885}
2886
2887
2888/**
2889 * Gets the shared VM structure pointer given the pointer to the user mode VM
2890 * structure.
2891 *
2892 * @returns Pointer to the shared VM structure.
2893 * NULL if @a pUVM is invalid (asserted) or if no shared VM structure
2894 * is currently associated with it.
2895 * @param pUVM The user mode VM handle.
2896 * @sa VMR3GetUVM
2897 */
2898VMMR3DECL(PVM) VMR3GetVM(PUVM pUVM)
2899{
2900 UVM_ASSERT_VALID_EXT_RETURN(pUVM, NULL);
2901 return pUVM->pVM;
2902}
2903
2904
2905/**
2906 * Retain the user mode VM handle.
2907 *
2908 * @returns Reference count.
2909 * UINT32_MAX if @a pUVM is invalid.
2910 *
2911 * @param pUVM The user mode VM handle.
2912 * @sa VMR3ReleaseUVM
2913 */
2914VMMR3DECL(uint32_t) VMR3RetainUVM(PUVM pUVM)
2915{
2916 UVM_ASSERT_VALID_EXT_RETURN(pUVM, UINT32_MAX);
2917 uint32_t cRefs = ASMAtomicIncU32(&pUVM->vm.s.cUvmRefs);
2918 AssertMsg(cRefs > 0 && cRefs < _64K, ("%u\n", cRefs));
2919 return cRefs;
2920}
2921
2922
2923/**
2924 * Does the final release of the UVM structure.
2925 *
2926 * @param pUVM The user mode VM handle.
2927 */
2928static void vmR3DoReleaseUVM(PUVM pUVM)
2929{
2930 /*
2931 * Free the UVM.
2932 */
2933 Assert(!pUVM->pVM);
2934
2935 MMR3TermUVM(pUVM);
2936 STAMR3TermUVM(pUVM);
2937
2938 ASMAtomicUoWriteU32(&pUVM->u32Magic, UINT32_MAX);
2939 RTTlsFree(pUVM->vm.s.idxTLS);
2940 RTMemPageFree(pUVM, RT_OFFSETOF(UVM, aCpus[pUVM->cCpus]));
2941}
2942
2943
2944/**
2945 * Releases a reference to the user mode VM handle.
2946 *
2947 * @returns The new reference count, 0 if destroyed.
2948 * UINT32_MAX if @a pUVM is invalid.
2949 *
2950 * @param pUVM The user mode VM handle.
2951 * @sa VMR3RetainUVM
2952 */
2953VMMR3DECL(uint32_t) VMR3ReleaseUVM(PUVM pUVM)
2954{
2955 if (!pUVM)
2956 return 0;
2957 UVM_ASSERT_VALID_EXT_RETURN(pUVM, UINT32_MAX);
2958 uint32_t cRefs = ASMAtomicDecU32(&pUVM->vm.s.cUvmRefs);
2959 if (!cRefs)
2960 vmR3DoReleaseUVM(pUVM);
2961 else
2962 AssertMsg(cRefs < _64K, ("%u\n", cRefs));
2963 return cRefs;
2964}
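
/* Illustrative usage sketch, not part of the original file: keeping the user
 * mode VM structure alive across an asynchronous operation by taking an extra
 * reference and releasing it when the work is done. */
#if 0
    PUVM pUVM = VMR3GetUVM(pVM);
    uint32_t cRefs = VMR3RetainUVM(pUVM);
    Assert(cRefs != UINT32_MAX); NOREF(cRefs);
    /* hand pUVM to the worker here and let it do its thing */
    VMR3ReleaseUVM(pUVM);
#endif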
2965
2966
2967/**
2968 * Gets the VM name.
2969 *
2970 * @returns Pointer to a read-only string containing the name. NULL if called
2971 * too early.
2972 * @param pUVM The user mode VM handle.
2973 */
2974VMMR3DECL(const char *) VMR3GetName(PUVM pUVM)
2975{
2976 UVM_ASSERT_VALID_EXT_RETURN(pUVM, NULL);
2977 return pUVM->vm.s.pszName;
2978}
2979
2980
2981/**
2982 * Gets the VM UUID.
2983 *
2984 * @returns pUuid on success, NULL on failure.
2985 * @param pUVM The user mode VM handle.
2986 * @param pUuid Where to store the UUID.
2987 */
2988VMMR3DECL(PRTUUID) VMR3GetUuid(PUVM pUVM, PRTUUID pUuid)
2989{
2990 UVM_ASSERT_VALID_EXT_RETURN(pUVM, NULL);
2991 AssertPtrReturn(pUuid, NULL);
2992
2993 *pUuid = pUVM->vm.s.Uuid;
2994 return pUuid;
2995}
2996
2997
2998/**
2999 * Gets the current VM state.
3000 *
3001 * @returns The current VM state.
3002 * @param pVM VM handle.
3003 * @thread Any
3004 */
3005VMMR3DECL(VMSTATE) VMR3GetState(PVM pVM)
3006{
3007 VM_ASSERT_VALID_EXT_RETURN(pVM, VMSTATE_TERMINATED);
3008 return pVM->enmVMState;
3009}
3010
3011
3012/**
3013 * Gets the current VM state.
3014 *
3015 * @returns The current VM state.
3016 * @param pUVM The user-mode VM handle.
3017 * @thread Any
3018 */
3019VMMR3DECL(VMSTATE) VMR3GetStateU(PUVM pUVM)
3020{
3021 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VMSTATE_TERMINATED);
3022 if (RT_UNLIKELY(!pUVM->pVM))
3023 return VMSTATE_TERMINATED;
3024 return pUVM->pVM->enmVMState;
3025}
3026
3027
3028/**
3029 * Gets the state name string for a VM state.
3030 *
3031 * @returns Pointer to the state name. (readonly)
3032 * @param enmState The state.
3033 */
3034VMMR3DECL(const char *) VMR3GetStateName(VMSTATE enmState)
3035{
3036 switch (enmState)
3037 {
3038 case VMSTATE_CREATING: return "CREATING";
3039 case VMSTATE_CREATED: return "CREATED";
3040 case VMSTATE_LOADING: return "LOADING";
3041 case VMSTATE_POWERING_ON: return "POWERING_ON";
3042 case VMSTATE_RESUMING: return "RESUMING";
3043 case VMSTATE_RUNNING: return "RUNNING";
3044 case VMSTATE_RUNNING_LS: return "RUNNING_LS";
3045 case VMSTATE_RUNNING_FT: return "RUNNING_FT";
3046 case VMSTATE_RESETTING: return "RESETTING";
3047 case VMSTATE_RESETTING_LS: return "RESETTING_LS";
3048 case VMSTATE_SUSPENDED: return "SUSPENDED";
3049 case VMSTATE_SUSPENDED_LS: return "SUSPENDED_LS";
3050 case VMSTATE_SUSPENDED_EXT_LS: return "SUSPENDED_EXT_LS";
3051 case VMSTATE_SUSPENDING: return "SUSPENDING";
3052 case VMSTATE_SUSPENDING_LS: return "SUSPENDING_LS";
3053 case VMSTATE_SUSPENDING_EXT_LS: return "SUSPENDING_EXT_LS";
3054 case VMSTATE_SAVING: return "SAVING";
3055 case VMSTATE_DEBUGGING: return "DEBUGGING";
3056 case VMSTATE_DEBUGGING_LS: return "DEBUGGING_LS";
3057 case VMSTATE_POWERING_OFF: return "POWERING_OFF";
3058 case VMSTATE_POWERING_OFF_LS: return "POWERING_OFF_LS";
3059 case VMSTATE_FATAL_ERROR: return "FATAL_ERROR";
3060 case VMSTATE_FATAL_ERROR_LS: return "FATAL_ERROR_LS";
3061 case VMSTATE_GURU_MEDITATION: return "GURU_MEDITATION";
3062 case VMSTATE_GURU_MEDITATION_LS:return "GURU_MEDITATION_LS";
3063 case VMSTATE_LOAD_FAILURE: return "LOAD_FAILURE";
3064 case VMSTATE_OFF: return "OFF";
3065 case VMSTATE_OFF_LS: return "OFF_LS";
3066 case VMSTATE_DESTROYING: return "DESTROYING";
3067 case VMSTATE_TERMINATED: return "TERMINATED";
3068
3069 default:
3070 AssertMsgFailed(("Unknown state %d\n", enmState));
3071 return "Unknown!\n";
3072 }
3073}
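
/* Illustrative usage sketch, not part of the original file: combining
 * VMR3GetState and VMR3GetStateName when logging diagnostics. */
#if 0
    VMSTATE enmState = VMR3GetState(pVM);
    LogRel(("Current VM state: %s\n", VMR3GetStateName(enmState)));
#endif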
3074
3075
3076/**
3077 * Validates the state transition in strict builds.
3078 *
3079 * @returns true if valid, false if not.
3080 *
3081 * @param enmStateOld The old (current) state.
3082 * @param enmStateNew The proposed new state.
3083 *
3084 * @remarks The reference for this is found in doc/vp/VMM.vpp, the VMSTATE
3085 * diagram (under State Machine Diagram).
3086 */
3087static bool vmR3ValidateStateTransition(VMSTATE enmStateOld, VMSTATE enmStateNew)
3088{
3089#ifdef VBOX_STRICT
3090 switch (enmStateOld)
3091 {
3092 case VMSTATE_CREATING:
3093 AssertMsgReturn(enmStateNew == VMSTATE_CREATED, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3094 break;
3095
3096 case VMSTATE_CREATED:
3097 AssertMsgReturn( enmStateNew == VMSTATE_LOADING
3098 || enmStateNew == VMSTATE_POWERING_ON
3099 || enmStateNew == VMSTATE_POWERING_OFF
3100 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3101 break;
3102
3103 case VMSTATE_LOADING:
3104 AssertMsgReturn( enmStateNew == VMSTATE_SUSPENDED
3105 || enmStateNew == VMSTATE_LOAD_FAILURE
3106 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3107 break;
3108
3109 case VMSTATE_POWERING_ON:
3110 AssertMsgReturn( enmStateNew == VMSTATE_RUNNING
3111 /*|| enmStateNew == VMSTATE_FATAL_ERROR ?*/
3112 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3113 break;
3114
3115 case VMSTATE_RESUMING:
3116 AssertMsgReturn( enmStateNew == VMSTATE_RUNNING
3117 /*|| enmStateNew == VMSTATE_FATAL_ERROR ?*/
3118 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3119 break;
3120
3121 case VMSTATE_RUNNING:
3122 AssertMsgReturn( enmStateNew == VMSTATE_POWERING_OFF
3123 || enmStateNew == VMSTATE_SUSPENDING
3124 || enmStateNew == VMSTATE_RESETTING
3125 || enmStateNew == VMSTATE_RUNNING_LS
3126 || enmStateNew == VMSTATE_RUNNING_FT
3127 || enmStateNew == VMSTATE_DEBUGGING
3128 || enmStateNew == VMSTATE_FATAL_ERROR
3129 || enmStateNew == VMSTATE_GURU_MEDITATION
3130 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3131 break;
3132
3133 case VMSTATE_RUNNING_LS:
3134 AssertMsgReturn( enmStateNew == VMSTATE_POWERING_OFF_LS
3135 || enmStateNew == VMSTATE_SUSPENDING_LS
3136 || enmStateNew == VMSTATE_SUSPENDING_EXT_LS
3137 || enmStateNew == VMSTATE_RESETTING_LS
3138 || enmStateNew == VMSTATE_RUNNING
3139 || enmStateNew == VMSTATE_DEBUGGING_LS
3140 || enmStateNew == VMSTATE_FATAL_ERROR_LS
3141 || enmStateNew == VMSTATE_GURU_MEDITATION_LS
3142 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3143 break;
3144
3145 case VMSTATE_RUNNING_FT:
3146 AssertMsgReturn( enmStateNew == VMSTATE_POWERING_OFF
3147 || enmStateNew == VMSTATE_FATAL_ERROR
3148 || enmStateNew == VMSTATE_GURU_MEDITATION
3149 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3150 break;
3151
3152 case VMSTATE_RESETTING:
3153 AssertMsgReturn(enmStateNew == VMSTATE_RUNNING, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3154 break;
3155
3156 case VMSTATE_RESETTING_LS:
3157 AssertMsgReturn( enmStateNew == VMSTATE_SUSPENDING_LS
3158 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3159 break;
3160
3161 case VMSTATE_SUSPENDING:
3162 AssertMsgReturn(enmStateNew == VMSTATE_SUSPENDED, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3163 break;
3164
3165 case VMSTATE_SUSPENDING_LS:
3166 AssertMsgReturn( enmStateNew == VMSTATE_SUSPENDING
3167 || enmStateNew == VMSTATE_SUSPENDED_LS
3168 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3169 break;
3170
3171 case VMSTATE_SUSPENDING_EXT_LS:
3172 AssertMsgReturn( enmStateNew == VMSTATE_SUSPENDING
3173 || enmStateNew == VMSTATE_SUSPENDED_EXT_LS
3174 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3175 break;
3176
3177 case VMSTATE_SUSPENDED:
3178 AssertMsgReturn( enmStateNew == VMSTATE_POWERING_OFF
3179 || enmStateNew == VMSTATE_SAVING
3180 || enmStateNew == VMSTATE_RESETTING
3181 || enmStateNew == VMSTATE_RESUMING
3182 || enmStateNew == VMSTATE_LOADING
3183 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3184 break;
3185
3186 case VMSTATE_SUSPENDED_LS:
3187 AssertMsgReturn( enmStateNew == VMSTATE_SUSPENDED
3188 || enmStateNew == VMSTATE_SAVING
3189 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3190 break;
3191
3192 case VMSTATE_SUSPENDED_EXT_LS:
3193 AssertMsgReturn( enmStateNew == VMSTATE_SUSPENDED
3194 || enmStateNew == VMSTATE_SAVING
3195 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3196 break;
3197
3198 case VMSTATE_SAVING:
3199 AssertMsgReturn(enmStateNew == VMSTATE_SUSPENDED, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3200 break;
3201
3202 case VMSTATE_DEBUGGING:
3203 AssertMsgReturn( enmStateNew == VMSTATE_RUNNING
3204 || enmStateNew == VMSTATE_POWERING_OFF
3205 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3206 break;
3207
3208 case VMSTATE_DEBUGGING_LS:
3209 AssertMsgReturn( enmStateNew == VMSTATE_DEBUGGING
3210 || enmStateNew == VMSTATE_RUNNING_LS
3211 || enmStateNew == VMSTATE_POWERING_OFF_LS
3212 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3213 break;
3214
3215 case VMSTATE_POWERING_OFF:
3216 AssertMsgReturn(enmStateNew == VMSTATE_OFF, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3217 break;
3218
3219 case VMSTATE_POWERING_OFF_LS:
3220 AssertMsgReturn( enmStateNew == VMSTATE_POWERING_OFF
3221 || enmStateNew == VMSTATE_OFF_LS
3222 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3223 break;
3224
3225 case VMSTATE_OFF:
3226 AssertMsgReturn(enmStateNew == VMSTATE_DESTROYING, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3227 break;
3228
3229 case VMSTATE_OFF_LS:
3230 AssertMsgReturn(enmStateNew == VMSTATE_OFF, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3231 break;
3232
3233 case VMSTATE_FATAL_ERROR:
3234 AssertMsgReturn(enmStateNew == VMSTATE_POWERING_OFF, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3235 break;
3236
3237 case VMSTATE_FATAL_ERROR_LS:
3238 AssertMsgReturn( enmStateNew == VMSTATE_FATAL_ERROR
3239 || enmStateNew == VMSTATE_POWERING_OFF_LS
3240 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3241 break;
3242
3243 case VMSTATE_GURU_MEDITATION:
3244 AssertMsgReturn( enmStateNew == VMSTATE_DEBUGGING
3245 || enmStateNew == VMSTATE_POWERING_OFF
3246 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3247 break;
3248
3249 case VMSTATE_GURU_MEDITATION_LS:
3250 AssertMsgReturn( enmStateNew == VMSTATE_GURU_MEDITATION
3251 || enmStateNew == VMSTATE_DEBUGGING_LS
3252 || enmStateNew == VMSTATE_POWERING_OFF_LS
3253 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3254 break;
3255
3256 case VMSTATE_LOAD_FAILURE:
3257 AssertMsgReturn(enmStateNew == VMSTATE_POWERING_OFF, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3258 break;
3259
3260 case VMSTATE_DESTROYING:
3261 AssertMsgReturn(enmStateNew == VMSTATE_TERMINATED, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3262 break;
3263
3264 case VMSTATE_TERMINATED:
3265 default:
3266 AssertMsgFailedReturn(("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3267 break;
3268 }
3269#endif /* VBOX_STRICT */
3270 return true;
3271}
3272
3273
3274/**
3275 * Does the state change callouts.
3276 *
3277 * The caller owns the AtStateCritSect.
3278 *
3279 * @param pVM The VM handle.
3280 * @param pUVM The UVM handle.
3281 * @param enmStateNew The New state.
3282 * @param enmStateOld The old state.
3283 */
3284static void vmR3DoAtState(PVM pVM, PUVM pUVM, VMSTATE enmStateNew, VMSTATE enmStateOld)
3285{
3286 LogRel(("Changing the VM state from '%s' to '%s'.\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)));
3287
3288 for (PVMATSTATE pCur = pUVM->vm.s.pAtState; pCur; pCur = pCur->pNext)
3289 {
3290 pCur->pfnAtState(pVM, enmStateNew, enmStateOld, pCur->pvUser);
3291 if ( enmStateNew != VMSTATE_DESTROYING
3292 && pVM->enmVMState == VMSTATE_DESTROYING)
3293 break;
3294 AssertMsg(pVM->enmVMState == enmStateNew,
3295 ("You are not allowed to change the state while in the change callback, except "
3296 "from destroying the VM. There are restrictions in the way the state changes "
3297 "are propagated up to the EM execution loop and it makes the program flow very "
3298 "difficult to follow. (%s, expected %s, old %s)\n",
3299 VMR3GetStateName(pVM->enmVMState), VMR3GetStateName(enmStateNew),
3300 VMR3GetStateName(enmStateOld)));
3301 }
3302}
3303
3304
3305/**
3306 * Sets the current VM state, with the AtStateCritSect already entered.
3307 *
3308 * @param pVM The VM handle.
3309 * @param pUVM The UVM handle.
3310 * @param enmStateNew The new state.
3311 * @param enmStateOld The old state.
3312 */
3313static void vmR3SetStateLocked(PVM pVM, PUVM pUVM, VMSTATE enmStateNew, VMSTATE enmStateOld)
3314{
3315 vmR3ValidateStateTransition(enmStateOld, enmStateNew);
3316
3317 AssertMsg(pVM->enmVMState == enmStateOld,
3318 ("%s != %s\n", VMR3GetStateName(pVM->enmVMState), VMR3GetStateName(enmStateOld)));
3319 pUVM->vm.s.enmPrevVMState = enmStateOld;
3320 pVM->enmVMState = enmStateNew;
3321 VM_FF_CLEAR(pVM, VM_FF_CHECK_VM_STATE);
3322
3323 vmR3DoAtState(pVM, pUVM, enmStateNew, enmStateOld);
3324}
3325
3326
3327/**
3328 * Sets the current VM state.
3329 *
3330 * @param pVM VM handle.
3331 * @param enmStateNew The new state.
3332 * @param enmStateOld The old state (for asserting only).
3333 */
3334static void vmR3SetState(PVM pVM, VMSTATE enmStateNew, VMSTATE enmStateOld)
3335{
3336 PUVM pUVM = pVM->pUVM;
3337 RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
3338
3339 AssertMsg(pVM->enmVMState == enmStateOld,
3340 ("%s != %s\n", VMR3GetStateName(pVM->enmVMState), VMR3GetStateName(enmStateOld)));
3341 vmR3SetStateLocked(pVM, pUVM, enmStateNew, pVM->enmVMState);
3342
3343 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
3344}
3345
3346
3347/**
3348 * Tries to perform a state transition.
3349 *
3350 * @returns The 1-based ordinal of the succeeding transition.
3351 * VERR_VM_INVALID_VM_STATE and Assert+LogRel on failure.
3352 *
3353 * @param pVM The VM handle.
3354 * @param pszWho Who is trying to change it.
3355 * @param cTransitions The number of transitions in the ellipsis.
3356 * @param ... Transition pairs; new, old.
3357 */
3358static int vmR3TrySetState(PVM pVM, const char *pszWho, unsigned cTransitions, ...)
3359{
3360 va_list va;
3361 VMSTATE enmStateNew = VMSTATE_CREATED;
3362 VMSTATE enmStateOld = VMSTATE_CREATED;
3363
3364#ifdef VBOX_STRICT
3365 /*
3366 * Validate the input first.
3367 */
3368 va_start(va, cTransitions);
3369 for (unsigned i = 0; i < cTransitions; i++)
3370 {
3371 enmStateNew = (VMSTATE)va_arg(va, /*VMSTATE*/int);
3372 enmStateOld = (VMSTATE)va_arg(va, /*VMSTATE*/int);
3373 vmR3ValidateStateTransition(enmStateOld, enmStateNew);
3374 }
3375 va_end(va);
3376#endif
3377
3378 /*
3379 * Grab the lock and see if any of the proposed transitions works out.
3380 */
3381 va_start(va, cTransitions);
3382 int rc = VERR_VM_INVALID_VM_STATE;
3383 PUVM pUVM = pVM->pUVM;
3384 RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
3385
3386 VMSTATE enmStateCur = pVM->enmVMState;
3387
3388 for (unsigned i = 0; i < cTransitions; i++)
3389 {
3390 enmStateNew = (VMSTATE)va_arg(va, /*VMSTATE*/int);
3391 enmStateOld = (VMSTATE)va_arg(va, /*VMSTATE*/int);
3392 if (enmStateCur == enmStateOld)
3393 {
3394 vmR3SetStateLocked(pVM, pUVM, enmStateNew, enmStateOld);
3395 rc = i + 1;
3396 break;
3397 }
3398 }
3399
3400 if (RT_FAILURE(rc))
3401 {
3402 /*
3403 * Complain about it.
3404 */
3405 if (cTransitions == 1)
3406 {
3407 LogRel(("%s: %s -> %s failed, because the VM state is actually %s\n",
3408 pszWho, VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew), VMR3GetStateName(enmStateCur)));
3409 VMSetError(pVM, VERR_VM_INVALID_VM_STATE, RT_SRC_POS,
3410 N_("%s failed because the VM state is %s instead of %s"),
3411 pszWho, VMR3GetStateName(enmStateCur), VMR3GetStateName(enmStateOld));
3412 AssertMsgFailed(("%s: %s -> %s failed, because the VM state is actually %s\n",
3413 pszWho, VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew), VMR3GetStateName(enmStateCur)));
3414 }
3415 else
3416 {
3417 va_end(va);
3418 va_start(va, cTransitions);
3419 LogRel(("%s:\n", pszWho));
3420 for (unsigned i = 0; i < cTransitions; i++)
3421 {
3422 enmStateNew = (VMSTATE)va_arg(va, /*VMSTATE*/int);
3423 enmStateOld = (VMSTATE)va_arg(va, /*VMSTATE*/int);
3424 LogRel(("%s%s -> %s",
3425 i ? ", " : " ", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)));
3426 }
3427 LogRel((" failed, because the VM state is actually %s\n", VMR3GetStateName(enmStateCur)));
3428 VMSetError(pVM, VERR_VM_INVALID_VM_STATE, RT_SRC_POS,
3429 N_("%s failed because the current VM state, %s, was not found in the state transition table"),
3430 pszWho, VMR3GetStateName(enmStateCur), VMR3GetStateName(enmStateOld));
3431 AssertMsgFailed(("%s - state=%s, see release log for full details. Check the cTransitions passed us.\n",
3432 pszWho, VMR3GetStateName(enmStateCur)));
3433 }
3434 }
3435
3436 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
3437 va_end(va);
3438 Assert(rc > 0 || rc < 0);
3439 return rc;
3440}
3441
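/*
 * Illustrative only (added sketch, not part of the original file): the
 * variadic arguments are (new, old) state pairs, and the return value is the
 * 1-based index of the pair that matched the current state.  The pairs below
 * are the same ones vmR3SetRuntimeErrorChangeState further down passes; the
 * "MyCaller" string is a made-up caller name.
 *
 * @code
 *     int rc = vmR3TrySetState(pVM, "MyCaller", 2,
 *                              VMSTATE_FATAL_ERROR,    VMSTATE_RUNNING,
 *                              VMSTATE_FATAL_ERROR_LS, VMSTATE_RUNNING_LS);
 *     if (RT_SUCCESS(rc) && rc == 2)
 *         ; // the live-save pair (RUNNING_LS -> FATAL_ERROR_LS) was the one taken
 * @endcode
 */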
3442
3443/**
3444 * Flag a guru meditation ... a hack.
3445 *
3446 * @param pVM The VM handle
3447 *
3448 * @todo Rewrite this part. The guru meditation should be flagged
3449 * immediately by the VMM and not by VMEmt.cpp when it's all over.
3450 */
3451void vmR3SetGuruMeditation(PVM pVM)
3452{
3453 PUVM pUVM = pVM->pUVM;
3454 RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
3455
3456 VMSTATE enmStateCur = pVM->enmVMState;
3457 if (enmStateCur == VMSTATE_RUNNING)
3458 vmR3SetStateLocked(pVM, pUVM, VMSTATE_GURU_MEDITATION, VMSTATE_RUNNING);
3459 else if (enmStateCur == VMSTATE_RUNNING_LS)
3460 {
3461 vmR3SetStateLocked(pVM, pUVM, VMSTATE_GURU_MEDITATION_LS, VMSTATE_RUNNING_LS);
3462 SSMR3Cancel(pVM);
3463 }
3464
3465 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
3466}
3467
3468
3469/**
3470 * Called by vmR3EmulationThreadWithId just before the VM structure is freed.
3471 *
3472 * @param pVM The VM handle.
3473 */
3474void vmR3SetTerminated(PVM pVM)
3475{
3476 vmR3SetState(pVM, VMSTATE_TERMINATED, VMSTATE_DESTROYING);
3477}
3478
3479
3480/**
3481 * Checks if the VM was teleported and hasn't been fully resumed yet.
3482 *
3483 * This applies to both sides of the teleportation since we may leave a working
3484 * clone behind and the user is allowed to resume this...
3485 *
3486 * @returns true / false.
3487 * @param pVM The VM handle.
3488 * @thread Any thread.
3489 */
3490VMMR3DECL(bool) VMR3TeleportedAndNotFullyResumedYet(PVM pVM)
3491{
3492 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
3493 return pVM->vm.s.fTeleportedAndNotFullyResumedYet;
3494}
3495
3496
3497/**
3498 * Registers a VM state change callback.
3499 *
3500 * You are not allowed to call any function which changes the VM state from a
3501 * state callback.
3502 *
3503 * @returns VBox status code.
3504 * @param pVM VM handle.
3505 * @param pfnAtState Pointer to callback.
3506 * @param pvUser User argument.
3507 * @thread Any.
3508 */
3509VMMR3DECL(int) VMR3AtStateRegister(PVM pVM, PFNVMATSTATE pfnAtState, void *pvUser)
3510{
3511 LogFlow(("VMR3AtStateRegister: pfnAtState=%p pvUser=%p\n", pfnAtState, pvUser));
3512
3513 /*
3514 * Validate input.
3515 */
3516 AssertPtrReturn(pfnAtState, VERR_INVALID_PARAMETER);
3517 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
3518
3519 /*
3520 * Allocate a new record.
3521 */
3522 PUVM pUVM = pVM->pUVM;
3523 PVMATSTATE pNew = (PVMATSTATE)MMR3HeapAllocU(pUVM, MM_TAG_VM, sizeof(*pNew));
3524 if (!pNew)
3525 return VERR_NO_MEMORY;
3526
3527 /* fill */
3528 pNew->pfnAtState = pfnAtState;
3529 pNew->pvUser = pvUser;
3530
3531 /* insert */
3532 RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
3533 pNew->pNext = *pUVM->vm.s.ppAtStateNext;
3534 *pUVM->vm.s.ppAtStateNext = pNew;
3535 pUVM->vm.s.ppAtStateNext = &pNew->pNext;
3536 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
3537
3538 return VINF_SUCCESS;
3539}
3540
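/*
 * Illustrative only (added sketch, not in the original file): a minimal
 * example of how a VMM user could hook state changes.  The callback name and
 * pvUser value are made up; the FNVMATSTATE parameter order is assumed to be
 * (pVM, new state, old state, user argument) as declared in vmapi.h.
 *
 * @code
 *     static DECLCALLBACK(void) myAtState(PVM pVM, VMSTATE enmState, VMSTATE enmOldState, void *pvUser)
 *     {
 *         // Must not change the VM state from inside the callback, see the note above.
 *         LogRel(("myAtState: %s -> %s\n", VMR3GetStateName(enmOldState), VMR3GetStateName(enmState)));
 *     }
 *
 *     int rc = VMR3AtStateRegister(pVM, myAtState, NULL);
 *     AssertRC(rc);
 *     ...
 *     VMR3AtStateDeregister(pVM, myAtState, NULL);  // same pfn + pvUser pair as registered
 * @endcode
 */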
3541
3542/**
3543 * Deregisters a VM state change callback.
3544 *
3545 * @returns VBox status code.
3546 * @param pVM VM handle.
3547 * @param pfnAtState Pointer to callback.
3548 * @param pvUser User argument.
3549 * @thread Any.
3550 */
3551VMMR3DECL(int) VMR3AtStateDeregister(PVM pVM, PFNVMATSTATE pfnAtState, void *pvUser)
3552{
3553 LogFlow(("VMR3AtStateDeregister: pfnAtState=%p pvUser=%p\n", pfnAtState, pvUser));
3554
3555 /*
3556 * Validate input.
3557 */
3558 AssertPtrReturn(pfnAtState, VERR_INVALID_PARAMETER);
3559 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
3560
3561 PUVM pUVM = pVM->pUVM;
3562 RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
3563
3564 /*
3565 * Search the list for the entry.
3566 */
3567 PVMATSTATE pPrev = NULL;
3568 PVMATSTATE pCur = pUVM->vm.s.pAtState;
3569 while ( pCur
3570 && ( pCur->pfnAtState != pfnAtState
3571 || pCur->pvUser != pvUser))
3572 {
3573 pPrev = pCur;
3574 pCur = pCur->pNext;
3575 }
3576 if (!pCur)
3577 {
3578 AssertMsgFailed(("pfnAtState=%p was not found\n", pfnAtState));
3579 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
3580 return VERR_FILE_NOT_FOUND;
3581 }
3582
3583 /*
3584 * Unlink it.
3585 */
3586 if (pPrev)
3587 {
3588 pPrev->pNext = pCur->pNext;
3589 if (!pCur->pNext)
3590 pUVM->vm.s.ppAtStateNext = &pPrev->pNext;
3591 }
3592 else
3593 {
3594 pUVM->vm.s.pAtState = pCur->pNext;
3595 if (!pCur->pNext)
3596 pUVM->vm.s.ppAtStateNext = &pUVM->vm.s.pAtState;
3597 }
3598
3599 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
3600
3601 /*
3602 * Free it.
3603 */
3604 pCur->pfnAtState = NULL;
3605 pCur->pNext = NULL;
3606 MMR3HeapFree(pCur);
3607
3608 return VINF_SUCCESS;
3609}
3610
3611
3612/**
3613 * Registers a VM error callback.
3614 *
3615 * @returns VBox status code.
3616 * @param pVM The VM handle.
3617 * @param pfnAtError Pointer to callback.
3618 * @param pvUser User argument.
3619 * @thread Any.
3620 */
3621VMMR3DECL(int) VMR3AtErrorRegister(PVM pVM, PFNVMATERROR pfnAtError, void *pvUser)
3622{
3623 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
3624 return VMR3AtErrorRegisterU(pVM->pUVM, pfnAtError, pvUser);
3625}
3626
3627
3628/**
3629 * Registers a VM error callback.
3630 *
3631 * @returns VBox status code.
3632 * @param pUVM The user mode VM handle.
3633 * @param pfnAtError Pointer to callback.
3634 * @param pvUser User argument.
3635 * @thread Any.
3636 */
3637VMMR3DECL(int) VMR3AtErrorRegisterU(PUVM pUVM, PFNVMATERROR pfnAtError, void *pvUser)
3638{
3639 LogFlow(("VMR3AtErrorRegister: pfnAtError=%p pvUser=%p\n", pfnAtError, pvUser));
3640
3641 /*
3642 * Validate input.
3643 */
3644 AssertPtrReturn(pfnAtError, VERR_INVALID_PARAMETER);
3645 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
3646
3647 /*
3648 * Allocate a new record.
3649 */
3650 PVMATERROR pNew = (PVMATERROR)MMR3HeapAllocU(pUVM, MM_TAG_VM, sizeof(*pNew));
3651 if (!pNew)
3652 return VERR_NO_MEMORY;
3653
3654 /* fill */
3655 pNew->pfnAtError = pfnAtError;
3656 pNew->pvUser = pvUser;
3657
3658 /* insert */
3659 RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
3660 pNew->pNext = *pUVM->vm.s.ppAtErrorNext;
3661 *pUVM->vm.s.ppAtErrorNext = pNew;
3662 pUVM->vm.s.ppAtErrorNext = &pNew->pNext;
3663 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
3664
3665 return VINF_SUCCESS;
3666}
3667
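/*
 * Illustrative only (added sketch): an at-error callback shaped the way it is
 * invoked by vmR3SetErrorUV / vmR3SetErrorWorkerDoCall below, i.e. with the
 * status code, source position and a format string + va_list.  The callback
 * name is made up for the example.
 *
 * @code
 *     static DECLCALLBACK(void) myAtError(PVM pVM, void *pvUser, int rc, RT_SRC_POS_DECL,
 *                                         const char *pszFormat, va_list va)
 *     {
 *         va_list vaCopy;
 *         va_copy(vaCopy, va);
 *         LogRel(("myAtError: rc=%Rrc at %s(%u) %s: %N\n",
 *                 rc, pszFile, iLine, pszFunction, pszFormat, &vaCopy));
 *         va_end(vaCopy);
 *     }
 *
 *     int rc = VMR3AtErrorRegisterU(pUVM, myAtError, NULL);
 * @endcode
 */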
3668
3669/**
3670 * Deregisters a VM error callback.
3671 *
3672 * @returns VBox status code.
3673 * @param pVM The VM handle.
3674 * @param pfnAtError Pointer to callback.
3675 * @param pvUser User argument.
3676 * @thread Any.
3677 */
3678VMMR3DECL(int) VMR3AtErrorDeregister(PVM pVM, PFNVMATERROR pfnAtError, void *pvUser)
3679{
3680 LogFlow(("VMR3AtErrorDeregister: pfnAtError=%p pvUser=%p\n", pfnAtError, pvUser));
3681
3682 /*
3683 * Validate input.
3684 */
3685 AssertPtrReturn(pfnAtError, VERR_INVALID_PARAMETER);
3686 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
3687
3688 PUVM pUVM = pVM->pUVM;
3689 RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
3690
3691 /*
3692 * Search the list for the entry.
3693 */
3694 PVMATERROR pPrev = NULL;
3695 PVMATERROR pCur = pUVM->vm.s.pAtError;
3696 while ( pCur
3697 && ( pCur->pfnAtError != pfnAtError
3698 || pCur->pvUser != pvUser))
3699 {
3700 pPrev = pCur;
3701 pCur = pCur->pNext;
3702 }
3703 if (!pCur)
3704 {
3705 AssertMsgFailed(("pfnAtError=%p was not found\n", pfnAtError));
3706 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
3707 return VERR_FILE_NOT_FOUND;
3708 }
3709
3710 /*
3711 * Unlink it.
3712 */
3713 if (pPrev)
3714 {
3715 pPrev->pNext = pCur->pNext;
3716 if (!pCur->pNext)
3717 pUVM->vm.s.ppAtErrorNext = &pPrev->pNext;
3718 }
3719 else
3720 {
3721 pUVM->vm.s.pAtError = pCur->pNext;
3722 if (!pCur->pNext)
3723 pUVM->vm.s.ppAtErrorNext = &pUVM->vm.s.pAtError;
3724 }
3725
3726 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
3727
3728 /*
3729 * Free it.
3730 */
3731 pCur->pfnAtError = NULL;
3732 pCur->pNext = NULL;
3733 MMR3HeapFree(pCur);
3734
3735 return VINF_SUCCESS;
3736}
3737
3738
3739/**
3740 * Ellipsis to va_list wrapper for calling pfnAtError.
3741 */
3742static void vmR3SetErrorWorkerDoCall(PVM pVM, PVMATERROR pCur, int rc, RT_SRC_POS_DECL, const char *pszFormat, ...)
3743{
3744 va_list va;
3745 va_start(va, pszFormat);
3746 pCur->pfnAtError(pVM, pCur->pvUser, rc, RT_SRC_POS_ARGS, pszFormat, va);
3747 va_end(va);
3748}
3749
3750
3751/**
3752 * This is a worker function for GC and Ring-0 calls to VMSetError and VMSetErrorV.
3753 * The message is found in VMINT.
3754 *
3755 * @param pVM The VM handle.
3756 * @thread EMT.
3757 */
3758VMMR3DECL(void) VMR3SetErrorWorker(PVM pVM)
3759{
3760 VM_ASSERT_EMT(pVM);
3761 AssertReleaseMsgFailed(("And we have a winner! You get to implement Ring-0 and GC VMSetErrorV! Contracts!\n"));
3762
3763 /*
3764 * Unpack the error (if we managed to format one).
3765 */
3766 PVMERROR pErr = pVM->vm.s.pErrorR3;
3767 const char *pszFile = NULL;
3768 const char *pszFunction = NULL;
3769 uint32_t iLine = 0;
3770 const char *pszMessage;
3771 int32_t rc = VERR_MM_HYPER_NO_MEMORY;
3772 if (pErr)
3773 {
3774 AssertCompile(sizeof(const char) == sizeof(uint8_t));
3775 if (pErr->offFile)
3776 pszFile = (const char *)pErr + pErr->offFile;
3777 iLine = pErr->iLine;
3778 if (pErr->offFunction)
3779 pszFunction = (const char *)pErr + pErr->offFunction;
3780 if (pErr->offMessage)
3781 pszMessage = (const char *)pErr + pErr->offMessage;
3782 else
3783 pszMessage = "No message!";
3784 }
3785 else
3786 pszMessage = "No message! (Failed to allocate memory to put the error message in!)";
3787
3788 /*
3789 * Call the at error callbacks.
3790 */
3791 PUVM pUVM = pVM->pUVM;
3792 RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
3793 ASMAtomicIncU32(&pUVM->vm.s.cRuntimeErrors);
3794 for (PVMATERROR pCur = pUVM->vm.s.pAtError; pCur; pCur = pCur->pNext)
3795 vmR3SetErrorWorkerDoCall(pVM, pCur, rc, RT_SRC_POS_ARGS, "%s", pszMessage);
3796 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
3797}
3798
3799
3800/**
3801 * Gets the number of errors raised via VMSetError.
3802 *
3803 * This can be used to avoid double error messages.
3804 *
3805 * @returns The error count.
3806 * @param pVM The VM handle.
3807 */
3808VMMR3DECL(uint32_t) VMR3GetErrorCount(PVM pVM)
3809{
3810 AssertPtrReturn(pVM, 0);
3811 return VMR3GetErrorCountU(pVM->pUVM);
3812}
3813
3814
3815/**
3816 * Gets the number of errors raised via VMSetError.
3817 *
3818 * This can be used to avoid double error messages.
3819 *
3820 * @returns The error count.
3821 * @param pUVM The user mode VM handle.
3822 */
3823VMMR3DECL(uint32_t) VMR3GetErrorCountU(PUVM pUVM)
3824{
3825 AssertPtrReturn(pUVM, 0);
3826 AssertReturn(pUVM->u32Magic == UVM_MAGIC, 0);
3827 return pUVM->vm.s.cErrors;
3828}
3829
3830
3831/**
3832 * Creation time wrapper for vmR3SetErrorUV.
3833 *
3834 * @returns rc.
3835 * @param pUVM Pointer to the user mode VM structure.
3836 * @param rc The VBox status code.
3837 * @param RT_SRC_POS_DECL The source position of this error.
3838 * @param pszFormat Format string.
3839 * @param ... The arguments.
3840 * @thread Any thread.
3841 */
3842static int vmR3SetErrorU(PUVM pUVM, int rc, RT_SRC_POS_DECL, const char *pszFormat, ...)
3843{
3844 va_list va;
3845 va_start(va, pszFormat);
3846 vmR3SetErrorUV(pUVM, rc, pszFile, iLine, pszFunction, pszFormat, &va);
3847 va_end(va);
3848 return rc;
3849}
3850
3851
3852/**
3853 * Worker which calls everyone listening to the VM error messages.
3854 *
3855 * @param pUVM Pointer to the user mode VM structure.
3856 * @param rc The VBox status code.
3857 * @param RT_SRC_POS_DECL The source position of this error.
3858 * @param pszFormat Format string.
3859 * @param pArgs Pointer to the format arguments.
3860 * @thread EMT
3861 */
3862DECLCALLBACK(void) vmR3SetErrorUV(PUVM pUVM, int rc, RT_SRC_POS_DECL, const char *pszFormat, va_list *pArgs)
3863{
3864 /*
3865 * Log the error.
3866 */
3867 va_list va3;
3868 va_copy(va3, *pArgs);
3869 RTLogRelPrintf("VMSetError: %s(%d) %s; rc=%Rrc\n"
3870 "VMSetError: %N\n",
3871 pszFile, iLine, pszFunction, rc,
3872 pszFormat, &va3);
3873 va_end(va3);
3874
3875#ifdef LOG_ENABLED
3876 va_copy(va3, *pArgs);
3877 RTLogPrintf("VMSetError: %s(%d) %s; rc=%Rrc\n"
3878 "%N\n",
3879 pszFile, iLine, pszFunction, rc,
3880 pszFormat, &va3);
3881 va_end(va3);
3882#endif
3883
3884 /*
3885 * Make a copy of the message.
3886 */
3887 if (pUVM->pVM)
3888 vmSetErrorCopy(pUVM->pVM, rc, RT_SRC_POS_ARGS, pszFormat, *pArgs);
3889
3890 /*
3891 * Call the at error callbacks.
3892 */
3893 bool fCalledSomeone = false;
3894 RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
3895 ASMAtomicIncU32(&pUVM->vm.s.cErrors);
3896 for (PVMATERROR pCur = pUVM->vm.s.pAtError; pCur; pCur = pCur->pNext)
3897 {
3898 va_list va2;
3899 va_copy(va2, *pArgs);
3900 pCur->pfnAtError(pUVM->pVM, pCur->pvUser, rc, RT_SRC_POS_ARGS, pszFormat, va2);
3901 va_end(va2);
3902 fCalledSomeone = true;
3903 }
3904 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
3905}
3906
3907
3908/**
3909 * Registers a VM runtime error callback.
3910 *
3911 * @returns VBox status code.
3912 * @param pVM The VM handle.
3913 * @param pfnAtRuntimeError Pointer to callback.
3914 * @param pvUser User argument.
3915 * @thread Any.
3916 */
3917VMMR3DECL(int) VMR3AtRuntimeErrorRegister(PVM pVM, PFNVMATRUNTIMEERROR pfnAtRuntimeError, void *pvUser)
3918{
3919 LogFlow(("VMR3AtRuntimeErrorRegister: pfnAtRuntimeError=%p pvUser=%p\n", pfnAtRuntimeError, pvUser));
3920
3921 /*
3922 * Validate input.
3923 */
3924 AssertPtrReturn(pfnAtRuntimeError, VERR_INVALID_PARAMETER);
3925 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
3926
3927 /*
3928 * Allocate a new record.
3929 */
3930 PUVM pUVM = pVM->pUVM;
3931 PVMATRUNTIMEERROR pNew = (PVMATRUNTIMEERROR)MMR3HeapAllocU(pUVM, MM_TAG_VM, sizeof(*pNew));
3932 if (!pNew)
3933 return VERR_NO_MEMORY;
3934
3935 /* fill */
3936 pNew->pfnAtRuntimeError = pfnAtRuntimeError;
3937 pNew->pvUser = pvUser;
3938
3939 /* insert */
3940 RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
3941 pNew->pNext = *pUVM->vm.s.ppAtRuntimeErrorNext;
3942 *pUVM->vm.s.ppAtRuntimeErrorNext = pNew;
3943 pUVM->vm.s.ppAtRuntimeErrorNext = &pNew->pNext;
3944 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
3945
3946 return VINF_SUCCESS;
3947}
3948
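/*
 * Illustrative only (added sketch): a runtime error callback matching the way
 * vmR3SetRuntimeErrorCommon below invokes it (flags, error ID, format string
 * and va_list).  The callback name is made up for the example.
 *
 * @code
 *     static DECLCALLBACK(void) myAtRuntimeError(PVM pVM, void *pvUser, uint32_t fFlags,
 *                                                const char *pszErrorId, const char *pszFormat, va_list va)
 *     {
 *         LogRel(("myAtRuntimeError: '%s' fFlags=%#x fatal=%RTbool\n",
 *                 pszErrorId, fFlags, RT_BOOL(fFlags & VMSETRTERR_FLAGS_FATAL)));
 *     }
 *
 *     int rc = VMR3AtRuntimeErrorRegister(pVM, myAtRuntimeError, NULL);
 * @endcode
 */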
3949
3950/**
3951 * Deregisters a VM runtime error callback.
3952 *
3953 * @returns VBox status code.
3954 * @param pVM The VM handle.
3955 * @param pfnAtRuntimeError Pointer to callback.
3956 * @param pvUser User argument.
3957 * @thread Any.
3958 */
3959VMMR3DECL(int) VMR3AtRuntimeErrorDeregister(PVM pVM, PFNVMATRUNTIMEERROR pfnAtRuntimeError, void *pvUser)
3960{
3961 LogFlow(("VMR3AtRuntimeErrorDeregister: pfnAtRuntimeError=%p pvUser=%p\n", pfnAtRuntimeError, pvUser));
3962
3963 /*
3964 * Validate input.
3965 */
3966 AssertPtrReturn(pfnAtRuntimeError, VERR_INVALID_PARAMETER);
3967 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
3968
3969 PUVM pUVM = pVM->pUVM;
3970 RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
3971
3972 /*
3973 * Search the list for the entry.
3974 */
3975 PVMATRUNTIMEERROR pPrev = NULL;
3976 PVMATRUNTIMEERROR pCur = pUVM->vm.s.pAtRuntimeError;
3977 while ( pCur
3978 && ( pCur->pfnAtRuntimeError != pfnAtRuntimeError
3979 || pCur->pvUser != pvUser))
3980 {
3981 pPrev = pCur;
3982 pCur = pCur->pNext;
3983 }
3984 if (!pCur)
3985 {
3986 AssertMsgFailed(("pfnAtRuntimeError=%p was not found\n", pfnAtRuntimeError));
3987 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
3988 return VERR_FILE_NOT_FOUND;
3989 }
3990
3991 /*
3992 * Unlink it.
3993 */
3994 if (pPrev)
3995 {
3996 pPrev->pNext = pCur->pNext;
3997 if (!pCur->pNext)
3998 pUVM->vm.s.ppAtRuntimeErrorNext = &pPrev->pNext;
3999 }
4000 else
4001 {
4002 pUVM->vm.s.pAtRuntimeError = pCur->pNext;
4003 if (!pCur->pNext)
4004 pUVM->vm.s.ppAtRuntimeErrorNext = &pUVM->vm.s.pAtRuntimeError;
4005 }
4006
4007 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
4008
4009 /*
4010 * Free it.
4011 */
4012 pCur->pfnAtRuntimeError = NULL;
4013 pCur->pNext = NULL;
4014 MMR3HeapFree(pCur);
4015
4016 return VINF_SUCCESS;
4017}
4018
4019
4020/**
4021 * EMT rendezvous worker that vmR3SetRuntimeErrorCommon uses to safely change
4022 * the state to FatalError(LS).
4023 *
4024 * @returns VERR_VM_INVALID_VM_STATE or VINF_EM_SUSPEND. (This is a strict
4025 * return code, see FNVMMEMTRENDEZVOUS.)
4026 *
4027 * @param pVM The VM handle.
4028 * @param pVCpu The VMCPU handle of the EMT.
4029 * @param pvUser Ignored.
4030 */
4031static DECLCALLBACK(VBOXSTRICTRC) vmR3SetRuntimeErrorChangeState(PVM pVM, PVMCPU pVCpu, void *pvUser)
4032{
4033 NOREF(pVCpu);
4034 Assert(!pvUser); NOREF(pvUser);
4035
4036 /*
4037 * The first EMT thru here changes the state.
4038 */
4039 if (pVCpu->idCpu == pVM->cCpus - 1)
4040 {
4041 int rc = vmR3TrySetState(pVM, "VMSetRuntimeError", 2,
4042 VMSTATE_FATAL_ERROR, VMSTATE_RUNNING,
4043 VMSTATE_FATAL_ERROR_LS, VMSTATE_RUNNING_LS);
4044 if (RT_FAILURE(rc))
4045 return rc;
4046 if (rc == 2)
4047 SSMR3Cancel(pVM);
4048
4049 VM_FF_SET(pVM, VM_FF_CHECK_VM_STATE);
4050 }
4051
4052 /* This'll make sure we get out of wherever we are (e.g. REM). */
4053 return VINF_EM_SUSPEND;
4054}
4055
4056
4057/**
4058 * Worker for VMR3SetRuntimeErrorWorker and vmR3SetRuntimeErrorV.
4059 *
4060 * This does the common parts after the error has been saved / retrieved.
4061 *
4062 * @returns VBox status code with modifications, see VMSetRuntimeErrorV.
4063 *
4064 * @param pVM The VM handle.
4065 * @param fFlags The error flags.
4066 * @param pszErrorId Error ID string.
4067 * @param pszFormat Format string.
4068 * @param pVa Pointer to the format arguments.
4069 */
4070static int vmR3SetRuntimeErrorCommon(PVM pVM, uint32_t fFlags, const char *pszErrorId, const char *pszFormat, va_list *pVa)
4071{
4072 LogRel(("VM: Raising runtime error '%s' (fFlags=%#x)\n", pszErrorId, fFlags));
4073
4074 /*
4075 * Take actions before the call.
4076 */
4077 int rc;
4078 if (fFlags & VMSETRTERR_FLAGS_FATAL)
4079 rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
4080 vmR3SetRuntimeErrorChangeState, NULL);
4081 else if (fFlags & VMSETRTERR_FLAGS_SUSPEND)
4082 rc = VMR3Suspend(pVM);
4083 else
4084 rc = VINF_SUCCESS;
4085
4086 /*
4087 * Do the callback round.
4088 */
4089 PUVM pUVM = pVM->pUVM;
4090 RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
4091 ASMAtomicIncU32(&pUVM->vm.s.cRuntimeErrors);
4092 for (PVMATRUNTIMEERROR pCur = pUVM->vm.s.pAtRuntimeError; pCur; pCur = pCur->pNext)
4093 {
4094 va_list va;
4095 va_copy(va, *pVa);
4096 pCur->pfnAtRuntimeError(pVM, pCur->pvUser, fFlags, pszErrorId, pszFormat, va);
4097 va_end(va);
4098 }
4099 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
4100
4101 return rc;
4102}
4103
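/*
 * Illustrative only (added sketch): callers normally reach this path via
 * VMSetRuntimeError with one of the VMSETRTERR_FLAGS_* values handled above.
 * The error ID, message and cbReq variable are made-up examples.
 *
 * @code
 *     // Suspend the VM and notify the at-runtime-error callbacks:
 *     rc = VMSetRuntimeError(pVM, VMSETRTERR_FLAGS_SUSPEND, "HostMemoryLow",
 *                            N_("Unable to allocate %u bytes of host memory"), cbReq);
 * @endcode
 */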
4104
4105/**
4106 * Ellipsis to va_list wrapper for calling vmR3SetRuntimeErrorCommon.
4107 */
4108static int vmR3SetRuntimeErrorCommonF(PVM pVM, uint32_t fFlags, const char *pszErrorId, const char *pszFormat, ...)
4109{
4110 va_list va;
4111 va_start(va, pszFormat);
4112 int rc = vmR3SetRuntimeErrorCommon(pVM, fFlags, pszErrorId, pszFormat, &va);
4113 va_end(va);
4114 return rc;
4115}
4116
4117
4118/**
4119 * This is a worker function for RC and Ring-0 calls to VMSetRuntimeError and
4120 * VMSetRuntimeErrorV.
4121 *
4122 * The message is found in VMINT.
4123 *
4124 * @returns VBox status code, see VMSetRuntimeError.
4125 * @param pVM The VM handle.
4126 * @thread EMT.
4127 */
4128VMMR3DECL(int) VMR3SetRuntimeErrorWorker(PVM pVM)
4129{
4130 VM_ASSERT_EMT(pVM);
4131 AssertReleaseMsgFailed(("And we have a winner! You get to implement Ring-0 and GC VMSetRuntimeErrorV! Congrats!\n"));
4132
4133 /*
4134 * Unpack the error (if we managed to format one).
4135 */
4136 const char *pszErrorId = "SetRuntimeError";
4137 const char *pszMessage = "No message!";
4138 uint32_t fFlags = VMSETRTERR_FLAGS_FATAL;
4139 PVMRUNTIMEERROR pErr = pVM->vm.s.pRuntimeErrorR3;
4140 if (pErr)
4141 {
4142 AssertCompile(sizeof(const char) == sizeof(uint8_t));
4143 if (pErr->offErrorId)
4144 pszErrorId = (const char *)pErr + pErr->offErrorId;
4145 if (pErr->offMessage)
4146 pszMessage = (const char *)pErr + pErr->offMessage;
4147 fFlags = pErr->fFlags;
4148 }
4149
4150 /*
4151 * Join cause with vmR3SetRuntimeErrorV.
4152 */
4153 return vmR3SetRuntimeErrorCommonF(pVM, fFlags, pszErrorId, "%s", pszMessage);
4154}
4155
4156
4157/**
4158 * Worker for VMSetRuntimeErrorV for doing the job on EMT in ring-3.
4159 *
4160 * @returns VBox status code with modifications, see VMSetRuntimeErrorV.
4161 *
4162 * @param pVM The VM handle.
4163 * @param fFlags The error flags.
4164 * @param pszErrorId Error ID string.
4165 * @param pszMessage The error message residing in the MM heap.
4166 *
4167 * @thread EMT
4168 */
4169DECLCALLBACK(int) vmR3SetRuntimeError(PVM pVM, uint32_t fFlags, const char *pszErrorId, char *pszMessage)
4170{
4171#if 0 /** @todo make copy of the error msg. */
4172 /*
4173 * Make a copy of the message.
4174 */
4175 va_list va2;
4176 va_copy(va2, *pVa);
4177 vmSetRuntimeErrorCopy(pVM, fFlags, pszErrorId, pszFormat, va2);
4178 va_end(va2);
4179#endif
4180
4181 /*
4182 * Join paths with VMR3SetRuntimeErrorWorker.
4183 */
4184 int rc = vmR3SetRuntimeErrorCommonF(pVM, fFlags, pszErrorId, "%s", pszMessage);
4185 MMR3HeapFree(pszMessage);
4186 return rc;
4187}
4188
4189
4190/**
4191 * Worker for VMSetRuntimeErrorV for doing the job on EMT in ring-3.
4192 *
4193 * @returns VBox status code with modifications, see VMSetRuntimeErrorV.
4194 *
4195 * @param pVM The VM handle.
4196 * @param fFlags The error flags.
4197 * @param pszErrorId Error ID string.
4198 * @param pszFormat Format string.
4199 * @param pVa Pointer to the format arguments.
4200 *
4201 * @thread EMT
4202 */
4203DECLCALLBACK(int) vmR3SetRuntimeErrorV(PVM pVM, uint32_t fFlags, const char *pszErrorId, const char *pszFormat, va_list *pVa)
4204{
4205 /*
4206 * Make a copy of the message.
4207 */
4208 va_list va2;
4209 va_copy(va2, *pVa);
4210 vmSetRuntimeErrorCopy(pVM, fFlags, pszErrorId, pszFormat, va2);
4211 va_end(va2);
4212
4213 /*
4214 * Join paths with VMR3SetRuntimeErrorWorker.
4215 */
4216 return vmR3SetRuntimeErrorCommon(pVM, fFlags, pszErrorId, pszFormat, pVa);
4217}
4218
4219
4220/**
4221 * Gets the number of runtime errors raised via VMR3SetRuntimeError.
4222 *
4223 * This can be used to avoid double error messages.
4224 *
4225 * @returns The runtime error count.
4226 * @param pVM The VM handle.
4227 */
4228VMMR3DECL(uint32_t) VMR3GetRuntimeErrorCount(PVM pVM)
4229{
4230 return pVM->pUVM->vm.s.cRuntimeErrors;
4231}
4232
4233
4234/**
4235 * Gets the ID of the virtual CPU associated with the calling thread.
4236 *
4237 * @returns The CPU ID. NIL_VMCPUID if the thread isn't an EMT.
4238 *
4239 * @param pVM The VM handle.
4240 */
4241VMMR3DECL(RTCPUID) VMR3GetVMCPUId(PVM pVM)
4242{
4243 PUVMCPU pUVCpu = (PUVMCPU)RTTlsGet(pVM->pUVM->vm.s.idxTLS);
4244 return pUVCpu
4245 ? pUVCpu->idCpu
4246 : NIL_VMCPUID;
4247}
4248
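/*
 * Illustrative only (added sketch): the NIL_VMCPUID return value makes this a
 * convenient "am I an EMT?" check for code that may run on arbitrary threads.
 *
 * @code
 *     VMCPUID idCpu = VMR3GetVMCPUId(pVM);
 *     if (idCpu == NIL_VMCPUID)
 *         ; // not an EMT - queue a request instead of touching per-VCPU state
 * @endcode
 */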
4249
4250/**
4251 * Returns the native handle of the current EMT VMCPU thread.
4252 *
4253 * @returns Handle if this is an EMT thread; NIL_RTNATIVETHREAD otherwise
4254 * @param pVM The VM handle.
4255 * @thread EMT
4256 */
4257VMMR3DECL(RTNATIVETHREAD) VMR3GetVMCPUNativeThread(PVM pVM)
4258{
4259 PUVMCPU pUVCpu = (PUVMCPU)RTTlsGet(pVM->pUVM->vm.s.idxTLS);
4260
4261 if (!pUVCpu)
4262 return NIL_RTNATIVETHREAD;
4263
4264 return pUVCpu->vm.s.NativeThreadEMT;
4265}
4266
4267
4268/**
4269 * Returns the native handle of the current EMT VMCPU thread.
4270 *
4271 * @returns Handle if this is an EMT thread; NIL_RTNATIVETHREAD otherwise
4272 * @param pUVM The user mode VM handle.
4273 * @thread EMT
4274 */
4275VMMR3DECL(RTNATIVETHREAD) VMR3GetVMCPUNativeThreadU(PUVM pUVM)
4276{
4277 PUVMCPU pUVCpu = (PUVMCPU)RTTlsGet(pUVM->vm.s.idxTLS);
4278
4279 if (!pUVCpu)
4280 return NIL_RTNATIVETHREAD;
4281
4282 return pUVCpu->vm.s.NativeThreadEMT;
4283}
4284
4285
4286/**
4287 * Returns the handle of the current EMT VMCPU thread.
4288 *
4289 * @returns Handle if this is an EMT thread; NIL_RTTHREAD otherwise
4290 * @param pVM The VM handle.
4291 * @thread EMT
4292 */
4293VMMR3DECL(RTTHREAD) VMR3GetVMCPUThread(PVM pVM)
4294{
4295 PUVMCPU pUVCpu = (PUVMCPU)RTTlsGet(pVM->pUVM->vm.s.idxTLS);
4296
4297 if (!pUVCpu)
4298 return NIL_RTTHREAD;
4299
4300 return pUVCpu->vm.s.ThreadEMT;
4301}
4302
4303
4304/**
4305 * Returns the handle of the current EMT VMCPU thread.
4306 *
4307 * @returns Handle if this is an EMT thread; NIL_RTTHREAD otherwise
4308 * @param pUVM The user mode VM handle.
4309 * @thread EMT
4310 */
4311VMMR3DECL(RTTHREAD) VMR3GetVMCPUThreadU(PUVM pUVM)
4312{
4313 PUVMCPU pUVCpu = (PUVMCPU)RTTlsGet(pUVM->vm.s.idxTLS);
4314
4315 if (!pUVCpu)
4316 return NIL_RTTHREAD;
4317
4318 return pUVCpu->vm.s.ThreadEMT;
4319}
4320
4321
4322/**
4323 * Return the package and core id of a CPU.
4324 *
4325 * @returns VBox status code.
4326 * @param pVM The VM to operate on.
4327 * @param idCpu Virtual CPU to get the ID from.
4328 * @param pidCpuCore Where to store the core ID of the virtual CPU.
4329 * @param pidCpuPackage Where to store the package ID of the virtual CPU.
4330 *
4331 */
4332VMMR3DECL(int) VMR3GetCpuCoreAndPackageIdFromCpuId(PVM pVM, VMCPUID idCpu, uint32_t *pidCpuCore, uint32_t *pidCpuPackage)
4333{
4334 /*
4335 * Validate input.
4336 */
4337 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
4338 AssertPtrReturn(pidCpuCore, VERR_INVALID_POINTER);
4339 AssertPtrReturn(pidCpuPackage, VERR_INVALID_POINTER);
4340 if (idCpu >= pVM->cCpus)
4341 return VERR_INVALID_CPU_ID;
4342
4343 /*
4344 * Set return values.
4345 */
4346#ifdef VBOX_WITH_MULTI_CORE
4347 *pidCpuCore = idCpu;
4348 *pidCpuPackage = 0;
4349#else
4350 *pidCpuCore = 0;
4351 *pidCpuPackage = idCpu;
4352#endif
4353
4354 return VINF_SUCCESS;
4355}
4356
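/*
 * Illustrative only (added sketch): with VBOX_WITH_MULTI_CORE the guest sees
 * one package containing all the virtual CPUs as cores, otherwise each VCPU is
 * reported as its own single-core package.  Typical use from a caller
 * (variable names are made up):
 *
 * @code
 *     uint32_t idCore, idPackage;
 *     int rc = VMR3GetCpuCoreAndPackageIdFromCpuId(pVM, idCpu, &idCore, &idPackage);
 *     if (RT_SUCCESS(rc))
 *         Log(("VCPU %u -> core %u, package %u\n", idCpu, idCore, idPackage));
 * @endcode
 */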
4357
4358/**
4359 * Worker for VMR3HotUnplugCpu.
4360 *
4361 * @returns VINF_EM_WAIT_SIPI (strict status code).
4362 * @param pVM The VM handle.
4363 * @param idCpu The current CPU.
4364 */
4365static DECLCALLBACK(int) vmR3HotUnplugCpu(PVM pVM, VMCPUID idCpu)
4366{
4367 PVMCPU pVCpu = VMMGetCpuById(pVM, idCpu);
4368 VMCPU_ASSERT_EMT(pVCpu);
4369
4370 /*
4371 * Reset per CPU resources.
4372 *
4373 * Actually only needed for VT-x because the CPU seems to be still in some
4374 * paged mode and startup fails after a new hot plug event. SVM works fine
4375 * even without this.
4376 */
4377 Log(("vmR3HotUnplugCpu for VCPU %u\n", idCpu));
4378 PGMR3ResetUnpluggedCpu(pVM, pVCpu);
4379 PDMR3ResetCpu(pVCpu);
4380 TRPMR3ResetCpu(pVCpu);
4381 CPUMR3ResetCpu(pVCpu);
4382 EMR3ResetCpu(pVCpu);
4383 HWACCMR3ResetCpu(pVCpu);
4384 return VINF_EM_WAIT_SIPI;
4385}
4386
4387
4388/**
4389 * Hot-unplugs a CPU from the guest.
4390 *
4391 * @returns VBox status code.
4392 * @param pVM The VM to operate on.
4393 * @param idCpu Virtual CPU to perform the hot unplugging operation on.
4394 */
4395VMMR3DECL(int) VMR3HotUnplugCpu(PVM pVM, VMCPUID idCpu)
4396{
4397 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
4398 AssertReturn(idCpu < pVM->cCpus, VERR_INVALID_CPU_ID);
4399
4400 /** @todo r=bird: Don't destroy the EMT, it'll break VMMR3EmtRendezvous and
4401 * broadcast requests. Just note down somewhere that the CPU is
4402 * offline and send it to SIPI wait. Maybe modify VMCPUSTATE and push
4403 * it out of the EM loops when offline. */
4404 return VMR3ReqCallNoWaitU(pVM->pUVM, idCpu, (PFNRT)vmR3HotUnplugCpu, 2, pVM, idCpu);
4405}
4406
4407
4408/**
4409 * Hot-plugs a CPU on the guest.
4410 *
4411 * @returns VBox status code.
4412 * @param pVM The VM to operate on.
4413 * @param idCpu Virtual CPU to perform the hot plugging operation on.
4414 */
4415VMMR3DECL(int) VMR3HotPlugCpu(PVM pVM, VMCPUID idCpu)
4416{
4417 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
4418 AssertReturn(idCpu < pVM->cCpus, VERR_INVALID_CPU_ID);
4419
4420 /** @todo r=bird: Just mark it online and make sure it waits on SIPI. */
4421 return VINF_SUCCESS;
4422}
4423
4424
4425/**
4426 * Changes the VMM execution cap.
4427 *
4428 * @returns VBox status code.
4429 * @param pVM The VM to operate on.
4430 * @param uCpuExecutionCap New CPU execution cap in percent, 1-100. Where
4431 * 100 is max performance (default).
4432 */
4433VMMR3DECL(int) VMR3SetCpuExecutionCap(PVM pVM, uint32_t uCpuExecutionCap)
4434{
4435 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
4436 AssertReturn(uCpuExecutionCap > 0 && uCpuExecutionCap <= 100, VERR_INVALID_PARAMETER);
4437
4438 Log(("VMR3SetCpuExecutionCap: new priority = %d\n", uCpuExecutionCap));
4439 /* Note: not called from EMT. */
4440 pVM->uCpuExecutionCap = uCpuExecutionCap;
4441 return VINF_SUCCESS;
4442}
4443
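/*
 * Illustrative only (added sketch): throttling the VM to roughly half speed
 * and restoring full speed later.  This call only stores the new value in
 * pVM->uCpuExecutionCap; the actual throttling is enforced elsewhere in the
 * VMM.
 *
 * @code
 *     int rc = VMR3SetCpuExecutionCap(pVM, 50);   // cap execution at 50%
 *     ...
 *     rc = VMR3SetCpuExecutionCap(pVM, 100);      // back to full speed
 * @endcode
 */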