VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/VM.cpp@ 36441

Last change on this file since 36441 was 36441, checked in by vboxsync, 14 years ago

VMM: Sketched out where to do the initial I/O MMU setup. This adds a VMINITCOMPLETED_HWACCM and makes HWACCMR3InitFinalizeR0 private (invoked from HWACCMR3InitCompleted(,_RING0).

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 155.9 KB
Line 
1/* $Id: VM.cpp 36441 2011-03-25 21:11:56Z vboxsync $ */
2/** @file
3 * VM - Virtual Machine
4 */
5
6/*
7 * Copyright (C) 2006-2010 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/** @page pg_vm VM API
19 *
20 * This is the encapsulating bit. It provides the APIs that Main and VBoxBFE
21 * use to create a VMM instance for running a guest in. It also provides
22 * facilities for queuing request for execution in EMT (serialization purposes
23 * mostly) and for reporting error back to the VMM user (Main/VBoxBFE).
24 *
25 *
26 * @section sec_vm_design Design Critique / Things To Do
27 *
28 * In hindsight this component is a big design mistake, all this stuff really
29 * belongs in the VMM component. It just seemed like a kind of ok idea at a
 * time when the VMM bit was kind of vague. 'VM' also happened to be the name
31 * of the per-VM instance structure (see vm.h), so it kind of made sense.
 * However, as it turned out, VMM(.cpp) is almost empty; all it provides in
 * ring-3 is some minor functionality and some "routing" services.
34 *
35 * Fixing this is just a matter of some more or less straight forward
36 * refactoring, the question is just when someone will get to it. Moving the EMT
37 * would be a good start.
38 *
39 */
40
41/*******************************************************************************
42* Header Files *
43*******************************************************************************/
44#define LOG_GROUP LOG_GROUP_VM
45#include <VBox/vmm/cfgm.h>
46#include <VBox/vmm/vmm.h>
47#include <VBox/vmm/gvmm.h>
48#include <VBox/vmm/mm.h>
49#include <VBox/vmm/cpum.h>
50#include <VBox/vmm/selm.h>
51#include <VBox/vmm/trpm.h>
52#include <VBox/vmm/dbgf.h>
53#include <VBox/vmm/pgm.h>
54#include <VBox/vmm/pdmapi.h>
55#include <VBox/vmm/pdmcritsect.h>
56#include <VBox/vmm/em.h>
57#include <VBox/vmm/rem.h>
58#include <VBox/vmm/tm.h>
59#include <VBox/vmm/stam.h>
60#include <VBox/vmm/patm.h>
61#include <VBox/vmm/csam.h>
62#include <VBox/vmm/iom.h>
63#include <VBox/vmm/ssm.h>
64#include <VBox/vmm/ftm.h>
65#include <VBox/vmm/hwaccm.h>
66#include "VMInternal.h"
67#include <VBox/vmm/vm.h>
68#include <VBox/vmm/uvm.h>
69
70#include <VBox/sup.h>
71#include <VBox/dbg.h>
72#include <VBox/err.h>
73#include <VBox/param.h>
74#include <VBox/log.h>
75#include <iprt/assert.h>
76#include <iprt/alloc.h>
77#include <iprt/asm.h>
78#include <iprt/env.h>
79#include <iprt/string.h>
80#include <iprt/time.h>
81#include <iprt/semaphore.h>
82#include <iprt/thread.h>
83#include <iprt/uuid.h>
84
85
86/*******************************************************************************
87* Structures and Typedefs *
88*******************************************************************************/
89/**
90 * VM destruction callback registration record.
91 */
92typedef struct VMATDTOR
93{
94 /** Pointer to the next record in the list. */
95 struct VMATDTOR *pNext;
96 /** Pointer to the callback function. */
97 PFNVMATDTOR pfnAtDtor;
98 /** The user argument. */
99 void *pvUser;
100} VMATDTOR;
101/** Pointer to a VM destruction callback registration record. */
102typedef VMATDTOR *PVMATDTOR;
103
104
105/*******************************************************************************
106* Global Variables *
107*******************************************************************************/
108/** Pointer to the list of VMs. */
109static PUVM g_pUVMsHead = NULL;
110
111/** Pointer to the list of at VM destruction callbacks. */
112static PVMATDTOR g_pVMAtDtorHead = NULL;
113/** Lock the g_pVMAtDtorHead list. */
114#define VM_ATDTOR_LOCK() do { } while (0)
115/** Unlock the g_pVMAtDtorHead list. */
116#define VM_ATDTOR_UNLOCK() do { } while (0)
117
118
119/*******************************************************************************
120* Internal Functions *
121*******************************************************************************/
122static int vmR3CreateUVM(uint32_t cCpus, PCVMM2USERMETHODS pVmm2UserMethods, PUVM *ppUVM);
123static int vmR3CreateU(PUVM pUVM, uint32_t cCpus, PFNCFGMCONSTRUCTOR pfnCFGMConstructor, void *pvUserCFGM);
124static int vmR3InitRing3(PVM pVM, PUVM pUVM);
125static int vmR3InitRing0(PVM pVM);
126static int vmR3InitGC(PVM pVM);
127static int vmR3InitDoCompleted(PVM pVM, VMINITCOMPLETED enmWhat);
128static DECLCALLBACK(size_t) vmR3LogPrefixCallback(PRTLOGGER pLogger, char *pchBuf, size_t cchBuf, void *pvUser);
129static void vmR3DestroyUVM(PUVM pUVM, uint32_t cMilliesEMTWait);
130static void vmR3AtDtor(PVM pVM);
131static bool vmR3ValidateStateTransition(VMSTATE enmStateOld, VMSTATE enmStateNew);
132static void vmR3DoAtState(PVM pVM, PUVM pUVM, VMSTATE enmStateNew, VMSTATE enmStateOld);
133static int vmR3TrySetState(PVM pVM, const char *pszWho, unsigned cTransitions, ...);
134static void vmR3SetStateLocked(PVM pVM, PUVM pUVM, VMSTATE enmStateNew, VMSTATE enmStateOld);
135static void vmR3SetState(PVM pVM, VMSTATE enmStateNew, VMSTATE enmStateOld);
136static int vmR3SetErrorU(PUVM pUVM, int rc, RT_SRC_POS_DECL, const char *pszFormat, ...);
137
138
139/**
140 * Do global VMM init.
141 *
142 * @returns VBox status code.
143 */
144VMMR3DECL(int) VMR3GlobalInit(void)
145{
146 /*
147 * Only once.
148 */
149 static bool volatile s_fDone = false;
150 if (s_fDone)
151 return VINF_SUCCESS;
152
153 /*
154 * We're done.
155 */
156 s_fDone = true;
157 return VINF_SUCCESS;
158}
159
160
161
162/**
163 * Creates a virtual machine by calling the supplied configuration constructor.
164 *
165 * On successful returned the VM is powered, i.e. VMR3PowerOn() should be
166 * called to start the execution.
167 *
168 * @returns 0 on success.
169 * @returns VBox error code on failure.
170 * @param cCpus Number of virtual CPUs for the new VM.
171 * @param pVmm2UserMethods An optional method table that the VMM can use
172 * to make the user perform various action, like
173 * for instance state saving.
174 * @param pfnVMAtError Pointer to callback function for setting VM
175 * errors. This was added as an implicit call to
176 * VMR3AtErrorRegister() since there is no way the
177 * caller can get to the VM handle early enough to
178 * do this on its own.
179 * This is called in the context of an EMT.
180 * @param pvUserVM The user argument passed to pfnVMAtError.
181 * @param pfnCFGMConstructor Pointer to callback function for constructing the VM configuration tree.
182 * This is called in the context of an EMT0.
183 * @param pvUserCFGM The user argument passed to pfnCFGMConstructor.
184 * @param ppVM Where to store the 'handle' of the created VM.
185 */
186VMMR3DECL(int) VMR3Create(uint32_t cCpus, PCVMM2USERMETHODS pVmm2UserMethods,
187 PFNVMATERROR pfnVMAtError, void *pvUserVM,
188 PFNCFGMCONSTRUCTOR pfnCFGMConstructor, void *pvUserCFGM,
189 PVM *ppVM)
190{
191 LogFlow(("VMR3Create: cCpus=%RU32 pVmm2UserMethods=%p pfnVMAtError=%p pvUserVM=%p pfnCFGMConstructor=%p pvUserCFGM=%p ppVM=%p\n",
192 cCpus, pVmm2UserMethods, pfnVMAtError, pvUserVM, pfnCFGMConstructor, pvUserCFGM, ppVM));
193
194 if (pVmm2UserMethods)
195 {
196 AssertPtrReturn(pVmm2UserMethods, VERR_INVALID_POINTER);
197 AssertReturn(pVmm2UserMethods->u32Magic == VMM2USERMETHODS_MAGIC, VERR_INVALID_PARAMETER);
198 AssertReturn(pVmm2UserMethods->u32Version == VMM2USERMETHODS_VERSION, VERR_INVALID_PARAMETER);
199 AssertPtrNullReturn(pVmm2UserMethods->pfnSaveState, VERR_INVALID_POINTER);
200 AssertPtrNullReturn(pVmm2UserMethods->pfnNotifyEmtInit, VERR_INVALID_POINTER);
201 AssertPtrNullReturn(pVmm2UserMethods->pfnNotifyEmtTerm, VERR_INVALID_POINTER);
202 AssertPtrNullReturn(pVmm2UserMethods->pfnNotifyPdmtInit, VERR_INVALID_POINTER);
203 AssertPtrNullReturn(pVmm2UserMethods->pfnNotifyPdmtTerm, VERR_INVALID_POINTER);
204 AssertReturn(pVmm2UserMethods->u32EndMagic == VMM2USERMETHODS_MAGIC, VERR_INVALID_PARAMETER);
205 }
206 AssertPtrNullReturn(pfnVMAtError, VERR_INVALID_POINTER);
207 AssertPtrNullReturn(pfnCFGMConstructor, VERR_INVALID_POINTER);
208 AssertPtrReturn(ppVM, VERR_INVALID_POINTER);
209
210 /*
211 * Because of the current hackiness of the applications
212 * we'll have to initialize global stuff from here.
213 * Later the applications will take care of this in a proper way.
214 */
215 static bool fGlobalInitDone = false;
216 if (!fGlobalInitDone)
217 {
218 int rc = VMR3GlobalInit();
219 if (RT_FAILURE(rc))
220 return rc;
221 fGlobalInitDone = true;
222 }
223
224 /*
225 * Validate input.
226 */
227 AssertLogRelMsgReturn(cCpus > 0 && cCpus <= VMM_MAX_CPU_COUNT, ("%RU32\n", cCpus), VERR_TOO_MANY_CPUS);
228
229 /*
230 * Create the UVM so we can register the at-error callback
231 * and consolidate a bit of cleanup code.
232 */
233 PUVM pUVM = NULL; /* shuts up gcc */
234 int rc = vmR3CreateUVM(cCpus, pVmm2UserMethods, &pUVM);
235 if (RT_FAILURE(rc))
236 return rc;
237 if (pfnVMAtError)
238 rc = VMR3AtErrorRegisterU(pUVM, pfnVMAtError, pvUserVM);
239 if (RT_SUCCESS(rc))
240 {
241 /*
242 * Initialize the support library creating the session for this VM.
243 */
244 rc = SUPR3Init(&pUVM->vm.s.pSession);
245 if (RT_SUCCESS(rc))
246 {
247 /*
248 * Call vmR3CreateU in the EMT thread and wait for it to finish.
249 *
250 * Note! VMCPUID_ANY is used here because VMR3ReqQueueU would have trouble
251 * submitting a request to a specific VCPU without a pVM. So, to make
252 * sure init is running on EMT(0), vmR3EmulationThreadWithId makes sure
253 * that only EMT(0) is servicing VMCPUID_ANY requests when pVM is NULL.
254 */
255 PVMREQ pReq;
256 rc = VMR3ReqCallU(pUVM, VMCPUID_ANY, &pReq, RT_INDEFINITE_WAIT, VMREQFLAGS_VBOX_STATUS,
257 (PFNRT)vmR3CreateU, 4, pUVM, cCpus, pfnCFGMConstructor, pvUserCFGM);
258 if (RT_SUCCESS(rc))
259 {
260 rc = pReq->iStatus;
261 VMR3ReqFree(pReq);
262 if (RT_SUCCESS(rc))
263 {
264 /*
265 * Success!
266 */
267 *ppVM = pUVM->pVM;
268 LogFlow(("VMR3Create: returns VINF_SUCCESS *ppVM=%p\n", *ppVM));
269 return VINF_SUCCESS;
270 }
271 }
272 else
273 AssertMsgFailed(("VMR3ReqCallU failed rc=%Rrc\n", rc));
274
275 /*
276 * An error occurred during VM creation. Set the error message directly
277 * using the initial callback, as the callback list might not exist yet.
278 */
279 const char *pszError;
280 switch (rc)
281 {
282 case VERR_VMX_IN_VMX_ROOT_MODE:
283#ifdef RT_OS_LINUX
284 pszError = N_("VirtualBox can't operate in VMX root mode. "
285 "Please disable the KVM kernel extension, recompile your kernel and reboot");
286#else
287 pszError = N_("VirtualBox can't operate in VMX root mode. Please close all other virtualization programs.");
288#endif
289 break;
290
291#ifndef RT_OS_DARWIN
292 case VERR_HWACCM_CONFIG_MISMATCH:
293 pszError = N_("VT-x/AMD-V is either not available on your host or disabled. "
294 "This hardware extension is required by the VM configuration");
295 break;
296#endif
297
298 case VERR_SVM_IN_USE:
299#ifdef RT_OS_LINUX
300 pszError = N_("VirtualBox can't enable the AMD-V extension. "
301 "Please disable the KVM kernel extension, recompile your kernel and reboot");
302#else
303 pszError = N_("VirtualBox can't enable the AMD-V extension. Please close all other virtualization programs.");
304#endif
305 break;
306
307#ifdef RT_OS_LINUX
308 case VERR_SUPDRV_COMPONENT_NOT_FOUND:
309 pszError = N_("One of the kernel modules was not successfully loaded. Make sure "
310 "that no kernel modules from an older version of VirtualBox exist. "
311 "Then try to recompile and reload the kernel modules by executing "
312 "'/etc/init.d/vboxdrv setup' as root");
313 break;
314#endif
315
316 case VERR_RAW_MODE_INVALID_SMP:
317 pszError = N_("VT-x/AMD-V is either not available on your host or disabled. "
318 "VirtualBox requires this hardware extension to emulate more than one "
319 "guest CPU");
320 break;
321
322 case VERR_SUPDRV_KERNEL_TOO_OLD_FOR_VTX:
323#ifdef RT_OS_LINUX
324 pszError = N_("Because the host kernel is too old, VirtualBox cannot enable the VT-x "
325 "extension. Either upgrade your kernel to Linux 2.6.13 or later or disable "
326 "the VT-x extension in the VM settings. Note that without VT-x you have "
327 "to reduce the number of guest CPUs to one");
328#else
329 pszError = N_("Because the host kernel is too old, VirtualBox cannot enable the VT-x "
330 "extension. Either upgrade your kernel or disable the VT-x extension in the "
331 "VM settings. Note that without VT-x you have to reduce the number of guest "
332 "CPUs to one");
333#endif
334 break;
335
336 case VERR_PDM_DEVICE_NOT_FOUND:
337 pszError = N_("A virtual device is configured in the VM settings but the device "
338 "implementation is missing.\n"
339 "A possible reason for this error is a missing extension pack. Note "
340 "that as of VirtualBox 4.0, certain features (for example USB 2.0 "
341 "support and remote desktop) are only available from an 'extension "
342 "pack' which must be downloaded and installed separately");
343 break;
344
345 default:
346 if (VMR3GetErrorCountU(pUVM) == 0)
347 pszError = RTErrGetFull(rc);
348 else
349 pszError = NULL; /* already set. */
350 break;
351 }
352 if (pszError)
353 vmR3SetErrorU(pUVM, rc, RT_SRC_POS, pszError, rc);
354 }
355 else
356 {
357 /*
358 * An error occurred at support library initialization time (before the
359 * VM could be created). Set the error message directly using the
360 * initial callback, as the callback list doesn't exist yet.
361 */
362 const char *pszError;
363 switch (rc)
364 {
365 case VERR_VM_DRIVER_LOAD_ERROR:
366#ifdef RT_OS_LINUX
367 pszError = N_("VirtualBox kernel driver not loaded. The vboxdrv kernel module "
368 "was either not loaded or /dev/vboxdrv is not set up properly. "
369 "Re-setup the kernel module by executing "
370 "'/etc/init.d/vboxdrv setup' as root");
371#else
372 pszError = N_("VirtualBox kernel driver not loaded");
373#endif
374 break;
375 case VERR_VM_DRIVER_OPEN_ERROR:
376 pszError = N_("VirtualBox kernel driver cannot be opened");
377 break;
378 case VERR_VM_DRIVER_NOT_ACCESSIBLE:
379#ifdef VBOX_WITH_HARDENING
380 /* This should only happen if the executable wasn't hardened - bad code/build. */
381 pszError = N_("VirtualBox kernel driver not accessible, permission problem. "
382 "Re-install VirtualBox. If you are building it yourself, you "
383 "should make sure it installed correctly and that the setuid "
384 "bit is set on the executables calling VMR3Create.");
385#else
386 /* This should only happen when mixing builds or with the usual /dev/vboxdrv access issues. */
387# if defined(RT_OS_DARWIN)
388 pszError = N_("VirtualBox KEXT is not accessible, permission problem. "
389 "If you have built VirtualBox yourself, make sure that you do not "
390 "have the vboxdrv KEXT from a different build or installation loaded.");
391# elif defined(RT_OS_LINUX)
392 pszError = N_("VirtualBox kernel driver is not accessible, permission problem. "
393 "If you have built VirtualBox yourself, make sure that you do "
394 "not have the vboxdrv kernel module from a different build or "
395 "installation loaded. Also, make sure the vboxdrv udev rule gives "
396 "you the permission you need to access the device.");
397# elif defined(RT_OS_WINDOWS)
398 pszError = N_("VirtualBox kernel driver is not accessible, permission problem.");
399# else /* solaris, freebsd, ++. */
400 pszError = N_("VirtualBox kernel module is not accessible, permission problem. "
401 "If you have built VirtualBox yourself, make sure that you do "
402 "not have the vboxdrv kernel module from a different install loaded.");
403# endif
404#endif
405 break;
406 case VERR_INVALID_HANDLE: /** @todo track down and fix this error. */
407 case VERR_VM_DRIVER_NOT_INSTALLED:
408#ifdef RT_OS_LINUX
409 pszError = N_("VirtualBox kernel driver not installed. The vboxdrv kernel module "
410 "was either not loaded or /dev/vboxdrv was not created for some "
411 "reason. Re-setup the kernel module by executing "
412 "'/etc/init.d/vboxdrv setup' as root");
413#else
414 pszError = N_("VirtualBox kernel driver not installed");
415#endif
416 break;
417 case VERR_NO_MEMORY:
418 pszError = N_("VirtualBox support library out of memory");
419 break;
420 case VERR_VERSION_MISMATCH:
421 case VERR_VM_DRIVER_VERSION_MISMATCH:
422 pszError = N_("The VirtualBox support driver which is running is from a different "
423 "version of VirtualBox. You can correct this by stopping all "
424 "running instances of VirtualBox and reinstalling the software.");
425 break;
426 default:
427 pszError = N_("Unknown error initializing kernel driver");
428 AssertMsgFailed(("Add error message for rc=%d (%Rrc)\n", rc, rc));
429 }
430 vmR3SetErrorU(pUVM, rc, RT_SRC_POS, pszError, rc);
431 }
432 }
433
434 /* cleanup */
435 vmR3DestroyUVM(pUVM, 2000);
436 LogFlow(("VMR3Create: returns %Rrc\n", rc));
437 return rc;
438}
439
440
441/**
442 * Creates the UVM.
443 *
444 * This will not initialize the support library even if vmR3DestroyUVM
445 * will terminate that.
446 *
447 * @returns VBox status code.
448 * @param cCpus Number of virtual CPUs
449 * @param pVmm2UserMethods Pointer to the optional VMM -> User method
450 * table.
451 * @param ppUVM Where to store the UVM pointer.
452 */
453static int vmR3CreateUVM(uint32_t cCpus, PCVMM2USERMETHODS pVmm2UserMethods, PUVM *ppUVM)
454{
455 uint32_t i;
456
457 /*
458 * Create and initialize the UVM.
459 */
460 PUVM pUVM = (PUVM)RTMemPageAllocZ(RT_OFFSETOF(UVM, aCpus[cCpus]));
461 AssertReturn(pUVM, VERR_NO_MEMORY);
462 pUVM->u32Magic = UVM_MAGIC;
463 pUVM->cCpus = cCpus;
464 pUVM->pVmm2UserMethods = pVmm2UserMethods;
465
466 AssertCompile(sizeof(pUVM->vm.s) <= sizeof(pUVM->vm.padding));
467
468 pUVM->vm.s.cUvmRefs = 1;
469 pUVM->vm.s.ppAtStateNext = &pUVM->vm.s.pAtState;
470 pUVM->vm.s.ppAtErrorNext = &pUVM->vm.s.pAtError;
471 pUVM->vm.s.ppAtRuntimeErrorNext = &pUVM->vm.s.pAtRuntimeError;
472
473 pUVM->vm.s.enmHaltMethod = VMHALTMETHOD_BOOTSTRAP;
474 RTUuidClear(&pUVM->vm.s.Uuid);
475
476 /* Initialize the VMCPU array in the UVM. */
477 for (i = 0; i < cCpus; i++)
478 {
479 pUVM->aCpus[i].pUVM = pUVM;
480 pUVM->aCpus[i].idCpu = i;
481 }
482
483 /* Allocate a TLS entry to store the VMINTUSERPERVMCPU pointer. */
484 int rc = RTTlsAllocEx(&pUVM->vm.s.idxTLS, NULL);
485 AssertRC(rc);
486 if (RT_SUCCESS(rc))
487 {
488 /* Allocate a halt method event semaphore for each VCPU. */
489 for (i = 0; i < cCpus; i++)
490 pUVM->aCpus[i].vm.s.EventSemWait = NIL_RTSEMEVENT;
491 for (i = 0; i < cCpus; i++)
492 {
493 rc = RTSemEventCreate(&pUVM->aCpus[i].vm.s.EventSemWait);
494 if (RT_FAILURE(rc))
495 break;
496 }
497 if (RT_SUCCESS(rc))
498 {
499 rc = RTCritSectInit(&pUVM->vm.s.AtStateCritSect);
500 if (RT_SUCCESS(rc))
501 {
502 rc = RTCritSectInit(&pUVM->vm.s.AtErrorCritSect);
503 if (RT_SUCCESS(rc))
504 {
505 /*
506 * Init fundamental (sub-)components - STAM, MMR3Heap and PDMLdr.
507 */
508 rc = STAMR3InitUVM(pUVM);
509 if (RT_SUCCESS(rc))
510 {
511 rc = MMR3InitUVM(pUVM);
512 if (RT_SUCCESS(rc))
513 {
514 rc = PDMR3InitUVM(pUVM);
515 if (RT_SUCCESS(rc))
516 {
517 /*
518 * Start the emulation threads for all VMCPUs.
519 */
520 for (i = 0; i < cCpus; i++)
521 {
522 rc = RTThreadCreateF(&pUVM->aCpus[i].vm.s.ThreadEMT, vmR3EmulationThread, &pUVM->aCpus[i], _1M,
523 RTTHREADTYPE_EMULATION, RTTHREADFLAGS_WAITABLE,
524 cCpus > 1 ? "EMT-%u" : "EMT", i);
525 if (RT_FAILURE(rc))
526 break;
527
528 pUVM->aCpus[i].vm.s.NativeThreadEMT = RTThreadGetNative(pUVM->aCpus[i].vm.s.ThreadEMT);
529 }
530
531 if (RT_SUCCESS(rc))
532 {
533 *ppUVM = pUVM;
534 return VINF_SUCCESS;
535 }
536
537 /* bail out. */
538 while (i-- > 0)
539 {
540 /** @todo rainy day: terminate the EMTs. */
541 }
542 PDMR3TermUVM(pUVM);
543 }
544 MMR3TermUVM(pUVM);
545 }
546 STAMR3TermUVM(pUVM);
547 }
548 RTCritSectDelete(&pUVM->vm.s.AtErrorCritSect);
549 }
550 RTCritSectDelete(&pUVM->vm.s.AtStateCritSect);
551 }
552 }
553 for (i = 0; i < cCpus; i++)
554 {
555 RTSemEventDestroy(pUVM->aCpus[i].vm.s.EventSemWait);
556 pUVM->aCpus[i].vm.s.EventSemWait = NIL_RTSEMEVENT;
557 }
558 RTTlsFree(pUVM->vm.s.idxTLS);
559 }
560 RTMemPageFree(pUVM, RT_OFFSETOF(UVM, aCpus[pUVM->cCpus]));
561 return rc;
562}
563
564
565/**
566 * Creates and initializes the VM.
567 *
568 * @thread EMT
569 */
570static int vmR3CreateU(PUVM pUVM, uint32_t cCpus, PFNCFGMCONSTRUCTOR pfnCFGMConstructor, void *pvUserCFGM)
571{
572 /*
573 * Load the VMMR0.r0 module so that we can call GVMMR0CreateVM.
574 */
575 int rc = PDMR3LdrLoadVMMR0U(pUVM);
576 if (RT_FAILURE(rc))
577 {
578 /** @todo we need a cleaner solution for this (VERR_VMX_IN_VMX_ROOT_MODE).
579 * bird: what about moving the message down here? Main picks the first message, right? */
580 if (rc == VERR_VMX_IN_VMX_ROOT_MODE)
581 return rc; /* proper error message set later on */
582 return vmR3SetErrorU(pUVM, rc, RT_SRC_POS, N_("Failed to load VMMR0.r0"));
583 }
584
585 /*
586 * Request GVMM to create a new VM for us.
587 */
588 GVMMCREATEVMREQ CreateVMReq;
589 CreateVMReq.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
590 CreateVMReq.Hdr.cbReq = sizeof(CreateVMReq);
591 CreateVMReq.pSession = pUVM->vm.s.pSession;
592 CreateVMReq.pVMR0 = NIL_RTR0PTR;
593 CreateVMReq.pVMR3 = NULL;
594 CreateVMReq.cCpus = cCpus;
595 rc = SUPR3CallVMMR0Ex(NIL_RTR0PTR, NIL_VMCPUID, VMMR0_DO_GVMM_CREATE_VM, 0, &CreateVMReq.Hdr);
596 if (RT_SUCCESS(rc))
597 {
598 PVM pVM = pUVM->pVM = CreateVMReq.pVMR3;
599 AssertRelease(VALID_PTR(pVM));
600 AssertRelease(pVM->pVMR0 == CreateVMReq.pVMR0);
601 AssertRelease(pVM->pSession == pUVM->vm.s.pSession);
602 AssertRelease(pVM->cCpus == cCpus);
603 AssertRelease(pVM->uCpuExecutionCap == 100);
604 AssertRelease(pVM->offVMCPU == RT_UOFFSETOF(VM, aCpus));
605
606 Log(("VMR3Create: Created pUVM=%p pVM=%p pVMR0=%p hSelf=%#x cCpus=%RU32\n",
607 pUVM, pVM, pVM->pVMR0, pVM->hSelf, pVM->cCpus));
608
609 /*
610 * Initialize the VM structure and our internal data (VMINT).
611 */
612 pVM->pUVM = pUVM;
613
614 for (VMCPUID i = 0; i < pVM->cCpus; i++)
615 {
616 pVM->aCpus[i].pUVCpu = &pUVM->aCpus[i];
617 pVM->aCpus[i].idCpu = i;
618 pVM->aCpus[i].hNativeThread = pUVM->aCpus[i].vm.s.NativeThreadEMT;
619 Assert(pVM->aCpus[i].hNativeThread != NIL_RTNATIVETHREAD);
620 /* hNativeThreadR0 is initialized on EMT registration. */
621 pUVM->aCpus[i].pVCpu = &pVM->aCpus[i];
622 pUVM->aCpus[i].pVM = pVM;
623 }
624
625
626 /*
627 * Init the configuration.
628 */
629 rc = CFGMR3Init(pVM, pfnCFGMConstructor, pvUserCFGM);
630 if (RT_SUCCESS(rc))
631 {
632 PCFGMNODE pRoot = CFGMR3GetRoot(pVM);
633 rc = CFGMR3QueryBoolDef(pRoot, "HwVirtExtForced", &pVM->fHwVirtExtForced, false);
634 if (RT_SUCCESS(rc) && pVM->fHwVirtExtForced)
635 pVM->fHWACCMEnabled = true;
636
637 /*
638 * If executing in fake suplib mode disable RR3 and RR0 in the config.
639 */
640 const char *psz = RTEnvGet("VBOX_SUPLIB_FAKE");
641 if (psz && !strcmp(psz, "fake"))
642 {
643 CFGMR3RemoveValue(pRoot, "RawR3Enabled");
644 CFGMR3InsertInteger(pRoot, "RawR3Enabled", 0);
645 CFGMR3RemoveValue(pRoot, "RawR0Enabled");
646 CFGMR3InsertInteger(pRoot, "RawR0Enabled", 0);
647 }
648
649 /*
650 * Make sure the CPU count in the config data matches.
651 */
652 if (RT_SUCCESS(rc))
653 {
654 uint32_t cCPUsCfg;
655 rc = CFGMR3QueryU32Def(pRoot, "NumCPUs", &cCPUsCfg, 1);
656 AssertLogRelMsgRC(rc, ("Configuration error: Querying \"NumCPUs\" as integer failed, rc=%Rrc\n", rc));
657 if (RT_SUCCESS(rc) && cCPUsCfg != cCpus)
658 {
659 AssertLogRelMsgFailed(("Configuration error: \"NumCPUs\"=%RU32 and VMR3CreateVM::cCpus=%RU32 does not match!\n",
660 cCPUsCfg, cCpus));
661 rc = VERR_INVALID_PARAMETER;
662 }
663 }
664
665 /*
666 * Get the CPU execution cap.
667 */
668 if (RT_SUCCESS(rc))
669 {
670 rc = CFGMR3QueryU32Def(pRoot, "CpuExecutionCap", &pVM->uCpuExecutionCap, 100);
671 AssertLogRelMsgRC(rc, ("Configuration error: Querying \"CpuExecutionCap\" as integer failed, rc=%Rrc\n", rc));
672 }
673
674 /*
675 * Get the VM name and UUID.
676 */
677 if (RT_SUCCESS(rc))
678 {
679 rc = CFGMR3QueryStringAllocDef(pRoot, "Name", &pUVM->vm.s.pszName, "<unknown>");
680 AssertLogRelMsg(RT_SUCCESS(rc) && rc != VERR_CFGM_VALUE_NOT_FOUND, ("Configuration error: Querying \"Name\" failed, rc=%Rrc\n", rc));
681 }
682
683 if (RT_SUCCESS(rc))
684 {
685 rc = CFGMR3QueryBytes(pRoot, "UUID", &pUVM->vm.s.Uuid, sizeof(pUVM->vm.s.Uuid));
686 AssertLogRelMsg(RT_SUCCESS(rc) && rc != VERR_CFGM_VALUE_NOT_FOUND, ("Configuration error: Querying \"UUID\" failed, rc=%Rrc\n", rc));
687 }
688
689 if (RT_SUCCESS(rc))
690 {
691 /*
692 * Init the ring-3 components and ring-3 per cpu data, finishing it off
693 * by a relocation round (intermediate context finalization will do this).
694 */
695 rc = vmR3InitRing3(pVM, pUVM);
696 if (RT_SUCCESS(rc))
697 {
698 rc = PGMR3FinalizeMappings(pVM);
699 if (RT_SUCCESS(rc))
700 {
701
702 LogFlow(("Ring-3 init succeeded\n"));
703
704 /*
705 * Init the Ring-0 components.
706 */
707 rc = vmR3InitRing0(pVM);
708 if (RT_SUCCESS(rc))
709 {
710 /* Relocate again, because some switcher fixups depends on R0 init results. */
711 VMR3Relocate(pVM, 0);
712
713#ifdef VBOX_WITH_DEBUGGER
714 /*
715 * Init the tcp debugger console if we're building
716 * with debugger support.
717 */
718 void *pvUser = NULL;
719 rc = DBGCTcpCreate(pVM, &pvUser);
720 if ( RT_SUCCESS(rc)
721 || rc == VERR_NET_ADDRESS_IN_USE)
722 {
723 pUVM->vm.s.pvDBGC = pvUser;
724#endif
725 /*
726 * Init the Guest Context components.
727 */
728 rc = vmR3InitGC(pVM);
729 if (RT_SUCCESS(rc))
730 {
731 /*
732 * Now we can safely set the VM halt method to default.
733 */
734 rc = vmR3SetHaltMethodU(pUVM, VMHALTMETHOD_DEFAULT);
735 if (RT_SUCCESS(rc))
736 {
737 /*
738 * Set the state and link into the global list.
739 */
740 vmR3SetState(pVM, VMSTATE_CREATED, VMSTATE_CREATING);
741 pUVM->pNext = g_pUVMsHead;
742 g_pUVMsHead = pUVM;
743
744#ifdef LOG_ENABLED
745 RTLogSetCustomPrefixCallback(NULL, vmR3LogPrefixCallback, pUVM);
746#endif
747 return VINF_SUCCESS;
748 }
749 }
750#ifdef VBOX_WITH_DEBUGGER
751 DBGCTcpTerminate(pVM, pUVM->vm.s.pvDBGC);
752 pUVM->vm.s.pvDBGC = NULL;
753 }
754#endif
755 //..
756 }
757 }
758 vmR3Destroy(pVM);
759 }
760 }
761 //..
762
763 /* Clean CFGM. */
764 int rc2 = CFGMR3Term(pVM);
765 AssertRC(rc2);
766 }
767
768 /*
769 * Do automatic cleanups while the VM structure is still alive and all
770 * references to it are still working.
771 */
772 PDMR3CritSectTerm(pVM);
773
774 /*
775 * Drop all references to VM and the VMCPU structures, then
776 * tell GVMM to destroy the VM.
777 */
778 pUVM->pVM = NULL;
779 for (VMCPUID i = 0; i < pUVM->cCpus; i++)
780 {
781 pUVM->aCpus[i].pVM = NULL;
782 pUVM->aCpus[i].pVCpu = NULL;
783 }
784 Assert(pUVM->vm.s.enmHaltMethod == VMHALTMETHOD_BOOTSTRAP);
785
786 if (pUVM->cCpus > 1)
787 {
788 /* Poke the other EMTs since they may have stale pVM and pVCpu references
789 on the stack (see VMR3WaitU for instance) if they've been awakened after
790 VM creation. */
791 for (VMCPUID i = 1; i < pUVM->cCpus; i++)
792 VMR3NotifyCpuFFU(&pUVM->aCpus[i], 0);
793 RTThreadSleep(RT_MIN(100 + 25 *(pUVM->cCpus - 1), 500)); /* very sophisticated */
794 }
795
796 int rc2 = SUPR3CallVMMR0Ex(CreateVMReq.pVMR0, 0 /*idCpu*/, VMMR0_DO_GVMM_DESTROY_VM, 0, NULL);
797 AssertRC(rc2);
798 }
799 else
800 vmR3SetErrorU(pUVM, rc, RT_SRC_POS, N_("VM creation failed (GVMM)"));
801
802 LogFlow(("vmR3CreateU: returns %Rrc\n", rc));
803 return rc;
804}
805
806
807/**
808 * Register the calling EMT with GVM.
809 *
810 * @returns VBox status code.
811 * @param pVM The VM handle.
812 * @param idCpu The Virtual CPU ID.
813 */
814static DECLCALLBACK(int) vmR3RegisterEMT(PVM pVM, VMCPUID idCpu)
815{
816 Assert(VMMGetCpuId(pVM) == idCpu);
817 int rc = SUPR3CallVMMR0Ex(pVM->pVMR0, idCpu, VMMR0_DO_GVMM_REGISTER_VMCPU, 0, NULL);
818 if (RT_FAILURE(rc))
819 LogRel(("idCpu=%u rc=%Rrc\n", idCpu, rc));
820 return rc;
821}
822
823
824/**
825 * Initializes all R3 components of the VM
826 */
827static int vmR3InitRing3(PVM pVM, PUVM pUVM)
828{
829 int rc;
830
831 /*
832 * Register the other EMTs with GVM.
833 */
834 for (VMCPUID idCpu = 1; idCpu < pVM->cCpus; idCpu++)
835 {
836 rc = VMR3ReqCallWaitU(pUVM, idCpu, (PFNRT)vmR3RegisterEMT, 2, pVM, idCpu);
837 if (RT_FAILURE(rc))
838 return rc;
839 }
840
841 /*
842 * Init all R3 components, the order here might be important.
843 */
844 rc = MMR3Init(pVM);
845 if (RT_SUCCESS(rc))
846 {
847 STAM_REG(pVM, &pVM->StatTotalInGC, STAMTYPE_PROFILE_ADV, "/PROF/VM/InGC", STAMUNIT_TICKS_PER_CALL, "Profiling the total time spent in GC.");
848 STAM_REG(pVM, &pVM->StatSwitcherToGC, STAMTYPE_PROFILE_ADV, "/PROF/VM/SwitchToGC", STAMUNIT_TICKS_PER_CALL, "Profiling switching to GC.");
849 STAM_REG(pVM, &pVM->StatSwitcherToHC, STAMTYPE_PROFILE_ADV, "/PROF/VM/SwitchToHC", STAMUNIT_TICKS_PER_CALL, "Profiling switching to HC.");
850 STAM_REG(pVM, &pVM->StatSwitcherSaveRegs, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/SaveRegs", STAMUNIT_TICKS_PER_CALL,"Profiling switching to GC.");
851 STAM_REG(pVM, &pVM->StatSwitcherSysEnter, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/SysEnter", STAMUNIT_TICKS_PER_CALL,"Profiling switching to GC.");
852 STAM_REG(pVM, &pVM->StatSwitcherDebug, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/Debug", STAMUNIT_TICKS_PER_CALL,"Profiling switching to GC.");
853 STAM_REG(pVM, &pVM->StatSwitcherCR0, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/CR0", STAMUNIT_TICKS_PER_CALL, "Profiling switching to GC.");
854 STAM_REG(pVM, &pVM->StatSwitcherCR4, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/CR4", STAMUNIT_TICKS_PER_CALL, "Profiling switching to GC.");
855 STAM_REG(pVM, &pVM->StatSwitcherLgdt, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/Lgdt", STAMUNIT_TICKS_PER_CALL, "Profiling switching to GC.");
856 STAM_REG(pVM, &pVM->StatSwitcherLidt, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/Lidt", STAMUNIT_TICKS_PER_CALL, "Profiling switching to GC.");
857 STAM_REG(pVM, &pVM->StatSwitcherLldt, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/Lldt", STAMUNIT_TICKS_PER_CALL, "Profiling switching to GC.");
858 STAM_REG(pVM, &pVM->StatSwitcherTSS, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/TSS", STAMUNIT_TICKS_PER_CALL, "Profiling switching to GC.");
859 STAM_REG(pVM, &pVM->StatSwitcherJmpCR3, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/JmpCR3", STAMUNIT_TICKS_PER_CALL,"Profiling switching to GC.");
860 STAM_REG(pVM, &pVM->StatSwitcherRstrRegs, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/RstrRegs", STAMUNIT_TICKS_PER_CALL,"Profiling switching to GC.");
861
862 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
863 {
864 rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltYield, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "Profiling halted state yielding.", "/PROF/VM/CPU%d/Halt/Yield", idCpu);
865 AssertRC(rc);
866 rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltBlock, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "Profiling halted state blocking.", "/PROF/VM/CPU%d/Halt/Block", idCpu);
867 AssertRC(rc);
868 rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltBlockOverslept, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "Time wasted by blocking too long.", "/PROF/VM/CPU%d/Halt/BlockOverslept", idCpu);
869 AssertRC(rc);
870 rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltBlockInsomnia, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "Time slept when returning to early.","/PROF/VM/CPU%d/Halt/BlockInsomnia", idCpu);
871 AssertRC(rc);
872 rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltBlockOnTime, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "Time slept on time.", "/PROF/VM/CPU%d/Halt/BlockOnTime", idCpu);
873 AssertRC(rc);
874 rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltTimers, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "Profiling halted state timer tasks.", "/PROF/VM/CPU%d/Halt/Timers", idCpu);
875 AssertRC(rc);
876 }
877
878 STAM_REG(pVM, &pUVM->vm.s.StatReqAllocNew, STAMTYPE_COUNTER, "/VM/Req/AllocNew", STAMUNIT_OCCURENCES, "Number of VMR3ReqAlloc returning a new packet.");
879 STAM_REG(pVM, &pUVM->vm.s.StatReqAllocRaces, STAMTYPE_COUNTER, "/VM/Req/AllocRaces", STAMUNIT_OCCURENCES, "Number of VMR3ReqAlloc causing races.");
880 STAM_REG(pVM, &pUVM->vm.s.StatReqAllocRecycled, STAMTYPE_COUNTER, "/VM/Req/AllocRecycled", STAMUNIT_OCCURENCES, "Number of VMR3ReqAlloc returning a recycled packet.");
881 STAM_REG(pVM, &pUVM->vm.s.StatReqFree, STAMTYPE_COUNTER, "/VM/Req/Free", STAMUNIT_OCCURENCES, "Number of VMR3ReqFree calls.");
882 STAM_REG(pVM, &pUVM->vm.s.StatReqFreeOverflow, STAMTYPE_COUNTER, "/VM/Req/FreeOverflow", STAMUNIT_OCCURENCES, "Number of times the request was actually freed.");
883 STAM_REG(pVM, &pUVM->vm.s.StatReqProcessed, STAMTYPE_COUNTER, "/VM/Req/Processed", STAMUNIT_OCCURENCES, "Number of processed requests (any queue).");
884 STAM_REG(pVM, &pUVM->vm.s.StatReqMoreThan1, STAMTYPE_COUNTER, "/VM/Req/MoreThan1", STAMUNIT_OCCURENCES, "Number of times there are more than one request on the queue when processing it.");
885 STAM_REG(pVM, &pUVM->vm.s.StatReqPushBackRaces, STAMTYPE_COUNTER, "/VM/Req/PushBackRaces", STAMUNIT_OCCURENCES, "Number of push back races.");
886
887 rc = CPUMR3Init(pVM);
888 if (RT_SUCCESS(rc))
889 {
890 rc = HWACCMR3Init(pVM);
891 if (RT_SUCCESS(rc))
892 {
893 rc = PGMR3Init(pVM);
894 if (RT_SUCCESS(rc))
895 {
896 rc = REMR3Init(pVM);
897 if (RT_SUCCESS(rc))
898 {
899 rc = MMR3InitPaging(pVM);
900 if (RT_SUCCESS(rc))
901 rc = TMR3Init(pVM);
902 if (RT_SUCCESS(rc))
903 {
904 rc = FTMR3Init(pVM);
905 if (RT_SUCCESS(rc))
906 {
907 rc = VMMR3Init(pVM);
908 if (RT_SUCCESS(rc))
909 {
910 rc = SELMR3Init(pVM);
911 if (RT_SUCCESS(rc))
912 {
913 rc = TRPMR3Init(pVM);
914 if (RT_SUCCESS(rc))
915 {
916 rc = CSAMR3Init(pVM);
917 if (RT_SUCCESS(rc))
918 {
919 rc = PATMR3Init(pVM);
920 if (RT_SUCCESS(rc))
921 {
922 rc = IOMR3Init(pVM);
923 if (RT_SUCCESS(rc))
924 {
925 rc = EMR3Init(pVM);
926 if (RT_SUCCESS(rc))
927 {
928 rc = DBGFR3Init(pVM);
929 if (RT_SUCCESS(rc))
930 {
931 rc = PDMR3Init(pVM);
932 if (RT_SUCCESS(rc))
933 {
934 rc = PGMR3InitDynMap(pVM);
935 if (RT_SUCCESS(rc))
936 rc = MMR3HyperInitFinalize(pVM);
937 if (RT_SUCCESS(rc))
938 rc = PATMR3InitFinalize(pVM);
939 if (RT_SUCCESS(rc))
940 rc = PGMR3InitFinalize(pVM);
941 if (RT_SUCCESS(rc))
942 rc = SELMR3InitFinalize(pVM);
943 if (RT_SUCCESS(rc))
944 rc = TMR3InitFinalize(pVM);
945 if (RT_SUCCESS(rc))
946 rc = REMR3InitFinalize(pVM);
947 if (RT_SUCCESS(rc))
948 rc = vmR3InitDoCompleted(pVM, VMINITCOMPLETED_RING3);
949 if (RT_SUCCESS(rc))
950 {
951 LogFlow(("vmR3InitRing3: returns %Rrc\n", VINF_SUCCESS));
952 return VINF_SUCCESS;
953 }
954 int rc2 = PDMR3Term(pVM);
955 AssertRC(rc2);
956 }
957 int rc2 = DBGFR3Term(pVM);
958 AssertRC(rc2);
959 }
960 int rc2 = EMR3Term(pVM);
961 AssertRC(rc2);
962 }
963 int rc2 = IOMR3Term(pVM);
964 AssertRC(rc2);
965 }
966 int rc2 = PATMR3Term(pVM);
967 AssertRC(rc2);
968 }
969 int rc2 = CSAMR3Term(pVM);
970 AssertRC(rc2);
971 }
972 int rc2 = TRPMR3Term(pVM);
973 AssertRC(rc2);
974 }
975 int rc2 = SELMR3Term(pVM);
976 AssertRC(rc2);
977 }
978 int rc2 = VMMR3Term(pVM);
979 AssertRC(rc2);
980 }
981 int rc2 = FTMR3Term(pVM);
982 AssertRC(rc2);
983 }
984 int rc2 = TMR3Term(pVM);
985 AssertRC(rc2);
986 }
987 int rc2 = REMR3Term(pVM);
988 AssertRC(rc2);
989 }
990 int rc2 = PGMR3Term(pVM);
991 AssertRC(rc2);
992 }
993 int rc2 = HWACCMR3Term(pVM);
994 AssertRC(rc2);
995 }
996 //int rc2 = CPUMR3Term(pVM);
997 //AssertRC(rc2);
998 }
999 /* MMR3Term is not called here because it'll kill the heap. */
1000 }
1001
1002 LogFlow(("vmR3InitRing3: returns %Rrc\n", rc));
1003 return rc;
1004}
1005
1006
1007/**
1008 * Initializes all R0 components of the VM
1009 */
1010static int vmR3InitRing0(PVM pVM)
1011{
1012 LogFlow(("vmR3InitRing0:\n"));
1013
1014 /*
1015 * Check for FAKE suplib mode.
1016 */
1017 int rc = VINF_SUCCESS;
1018 const char *psz = RTEnvGet("VBOX_SUPLIB_FAKE");
1019 if (!psz || strcmp(psz, "fake"))
1020 {
1021 /*
1022 * Call the VMMR0 component and let it do the init.
1023 */
1024 rc = VMMR3InitR0(pVM);
1025 }
1026 else
1027 Log(("vmR3InitRing0: skipping because of VBOX_SUPLIB_FAKE=fake\n"));
1028
1029 /*
1030 * Do notifications and return.
1031 */
1032 if (RT_SUCCESS(rc))
1033 rc = vmR3InitDoCompleted(pVM, VMINITCOMPLETED_RING0);
1034 if (RT_SUCCESS(rc))
1035 rc = vmR3InitDoCompleted(pVM, VMINITCOMPLETED_HWACCM);
1036
1037 /** @todo Move this to the VMINITCOMPLETED_HWACCM notification handler. */
1038 if (RT_SUCCESS(rc))
1039 CPUMR3SetHWVirtEx(pVM, HWACCMIsEnabled(pVM));
1040
1041 LogFlow(("vmR3InitRing0: returns %Rrc\n", rc));
1042 return rc;
1043}
1044
1045
1046/**
1047 * Initializes all GC components of the VM
1048 */
1049static int vmR3InitGC(PVM pVM)
1050{
1051 LogFlow(("vmR3InitGC:\n"));
1052
1053 /*
1054 * Check for FAKE suplib mode.
1055 */
1056 int rc = VINF_SUCCESS;
1057 const char *psz = RTEnvGet("VBOX_SUPLIB_FAKE");
1058 if (!psz || strcmp(psz, "fake"))
1059 {
1060 /*
1061 * Call the VMMR0 component and let it do the init.
1062 */
1063 rc = VMMR3InitRC(pVM);
1064 }
1065 else
1066 Log(("vmR3InitGC: skipping because of VBOX_SUPLIB_FAKE=fake\n"));
1067
1068 /*
1069 * Do notifications and return.
1070 */
1071 if (RT_SUCCESS(rc))
1072 rc = vmR3InitDoCompleted(pVM, VMINITCOMPLETED_GC);
1073 LogFlow(("vmR3InitGC: returns %Rrc\n", rc));
1074 return rc;
1075}
1076
1077
1078/**
1079 * Do init completed notifications.
1080 *
1081 * @returns VBox status code.
1082 * @param pVM The VM handle.
1083 * @param enmWhat What's completed.
1084 */
1085static int vmR3InitDoCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
1086{
1087 int rc = VMMR3InitCompleted(pVM, enmWhat);
1088 if (RT_SUCCESS(rc))
1089 rc = HWACCMR3InitCompleted(pVM, enmWhat);
1090 if (RT_SUCCESS(rc))
1091 rc = PGMR3InitCompleted(pVM, enmWhat);
1092 return rc;
1093}
1094
1095
1096/**
1097 * Logger callback for inserting a custom prefix.
1098 *
1099 * @returns Number of chars written.
1100 * @param pLogger The logger.
1101 * @param pchBuf The output buffer.
1102 * @param cchBuf The output buffer size.
1103 * @param pvUser Pointer to the UVM structure.
1104 */
1105static DECLCALLBACK(size_t) vmR3LogPrefixCallback(PRTLOGGER pLogger, char *pchBuf, size_t cchBuf, void *pvUser)
1106{
1107 AssertReturn(cchBuf >= 2, 0);
1108 PUVM pUVM = (PUVM)pvUser;
1109 PUVMCPU pUVCpu = (PUVMCPU)RTTlsGet(pUVM->vm.s.idxTLS);
1110 if (pUVCpu)
1111 {
1112 static const char s_szHex[17] = "0123456789abcdef";
1113 VMCPUID const idCpu = pUVCpu->idCpu;
1114 pchBuf[1] = s_szHex[ idCpu & 15];
1115 pchBuf[0] = s_szHex[(idCpu >> 4) & 15];
1116 }
1117 else
1118 {
1119 pchBuf[0] = 'x';
1120 pchBuf[1] = 'y';
1121 }
1122
1123 return 2;
1124}
1125
1126
1127/**
1128 * Calls the relocation functions for all VMM components so they can update
1129 * any GC pointers. When this function is called all the basic VM members
1130 * have been updated and the actual memory relocation have been done
1131 * by the PGM/MM.
1132 *
1133 * This is used both on init and on runtime relocations.
1134 *
1135 * @param pVM VM handle.
1136 * @param offDelta Relocation delta relative to old location.
1137 */
1138VMMR3DECL(void) VMR3Relocate(PVM pVM, RTGCINTPTR offDelta)
1139{
1140 LogFlow(("VMR3Relocate: offDelta=%RGv\n", offDelta));
1141
1142 /*
1143 * The order here is very important!
1144 */
1145 PGMR3Relocate(pVM, offDelta);
1146 PDMR3LdrRelocateU(pVM->pUVM, offDelta);
1147 PGMR3Relocate(pVM, 0); /* Repeat after PDM relocation. */
1148 CPUMR3Relocate(pVM);
1149 HWACCMR3Relocate(pVM);
1150 SELMR3Relocate(pVM);
1151 VMMR3Relocate(pVM, offDelta);
1152 SELMR3Relocate(pVM); /* !hack! fix stack! */
1153 TRPMR3Relocate(pVM, offDelta);
1154 PATMR3Relocate(pVM);
1155 CSAMR3Relocate(pVM, offDelta);
1156 IOMR3Relocate(pVM, offDelta);
1157 EMR3Relocate(pVM);
1158 TMR3Relocate(pVM, offDelta);
1159 DBGFR3Relocate(pVM, offDelta);
1160 PDMR3Relocate(pVM, offDelta);
1161}
1162
1163
1164/**
1165 * EMT rendezvous worker for VMR3PowerOn.
1166 *
1167 * @returns VERR_VM_INVALID_VM_STATE or VINF_SUCCESS. (This is a strict return
1168 * code, see FNVMMEMTRENDEZVOUS.)
1169 *
1170 * @param pVM The VM handle.
1171 * @param pVCpu The VMCPU handle of the EMT.
1172 * @param pvUser Ignored.
1173 */
1174static DECLCALLBACK(VBOXSTRICTRC) vmR3PowerOn(PVM pVM, PVMCPU pVCpu, void *pvUser)
1175{
1176 LogFlow(("vmR3PowerOn: pVM=%p pVCpu=%p/#%u\n", pVM, pVCpu, pVCpu->idCpu));
1177 Assert(!pvUser); NOREF(pvUser);
1178
1179 /*
1180 * The first thread thru here tries to change the state. We shouldn't be
1181 * called again if this fails.
1182 */
1183 if (pVCpu->idCpu == pVM->cCpus - 1)
1184 {
1185 int rc = vmR3TrySetState(pVM, "VMR3PowerOn", 1, VMSTATE_POWERING_ON, VMSTATE_CREATED);
1186 if (RT_FAILURE(rc))
1187 return rc;
1188 }
1189
1190 VMSTATE enmVMState = VMR3GetState(pVM);
1191 AssertMsgReturn(enmVMState == VMSTATE_POWERING_ON,
1192 ("%s\n", VMR3GetStateName(enmVMState)),
1193 VERR_INTERNAL_ERROR_4);
1194
1195 /*
1196 * All EMTs changes their state to started.
1197 */
1198 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
1199
1200 /*
1201 * EMT(0) is last thru here and it will make the notification calls
1202 * and advance the state.
1203 */
1204 if (pVCpu->idCpu == 0)
1205 {
1206 PDMR3PowerOn(pVM);
1207 vmR3SetState(pVM, VMSTATE_RUNNING, VMSTATE_POWERING_ON);
1208 }
1209
1210 return VINF_SUCCESS;
1211}
1212
1213
1214/**
1215 * Powers on the virtual machine.
1216 *
1217 * @returns VBox status code.
1218 *
1219 * @param pVM The VM to power on.
1220 *
1221 * @thread Any thread.
1222 * @vmstate Created
1223 * @vmstateto PoweringOn+Running
1224 */
1225VMMR3DECL(int) VMR3PowerOn(PVM pVM)
1226{
1227 LogFlow(("VMR3PowerOn: pVM=%p\n", pVM));
1228 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1229
1230 /*
1231 * Gather all the EMTs to reduce the init TSC drift and keep
1232 * the state changing APIs a bit uniform.
1233 */
1234 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
1235 vmR3PowerOn, NULL);
1236 LogFlow(("VMR3PowerOn: returns %Rrc\n", rc));
1237 return rc;
1238}
1239
1240
1241/**
1242 * Does the suspend notifications.
1243 *
1244 * @param pVM The VM handle.
1245 * @thread EMT(0)
1246 */
1247static void vmR3SuspendDoWork(PVM pVM)
1248{
1249 PDMR3Suspend(pVM);
1250}
1251
1252
1253/**
1254 * EMT rendezvous worker for VMR3Suspend.
1255 *
1256 * @returns VERR_VM_INVALID_VM_STATE or VINF_EM_SUSPEND. (This is a strict
1257 * return code, see FNVMMEMTRENDEZVOUS.)
1258 *
1259 * @param pVM The VM handle.
1260 * @param pVCpu The VMCPU handle of the EMT.
1261 * @param pvUser Ignored.
1262 */
1263static DECLCALLBACK(VBOXSTRICTRC) vmR3Suspend(PVM pVM, PVMCPU pVCpu, void *pvUser)
1264{
1265 LogFlow(("vmR3Suspend: pVM=%p pVCpu=%p/#%u\n", pVM, pVCpu, pVCpu->idCpu));
1266 Assert(!pvUser); NOREF(pvUser);
1267
1268 /*
1269 * The first EMT switches the state to suspending. If this fails because
1270 * something was racing us in one way or the other, there will be no more
1271 * calls and thus the state assertion below is not going to annoy anyone.
1272 */
1273 if (pVCpu->idCpu == pVM->cCpus - 1)
1274 {
1275 int rc = vmR3TrySetState(pVM, "VMR3Suspend", 2,
1276 VMSTATE_SUSPENDING, VMSTATE_RUNNING,
1277 VMSTATE_SUSPENDING_EXT_LS, VMSTATE_RUNNING_LS);
1278 if (RT_FAILURE(rc))
1279 return rc;
1280 }
1281
1282 VMSTATE enmVMState = VMR3GetState(pVM);
1283 AssertMsgReturn( enmVMState == VMSTATE_SUSPENDING
1284 || enmVMState == VMSTATE_SUSPENDING_EXT_LS,
1285 ("%s\n", VMR3GetStateName(enmVMState)),
1286 VERR_INTERNAL_ERROR_4);
1287
1288 /*
1289 * EMT(0) does the actually suspending *after* all the other CPUs have
1290 * been thru here.
1291 */
1292 if (pVCpu->idCpu == 0)
1293 {
1294 vmR3SuspendDoWork(pVM);
1295
1296 int rc = vmR3TrySetState(pVM, "VMR3Suspend", 2,
1297 VMSTATE_SUSPENDED, VMSTATE_SUSPENDING,
1298 VMSTATE_SUSPENDED_EXT_LS, VMSTATE_SUSPENDING_EXT_LS);
1299 if (RT_FAILURE(rc))
1300 return VERR_INTERNAL_ERROR_3;
1301 }
1302
1303 return VINF_EM_SUSPEND;
1304}
1305
1306
1307/**
1308 * Suspends a running VM.
1309 *
1310 * @returns VBox status code. When called on EMT, this will be a strict status
1311 * code that has to be propagated up the call stack.
1312 *
1313 * @param pVM The VM to suspend.
1314 *
1315 * @thread Any thread.
1316 * @vmstate Running or RunningLS
1317 * @vmstateto Suspending + Suspended or SuspendingExtLS + SuspendedExtLS
1318 */
1319VMMR3DECL(int) VMR3Suspend(PVM pVM)
1320{
1321 LogFlow(("VMR3Suspend: pVM=%p\n", pVM));
1322 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1323
1324 /*
1325 * Gather all the EMTs to make sure there are no races before
1326 * changing the VM state.
1327 */
1328 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
1329 vmR3Suspend, NULL);
1330 LogFlow(("VMR3Suspend: returns %Rrc\n", rc));
1331 return rc;
1332}
1333
1334
1335/**
1336 * EMT rendezvous worker for VMR3Resume.
1337 *
1338 * @returns VERR_VM_INVALID_VM_STATE or VINF_EM_RESUME. (This is a strict
1339 * return code, see FNVMMEMTRENDEZVOUS.)
1340 *
1341 * @param pVM The VM handle.
1342 * @param pVCpu The VMCPU handle of the EMT.
1343 * @param pvUser Ignored.
1344 */
1345static DECLCALLBACK(VBOXSTRICTRC) vmR3Resume(PVM pVM, PVMCPU pVCpu, void *pvUser)
1346{
1347 LogFlow(("vmR3Resume: pVM=%p pVCpu=%p/#%u\n", pVM, pVCpu, pVCpu->idCpu));
1348 Assert(!pvUser); NOREF(pvUser);
1349
1350 /*
1351 * The first thread thru here tries to change the state. We shouldn't be
1352 * called again if this fails.
1353 */
1354 if (pVCpu->idCpu == pVM->cCpus - 1)
1355 {
1356 int rc = vmR3TrySetState(pVM, "VMR3Resume", 1, VMSTATE_RESUMING, VMSTATE_SUSPENDED);
1357 if (RT_FAILURE(rc))
1358 return rc;
1359 }
1360
1361 VMSTATE enmVMState = VMR3GetState(pVM);
1362 AssertMsgReturn(enmVMState == VMSTATE_RESUMING,
1363 ("%s\n", VMR3GetStateName(enmVMState)),
1364 VERR_INTERNAL_ERROR_4);
1365
1366#if 0
1367 /*
1368 * All EMTs changes their state to started.
1369 */
1370 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
1371#endif
1372
1373 /*
1374 * EMT(0) is last thru here and it will make the notification calls
1375 * and advance the state.
1376 */
1377 if (pVCpu->idCpu == 0)
1378 {
1379 PDMR3Resume(pVM);
1380 vmR3SetState(pVM, VMSTATE_RUNNING, VMSTATE_RESUMING);
1381 pVM->vm.s.fTeleportedAndNotFullyResumedYet = false;
1382 }
1383
1384 return VINF_EM_RESUME;
1385}
1386
1387
1388/**
1389 * Resume VM execution.
1390 *
1391 * @returns VBox status code. When called on EMT, this will be a strict status
1392 * code that has to be propagated up the call stack.
1393 *
1394 * @param pVM The VM to resume.
1395 *
1396 * @thread Any thread.
1397 * @vmstate Suspended
1398 * @vmstateto Running
1399 */
1400VMMR3DECL(int) VMR3Resume(PVM pVM)
1401{
1402 LogFlow(("VMR3Resume: pVM=%p\n", pVM));
1403 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1404
1405 /*
1406 * Gather all the EMTs to make sure there are no races before
1407 * changing the VM state.
1408 */
1409 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
1410 vmR3Resume, NULL);
1411 LogFlow(("VMR3Resume: returns %Rrc\n", rc));
1412 return rc;
1413}
1414
1415
1416/**
1417 * EMT rendezvous worker for VMR3Save and VMR3Teleport that suspends the VM
1418 * after the live step has been completed.
1419 *
1420 * @returns VERR_VM_INVALID_VM_STATE or VINF_EM_RESUME. (This is a strict
1421 * return code, see FNVMMEMTRENDEZVOUS.)
1422 *
1423 * @param pVM The VM handle.
1424 * @param pVCpu The VMCPU handle of the EMT.
1425 * @param pvUser The pfSuspended argument of vmR3SaveTeleport.
1426 */
1427static DECLCALLBACK(VBOXSTRICTRC) vmR3LiveDoSuspend(PVM pVM, PVMCPU pVCpu, void *pvUser)
1428{
1429 LogFlow(("vmR3LiveDoSuspend: pVM=%p pVCpu=%p/#%u\n", pVM, pVCpu, pVCpu->idCpu));
1430 bool *pfSuspended = (bool *)pvUser;
1431
1432 /*
1433 * The first thread thru here tries to change the state. We shouldn't be
1434 * called again if this fails.
1435 */
1436 if (pVCpu->idCpu == pVM->cCpus - 1U)
1437 {
1438 PUVM pUVM = pVM->pUVM;
1439 int rc;
1440
1441 RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
1442 VMSTATE enmVMState = pVM->enmVMState;
1443 switch (enmVMState)
1444 {
1445 case VMSTATE_RUNNING_LS:
1446 vmR3SetStateLocked(pVM, pUVM, VMSTATE_SUSPENDING_LS, VMSTATE_RUNNING_LS);
1447 rc = VINF_SUCCESS;
1448 break;
1449
1450 case VMSTATE_SUSPENDED_EXT_LS:
1451 case VMSTATE_SUSPENDED_LS: /* (via reset) */
1452 rc = VINF_SUCCESS;
1453 break;
1454
1455 case VMSTATE_DEBUGGING_LS:
1456 rc = VERR_TRY_AGAIN;
1457 break;
1458
1459 case VMSTATE_OFF_LS:
1460 vmR3SetStateLocked(pVM, pUVM, VMSTATE_OFF, VMSTATE_OFF_LS);
1461 rc = VERR_SSM_LIVE_POWERED_OFF;
1462 break;
1463
1464 case VMSTATE_FATAL_ERROR_LS:
1465 vmR3SetStateLocked(pVM, pUVM, VMSTATE_FATAL_ERROR, VMSTATE_FATAL_ERROR_LS);
1466 rc = VERR_SSM_LIVE_FATAL_ERROR;
1467 break;
1468
1469 case VMSTATE_GURU_MEDITATION_LS:
1470 vmR3SetStateLocked(pVM, pUVM, VMSTATE_GURU_MEDITATION, VMSTATE_GURU_MEDITATION_LS);
1471 rc = VERR_SSM_LIVE_GURU_MEDITATION;
1472 break;
1473
1474 case VMSTATE_POWERING_OFF_LS:
1475 case VMSTATE_SUSPENDING_EXT_LS:
1476 case VMSTATE_RESETTING_LS:
1477 default:
1478 AssertMsgFailed(("%s\n", VMR3GetStateName(enmVMState)));
1479 rc = VERR_INTERNAL_ERROR_3;
1480 break;
1481 }
1482 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
1483 if (RT_FAILURE(rc))
1484 {
1485 LogFlow(("vmR3LiveDoSuspend: returns %Rrc (state was %s)\n", rc, VMR3GetStateName(enmVMState)));
1486 return rc;
1487 }
1488 }
1489
1490 VMSTATE enmVMState = VMR3GetState(pVM);
1491 AssertMsgReturn(enmVMState == VMSTATE_SUSPENDING_LS,
1492 ("%s\n", VMR3GetStateName(enmVMState)),
1493 VERR_INTERNAL_ERROR_4);
1494
1495 /*
1496 * Only EMT(0) have work to do since it's last thru here.
1497 */
1498 if (pVCpu->idCpu == 0)
1499 {
1500 vmR3SuspendDoWork(pVM);
1501 int rc = vmR3TrySetState(pVM, "VMR3Suspend", 1,
1502 VMSTATE_SUSPENDED_LS, VMSTATE_SUSPENDING_LS);
1503 if (RT_FAILURE(rc))
1504 return VERR_INTERNAL_ERROR_3;
1505
1506 *pfSuspended = true;
1507 }
1508
1509 return VINF_EM_SUSPEND;
1510}
1511
1512
1513/**
1514 * EMT rendezvous worker that VMR3Save and VMR3Teleport uses to clean up a
1515 * SSMR3LiveDoStep1 failure.
1516 *
1517 * Doing this as a rendezvous operation avoids all annoying transition
1518 * states.
1519 *
1520 * @returns VERR_VM_INVALID_VM_STATE, VINF_SUCCESS or some specific VERR_SSM_*
1521 * status code. (This is a strict return code, see FNVMMEMTRENDEZVOUS.)
1522 *
1523 * @param pVM The VM handle.
1524 * @param pVCpu The VMCPU handle of the EMT.
1525 * @param pvUser The pfSuspended argument of vmR3SaveTeleport.
1526 */
1527static DECLCALLBACK(VBOXSTRICTRC) vmR3LiveDoStep1Cleanup(PVM pVM, PVMCPU pVCpu, void *pvUser)
1528{
1529 LogFlow(("vmR3LiveDoStep1Cleanup: pVM=%p pVCpu=%p/#%u\n", pVM, pVCpu, pVCpu->idCpu));
1530 bool *pfSuspended = (bool *)pvUser;
1531 NOREF(pVCpu);
1532
1533 int rc = vmR3TrySetState(pVM, "vmR3LiveDoStep1Cleanup", 8,
1534 VMSTATE_OFF, VMSTATE_OFF_LS, /* 1 */
1535 VMSTATE_FATAL_ERROR, VMSTATE_FATAL_ERROR_LS, /* 2 */
1536 VMSTATE_GURU_MEDITATION, VMSTATE_GURU_MEDITATION_LS, /* 3 */
1537 VMSTATE_SUSPENDED, VMSTATE_SUSPENDED_LS, /* 4 */
1538 VMSTATE_SUSPENDED, VMSTATE_SAVING,
1539 VMSTATE_SUSPENDED, VMSTATE_SUSPENDED_EXT_LS,
1540 VMSTATE_RUNNING, VMSTATE_RUNNING_LS,
1541 VMSTATE_DEBUGGING, VMSTATE_DEBUGGING_LS);
1542 if (rc == 1)
1543 rc = VERR_SSM_LIVE_POWERED_OFF;
1544 else if (rc == 2)
1545 rc = VERR_SSM_LIVE_FATAL_ERROR;
1546 else if (rc == 3)
1547 rc = VERR_SSM_LIVE_GURU_MEDITATION;
1548 else if (rc == 4)
1549 {
1550 *pfSuspended = true;
1551 rc = VINF_SUCCESS;
1552 }
1553 else if (rc > 0)
1554 rc = VINF_SUCCESS;
1555 return rc;
1556}
1557
1558
1559/**
1560 * EMT(0) worker for VMR3Save and VMR3Teleport that completes the live save.
1561 *
1562 * @returns VBox status code.
1563 * @retval VINF_SSM_LIVE_SUSPENDED if VMR3Suspend was called.
1564 *
1565 * @param pVM The VM handle.
1566 * @param pSSM The handle of saved state operation.
1567 *
1568 * @thread EMT(0)
1569 */
1570static DECLCALLBACK(int) vmR3LiveDoStep2(PVM pVM, PSSMHANDLE pSSM)
1571{
1572 LogFlow(("vmR3LiveDoStep2: pVM=%p pSSM=%p\n", pVM, pSSM));
1573 VM_ASSERT_EMT0(pVM);
1574
1575 /*
1576 * Advance the state and mark if VMR3Suspend was called.
1577 */
1578 int rc = VINF_SUCCESS;
1579 VMSTATE enmVMState = VMR3GetState(pVM);
1580 if (enmVMState == VMSTATE_SUSPENDED_LS)
1581 vmR3SetState(pVM, VMSTATE_SAVING, VMSTATE_SUSPENDED_LS);
1582 else
1583 {
1584 if (enmVMState != VMSTATE_SAVING)
1585 vmR3SetState(pVM, VMSTATE_SAVING, VMSTATE_SUSPENDED_EXT_LS);
1586 rc = VINF_SSM_LIVE_SUSPENDED;
1587 }
1588
1589 /*
1590 * Finish up and release the handle. Careful with the status codes.
1591 */
1592 int rc2 = SSMR3LiveDoStep2(pSSM);
1593 if (rc == VINF_SUCCESS || (RT_FAILURE(rc2) && RT_SUCCESS(rc)))
1594 rc = rc2;
1595
1596 rc2 = SSMR3LiveDone(pSSM);
1597 if (rc == VINF_SUCCESS || (RT_FAILURE(rc2) && RT_SUCCESS(rc)))
1598 rc = rc2;
1599
1600 /*
1601 * Advance to the final state and return.
1602 */
1603 vmR3SetState(pVM, VMSTATE_SUSPENDED, VMSTATE_SAVING);
1604 Assert(rc > VINF_EM_LAST || rc < VINF_EM_FIRST);
1605 return rc;
1606}
1607
1608
1609/**
1610 * Worker for vmR3SaveTeleport that validates the state and calls SSMR3Save or
1611 * SSMR3LiveSave.
1612 *
1613 * @returns VBox status code.
1614 *
1615 * @param pVM The VM handle.
1616 * @param cMsMaxDowntime The maximum downtime given as milliseconds.
1617 * @param pszFilename The name of the file. NULL if pStreamOps is used.
1618 * @param pStreamOps The stream methods. NULL if pszFilename is used.
1619 * @param pvStreamOpsUser The user argument to the stream methods.
1620 * @param enmAfter What to do afterwards.
1621 * @param pfnProgress Progress callback. Optional.
1622 * @param pvProgressUser User argument for the progress callback.
1623 * @param ppSSM Where to return the saved state handle in case of a
1624 * live snapshot scenario.
1625 * @param fSkipStateChanges Set if we're supposed to skip state changes (FTM delta case)
1626 *
1627 * @thread EMT
1628 */
1629static DECLCALLBACK(int) vmR3Save(PVM pVM, uint32_t cMsMaxDowntime, const char *pszFilename, PCSSMSTRMOPS pStreamOps, void *pvStreamOpsUser,
1630 SSMAFTER enmAfter, PFNVMPROGRESS pfnProgress, void *pvProgressUser, PSSMHANDLE *ppSSM,
1631 bool fSkipStateChanges)
1632{
1633 int rc = VINF_SUCCESS;
1634
1635 LogFlow(("vmR3Save: pVM=%p cMsMaxDowntime=%u pszFilename=%p:{%s} pStreamOps=%p pvStreamOpsUser=%p enmAfter=%d pfnProgress=%p pvProgressUser=%p ppSSM=%p\n",
1636 pVM, cMsMaxDowntime, pszFilename, pszFilename, pStreamOps, pvStreamOpsUser, enmAfter, pfnProgress, pvProgressUser, ppSSM));
1637
1638 /*
1639 * Validate input.
1640 */
1641 AssertPtrNull(pszFilename);
1642 AssertPtrNull(pStreamOps);
1643 AssertPtr(pVM);
1644 Assert( enmAfter == SSMAFTER_DESTROY
1645 || enmAfter == SSMAFTER_CONTINUE
1646 || enmAfter == SSMAFTER_TELEPORT);
1647 AssertPtr(ppSSM);
1648 *ppSSM = NULL;
1649
1650 /*
1651 * Change the state and perform/start the saving.
1652 */
1653 if (!fSkipStateChanges)
1654 {
1655 rc = vmR3TrySetState(pVM, "VMR3Save", 2,
1656 VMSTATE_SAVING, VMSTATE_SUSPENDED,
1657 VMSTATE_RUNNING_LS, VMSTATE_RUNNING);
1658 }
1659 else
1660 {
1661 Assert(enmAfter != SSMAFTER_TELEPORT);
1662 rc = 1;
1663 }
1664
1665 if (rc == 1 && enmAfter != SSMAFTER_TELEPORT)
1666 {
1667 rc = SSMR3Save(pVM, pszFilename, pStreamOps, pvStreamOpsUser, enmAfter, pfnProgress, pvProgressUser);
1668 if (!fSkipStateChanges)
1669 vmR3SetState(pVM, VMSTATE_SUSPENDED, VMSTATE_SAVING);
1670 }
1671 else if (rc == 2 || enmAfter == SSMAFTER_TELEPORT)
1672 {
1673 Assert(!fSkipStateChanges);
1674 if (enmAfter == SSMAFTER_TELEPORT)
1675 pVM->vm.s.fTeleportedAndNotFullyResumedYet = true;
1676 rc = SSMR3LiveSave(pVM, cMsMaxDowntime, pszFilename, pStreamOps, pvStreamOpsUser,
1677 enmAfter, pfnProgress, pvProgressUser, ppSSM);
1678 /* (We're not subject to cancellation just yet.) */
1679 }
1680 else
1681 Assert(RT_FAILURE(rc));
1682 return rc;
1683}
1684
1685
1686/**
1687 * Common worker for VMR3Save and VMR3Teleport.
1688 *
1689 * @returns VBox status code.
1690 *
1691 * @param pVM The VM handle.
1692 * @param cMsMaxDowntime The maximum downtime given as milliseconds.
1693 * @param pszFilename The name of the file. NULL if pStreamOps is used.
1694 * @param pStreamOps The stream methods. NULL if pszFilename is used.
1695 * @param pvStreamOpsUser The user argument to the stream methods.
1696 * @param enmAfter What to do afterwards.
1697 * @param pfnProgress Progress callback. Optional.
1698 * @param pvProgressUser User argument for the progress callback.
1699 * @param pfSuspended Set if we suspended the VM.
1700 * @param fSkipStateChanges Set if we're supposed to skip state changes (FTM delta case)
1701 *
1702 * @thread Non-EMT
1703 */
1704static int vmR3SaveTeleport(PVM pVM, uint32_t cMsMaxDowntime,
1705 const char *pszFilename, PCSSMSTRMOPS pStreamOps, void *pvStreamOpsUser,
1706 SSMAFTER enmAfter, PFNVMPROGRESS pfnProgress, void *pvProgressUser, bool *pfSuspended,
1707 bool fSkipStateChanges)
1708{
1709 /*
1710 * Request the operation in EMT(0).
1711 */
1712 PSSMHANDLE pSSM;
1713 int rc = VMR3ReqCallWaitU(pVM->pUVM, 0 /*idDstCpu*/,
1714 (PFNRT)vmR3Save, 10, pVM, cMsMaxDowntime, pszFilename, pStreamOps, pvStreamOpsUser,
1715 enmAfter, pfnProgress, pvProgressUser, &pSSM, fSkipStateChanges);
1716 if ( RT_SUCCESS(rc)
1717 && pSSM)
1718 {
1719 Assert(!fSkipStateChanges);
1720
1721 /*
1722 * Live snapshot.
1723 *
1724 * The state handling here is kind of tricky, doing it on EMT(0) helps
1725 * a bit. See the VMSTATE diagram for details.
1726 */
1727 rc = SSMR3LiveDoStep1(pSSM);
1728 if (RT_SUCCESS(rc))
1729 {
1730 if (VMR3GetState(pVM) != VMSTATE_SAVING)
1731 for (;;)
1732 {
1733 /* Try suspend the VM. */
1734 rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
1735 vmR3LiveDoSuspend, pfSuspended);
1736 if (rc != VERR_TRY_AGAIN)
1737 break;
1738
1739 /* Wait for the state to change. */
1740 RTThreadSleep(250); /** @todo Live Migration: fix this polling wait by some smart use of multiple release event semaphores.. */
1741 }
1742 if (RT_SUCCESS(rc))
1743 rc = VMR3ReqCallWaitU(pVM->pUVM, 0 /*idDstCpu*/, (PFNRT)vmR3LiveDoStep2, 2, pVM, pSSM);
1744 else
1745 {
1746 int rc2 = VMR3ReqCallWaitU(pVM->pUVM, 0 /*idDstCpu*/, (PFNRT)SSMR3LiveDone, 1, pSSM);
1747 AssertMsg(rc2 == rc, ("%Rrc != %Rrc\n", rc2, rc));
1748 }
1749 }
1750 else
1751 {
1752 int rc2 = VMR3ReqCallWaitU(pVM->pUVM, 0 /*idDstCpu*/, (PFNRT)SSMR3LiveDone, 1, pSSM);
1753 AssertMsg(rc2 == rc, ("%Rrc != %Rrc\n", rc2, rc));
1754
1755 rc2 = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE, vmR3LiveDoStep1Cleanup, pfSuspended);
1756 if (RT_FAILURE(rc2) && rc == VERR_SSM_CANCELLED)
1757 rc = rc2;
1758 }
1759 }
1760
1761 return rc;
1762}
1763
1764
1765/**
1766 * Save current VM state.
1767 *
1768 * Can be used for both saving the state and creating snapshots.
1769 *
1770 * When called for a VM in the Running state, the saved state is created live
1771 * and the VM is only suspended when the final part of the saving is preformed.
1772 * The VM state will not be restored to Running in this case and it's up to the
1773 * caller to call VMR3Resume if this is desirable. (The rational is that the
1774 * caller probably wish to reconfigure the disks before resuming the VM.)
1775 *
1776 * @returns VBox status code.
1777 *
1778 * @param pVM The VM which state should be saved.
1779 * @param pszFilename The name of the save state file.
1780 * @param pStreamOps The stream methods.
1781 * @param pvStreamOpsUser The user argument to the stream methods.
1782 * @param fContinueAfterwards Whether continue execution afterwards or not.
1783 * When in doubt, set this to true.
1784 * @param pfnProgress Progress callback. Optional.
1785 * @param pvUser User argument for the progress callback.
1786 * @param pfSuspended Set if we suspended the VM.
1787 *
1788 * @thread Non-EMT.
1789 * @vmstate Suspended or Running
1790 * @vmstateto Saving+Suspended or
1791 * RunningLS+SuspendingLS+SuspendedLS+Saving+Suspended.
1792 */
1793VMMR3DECL(int) VMR3Save(PVM pVM, const char *pszFilename, bool fContinueAfterwards, PFNVMPROGRESS pfnProgress, void *pvUser, bool *pfSuspended)
1794{
1795 LogFlow(("VMR3Save: pVM=%p pszFilename=%p:{%s} fContinueAfterwards=%RTbool pfnProgress=%p pvUser=%p pfSuspended=%p\n",
1796 pVM, pszFilename, pszFilename, fContinueAfterwards, pfnProgress, pvUser, pfSuspended));
1797
1798 /*
1799 * Validate input.
1800 */
1801 AssertPtr(pfSuspended);
1802 *pfSuspended = false;
1803 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1804 VM_ASSERT_OTHER_THREAD(pVM);
1805 AssertReturn(VALID_PTR(pszFilename), VERR_INVALID_POINTER);
1806 AssertReturn(*pszFilename, VERR_INVALID_PARAMETER);
1807 AssertPtrNullReturn(pfnProgress, VERR_INVALID_POINTER);
1808
1809 /*
1810 * Join paths with VMR3Teleport.
1811 */
1812 SSMAFTER enmAfter = fContinueAfterwards ? SSMAFTER_CONTINUE : SSMAFTER_DESTROY;
1813 int rc = vmR3SaveTeleport(pVM, 250 /*cMsMaxDowntime*/,
1814 pszFilename, NULL /* pStreamOps */, NULL /* pvStreamOpsUser */,
1815 enmAfter, pfnProgress, pvUser, pfSuspended,
1816 false /* fSkipStateChanges */);
1817 LogFlow(("VMR3Save: returns %Rrc (*pfSuspended=%RTbool)\n", rc, *pfSuspended));
1818 return rc;
1819}
1820
1821/**
1822 * Save current VM state (used by FTM)
1823 *
1824 * Can be used for both saving the state and creating snapshots.
1825 *
1826 * When called for a VM in the Running state, the saved state is created live
1827 * and the VM is only suspended when the final part of the saving is preformed.
1828 * The VM state will not be restored to Running in this case and it's up to the
1829 * caller to call VMR3Resume if this is desirable. (The rational is that the
1830 * caller probably wish to reconfigure the disks before resuming the VM.)
1831 *
1832 * @returns VBox status code.
1833 *
1834 * @param pVM The VM which state should be saved.
1835 * @param pStreamOps The stream methods.
1836 * @param pvStreamOpsUser The user argument to the stream methods.
1837 * @param pfSuspended Set if we suspended the VM.
1838 * @param fSkipStateChanges Set if we're supposed to skip state changes (FTM delta case)
1839 *
1840 * @thread Any
1841 * @vmstate Suspended or Running
1842 * @vmstateto Saving+Suspended or
1843 * RunningLS+SuspendingLS+SuspendedLS+Saving+Suspended.
1844 */
1845VMMR3DECL(int) VMR3SaveFT(PVM pVM, PCSSMSTRMOPS pStreamOps, void *pvStreamOpsUser, bool *pfSuspended,
1846 bool fSkipStateChanges)
1847{
1848 LogFlow(("VMR3SaveFT: pVM=%p pStreamOps=%p pvSteamOpsUser=%p pfSuspended=%p\n",
1849 pVM, pStreamOps, pvStreamOpsUser, pfSuspended));
1850
1851 /*
1852 * Validate input.
1853 */
1854 AssertPtr(pfSuspended);
1855 *pfSuspended = false;
1856 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1857 AssertReturn(pStreamOps, VERR_INVALID_PARAMETER);
1858
1859 /*
1860 * Join paths with VMR3Teleport.
1861 */
1862 int rc = vmR3SaveTeleport(pVM, 250 /*cMsMaxDowntime*/,
1863 NULL, pStreamOps, pvStreamOpsUser,
1864 SSMAFTER_CONTINUE, NULL, NULL, pfSuspended,
1865 fSkipStateChanges);
1866 LogFlow(("VMR3SaveFT: returns %Rrc (*pfSuspended=%RTbool)\n", rc, *pfSuspended));
1867 return rc;
1868}
1869
1870
1871/**
1872 * Teleport the VM (aka live migration).
1873 *
1874 * @returns VBox status code.
1875 *
1876 * @param pVM The VM which state should be saved.
1877 * @param cMsMaxDowntime The maximum downtime given as milliseconds.
1878 * @param pStreamOps The stream methods.
1879 * @param pvStreamOpsUser The user argument to the stream methods.
1880 * @param pfnProgress Progress callback. Optional.
1881 * @param pvProgressUser User argument for the progress callback.
1882 * @param pfSuspended Set if we suspended the VM.
1883 *
1884 * @thread Non-EMT.
1885 * @vmstate Suspended or Running
1886 * @vmstateto Saving+Suspended or
1887 * RunningLS+SuspendingLS+SuspendedLS+Saving+Suspended.
1888 */
1889VMMR3DECL(int) VMR3Teleport(PVM pVM, uint32_t cMsMaxDowntime, PCSSMSTRMOPS pStreamOps, void *pvStreamOpsUser,
1890 PFNVMPROGRESS pfnProgress, void *pvProgressUser, bool *pfSuspended)
1891{
1892 LogFlow(("VMR3Teleport: pVM=%p cMsMaxDowntime=%u pStreamOps=%p pvStreamOps=%p pfnProgress=%p pvProgressUser=%p\n",
1893 pVM, cMsMaxDowntime, pStreamOps, pvStreamOpsUser, pfnProgress, pvProgressUser));
1894
1895 /*
1896 * Validate input.
1897 */
1898 AssertPtr(pfSuspended);
1899 *pfSuspended = false;
1900 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1901 VM_ASSERT_OTHER_THREAD(pVM);
1902 AssertPtrReturn(pStreamOps, VERR_INVALID_POINTER);
1903 AssertPtrNullReturn(pfnProgress, VERR_INVALID_POINTER);
1904
1905 /*
1906 * Join paths with VMR3Save.
1907 */
1908 int rc = vmR3SaveTeleport(pVM, cMsMaxDowntime,
1909 NULL /*pszFilename*/, pStreamOps, pvStreamOpsUser,
1910 SSMAFTER_TELEPORT, pfnProgress, pvProgressUser, pfSuspended,
1911 false /* fSkipStateChanges */);
1912 LogFlow(("VMR3Teleport: returns %Rrc (*pfSuspended=%RTbool)\n", rc, *pfSuspended));
1913 return rc;
1914}
1915
1916
1917
1918/**
1919 * EMT(0) worker for VMR3LoadFromFile and VMR3LoadFromStream.
1920 *
1921 * @returns VBox status code.
1922 *
1923 * @param pVM The VM handle.
1924 * @param pszFilename The name of the file. NULL if pStreamOps is used.
1925 * @param pStreamOps The stream methods. NULL if pszFilename is used.
1926 * @param pvStreamOpsUser The user argument to the stream methods.
1927 * @param pfnProgress Progress callback. Optional.
 * @param pvProgressUser    User argument for the progress callback.
1929 * @param fTeleporting Indicates whether we're teleporting or not.
1930 * @param fSkipStateChanges Set if we're supposed to skip state changes (FTM delta case)
1931 *
1932 * @thread EMT.
1933 */
1934static DECLCALLBACK(int) vmR3Load(PVM pVM, const char *pszFilename, PCSSMSTRMOPS pStreamOps, void *pvStreamOpsUser,
1935 PFNVMPROGRESS pfnProgress, void *pvProgressUser, bool fTeleporting,
1936 bool fSkipStateChanges)
1937{
1938 int rc = VINF_SUCCESS;
1939
1940 LogFlow(("vmR3Load: pVM=%p pszFilename=%p:{%s} pStreamOps=%p pvStreamOpsUser=%p pfnProgress=%p pvProgressUser=%p fTeleporting=%RTbool\n",
1941 pVM, pszFilename, pszFilename, pStreamOps, pvStreamOpsUser, pfnProgress, pvProgressUser, fTeleporting));
1942
1943 /*
1944 * Validate input (paranoia).
1945 */
1946 AssertPtr(pVM);
1947 AssertPtrNull(pszFilename);
1948 AssertPtrNull(pStreamOps);
1949 AssertPtrNull(pfnProgress);
1950
1951 if (!fSkipStateChanges)
1952 {
1953 /*
1954 * Change the state and perform the load.
1955 *
1956 * Always perform a relocation round afterwards to make sure hypervisor
1957 * selectors and such are correct.
1958 */
1959 rc = vmR3TrySetState(pVM, "VMR3Load", 2,
1960 VMSTATE_LOADING, VMSTATE_CREATED,
1961 VMSTATE_LOADING, VMSTATE_SUSPENDED);
1962 if (RT_FAILURE(rc))
1963 return rc;
1964 }
1965 pVM->vm.s.fTeleportedAndNotFullyResumedYet = fTeleporting;
1966
1967 uint32_t cErrorsPriorToSave = VMR3GetErrorCount(pVM);
1968 rc = SSMR3Load(pVM, pszFilename, pStreamOps, pvStreamOpsUser, SSMAFTER_RESUME, pfnProgress, pvProgressUser);
1969 if (RT_SUCCESS(rc))
1970 {
1971 VMR3Relocate(pVM, 0 /*offDelta*/);
1972 if (!fSkipStateChanges)
1973 vmR3SetState(pVM, VMSTATE_SUSPENDED, VMSTATE_LOADING);
1974 }
1975 else
1976 {
1977 pVM->vm.s.fTeleportedAndNotFullyResumedYet = false;
1978 if (!fSkipStateChanges)
1979 vmR3SetState(pVM, VMSTATE_LOAD_FAILURE, VMSTATE_LOADING);
1980
1981 if (cErrorsPriorToSave == VMR3GetErrorCount(pVM))
1982 rc = VMSetError(pVM, rc, RT_SRC_POS,
1983 N_("Unable to restore the virtual machine's saved state from '%s'. "
1984 "It may be damaged or from an older version of VirtualBox. "
1985 "Please discard the saved state before starting the virtual machine"),
1986 pszFilename);
1987 }
1988
1989 return rc;
1990}
1991
1992
1993/**
 * Loads a VM state into a newly created VM or one that is suspended.
1995 *
1996 * To restore a saved state on VM startup, call this function and then resume
1997 * the VM instead of powering it on.
1998 *
1999 * @returns VBox status code.
2000 *
2001 * @param pVM The VM handle.
2002 * @param pszFilename The name of the save state file.
2003 * @param pfnProgress Progress callback. Optional.
2004 * @param pvUser User argument for the progress callback.
2005 *
2006 * @thread Any thread.
2007 * @vmstate Created, Suspended
2008 * @vmstateto Loading+Suspended
2009 */
2010VMMR3DECL(int) VMR3LoadFromFile(PVM pVM, const char *pszFilename, PFNVMPROGRESS pfnProgress, void *pvUser)
2011{
2012 LogFlow(("VMR3LoadFromFile: pVM=%p pszFilename=%p:{%s} pfnProgress=%p pvUser=%p\n",
2013 pVM, pszFilename, pszFilename, pfnProgress, pvUser));
2014
2015 /*
2016 * Validate input.
2017 */
2018 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
2019 AssertPtrReturn(pszFilename, VERR_INVALID_POINTER);
2020
2021 /*
2022 * Forward the request to EMT(0). No need to setup a rendezvous here
2023 * since there is no execution taking place when this call is allowed.
2024 */
2025 int rc = VMR3ReqCallWaitU(pVM->pUVM, 0 /*idDstCpu*/, (PFNRT)vmR3Load, 8,
2026 pVM, pszFilename, (uintptr_t)NULL /*pStreamOps*/, (uintptr_t)NULL /*pvStreamOpsUser*/, pfnProgress, pvUser,
2027 false /*fTeleporting*/, false /* fSkipStateChanges */);
2028 LogFlow(("VMR3LoadFromFile: returns %Rrc\n", rc));
2029 return rc;
2030}
2031
2032
2033/**
2034 * VMR3LoadFromFile for arbitrary file streams.
2035 *
2036 * @returns VBox status code.
2037 *
2038 * @param pVM The VM handle.
2039 * @param pStreamOps The stream methods.
2040 * @param pvStreamOpsUser The user argument to the stream methods.
2041 * @param pfnProgress Progress callback. Optional.
2042 * @param pvProgressUser User argument for the progress callback.
2043 *
2044 * @thread Any thread.
2045 * @vmstate Created, Suspended
2046 * @vmstateto Loading+Suspended
2047 */
2048VMMR3DECL(int) VMR3LoadFromStream(PVM pVM, PCSSMSTRMOPS pStreamOps, void *pvStreamOpsUser,
2049 PFNVMPROGRESS pfnProgress, void *pvProgressUser)
2050{
2051 LogFlow(("VMR3LoadFromStream: pVM=%p pStreamOps=%p pvStreamOpsUser=%p pfnProgress=%p pvProgressUser=%p\n",
2052 pVM, pStreamOps, pvStreamOpsUser, pfnProgress, pvProgressUser));
2053
2054 /*
2055 * Validate input.
2056 */
2057 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
2058 AssertPtrReturn(pStreamOps, VERR_INVALID_POINTER);
2059
2060 /*
2061 * Forward the request to EMT(0). No need to setup a rendezvous here
2062 * since there is no execution taking place when this call is allowed.
2063 */
2064 int rc = VMR3ReqCallWaitU(pVM->pUVM, 0 /*idDstCpu*/, (PFNRT)vmR3Load, 8,
2065 pVM, (uintptr_t)NULL /*pszFilename*/, pStreamOps, pvStreamOpsUser, pfnProgress, pvProgressUser,
2066 true /*fTeleporting*/, false /* fSkipStateChanges */);
2067 LogFlow(("VMR3LoadFromStream: returns %Rrc\n", rc));
2068 return rc;
2069}
2070
2071
2072/**
 * VMR3LoadFromStream variant for the fault tolerance (FT) component; takes no
 * progress callback and skips VM state changes (FT delta case).
 *
 * @returns VBox status code.
 *
 * @param pVM               The VM handle.
 * @param pStreamOps        The stream methods.
 * @param pvStreamOpsUser   The user argument to the stream methods.
2082 *
2083 * @thread Any thread.
2084 * @vmstate Created, Suspended
2085 * @vmstateto Loading+Suspended
2086 */
2087VMMR3DECL(int) VMR3LoadFromStreamFT(PVM pVM, PCSSMSTRMOPS pStreamOps, void *pvStreamOpsUser)
2088{
2089 LogFlow(("VMR3LoadFromStreamFT: pVM=%p pStreamOps=%p pvStreamOpsUser=%p\n",
2090 pVM, pStreamOps, pvStreamOpsUser));
2091
2092 /*
2093 * Validate input.
2094 */
2095 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
2096 AssertPtrReturn(pStreamOps, VERR_INVALID_POINTER);
2097
2098 /*
2099 * Forward the request to EMT(0). No need to setup a rendezvous here
2100 * since there is no execution taking place when this call is allowed.
2101 */
2102 int rc = VMR3ReqCallWaitU(pVM->pUVM, 0 /*idDstCpu*/, (PFNRT)vmR3Load, 8,
2103 pVM, (uintptr_t)NULL /*pszFilename*/, pStreamOps, pvStreamOpsUser, NULL, NULL,
2104 true /*fTeleporting*/, true /* fSkipStateChanges */);
2105 LogFlow(("VMR3LoadFromStream: returns %Rrc\n", rc));
2106 return rc;
2107}
2108
2109/**
2110 * EMT rendezvous worker for VMR3PowerOff.
2111 *
2112 * @returns VERR_VM_INVALID_VM_STATE or VINF_EM_OFF. (This is a strict
2113 * return code, see FNVMMEMTRENDEZVOUS.)
2114 *
2115 * @param pVM The VM handle.
2116 * @param pVCpu The VMCPU handle of the EMT.
2117 * @param pvUser Ignored.
2118 */
static DECLCALLBACK(VBOXSTRICTRC) vmR3PowerOff(PVM pVM, PVMCPU pVCpu, void *pvUser)
{
    LogFlow(("vmR3PowerOff: pVM=%p pVCpu=%p/#%u\n", pVM, pVCpu, pVCpu->idCpu));
    Assert(!pvUser); NOREF(pvUser);

    /*
     * The first EMT thru here will change the state to PoweringOff.
     * (The rendezvous runs in descending order, so the first EMT through
     * is the one with id cCpus - 1.)
     */
    if (pVCpu->idCpu == pVM->cCpus - 1)
    {
        int rc = vmR3TrySetState(pVM, "VMR3PowerOff", 11,
                                 VMSTATE_POWERING_OFF,    VMSTATE_RUNNING,           /* 1 */
                                 VMSTATE_POWERING_OFF,    VMSTATE_SUSPENDED,         /* 2 */
                                 VMSTATE_POWERING_OFF,    VMSTATE_DEBUGGING,         /* 3 */
                                 VMSTATE_POWERING_OFF,    VMSTATE_LOAD_FAILURE,      /* 4 */
                                 VMSTATE_POWERING_OFF,    VMSTATE_GURU_MEDITATION,   /* 5 */
                                 VMSTATE_POWERING_OFF,    VMSTATE_FATAL_ERROR,       /* 6 */
                                 VMSTATE_POWERING_OFF,    VMSTATE_CREATED,           /* 7 */   /** @todo update the diagram! */
                                 VMSTATE_POWERING_OFF_LS, VMSTATE_RUNNING_LS,        /* 8 */
                                 VMSTATE_POWERING_OFF_LS, VMSTATE_DEBUGGING_LS,      /* 9 */
                                 VMSTATE_POWERING_OFF_LS, VMSTATE_GURU_MEDITATION_LS,/* 10 */
                                 VMSTATE_POWERING_OFF_LS, VMSTATE_FATAL_ERROR_LS);   /* 11 */
        if (RT_FAILURE(rc))
            return rc;
        /* On success rc appears to be the 1-based index of the matched state
           pair (see the numbering above); for pairs 7 and up any in-progress
           save/teleport is cancelled.  NOTE(review): pair 7 is Created -
           presumably included defensively; confirm against vmR3TrySetState. */
        if (rc >= 7)
            SSMR3Cancel(pVM);
    }

    /*
     * Check the state.
     */
    VMSTATE enmVMState = VMR3GetState(pVM);
    AssertMsgReturn(   enmVMState == VMSTATE_POWERING_OFF
                    || enmVMState == VMSTATE_POWERING_OFF_LS,
                    ("%s\n", VMR3GetStateName(enmVMState)),
                    VERR_VM_INVALID_VM_STATE);

    /*
     * EMT(0) does the actual power off work here *after* all the other EMTs
     * have been thru and entered the STOPPED state.
     */
    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STOPPED);
    if (pVCpu->idCpu == 0)
    {
        /*
         * For debugging purposes, we will log a summary of the guest state at this point.
         */
        if (enmVMState != VMSTATE_GURU_MEDITATION)
        {
            /** @todo SMP support? */
            /** @todo make the state dumping at VMR3PowerOff optional. */
            /* Buffer the release log while dumping so the output stays contiguous. */
            bool fOldBuffered = RTLogRelSetBuffering(true /*fBuffered*/);
            RTLogRelPrintf("****************** Guest state at power off ******************\n");
            DBGFR3Info(pVM, "cpumguest", "verbose", DBGFR3InfoLogRelHlp());
            RTLogRelPrintf("***\n");
            DBGFR3Info(pVM, "mode", NULL, DBGFR3InfoLogRelHlp());
            RTLogRelPrintf("***\n");
            DBGFR3Info(pVM, "activetimers", NULL, DBGFR3InfoLogRelHlp());
            RTLogRelPrintf("***\n");
            DBGFR3Info(pVM, "gdt", NULL, DBGFR3InfoLogRelHlp());
            /** @todo dump guest call stack. */
#if 1 // "temporary" while debugging #1589
            /* Dump a bit of low guest memory when ss:esp points into the
               first 64K (real-mode-ish setups), to help diagnose boot hangs. */
            RTLogRelPrintf("***\n");
            uint32_t esp = CPUMGetGuestESP(pVCpu);
            if (    CPUMGetGuestSS(pVCpu) == 0
                &&  esp < _64K)
            {
                uint8_t abBuf[PAGE_SIZE];
                RTLogRelPrintf("***\n"
                               "ss:sp=0000:%04x ", esp);
                uint32_t Start = esp & ~(uint32_t)63;
                int rc = PGMPhysSimpleReadGCPhys(pVM, abBuf, Start, 0x100);
                if (RT_SUCCESS(rc))
                    RTLogRelPrintf("0000:%04x TO 0000:%04x:\n"
                                   "%.*Rhxd\n",
                                   Start, Start + 0x100 - 1,
                                   0x100, abBuf);
                else
                    RTLogRelPrintf("rc=%Rrc\n", rc);

                /* grub ... */
                if (esp < 0x2000 && esp > 0x1fc0)
                {
                    rc = PGMPhysSimpleReadGCPhys(pVM, abBuf, 0x8000, 0x800);
                    if (RT_SUCCESS(rc))
                        RTLogRelPrintf("0000:8000 TO 0000:87ff:\n"
                                       "%.*Rhxd\n",
                                       0x800, abBuf);
                }
                /* microsoft cdrom hang ... */
                /* NOTE(review): the label below says 2000:0000 (phys 0x20000)
                   but the read is from GCPhys 0x8000 - one of the two looks
                   wrong; confirm the intended address. */
                if (true)
                {
                    rc = PGMPhysSimpleReadGCPhys(pVM, abBuf, 0x8000, 0x200);
                    if (RT_SUCCESS(rc))
                        RTLogRelPrintf("2000:0000 TO 2000:01ff:\n"
                                       "%.*Rhxd\n",
                                       0x200, abBuf);
                }
            }
#endif
            RTLogRelSetBuffering(fOldBuffered);
            RTLogRelPrintf("************** End of Guest state at power off ***************\n");
        }

        /*
         * Perform the power off notifications and advance the state to
         * Off or OffLS.
         */
        PDMR3PowerOff(pVM);

        PUVM pUVM = pVM->pUVM;
        RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
        enmVMState = pVM->enmVMState;
        if (enmVMState == VMSTATE_POWERING_OFF_LS)
            vmR3SetStateLocked(pVM, pUVM, VMSTATE_OFF_LS, VMSTATE_POWERING_OFF_LS);
        else
            vmR3SetStateLocked(pVM, pUVM, VMSTATE_OFF,    VMSTATE_POWERING_OFF);
        RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
    }
    return VINF_EM_OFF;
}
2240
2241
2242/**
2243 * Power off the VM.
2244 *
2245 * @returns VBox status code. When called on EMT, this will be a strict status
2246 * code that has to be propagated up the call stack.
2247 *
2248 * @param pVM The handle of the VM to be powered off.
2249 *
2250 * @thread Any thread.
2251 * @vmstate Suspended, Running, Guru Meditation, Load Failure
2252 * @vmstateto Off or OffLS
2253 */
2254VMMR3DECL(int) VMR3PowerOff(PVM pVM)
2255{
2256 LogFlow(("VMR3PowerOff: pVM=%p\n", pVM));
2257 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
2258
2259 /*
2260 * Gather all the EMTs to make sure there are no races before
2261 * changing the VM state.
2262 */
2263 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
2264 vmR3PowerOff, NULL);
2265 LogFlow(("VMR3PowerOff: returns %Rrc\n", rc));
2266 return rc;
2267}
2268
2269
2270/**
2271 * Destroys the VM.
2272 *
2273 * The VM must be powered off (or never really powered on) to call this
 * function. The VM handle is destroyed and can no longer be used upon
 * successful return.
2276 *
2277 * @returns VBox status code.
2278 *
2279 * @param pVM The handle of the VM which should be destroyed.
2280 *
2281 * @thread Any none emulation thread.
2282 * @vmstate Off, Created
2283 * @vmstateto N/A
2284 */
VMMR3DECL(int) VMR3Destroy(PVM pVM)
{
    LogFlow(("VMR3Destroy: pVM=%p\n", pVM));

    /*
     * Validate input.  NULL is tolerated (returns an error), and calling
     * from an EMT is forbidden since the EMTs are torn down below.
     */
    if (!pVM)
        return VERR_INVALID_VM_HANDLE;
    VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
    AssertLogRelReturn(!VM_IS_EMT(pVM), VERR_VM_THREAD_IS_EMT);

    /*
     * Change VM state to destroying and unlink the VM.
     * Only the Off state is accepted as a starting point.
     */
    int rc = vmR3TrySetState(pVM, "VMR3Destroy", 1, VMSTATE_DESTROYING, VMSTATE_OFF);
    if (RT_FAILURE(rc))
        return rc;

    /** @todo lock this when we start having multiple machines in a process... */
    /* Unlink the UVM from the global singly linked list (head or interior). */
    PUVM pUVM = pVM->pUVM; AssertPtr(pUVM);
    if (g_pUVMsHead == pUVM)
        g_pUVMsHead = pUVM->pNext;
    else
    {
        PUVM pPrev = g_pUVMsHead;
        while (pPrev && pPrev->pNext != pUVM)
            pPrev = pPrev->pNext;
        AssertMsgReturn(pPrev, ("pUVM=%p / pVM=%p is INVALID!\n", pUVM, pVM), VERR_INVALID_PARAMETER);

        pPrev->pNext = pUVM->pNext;
    }
    pUVM->pNext = NULL;

    /*
     * Notify registered at destruction listeners.
     */
    vmR3AtDtor(pVM);

    /*
     * Call vmR3Destroy on each of the EMTs ending with EMT(0) doing the bulk
     * of the cleanup.
     */
    /* vmR3Destroy on all EMTs, ending with EMT(0). */
    rc = VMR3ReqCallWaitU(pUVM, VMCPUID_ALL_REVERSE, (PFNRT)vmR3Destroy, 1, pVM);
    AssertLogRelRC(rc);

    /* Wait for EMTs and destroy the UVM (30 second EMT wait budget). */
    vmR3DestroyUVM(pUVM, 30000);

    LogFlow(("VMR3Destroy: returns VINF_SUCCESS\n"));
    return VINF_SUCCESS;
}
2338
2339
2340/**
2341 * Internal destruction worker.
2342 *
2343 * This is either called from VMR3Destroy via VMR3ReqCallU or from
2344 * vmR3EmulationThreadWithId when EMT(0) terminates after having called
2345 * VMR3Destroy().
2346 *
2347 * When called on EMT(0), it will performed the great bulk of the destruction.
2348 * When called on the other EMTs, they will do nothing and the whole purpose is
2349 * to return VINF_EM_TERMINATE so they break out of their run loops.
2350 *
2351 * @returns VINF_EM_TERMINATE.
2352 * @param pVM The VM handle.
2353 */
DECLCALLBACK(int) vmR3Destroy(PVM pVM)
{
    PUVM   pUVM  = pVM->pUVM;
    PVMCPU pVCpu = VMMGetCpu(pVM);
    Assert(pVCpu);
    LogFlow(("vmR3Destroy: pVM=%p pUVM=%p pVCpu=%p idCpu=%u\n", pVM, pUVM, pVCpu, pVCpu->idCpu));

    /*
     * Only VCPU 0 does the full cleanup (last).
     * All other EMTs just fall through and return VINF_EM_TERMINATE.
     */
    if (pVCpu->idCpu == 0)
    {
        /*
         * Dump statistics to the log.
         */
#if defined(VBOX_WITH_STATISTICS) || defined(LOG_ENABLED)
        RTLogFlags(NULL, "nodisabled nobuffered");
#endif
#ifdef VBOX_WITH_STATISTICS
        STAMR3Dump(pVM, "*");
#else
        LogRel(("************************* Statistics *************************\n"));
        STAMR3DumpToReleaseLog(pVM, "*");
        LogRel(("********************* End of statistics **********************\n"));
#endif

        /*
         * Destroy the VM components.
         * The order below is significant; do not reorder without care.
         */
        int rc = TMR3Term(pVM);
        AssertRC(rc);
#ifdef VBOX_WITH_DEBUGGER
        rc = DBGCTcpTerminate(pVM, pUVM->vm.s.pvDBGC);
        pUVM->vm.s.pvDBGC = NULL;
#endif
        /* NOTE(review): without VBOX_WITH_DEBUGGER this AssertRC re-checks the
           TMR3Term status (harmless, but looks like it belongs in the #ifdef). */
        AssertRC(rc);
        rc = FTMR3Term(pVM);
        AssertRC(rc);
        rc = DBGFR3Term(pVM);
        AssertRC(rc);
        rc = PDMR3Term(pVM);
        AssertRC(rc);
        rc = EMR3Term(pVM);
        AssertRC(rc);
        rc = IOMR3Term(pVM);
        AssertRC(rc);
        rc = CSAMR3Term(pVM);
        AssertRC(rc);
        rc = PATMR3Term(pVM);
        AssertRC(rc);
        rc = TRPMR3Term(pVM);
        AssertRC(rc);
        rc = SELMR3Term(pVM);
        AssertRC(rc);
        rc = REMR3Term(pVM);
        AssertRC(rc);
        rc = HWACCMR3Term(pVM);
        AssertRC(rc);
        rc = PGMR3Term(pVM);
        AssertRC(rc);
        rc = VMMR3Term(pVM); /* Terminates the ring-0 code! */
        AssertRC(rc);
        rc = CPUMR3Term(pVM);
        AssertRC(rc);
        SSMR3Term(pVM);
        rc = PDMR3CritSectTerm(pVM);
        AssertRC(rc);
        rc = MMR3Term(pVM);
        AssertRC(rc);

        /*
         * We're done, tell the other EMTs to quit.
         */
        ASMAtomicUoWriteBool(&pUVM->vm.s.fTerminateEMT, true);
        ASMAtomicWriteU32(&pVM->fGlobalForcedActions, VM_FF_CHECK_VM_STATE); /* Can't hurt... */
        LogFlow(("vmR3Destroy: returning %Rrc\n", VINF_EM_TERMINATE));
    }
    return VINF_EM_TERMINATE;
}
2433
2434
2435/**
2436 * Destroys the UVM portion.
2437 *
2438 * This is called as the final step in the VM destruction or as the cleanup
2439 * in case of a creation failure.
2440 *
 * @param pUVM              Pointer to the user mode VM structure.
2442 * @param cMilliesEMTWait The number of milliseconds to wait for the emulation
2443 * threads.
2444 */
2445static void vmR3DestroyUVM(PUVM pUVM, uint32_t cMilliesEMTWait)
2446{
2447 /*
2448 * Signal termination of each the emulation threads and
2449 * wait for them to complete.
2450 */
2451 /* Signal them. */
2452 ASMAtomicUoWriteBool(&pUVM->vm.s.fTerminateEMT, true);
2453 if (pUVM->pVM)
2454 VM_FF_SET(pUVM->pVM, VM_FF_CHECK_VM_STATE); /* Can't hurt... */
2455 for (VMCPUID i = 0; i < pUVM->cCpus; i++)
2456 {
2457 VMR3NotifyGlobalFFU(pUVM, VMNOTIFYFF_FLAGS_DONE_REM);
2458 RTSemEventSignal(pUVM->aCpus[i].vm.s.EventSemWait);
2459 }
2460
2461 /* Wait for them. */
2462 uint64_t NanoTS = RTTimeNanoTS();
2463 RTTHREAD hSelf = RTThreadSelf();
2464 ASMAtomicUoWriteBool(&pUVM->vm.s.fTerminateEMT, true);
2465 for (VMCPUID i = 0; i < pUVM->cCpus; i++)
2466 {
2467 RTTHREAD hThread = pUVM->aCpus[i].vm.s.ThreadEMT;
2468 if ( hThread != NIL_RTTHREAD
2469 && hThread != hSelf)
2470 {
2471 uint64_t cMilliesElapsed = (RTTimeNanoTS() - NanoTS) / 1000000;
2472 int rc2 = RTThreadWait(hThread,
2473 cMilliesElapsed < cMilliesEMTWait
2474 ? RT_MAX(cMilliesEMTWait - cMilliesElapsed, 2000)
2475 : 2000,
2476 NULL);
2477 if (rc2 == VERR_TIMEOUT) /* avoid the assertion when debugging. */
2478 rc2 = RTThreadWait(hThread, 1000, NULL);
2479 AssertLogRelMsgRC(rc2, ("i=%u rc=%Rrc\n", i, rc2));
2480 if (RT_SUCCESS(rc2))
2481 pUVM->aCpus[0].vm.s.ThreadEMT = NIL_RTTHREAD;
2482 }
2483 }
2484
2485 /* Cleanup the semaphores. */
2486 for (VMCPUID i = 0; i < pUVM->cCpus; i++)
2487 {
2488 RTSemEventDestroy(pUVM->aCpus[i].vm.s.EventSemWait);
2489 pUVM->aCpus[i].vm.s.EventSemWait = NIL_RTSEMEVENT;
2490 }
2491
2492 /*
2493 * Free the event semaphores associated with the request packets.
2494 */
2495 unsigned cReqs = 0;
2496 for (unsigned i = 0; i < RT_ELEMENTS(pUVM->vm.s.apReqFree); i++)
2497 {
2498 PVMREQ pReq = pUVM->vm.s.apReqFree[i];
2499 pUVM->vm.s.apReqFree[i] = NULL;
2500 for (; pReq; pReq = pReq->pNext, cReqs++)
2501 {
2502 pReq->enmState = VMREQSTATE_INVALID;
2503 RTSemEventDestroy(pReq->EventSem);
2504 }
2505 }
2506 Assert(cReqs == pUVM->vm.s.cReqFree); NOREF(cReqs);
2507
2508 /*
2509 * Kill all queued requests. (There really shouldn't be any!)
2510 */
2511 for (unsigned i = 0; i < 10; i++)
2512 {
2513 PVMREQ pReqHead = ASMAtomicXchgPtrT(&pUVM->vm.s.pReqs, NULL, PVMREQ);
2514 AssertMsg(!pReqHead, ("This isn't supposed to happen! VMR3Destroy caller has to serialize this.\n"));
2515 if (!pReqHead)
2516 break;
2517 for (PVMREQ pReq = pReqHead; pReq; pReq = pReq->pNext)
2518 {
2519 ASMAtomicUoWriteSize(&pReq->iStatus, VERR_INTERNAL_ERROR);
2520 ASMAtomicWriteSize(&pReq->enmState, VMREQSTATE_INVALID);
2521 RTSemEventSignal(pReq->EventSem);
2522 RTThreadSleep(2);
2523 RTSemEventDestroy(pReq->EventSem);
2524 }
2525 /* give them a chance to respond before we free the request memory. */
2526 RTThreadSleep(32);
2527 }
2528
2529 /*
2530 * Now all queued VCPU requests (again, there shouldn't be any).
2531 */
2532 for (VMCPUID idCpu = 0; idCpu < pUVM->cCpus; idCpu++)
2533 {
2534 PUVMCPU pUVCpu = &pUVM->aCpus[idCpu];
2535
2536 for (unsigned i = 0; i < 10; i++)
2537 {
2538 PVMREQ pReqHead = ASMAtomicXchgPtrT(&pUVCpu->vm.s.pReqs, NULL, PVMREQ);
2539 AssertMsg(!pReqHead, ("This isn't supposed to happen! VMR3Destroy caller has to serialize this.\n"));
2540 if (!pReqHead)
2541 break;
2542 for (PVMREQ pReq = pReqHead; pReq; pReq = pReq->pNext)
2543 {
2544 ASMAtomicUoWriteSize(&pReq->iStatus, VERR_INTERNAL_ERROR);
2545 ASMAtomicWriteSize(&pReq->enmState, VMREQSTATE_INVALID);
2546 RTSemEventSignal(pReq->EventSem);
2547 RTThreadSleep(2);
2548 RTSemEventDestroy(pReq->EventSem);
2549 }
2550 /* give them a chance to respond before we free the request memory. */
2551 RTThreadSleep(32);
2552 }
2553 }
2554
2555 /*
2556 * Make sure the VMMR0.r0 module and whatever else is unloaded.
2557 */
2558 PDMR3TermUVM(pUVM);
2559
2560 /*
2561 * Terminate the support library if initialized.
2562 */
2563 if (pUVM->vm.s.pSession)
2564 {
2565 int rc = SUPR3Term(false /*fForced*/);
2566 AssertRC(rc);
2567 pUVM->vm.s.pSession = NIL_RTR0PTR;
2568 }
2569
2570 /*
2571 * Release the UVM structure reference.
2572 */
2573 VMR3ReleaseUVM(pUVM);
2574
2575 /*
2576 * Clean up and flush logs.
2577 */
2578#ifdef LOG_ENABLED
2579 RTLogSetCustomPrefixCallback(NULL, NULL, NULL);
2580#endif
2581 RTLogFlush(NULL);
2582}
2583
2584
2585/**
2586 * Enumerates the VMs in this process.
2587 *
2588 * @returns Pointer to the next VM.
2589 * @returns NULL when no more VMs.
2590 * @param pVMPrev The previous VM
2591 * Use NULL to start the enumeration.
2592 */
2593VMMR3DECL(PVM) VMR3EnumVMs(PVM pVMPrev)
2594{
2595 /*
2596 * This is quick and dirty. It has issues with VM being
2597 * destroyed during the enumeration.
2598 */
2599 PUVM pNext;
2600 if (pVMPrev)
2601 pNext = pVMPrev->pUVM->pNext;
2602 else
2603 pNext = g_pUVMsHead;
2604 return pNext ? pNext->pVM : NULL;
2605}
2606
2607
2608/**
2609 * Registers an at VM destruction callback.
2610 *
2611 * @returns VBox status code.
2612 * @param pfnAtDtor Pointer to callback.
2613 * @param pvUser User argument.
2614 */
2615VMMR3DECL(int) VMR3AtDtorRegister(PFNVMATDTOR pfnAtDtor, void *pvUser)
2616{
2617 /*
2618 * Check if already registered.
2619 */
2620 VM_ATDTOR_LOCK();
2621 PVMATDTOR pCur = g_pVMAtDtorHead;
2622 while (pCur)
2623 {
2624 if (pfnAtDtor == pCur->pfnAtDtor)
2625 {
2626 VM_ATDTOR_UNLOCK();
2627 AssertMsgFailed(("Already registered at destruction callback %p!\n", pfnAtDtor));
2628 return VERR_INVALID_PARAMETER;
2629 }
2630
2631 /* next */
2632 pCur = pCur->pNext;
2633 }
2634 VM_ATDTOR_UNLOCK();
2635
2636 /*
2637 * Allocate new entry.
2638 */
2639 PVMATDTOR pVMAtDtor = (PVMATDTOR)RTMemAlloc(sizeof(*pVMAtDtor));
2640 if (!pVMAtDtor)
2641 return VERR_NO_MEMORY;
2642
2643 VM_ATDTOR_LOCK();
2644 pVMAtDtor->pfnAtDtor = pfnAtDtor;
2645 pVMAtDtor->pvUser = pvUser;
2646 pVMAtDtor->pNext = g_pVMAtDtorHead;
2647 g_pVMAtDtorHead = pVMAtDtor;
2648 VM_ATDTOR_UNLOCK();
2649
2650 return VINF_SUCCESS;
2651}
2652
2653
2654/**
2655 * Deregisters an at VM destruction callback.
2656 *
2657 * @returns VBox status code.
2658 * @param pfnAtDtor Pointer to callback.
2659 */
2660VMMR3DECL(int) VMR3AtDtorDeregister(PFNVMATDTOR pfnAtDtor)
2661{
2662 /*
2663 * Find it, unlink it and free it.
2664 */
2665 VM_ATDTOR_LOCK();
2666 PVMATDTOR pPrev = NULL;
2667 PVMATDTOR pCur = g_pVMAtDtorHead;
2668 while (pCur)
2669 {
2670 if (pfnAtDtor == pCur->pfnAtDtor)
2671 {
2672 if (pPrev)
2673 pPrev->pNext = pCur->pNext;
2674 else
2675 g_pVMAtDtorHead = pCur->pNext;
2676 pCur->pNext = NULL;
2677 VM_ATDTOR_UNLOCK();
2678
2679 RTMemFree(pCur);
2680 return VINF_SUCCESS;
2681 }
2682
2683 /* next */
2684 pPrev = pCur;
2685 pCur = pCur->pNext;
2686 }
2687 VM_ATDTOR_UNLOCK();
2688
2689 return VERR_INVALID_PARAMETER;
2690}
2691
2692
2693/**
2694 * Walks the list of at VM destructor callbacks.
2695 * @param pVM The VM which is about to be destroyed.
2696 */
static void vmR3AtDtor(PVM pVM)
{
    /*
     * Walk the list and invoke each registered callback under the lock.
     * (Entries are not unlinked or freed here - the original comment
     * "Find it, unlink it and free it" was a copy-paste error.)
     */
    VM_ATDTOR_LOCK();
    for (PVMATDTOR pCur = g_pVMAtDtorHead; pCur; pCur = pCur->pNext)
        pCur->pfnAtDtor(pVM, pCur->pvUser);
    VM_ATDTOR_UNLOCK();
}
2707
2708
2709/**
2710 * Worker which checks integrity of some internal structures.
2711 * This is yet another attempt to track down that AVL tree crash.
2712 */
static void vmR3CheckIntegrity(PVM pVM)
{
#ifdef VBOX_STRICT
    /* Strict builds only: run the PGM integrity check and release-assert on
       failure.  In non-strict builds this function is a no-op. */
    int rc = PGMR3CheckIntegrity(pVM);
    AssertReleaseRC(rc);
#endif
}
2720
2721
2722/**
2723 * EMT rendezvous worker for VMR3Reset.
2724 *
2725 * This is called by the emulation threads as a response to the reset request
2726 * issued by VMR3Reset().
2727 *
2728 * @returns VERR_VM_INVALID_VM_STATE, VINF_EM_RESET or VINF_EM_SUSPEND. (This
2729 * is a strict return code, see FNVMMEMTRENDEZVOUS.)
2730 *
2731 * @param pVM The VM handle.
2732 * @param pVCpu The VMCPU handle of the EMT.
2733 * @param pvUser Ignored.
2734 */
static DECLCALLBACK(VBOXSTRICTRC) vmR3Reset(PVM pVM, PVMCPU pVCpu, void *pvUser)
{
    Assert(!pvUser); NOREF(pvUser);

    /*
     * The first EMT will try change the state to resetting.  If this fails,
     * we won't get called for the other EMTs.
     * (Descending rendezvous: the first EMT through is id cCpus - 1.)
     */
    if (pVCpu->idCpu == pVM->cCpus - 1)
    {
        int rc = vmR3TrySetState(pVM, "VMR3Reset", 3,
                                 VMSTATE_RESETTING,    VMSTATE_RUNNING,
                                 VMSTATE_RESETTING,    VMSTATE_SUSPENDED,
                                 VMSTATE_RESETTING_LS, VMSTATE_RUNNING_LS);
        if (RT_FAILURE(rc))
            return rc;
    }

    /*
     * Check the state.
     */
    VMSTATE enmVMState = VMR3GetState(pVM);
    AssertLogRelMsgReturn(   enmVMState == VMSTATE_RESETTING
                          || enmVMState == VMSTATE_RESETTING_LS,
                          ("%s\n", VMR3GetStateName(enmVMState)),
                          VERR_INTERNAL_ERROR_4);

    /*
     * EMT(0) does the full cleanup *after* all the other EMTs has been
     * thru here and been told to enter the EMSTATE_WAIT_SIPI state.
     *
     * Because there are per-cpu reset routines and order may/is important,
     * the following sequence looks a bit ugly...
     */
    if (pVCpu->idCpu == 0)
        vmR3CheckIntegrity(pVM);

    /* Reset the VCpu state. */
    VMCPU_ASSERT_STATE(pVCpu, VMCPUSTATE_STARTED);

    /* Clear all pending forced actions. */
    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_ALL_MASK & ~VMCPU_FF_REQUEST);

    /*
     * Reset the VM components.  Order is significant; see the inline
     * comments and @bugref{4467} below.
     */
    if (pVCpu->idCpu == 0)
    {
        PATMR3Reset(pVM);
        CSAMR3Reset(pVM);
        PGMR3Reset(pVM);                /* We clear VM RAM in PGMR3Reset. It's vital PDMR3Reset is executed
                                         * _afterwards_. E.g. ACPI sets up RAM tables during init/reset. */
/** @todo PGMR3Reset should be called after PDMR3Reset really, because we'll trash OS <-> hardware
 * communication structures residing in RAM when done in the other order.  I.e. the device must be
 * quiesced first, then we clear the memory and plan tables. Probably have to make these things
 * explicit in some way, some memory setup pass or something.
 * (Example: DevAHCI may assert if memory is zeroed before it has read the FIS.)
 *
 * @bugref{4467}
 */
        MMR3Reset(pVM);
        PDMR3Reset(pVM);
        SELMR3Reset(pVM);
        TRPMR3Reset(pVM);
        REMR3Reset(pVM);
        IOMR3Reset(pVM);
        CPUMR3Reset(pVM);
    }
    /* Per-VCPU reset, done on every EMT. */
    CPUMR3ResetCpu(pVCpu);
    if (pVCpu->idCpu == 0)
    {
        TMR3Reset(pVM);
        EMR3Reset(pVM);
        HWACCMR3Reset(pVM);                 /* This must come *after* PATM, CSAM, CPUM, SELM and TRPM. */

#ifdef LOG_ENABLED
        /*
         * Debug logging.
         */
        RTLogPrintf("\n\nThe VM was reset:\n");
        DBGFR3Info(pVM, "cpum", "verbose", NULL);
#endif

        /*
         * Since EMT(0) is the last to go thru here, it will advance the state.
         * When a live save is active, we will move on to SuspendingLS but
         * leave it for VMR3Reset to do the actual suspending due to deadlock risks.
         */
        PUVM pUVM = pVM->pUVM;
        RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
        enmVMState = pVM->enmVMState;
        if (enmVMState == VMSTATE_RESETTING)
        {
            if (pUVM->vm.s.enmPrevVMState == VMSTATE_SUSPENDED)
                vmR3SetStateLocked(pVM, pUVM, VMSTATE_SUSPENDED, VMSTATE_RESETTING);
            else
                vmR3SetStateLocked(pVM, pUVM, VMSTATE_RUNNING,   VMSTATE_RESETTING);
        }
        else
            vmR3SetStateLocked(pVM, pUVM, VMSTATE_SUSPENDING_LS, VMSTATE_RESETTING_LS);
        RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);

        vmR3CheckIntegrity(pVM);

        /*
         * Do the suspend bit as well.
         * It only requires some EMT(0) work at present.
         */
        if (enmVMState != VMSTATE_RESETTING)
        {
            vmR3SuspendDoWork(pVM);
            vmR3SetState(pVM, VMSTATE_SUSPENDED_LS, VMSTATE_SUSPENDING_LS);
        }
    }

    return enmVMState == VMSTATE_RESETTING
         ? VINF_EM_RESET
         : VINF_EM_SUSPEND; /** @todo VINF_EM_SUSPEND has lower priority than VINF_EM_RESET, so fix races. Perhaps add a new code for this combined case. */
}
2854
2855
2856/**
2857 * Reset the current VM.
2858 *
2859 * @returns VBox status code.
2860 * @param pVM VM to reset.
2861 */
2862VMMR3DECL(int) VMR3Reset(PVM pVM)
2863{
2864 LogFlow(("VMR3Reset:\n"));
2865 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
2866
2867 /*
2868 * Gather all the EMTs to make sure there are no races before
2869 * changing the VM state.
2870 */
2871 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
2872 vmR3Reset, NULL);
2873 LogFlow(("VMR3Reset: returns %Rrc\n", rc));
2874 return rc;
2875}
2876
2877
2878/**
2879 * Gets the user mode VM structure pointer given the VM handle.
2880 *
2881 * @returns Pointer to the user mode VM structure on success. NULL if @a pVM is
2882 * invalid (asserted).
2883 * @param pVM The VM handle.
2884 * @sa VMR3GetVM, VMR3RetainUVM
2885 */
2886VMMR3DECL(PUVM) VMR3GetUVM(PVM pVM)
2887{
2888 VM_ASSERT_VALID_EXT_RETURN(pVM, NULL);
2889 return pVM->pUVM;
2890}
2891
2892
2893/**
2894 * Gets the shared VM structure pointer given the pointer to the user mode VM
2895 * structure.
2896 *
2897 * @returns Pointer to the shared VM structure.
2898 * NULL if @a pUVM is invalid (asserted) or if no shared VM structure
2899 * is currently associated with it.
2900 * @param pUVM The user mode VM handle.
2901 * @sa VMR3GetUVM
2902 */
2903VMMR3DECL(PVM) VMR3GetVM(PUVM pUVM)
2904{
2905 UVM_ASSERT_VALID_EXT_RETURN(pUVM, NULL);
2906 return pUVM->pVM;
2907}
2908
2909
2910/**
2911 * Retain the user mode VM handle.
2912 *
2913 * @returns Reference count.
2914 * UINT32_MAX if @a pUVM is invalid.
2915 *
2916 * @param pUVM The user mode VM handle.
2917 * @sa VMR3ReleaseUVM
2918 */
2919VMMR3DECL(uint32_t) VMR3RetainUVM(PUVM pUVM)
2920{
2921 UVM_ASSERT_VALID_EXT_RETURN(pUVM, UINT32_MAX);
2922 uint32_t cRefs = ASMAtomicIncU32(&pUVM->vm.s.cUvmRefs);
2923 AssertMsg(cRefs > 0 && cRefs < _64K, ("%u\n", cRefs));
2924 return cRefs;
2925}
2926
2927
2928/**
2929 * Does the final release of the UVM structure.
2930 *
2931 * @param pUVM The user mode VM handle.
2932 */
2933static void vmR3DoReleaseUVM(PUVM pUVM)
2934{
2935 /*
2936 * Free the UVM.
2937 */
2938 Assert(!pUVM->pVM);
2939
2940 MMR3TermUVM(pUVM);
2941 STAMR3TermUVM(pUVM);
2942
2943 ASMAtomicUoWriteU32(&pUVM->u32Magic, UINT32_MAX);
2944 RTTlsFree(pUVM->vm.s.idxTLS);
2945 RTMemPageFree(pUVM, RT_OFFSETOF(UVM, aCpus[pUVM->cCpus]));
2946}
2947
2948
2949/**
2950 * Releases a refernece to the mode VM handle.
2951 *
2952 * @returns The new reference count, 0 if destroyed.
2953 * UINT32_MAX if @a pUVM is invalid.
2954 *
2955 * @param pUVM The user mode VM handle.
2956 * @sa VMR3RetainUVM
2957 */
2958VMMR3DECL(uint32_t) VMR3ReleaseUVM(PUVM pUVM)
2959{
2960 if (!pUVM)
2961 return 0;
2962 UVM_ASSERT_VALID_EXT_RETURN(pUVM, UINT32_MAX);
2963 uint32_t cRefs = ASMAtomicDecU32(&pUVM->vm.s.cUvmRefs);
2964 if (!cRefs)
2965 vmR3DoReleaseUVM(pUVM);
2966 else
2967 AssertMsg(cRefs < _64K, ("%u\n", cRefs));
2968 return cRefs;
2969}
2970
2971
2972/**
2973 * Gets the VM name.
2974 *
2975 * @returns Pointer to a read-only string containing the name. NULL if called
2976 * too early.
2977 * @param pUVM The user mode VM handle.
2978 */
2979VMMR3DECL(const char *) VMR3GetName(PUVM pUVM)
2980{
2981 UVM_ASSERT_VALID_EXT_RETURN(pUVM, NULL);
2982 return pUVM->vm.s.pszName;
2983}
2984
2985
2986/**
2987 * Gets the VM UUID.
2988 *
2989 * @returns pUuid on success, NULL on failure.
2990 * @param pUVM The user mode VM handle.
2991 * @param pUuid Where to store the UUID.
2992 */
2993VMMR3DECL(PRTUUID) VMR3GetUuid(PUVM pUVM, PRTUUID pUuid)
2994{
2995 UVM_ASSERT_VALID_EXT_RETURN(pUVM, NULL);
2996 AssertPtrReturn(pUuid, NULL);
2997
2998 *pUuid = pUVM->vm.s.Uuid;
2999 return pUuid;
3000}
3001
3002
3003/**
3004 * Gets the current VM state.
3005 *
3006 * @returns The current VM state.
3007 * @param pVM VM handle.
3008 * @thread Any
3009 */
3010VMMR3DECL(VMSTATE) VMR3GetState(PVM pVM)
3011{
3012 VM_ASSERT_VALID_EXT_RETURN(pVM, VMSTATE_TERMINATED);
3013 return pVM->enmVMState;
3014}
3015
3016
3017/**
3018 * Gets the current VM state.
3019 *
3020 * @returns The current VM state.
3021 * @param pUVM The user-mode VM handle.
3022 * @thread Any
3023 */
3024VMMR3DECL(VMSTATE) VMR3GetStateU(PUVM pUVM)
3025{
3026 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VMSTATE_TERMINATED);
3027 if (RT_UNLIKELY(!pUVM->pVM))
3028 return VMSTATE_TERMINATED;
3029 return pUVM->pVM->enmVMState;
3030}
3031
3032
3033/**
3034 * Gets the state name string for a VM state.
3035 *
3036 * @returns Pointer to the state name. (readonly)
3037 * @param enmState The state.
3038 */
3039VMMR3DECL(const char *) VMR3GetStateName(VMSTATE enmState)
3040{
3041 switch (enmState)
3042 {
3043 case VMSTATE_CREATING: return "CREATING";
3044 case VMSTATE_CREATED: return "CREATED";
3045 case VMSTATE_LOADING: return "LOADING";
3046 case VMSTATE_POWERING_ON: return "POWERING_ON";
3047 case VMSTATE_RESUMING: return "RESUMING";
3048 case VMSTATE_RUNNING: return "RUNNING";
3049 case VMSTATE_RUNNING_LS: return "RUNNING_LS";
3050 case VMSTATE_RUNNING_FT: return "RUNNING_FT";
3051 case VMSTATE_RESETTING: return "RESETTING";
3052 case VMSTATE_RESETTING_LS: return "RESETTING_LS";
3053 case VMSTATE_SUSPENDED: return "SUSPENDED";
3054 case VMSTATE_SUSPENDED_LS: return "SUSPENDED_LS";
3055 case VMSTATE_SUSPENDED_EXT_LS: return "SUSPENDED_EXT_LS";
3056 case VMSTATE_SUSPENDING: return "SUSPENDING";
3057 case VMSTATE_SUSPENDING_LS: return "SUSPENDING_LS";
3058 case VMSTATE_SUSPENDING_EXT_LS: return "SUSPENDING_EXT_LS";
3059 case VMSTATE_SAVING: return "SAVING";
3060 case VMSTATE_DEBUGGING: return "DEBUGGING";
3061 case VMSTATE_DEBUGGING_LS: return "DEBUGGING_LS";
3062 case VMSTATE_POWERING_OFF: return "POWERING_OFF";
3063 case VMSTATE_POWERING_OFF_LS: return "POWERING_OFF_LS";
3064 case VMSTATE_FATAL_ERROR: return "FATAL_ERROR";
3065 case VMSTATE_FATAL_ERROR_LS: return "FATAL_ERROR_LS";
3066 case VMSTATE_GURU_MEDITATION: return "GURU_MEDITATION";
3067 case VMSTATE_GURU_MEDITATION_LS:return "GURU_MEDITATION_LS";
3068 case VMSTATE_LOAD_FAILURE: return "LOAD_FAILURE";
3069 case VMSTATE_OFF: return "OFF";
3070 case VMSTATE_OFF_LS: return "OFF_LS";
3071 case VMSTATE_DESTROYING: return "DESTROYING";
3072 case VMSTATE_TERMINATED: return "TERMINATED";
3073
3074 default:
3075 AssertMsgFailed(("Unknown state %d\n", enmState));
3076 return "Unknown!\n";
3077 }
3078}
3079
3080
3081/**
3082 * Validates the state transition in strict builds.
3083 *
3084 * @returns true if valid, false if not.
3085 *
3086 * @param enmStateOld The old (current) state.
3087 * @param enmStateNew The proposed new state.
3088 *
3089 * @remarks The reference for this is found in doc/vp/VMM.vpp, the VMSTATE
3090 * diagram (under State Machine Diagram).
3091 */
3092static bool vmR3ValidateStateTransition(VMSTATE enmStateOld, VMSTATE enmStateNew)
3093{
3094#ifdef VBOX_STRICT
3095 switch (enmStateOld)
3096 {
3097 case VMSTATE_CREATING:
3098 AssertMsgReturn(enmStateNew == VMSTATE_CREATED, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3099 break;
3100
3101 case VMSTATE_CREATED:
3102 AssertMsgReturn( enmStateNew == VMSTATE_LOADING
3103 || enmStateNew == VMSTATE_POWERING_ON
3104 || enmStateNew == VMSTATE_POWERING_OFF
3105 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3106 break;
3107
3108 case VMSTATE_LOADING:
3109 AssertMsgReturn( enmStateNew == VMSTATE_SUSPENDED
3110 || enmStateNew == VMSTATE_LOAD_FAILURE
3111 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3112 break;
3113
3114 case VMSTATE_POWERING_ON:
3115 AssertMsgReturn( enmStateNew == VMSTATE_RUNNING
3116 /*|| enmStateNew == VMSTATE_FATAL_ERROR ?*/
3117 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3118 break;
3119
3120 case VMSTATE_RESUMING:
3121 AssertMsgReturn( enmStateNew == VMSTATE_RUNNING
3122 /*|| enmStateNew == VMSTATE_FATAL_ERROR ?*/
3123 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3124 break;
3125
3126 case VMSTATE_RUNNING:
3127 AssertMsgReturn( enmStateNew == VMSTATE_POWERING_OFF
3128 || enmStateNew == VMSTATE_SUSPENDING
3129 || enmStateNew == VMSTATE_RESETTING
3130 || enmStateNew == VMSTATE_RUNNING_LS
3131 || enmStateNew == VMSTATE_RUNNING_FT
3132 || enmStateNew == VMSTATE_DEBUGGING
3133 || enmStateNew == VMSTATE_FATAL_ERROR
3134 || enmStateNew == VMSTATE_GURU_MEDITATION
3135 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3136 break;
3137
3138 case VMSTATE_RUNNING_LS:
3139 AssertMsgReturn( enmStateNew == VMSTATE_POWERING_OFF_LS
3140 || enmStateNew == VMSTATE_SUSPENDING_LS
3141 || enmStateNew == VMSTATE_SUSPENDING_EXT_LS
3142 || enmStateNew == VMSTATE_RESETTING_LS
3143 || enmStateNew == VMSTATE_RUNNING
3144 || enmStateNew == VMSTATE_DEBUGGING_LS
3145 || enmStateNew == VMSTATE_FATAL_ERROR_LS
3146 || enmStateNew == VMSTATE_GURU_MEDITATION_LS
3147 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3148 break;
3149
3150 case VMSTATE_RUNNING_FT:
3151 AssertMsgReturn( enmStateNew == VMSTATE_POWERING_OFF
3152 || enmStateNew == VMSTATE_FATAL_ERROR
3153 || enmStateNew == VMSTATE_GURU_MEDITATION
3154 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3155 break;
3156
3157 case VMSTATE_RESETTING:
3158 AssertMsgReturn(enmStateNew == VMSTATE_RUNNING, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3159 break;
3160
3161 case VMSTATE_RESETTING_LS:
3162 AssertMsgReturn( enmStateNew == VMSTATE_SUSPENDING_LS
3163 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3164 break;
3165
3166 case VMSTATE_SUSPENDING:
3167 AssertMsgReturn(enmStateNew == VMSTATE_SUSPENDED, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3168 break;
3169
3170 case VMSTATE_SUSPENDING_LS:
3171 AssertMsgReturn( enmStateNew == VMSTATE_SUSPENDING
3172 || enmStateNew == VMSTATE_SUSPENDED_LS
3173 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3174 break;
3175
3176 case VMSTATE_SUSPENDING_EXT_LS:
3177 AssertMsgReturn( enmStateNew == VMSTATE_SUSPENDING
3178 || enmStateNew == VMSTATE_SUSPENDED_EXT_LS
3179 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3180 break;
3181
3182 case VMSTATE_SUSPENDED:
3183 AssertMsgReturn( enmStateNew == VMSTATE_POWERING_OFF
3184 || enmStateNew == VMSTATE_SAVING
3185 || enmStateNew == VMSTATE_RESETTING
3186 || enmStateNew == VMSTATE_RESUMING
3187 || enmStateNew == VMSTATE_LOADING
3188 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3189 break;
3190
3191 case VMSTATE_SUSPENDED_LS:
3192 AssertMsgReturn( enmStateNew == VMSTATE_SUSPENDED
3193 || enmStateNew == VMSTATE_SAVING
3194 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3195 break;
3196
3197 case VMSTATE_SUSPENDED_EXT_LS:
3198 AssertMsgReturn( enmStateNew == VMSTATE_SUSPENDED
3199 || enmStateNew == VMSTATE_SAVING
3200 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3201 break;
3202
3203 case VMSTATE_SAVING:
3204 AssertMsgReturn(enmStateNew == VMSTATE_SUSPENDED, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3205 break;
3206
3207 case VMSTATE_DEBUGGING:
3208 AssertMsgReturn( enmStateNew == VMSTATE_RUNNING
3209 || enmStateNew == VMSTATE_POWERING_OFF
3210 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3211 break;
3212
3213 case VMSTATE_DEBUGGING_LS:
3214 AssertMsgReturn( enmStateNew == VMSTATE_DEBUGGING
3215 || enmStateNew == VMSTATE_RUNNING_LS
3216 || enmStateNew == VMSTATE_POWERING_OFF_LS
3217 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3218 break;
3219
3220 case VMSTATE_POWERING_OFF:
3221 AssertMsgReturn(enmStateNew == VMSTATE_OFF, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3222 break;
3223
3224 case VMSTATE_POWERING_OFF_LS:
3225 AssertMsgReturn( enmStateNew == VMSTATE_POWERING_OFF
3226 || enmStateNew == VMSTATE_OFF_LS
3227 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3228 break;
3229
3230 case VMSTATE_OFF:
3231 AssertMsgReturn(enmStateNew == VMSTATE_DESTROYING, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3232 break;
3233
3234 case VMSTATE_OFF_LS:
3235 AssertMsgReturn(enmStateNew == VMSTATE_OFF, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3236 break;
3237
3238 case VMSTATE_FATAL_ERROR:
3239 AssertMsgReturn(enmStateNew == VMSTATE_POWERING_OFF, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3240 break;
3241
3242 case VMSTATE_FATAL_ERROR_LS:
3243 AssertMsgReturn( enmStateNew == VMSTATE_FATAL_ERROR
3244 || enmStateNew == VMSTATE_POWERING_OFF_LS
3245 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3246 break;
3247
3248 case VMSTATE_GURU_MEDITATION:
3249 AssertMsgReturn( enmStateNew == VMSTATE_DEBUGGING
3250 || enmStateNew == VMSTATE_POWERING_OFF
3251 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3252 break;
3253
3254 case VMSTATE_GURU_MEDITATION_LS:
3255 AssertMsgReturn( enmStateNew == VMSTATE_GURU_MEDITATION
3256 || enmStateNew == VMSTATE_DEBUGGING_LS
3257 || enmStateNew == VMSTATE_POWERING_OFF_LS
3258 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3259 break;
3260
3261 case VMSTATE_LOAD_FAILURE:
3262 AssertMsgReturn(enmStateNew == VMSTATE_POWERING_OFF, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3263 break;
3264
3265 case VMSTATE_DESTROYING:
3266 AssertMsgReturn(enmStateNew == VMSTATE_TERMINATED, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3267 break;
3268
3269 case VMSTATE_TERMINATED:
3270 default:
3271 AssertMsgFailedReturn(("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3272 break;
3273 }
3274#endif /* VBOX_STRICT */
3275 return true;
3276}
3277
3278
3279/**
3280 * Does the state change callouts.
3281 *
3282 * The caller owns the AtStateCritSect.
3283 *
3284 * @param pVM The VM handle.
3285 * @param pUVM The UVM handle.
3286 * @param enmStateNew The New state.
3287 * @param enmStateOld The old state.
3288 */
3289static void vmR3DoAtState(PVM pVM, PUVM pUVM, VMSTATE enmStateNew, VMSTATE enmStateOld)
3290{
3291 LogRel(("Changing the VM state from '%s' to '%s'.\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)));
3292
3293 for (PVMATSTATE pCur = pUVM->vm.s.pAtState; pCur; pCur = pCur->pNext)
3294 {
3295 pCur->pfnAtState(pVM, enmStateNew, enmStateOld, pCur->pvUser);
3296 if ( enmStateNew != VMSTATE_DESTROYING
3297 && pVM->enmVMState == VMSTATE_DESTROYING)
3298 break;
3299 AssertMsg(pVM->enmVMState == enmStateNew,
3300 ("You are not allowed to change the state while in the change callback, except "
3301 "from destroying the VM. There are restrictions in the way the state changes "
3302 "are propagated up to the EM execution loop and it makes the program flow very "
3303 "difficult to follow. (%s, expected %s, old %s)\n",
3304 VMR3GetStateName(pVM->enmVMState), VMR3GetStateName(enmStateNew),
3305 VMR3GetStateName(enmStateOld)));
3306 }
3307}
3308
3309
3310/**
3311 * Sets the current VM state, with the AtStatCritSect already entered.
3312 *
3313 * @param pVM The VM handle.
3314 * @param pUVM The UVM handle.
3315 * @param enmStateNew The new state.
3316 * @param enmStateOld The old state.
3317 */
3318static void vmR3SetStateLocked(PVM pVM, PUVM pUVM, VMSTATE enmStateNew, VMSTATE enmStateOld)
3319{
3320 vmR3ValidateStateTransition(enmStateOld, enmStateNew);
3321
3322 AssertMsg(pVM->enmVMState == enmStateOld,
3323 ("%s != %s\n", VMR3GetStateName(pVM->enmVMState), VMR3GetStateName(enmStateOld)));
3324 pUVM->vm.s.enmPrevVMState = enmStateOld;
3325 pVM->enmVMState = enmStateNew;
3326 VM_FF_CLEAR(pVM, VM_FF_CHECK_VM_STATE);
3327
3328 vmR3DoAtState(pVM, pUVM, enmStateNew, enmStateOld);
3329}
3330
3331
3332/**
3333 * Sets the current VM state.
3334 *
3335 * @param pVM VM handle.
3336 * @param enmStateNew The new state.
3337 * @param enmStateOld The old state (for asserting only).
3338 */
3339static void vmR3SetState(PVM pVM, VMSTATE enmStateNew, VMSTATE enmStateOld)
3340{
3341 PUVM pUVM = pVM->pUVM;
3342 RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
3343
3344 AssertMsg(pVM->enmVMState == enmStateOld,
3345 ("%s != %s\n", VMR3GetStateName(pVM->enmVMState), VMR3GetStateName(enmStateOld)));
3346 vmR3SetStateLocked(pVM, pUVM, enmStateNew, pVM->enmVMState);
3347
3348 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
3349}
3350
3351
3352/**
3353 * Tries to perform a state transition.
3354 *
3355 * @returns The 1-based ordinal of the succeeding transition.
3356 * VERR_VM_INVALID_VM_STATE and Assert+LogRel on failure.
3357 *
3358 * @param pVM The VM handle.
3359 * @param pszWho Who is trying to change it.
3360 * @param cTransitions The number of transitions in the ellipsis.
3361 * @param ... Transition pairs; new, old.
3362 */
3363static int vmR3TrySetState(PVM pVM, const char *pszWho, unsigned cTransitions, ...)
3364{
3365 va_list va;
3366 VMSTATE enmStateNew = VMSTATE_CREATED;
3367 VMSTATE enmStateOld = VMSTATE_CREATED;
3368
3369#ifdef VBOX_STRICT
3370 /*
3371 * Validate the input first.
3372 */
3373 va_start(va, cTransitions);
3374 for (unsigned i = 0; i < cTransitions; i++)
3375 {
3376 enmStateNew = (VMSTATE)va_arg(va, /*VMSTATE*/int);
3377 enmStateOld = (VMSTATE)va_arg(va, /*VMSTATE*/int);
3378 vmR3ValidateStateTransition(enmStateOld, enmStateNew);
3379 }
3380 va_end(va);
3381#endif
3382
3383 /*
3384 * Grab the lock and see if any of the proposed transitions works out.
3385 */
3386 va_start(va, cTransitions);
3387 int rc = VERR_VM_INVALID_VM_STATE;
3388 PUVM pUVM = pVM->pUVM;
3389 RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
3390
3391 VMSTATE enmStateCur = pVM->enmVMState;
3392
3393 for (unsigned i = 0; i < cTransitions; i++)
3394 {
3395 enmStateNew = (VMSTATE)va_arg(va, /*VMSTATE*/int);
3396 enmStateOld = (VMSTATE)va_arg(va, /*VMSTATE*/int);
3397 if (enmStateCur == enmStateOld)
3398 {
3399 vmR3SetStateLocked(pVM, pUVM, enmStateNew, enmStateOld);
3400 rc = i + 1;
3401 break;
3402 }
3403 }
3404
3405 if (RT_FAILURE(rc))
3406 {
3407 /*
3408 * Complain about it.
3409 */
3410 if (cTransitions == 1)
3411 {
3412 LogRel(("%s: %s -> %s failed, because the VM state is actually %s\n",
3413 pszWho, VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew), VMR3GetStateName(enmStateCur)));
3414 VMSetError(pVM, VERR_VM_INVALID_VM_STATE, RT_SRC_POS,
3415 N_("%s failed because the VM state is %s instead of %s"),
3416 pszWho, VMR3GetStateName(enmStateCur), VMR3GetStateName(enmStateOld));
3417 AssertMsgFailed(("%s: %s -> %s failed, because the VM state is actually %s\n",
3418 pszWho, VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew), VMR3GetStateName(enmStateCur)));
3419 }
3420 else
3421 {
3422 va_end(va);
3423 va_start(va, cTransitions);
3424 LogRel(("%s:\n", pszWho));
3425 for (unsigned i = 0; i < cTransitions; i++)
3426 {
3427 enmStateNew = (VMSTATE)va_arg(va, /*VMSTATE*/int);
3428 enmStateOld = (VMSTATE)va_arg(va, /*VMSTATE*/int);
3429 LogRel(("%s%s -> %s",
3430 i ? ", " : " ", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)));
3431 }
3432 LogRel((" failed, because the VM state is actually %s\n", VMR3GetStateName(enmStateCur)));
3433 VMSetError(pVM, VERR_VM_INVALID_VM_STATE, RT_SRC_POS,
3434 N_("%s failed because the current VM state, %s, was not found in the state transition table"),
3435 pszWho, VMR3GetStateName(enmStateCur), VMR3GetStateName(enmStateOld));
3436 AssertMsgFailed(("%s - state=%s, see release log for full details. Check the cTransitions passed us.\n",
3437 pszWho, VMR3GetStateName(enmStateCur)));
3438 }
3439 }
3440
3441 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
3442 va_end(va);
3443 Assert(rc > 0 || rc < 0);
3444 return rc;
3445}
3446
3447
3448/**
3449 * Flag a guru meditation ... a hack.
3450 *
3451 * @param pVM The VM handle
3452 *
3453 * @todo Rewrite this part. The guru meditation should be flagged
3454 * immediately by the VMM and not by VMEmt.cpp when it's all over.
3455 */
3456void vmR3SetGuruMeditation(PVM pVM)
3457{
3458 PUVM pUVM = pVM->pUVM;
3459 RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
3460
3461 VMSTATE enmStateCur = pVM->enmVMState;
3462 if (enmStateCur == VMSTATE_RUNNING)
3463 vmR3SetStateLocked(pVM, pUVM, VMSTATE_GURU_MEDITATION, VMSTATE_RUNNING);
3464 else if (enmStateCur == VMSTATE_RUNNING_LS)
3465 {
3466 vmR3SetStateLocked(pVM, pUVM, VMSTATE_GURU_MEDITATION_LS, VMSTATE_RUNNING_LS);
3467 SSMR3Cancel(pVM);
3468 }
3469
3470 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
3471}
3472
3473
3474/**
3475 * Called by vmR3EmulationThreadWithId just before the VM structure is freed.
3476 *
3477 * @param pVM The VM handle.
3478 */
3479void vmR3SetTerminated(PVM pVM)
3480{
3481 vmR3SetState(pVM, VMSTATE_TERMINATED, VMSTATE_DESTROYING);
3482}
3483
3484
3485/**
3486 * Checks if the VM was teleported and hasn't been fully resumed yet.
3487 *
3488 * This applies to both sides of the teleportation since we may leave a working
3489 * clone behind and the user is allowed to resume this...
3490 *
3491 * @returns true / false.
3492 * @param pVM The VM handle.
3493 * @thread Any thread.
3494 */
3495VMMR3DECL(bool) VMR3TeleportedAndNotFullyResumedYet(PVM pVM)
3496{
3497 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
3498 return pVM->vm.s.fTeleportedAndNotFullyResumedYet;
3499}
3500
3501
3502/**
3503 * Registers a VM state change callback.
3504 *
3505 * You are not allowed to call any function which changes the VM state from a
3506 * state callback.
3507 *
3508 * @returns VBox status code.
3509 * @param pVM VM handle.
3510 * @param pfnAtState Pointer to callback.
3511 * @param pvUser User argument.
3512 * @thread Any.
3513 */
3514VMMR3DECL(int) VMR3AtStateRegister(PVM pVM, PFNVMATSTATE pfnAtState, void *pvUser)
3515{
3516 LogFlow(("VMR3AtStateRegister: pfnAtState=%p pvUser=%p\n", pfnAtState, pvUser));
3517
3518 /*
3519 * Validate input.
3520 */
3521 AssertPtrReturn(pfnAtState, VERR_INVALID_PARAMETER);
3522 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
3523
3524 /*
3525 * Allocate a new record.
3526 */
3527 PUVM pUVM = pVM->pUVM;
3528 PVMATSTATE pNew = (PVMATSTATE)MMR3HeapAllocU(pUVM, MM_TAG_VM, sizeof(*pNew));
3529 if (!pNew)
3530 return VERR_NO_MEMORY;
3531
3532 /* fill */
3533 pNew->pfnAtState = pfnAtState;
3534 pNew->pvUser = pvUser;
3535
3536 /* insert */
3537 RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
3538 pNew->pNext = *pUVM->vm.s.ppAtStateNext;
3539 *pUVM->vm.s.ppAtStateNext = pNew;
3540 pUVM->vm.s.ppAtStateNext = &pNew->pNext;
3541 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
3542
3543 return VINF_SUCCESS;
3544}
3545
3546
3547/**
3548 * Deregisters a VM state change callback.
3549 *
3550 * @returns VBox status code.
3551 * @param pVM VM handle.
3552 * @param pfnAtState Pointer to callback.
3553 * @param pvUser User argument.
3554 * @thread Any.
3555 */
3556VMMR3DECL(int) VMR3AtStateDeregister(PVM pVM, PFNVMATSTATE pfnAtState, void *pvUser)
3557{
3558 LogFlow(("VMR3AtStateDeregister: pfnAtState=%p pvUser=%p\n", pfnAtState, pvUser));
3559
3560 /*
3561 * Validate input.
3562 */
3563 AssertPtrReturn(pfnAtState, VERR_INVALID_PARAMETER);
3564 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
3565
3566 PUVM pUVM = pVM->pUVM;
3567 RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
3568
3569 /*
3570 * Search the list for the entry.
3571 */
3572 PVMATSTATE pPrev = NULL;
3573 PVMATSTATE pCur = pUVM->vm.s.pAtState;
3574 while ( pCur
3575 && ( pCur->pfnAtState != pfnAtState
3576 || pCur->pvUser != pvUser))
3577 {
3578 pPrev = pCur;
3579 pCur = pCur->pNext;
3580 }
3581 if (!pCur)
3582 {
3583 AssertMsgFailed(("pfnAtState=%p was not found\n", pfnAtState));
3584 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
3585 return VERR_FILE_NOT_FOUND;
3586 }
3587
3588 /*
3589 * Unlink it.
3590 */
3591 if (pPrev)
3592 {
3593 pPrev->pNext = pCur->pNext;
3594 if (!pCur->pNext)
3595 pUVM->vm.s.ppAtStateNext = &pPrev->pNext;
3596 }
3597 else
3598 {
3599 pUVM->vm.s.pAtState = pCur->pNext;
3600 if (!pCur->pNext)
3601 pUVM->vm.s.ppAtStateNext = &pUVM->vm.s.pAtState;
3602 }
3603
3604 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
3605
3606 /*
3607 * Free it.
3608 */
3609 pCur->pfnAtState = NULL;
3610 pCur->pNext = NULL;
3611 MMR3HeapFree(pCur);
3612
3613 return VINF_SUCCESS;
3614}
3615
3616
3617/**
3618 * Registers a VM error callback.
3619 *
3620 * @returns VBox status code.
3621 * @param pVM The VM handle.
3622 * @param pfnAtError Pointer to callback.
3623 * @param pvUser User argument.
3624 * @thread Any.
3625 */
3626VMMR3DECL(int) VMR3AtErrorRegister(PVM pVM, PFNVMATERROR pfnAtError, void *pvUser)
3627{
3628 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
3629 return VMR3AtErrorRegisterU(pVM->pUVM, pfnAtError, pvUser);
3630}
3631
3632
3633/**
3634 * Registers a VM error callback.
3635 *
3636 * @returns VBox status code.
3637 * @param pUVM The VM handle.
3638 * @param pfnAtError Pointer to callback.
3639 * @param pvUser User argument.
3640 * @thread Any.
3641 */
3642VMMR3DECL(int) VMR3AtErrorRegisterU(PUVM pUVM, PFNVMATERROR pfnAtError, void *pvUser)
3643{
3644 LogFlow(("VMR3AtErrorRegister: pfnAtError=%p pvUser=%p\n", pfnAtError, pvUser));
3645
3646 /*
3647 * Validate input.
3648 */
3649 AssertPtrReturn(pfnAtError, VERR_INVALID_PARAMETER);
3650 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
3651
3652 /*
3653 * Allocate a new record.
3654 */
3655 PVMATERROR pNew = (PVMATERROR)MMR3HeapAllocU(pUVM, MM_TAG_VM, sizeof(*pNew));
3656 if (!pNew)
3657 return VERR_NO_MEMORY;
3658
3659 /* fill */
3660 pNew->pfnAtError = pfnAtError;
3661 pNew->pvUser = pvUser;
3662
3663 /* insert */
3664 RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
3665 pNew->pNext = *pUVM->vm.s.ppAtErrorNext;
3666 *pUVM->vm.s.ppAtErrorNext = pNew;
3667 pUVM->vm.s.ppAtErrorNext = &pNew->pNext;
3668 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
3669
3670 return VINF_SUCCESS;
3671}
3672
3673
3674/**
3675 * Deregisters a VM error callback.
3676 *
3677 * @returns VBox status code.
3678 * @param pVM The VM handle.
3679 * @param pfnAtError Pointer to callback.
3680 * @param pvUser User argument.
3681 * @thread Any.
3682 */
3683VMMR3DECL(int) VMR3AtErrorDeregister(PVM pVM, PFNVMATERROR pfnAtError, void *pvUser)
3684{
3685 LogFlow(("VMR3AtErrorDeregister: pfnAtError=%p pvUser=%p\n", pfnAtError, pvUser));
3686
3687 /*
3688 * Validate input.
3689 */
3690 AssertPtrReturn(pfnAtError, VERR_INVALID_PARAMETER);
3691 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
3692
3693 PUVM pUVM = pVM->pUVM;
3694 RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
3695
3696 /*
3697 * Search the list for the entry.
3698 */
3699 PVMATERROR pPrev = NULL;
3700 PVMATERROR pCur = pUVM->vm.s.pAtError;
3701 while ( pCur
3702 && ( pCur->pfnAtError != pfnAtError
3703 || pCur->pvUser != pvUser))
3704 {
3705 pPrev = pCur;
3706 pCur = pCur->pNext;
3707 }
3708 if (!pCur)
3709 {
3710 AssertMsgFailed(("pfnAtError=%p was not found\n", pfnAtError));
3711 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
3712 return VERR_FILE_NOT_FOUND;
3713 }
3714
3715 /*
3716 * Unlink it.
3717 */
3718 if (pPrev)
3719 {
3720 pPrev->pNext = pCur->pNext;
3721 if (!pCur->pNext)
3722 pUVM->vm.s.ppAtErrorNext = &pPrev->pNext;
3723 }
3724 else
3725 {
3726 pUVM->vm.s.pAtError = pCur->pNext;
3727 if (!pCur->pNext)
3728 pUVM->vm.s.ppAtErrorNext = &pUVM->vm.s.pAtError;
3729 }
3730
3731 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
3732
3733 /*
3734 * Free it.
3735 */
3736 pCur->pfnAtError = NULL;
3737 pCur->pNext = NULL;
3738 MMR3HeapFree(pCur);
3739
3740 return VINF_SUCCESS;
3741}
3742
3743
3744/**
3745 * Ellipsis to va_list wrapper for calling pfnAtError.
3746 */
3747static void vmR3SetErrorWorkerDoCall(PVM pVM, PVMATERROR pCur, int rc, RT_SRC_POS_DECL, const char *pszFormat, ...)
3748{
3749 va_list va;
3750 va_start(va, pszFormat);
3751 pCur->pfnAtError(pVM, pCur->pvUser, rc, RT_SRC_POS_ARGS, pszFormat, va);
3752 va_end(va);
3753}
3754
3755
3756/**
3757 * This is a worker function for GC and Ring-0 calls to VMSetError and VMSetErrorV.
3758 * The message is found in VMINT.
3759 *
3760 * @param pVM The VM handle.
3761 * @thread EMT.
3762 */
3763VMMR3DECL(void) VMR3SetErrorWorker(PVM pVM)
3764{
3765 VM_ASSERT_EMT(pVM);
3766 AssertReleaseMsgFailed(("And we have a winner! You get to implement Ring-0 and GC VMSetErrorV! Contracts!\n"));
3767
3768 /*
3769 * Unpack the error (if we managed to format one).
3770 */
3771 PVMERROR pErr = pVM->vm.s.pErrorR3;
3772 const char *pszFile = NULL;
3773 const char *pszFunction = NULL;
3774 uint32_t iLine = 0;
3775 const char *pszMessage;
3776 int32_t rc = VERR_MM_HYPER_NO_MEMORY;
3777 if (pErr)
3778 {
3779 AssertCompile(sizeof(const char) == sizeof(uint8_t));
3780 if (pErr->offFile)
3781 pszFile = (const char *)pErr + pErr->offFile;
3782 iLine = pErr->iLine;
3783 if (pErr->offFunction)
3784 pszFunction = (const char *)pErr + pErr->offFunction;
3785 if (pErr->offMessage)
3786 pszMessage = (const char *)pErr + pErr->offMessage;
3787 else
3788 pszMessage = "No message!";
3789 }
3790 else
3791 pszMessage = "No message! (Failed to allocate memory to put the error message in!)";
3792
3793 /*
3794 * Call the at error callbacks.
3795 */
3796 PUVM pUVM = pVM->pUVM;
3797 RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
3798 ASMAtomicIncU32(&pUVM->vm.s.cRuntimeErrors);
3799 for (PVMATERROR pCur = pUVM->vm.s.pAtError; pCur; pCur = pCur->pNext)
3800 vmR3SetErrorWorkerDoCall(pVM, pCur, rc, RT_SRC_POS_ARGS, "%s", pszMessage);
3801 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
3802}
3803
3804
3805/**
3806 * Gets the number of errors raised via VMSetError.
3807 *
3808 * This can be used avoid double error messages.
3809 *
3810 * @returns The error count.
3811 * @param pVM The VM handle.
3812 */
3813VMMR3DECL(uint32_t) VMR3GetErrorCount(PVM pVM)
3814{
3815 AssertPtrReturn(pVM, 0);
3816 return VMR3GetErrorCountU(pVM->pUVM);
3817}
3818
3819
3820/**
3821 * Gets the number of errors raised via VMSetError.
3822 *
3823 * This can be used avoid double error messages.
3824 *
3825 * @returns The error count.
3826 * @param pVM The VM handle.
3827 */
3828VMMR3DECL(uint32_t) VMR3GetErrorCountU(PUVM pUVM)
3829{
3830 AssertPtrReturn(pUVM, 0);
3831 AssertReturn(pUVM->u32Magic == UVM_MAGIC, 0);
3832 return pUVM->vm.s.cErrors;
3833}
3834
3835
3836/**
3837 * Creation time wrapper for vmR3SetErrorUV.
3838 *
3839 * @returns rc.
3840 * @param pUVM Pointer to the user mode VM structure.
3841 * @param rc The VBox status code.
3842 * @param RT_SRC_POS_DECL The source position of this error.
3843 * @param pszFormat Format string.
3844 * @param ... The arguments.
3845 * @thread Any thread.
3846 */
3847static int vmR3SetErrorU(PUVM pUVM, int rc, RT_SRC_POS_DECL, const char *pszFormat, ...)
3848{
3849 va_list va;
3850 va_start(va, pszFormat);
3851 vmR3SetErrorUV(pUVM, rc, pszFile, iLine, pszFunction, pszFormat, &va);
3852 va_end(va);
3853 return rc;
3854}
3855
3856
3857/**
3858 * Worker which calls everyone listening to the VM error messages.
3859 *
3860 * @param pUVM Pointer to the user mode VM structure.
3861 * @param rc The VBox status code.
3862 * @param RT_SRC_POS_DECL The source position of this error.
3863 * @param pszFormat Format string.
3864 * @param pArgs Pointer to the format arguments.
3865 * @thread EMT
3866 */
3867DECLCALLBACK(void) vmR3SetErrorUV(PUVM pUVM, int rc, RT_SRC_POS_DECL, const char *pszFormat, va_list *pArgs)
3868{
3869 /*
3870 * Log the error.
3871 */
3872 va_list va3;
3873 va_copy(va3, *pArgs);
3874 RTLogRelPrintf("VMSetError: %s(%d) %s; rc=%Rrc\n"
3875 "VMSetError: %N\n",
3876 pszFile, iLine, pszFunction, rc,
3877 pszFormat, &va3);
3878 va_end(va3);
3879
3880#ifdef LOG_ENABLED
3881 va_copy(va3, *pArgs);
3882 RTLogPrintf("VMSetError: %s(%d) %s; rc=%Rrc\n"
3883 "%N\n",
3884 pszFile, iLine, pszFunction, rc,
3885 pszFormat, &va3);
3886 va_end(va3);
3887#endif
3888
3889 /*
3890 * Make a copy of the message.
3891 */
3892 if (pUVM->pVM)
3893 vmSetErrorCopy(pUVM->pVM, rc, RT_SRC_POS_ARGS, pszFormat, *pArgs);
3894
3895 /*
3896 * Call the at error callbacks.
3897 */
3898 bool fCalledSomeone = false;
3899 RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
3900 ASMAtomicIncU32(&pUVM->vm.s.cErrors);
3901 for (PVMATERROR pCur = pUVM->vm.s.pAtError; pCur; pCur = pCur->pNext)
3902 {
3903 va_list va2;
3904 va_copy(va2, *pArgs);
3905 pCur->pfnAtError(pUVM->pVM, pCur->pvUser, rc, RT_SRC_POS_ARGS, pszFormat, va2);
3906 va_end(va2);
3907 fCalledSomeone = true;
3908 }
3909 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
3910}
3911
3912
3913/**
3914 * Registers a VM runtime error callback.
3915 *
3916 * @returns VBox status code.
3917 * @param pVM The VM handle.
3918 * @param pfnAtRuntimeError Pointer to callback.
3919 * @param pvUser User argument.
3920 * @thread Any.
3921 */
3922VMMR3DECL(int) VMR3AtRuntimeErrorRegister(PVM pVM, PFNVMATRUNTIMEERROR pfnAtRuntimeError, void *pvUser)
3923{
3924 LogFlow(("VMR3AtRuntimeErrorRegister: pfnAtRuntimeError=%p pvUser=%p\n", pfnAtRuntimeError, pvUser));
3925
3926 /*
3927 * Validate input.
3928 */
3929 AssertPtrReturn(pfnAtRuntimeError, VERR_INVALID_PARAMETER);
3930 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
3931
3932 /*
3933 * Allocate a new record.
3934 */
3935 PUVM pUVM = pVM->pUVM;
3936 PVMATRUNTIMEERROR pNew = (PVMATRUNTIMEERROR)MMR3HeapAllocU(pUVM, MM_TAG_VM, sizeof(*pNew));
3937 if (!pNew)
3938 return VERR_NO_MEMORY;
3939
3940 /* fill */
3941 pNew->pfnAtRuntimeError = pfnAtRuntimeError;
3942 pNew->pvUser = pvUser;
3943
3944 /* insert */
3945 RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
3946 pNew->pNext = *pUVM->vm.s.ppAtRuntimeErrorNext;
3947 *pUVM->vm.s.ppAtRuntimeErrorNext = pNew;
3948 pUVM->vm.s.ppAtRuntimeErrorNext = &pNew->pNext;
3949 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
3950
3951 return VINF_SUCCESS;
3952}
3953
3954
3955/**
3956 * Deregisters a VM runtime error callback.
3957 *
3958 * @returns VBox status code.
3959 * @param pVM The VM handle.
3960 * @param pfnAtRuntimeError Pointer to callback.
3961 * @param pvUser User argument.
3962 * @thread Any.
3963 */
3964VMMR3DECL(int) VMR3AtRuntimeErrorDeregister(PVM pVM, PFNVMATRUNTIMEERROR pfnAtRuntimeError, void *pvUser)
3965{
3966 LogFlow(("VMR3AtRuntimeErrorDeregister: pfnAtRuntimeError=%p pvUser=%p\n", pfnAtRuntimeError, pvUser));
3967
3968 /*
3969 * Validate input.
3970 */
3971 AssertPtrReturn(pfnAtRuntimeError, VERR_INVALID_PARAMETER);
3972 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
3973
3974 PUVM pUVM = pVM->pUVM;
3975 RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
3976
3977 /*
3978 * Search the list for the entry.
3979 */
3980 PVMATRUNTIMEERROR pPrev = NULL;
3981 PVMATRUNTIMEERROR pCur = pUVM->vm.s.pAtRuntimeError;
3982 while ( pCur
3983 && ( pCur->pfnAtRuntimeError != pfnAtRuntimeError
3984 || pCur->pvUser != pvUser))
3985 {
3986 pPrev = pCur;
3987 pCur = pCur->pNext;
3988 }
3989 if (!pCur)
3990 {
3991 AssertMsgFailed(("pfnAtRuntimeError=%p was not found\n", pfnAtRuntimeError));
3992 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
3993 return VERR_FILE_NOT_FOUND;
3994 }
3995
3996 /*
3997 * Unlink it.
3998 */
3999 if (pPrev)
4000 {
4001 pPrev->pNext = pCur->pNext;
4002 if (!pCur->pNext)
4003 pUVM->vm.s.ppAtRuntimeErrorNext = &pPrev->pNext;
4004 }
4005 else
4006 {
4007 pUVM->vm.s.pAtRuntimeError = pCur->pNext;
4008 if (!pCur->pNext)
4009 pUVM->vm.s.ppAtRuntimeErrorNext = &pUVM->vm.s.pAtRuntimeError;
4010 }
4011
4012 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
4013
4014 /*
4015 * Free it.
4016 */
4017 pCur->pfnAtRuntimeError = NULL;
4018 pCur->pNext = NULL;
4019 MMR3HeapFree(pCur);
4020
4021 return VINF_SUCCESS;
4022}
4023
4024
4025/**
4026 * EMT rendezvous worker that vmR3SetRuntimeErrorCommon uses to safely change
4027 * the state to FatalError(LS).
4028 *
4029 * @returns VERR_VM_INVALID_VM_STATE or VINF_EM_SUSPEND. (This is a strict
4030 * return code, see FNVMMEMTRENDEZVOUS.)
4031 *
4032 * @param pVM The VM handle.
4033 * @param pVCpu The VMCPU handle of the EMT.
4034 * @param pvUser Ignored.
4035 */
4036static DECLCALLBACK(VBOXSTRICTRC) vmR3SetRuntimeErrorChangeState(PVM pVM, PVMCPU pVCpu, void *pvUser)
4037{
4038 NOREF(pVCpu);
4039 Assert(!pvUser); NOREF(pvUser);
4040
4041 /*
4042 * The first EMT thru here changes the state.
4043 */
4044 if (pVCpu->idCpu == pVM->cCpus - 1)
4045 {
4046 int rc = vmR3TrySetState(pVM, "VMSetRuntimeError", 2,
4047 VMSTATE_FATAL_ERROR, VMSTATE_RUNNING,
4048 VMSTATE_FATAL_ERROR_LS, VMSTATE_RUNNING_LS);
4049 if (RT_FAILURE(rc))
4050 return rc;
4051 if (rc == 2)
4052 SSMR3Cancel(pVM);
4053
4054 VM_FF_SET(pVM, VM_FF_CHECK_VM_STATE);
4055 }
4056
4057 /* This'll make sure we get out of whereever we are (e.g. REM). */
4058 return VINF_EM_SUSPEND;
4059}
4060
4061
4062/**
4063 * Worker for VMR3SetRuntimeErrorWorker and vmR3SetRuntimeErrorV.
4064 *
4065 * This does the common parts after the error has been saved / retrieved.
4066 *
4067 * @returns VBox status code with modifications, see VMSetRuntimeErrorV.
4068 *
4069 * @param pVM The VM handle.
4070 * @param fFlags The error flags.
4071 * @param pszErrorId Error ID string.
4072 * @param pszFormat Format string.
4073 * @param pVa Pointer to the format arguments.
4074 */
4075static int vmR3SetRuntimeErrorCommon(PVM pVM, uint32_t fFlags, const char *pszErrorId, const char *pszFormat, va_list *pVa)
4076{
4077 LogRel(("VM: Raising runtime error '%s' (fFlags=%#x)\n", pszErrorId, fFlags));
4078
4079 /*
4080 * Take actions before the call.
4081 */
4082 int rc;
4083 if (fFlags & VMSETRTERR_FLAGS_FATAL)
4084 rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
4085 vmR3SetRuntimeErrorChangeState, NULL);
4086 else if (fFlags & VMSETRTERR_FLAGS_SUSPEND)
4087 rc = VMR3Suspend(pVM);
4088 else
4089 rc = VINF_SUCCESS;
4090
4091 /*
4092 * Do the callback round.
4093 */
4094 PUVM pUVM = pVM->pUVM;
4095 RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
4096 ASMAtomicIncU32(&pUVM->vm.s.cRuntimeErrors);
4097 for (PVMATRUNTIMEERROR pCur = pUVM->vm.s.pAtRuntimeError; pCur; pCur = pCur->pNext)
4098 {
4099 va_list va;
4100 va_copy(va, *pVa);
4101 pCur->pfnAtRuntimeError(pVM, pCur->pvUser, fFlags, pszErrorId, pszFormat, va);
4102 va_end(va);
4103 }
4104 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
4105
4106 return rc;
4107}
4108
4109
4110/**
4111 * Ellipsis to va_list wrapper for calling vmR3SetRuntimeErrorCommon.
4112 */
4113static int vmR3SetRuntimeErrorCommonF(PVM pVM, uint32_t fFlags, const char *pszErrorId, const char *pszFormat, ...)
4114{
4115 va_list va;
4116 va_start(va, pszFormat);
4117 int rc = vmR3SetRuntimeErrorCommon(pVM, fFlags, pszErrorId, pszFormat, &va);
4118 va_end(va);
4119 return rc;
4120}
4121
4122
4123/**
4124 * This is a worker function for RC and Ring-0 calls to VMSetError and
4125 * VMSetErrorV.
4126 *
4127 * The message is found in VMINT.
4128 *
4129 * @returns VBox status code, see VMSetRuntimeError.
4130 * @param pVM The VM handle.
4131 * @thread EMT.
4132 */
4133VMMR3DECL(int) VMR3SetRuntimeErrorWorker(PVM pVM)
4134{
4135 VM_ASSERT_EMT(pVM);
4136 AssertReleaseMsgFailed(("And we have a winner! You get to implement Ring-0 and GC VMSetRuntimeErrorV! Congrats!\n"));
4137
4138 /*
4139 * Unpack the error (if we managed to format one).
4140 */
4141 const char *pszErrorId = "SetRuntimeError";
4142 const char *pszMessage = "No message!";
4143 uint32_t fFlags = VMSETRTERR_FLAGS_FATAL;
4144 PVMRUNTIMEERROR pErr = pVM->vm.s.pRuntimeErrorR3;
4145 if (pErr)
4146 {
4147 AssertCompile(sizeof(const char) == sizeof(uint8_t));
4148 if (pErr->offErrorId)
4149 pszErrorId = (const char *)pErr + pErr->offErrorId;
4150 if (pErr->offMessage)
4151 pszMessage = (const char *)pErr + pErr->offMessage;
4152 fFlags = pErr->fFlags;
4153 }
4154
4155 /*
4156 * Join cause with vmR3SetRuntimeErrorV.
4157 */
4158 return vmR3SetRuntimeErrorCommonF(pVM, fFlags, pszErrorId, "%s", pszMessage);
4159}
4160
4161
4162/**
4163 * Worker for VMSetRuntimeErrorV for doing the job on EMT in ring-3.
4164 *
4165 * @returns VBox status code with modifications, see VMSetRuntimeErrorV.
4166 *
4167 * @param pVM The VM handle.
4168 * @param fFlags The error flags.
4169 * @param pszErrorId Error ID string.
4170 * @param pszMessage The error message residing the MM heap.
4171 *
4172 * @thread EMT
4173 */
4174DECLCALLBACK(int) vmR3SetRuntimeError(PVM pVM, uint32_t fFlags, const char *pszErrorId, char *pszMessage)
4175{
4176#if 0 /** @todo make copy of the error msg. */
4177 /*
4178 * Make a copy of the message.
4179 */
4180 va_list va2;
4181 va_copy(va2, *pVa);
4182 vmSetRuntimeErrorCopy(pVM, fFlags, pszErrorId, pszFormat, va2);
4183 va_end(va2);
4184#endif
4185
4186 /*
4187 * Join paths with VMR3SetRuntimeErrorWorker.
4188 */
4189 int rc = vmR3SetRuntimeErrorCommonF(pVM, fFlags, pszErrorId, "%s", pszMessage);
4190 MMR3HeapFree(pszMessage);
4191 return rc;
4192}
4193
4194
4195/**
4196 * Worker for VMSetRuntimeErrorV for doing the job on EMT in ring-3.
4197 *
4198 * @returns VBox status code with modifications, see VMSetRuntimeErrorV.
4199 *
4200 * @param pVM The VM handle.
4201 * @param fFlags The error flags.
4202 * @param pszErrorId Error ID string.
4203 * @param pszFormat Format string.
4204 * @param pVa Pointer to the format arguments.
4205 *
4206 * @thread EMT
4207 */
4208DECLCALLBACK(int) vmR3SetRuntimeErrorV(PVM pVM, uint32_t fFlags, const char *pszErrorId, const char *pszFormat, va_list *pVa)
4209{
4210 /*
4211 * Make a copy of the message.
4212 */
4213 va_list va2;
4214 va_copy(va2, *pVa);
4215 vmSetRuntimeErrorCopy(pVM, fFlags, pszErrorId, pszFormat, va2);
4216 va_end(va2);
4217
4218 /*
4219 * Join paths with VMR3SetRuntimeErrorWorker.
4220 */
4221 return vmR3SetRuntimeErrorCommon(pVM, fFlags, pszErrorId, pszFormat, pVa);
4222}
4223
4224
4225/**
4226 * Gets the number of runtime errors raised via VMR3SetRuntimeError.
4227 *
4228 * This can be used avoid double error messages.
4229 *
4230 * @returns The runtime error count.
4231 * @param pVM The VM handle.
4232 */
4233VMMR3DECL(uint32_t) VMR3GetRuntimeErrorCount(PVM pVM)
4234{
4235 return pVM->pUVM->vm.s.cRuntimeErrors;
4236}
4237
4238
4239/**
4240 * Gets the ID virtual of the virtual CPU associated with the calling thread.
4241 *
4242 * @returns The CPU ID. NIL_VMCPUID if the thread isn't an EMT.
4243 *
4244 * @param pVM The VM handle.
4245 */
4246VMMR3DECL(RTCPUID) VMR3GetVMCPUId(PVM pVM)
4247{
4248 PUVMCPU pUVCpu = (PUVMCPU)RTTlsGet(pVM->pUVM->vm.s.idxTLS);
4249 return pUVCpu
4250 ? pUVCpu->idCpu
4251 : NIL_VMCPUID;
4252}
4253
4254
4255/**
4256 * Returns the native handle of the current EMT VMCPU thread.
4257 *
4258 * @returns Handle if this is an EMT thread; NIL_RTNATIVETHREAD otherwise
4259 * @param pVM The VM handle.
4260 * @thread EMT
4261 */
4262VMMR3DECL(RTNATIVETHREAD) VMR3GetVMCPUNativeThread(PVM pVM)
4263{
4264 PUVMCPU pUVCpu = (PUVMCPU)RTTlsGet(pVM->pUVM->vm.s.idxTLS);
4265
4266 if (!pUVCpu)
4267 return NIL_RTNATIVETHREAD;
4268
4269 return pUVCpu->vm.s.NativeThreadEMT;
4270}
4271
4272
4273/**
4274 * Returns the native handle of the current EMT VMCPU thread.
4275 *
4276 * @returns Handle if this is an EMT thread; NIL_RTNATIVETHREAD otherwise
4277 * @param pVM The VM handle.
4278 * @thread EMT
4279 */
4280VMMR3DECL(RTNATIVETHREAD) VMR3GetVMCPUNativeThreadU(PUVM pUVM)
4281{
4282 PUVMCPU pUVCpu = (PUVMCPU)RTTlsGet(pUVM->vm.s.idxTLS);
4283
4284 if (!pUVCpu)
4285 return NIL_RTNATIVETHREAD;
4286
4287 return pUVCpu->vm.s.NativeThreadEMT;
4288}
4289
4290
4291/**
4292 * Returns the handle of the current EMT VMCPU thread.
4293 *
4294 * @returns Handle if this is an EMT thread; NIL_RTNATIVETHREAD otherwise
4295 * @param pVM The VM handle.
4296 * @thread EMT
4297 */
4298VMMR3DECL(RTTHREAD) VMR3GetVMCPUThread(PVM pVM)
4299{
4300 PUVMCPU pUVCpu = (PUVMCPU)RTTlsGet(pVM->pUVM->vm.s.idxTLS);
4301
4302 if (!pUVCpu)
4303 return NIL_RTTHREAD;
4304
4305 return pUVCpu->vm.s.ThreadEMT;
4306}
4307
4308
4309/**
4310 * Returns the handle of the current EMT VMCPU thread.
4311 *
4312 * @returns Handle if this is an EMT thread; NIL_RTNATIVETHREAD otherwise
4313 * @param pVM The VM handle.
4314 * @thread EMT
4315 */
4316VMMR3DECL(RTTHREAD) VMR3GetVMCPUThreadU(PUVM pUVM)
4317{
4318 PUVMCPU pUVCpu = (PUVMCPU)RTTlsGet(pUVM->vm.s.idxTLS);
4319
4320 if (!pUVCpu)
4321 return NIL_RTTHREAD;
4322
4323 return pUVCpu->vm.s.ThreadEMT;
4324}
4325
4326
4327/**
4328 * Return the package and core id of a CPU.
4329 *
4330 * @returns VBOX status code.
4331 * @param pVM The VM to operate on.
4332 * @param idCpu Virtual CPU to get the ID from.
4333 * @param pidCpuCore Where to store the core ID of the virtual CPU.
4334 * @param pidCpuPackage Where to store the package ID of the virtual CPU.
4335 *
4336 */
4337VMMR3DECL(int) VMR3GetCpuCoreAndPackageIdFromCpuId(PVM pVM, VMCPUID idCpu, uint32_t *pidCpuCore, uint32_t *pidCpuPackage)
4338{
4339 /*
4340 * Validate input.
4341 */
4342 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
4343 AssertPtrReturn(pidCpuCore, VERR_INVALID_POINTER);
4344 AssertPtrReturn(pidCpuPackage, VERR_INVALID_POINTER);
4345 if (idCpu >= pVM->cCpus)
4346 return VERR_INVALID_CPU_ID;
4347
4348 /*
4349 * Set return values.
4350 */
4351#ifdef VBOX_WITH_MULTI_CORE
4352 *pidCpuCore = idCpu;
4353 *pidCpuPackage = 0;
4354#else
4355 *pidCpuCore = 0;
4356 *pidCpuPackage = idCpu;
4357#endif
4358
4359 return VINF_SUCCESS;
4360}
4361
4362
4363/**
4364 * Worker for VMR3HotUnplugCpu.
4365 *
4366 * @returns VINF_EM_WAIT_SPIP (strict status code).
4367 * @param pVM The VM handle.
4368 * @param idCpu The current CPU.
4369 */
4370static DECLCALLBACK(int) vmR3HotUnplugCpu(PVM pVM, VMCPUID idCpu)
4371{
4372 PVMCPU pVCpu = VMMGetCpuById(pVM, idCpu);
4373 VMCPU_ASSERT_EMT(pVCpu);
4374
4375 /*
4376 * Reset per CPU resources.
4377 *
4378 * Actually only needed for VT-x because the CPU seems to be still in some
4379 * paged mode and startup fails after a new hot plug event. SVM works fine
4380 * even without this.
4381 */
4382 Log(("vmR3HotUnplugCpu for VCPU %u\n", idCpu));
4383 PGMR3ResetUnpluggedCpu(pVM, pVCpu);
4384 PDMR3ResetCpu(pVCpu);
4385 TRPMR3ResetCpu(pVCpu);
4386 CPUMR3ResetCpu(pVCpu);
4387 EMR3ResetCpu(pVCpu);
4388 HWACCMR3ResetCpu(pVCpu);
4389 return VINF_EM_WAIT_SIPI;
4390}
4391
4392
4393/**
4394 * Hot-unplugs a CPU from the guest.
4395 *
4396 * @returns VBox status code.
4397 * @param pVM The VM to operate on.
4398 * @param idCpu Virtual CPU to perform the hot unplugging operation on.
4399 */
4400VMMR3DECL(int) VMR3HotUnplugCpu(PVM pVM, VMCPUID idCpu)
4401{
4402 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
4403 AssertReturn(idCpu < pVM->cCpus, VERR_INVALID_CPU_ID);
4404
4405 /** @todo r=bird: Don't destroy the EMT, it'll break VMMR3EmtRendezvous and
4406 * broadcast requests. Just note down somewhere that the CPU is
4407 * offline and send it to SPIP wait. Maybe modify VMCPUSTATE and push
4408 * it out of the EM loops when offline. */
4409 return VMR3ReqCallNoWaitU(pVM->pUVM, idCpu, (PFNRT)vmR3HotUnplugCpu, 2, pVM, idCpu);
4410}
4411
4412
4413/**
4414 * Hot-plugs a CPU on the guest.
4415 *
4416 * @returns VBox status code.
4417 * @param pVM The VM to operate on.
4418 * @param idCpu Virtual CPU to perform the hot plugging operation on.
4419 */
4420VMMR3DECL(int) VMR3HotPlugCpu(PVM pVM, VMCPUID idCpu)
4421{
4422 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
4423 AssertReturn(idCpu < pVM->cCpus, VERR_INVALID_CPU_ID);
4424
4425 /** @todo r-bird: Just mark it online and make sure it waits on SPIP. */
4426 return VINF_SUCCESS;
4427}
4428
4429
4430/**
4431 * Changes the VMM execution cap.
4432 *
4433 * @returns VBox status code.
4434 * @param pVM The VM to operate on.
4435 * @param uCpuExecutionCap New CPU execution cap in precent, 1-100. Where
4436 * 100 is max performance (default).
4437 */
4438VMMR3DECL(int) VMR3SetCpuExecutionCap(PVM pVM, uint32_t uCpuExecutionCap)
4439{
4440 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
4441 AssertReturn(uCpuExecutionCap > 0 && uCpuExecutionCap <= 100, VERR_INVALID_PARAMETER);
4442
4443 Log(("VMR3SetCpuExecutionCap: new priority = %d\n", uCpuExecutionCap));
4444 /* Note: not called from EMT. */
4445 pVM->uCpuExecutionCap = uCpuExecutionCap;
4446 return VINF_SUCCESS;
4447}
4448
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette