VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/VM.cpp@ 36013

Last change on this file since 36013 was 35810, checked in by vboxsync, 14 years ago

VMM: Replace most VERR_VERSION_MISMATCH by more specific error statuses. Translating the errors returned by device, driver and USB device constructors into specific ones for the benefit of old extension pack and misc use of the status.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 150.8 KB
1/* $Id: VM.cpp 35810 2011-02-01 13:00:24Z vboxsync $ */
2/** @file
3 * VM - Virtual Machine
4 */
5
6/*
7 * Copyright (C) 2006-2010 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/** @page pg_vm VM API
19 *
20 * This is the encapsulating bit. It provides the APIs that Main and VBoxBFE
21 * use to create a VMM instance for running a guest in. It also provides
22 * facilities for queuing requests for execution in EMT (serialization purposes
23 * mostly) and for reporting errors back to the VMM user (Main/VBoxBFE).
24 *
25 *
26 * @section sec_vm_design Design Critique / Things To Do
27 *
28 * In hindsight this component is a big design mistake; all this stuff really
29 * belongs in the VMM component. It just seemed like an ok idea at a time
30 * when the VMM bit was still kind of vague. 'VM' also happened to be the name
31 * of the per-VM instance structure (see vm.h), so it kind of made sense.
32 * However, as it turned out, VMM(.cpp) is almost empty; all it provides in
33 * ring-3 is some minor functionality and some "routing" services.
34 *
35 * Fixing this is just a matter of some more or less straightforward
36 * refactoring; the question is just when someone will get to it. Moving the EMT
37 * would be a good start.
38 *
39 */
40
41/*******************************************************************************
42* Header Files *
43*******************************************************************************/
44#define LOG_GROUP LOG_GROUP_VM
45#include <VBox/vmm/cfgm.h>
46#include <VBox/vmm/vmm.h>
47#include <VBox/vmm/gvmm.h>
48#include <VBox/vmm/mm.h>
49#include <VBox/vmm/cpum.h>
50#include <VBox/vmm/selm.h>
51#include <VBox/vmm/trpm.h>
52#include <VBox/vmm/dbgf.h>
53#include <VBox/vmm/pgm.h>
54#include <VBox/vmm/pdmapi.h>
55#include <VBox/vmm/pdmcritsect.h>
56#include <VBox/vmm/em.h>
57#include <VBox/vmm/rem.h>
58#include <VBox/vmm/tm.h>
59#include <VBox/vmm/stam.h>
60#include <VBox/vmm/patm.h>
61#include <VBox/vmm/csam.h>
62#include <VBox/vmm/iom.h>
63#include <VBox/vmm/ssm.h>
64#include <VBox/vmm/ftm.h>
65#include <VBox/vmm/hwaccm.h>
66#include "VMInternal.h"
67#include <VBox/vmm/vm.h>
68#include <VBox/vmm/uvm.h>
69
70#include <VBox/sup.h>
71#include <VBox/dbg.h>
72#include <VBox/err.h>
73#include <VBox/param.h>
74#include <VBox/log.h>
75#include <iprt/assert.h>
76#include <iprt/alloc.h>
77#include <iprt/asm.h>
78#include <iprt/env.h>
79#include <iprt/string.h>
80#include <iprt/time.h>
81#include <iprt/semaphore.h>
82#include <iprt/thread.h>
83
84
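/*
 * Illustrative sketch only: roughly how a VMM user like Main or VBoxBFE is
 * expected to drive the API described in the @page comment above. The
 * function name is made up, error handling is kept to a minimum, and the
 * CFGM constructor is assumed to be supplied by the caller.
 */
static int sketchCreateAndRunVM(PFNCFGMCONSTRUCTOR pfnCfgm, void *pvUserCfgm)
{
    PVM pVM = NULL;
    int rc = VMR3Create(1 /* cCpus */, NULL /* pVmm2UserMethods */,
                        NULL /* pfnVMAtError */, NULL /* pvUserVM */,
                        pfnCfgm, pvUserCfgm, &pVM);
    if (RT_SUCCESS(rc))
    {
        /* Created -> PoweringOn -> Running; the guest now executes on the EMTs. */
        rc = VMR3PowerOn(pVM);
        if (RT_SUCCESS(rc))
        {
            /* The caller can later pause and continue execution at will. */
            VMR3Suspend(pVM);
            VMR3Resume(pVM);
        }
    }
    return rc;
}
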
85/*******************************************************************************
86* Structures and Typedefs *
87*******************************************************************************/
88/**
89 * VM destruction callback registration record.
90 */
91typedef struct VMATDTOR
92{
93 /** Pointer to the next record in the list. */
94 struct VMATDTOR *pNext;
95 /** Pointer to the callback function. */
96 PFNVMATDTOR pfnAtDtor;
97 /** The user argument. */
98 void *pvUser;
99} VMATDTOR;
100/** Pointer to a VM destruction callback registration record. */
101typedef VMATDTOR *PVMATDTOR;
102
103
104/*******************************************************************************
105* Global Variables *
106*******************************************************************************/
107/** Pointer to the list of VMs. */
108static PUVM g_pUVMsHead = NULL;
109
110/** Pointer to the list of at VM destruction callbacks. */
111static PVMATDTOR g_pVMAtDtorHead = NULL;
112/** Lock the g_pVMAtDtorHead list. */
113#define VM_ATDTOR_LOCK() do { } while (0)
114/** Unlock the g_pVMAtDtorHead list. */
115#define VM_ATDTOR_UNLOCK() do { } while (0)
116
117
118/*******************************************************************************
119* Internal Functions *
120*******************************************************************************/
121static int vmR3CreateUVM(uint32_t cCpus, PCVMM2USERMETHODS pVmm2UserMethods, PUVM *ppUVM);
122static int vmR3CreateU(PUVM pUVM, uint32_t cCpus, PFNCFGMCONSTRUCTOR pfnCFGMConstructor, void *pvUserCFGM);
123static int vmR3InitRing3(PVM pVM, PUVM pUVM);
124static int vmR3InitRing0(PVM pVM);
125static int vmR3InitGC(PVM pVM);
126static int vmR3InitDoCompleted(PVM pVM, VMINITCOMPLETED enmWhat);
127static DECLCALLBACK(size_t) vmR3LogPrefixCallback(PRTLOGGER pLogger, char *pchBuf, size_t cchBuf, void *pvUser);
128static void vmR3DestroyUVM(PUVM pUVM, uint32_t cMilliesEMTWait);
129static void vmR3AtDtor(PVM pVM);
130static bool vmR3ValidateStateTransition(VMSTATE enmStateOld, VMSTATE enmStateNew);
131static void vmR3DoAtState(PVM pVM, PUVM pUVM, VMSTATE enmStateNew, VMSTATE enmStateOld);
132static int vmR3TrySetState(PVM pVM, const char *pszWho, unsigned cTransitions, ...);
133static void vmR3SetStateLocked(PVM pVM, PUVM pUVM, VMSTATE enmStateNew, VMSTATE enmStateOld);
134static void vmR3SetState(PVM pVM, VMSTATE enmStateNew, VMSTATE enmStateOld);
135static int vmR3SetErrorU(PUVM pUVM, int rc, RT_SRC_POS_DECL, const char *pszFormat, ...);
136
137
138/**
139 * Do global VMM init.
140 *
141 * @returns VBox status code.
142 */
143VMMR3DECL(int) VMR3GlobalInit(void)
144{
145 /*
146 * Only once.
147 */
148 static bool volatile s_fDone = false;
149 if (s_fDone)
150 return VINF_SUCCESS;
151
152 /*
153 * We're done.
154 */
155 s_fDone = true;
156 return VINF_SUCCESS;
157}
158
159
160
161/**
162 * Creates a virtual machine by calling the supplied configuration constructor.
163 *
164 * On successful return the VM is created but not yet running, i.e. VMR3PowerOn()
165 * should be called to start execution.
166 *
167 * @returns 0 on success.
168 * @returns VBox error code on failure.
169 * @param cCpus Number of virtual CPUs for the new VM.
170 * @param pVmm2UserMethods An optional method table that the VMM can use to
171 * make the user perform various actions, like for
172 * instance state saving.
173 * @param pfnVMAtError Pointer to callback function for setting VM
174 * errors. This was added as an implicit call to
175 * VMR3AtErrorRegister() since there is no way the
176 * caller can get to the VM handle early enough to
177 * do this on its own.
178 * This is called in the context of an EMT.
179 * @param pvUserVM The user argument passed to pfnVMAtError.
180 * @param pfnCFGMConstructor Pointer to callback function for constructing the VM configuration tree.
181 * This is called in the context of EMT(0).
182 * @param pvUserCFGM The user argument passed to pfnCFGMConstructor.
183 * @param ppVM Where to store the 'handle' of the created VM.
184 */
185VMMR3DECL(int) VMR3Create(uint32_t cCpus, PCVMM2USERMETHODS pVmm2UserMethods,
186 PFNVMATERROR pfnVMAtError, void *pvUserVM,
187 PFNCFGMCONSTRUCTOR pfnCFGMConstructor, void *pvUserCFGM,
188 PVM *ppVM)
189{
190 LogFlow(("VMR3Create: cCpus=%RU32 pVmm2UserMethods=%p pfnVMAtError=%p pvUserVM=%p pfnCFGMConstructor=%p pvUserCFGM=%p ppVM=%p\n",
191 cCpus, pVmm2UserMethods, pfnVMAtError, pvUserVM, pfnCFGMConstructor, pvUserCFGM, ppVM));
192
193 if (pVmm2UserMethods)
194 {
195 AssertPtrReturn(pVmm2UserMethods, VERR_INVALID_POINTER);
196 AssertReturn(pVmm2UserMethods->u32Magic == VMM2USERMETHODS_MAGIC, VERR_INVALID_PARAMETER);
197 AssertReturn(pVmm2UserMethods->u32Version == VMM2USERMETHODS_VERSION, VERR_INVALID_PARAMETER);
198 AssertPtrReturn(pVmm2UserMethods->pfnSaveState, VERR_INVALID_POINTER);
199 AssertReturn(pVmm2UserMethods->u32EndMagic == VMM2USERMETHODS_MAGIC, VERR_INVALID_PARAMETER);
200 }
201 AssertPtrNullReturn(pfnVMAtError, VERR_INVALID_POINTER);
202 AssertPtrNullReturn(pfnCFGMConstructor, VERR_INVALID_POINTER);
203 AssertPtrReturn(ppVM, VERR_INVALID_POINTER);
204
205 /*
206 * Because of the current hackiness of the applications
207 * we'll have to initialize global stuff from here.
208 * Later the applications will take care of this in a proper way.
209 */
210 static bool fGlobalInitDone = false;
211 if (!fGlobalInitDone)
212 {
213 int rc = VMR3GlobalInit();
214 if (RT_FAILURE(rc))
215 return rc;
216 fGlobalInitDone = true;
217 }
218
219 /*
220 * Validate input.
221 */
222 AssertLogRelMsgReturn(cCpus > 0 && cCpus <= VMM_MAX_CPU_COUNT, ("%RU32\n", cCpus), VERR_TOO_MANY_CPUS);
223
224 /*
225 * Create the UVM so we can register the at-error callback
226 * and consolidate a bit of cleanup code.
227 */
228 PUVM pUVM = NULL; /* shuts up gcc */
229 int rc = vmR3CreateUVM(cCpus, pVmm2UserMethods, &pUVM);
230 if (RT_FAILURE(rc))
231 return rc;
232 if (pfnVMAtError)
233 rc = VMR3AtErrorRegisterU(pUVM, pfnVMAtError, pvUserVM);
234 if (RT_SUCCESS(rc))
235 {
236 /*
237 * Initialize the support library creating the session for this VM.
238 */
239 rc = SUPR3Init(&pUVM->vm.s.pSession);
240 if (RT_SUCCESS(rc))
241 {
242 /*
243 * Call vmR3CreateU in the EMT thread and wait for it to finish.
244 *
245 * Note! VMCPUID_ANY is used here because VMR3ReqQueueU would have trouble
246 * submitting a request to a specific VCPU without a pVM. So, to make
247 * sure init is running on EMT(0), vmR3EmulationThreadWithId makes sure
248 * that only EMT(0) is servicing VMCPUID_ANY requests when pVM is NULL.
249 */
250 PVMREQ pReq;
251 rc = VMR3ReqCallU(pUVM, VMCPUID_ANY, &pReq, RT_INDEFINITE_WAIT, VMREQFLAGS_VBOX_STATUS,
252 (PFNRT)vmR3CreateU, 4, pUVM, cCpus, pfnCFGMConstructor, pvUserCFGM);
253 if (RT_SUCCESS(rc))
254 {
255 rc = pReq->iStatus;
256 VMR3ReqFree(pReq);
257 if (RT_SUCCESS(rc))
258 {
259 /*
260 * Success!
261 */
262 *ppVM = pUVM->pVM;
263 LogFlow(("VMR3Create: returns VINF_SUCCESS *ppVM=%p\n", *ppVM));
264 return VINF_SUCCESS;
265 }
266 }
267 else
268 AssertMsgFailed(("VMR3ReqCallU failed rc=%Rrc\n", rc));
269
270 /*
271 * An error occurred during VM creation. Set the error message directly
272 * using the initial callback, as the callback list might not exist yet.
273 */
274 const char *pszError;
275 switch (rc)
276 {
277 case VERR_VMX_IN_VMX_ROOT_MODE:
278#ifdef RT_OS_LINUX
279 pszError = N_("VirtualBox can't operate in VMX root mode. "
280 "Please disable the KVM kernel extension, recompile your kernel and reboot");
281#else
282 pszError = N_("VirtualBox can't operate in VMX root mode. Please close all other virtualization programs.");
283#endif
284 break;
285
286#ifndef RT_OS_DARWIN
287 case VERR_HWACCM_CONFIG_MISMATCH:
288 pszError = N_("VT-x/AMD-V is either not available on your host or disabled. "
289 "This hardware extension is required by the VM configuration");
290 break;
291#endif
292
293 case VERR_SVM_IN_USE:
294#ifdef RT_OS_LINUX
295 pszError = N_("VirtualBox can't enable the AMD-V extension. "
296 "Please disable the KVM kernel extension, recompile your kernel and reboot");
297#else
298 pszError = N_("VirtualBox can't enable the AMD-V extension. Please close all other virtualization programs.");
299#endif
300 break;
301
302#ifdef RT_OS_LINUX
303 case VERR_SUPDRV_COMPONENT_NOT_FOUND:
304 pszError = N_("One of the kernel modules was not successfully loaded. Make sure "
305 "that no kernel modules from an older version of VirtualBox exist. "
306 "Then try to recompile and reload the kernel modules by executing "
307 "'/etc/init.d/vboxdrv setup' as root");
308 break;
309#endif
310
311 case VERR_RAW_MODE_INVALID_SMP:
312 pszError = N_("VT-x/AMD-V is either not available on your host or disabled. "
313 "VirtualBox requires this hardware extension to emulate more than one "
314 "guest CPU");
315 break;
316
317 case VERR_SUPDRV_KERNEL_TOO_OLD_FOR_VTX:
318#ifdef RT_OS_LINUX
319 pszError = N_("Because the host kernel is too old, VirtualBox cannot enable the VT-x "
320 "extension. Either upgrade your kernel to Linux 2.6.13 or later or disable "
321 "the VT-x extension in the VM settings. Note that without VT-x you have "
322 "to reduce the number of guest CPUs to one");
323#else
324 pszError = N_("Because the host kernel is too old, VirtualBox cannot enable the VT-x "
325 "extension. Either upgrade your kernel or disable the VT-x extension in the "
326 "VM settings. Note that without VT-x you have to reduce the number of guest "
327 "CPUs to one");
328#endif
329 break;
330
331 case VERR_PDM_DEVICE_NOT_FOUND:
332 pszError = N_("A virtual device is configured in the VM settings but the device "
333 "implementation is missing.\n"
334 "A possible reason for this error is a missing extension pack. Note "
335 "that as of VirtualBox 4.0, certain features (for example USB 2.0 "
336 "support and remote desktop) are only available from an 'extension "
337 "pack' which must be downloaded and installed separately");
338 break;
339
340 default:
341 if (VMR3GetErrorCountU(pUVM) == 0)
342 pszError = RTErrGetFull(rc);
343 else
344 pszError = NULL; /* already set. */
345 break;
346 }
347 if (pszError)
348 vmR3SetErrorU(pUVM, rc, RT_SRC_POS, pszError, rc);
349 }
350 else
351 {
352 /*
353 * An error occurred at support library initialization time (before the
354 * VM could be created). Set the error message directly using the
355 * initial callback, as the callback list doesn't exist yet.
356 */
357 const char *pszError;
358 switch (rc)
359 {
360 case VERR_VM_DRIVER_LOAD_ERROR:
361#ifdef RT_OS_LINUX
362 pszError = N_("VirtualBox kernel driver not loaded. The vboxdrv kernel module "
363 "was either not loaded or /dev/vboxdrv is not set up properly. "
364 "Re-setup the kernel module by executing "
365 "'/etc/init.d/vboxdrv setup' as root");
366#else
367 pszError = N_("VirtualBox kernel driver not loaded");
368#endif
369 break;
370 case VERR_VM_DRIVER_OPEN_ERROR:
371 pszError = N_("VirtualBox kernel driver cannot be opened");
372 break;
373 case VERR_VM_DRIVER_NOT_ACCESSIBLE:
374#ifdef VBOX_WITH_HARDENING
375 /* This should only happen if the executable wasn't hardened - bad code/build. */
376 pszError = N_("VirtualBox kernel driver not accessible, permission problem. "
377 "Re-install VirtualBox. If you are building it yourself, you "
378 "should make sure it installed correctly and that the setuid "
379 "bit is set on the executables calling VMR3Create.");
380#else
381 /* This should only happen when mixing builds or with the usual /dev/vboxdrv access issues. */
382# if defined(RT_OS_DARWIN)
383 pszError = N_("VirtualBox KEXT is not accessible, permission problem. "
384 "If you have built VirtualBox yourself, make sure that you do not "
385 "have the vboxdrv KEXT from a different build or installation loaded.");
386# elif defined(RT_OS_LINUX)
387 pszError = N_("VirtualBox kernel driver is not accessible, permission problem. "
388 "If you have built VirtualBox yourself, make sure that you do "
389 "not have the vboxdrv kernel module from a different build or "
390 "installation loaded. Also, make sure the vboxdrv udev rule gives "
391 "you the permission you need to access the device.");
392# elif defined(RT_OS_WINDOWS)
393 pszError = N_("VirtualBox kernel driver is not accessible, permission problem.");
394# else /* solaris, freebsd, ++. */
395 pszError = N_("VirtualBox kernel module is not accessible, permission problem. "
396 "If you have built VirtualBox yourself, make sure that you do "
397 "not have the vboxdrv kernel module from a different install loaded.");
398# endif
399#endif
400 break;
401 case VERR_INVALID_HANDLE: /** @todo track down and fix this error. */
402 case VERR_VM_DRIVER_NOT_INSTALLED:
403#ifdef RT_OS_LINUX
404 pszError = N_("VirtualBox kernel driver not installed. The vboxdrv kernel module "
405 "was either not loaded or /dev/vboxdrv was not created for some "
406 "reason. Re-setup the kernel module by executing "
407 "'/etc/init.d/vboxdrv setup' as root");
408#else
409 pszError = N_("VirtualBox kernel driver not installed");
410#endif
411 break;
412 case VERR_NO_MEMORY:
413 pszError = N_("VirtualBox support library out of memory");
414 break;
415 case VERR_VERSION_MISMATCH:
416 case VERR_VM_DRIVER_VERSION_MISMATCH:
417 pszError = N_("The VirtualBox support driver which is running is from a different "
418 "version of VirtualBox. You can correct this by stopping all "
419 "running instances of VirtualBox and reinstalling the software.");
420 break;
421 default:
422 pszError = N_("Unknown error initializing kernel driver");
423 AssertMsgFailed(("Add error message for rc=%d (%Rrc)\n", rc, rc));
424 }
425 vmR3SetErrorU(pUVM, rc, RT_SRC_POS, pszError, rc);
426 }
427 }
428
429 /* cleanup */
430 vmR3DestroyUVM(pUVM, 2000);
431 LogFlow(("VMR3Create: returns %Rrc\n", rc));
432 return rc;
433}
434
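/*
 * Illustrative sketch of a minimal CFGM constructor a caller could pass to
 * VMR3Create above. The function name is made up and the (PVM, void *)
 * callback shape is assumed from how pfnCFGMConstructor is handed to
 * CFGMR3Init in vmR3CreateU below; a real constructor (e.g. Main's) builds
 * the entire device and driver tree here.
 */
static DECLCALLBACK(int) sketchCfgmConstructor(PVM pVM, void *pvUser)
{
    PCFGMNODE pRoot = CFGMR3GetRoot(pVM);
    NOREF(pvUser);

    /* Keep "NumCPUs" in sync with the cCpus value handed to VMR3Create;
       vmR3CreateU verifies that the two match. */
    int rc = CFGMR3InsertInteger(pRoot, "NumCPUs", 1);
    if (RT_SUCCESS(rc))
        rc = CFGMR3InsertInteger(pRoot, "CpuExecutionCap", 100);
    return rc;
}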
435
436/**
437 * Creates the UVM.
438 *
439 * This will not initialize the support library, even though vmR3DestroyUVM
440 * will terminate it.
441 *
442 * @returns VBox status code.
443 * @param cCpus Number of virtual CPUs
444 * @param pVmm2UserMethods Pointer to the optional VMM -> User method
445 * table.
446 * @param ppUVM Where to store the UVM pointer.
447 */
448static int vmR3CreateUVM(uint32_t cCpus, PCVMM2USERMETHODS pVmm2UserMethods, PUVM *ppUVM)
449{
450 uint32_t i;
451
452 /*
453 * Create and initialize the UVM.
454 */
455 PUVM pUVM = (PUVM)RTMemPageAllocZ(RT_OFFSETOF(UVM, aCpus[cCpus]));
456 AssertReturn(pUVM, VERR_NO_MEMORY);
457 pUVM->u32Magic = UVM_MAGIC;
458 pUVM->cCpus = cCpus;
459 pUVM->pVmm2UserMethods = pVmm2UserMethods;
460
461 AssertCompile(sizeof(pUVM->vm.s) <= sizeof(pUVM->vm.padding));
462
463 pUVM->vm.s.ppAtStateNext = &pUVM->vm.s.pAtState;
464 pUVM->vm.s.ppAtErrorNext = &pUVM->vm.s.pAtError;
465 pUVM->vm.s.ppAtRuntimeErrorNext = &pUVM->vm.s.pAtRuntimeError;
466
467 pUVM->vm.s.enmHaltMethod = VMHALTMETHOD_BOOTSTRAP;
468
469 /* Initialize the VMCPU array in the UVM. */
470 for (i = 0; i < cCpus; i++)
471 {
472 pUVM->aCpus[i].pUVM = pUVM;
473 pUVM->aCpus[i].idCpu = i;
474 }
475
476 /* Allocate a TLS entry to store the VMINTUSERPERVMCPU pointer. */
477 int rc = RTTlsAllocEx(&pUVM->vm.s.idxTLS, NULL);
478 AssertRC(rc);
479 if (RT_SUCCESS(rc))
480 {
481 /* Allocate a halt method event semaphore for each VCPU. */
482 for (i = 0; i < cCpus; i++)
483 pUVM->aCpus[i].vm.s.EventSemWait = NIL_RTSEMEVENT;
484 for (i = 0; i < cCpus; i++)
485 {
486 rc = RTSemEventCreate(&pUVM->aCpus[i].vm.s.EventSemWait);
487 if (RT_FAILURE(rc))
488 break;
489 }
490 if (RT_SUCCESS(rc))
491 {
492 rc = RTCritSectInit(&pUVM->vm.s.AtStateCritSect);
493 if (RT_SUCCESS(rc))
494 {
495 rc = RTCritSectInit(&pUVM->vm.s.AtErrorCritSect);
496 if (RT_SUCCESS(rc))
497 {
498 /*
499 * Init fundamental (sub-)components - STAM, MMR3Heap and PDMLdr.
500 */
501 rc = STAMR3InitUVM(pUVM);
502 if (RT_SUCCESS(rc))
503 {
504 rc = MMR3InitUVM(pUVM);
505 if (RT_SUCCESS(rc))
506 {
507 rc = PDMR3InitUVM(pUVM);
508 if (RT_SUCCESS(rc))
509 {
510 /*
511 * Start the emulation threads for all VMCPUs.
512 */
513 for (i = 0; i < cCpus; i++)
514 {
515 rc = RTThreadCreateF(&pUVM->aCpus[i].vm.s.ThreadEMT, vmR3EmulationThread, &pUVM->aCpus[i], _1M,
516 RTTHREADTYPE_EMULATION, RTTHREADFLAGS_WAITABLE,
517 cCpus > 1 ? "EMT-%u" : "EMT", i);
518 if (RT_FAILURE(rc))
519 break;
520
521 pUVM->aCpus[i].vm.s.NativeThreadEMT = RTThreadGetNative(pUVM->aCpus[i].vm.s.ThreadEMT);
522 }
523
524 if (RT_SUCCESS(rc))
525 {
526 *ppUVM = pUVM;
527 return VINF_SUCCESS;
528 }
529
530 /* bail out. */
531 while (i-- > 0)
532 {
533 /** @todo rainy day: terminate the EMTs. */
534 }
535 PDMR3TermUVM(pUVM);
536 }
537 MMR3TermUVM(pUVM);
538 }
539 STAMR3TermUVM(pUVM);
540 }
541 RTCritSectDelete(&pUVM->vm.s.AtErrorCritSect);
542 }
543 RTCritSectDelete(&pUVM->vm.s.AtStateCritSect);
544 }
545 }
546 for (i = 0; i < cCpus; i++)
547 {
548 RTSemEventDestroy(pUVM->aCpus[i].vm.s.EventSemWait);
549 pUVM->aCpus[i].vm.s.EventSemWait = NIL_RTSEMEVENT;
550 }
551 RTTlsFree(pUVM->vm.s.idxTLS);
552 }
553 RTMemPageFree(pUVM, RT_OFFSETOF(UVM, aCpus[pUVM->cCpus]));
554 return rc;
555}
556
557
558/**
559 * Creates and initializes the VM.
560 *
561 * @thread EMT
562 */
563static int vmR3CreateU(PUVM pUVM, uint32_t cCpus, PFNCFGMCONSTRUCTOR pfnCFGMConstructor, void *pvUserCFGM)
564{
565 int rc = VINF_SUCCESS;
566
567 /*
568 * Load the VMMR0.r0 module so that we can call GVMMR0CreateVM.
569 */
570 rc = PDMR3LdrLoadVMMR0U(pUVM);
571 if (RT_FAILURE(rc))
572 {
573 /** @todo we need a cleaner solution for this (VERR_VMX_IN_VMX_ROOT_MODE).
574 * bird: what about moving the message down here? Main picks the first message, right? */
575 if (rc == VERR_VMX_IN_VMX_ROOT_MODE)
576 return rc; /* proper error message set later on */
577 return vmR3SetErrorU(pUVM, rc, RT_SRC_POS, N_("Failed to load VMMR0.r0"));
578 }
579
580 /*
581 * Request GVMM to create a new VM for us.
582 */
583 GVMMCREATEVMREQ CreateVMReq;
584 CreateVMReq.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
585 CreateVMReq.Hdr.cbReq = sizeof(CreateVMReq);
586 CreateVMReq.pSession = pUVM->vm.s.pSession;
587 CreateVMReq.pVMR0 = NIL_RTR0PTR;
588 CreateVMReq.pVMR3 = NULL;
589 CreateVMReq.cCpus = cCpus;
590 rc = SUPR3CallVMMR0Ex(NIL_RTR0PTR, NIL_VMCPUID, VMMR0_DO_GVMM_CREATE_VM, 0, &CreateVMReq.Hdr);
591 if (RT_SUCCESS(rc))
592 {
593 PVM pVM = pUVM->pVM = CreateVMReq.pVMR3;
594 AssertRelease(VALID_PTR(pVM));
595 AssertRelease(pVM->pVMR0 == CreateVMReq.pVMR0);
596 AssertRelease(pVM->pSession == pUVM->vm.s.pSession);
597 AssertRelease(pVM->cCpus == cCpus);
598 AssertRelease(pVM->uCpuExecutionCap == 100);
599 AssertRelease(pVM->offVMCPU == RT_UOFFSETOF(VM, aCpus));
600
601 Log(("VMR3Create: Created pUVM=%p pVM=%p pVMR0=%p hSelf=%#x cCpus=%RU32\n",
602 pUVM, pVM, pVM->pVMR0, pVM->hSelf, pVM->cCpus));
603
604 /*
605 * Initialize the VM structure and our internal data (VMINT).
606 */
607 pVM->pUVM = pUVM;
608
609 for (VMCPUID i = 0; i < pVM->cCpus; i++)
610 {
611 pVM->aCpus[i].pUVCpu = &pUVM->aCpus[i];
612 pVM->aCpus[i].idCpu = i;
613 pVM->aCpus[i].hNativeThread = pUVM->aCpus[i].vm.s.NativeThreadEMT;
614 Assert(pVM->aCpus[i].hNativeThread != NIL_RTNATIVETHREAD);
615 /* hNativeThreadR0 is initialized on EMT registration. */
616 pUVM->aCpus[i].pVCpu = &pVM->aCpus[i];
617 pUVM->aCpus[i].pVM = pVM;
618 }
619
620
621 /*
622 * Init the configuration.
623 */
624 rc = CFGMR3Init(pVM, pfnCFGMConstructor, pvUserCFGM);
625 if (RT_SUCCESS(rc))
626 {
627 PCFGMNODE pRoot = CFGMR3GetRoot(pVM);
628 rc = CFGMR3QueryBoolDef(pRoot, "HwVirtExtForced", &pVM->fHwVirtExtForced, false);
629 if (RT_SUCCESS(rc) && pVM->fHwVirtExtForced)
630 pVM->fHWACCMEnabled = true;
631
632 /*
633 * If executing in fake suplib mode disable RR3 and RR0 in the config.
634 */
635 const char *psz = RTEnvGet("VBOX_SUPLIB_FAKE");
636 if (psz && !strcmp(psz, "fake"))
637 {
638 CFGMR3RemoveValue(pRoot, "RawR3Enabled");
639 CFGMR3InsertInteger(pRoot, "RawR3Enabled", 0);
640 CFGMR3RemoveValue(pRoot, "RawR0Enabled");
641 CFGMR3InsertInteger(pRoot, "RawR0Enabled", 0);
642 }
643
644 /*
645 * Make sure the CPU count in the config data matches.
646 */
647 if (RT_SUCCESS(rc))
648 {
649 uint32_t cCPUsCfg;
650 rc = CFGMR3QueryU32Def(pRoot, "NumCPUs", &cCPUsCfg, 1);
651 AssertLogRelMsgRC(rc, ("Configuration error: Querying \"NumCPUs\" as integer failed, rc=%Rrc\n", rc));
652 if (RT_SUCCESS(rc) && cCPUsCfg != cCpus)
653 {
654 AssertLogRelMsgFailed(("Configuration error: \"NumCPUs\"=%RU32 and VMR3CreateVM::cCpus=%RU32 does not match!\n",
655 cCPUsCfg, cCpus));
656 rc = VERR_INVALID_PARAMETER;
657 }
658 }
659 if (RT_SUCCESS(rc))
660 {
661 rc = CFGMR3QueryU32Def(pRoot, "CpuExecutionCap", &pVM->uCpuExecutionCap, 100);
662 AssertLogRelMsgRC(rc, ("Configuration error: Querying \"CpuExecutionCap\" as integer failed, rc=%Rrc\n", rc));
663
664 /*
665 * Init the ring-3 components and ring-3 per cpu data, finishing it off
666 * by a relocation round (intermediate context finalization will do this).
667 */
668 rc = vmR3InitRing3(pVM, pUVM);
669 if (RT_SUCCESS(rc))
670 {
671 rc = PGMR3FinalizeMappings(pVM);
672 if (RT_SUCCESS(rc))
673 {
674
675 LogFlow(("Ring-3 init succeeded\n"));
676
677 /*
678 * Init the Ring-0 components.
679 */
680 rc = vmR3InitRing0(pVM);
681 if (RT_SUCCESS(rc))
682 {
683 /* Relocate again, because some switcher fixups depends on R0 init results. */
684 VMR3Relocate(pVM, 0);
685
686#ifdef VBOX_WITH_DEBUGGER
687 /*
688 * Init the tcp debugger console if we're building
689 * with debugger support.
690 */
691 void *pvUser = NULL;
692 rc = DBGCTcpCreate(pVM, &pvUser);
693 if ( RT_SUCCESS(rc)
694 || rc == VERR_NET_ADDRESS_IN_USE)
695 {
696 pUVM->vm.s.pvDBGC = pvUser;
697#endif
698 /*
699 * Init the Guest Context components.
700 */
701 rc = vmR3InitGC(pVM);
702 if (RT_SUCCESS(rc))
703 {
704 /*
705 * Now we can safely set the VM halt method to default.
706 */
707 rc = vmR3SetHaltMethodU(pUVM, VMHALTMETHOD_DEFAULT);
708 if (RT_SUCCESS(rc))
709 {
710 /*
711 * Set the state and link into the global list.
712 */
713 vmR3SetState(pVM, VMSTATE_CREATED, VMSTATE_CREATING);
714 pUVM->pNext = g_pUVMsHead;
715 g_pUVMsHead = pUVM;
716
717#ifdef LOG_ENABLED
718 RTLogSetCustomPrefixCallback(NULL, vmR3LogPrefixCallback, pUVM);
719#endif
720 return VINF_SUCCESS;
721 }
722 }
723#ifdef VBOX_WITH_DEBUGGER
724 DBGCTcpTerminate(pVM, pUVM->vm.s.pvDBGC);
725 pUVM->vm.s.pvDBGC = NULL;
726 }
727#endif
728 //..
729 }
730 }
731 vmR3Destroy(pVM);
732 }
733 }
734 //..
735
736 /* Clean CFGM. */
737 int rc2 = CFGMR3Term(pVM);
738 AssertRC(rc2);
739 }
740
741 /*
742 * Do automatic cleanups while the VM structure is still alive and all
743 * references to it are still working.
744 */
745 PDMR3CritSectTerm(pVM);
746
747 /*
748 * Drop all references to VM and the VMCPU structures, then
749 * tell GVMM to destroy the VM.
750 */
751 pUVM->pVM = NULL;
752 for (VMCPUID i = 0; i < pUVM->cCpus; i++)
753 {
754 pUVM->aCpus[i].pVM = NULL;
755 pUVM->aCpus[i].pVCpu = NULL;
756 }
757 Assert(pUVM->vm.s.enmHaltMethod == VMHALTMETHOD_BOOTSTRAP);
758
759 if (pUVM->cCpus > 1)
760 {
761 /* Poke the other EMTs since they may have stale pVM and pVCpu references
762 on the stack (see VMR3WaitU for instance) if they've been awakened after
763 VM creation. */
764 for (VMCPUID i = 1; i < pUVM->cCpus; i++)
765 VMR3NotifyCpuFFU(&pUVM->aCpus[i], 0);
766 RTThreadSleep(RT_MIN(100 + 25 *(pUVM->cCpus - 1), 500)); /* very sophisticated */
767 }
768
769 int rc2 = SUPR3CallVMMR0Ex(CreateVMReq.pVMR0, 0 /*idCpu*/, VMMR0_DO_GVMM_DESTROY_VM, 0, NULL);
770 AssertRC(rc2);
771 }
772 else
773 vmR3SetErrorU(pUVM, rc, RT_SRC_POS, N_("VM creation failed (GVMM)"));
774
775 LogFlow(("vmR3CreateU: returns %Rrc\n", rc));
776 return rc;
777}
778
779
780/**
781 * Register the calling EMT with GVM.
782 *
783 * @returns VBox status code.
784 * @param pVM The VM handle.
785 * @param idCpu The Virtual CPU ID.
786 */
787static DECLCALLBACK(int) vmR3RegisterEMT(PVM pVM, VMCPUID idCpu)
788{
789 Assert(VMMGetCpuId(pVM) == idCpu);
790 int rc = SUPR3CallVMMR0Ex(pVM->pVMR0, idCpu, VMMR0_DO_GVMM_REGISTER_VMCPU, 0, NULL);
791 if (RT_FAILURE(rc))
792 LogRel(("idCpu=%u rc=%Rrc\n", idCpu, rc));
793 return rc;
794}
795
796
797/**
798 * Initializes all R3 components of the VM
799 */
800static int vmR3InitRing3(PVM pVM, PUVM pUVM)
801{
802 int rc;
803
804 /*
805 * Register the other EMTs with GVM.
806 */
807 for (VMCPUID idCpu = 1; idCpu < pVM->cCpus; idCpu++)
808 {
809 rc = VMR3ReqCallWaitU(pUVM, idCpu, (PFNRT)vmR3RegisterEMT, 2, pVM, idCpu);
810 if (RT_FAILURE(rc))
811 return rc;
812 }
813
814 /*
815 * Init all R3 components, the order here might be important.
816 */
817 rc = MMR3Init(pVM);
818 if (RT_SUCCESS(rc))
819 {
820 STAM_REG(pVM, &pVM->StatTotalInGC, STAMTYPE_PROFILE_ADV, "/PROF/VM/InGC", STAMUNIT_TICKS_PER_CALL, "Profiling the total time spent in GC.");
821 STAM_REG(pVM, &pVM->StatSwitcherToGC, STAMTYPE_PROFILE_ADV, "/PROF/VM/SwitchToGC", STAMUNIT_TICKS_PER_CALL, "Profiling switching to GC.");
822 STAM_REG(pVM, &pVM->StatSwitcherToHC, STAMTYPE_PROFILE_ADV, "/PROF/VM/SwitchToHC", STAMUNIT_TICKS_PER_CALL, "Profiling switching to HC.");
823 STAM_REG(pVM, &pVM->StatSwitcherSaveRegs, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/SaveRegs", STAMUNIT_TICKS_PER_CALL,"Profiling switching to GC.");
824 STAM_REG(pVM, &pVM->StatSwitcherSysEnter, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/SysEnter", STAMUNIT_TICKS_PER_CALL,"Profiling switching to GC.");
825 STAM_REG(pVM, &pVM->StatSwitcherDebug, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/Debug", STAMUNIT_TICKS_PER_CALL,"Profiling switching to GC.");
826 STAM_REG(pVM, &pVM->StatSwitcherCR0, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/CR0", STAMUNIT_TICKS_PER_CALL, "Profiling switching to GC.");
827 STAM_REG(pVM, &pVM->StatSwitcherCR4, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/CR4", STAMUNIT_TICKS_PER_CALL, "Profiling switching to GC.");
828 STAM_REG(pVM, &pVM->StatSwitcherLgdt, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/Lgdt", STAMUNIT_TICKS_PER_CALL, "Profiling switching to GC.");
829 STAM_REG(pVM, &pVM->StatSwitcherLidt, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/Lidt", STAMUNIT_TICKS_PER_CALL, "Profiling switching to GC.");
830 STAM_REG(pVM, &pVM->StatSwitcherLldt, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/Lldt", STAMUNIT_TICKS_PER_CALL, "Profiling switching to GC.");
831 STAM_REG(pVM, &pVM->StatSwitcherTSS, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/TSS", STAMUNIT_TICKS_PER_CALL, "Profiling switching to GC.");
832 STAM_REG(pVM, &pVM->StatSwitcherJmpCR3, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/JmpCR3", STAMUNIT_TICKS_PER_CALL,"Profiling switching to GC.");
833 STAM_REG(pVM, &pVM->StatSwitcherRstrRegs, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/RstrRegs", STAMUNIT_TICKS_PER_CALL,"Profiling switching to GC.");
834
835 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
836 {
837 rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltYield, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "Profiling halted state yielding.", "/PROF/VM/CPU%d/Halt/Yield", idCpu);
838 AssertRC(rc);
839 rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltBlock, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "Profiling halted state blocking.", "/PROF/VM/CPU%d/Halt/Block", idCpu);
840 AssertRC(rc);
841 rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltBlockOverslept, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "Time wasted by blocking too long.", "/PROF/VM/CPU%d/Halt/BlockOverslept", idCpu);
842 AssertRC(rc);
843 rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltBlockInsomnia, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "Time slept when returning too early.", "/PROF/VM/CPU%d/Halt/BlockInsomnia", idCpu);
844 AssertRC(rc);
845 rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltBlockOnTime, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "Time slept on time.", "/PROF/VM/CPU%d/Halt/BlockOnTime", idCpu);
846 AssertRC(rc);
847 rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltTimers, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "Profiling halted state timer tasks.", "/PROF/VM/CPU%d/Halt/Timers", idCpu);
848 AssertRC(rc);
849 }
850
851 STAM_REG(pVM, &pUVM->vm.s.StatReqAllocNew, STAMTYPE_COUNTER, "/VM/Req/AllocNew", STAMUNIT_OCCURENCES, "Number of VMR3ReqAlloc returning a new packet.");
852 STAM_REG(pVM, &pUVM->vm.s.StatReqAllocRaces, STAMTYPE_COUNTER, "/VM/Req/AllocRaces", STAMUNIT_OCCURENCES, "Number of VMR3ReqAlloc causing races.");
853 STAM_REG(pVM, &pUVM->vm.s.StatReqAllocRecycled, STAMTYPE_COUNTER, "/VM/Req/AllocRecycled", STAMUNIT_OCCURENCES, "Number of VMR3ReqAlloc returning a recycled packet.");
854 STAM_REG(pVM, &pUVM->vm.s.StatReqFree, STAMTYPE_COUNTER, "/VM/Req/Free", STAMUNIT_OCCURENCES, "Number of VMR3ReqFree calls.");
855 STAM_REG(pVM, &pUVM->vm.s.StatReqFreeOverflow, STAMTYPE_COUNTER, "/VM/Req/FreeOverflow", STAMUNIT_OCCURENCES, "Number of times the request was actually freed.");
856 STAM_REG(pVM, &pUVM->vm.s.StatReqProcessed, STAMTYPE_COUNTER, "/VM/Req/Processed", STAMUNIT_OCCURENCES, "Number of processed requests (any queue).");
857 STAM_REG(pVM, &pUVM->vm.s.StatReqMoreThan1, STAMTYPE_COUNTER, "/VM/Req/MoreThan1", STAMUNIT_OCCURENCES, "Number of times there are more than one request on the queue when processing it.");
858 STAM_REG(pVM, &pUVM->vm.s.StatReqPushBackRaces, STAMTYPE_COUNTER, "/VM/Req/PushBackRaces", STAMUNIT_OCCURENCES, "Number of push back races.");
859
860 rc = CPUMR3Init(pVM);
861 if (RT_SUCCESS(rc))
862 {
863 rc = HWACCMR3Init(pVM);
864 if (RT_SUCCESS(rc))
865 {
866 rc = PGMR3Init(pVM);
867 if (RT_SUCCESS(rc))
868 {
869 rc = REMR3Init(pVM);
870 if (RT_SUCCESS(rc))
871 {
872 rc = MMR3InitPaging(pVM);
873 if (RT_SUCCESS(rc))
874 rc = TMR3Init(pVM);
875 if (RT_SUCCESS(rc))
876 {
877 rc = FTMR3Init(pVM);
878 if (RT_SUCCESS(rc))
879 {
880 rc = VMMR3Init(pVM);
881 if (RT_SUCCESS(rc))
882 {
883 rc = SELMR3Init(pVM);
884 if (RT_SUCCESS(rc))
885 {
886 rc = TRPMR3Init(pVM);
887 if (RT_SUCCESS(rc))
888 {
889 rc = CSAMR3Init(pVM);
890 if (RT_SUCCESS(rc))
891 {
892 rc = PATMR3Init(pVM);
893 if (RT_SUCCESS(rc))
894 {
895 rc = IOMR3Init(pVM);
896 if (RT_SUCCESS(rc))
897 {
898 rc = EMR3Init(pVM);
899 if (RT_SUCCESS(rc))
900 {
901 rc = DBGFR3Init(pVM);
902 if (RT_SUCCESS(rc))
903 {
904 rc = PDMR3Init(pVM);
905 if (RT_SUCCESS(rc))
906 {
907 rc = PGMR3InitDynMap(pVM);
908 if (RT_SUCCESS(rc))
909 rc = MMR3HyperInitFinalize(pVM);
910 if (RT_SUCCESS(rc))
911 rc = PATMR3InitFinalize(pVM);
912 if (RT_SUCCESS(rc))
913 rc = PGMR3InitFinalize(pVM);
914 if (RT_SUCCESS(rc))
915 rc = SELMR3InitFinalize(pVM);
916 if (RT_SUCCESS(rc))
917 rc = TMR3InitFinalize(pVM);
918 if (RT_SUCCESS(rc))
919 rc = REMR3InitFinalize(pVM);
920 if (RT_SUCCESS(rc))
921 rc = vmR3InitDoCompleted(pVM, VMINITCOMPLETED_RING3);
922 if (RT_SUCCESS(rc))
923 {
924 LogFlow(("vmR3InitRing3: returns %Rrc\n", VINF_SUCCESS));
925 return VINF_SUCCESS;
926 }
927 int rc2 = PDMR3Term(pVM);
928 AssertRC(rc2);
929 }
930 int rc2 = DBGFR3Term(pVM);
931 AssertRC(rc2);
932 }
933 int rc2 = EMR3Term(pVM);
934 AssertRC(rc2);
935 }
936 int rc2 = IOMR3Term(pVM);
937 AssertRC(rc2);
938 }
939 int rc2 = PATMR3Term(pVM);
940 AssertRC(rc2);
941 }
942 int rc2 = CSAMR3Term(pVM);
943 AssertRC(rc2);
944 }
945 int rc2 = TRPMR3Term(pVM);
946 AssertRC(rc2);
947 }
948 int rc2 = SELMR3Term(pVM);
949 AssertRC(rc2);
950 }
951 int rc2 = VMMR3Term(pVM);
952 AssertRC(rc2);
953 }
954 int rc2 = FTMR3Term(pVM);
955 AssertRC(rc2);
956 }
957 int rc2 = TMR3Term(pVM);
958 AssertRC(rc2);
959 }
960 int rc2 = REMR3Term(pVM);
961 AssertRC(rc2);
962 }
963 int rc2 = PGMR3Term(pVM);
964 AssertRC(rc2);
965 }
966 int rc2 = HWACCMR3Term(pVM);
967 AssertRC(rc2);
968 }
969 //int rc2 = CPUMR3Term(pVM);
970 //AssertRC(rc2);
971 }
972 /* MMR3Term is not called here because it'll kill the heap. */
973 }
974
975 LogFlow(("vmR3InitRing3: returns %Rrc\n", rc));
976 return rc;
977}
978
979
980/**
981 * Initializes all R0 components of the VM
982 */
983static int vmR3InitRing0(PVM pVM)
984{
985 LogFlow(("vmR3InitRing0:\n"));
986
987 /*
988 * Check for FAKE suplib mode.
989 */
990 int rc = VINF_SUCCESS;
991 const char *psz = RTEnvGet("VBOX_SUPLIB_FAKE");
992 if (!psz || strcmp(psz, "fake"))
993 {
994 /*
995 * Call the VMMR0 component and let it do the init.
996 */
997 rc = VMMR3InitR0(pVM);
998 }
999 else
1000 Log(("vmR3InitRing0: skipping because of VBOX_SUPLIB_FAKE=fake\n"));
1001
1002 /*
1003 * Do notifications and return.
1004 */
1005 if (RT_SUCCESS(rc))
1006 rc = vmR3InitDoCompleted(pVM, VMINITCOMPLETED_RING0);
1007
1008 /** @todo Move this to the VMINITCOMPLETED_RING0 notification handler. */
1009 if (RT_SUCCESS(rc))
1010 {
1011 rc = HWACCMR3InitFinalizeR0(pVM);
1012 CPUMR3SetHWVirtEx(pVM, HWACCMIsEnabled(pVM));
1013 }
1014
1015 LogFlow(("vmR3InitRing0: returns %Rrc\n", rc));
1016 return rc;
1017}
1018
1019
1020/**
1021 * Initializes all GC components of the VM
1022 */
1023static int vmR3InitGC(PVM pVM)
1024{
1025 LogFlow(("vmR3InitGC:\n"));
1026
1027 /*
1028 * Check for FAKE suplib mode.
1029 */
1030 int rc = VINF_SUCCESS;
1031 const char *psz = RTEnvGet("VBOX_SUPLIB_FAKE");
1032 if (!psz || strcmp(psz, "fake"))
1033 {
1034 /*
1035 * Call the VMMR0 component and let it do the init.
1036 */
1037 rc = VMMR3InitRC(pVM);
1038 }
1039 else
1040 Log(("vmR3InitGC: skipping because of VBOX_SUPLIB_FAKE=fake\n"));
1041
1042 /*
1043 * Do notifications and return.
1044 */
1045 if (RT_SUCCESS(rc))
1046 rc = vmR3InitDoCompleted(pVM, VMINITCOMPLETED_GC);
1047 LogFlow(("vmR3InitGC: returns %Rrc\n", rc));
1048 return rc;
1049}
1050
1051
1052/**
1053 * Do init completed notifications.
1054 *
1055 * @returns VBox status code.
1056 * @param pVM The VM handle.
1057 * @param enmWhat What's completed.
1058 */
1059static int vmR3InitDoCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
1060{
1061 int rc = VMMR3InitCompleted(pVM, enmWhat);
1062 if (RT_SUCCESS(rc))
1063 rc = HWACCMR3InitCompleted(pVM, enmWhat);
1064 return rc;
1065}
1066
1067
1068/**
1069 * Logger callback for inserting a custom prefix.
1070 *
1071 * @returns Number of chars written.
1072 * @param pLogger The logger.
1073 * @param pchBuf The output buffer.
1074 * @param cchBuf The output buffer size.
1075 * @param pvUser Pointer to the UVM structure.
1076 */
1077static DECLCALLBACK(size_t) vmR3LogPrefixCallback(PRTLOGGER pLogger, char *pchBuf, size_t cchBuf, void *pvUser)
1078{
1079 AssertReturn(cchBuf >= 2, 0);
1080 PUVM pUVM = (PUVM)pvUser;
1081 PUVMCPU pUVCpu = (PUVMCPU)RTTlsGet(pUVM->vm.s.idxTLS);
1082 if (pUVCpu)
1083 {
1084 static const char s_szHex[17] = "0123456789abcdef";
1085 VMCPUID const idCpu = pUVCpu->idCpu;
1086 pchBuf[1] = s_szHex[ idCpu & 15];
1087 pchBuf[0] = s_szHex[(idCpu >> 4) & 15];
1088 }
1089 else
1090 {
1091 pchBuf[0] = 'x';
1092 pchBuf[1] = 'y';
1093 }
1094
1095 return 2;
1096}
1097
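/*
 * Illustrative sketch of how the prefix callback above gets wired up; the
 * real call is made in vmR3CreateU under LOG_ENABLED. Once installed, every
 * log line starts with the two-hex-digit id of the emitting EMT ("00", "01",
 * ...), or "xy" for threads that are not EMTs.
 */
static void sketchInstallLogPrefix(PUVM pUVM)
{
#ifdef LOG_ENABLED
    RTLogSetCustomPrefixCallback(NULL /* default logger */, vmR3LogPrefixCallback, pUVM);
#else
    NOREF(pUVM);
#endif
}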
1098
1099/**
1100 * Calls the relocation functions for all VMM components so they can update
1101 * any GC pointers. When this function is called, all the basic VM members
1102 * have been updated and the actual memory relocation has been done
1103 * by PGM/MM.
1104 *
1105 * This is used both on init and on runtime relocations.
1106 *
1107 * @param pVM VM handle.
1108 * @param offDelta Relocation delta relative to old location.
1109 */
1110VMMR3DECL(void) VMR3Relocate(PVM pVM, RTGCINTPTR offDelta)
1111{
1112 LogFlow(("VMR3Relocate: offDelta=%RGv\n", offDelta));
1113
1114 /*
1115 * The order here is very important!
1116 */
1117 PGMR3Relocate(pVM, offDelta);
1118 PDMR3LdrRelocateU(pVM->pUVM, offDelta);
1119 PGMR3Relocate(pVM, 0); /* Repeat after PDM relocation. */
1120 CPUMR3Relocate(pVM);
1121 HWACCMR3Relocate(pVM);
1122 SELMR3Relocate(pVM);
1123 VMMR3Relocate(pVM, offDelta);
1124 SELMR3Relocate(pVM); /* !hack! fix stack! */
1125 TRPMR3Relocate(pVM, offDelta);
1126 PATMR3Relocate(pVM);
1127 CSAMR3Relocate(pVM, offDelta);
1128 IOMR3Relocate(pVM, offDelta);
1129 EMR3Relocate(pVM);
1130 TMR3Relocate(pVM, offDelta);
1131 DBGFR3Relocate(pVM, offDelta);
1132 PDMR3Relocate(pVM, offDelta);
1133}
1134
1135
1136/**
1137 * EMT rendezvous worker for VMR3PowerOn.
1138 *
1139 * @returns VERR_VM_INVALID_VM_STATE or VINF_SUCCESS. (This is a strict return
1140 * code, see FNVMMEMTRENDEZVOUS.)
1141 *
1142 * @param pVM The VM handle.
1143 * @param pVCpu The VMCPU handle of the EMT.
1144 * @param pvUser Ignored.
1145 */
1146static DECLCALLBACK(VBOXSTRICTRC) vmR3PowerOn(PVM pVM, PVMCPU pVCpu, void *pvUser)
1147{
1148 LogFlow(("vmR3PowerOn: pVM=%p pVCpu=%p/#%u\n", pVM, pVCpu, pVCpu->idCpu));
1149 Assert(!pvUser); NOREF(pvUser);
1150
1151 /*
1152 * The first thread thru here tries to change the state. We shouldn't be
1153 * called again if this fails.
1154 */
1155 if (pVCpu->idCpu == pVM->cCpus - 1)
1156 {
1157 int rc = vmR3TrySetState(pVM, "VMR3PowerOn", 1, VMSTATE_POWERING_ON, VMSTATE_CREATED);
1158 if (RT_FAILURE(rc))
1159 return rc;
1160 }
1161
1162 VMSTATE enmVMState = VMR3GetState(pVM);
1163 AssertMsgReturn(enmVMState == VMSTATE_POWERING_ON,
1164 ("%s\n", VMR3GetStateName(enmVMState)),
1165 VERR_INTERNAL_ERROR_4);
1166
1167 /*
1168 * All EMTs change their state to started.
1169 */
1170 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
1171
1172 /*
1173 * EMT(0) is last thru here and it will make the notification calls
1174 * and advance the state.
1175 */
1176 if (pVCpu->idCpu == 0)
1177 {
1178 PDMR3PowerOn(pVM);
1179 vmR3SetState(pVM, VMSTATE_RUNNING, VMSTATE_POWERING_ON);
1180 }
1181
1182 return VINF_SUCCESS;
1183}
1184
1185
1186/**
1187 * Powers on the virtual machine.
1188 *
1189 * @returns VBox status code.
1190 *
1191 * @param pVM The VM to power on.
1192 *
1193 * @thread Any thread.
1194 * @vmstate Created
1195 * @vmstateto PoweringOn+Running
1196 */
1197VMMR3DECL(int) VMR3PowerOn(PVM pVM)
1198{
1199 LogFlow(("VMR3PowerOn: pVM=%p\n", pVM));
1200 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1201
1202 /*
1203 * Gather all the EMTs to reduce the init TSC drift and keep
1204 * the state changing APIs a bit uniform.
1205 */
1206 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
1207 vmR3PowerOn, NULL);
1208 LogFlow(("VMR3PowerOn: returns %Rrc\n", rc));
1209 return rc;
1210}
1211
1212
1213/**
1214 * Does the suspend notifications.
1215 *
1216 * @param pVM The VM handle.
1217 * @thread EMT(0)
1218 */
1219static void vmR3SuspendDoWork(PVM pVM)
1220{
1221 PDMR3Suspend(pVM);
1222}
1223
1224
1225/**
1226 * EMT rendezvous worker for VMR3Suspend.
1227 *
1228 * @returns VERR_VM_INVALID_VM_STATE or VINF_EM_SUSPEND. (This is a strict
1229 * return code, see FNVMMEMTRENDEZVOUS.)
1230 *
1231 * @param pVM The VM handle.
1232 * @param pVCpu The VMCPU handle of the EMT.
1233 * @param pvUser Ignored.
1234 */
1235static DECLCALLBACK(VBOXSTRICTRC) vmR3Suspend(PVM pVM, PVMCPU pVCpu, void *pvUser)
1236{
1237 LogFlow(("vmR3Suspend: pVM=%p pVCpu=%p/#%u\n", pVM, pVCpu, pVCpu->idCpu));
1238 Assert(!pvUser); NOREF(pvUser);
1239
1240 /*
1241 * The first EMT switches the state to suspending. If this fails because
1242 * something was racing us in one way or the other, there will be no more
1243 * calls and thus the state assertion below is not going to annoy anyone.
1244 */
1245 if (pVCpu->idCpu == pVM->cCpus - 1)
1246 {
1247 int rc = vmR3TrySetState(pVM, "VMR3Suspend", 2,
1248 VMSTATE_SUSPENDING, VMSTATE_RUNNING,
1249 VMSTATE_SUSPENDING_EXT_LS, VMSTATE_RUNNING_LS);
1250 if (RT_FAILURE(rc))
1251 return rc;
1252 }
1253
1254 VMSTATE enmVMState = VMR3GetState(pVM);
1255 AssertMsgReturn( enmVMState == VMSTATE_SUSPENDING
1256 || enmVMState == VMSTATE_SUSPENDING_EXT_LS,
1257 ("%s\n", VMR3GetStateName(enmVMState)),
1258 VERR_INTERNAL_ERROR_4);
1259
1260 /*
1261 * EMT(0) does the actual suspending *after* all the other CPUs have
1262 * been thru here.
1263 */
1264 if (pVCpu->idCpu == 0)
1265 {
1266 vmR3SuspendDoWork(pVM);
1267
1268 int rc = vmR3TrySetState(pVM, "VMR3Suspend", 2,
1269 VMSTATE_SUSPENDED, VMSTATE_SUSPENDING,
1270 VMSTATE_SUSPENDED_EXT_LS, VMSTATE_SUSPENDING_EXT_LS);
1271 if (RT_FAILURE(rc))
1272 return VERR_INTERNAL_ERROR_3;
1273 }
1274
1275 return VINF_EM_SUSPEND;
1276}
1277
1278
1279/**
1280 * Suspends a running VM.
1281 *
1282 * @returns VBox status code. When called on EMT, this will be a strict status
1283 * code that has to be propagated up the call stack.
1284 *
1285 * @param pVM The VM to suspend.
1286 *
1287 * @thread Any thread.
1288 * @vmstate Running or RunningLS
1289 * @vmstateto Suspending + Suspended or SuspendingExtLS + SuspendedExtLS
1290 */
1291VMMR3DECL(int) VMR3Suspend(PVM pVM)
1292{
1293 LogFlow(("VMR3Suspend: pVM=%p\n", pVM));
1294 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1295
1296 /*
1297 * Gather all the EMTs to make sure there are no races before
1298 * changing the VM state.
1299 */
1300 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
1301 vmR3Suspend, NULL);
1302 LogFlow(("VMR3Suspend: returns %Rrc\n", rc));
1303 return rc;
1304}
1305
1306
1307/**
1308 * EMT rendezvous worker for VMR3Resume.
1309 *
1310 * @returns VERR_VM_INVALID_VM_STATE or VINF_EM_RESUME. (This is a strict
1311 * return code, see FNVMMEMTRENDEZVOUS.)
1312 *
1313 * @param pVM The VM handle.
1314 * @param pVCpu The VMCPU handle of the EMT.
1315 * @param pvUser Ignored.
1316 */
1317static DECLCALLBACK(VBOXSTRICTRC) vmR3Resume(PVM pVM, PVMCPU pVCpu, void *pvUser)
1318{
1319 LogFlow(("vmR3Resume: pVM=%p pVCpu=%p/#%u\n", pVM, pVCpu, pVCpu->idCpu));
1320 Assert(!pvUser); NOREF(pvUser);
1321
1322 /*
1323 * The first thread thru here tries to change the state. We shouldn't be
1324 * called again if this fails.
1325 */
1326 if (pVCpu->idCpu == pVM->cCpus - 1)
1327 {
1328 int rc = vmR3TrySetState(pVM, "VMR3Resume", 1, VMSTATE_RESUMING, VMSTATE_SUSPENDED);
1329 if (RT_FAILURE(rc))
1330 return rc;
1331 }
1332
1333 VMSTATE enmVMState = VMR3GetState(pVM);
1334 AssertMsgReturn(enmVMState == VMSTATE_RESUMING,
1335 ("%s\n", VMR3GetStateName(enmVMState)),
1336 VERR_INTERNAL_ERROR_4);
1337
1338#if 0
1339 /*
1340 * All EMTs change their state to started.
1341 */
1342 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
1343#endif
1344
1345 /*
1346 * EMT(0) is last thru here and it will make the notification calls
1347 * and advance the state.
1348 */
1349 if (pVCpu->idCpu == 0)
1350 {
1351 PDMR3Resume(pVM);
1352 vmR3SetState(pVM, VMSTATE_RUNNING, VMSTATE_RESUMING);
1353 pVM->vm.s.fTeleportedAndNotFullyResumedYet = false;
1354 }
1355
1356 return VINF_EM_RESUME;
1357}
1358
1359
1360/**
1361 * Resume VM execution.
1362 *
1363 * @returns VBox status code. When called on EMT, this will be a strict status
1364 * code that has to be propagated up the call stack.
1365 *
1366 * @param pVM The VM to resume.
1367 *
1368 * @thread Any thread.
1369 * @vmstate Suspended
1370 * @vmstateto Running
1371 */
1372VMMR3DECL(int) VMR3Resume(PVM pVM)
1373{
1374 LogFlow(("VMR3Resume: pVM=%p\n", pVM));
1375 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1376
1377 /*
1378 * Gather all the EMTs to make sure there are no races before
1379 * changing the VM state.
1380 */
1381 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
1382 vmR3Resume, NULL);
1383 LogFlow(("VMR3Resume: returns %Rrc\n", rc));
1384 return rc;
1385}
1386
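/*
 * Illustrative caller-side sketch pairing VMR3Suspend and VMR3Resume; the
 * function name is made up. Per the docs for both, they can be called from
 * any thread; the sketch assumes a non-EMT caller so plain status handling
 * suffices.
 */
static int sketchPauseAndContinue(PVM pVM)
{
    /* Running -> Suspending -> Suspended. */
    int rc = VMR3Suspend(pVM);
    if (RT_SUCCESS(rc))
    {
        /* ... reconfigure devices, take a snapshot, etc. ... */
        /* Suspended -> Resuming -> Running. */
        rc = VMR3Resume(pVM);
    }
    return rc;
}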
1387
1388/**
1389 * EMT rendezvous worker for VMR3Save and VMR3Teleport that suspends the VM
1390 * after the live step has been completed.
1391 *
1392 * @returns VERR_VM_INVALID_VM_STATE or VINF_EM_RESUME. (This is a strict
1393 * return code, see FNVMMEMTRENDEZVOUS.)
1394 *
1395 * @param pVM The VM handle.
1396 * @param pVCpu The VMCPU handle of the EMT.
1397 * @param pvUser The pfSuspended argument of vmR3SaveTeleport.
1398 */
1399static DECLCALLBACK(VBOXSTRICTRC) vmR3LiveDoSuspend(PVM pVM, PVMCPU pVCpu, void *pvUser)
1400{
1401 LogFlow(("vmR3LiveDoSuspend: pVM=%p pVCpu=%p/#%u\n", pVM, pVCpu, pVCpu->idCpu));
1402 bool *pfSuspended = (bool *)pvUser;
1403
1404 /*
1405 * The first thread thru here tries to change the state. We shouldn't be
1406 * called again if this fails.
1407 */
1408 if (pVCpu->idCpu == pVM->cCpus - 1U)
1409 {
1410 PUVM pUVM = pVM->pUVM;
1411 int rc;
1412
1413 RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
1414 VMSTATE enmVMState = pVM->enmVMState;
1415 switch (enmVMState)
1416 {
1417 case VMSTATE_RUNNING_LS:
1418 vmR3SetStateLocked(pVM, pUVM, VMSTATE_SUSPENDING_LS, VMSTATE_RUNNING_LS);
1419 rc = VINF_SUCCESS;
1420 break;
1421
1422 case VMSTATE_SUSPENDED_EXT_LS:
1423 case VMSTATE_SUSPENDED_LS: /* (via reset) */
1424 rc = VINF_SUCCESS;
1425 break;
1426
1427 case VMSTATE_DEBUGGING_LS:
1428 rc = VERR_TRY_AGAIN;
1429 break;
1430
1431 case VMSTATE_OFF_LS:
1432 vmR3SetStateLocked(pVM, pUVM, VMSTATE_OFF, VMSTATE_OFF_LS);
1433 rc = VERR_SSM_LIVE_POWERED_OFF;
1434 break;
1435
1436 case VMSTATE_FATAL_ERROR_LS:
1437 vmR3SetStateLocked(pVM, pUVM, VMSTATE_FATAL_ERROR, VMSTATE_FATAL_ERROR_LS);
1438 rc = VERR_SSM_LIVE_FATAL_ERROR;
1439 break;
1440
1441 case VMSTATE_GURU_MEDITATION_LS:
1442 vmR3SetStateLocked(pVM, pUVM, VMSTATE_GURU_MEDITATION, VMSTATE_GURU_MEDITATION_LS);
1443 rc = VERR_SSM_LIVE_GURU_MEDITATION;
1444 break;
1445
1446 case VMSTATE_POWERING_OFF_LS:
1447 case VMSTATE_SUSPENDING_EXT_LS:
1448 case VMSTATE_RESETTING_LS:
1449 default:
1450 AssertMsgFailed(("%s\n", VMR3GetStateName(enmVMState)));
1451 rc = VERR_INTERNAL_ERROR_3;
1452 break;
1453 }
1454 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
1455 if (RT_FAILURE(rc))
1456 {
1457 LogFlow(("vmR3LiveDoSuspend: returns %Rrc (state was %s)\n", rc, VMR3GetStateName(enmVMState)));
1458 return rc;
1459 }
1460 }
1461
1462 VMSTATE enmVMState = VMR3GetState(pVM);
1463 AssertMsgReturn(enmVMState == VMSTATE_SUSPENDING_LS,
1464 ("%s\n", VMR3GetStateName(enmVMState)),
1465 VERR_INTERNAL_ERROR_4);
1466
1467 /*
1468 * Only EMT(0) has work to do since it's last thru here.
1469 */
1470 if (pVCpu->idCpu == 0)
1471 {
1472 vmR3SuspendDoWork(pVM);
1473 int rc = vmR3TrySetState(pVM, "VMR3Suspend", 1,
1474 VMSTATE_SUSPENDED_LS, VMSTATE_SUSPENDING_LS);
1475 if (RT_FAILURE(rc))
1476 return VERR_INTERNAL_ERROR_3;
1477
1478 *pfSuspended = true;
1479 }
1480
1481 return VINF_EM_SUSPEND;
1482}
1483
1484
1485/**
1486 * EMT rendezvous worker that VMR3Save and VMR3Teleport use to clean up an
1487 * SSMR3LiveDoStep1 failure.
1488 *
1489 * Doing this as a rendezvous operation avoids all annoying transition
1490 * states.
1491 *
1492 * @returns VERR_VM_INVALID_VM_STATE, VINF_SUCCESS or some specific VERR_SSM_*
1493 * status code. (This is a strict return code, see FNVMMEMTRENDEZVOUS.)
1494 *
1495 * @param pVM The VM handle.
1496 * @param pVCpu The VMCPU handle of the EMT.
1497 * @param pvUser The pfSuspended argument of vmR3SaveTeleport.
1498 */
1499static DECLCALLBACK(VBOXSTRICTRC) vmR3LiveDoStep1Cleanup(PVM pVM, PVMCPU pVCpu, void *pvUser)
1500{
1501 LogFlow(("vmR3LiveDoStep1Cleanup: pVM=%p pVCpu=%p/#%u\n", pVM, pVCpu, pVCpu->idCpu));
1502 bool *pfSuspended = (bool *)pvUser;
1503 NOREF(pVCpu);
1504
1505 int rc = vmR3TrySetState(pVM, "vmR3LiveDoStep1Cleanup", 8,
1506 VMSTATE_OFF, VMSTATE_OFF_LS, /* 1 */
1507 VMSTATE_FATAL_ERROR, VMSTATE_FATAL_ERROR_LS, /* 2 */
1508 VMSTATE_GURU_MEDITATION, VMSTATE_GURU_MEDITATION_LS, /* 3 */
1509 VMSTATE_SUSPENDED, VMSTATE_SUSPENDED_LS, /* 4 */
1510 VMSTATE_SUSPENDED, VMSTATE_SAVING,
1511 VMSTATE_SUSPENDED, VMSTATE_SUSPENDED_EXT_LS,
1512 VMSTATE_RUNNING, VMSTATE_RUNNING_LS,
1513 VMSTATE_DEBUGGING, VMSTATE_DEBUGGING_LS);
1514 if (rc == 1)
1515 rc = VERR_SSM_LIVE_POWERED_OFF;
1516 else if (rc == 2)
1517 rc = VERR_SSM_LIVE_FATAL_ERROR;
1518 else if (rc == 3)
1519 rc = VERR_SSM_LIVE_GURU_MEDITATION;
1520 else if (rc == 4)
1521 {
1522 *pfSuspended = true;
1523 rc = VINF_SUCCESS;
1524 }
1525 else if (rc > 0)
1526 rc = VINF_SUCCESS;
1527 return rc;
1528}
1529
1530
1531/**
1532 * EMT(0) worker for VMR3Save and VMR3Teleport that completes the live save.
1533 *
1534 * @returns VBox status code.
1535 * @retval VINF_SSM_LIVE_SUSPENDED if VMR3Suspend was called.
1536 *
1537 * @param pVM The VM handle.
1538 * @param pSSM The handle of saved state operation.
1539 *
1540 * @thread EMT(0)
1541 */
1542static DECLCALLBACK(int) vmR3LiveDoStep2(PVM pVM, PSSMHANDLE pSSM)
1543{
1544 LogFlow(("vmR3LiveDoStep2: pVM=%p pSSM=%p\n", pVM, pSSM));
1545 VM_ASSERT_EMT0(pVM);
1546
1547 /*
1548 * Advance the state and mark if VMR3Suspend was called.
1549 */
1550 int rc = VINF_SUCCESS;
1551 VMSTATE enmVMState = VMR3GetState(pVM);
1552 if (enmVMState == VMSTATE_SUSPENDED_LS)
1553 vmR3SetState(pVM, VMSTATE_SAVING, VMSTATE_SUSPENDED_LS);
1554 else
1555 {
1556 if (enmVMState != VMSTATE_SAVING)
1557 vmR3SetState(pVM, VMSTATE_SAVING, VMSTATE_SUSPENDED_EXT_LS);
1558 rc = VINF_SSM_LIVE_SUSPENDED;
1559 }
1560
1561 /*
1562 * Finish up and release the handle. Careful with the status codes.
1563 */
1564 int rc2 = SSMR3LiveDoStep2(pSSM);
1565 if (rc == VINF_SUCCESS || (RT_FAILURE(rc2) && RT_SUCCESS(rc)))
1566 rc = rc2;
1567
1568 rc2 = SSMR3LiveDone(pSSM);
1569 if (rc == VINF_SUCCESS || (RT_FAILURE(rc2) && RT_SUCCESS(rc)))
1570 rc = rc2;
1571
1572 /*
1573 * Advance to the final state and return.
1574 */
1575 vmR3SetState(pVM, VMSTATE_SUSPENDED, VMSTATE_SAVING);
1576 Assert(rc > VINF_EM_LAST || rc < VINF_EM_FIRST);
1577 return rc;
1578}
1579
1580
1581/**
1582 * Worker for vmR3SaveTeleport that validates the state and calls SSMR3Save or
1583 * SSMR3LiveSave.
1584 *
1585 * @returns VBox status code.
1586 *
1587 * @param pVM The VM handle.
1588 * @param cMsMaxDowntime The maximum downtime given as milliseconds.
1589 * @param pszFilename The name of the file. NULL if pStreamOps is used.
1590 * @param pStreamOps The stream methods. NULL if pszFilename is used.
1591 * @param pvStreamOpsUser The user argument to the stream methods.
1592 * @param enmAfter What to do afterwards.
1593 * @param pfnProgress Progress callback. Optional.
1594 * @param pvProgressUser User argument for the progress callback.
1595 * @param ppSSM Where to return the saved state handle in case of a
1596 * live snapshot scenario.
1597 * @param fSkipStateChanges Set if we're supposed to skip state changes (FTM delta case)
1598 *
1599 * @thread EMT
1600 */
1601static DECLCALLBACK(int) vmR3Save(PVM pVM, uint32_t cMsMaxDowntime, const char *pszFilename, PCSSMSTRMOPS pStreamOps, void *pvStreamOpsUser,
1602 SSMAFTER enmAfter, PFNVMPROGRESS pfnProgress, void *pvProgressUser, PSSMHANDLE *ppSSM,
1603 bool fSkipStateChanges)
1604{
1605 int rc = VINF_SUCCESS;
1606
1607 LogFlow(("vmR3Save: pVM=%p cMsMaxDowntime=%u pszFilename=%p:{%s} pStreamOps=%p pvStreamOpsUser=%p enmAfter=%d pfnProgress=%p pvProgressUser=%p ppSSM=%p\n",
1608 pVM, cMsMaxDowntime, pszFilename, pszFilename, pStreamOps, pvStreamOpsUser, enmAfter, pfnProgress, pvProgressUser, ppSSM));
1609
1610 /*
1611 * Validate input.
1612 */
1613 AssertPtrNull(pszFilename);
1614 AssertPtrNull(pStreamOps);
1615 AssertPtr(pVM);
1616 Assert( enmAfter == SSMAFTER_DESTROY
1617 || enmAfter == SSMAFTER_CONTINUE
1618 || enmAfter == SSMAFTER_TELEPORT);
1619 AssertPtr(ppSSM);
1620 *ppSSM = NULL;
1621
1622 /*
1623 * Change the state and perform/start the saving.
1624 */
1625 if (!fSkipStateChanges)
1626 {
1627 rc = vmR3TrySetState(pVM, "VMR3Save", 2,
1628 VMSTATE_SAVING, VMSTATE_SUSPENDED,
1629 VMSTATE_RUNNING_LS, VMSTATE_RUNNING);
1630 }
1631 else
1632 {
1633 Assert(enmAfter != SSMAFTER_TELEPORT);
1634 rc = 1;
1635 }
1636
1637 if (rc == 1 && enmAfter != SSMAFTER_TELEPORT)
1638 {
1639 rc = SSMR3Save(pVM, pszFilename, pStreamOps, pvStreamOpsUser, enmAfter, pfnProgress, pvProgressUser);
1640 if (!fSkipStateChanges)
1641 vmR3SetState(pVM, VMSTATE_SUSPENDED, VMSTATE_SAVING);
1642 }
1643 else if (rc == 2 || enmAfter == SSMAFTER_TELEPORT)
1644 {
1645 Assert(!fSkipStateChanges);
1646 if (enmAfter == SSMAFTER_TELEPORT)
1647 pVM->vm.s.fTeleportedAndNotFullyResumedYet = true;
1648 rc = SSMR3LiveSave(pVM, cMsMaxDowntime, pszFilename, pStreamOps, pvStreamOpsUser,
1649 enmAfter, pfnProgress, pvProgressUser, ppSSM);
1650 /* (We're not subject to cancellation just yet.) */
1651 }
1652 else
1653 Assert(RT_FAILURE(rc));
1654 return rc;
1655}
1656
1657
1658/**
1659 * Common worker for VMR3Save and VMR3Teleport.
1660 *
1661 * @returns VBox status code.
1662 *
1663 * @param pVM The VM handle.
1664 * @param cMsMaxDowntime The maximum downtime given as milliseconds.
1665 * @param pszFilename The name of the file. NULL if pStreamOps is used.
1666 * @param pStreamOps The stream methods. NULL if pszFilename is used.
1667 * @param pvStreamOpsUser The user argument to the stream methods.
1668 * @param enmAfter What to do afterwards.
1669 * @param pfnProgress Progress callback. Optional.
1670 * @param pvProgressUser User argument for the progress callback.
1671 * @param pfSuspended Set if we suspended the VM.
1672 * @param fSkipStateChanges Set if we're supposed to skip state changes (FTM delta case)
1673 *
1674 * @thread Non-EMT
1675 */
1676static int vmR3SaveTeleport(PVM pVM, uint32_t cMsMaxDowntime,
1677 const char *pszFilename, PCSSMSTRMOPS pStreamOps, void *pvStreamOpsUser,
1678 SSMAFTER enmAfter, PFNVMPROGRESS pfnProgress, void *pvProgressUser, bool *pfSuspended,
1679 bool fSkipStateChanges)
1680{
1681 /*
1682 * Request the operation in EMT(0).
1683 */
1684 PSSMHANDLE pSSM;
1685 int rc = VMR3ReqCallWaitU(pVM->pUVM, 0 /*idDstCpu*/,
1686 (PFNRT)vmR3Save, 10, pVM, cMsMaxDowntime, pszFilename, pStreamOps, pvStreamOpsUser,
1687 enmAfter, pfnProgress, pvProgressUser, &pSSM, fSkipStateChanges);
1688 if ( RT_SUCCESS(rc)
1689 && pSSM)
1690 {
1691 Assert(!fSkipStateChanges);
1692
1693 /*
1694 * Live snapshot.
1695 *
1696 * The state handling here is kind of tricky; doing it on EMT(0) helps
1697 * a bit. See the VMSTATE diagram for details.
1698 */
1699 rc = SSMR3LiveDoStep1(pSSM);
1700 if (RT_SUCCESS(rc))
1701 {
1702 if (VMR3GetState(pVM) != VMSTATE_SAVING)
1703 for (;;)
1704 {
1705                /* Try to suspend the VM. */
1706 rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
1707 vmR3LiveDoSuspend, pfSuspended);
1708 if (rc != VERR_TRY_AGAIN)
1709 break;
1710
1711 /* Wait for the state to change. */
1712 RTThreadSleep(250); /** @todo Live Migration: fix this polling wait by some smart use of multiple release event semaphores.. */
1713 }
1714 if (RT_SUCCESS(rc))
1715 rc = VMR3ReqCallWaitU(pVM->pUVM, 0 /*idDstCpu*/, (PFNRT)vmR3LiveDoStep2, 2, pVM, pSSM);
1716 else
1717 {
1718 int rc2 = VMR3ReqCallWaitU(pVM->pUVM, 0 /*idDstCpu*/, (PFNRT)SSMR3LiveDone, 1, pSSM);
1719 AssertMsg(rc2 == rc, ("%Rrc != %Rrc\n", rc2, rc));
1720 }
1721 }
1722 else
1723 {
1724 int rc2 = VMR3ReqCallWaitU(pVM->pUVM, 0 /*idDstCpu*/, (PFNRT)SSMR3LiveDone, 1, pSSM);
1725 AssertMsg(rc2 == rc, ("%Rrc != %Rrc\n", rc2, rc));
1726
1727 rc2 = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE, vmR3LiveDoStep1Cleanup, pfSuspended);
1728 if (RT_FAILURE(rc2) && rc == VERR_SSM_CANCELLED)
1729 rc = rc2;
1730 }
1731 }
1732
1733 return rc;
1734}
1735
1736
1737/**
1738 * Save current VM state.
1739 *
1740 * Can be used for both saving the state and creating snapshots.
1741 *
1742 * When called for a VM in the Running state, the saved state is created live
1743 * and the VM is only suspended when the final part of the saving is performed.
1744 * The VM state will not be restored to Running in this case and it's up to the
1745 * caller to call VMR3Resume if this is desirable. (The rationale is that the
1746 * caller probably wishes to reconfigure the disks before resuming the VM.)
1747 *
1748 * @returns VBox status code.
1749 *
1750 * @param pVM                  The VM whose state should be saved.
1751 * @param pszFilename          The name of the save state file.
1754 * @param fContinueAfterwards  Whether to continue execution afterwards or not.
1755 * When in doubt, set this to true.
1756 * @param pfnProgress Progress callback. Optional.
1757 * @param pvUser User argument for the progress callback.
1758 * @param pfSuspended Set if we suspended the VM.
1759 *
1760 * @thread Non-EMT.
1761 * @vmstate Suspended or Running
1762 * @vmstateto Saving+Suspended or
1763 * RunningLS+SuspendingLS+SuspendedLS+Saving+Suspended.
1764 */
1765VMMR3DECL(int) VMR3Save(PVM pVM, const char *pszFilename, bool fContinueAfterwards, PFNVMPROGRESS pfnProgress, void *pvUser, bool *pfSuspended)
1766{
1767 LogFlow(("VMR3Save: pVM=%p pszFilename=%p:{%s} fContinueAfterwards=%RTbool pfnProgress=%p pvUser=%p pfSuspended=%p\n",
1768 pVM, pszFilename, pszFilename, fContinueAfterwards, pfnProgress, pvUser, pfSuspended));
1769
1770 /*
1771 * Validate input.
1772 */
1773 AssertPtr(pfSuspended);
1774 *pfSuspended = false;
1775 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1776 VM_ASSERT_OTHER_THREAD(pVM);
1777 AssertReturn(VALID_PTR(pszFilename), VERR_INVALID_POINTER);
1778 AssertReturn(*pszFilename, VERR_INVALID_PARAMETER);
1779 AssertPtrNullReturn(pfnProgress, VERR_INVALID_POINTER);
1780
1781 /*
1782 * Join paths with VMR3Teleport.
1783 */
1784 SSMAFTER enmAfter = fContinueAfterwards ? SSMAFTER_CONTINUE : SSMAFTER_DESTROY;
1785 int rc = vmR3SaveTeleport(pVM, 250 /*cMsMaxDowntime*/,
1786 pszFilename, NULL /* pStreamOps */, NULL /* pvStreamOpsUser */,
1787 enmAfter, pfnProgress, pvUser, pfSuspended,
1788 false /* fSkipStateChanges */);
1789 LogFlow(("VMR3Save: returns %Rrc (*pfSuspended=%RTbool)\n", rc, *pfSuspended));
1790 return rc;
1791}
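
/*
 * Illustrative usage sketch (disabled): how a VMM user such as Main might
 * save the state from a non-EMT thread and resume afterwards. The exact
 * PFNVMPROGRESS parameter list is assumed here; the callback name, helper
 * name and file name are hypothetical.
 */
#if 0
static DECLCALLBACK(int) myProgressCallback(PVM pVM, unsigned uPercent, void *pvUser)
{
    NOREF(pVM); NOREF(pvUser);
    LogRel(("Saving VM state: %u%%\n", uPercent));
    return VINF_SUCCESS;
}

static int mySaveAndResume(PVM pVM)
{
    bool fSuspended = false;
    int rc = VMR3Save(pVM, "/tmp/MyVM.sav", true /*fContinueAfterwards*/,
                      myProgressCallback, NULL /*pvUser*/, &fSuspended);
    if (RT_SUCCESS(rc) && fSuspended)
        rc = VMR3Resume(pVM); /* VMR3Save leaves the VM suspended; resuming is up to the caller. */
    return rc;
}
#endif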
1792
1793/**
1794 * Save current VM state (used by FTM)
1795 *
1796 * Can be used for both saving the state and creating snapshots.
1797 *
1798 * When called for a VM in the Running state, the saved state is created live
1799 * and the VM is only suspended when the final part of the saving is performed.
1800 * The VM state will not be restored to Running in this case and it's up to the
1801 * caller to call VMR3Resume if this is desirable. (The rationale is that the
1802 * caller probably wishes to reconfigure the disks before resuming the VM.)
1803 *
1804 * @returns VBox status code.
1805 *
1806 * @param pVM               The VM whose state should be saved.
1807 * @param pStreamOps The stream methods.
1808 * @param pvStreamOpsUser The user argument to the stream methods.
1809 * @param pfSuspended Set if we suspended the VM.
1810 * @param fSkipStateChanges Set if we're supposed to skip state changes (FTM delta case)
1811 *
1812 * @thread Any
1813 * @vmstate Suspended or Running
1814 * @vmstateto Saving+Suspended or
1815 * RunningLS+SuspendingLS+SuspendedLS+Saving+Suspended.
1816 */
1817VMMR3DECL(int) VMR3SaveFT(PVM pVM, PCSSMSTRMOPS pStreamOps, void *pvStreamOpsUser, bool *pfSuspended,
1818 bool fSkipStateChanges)
1819{
1820    LogFlow(("VMR3SaveFT: pVM=%p pStreamOps=%p pvStreamOpsUser=%p pfSuspended=%p\n",
1821 pVM, pStreamOps, pvStreamOpsUser, pfSuspended));
1822
1823 /*
1824 * Validate input.
1825 */
1826 AssertPtr(pfSuspended);
1827 *pfSuspended = false;
1828 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1829 AssertReturn(pStreamOps, VERR_INVALID_PARAMETER);
1830
1831 /*
1832 * Join paths with VMR3Teleport.
1833 */
1834 int rc = vmR3SaveTeleport(pVM, 250 /*cMsMaxDowntime*/,
1835 NULL, pStreamOps, pvStreamOpsUser,
1836 SSMAFTER_CONTINUE, NULL, NULL, pfSuspended,
1837 fSkipStateChanges);
1838 LogFlow(("VMR3SaveFT: returns %Rrc (*pfSuspended=%RTbool)\n", rc, *pfSuspended));
1839 return rc;
1840}
1841
1842
1843/**
1844 * Teleport the VM (aka live migration).
1845 *
1846 * @returns VBox status code.
1847 *
1848 * @param pVM               The VM whose state should be saved.
1849 * @param cMsMaxDowntime The maximum downtime given as milliseconds.
1850 * @param pStreamOps The stream methods.
1851 * @param pvStreamOpsUser The user argument to the stream methods.
1852 * @param pfnProgress Progress callback. Optional.
1853 * @param pvProgressUser User argument for the progress callback.
1854 * @param pfSuspended Set if we suspended the VM.
1855 *
1856 * @thread Non-EMT.
1857 * @vmstate Suspended or Running
1858 * @vmstateto Saving+Suspended or
1859 * RunningLS+SuspendingLS+SuspendedLS+Saving+Suspended.
1860 */
1861VMMR3DECL(int) VMR3Teleport(PVM pVM, uint32_t cMsMaxDowntime, PCSSMSTRMOPS pStreamOps, void *pvStreamOpsUser,
1862 PFNVMPROGRESS pfnProgress, void *pvProgressUser, bool *pfSuspended)
1863{
1864    LogFlow(("VMR3Teleport: pVM=%p cMsMaxDowntime=%u pStreamOps=%p pvStreamOpsUser=%p pfnProgress=%p pvProgressUser=%p\n",
1865 pVM, cMsMaxDowntime, pStreamOps, pvStreamOpsUser, pfnProgress, pvProgressUser));
1866
1867 /*
1868 * Validate input.
1869 */
1870 AssertPtr(pfSuspended);
1871 *pfSuspended = false;
1872 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1873 VM_ASSERT_OTHER_THREAD(pVM);
1874 AssertPtrReturn(pStreamOps, VERR_INVALID_POINTER);
1875 AssertPtrNullReturn(pfnProgress, VERR_INVALID_POINTER);
1876
1877 /*
1878 * Join paths with VMR3Save.
1879 */
1880 int rc = vmR3SaveTeleport(pVM, cMsMaxDowntime,
1881 NULL /*pszFilename*/, pStreamOps, pvStreamOpsUser,
1882 SSMAFTER_TELEPORT, pfnProgress, pvProgressUser, pfSuspended,
1883 false /* fSkipStateChanges */);
1884 LogFlow(("VMR3Teleport: returns %Rrc (*pfSuspended=%RTbool)\n", rc, *pfSuspended));
1885 return rc;
1886}
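
/*
 * Illustrative teleportation-source sketch (disabled): only the VMR3Teleport
 * call itself comes from the API above; the stream-ops table and connection
 * context are assumed to be set up elsewhere, and the helper name is
 * hypothetical. Powering off the source afterwards is typical, but that
 * policy is up to the caller.
 */
#if 0
static int myTeleportSource(PVM pVM, PCSSMSTRMOPS pTcpStreamOps, void *pvConnection)
{
    bool fSuspended = false;
    int rc = VMR3Teleport(pVM, 250 /*cMsMaxDowntime*/, pTcpStreamOps, pvConnection,
                          NULL /*pfnProgress*/, NULL /*pvProgressUser*/, &fSuspended);
    if (RT_SUCCESS(rc))
        rc = VMR3PowerOff(pVM); /* The target owns the machine now. */
    return rc;
}
#endif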
1887
1888
1889
1890/**
1891 * EMT(0) worker for VMR3LoadFromFile and VMR3LoadFromStream.
1892 *
1893 * @returns VBox status code.
1894 *
1895 * @param pVM The VM handle.
1896 * @param pszFilename The name of the file. NULL if pStreamOps is used.
1897 * @param pStreamOps The stream methods. NULL if pszFilename is used.
1898 * @param pvStreamOpsUser The user argument to the stream methods.
1899 * @param pfnProgress Progress callback. Optional.
1900 * @param pvProgressUser    User argument for the progress callback.
1901 * @param fTeleporting Indicates whether we're teleporting or not.
1902 * @param fSkipStateChanges Set if we're supposed to skip state changes (FTM delta case)
1903 *
1904 * @thread EMT.
1905 */
1906static DECLCALLBACK(int) vmR3Load(PVM pVM, const char *pszFilename, PCSSMSTRMOPS pStreamOps, void *pvStreamOpsUser,
1907 PFNVMPROGRESS pfnProgress, void *pvProgressUser, bool fTeleporting,
1908 bool fSkipStateChanges)
1909{
1910 int rc = VINF_SUCCESS;
1911
1912 LogFlow(("vmR3Load: pVM=%p pszFilename=%p:{%s} pStreamOps=%p pvStreamOpsUser=%p pfnProgress=%p pvProgressUser=%p fTeleporting=%RTbool\n",
1913 pVM, pszFilename, pszFilename, pStreamOps, pvStreamOpsUser, pfnProgress, pvProgressUser, fTeleporting));
1914
1915 /*
1916 * Validate input (paranoia).
1917 */
1918 AssertPtr(pVM);
1919 AssertPtrNull(pszFilename);
1920 AssertPtrNull(pStreamOps);
1921 AssertPtrNull(pfnProgress);
1922
1923 if (!fSkipStateChanges)
1924 {
1925 /*
1926 * Change the state and perform the load.
1927 *
1928 * Always perform a relocation round afterwards to make sure hypervisor
1929 * selectors and such are correct.
1930 */
1931 rc = vmR3TrySetState(pVM, "VMR3Load", 2,
1932 VMSTATE_LOADING, VMSTATE_CREATED,
1933 VMSTATE_LOADING, VMSTATE_SUSPENDED);
1934 if (RT_FAILURE(rc))
1935 return rc;
1936 }
1937 pVM->vm.s.fTeleportedAndNotFullyResumedYet = fTeleporting;
1938
1939 uint32_t cErrorsPriorToSave = VMR3GetErrorCount(pVM);
1940 rc = SSMR3Load(pVM, pszFilename, pStreamOps, pvStreamOpsUser, SSMAFTER_RESUME, pfnProgress, pvProgressUser);
1941 if (RT_SUCCESS(rc))
1942 {
1943 VMR3Relocate(pVM, 0 /*offDelta*/);
1944 if (!fSkipStateChanges)
1945 vmR3SetState(pVM, VMSTATE_SUSPENDED, VMSTATE_LOADING);
1946 }
1947 else
1948 {
1949 pVM->vm.s.fTeleportedAndNotFullyResumedYet = false;
1950 if (!fSkipStateChanges)
1951 vmR3SetState(pVM, VMSTATE_LOAD_FAILURE, VMSTATE_LOADING);
1952
1953 if (cErrorsPriorToSave == VMR3GetErrorCount(pVM))
1954 rc = VMSetError(pVM, rc, RT_SRC_POS,
1955 N_("Unable to restore the virtual machine's saved state from '%s'. "
1956 "It may be damaged or from an older version of VirtualBox. "
1957 "Please discard the saved state before starting the virtual machine"),
1958 pszFilename);
1959 }
1960
1961 return rc;
1962}
1963
1964
1965/**
1966 * Loads a VM state into a newly created VM or one that is suspended.
1967 *
1968 * To restore a saved state on VM startup, call this function and then resume
1969 * the VM instead of powering it on.
1970 *
1971 * @returns VBox status code.
1972 *
1973 * @param pVM The VM handle.
1974 * @param pszFilename The name of the save state file.
1975 * @param pfnProgress Progress callback. Optional.
1976 * @param pvUser User argument for the progress callback.
1977 *
1978 * @thread Any thread.
1979 * @vmstate Created, Suspended
1980 * @vmstateto Loading+Suspended
1981 */
1982VMMR3DECL(int) VMR3LoadFromFile(PVM pVM, const char *pszFilename, PFNVMPROGRESS pfnProgress, void *pvUser)
1983{
1984 LogFlow(("VMR3LoadFromFile: pVM=%p pszFilename=%p:{%s} pfnProgress=%p pvUser=%p\n",
1985 pVM, pszFilename, pszFilename, pfnProgress, pvUser));
1986
1987 /*
1988 * Validate input.
1989 */
1990 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1991 AssertPtrReturn(pszFilename, VERR_INVALID_POINTER);
1992
1993 /*
1994 * Forward the request to EMT(0). No need to setup a rendezvous here
1995 * since there is no execution taking place when this call is allowed.
1996 */
1997 int rc = VMR3ReqCallWaitU(pVM->pUVM, 0 /*idDstCpu*/, (PFNRT)vmR3Load, 8,
1998 pVM, pszFilename, (uintptr_t)NULL /*pStreamOps*/, (uintptr_t)NULL /*pvStreamOpsUser*/, pfnProgress, pvUser,
1999 false /*fTeleporting*/, false /* fSkipStateChanges */);
2000 LogFlow(("VMR3LoadFromFile: returns %Rrc\n", rc));
2001 return rc;
2002}
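
/*
 * Illustrative restore-on-startup sketch (disabled): load the saved state and
 * resume instead of powering on, as described above. The file name and the
 * helper name are hypothetical; error handling is trimmed to the essentials.
 */
#if 0
static int myRestoreAndRun(PVM pVM)
{
    int rc = VMR3LoadFromFile(pVM, "/tmp/MyVM.sav", NULL /*pfnProgress*/, NULL /*pvUser*/);
    if (RT_SUCCESS(rc))
        rc = VMR3Resume(pVM); /* Resume rather than power on when starting from a saved state. */
    return rc;
}
#endif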
2003
2004
2005/**
2006 * VMR3LoadFromFile for arbitrary file streams.
2007 *
2008 * @returns VBox status code.
2009 *
2010 * @param pVM The VM handle.
2011 * @param pStreamOps The stream methods.
2012 * @param pvStreamOpsUser The user argument to the stream methods.
2013 * @param pfnProgress Progress callback. Optional.
2014 * @param pvProgressUser User argument for the progress callback.
2015 *
2016 * @thread Any thread.
2017 * @vmstate Created, Suspended
2018 * @vmstateto Loading+Suspended
2019 */
2020VMMR3DECL(int) VMR3LoadFromStream(PVM pVM, PCSSMSTRMOPS pStreamOps, void *pvStreamOpsUser,
2021 PFNVMPROGRESS pfnProgress, void *pvProgressUser)
2022{
2023 LogFlow(("VMR3LoadFromStream: pVM=%p pStreamOps=%p pvStreamOpsUser=%p pfnProgress=%p pvProgressUser=%p\n",
2024 pVM, pStreamOps, pvStreamOpsUser, pfnProgress, pvProgressUser));
2025
2026 /*
2027 * Validate input.
2028 */
2029 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
2030 AssertPtrReturn(pStreamOps, VERR_INVALID_POINTER);
2031
2032 /*
2033 * Forward the request to EMT(0). No need to setup a rendezvous here
2034 * since there is no execution taking place when this call is allowed.
2035 */
2036 int rc = VMR3ReqCallWaitU(pVM->pUVM, 0 /*idDstCpu*/, (PFNRT)vmR3Load, 8,
2037 pVM, (uintptr_t)NULL /*pszFilename*/, pStreamOps, pvStreamOpsUser, pfnProgress, pvProgressUser,
2038 true /*fTeleporting*/, false /* fSkipStateChanges */);
2039 LogFlow(("VMR3LoadFromStream: returns %Rrc\n", rc));
2040 return rc;
2041}
2042
2043
2044/**
2045 * VMR3LoadFromStream variant used by the FT component (skips state changes and progress reporting).
2046 *
2047 * @returns VBox status code.
2048 *
2049 * @param pVM The VM handle.
2050 * @param pStreamOps The stream methods.
2051 * @param pvStreamOpsUser The user argument to the stream methods.
2054 *
2055 * @thread Any thread.
2056 * @vmstate Created, Suspended
2057 * @vmstateto Loading+Suspended
2058 */
2059VMMR3DECL(int) VMR3LoadFromStreamFT(PVM pVM, PCSSMSTRMOPS pStreamOps, void *pvStreamOpsUser)
2060{
2061 LogFlow(("VMR3LoadFromStreamFT: pVM=%p pStreamOps=%p pvStreamOpsUser=%p\n",
2062 pVM, pStreamOps, pvStreamOpsUser));
2063
2064 /*
2065 * Validate input.
2066 */
2067 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
2068 AssertPtrReturn(pStreamOps, VERR_INVALID_POINTER);
2069
2070 /*
2071 * Forward the request to EMT(0). No need to setup a rendezvous here
2072 * since there is no execution taking place when this call is allowed.
2073 */
2074 int rc = VMR3ReqCallWaitU(pVM->pUVM, 0 /*idDstCpu*/, (PFNRT)vmR3Load, 8,
2075 pVM, (uintptr_t)NULL /*pszFilename*/, pStreamOps, pvStreamOpsUser, NULL, NULL,
2076 true /*fTeleporting*/, true /* fSkipStateChanges */);
2077    LogFlow(("VMR3LoadFromStreamFT: returns %Rrc\n", rc));
2078 return rc;
2079}
2080
2081/**
2082 * EMT rendezvous worker for VMR3PowerOff.
2083 *
2084 * @returns VERR_VM_INVALID_VM_STATE or VINF_EM_OFF. (This is a strict
2085 * return code, see FNVMMEMTRENDEZVOUS.)
2086 *
2087 * @param pVM The VM handle.
2088 * @param pVCpu The VMCPU handle of the EMT.
2089 * @param pvUser Ignored.
2090 */
2091static DECLCALLBACK(VBOXSTRICTRC) vmR3PowerOff(PVM pVM, PVMCPU pVCpu, void *pvUser)
2092{
2093 LogFlow(("vmR3PowerOff: pVM=%p pVCpu=%p/#%u\n", pVM, pVCpu, pVCpu->idCpu));
2094 Assert(!pvUser); NOREF(pvUser);
2095
2096 /*
2097 * The first EMT thru here will change the state to PoweringOff.
2098 */
2099 if (pVCpu->idCpu == pVM->cCpus - 1)
2100 {
2101 int rc = vmR3TrySetState(pVM, "VMR3PowerOff", 11,
2102 VMSTATE_POWERING_OFF, VMSTATE_RUNNING, /* 1 */
2103 VMSTATE_POWERING_OFF, VMSTATE_SUSPENDED, /* 2 */
2104 VMSTATE_POWERING_OFF, VMSTATE_DEBUGGING, /* 3 */
2105 VMSTATE_POWERING_OFF, VMSTATE_LOAD_FAILURE, /* 4 */
2106 VMSTATE_POWERING_OFF, VMSTATE_GURU_MEDITATION, /* 5 */
2107 VMSTATE_POWERING_OFF, VMSTATE_FATAL_ERROR, /* 6 */
2108 VMSTATE_POWERING_OFF, VMSTATE_CREATED, /* 7 */ /** @todo update the diagram! */
2109 VMSTATE_POWERING_OFF_LS, VMSTATE_RUNNING_LS, /* 8 */
2110 VMSTATE_POWERING_OFF_LS, VMSTATE_DEBUGGING_LS, /* 9 */
2111 VMSTATE_POWERING_OFF_LS, VMSTATE_GURU_MEDITATION_LS,/* 10 */
2112 VMSTATE_POWERING_OFF_LS, VMSTATE_FATAL_ERROR_LS); /* 11 */
2113 if (RT_FAILURE(rc))
2114 return rc;
2115 if (rc >= 7)
2116 SSMR3Cancel(pVM);
2117 }
2118
2119 /*
2120 * Check the state.
2121 */
2122 VMSTATE enmVMState = VMR3GetState(pVM);
2123 AssertMsgReturn( enmVMState == VMSTATE_POWERING_OFF
2124 || enmVMState == VMSTATE_POWERING_OFF_LS,
2125 ("%s\n", VMR3GetStateName(enmVMState)),
2126 VERR_VM_INVALID_VM_STATE);
2127
2128 /*
2129 * EMT(0) does the actual power off work here *after* all the other EMTs
2130 * have been thru and entered the STOPPED state.
2131 */
2132 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STOPPED);
2133 if (pVCpu->idCpu == 0)
2134 {
2135 /*
2136 * For debugging purposes, we will log a summary of the guest state at this point.
2137 */
2138 if (enmVMState != VMSTATE_GURU_MEDITATION)
2139 {
2140 /** @todo SMP support? */
2141 /** @todo make the state dumping at VMR3PowerOff optional. */
2142 bool fOldBuffered = RTLogRelSetBuffering(true /*fBuffered*/);
2143 RTLogRelPrintf("****************** Guest state at power off ******************\n");
2144 DBGFR3Info(pVM, "cpumguest", "verbose", DBGFR3InfoLogRelHlp());
2145 RTLogRelPrintf("***\n");
2146 DBGFR3Info(pVM, "mode", NULL, DBGFR3InfoLogRelHlp());
2147 RTLogRelPrintf("***\n");
2148 DBGFR3Info(pVM, "activetimers", NULL, DBGFR3InfoLogRelHlp());
2149 RTLogRelPrintf("***\n");
2150 DBGFR3Info(pVM, "gdt", NULL, DBGFR3InfoLogRelHlp());
2151 /** @todo dump guest call stack. */
2152#if 1 // "temporary" while debugging #1589
2153 RTLogRelPrintf("***\n");
2154 uint32_t esp = CPUMGetGuestESP(pVCpu);
2155 if ( CPUMGetGuestSS(pVCpu) == 0
2156 && esp < _64K)
2157 {
2158 uint8_t abBuf[PAGE_SIZE];
2159 RTLogRelPrintf("***\n"
2160 "ss:sp=0000:%04x ", esp);
2161 uint32_t Start = esp & ~(uint32_t)63;
2162 int rc = PGMPhysSimpleReadGCPhys(pVM, abBuf, Start, 0x100);
2163 if (RT_SUCCESS(rc))
2164 RTLogRelPrintf("0000:%04x TO 0000:%04x:\n"
2165 "%.*Rhxd\n",
2166 Start, Start + 0x100 - 1,
2167 0x100, abBuf);
2168 else
2169 RTLogRelPrintf("rc=%Rrc\n", rc);
2170
2171 /* grub ... */
2172 if (esp < 0x2000 && esp > 0x1fc0)
2173 {
2174 rc = PGMPhysSimpleReadGCPhys(pVM, abBuf, 0x8000, 0x800);
2175 if (RT_SUCCESS(rc))
2176 RTLogRelPrintf("0000:8000 TO 0000:87ff:\n"
2177 "%.*Rhxd\n",
2178 0x800, abBuf);
2179 }
2180 /* microsoft cdrom hang ... */
2181 if (true)
2182 {
2183 rc = PGMPhysSimpleReadGCPhys(pVM, abBuf, 0x8000, 0x200);
2184 if (RT_SUCCESS(rc))
2185 RTLogRelPrintf("2000:0000 TO 2000:01ff:\n"
2186 "%.*Rhxd\n",
2187 0x200, abBuf);
2188 }
2189 }
2190#endif
2191 RTLogRelSetBuffering(fOldBuffered);
2192 RTLogRelPrintf("************** End of Guest state at power off ***************\n");
2193 }
2194
2195 /*
2196 * Perform the power off notifications and advance the state to
2197 * Off or OffLS.
2198 */
2199 PDMR3PowerOff(pVM);
2200
2201 PUVM pUVM = pVM->pUVM;
2202 RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
2203 enmVMState = pVM->enmVMState;
2204 if (enmVMState == VMSTATE_POWERING_OFF_LS)
2205 vmR3SetStateLocked(pVM, pUVM, VMSTATE_OFF_LS, VMSTATE_POWERING_OFF_LS);
2206 else
2207 vmR3SetStateLocked(pVM, pUVM, VMSTATE_OFF, VMSTATE_POWERING_OFF);
2208 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
2209 }
2210 return VINF_EM_OFF;
2211}
2212
2213
2214/**
2215 * Power off the VM.
2216 *
2217 * @returns VBox status code. When called on EMT, this will be a strict status
2218 * code that has to be propagated up the call stack.
2219 *
2220 * @param pVM The handle of the VM to be powered off.
2221 *
2222 * @thread Any thread.
2223 * @vmstate Suspended, Running, Guru Meditation, Load Failure
2224 * @vmstateto Off or OffLS
2225 */
2226VMMR3DECL(int) VMR3PowerOff(PVM pVM)
2227{
2228 LogFlow(("VMR3PowerOff: pVM=%p\n", pVM));
2229 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
2230
2231 /*
2232 * Gather all the EMTs to make sure there are no races before
2233 * changing the VM state.
2234 */
2235 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
2236 vmR3PowerOff, NULL);
2237 LogFlow(("VMR3PowerOff: returns %Rrc\n", rc));
2238 return rc;
2239}
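
/*
 * Illustrative orderly shutdown sketch (disabled): power the VM off and then
 * destroy the handle from a non-EMT thread, matching the state requirements
 * documented for VMR3PowerOff above and VMR3Destroy below. The helper name is
 * hypothetical.
 */
#if 0
static int myShutdownAndDestroy(PVM pVM)
{
    int rc = VMR3PowerOff(pVM);     /* ... -> Off (or OffLS). */
    if (RT_SUCCESS(rc))
        rc = VMR3Destroy(pVM);      /* Off/Created -> gone; pVM must not be used afterwards. */
    return rc;
}
#endif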
2240
2241
2242/**
2243 * Destroys the VM.
2244 *
2245 * The VM must be powered off (or never really powered on) to call this
2246 * function. The VM handle is destroyed and can no longer be used upon successful
2247 * return.
2248 *
2249 * @returns VBox status code.
2250 *
2251 * @param pVM The handle of the VM which should be destroyed.
2252 *
2253 * @thread      Any non-emulation thread.
2254 * @vmstate Off, Created
2255 * @vmstateto N/A
2256 */
2257VMMR3DECL(int) VMR3Destroy(PVM pVM)
2258{
2259 LogFlow(("VMR3Destroy: pVM=%p\n", pVM));
2260
2261 /*
2262 * Validate input.
2263 */
2264 if (!pVM)
2265 return VERR_INVALID_PARAMETER;
2266 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
2267 AssertLogRelReturn(!VM_IS_EMT(pVM), VERR_VM_THREAD_IS_EMT);
2268
2269 /*
2270 * Change VM state to destroying and unlink the VM.
2271 */
2272 int rc = vmR3TrySetState(pVM, "VMR3Destroy", 1, VMSTATE_DESTROYING, VMSTATE_OFF);
2273 if (RT_FAILURE(rc))
2274 return rc;
2275
2276 /** @todo lock this when we start having multiple machines in a process... */
2277 PUVM pUVM = pVM->pUVM; AssertPtr(pUVM);
2278 if (g_pUVMsHead == pUVM)
2279 g_pUVMsHead = pUVM->pNext;
2280 else
2281 {
2282 PUVM pPrev = g_pUVMsHead;
2283 while (pPrev && pPrev->pNext != pUVM)
2284 pPrev = pPrev->pNext;
2285 AssertMsgReturn(pPrev, ("pUVM=%p / pVM=%p is INVALID!\n", pUVM, pVM), VERR_INVALID_PARAMETER);
2286
2287 pPrev->pNext = pUVM->pNext;
2288 }
2289 pUVM->pNext = NULL;
2290
2291 /*
2292 * Notify registered at destruction listeners.
2293 */
2294 vmR3AtDtor(pVM);
2295
2296 /*
2297 * Call vmR3Destroy on each of the EMTs ending with EMT(0) doing the bulk
2298 * of the cleanup.
2299 */
2300 /* vmR3Destroy on all EMTs, ending with EMT(0). */
2301 rc = VMR3ReqCallWaitU(pUVM, VMCPUID_ALL_REVERSE, (PFNRT)vmR3Destroy, 1, pVM);
2302 AssertLogRelRC(rc);
2303
2304 /* Wait for EMTs and destroy the UVM. */
2305 vmR3DestroyUVM(pUVM, 30000);
2306
2307 LogFlow(("VMR3Destroy: returns VINF_SUCCESS\n"));
2308 return VINF_SUCCESS;
2309}
2310
2311
2312/**
2313 * Internal destruction worker.
2314 *
2315 * This is either called from VMR3Destroy via VMR3ReqCallU or from
2316 * vmR3EmulationThreadWithId when EMT(0) terminates after having called
2317 * VMR3Destroy().
2318 *
2319 * When called on EMT(0), it will perform the great bulk of the destruction.
2320 * When called on the other EMTs, they will do nothing and the whole purpose is
2321 * to return VINF_EM_TERMINATE so they break out of their run loops.
2322 *
2323 * @returns VINF_EM_TERMINATE.
2324 * @param pVM The VM handle.
2325 */
2326DECLCALLBACK(int) vmR3Destroy(PVM pVM)
2327{
2328 PUVM pUVM = pVM->pUVM;
2329 PVMCPU pVCpu = VMMGetCpu(pVM);
2330 Assert(pVCpu);
2331 LogFlow(("vmR3Destroy: pVM=%p pUVM=%p pVCpu=%p idCpu=%u\n", pVM, pUVM, pVCpu, pVCpu->idCpu));
2332
2333 /*
2334 * Only VCPU 0 does the full cleanup (last).
2335 */
2336 if (pVCpu->idCpu == 0)
2337 {
2338 /*
2339 * Dump statistics to the log.
2340 */
2341#if defined(VBOX_WITH_STATISTICS) || defined(LOG_ENABLED)
2342 RTLogFlags(NULL, "nodisabled nobuffered");
2343#endif
2344#ifdef VBOX_WITH_STATISTICS
2345 STAMR3Dump(pVM, "*");
2346#else
2347 LogRel(("************************* Statistics *************************\n"));
2348 STAMR3DumpToReleaseLog(pVM, "*");
2349 LogRel(("********************* End of statistics **********************\n"));
2350#endif
2351
2352 /*
2353 * Destroy the VM components.
2354 */
2355 int rc = TMR3Term(pVM);
2356 AssertRC(rc);
2357#ifdef VBOX_WITH_DEBUGGER
2358 rc = DBGCTcpTerminate(pVM, pUVM->vm.s.pvDBGC);
2359 pUVM->vm.s.pvDBGC = NULL;
2360#endif
2361 AssertRC(rc);
2362 rc = FTMR3Term(pVM);
2363 AssertRC(rc);
2364 rc = DBGFR3Term(pVM);
2365 AssertRC(rc);
2366 rc = PDMR3Term(pVM);
2367 AssertRC(rc);
2368 rc = EMR3Term(pVM);
2369 AssertRC(rc);
2370 rc = IOMR3Term(pVM);
2371 AssertRC(rc);
2372 rc = CSAMR3Term(pVM);
2373 AssertRC(rc);
2374 rc = PATMR3Term(pVM);
2375 AssertRC(rc);
2376 rc = TRPMR3Term(pVM);
2377 AssertRC(rc);
2378 rc = SELMR3Term(pVM);
2379 AssertRC(rc);
2380 rc = REMR3Term(pVM);
2381 AssertRC(rc);
2382 rc = HWACCMR3Term(pVM);
2383 AssertRC(rc);
2384 rc = PGMR3Term(pVM);
2385 AssertRC(rc);
2386 rc = VMMR3Term(pVM); /* Terminates the ring-0 code! */
2387 AssertRC(rc);
2388 rc = CPUMR3Term(pVM);
2389 AssertRC(rc);
2390 SSMR3Term(pVM);
2391 rc = PDMR3CritSectTerm(pVM);
2392 AssertRC(rc);
2393 rc = MMR3Term(pVM);
2394 AssertRC(rc);
2395
2396 /*
2397 * We're done, tell the other EMTs to quit.
2398 */
2399 ASMAtomicUoWriteBool(&pUVM->vm.s.fTerminateEMT, true);
2400 ASMAtomicWriteU32(&pVM->fGlobalForcedActions, VM_FF_CHECK_VM_STATE); /* Can't hurt... */
2401 LogFlow(("vmR3Destroy: returning %Rrc\n", VINF_EM_TERMINATE));
2402 }
2403 return VINF_EM_TERMINATE;
2404}
2405
2406
2407/**
2408 * Destroys the UVM portion.
2409 *
2410 * This is called as the final step in the VM destruction or as the cleanup
2411 * in case of a creation failure.
2412 *
2413 * @param pUVM              The user mode VM handle.
2414 * @param cMilliesEMTWait The number of milliseconds to wait for the emulation
2415 * threads.
2416 */
2417static void vmR3DestroyUVM(PUVM pUVM, uint32_t cMilliesEMTWait)
2418{
2419 /*
2420     * Signal termination of each of the emulation threads and
2421 * wait for them to complete.
2422 */
2423 /* Signal them. */
2424 ASMAtomicUoWriteBool(&pUVM->vm.s.fTerminateEMT, true);
2425 if (pUVM->pVM)
2426 VM_FF_SET(pUVM->pVM, VM_FF_CHECK_VM_STATE); /* Can't hurt... */
2427 for (VMCPUID i = 0; i < pUVM->cCpus; i++)
2428 {
2429 VMR3NotifyGlobalFFU(pUVM, VMNOTIFYFF_FLAGS_DONE_REM);
2430 RTSemEventSignal(pUVM->aCpus[i].vm.s.EventSemWait);
2431 }
2432
2433 /* Wait for them. */
2434 uint64_t NanoTS = RTTimeNanoTS();
2435 RTTHREAD hSelf = RTThreadSelf();
2436 ASMAtomicUoWriteBool(&pUVM->vm.s.fTerminateEMT, true);
2437 for (VMCPUID i = 0; i < pUVM->cCpus; i++)
2438 {
2439 RTTHREAD hThread = pUVM->aCpus[i].vm.s.ThreadEMT;
2440 if ( hThread != NIL_RTTHREAD
2441 && hThread != hSelf)
2442 {
2443 uint64_t cMilliesElapsed = (RTTimeNanoTS() - NanoTS) / 1000000;
2444 int rc2 = RTThreadWait(hThread,
2445 cMilliesElapsed < cMilliesEMTWait
2446 ? RT_MAX(cMilliesEMTWait - cMilliesElapsed, 2000)
2447 : 2000,
2448 NULL);
2449 if (rc2 == VERR_TIMEOUT) /* avoid the assertion when debugging. */
2450 rc2 = RTThreadWait(hThread, 1000, NULL);
2451 AssertLogRelMsgRC(rc2, ("i=%u rc=%Rrc\n", i, rc2));
2452 if (RT_SUCCESS(rc2))
2453                pUVM->aCpus[i].vm.s.ThreadEMT = NIL_RTTHREAD;
2454 }
2455 }
2456
2457 /* Cleanup the semaphores. */
2458 for (VMCPUID i = 0; i < pUVM->cCpus; i++)
2459 {
2460 RTSemEventDestroy(pUVM->aCpus[i].vm.s.EventSemWait);
2461 pUVM->aCpus[i].vm.s.EventSemWait = NIL_RTSEMEVENT;
2462 }
2463
2464 /*
2465 * Free the event semaphores associated with the request packets.
2466 */
2467 unsigned cReqs = 0;
2468 for (unsigned i = 0; i < RT_ELEMENTS(pUVM->vm.s.apReqFree); i++)
2469 {
2470 PVMREQ pReq = pUVM->vm.s.apReqFree[i];
2471 pUVM->vm.s.apReqFree[i] = NULL;
2472 for (; pReq; pReq = pReq->pNext, cReqs++)
2473 {
2474 pReq->enmState = VMREQSTATE_INVALID;
2475 RTSemEventDestroy(pReq->EventSem);
2476 }
2477 }
2478 Assert(cReqs == pUVM->vm.s.cReqFree); NOREF(cReqs);
2479
2480 /*
2481 * Kill all queued requests. (There really shouldn't be any!)
2482 */
2483 for (unsigned i = 0; i < 10; i++)
2484 {
2485 PVMREQ pReqHead = ASMAtomicXchgPtrT(&pUVM->vm.s.pReqs, NULL, PVMREQ);
2486 AssertMsg(!pReqHead, ("This isn't supposed to happen! VMR3Destroy caller has to serialize this.\n"));
2487 if (!pReqHead)
2488 break;
2489 for (PVMREQ pReq = pReqHead; pReq; pReq = pReq->pNext)
2490 {
2491 ASMAtomicUoWriteSize(&pReq->iStatus, VERR_INTERNAL_ERROR);
2492 ASMAtomicWriteSize(&pReq->enmState, VMREQSTATE_INVALID);
2493 RTSemEventSignal(pReq->EventSem);
2494 RTThreadSleep(2);
2495 RTSemEventDestroy(pReq->EventSem);
2496 }
2497 /* give them a chance to respond before we free the request memory. */
2498 RTThreadSleep(32);
2499 }
2500
2501 /*
2502 * Now all queued VCPU requests (again, there shouldn't be any).
2503 */
2504 for (VMCPUID idCpu = 0; idCpu < pUVM->cCpus; idCpu++)
2505 {
2506 PUVMCPU pUVCpu = &pUVM->aCpus[idCpu];
2507
2508 for (unsigned i = 0; i < 10; i++)
2509 {
2510 PVMREQ pReqHead = ASMAtomicXchgPtrT(&pUVCpu->vm.s.pReqs, NULL, PVMREQ);
2511 AssertMsg(!pReqHead, ("This isn't supposed to happen! VMR3Destroy caller has to serialize this.\n"));
2512 if (!pReqHead)
2513 break;
2514 for (PVMREQ pReq = pReqHead; pReq; pReq = pReq->pNext)
2515 {
2516 ASMAtomicUoWriteSize(&pReq->iStatus, VERR_INTERNAL_ERROR);
2517 ASMAtomicWriteSize(&pReq->enmState, VMREQSTATE_INVALID);
2518 RTSemEventSignal(pReq->EventSem);
2519 RTThreadSleep(2);
2520 RTSemEventDestroy(pReq->EventSem);
2521 }
2522 /* give them a chance to respond before we free the request memory. */
2523 RTThreadSleep(32);
2524 }
2525 }
2526
2527 /*
2528 * Make sure the VMMR0.r0 module and whatever else is unloaded.
2529 */
2530 PDMR3TermUVM(pUVM);
2531
2532 /*
2533 * Terminate the support library if initialized.
2534 */
2535 if (pUVM->vm.s.pSession)
2536 {
2537 int rc = SUPR3Term(false /*fForced*/);
2538 AssertRC(rc);
2539 pUVM->vm.s.pSession = NIL_RTR0PTR;
2540 }
2541
2542 /*
2543 * Destroy the MM heap and free the UVM structure.
2544 */
2545 MMR3TermUVM(pUVM);
2546 STAMR3TermUVM(pUVM);
2547
2548#ifdef LOG_ENABLED
2549 RTLogSetCustomPrefixCallback(NULL, NULL, NULL);
2550#endif
2551 RTTlsFree(pUVM->vm.s.idxTLS);
2552
2553 ASMAtomicUoWriteU32(&pUVM->u32Magic, UINT32_MAX);
2554 RTMemPageFree(pUVM, RT_OFFSETOF(UVM, aCpus[pUVM->cCpus]));
2555
2556 RTLogFlush(NULL);
2557}
2558
2559
2560/**
2561 * Enumerates the VMs in this process.
2562 *
2563 * @returns Pointer to the next VM.
2564 * @returns NULL when no more VMs.
2565 * @param   pVMPrev     The previous VM.
2566 * Use NULL to start the enumeration.
2567 */
2568VMMR3DECL(PVM) VMR3EnumVMs(PVM pVMPrev)
2569{
2570 /*
2571     * This is quick and dirty. It has issues with VMs being
2572 * destroyed during the enumeration.
2573 */
2574 PUVM pNext;
2575 if (pVMPrev)
2576 pNext = pVMPrev->pUVM->pNext;
2577 else
2578 pNext = g_pUVMsHead;
2579 return pNext ? pNext->pVM : NULL;
2580}
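
/*
 * Illustrative enumeration sketch (disabled): walk all VMs in the process.
 * As noted above, this is not safe against a VM being destroyed concurrently;
 * the helper name is hypothetical.
 */
#if 0
static unsigned myCountVMs(void)
{
    unsigned cVMs = 0;
    for (PVM pVM = VMR3EnumVMs(NULL); pVM; pVM = VMR3EnumVMs(pVM))
        cVMs++;
    return cVMs;
}
#endif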
2581
2582
2583/**
2584 * Registers an at VM destruction callback.
2585 *
2586 * @returns VBox status code.
2587 * @param pfnAtDtor Pointer to callback.
2588 * @param pvUser User argument.
2589 */
2590VMMR3DECL(int) VMR3AtDtorRegister(PFNVMATDTOR pfnAtDtor, void *pvUser)
2591{
2592 /*
2593 * Check if already registered.
2594 */
2595 VM_ATDTOR_LOCK();
2596 PVMATDTOR pCur = g_pVMAtDtorHead;
2597 while (pCur)
2598 {
2599 if (pfnAtDtor == pCur->pfnAtDtor)
2600 {
2601 VM_ATDTOR_UNLOCK();
2602 AssertMsgFailed(("Already registered at destruction callback %p!\n", pfnAtDtor));
2603 return VERR_INVALID_PARAMETER;
2604 }
2605
2606 /* next */
2607 pCur = pCur->pNext;
2608 }
2609 VM_ATDTOR_UNLOCK();
2610
2611 /*
2612 * Allocate new entry.
2613 */
2614 PVMATDTOR pVMAtDtor = (PVMATDTOR)RTMemAlloc(sizeof(*pVMAtDtor));
2615 if (!pVMAtDtor)
2616 return VERR_NO_MEMORY;
2617
2618 VM_ATDTOR_LOCK();
2619 pVMAtDtor->pfnAtDtor = pfnAtDtor;
2620 pVMAtDtor->pvUser = pvUser;
2621 pVMAtDtor->pNext = g_pVMAtDtorHead;
2622 g_pVMAtDtorHead = pVMAtDtor;
2623 VM_ATDTOR_UNLOCK();
2624
2625 return VINF_SUCCESS;
2626}
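
/*
 * Illustrative at-destruction callback sketch (disabled): the parameter list
 * follows the way vmR3AtDtor invokes the callback further down (pVM, pvUser);
 * the void return type, the callback body and the helper names are
 * assumptions.
 */
#if 0
static DECLCALLBACK(void) myAtDtorCallback(PVM pVM, void *pvUser)
{
    LogRel(("VM %p is being destroyed, cleaning up context %p\n", pVM, pvUser));
}

static int myHookVmDestruction(void *pvMyContext)
{
    return VMR3AtDtorRegister(myAtDtorCallback, pvMyContext);
}
#endif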
2627
2628
2629/**
2630 * Deregisters an at VM destruction callback.
2631 *
2632 * @returns VBox status code.
2633 * @param pfnAtDtor Pointer to callback.
2634 */
2635VMMR3DECL(int) VMR3AtDtorDeregister(PFNVMATDTOR pfnAtDtor)
2636{
2637 /*
2638 * Find it, unlink it and free it.
2639 */
2640 VM_ATDTOR_LOCK();
2641 PVMATDTOR pPrev = NULL;
2642 PVMATDTOR pCur = g_pVMAtDtorHead;
2643 while (pCur)
2644 {
2645 if (pfnAtDtor == pCur->pfnAtDtor)
2646 {
2647 if (pPrev)
2648 pPrev->pNext = pCur->pNext;
2649 else
2650 g_pVMAtDtorHead = pCur->pNext;
2651 pCur->pNext = NULL;
2652 VM_ATDTOR_UNLOCK();
2653
2654 RTMemFree(pCur);
2655 return VINF_SUCCESS;
2656 }
2657
2658 /* next */
2659 pPrev = pCur;
2660 pCur = pCur->pNext;
2661 }
2662 VM_ATDTOR_UNLOCK();
2663
2664 return VERR_INVALID_PARAMETER;
2665}
2666
2667
2668/**
2669 * Walks the list of at VM destructor callbacks.
2670 * @param pVM The VM which is about to be destroyed.
2671 */
2672static void vmR3AtDtor(PVM pVM)
2673{
2674 /*
2675     * Walk the list and invoke each destructor callback.
2676 */
2677 VM_ATDTOR_LOCK();
2678 for (PVMATDTOR pCur = g_pVMAtDtorHead; pCur; pCur = pCur->pNext)
2679 pCur->pfnAtDtor(pVM, pCur->pvUser);
2680 VM_ATDTOR_UNLOCK();
2681}
2682
2683
2684/**
2685 * Worker which checks integrity of some internal structures.
2686 * This is yet another attempt to track down that AVL tree crash.
2687 */
2688static void vmR3CheckIntegrity(PVM pVM)
2689{
2690#ifdef VBOX_STRICT
2691 int rc = PGMR3CheckIntegrity(pVM);
2692 AssertReleaseRC(rc);
2693#endif
2694}
2695
2696
2697/**
2698 * EMT rendezvous worker for VMR3Reset.
2699 *
2700 * This is called by the emulation threads as a response to the reset request
2701 * issued by VMR3Reset().
2702 *
2703 * @returns VERR_VM_INVALID_VM_STATE, VINF_EM_RESET or VINF_EM_SUSPEND. (This
2704 * is a strict return code, see FNVMMEMTRENDEZVOUS.)
2705 *
2706 * @param pVM The VM handle.
2707 * @param pVCpu The VMCPU handle of the EMT.
2708 * @param pvUser Ignored.
2709 */
2710static DECLCALLBACK(VBOXSTRICTRC) vmR3Reset(PVM pVM, PVMCPU pVCpu, void *pvUser)
2711{
2712 Assert(!pvUser); NOREF(pvUser);
2713
2714 /*
2715     * The first EMT will try to change the state to Resetting. If this fails,
2716 * we won't get called for the other EMTs.
2717 */
2718 if (pVCpu->idCpu == pVM->cCpus - 1)
2719 {
2720 int rc = vmR3TrySetState(pVM, "VMR3Reset", 3,
2721 VMSTATE_RESETTING, VMSTATE_RUNNING,
2722 VMSTATE_RESETTING, VMSTATE_SUSPENDED,
2723 VMSTATE_RESETTING_LS, VMSTATE_RUNNING_LS);
2724 if (RT_FAILURE(rc))
2725 return rc;
2726 }
2727
2728 /*
2729 * Check the state.
2730 */
2731 VMSTATE enmVMState = VMR3GetState(pVM);
2732 AssertLogRelMsgReturn( enmVMState == VMSTATE_RESETTING
2733 || enmVMState == VMSTATE_RESETTING_LS,
2734 ("%s\n", VMR3GetStateName(enmVMState)),
2735 VERR_INTERNAL_ERROR_4);
2736
2737 /*
2738     * EMT(0) does the full cleanup *after* all the other EMTs have been
2739 * thru here and been told to enter the EMSTATE_WAIT_SIPI state.
2740 *
2741 * Because there are per-cpu reset routines and order may/is important,
2742 * the following sequence looks a bit ugly...
2743 */
2744 if (pVCpu->idCpu == 0)
2745 vmR3CheckIntegrity(pVM);
2746
2747 /* Reset the VCpu state. */
2748 VMCPU_ASSERT_STATE(pVCpu, VMCPUSTATE_STARTED);
2749
2750 /* Clear all pending forced actions. */
2751 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_ALL_MASK & ~VMCPU_FF_REQUEST);
2752
2753 /*
2754 * Reset the VM components.
2755 */
2756 if (pVCpu->idCpu == 0)
2757 {
2758 PATMR3Reset(pVM);
2759 CSAMR3Reset(pVM);
2760 PGMR3Reset(pVM); /* We clear VM RAM in PGMR3Reset. It's vital PDMR3Reset is executed
2761 * _afterwards_. E.g. ACPI sets up RAM tables during init/reset. */
2762/** @todo PGMR3Reset should be called after PDMR3Reset really, because we'll trash OS <-> hardware
2763 * communication structures residing in RAM when done in the other order. I.e. the device must be
2764 * quiesced first, then we clear the memory and plan tables. Probably have to make these things
2765 * explicit in some way, some memory setup pass or something.
2766 * (Example: DevAHCI may assert if memory is zeroed before it has read the FIS.)
2767 *
2768 * @bugref{4467}
2769 */
2770 MMR3Reset(pVM);
2771 PDMR3Reset(pVM);
2772 SELMR3Reset(pVM);
2773 TRPMR3Reset(pVM);
2774 REMR3Reset(pVM);
2775 IOMR3Reset(pVM);
2776 CPUMR3Reset(pVM);
2777 }
2778 CPUMR3ResetCpu(pVCpu);
2779 if (pVCpu->idCpu == 0)
2780 {
2781 TMR3Reset(pVM);
2782 EMR3Reset(pVM);
2783 HWACCMR3Reset(pVM); /* This must come *after* PATM, CSAM, CPUM, SELM and TRPM. */
2784
2785#ifdef LOG_ENABLED
2786 /*
2787 * Debug logging.
2788 */
2789 RTLogPrintf("\n\nThe VM was reset:\n");
2790 DBGFR3Info(pVM, "cpum", "verbose", NULL);
2791#endif
2792
2793 /*
2794 * Since EMT(0) is the last to go thru here, it will advance the state.
2795         * When a live save is active, we will move on to SuspendingLS and
2796         * complete the actual suspending a little further down (see below).
2797 */
2798 PUVM pUVM = pVM->pUVM;
2799 RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
2800 enmVMState = pVM->enmVMState;
2801 if (enmVMState == VMSTATE_RESETTING)
2802 {
2803 if (pUVM->vm.s.enmPrevVMState == VMSTATE_SUSPENDED)
2804 vmR3SetStateLocked(pVM, pUVM, VMSTATE_SUSPENDED, VMSTATE_RESETTING);
2805 else
2806 vmR3SetStateLocked(pVM, pUVM, VMSTATE_RUNNING, VMSTATE_RESETTING);
2807 }
2808 else
2809 vmR3SetStateLocked(pVM, pUVM, VMSTATE_SUSPENDING_LS, VMSTATE_RESETTING_LS);
2810 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
2811
2812 vmR3CheckIntegrity(pVM);
2813
2814 /*
2815 * Do the suspend bit as well.
2816 * It only requires some EMT(0) work at present.
2817 */
2818 if (enmVMState != VMSTATE_RESETTING)
2819 {
2820 vmR3SuspendDoWork(pVM);
2821 vmR3SetState(pVM, VMSTATE_SUSPENDED_LS, VMSTATE_SUSPENDING_LS);
2822 }
2823 }
2824
2825 return enmVMState == VMSTATE_RESETTING
2826 ? VINF_EM_RESET
2827 : VINF_EM_SUSPEND; /** @todo VINF_EM_SUSPEND has lower priority than VINF_EM_RESET, so fix races. Perhaps add a new code for this combined case. */
2828}
2829
2830
2831/**
2832 * Reset the current VM.
2833 *
2834 * @returns VBox status code.
2835 * @param pVM VM to reset.
2836 */
2837VMMR3DECL(int) VMR3Reset(PVM pVM)
2838{
2839 LogFlow(("VMR3Reset:\n"));
2840 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
2841
2842 /*
2843 * Gather all the EMTs to make sure there are no races before
2844 * changing the VM state.
2845 */
2846 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
2847 vmR3Reset, NULL);
2848 LogFlow(("VMR3Reset: returns %Rrc\n", rc));
2849 return rc;
2850}
2851
2852
2853/**
2854 * Gets the current VM state.
2855 *
2856 * @returns The current VM state.
2857 * @param pVM VM handle.
2858 * @thread Any
2859 */
2860VMMR3DECL(VMSTATE) VMR3GetState(PVM pVM)
2861{
2862 return pVM->enmVMState;
2863}
2864
2865
2866/**
2867 * Gets the state name string for a VM state.
2868 *
2869 * @returns Pointer to the state name. (readonly)
2870 * @param enmState The state.
2871 */
2872VMMR3DECL(const char *) VMR3GetStateName(VMSTATE enmState)
2873{
2874 switch (enmState)
2875 {
2876 case VMSTATE_CREATING: return "CREATING";
2877 case VMSTATE_CREATED: return "CREATED";
2878 case VMSTATE_LOADING: return "LOADING";
2879 case VMSTATE_POWERING_ON: return "POWERING_ON";
2880 case VMSTATE_RESUMING: return "RESUMING";
2881 case VMSTATE_RUNNING: return "RUNNING";
2882 case VMSTATE_RUNNING_LS: return "RUNNING_LS";
2883 case VMSTATE_RUNNING_FT: return "RUNNING_FT";
2884 case VMSTATE_RESETTING: return "RESETTING";
2885 case VMSTATE_RESETTING_LS: return "RESETTING_LS";
2886 case VMSTATE_SUSPENDED: return "SUSPENDED";
2887 case VMSTATE_SUSPENDED_LS: return "SUSPENDED_LS";
2888 case VMSTATE_SUSPENDED_EXT_LS: return "SUSPENDED_EXT_LS";
2889 case VMSTATE_SUSPENDING: return "SUSPENDING";
2890 case VMSTATE_SUSPENDING_LS: return "SUSPENDING_LS";
2891 case VMSTATE_SUSPENDING_EXT_LS: return "SUSPENDING_EXT_LS";
2892 case VMSTATE_SAVING: return "SAVING";
2893 case VMSTATE_DEBUGGING: return "DEBUGGING";
2894 case VMSTATE_DEBUGGING_LS: return "DEBUGGING_LS";
2895 case VMSTATE_POWERING_OFF: return "POWERING_OFF";
2896 case VMSTATE_POWERING_OFF_LS: return "POWERING_OFF_LS";
2897 case VMSTATE_FATAL_ERROR: return "FATAL_ERROR";
2898 case VMSTATE_FATAL_ERROR_LS: return "FATAL_ERROR_LS";
2899 case VMSTATE_GURU_MEDITATION: return "GURU_MEDITATION";
2900 case VMSTATE_GURU_MEDITATION_LS:return "GURU_MEDITATION_LS";
2901 case VMSTATE_LOAD_FAILURE: return "LOAD_FAILURE";
2902 case VMSTATE_OFF: return "OFF";
2903 case VMSTATE_OFF_LS: return "OFF_LS";
2904 case VMSTATE_DESTROYING: return "DESTROYING";
2905 case VMSTATE_TERMINATED: return "TERMINATED";
2906
2907 default:
2908 AssertMsgFailed(("Unknown state %d\n", enmState));
2909 return "Unknown!\n";
2910 }
2911}
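
/*
 * Illustrative sketch (disabled): combining VMR3GetState and VMR3GetStateName
 * for release logging, the same pattern this file uses in its own LogRel
 * statements. The helper name is hypothetical.
 */
#if 0
static void myLogVmState(PVM pVM)
{
    LogRel(("Current VM state: %s\n", VMR3GetStateName(VMR3GetState(pVM))));
}
#endif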
2912
2913
2914/**
2915 * Validates the state transition in strict builds.
2916 *
2917 * @returns true if valid, false if not.
2918 *
2919 * @param enmStateOld The old (current) state.
2920 * @param enmStateNew The proposed new state.
2921 *
2922 * @remarks The reference for this is found in doc/vp/VMM.vpp, the VMSTATE
2923 * diagram (under State Machine Diagram).
2924 */
2925static bool vmR3ValidateStateTransition(VMSTATE enmStateOld, VMSTATE enmStateNew)
2926{
2927#ifdef VBOX_STRICT
2928 switch (enmStateOld)
2929 {
2930 case VMSTATE_CREATING:
2931 AssertMsgReturn(enmStateNew == VMSTATE_CREATED, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
2932 break;
2933
2934 case VMSTATE_CREATED:
2935 AssertMsgReturn( enmStateNew == VMSTATE_LOADING
2936 || enmStateNew == VMSTATE_POWERING_ON
2937 || enmStateNew == VMSTATE_POWERING_OFF
2938 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
2939 break;
2940
2941 case VMSTATE_LOADING:
2942 AssertMsgReturn( enmStateNew == VMSTATE_SUSPENDED
2943 || enmStateNew == VMSTATE_LOAD_FAILURE
2944 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
2945 break;
2946
2947 case VMSTATE_POWERING_ON:
2948 AssertMsgReturn( enmStateNew == VMSTATE_RUNNING
2949 /*|| enmStateNew == VMSTATE_FATAL_ERROR ?*/
2950 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
2951 break;
2952
2953 case VMSTATE_RESUMING:
2954 AssertMsgReturn( enmStateNew == VMSTATE_RUNNING
2955 /*|| enmStateNew == VMSTATE_FATAL_ERROR ?*/
2956 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
2957 break;
2958
2959 case VMSTATE_RUNNING:
2960 AssertMsgReturn( enmStateNew == VMSTATE_POWERING_OFF
2961 || enmStateNew == VMSTATE_SUSPENDING
2962 || enmStateNew == VMSTATE_RESETTING
2963 || enmStateNew == VMSTATE_RUNNING_LS
2964 || enmStateNew == VMSTATE_RUNNING_FT
2965 || enmStateNew == VMSTATE_DEBUGGING
2966 || enmStateNew == VMSTATE_FATAL_ERROR
2967 || enmStateNew == VMSTATE_GURU_MEDITATION
2968 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
2969 break;
2970
2971 case VMSTATE_RUNNING_LS:
2972 AssertMsgReturn( enmStateNew == VMSTATE_POWERING_OFF_LS
2973 || enmStateNew == VMSTATE_SUSPENDING_LS
2974 || enmStateNew == VMSTATE_SUSPENDING_EXT_LS
2975 || enmStateNew == VMSTATE_RESETTING_LS
2976 || enmStateNew == VMSTATE_RUNNING
2977 || enmStateNew == VMSTATE_DEBUGGING_LS
2978 || enmStateNew == VMSTATE_FATAL_ERROR_LS
2979 || enmStateNew == VMSTATE_GURU_MEDITATION_LS
2980 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
2981 break;
2982
2983 case VMSTATE_RUNNING_FT:
2984 AssertMsgReturn( enmStateNew == VMSTATE_POWERING_OFF
2985 || enmStateNew == VMSTATE_FATAL_ERROR
2986 || enmStateNew == VMSTATE_GURU_MEDITATION
2987 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
2988 break;
2989
2990 case VMSTATE_RESETTING:
2991 AssertMsgReturn(enmStateNew == VMSTATE_RUNNING, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
2992 break;
2993
2994 case VMSTATE_RESETTING_LS:
2995 AssertMsgReturn( enmStateNew == VMSTATE_SUSPENDING_LS
2996 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
2997 break;
2998
2999 case VMSTATE_SUSPENDING:
3000 AssertMsgReturn(enmStateNew == VMSTATE_SUSPENDED, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3001 break;
3002
3003 case VMSTATE_SUSPENDING_LS:
3004 AssertMsgReturn( enmStateNew == VMSTATE_SUSPENDING
3005 || enmStateNew == VMSTATE_SUSPENDED_LS
3006 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3007 break;
3008
3009 case VMSTATE_SUSPENDING_EXT_LS:
3010 AssertMsgReturn( enmStateNew == VMSTATE_SUSPENDING
3011 || enmStateNew == VMSTATE_SUSPENDED_EXT_LS
3012 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3013 break;
3014
3015 case VMSTATE_SUSPENDED:
3016 AssertMsgReturn( enmStateNew == VMSTATE_POWERING_OFF
3017 || enmStateNew == VMSTATE_SAVING
3018 || enmStateNew == VMSTATE_RESETTING
3019 || enmStateNew == VMSTATE_RESUMING
3020 || enmStateNew == VMSTATE_LOADING
3021 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3022 break;
3023
3024 case VMSTATE_SUSPENDED_LS:
3025 AssertMsgReturn( enmStateNew == VMSTATE_SUSPENDED
3026 || enmStateNew == VMSTATE_SAVING
3027 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3028 break;
3029
3030 case VMSTATE_SUSPENDED_EXT_LS:
3031 AssertMsgReturn( enmStateNew == VMSTATE_SUSPENDED
3032 || enmStateNew == VMSTATE_SAVING
3033 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3034 break;
3035
3036 case VMSTATE_SAVING:
3037 AssertMsgReturn(enmStateNew == VMSTATE_SUSPENDED, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3038 break;
3039
3040 case VMSTATE_DEBUGGING:
3041 AssertMsgReturn( enmStateNew == VMSTATE_RUNNING
3042 || enmStateNew == VMSTATE_POWERING_OFF
3043 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3044 break;
3045
3046 case VMSTATE_DEBUGGING_LS:
3047 AssertMsgReturn( enmStateNew == VMSTATE_DEBUGGING
3048 || enmStateNew == VMSTATE_RUNNING_LS
3049 || enmStateNew == VMSTATE_POWERING_OFF_LS
3050 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3051 break;
3052
3053 case VMSTATE_POWERING_OFF:
3054 AssertMsgReturn(enmStateNew == VMSTATE_OFF, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3055 break;
3056
3057 case VMSTATE_POWERING_OFF_LS:
3058 AssertMsgReturn( enmStateNew == VMSTATE_POWERING_OFF
3059 || enmStateNew == VMSTATE_OFF_LS
3060 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3061 break;
3062
3063 case VMSTATE_OFF:
3064 AssertMsgReturn(enmStateNew == VMSTATE_DESTROYING, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3065 break;
3066
3067 case VMSTATE_OFF_LS:
3068 AssertMsgReturn(enmStateNew == VMSTATE_OFF, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3069 break;
3070
3071 case VMSTATE_FATAL_ERROR:
3072 AssertMsgReturn(enmStateNew == VMSTATE_POWERING_OFF, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3073 break;
3074
3075 case VMSTATE_FATAL_ERROR_LS:
3076 AssertMsgReturn( enmStateNew == VMSTATE_FATAL_ERROR
3077 || enmStateNew == VMSTATE_POWERING_OFF_LS
3078 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3079 break;
3080
3081 case VMSTATE_GURU_MEDITATION:
3082 AssertMsgReturn( enmStateNew == VMSTATE_DEBUGGING
3083 || enmStateNew == VMSTATE_POWERING_OFF
3084 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3085 break;
3086
3087 case VMSTATE_GURU_MEDITATION_LS:
3088 AssertMsgReturn( enmStateNew == VMSTATE_GURU_MEDITATION
3089 || enmStateNew == VMSTATE_DEBUGGING_LS
3090 || enmStateNew == VMSTATE_POWERING_OFF_LS
3091 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3092 break;
3093
3094 case VMSTATE_LOAD_FAILURE:
3095 AssertMsgReturn(enmStateNew == VMSTATE_POWERING_OFF, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3096 break;
3097
3098 case VMSTATE_DESTROYING:
3099 AssertMsgReturn(enmStateNew == VMSTATE_TERMINATED, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3100 break;
3101
3102 case VMSTATE_TERMINATED:
3103 default:
3104 AssertMsgFailedReturn(("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3105 break;
3106 }
3107#endif /* VBOX_STRICT */
3108 return true;
3109}
3110
3111
3112/**
3113 * Does the state change callouts.
3114 *
3115 * The caller owns the AtStateCritSect.
3116 *
3117 * @param pVM The VM handle.
3118 * @param pUVM The UVM handle.
3119 * @param   enmStateNew     The new state.
3120 * @param enmStateOld The old state.
3121 */
3122static void vmR3DoAtState(PVM pVM, PUVM pUVM, VMSTATE enmStateNew, VMSTATE enmStateOld)
3123{
3124 LogRel(("Changing the VM state from '%s' to '%s'.\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)));
3125
3126 for (PVMATSTATE pCur = pUVM->vm.s.pAtState; pCur; pCur = pCur->pNext)
3127 {
3128 pCur->pfnAtState(pVM, enmStateNew, enmStateOld, pCur->pvUser);
3129 if ( enmStateNew != VMSTATE_DESTROYING
3130 && pVM->enmVMState == VMSTATE_DESTROYING)
3131 break;
3132 AssertMsg(pVM->enmVMState == enmStateNew,
3133 ("You are not allowed to change the state while in the change callback, except "
3134 "from destroying the VM. There are restrictions in the way the state changes "
3135 "are propagated up to the EM execution loop and it makes the program flow very "
3136 "difficult to follow. (%s, expected %s, old %s)\n",
3137 VMR3GetStateName(pVM->enmVMState), VMR3GetStateName(enmStateNew),
3138 VMR3GetStateName(enmStateOld)));
3139 }
3140}
3141
3142
3143/**
3144 * Sets the current VM state, with the AtStateCritSect already entered.
3145 *
3146 * @param pVM The VM handle.
3147 * @param pUVM The UVM handle.
3148 * @param enmStateNew The new state.
3149 * @param enmStateOld The old state.
3150 */
3151static void vmR3SetStateLocked(PVM pVM, PUVM pUVM, VMSTATE enmStateNew, VMSTATE enmStateOld)
3152{
3153 vmR3ValidateStateTransition(enmStateOld, enmStateNew);
3154
3155 AssertMsg(pVM->enmVMState == enmStateOld,
3156 ("%s != %s\n", VMR3GetStateName(pVM->enmVMState), VMR3GetStateName(enmStateOld)));
3157 pUVM->vm.s.enmPrevVMState = enmStateOld;
3158 pVM->enmVMState = enmStateNew;
3159 VM_FF_CLEAR(pVM, VM_FF_CHECK_VM_STATE);
3160
3161 vmR3DoAtState(pVM, pUVM, enmStateNew, enmStateOld);
3162}
3163
3164
3165/**
3166 * Sets the current VM state.
3167 *
3168 * @param pVM VM handle.
3169 * @param enmStateNew The new state.
3170 * @param enmStateOld The old state (for asserting only).
3171 */
3172static void vmR3SetState(PVM pVM, VMSTATE enmStateNew, VMSTATE enmStateOld)
3173{
3174 PUVM pUVM = pVM->pUVM;
3175 RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
3176
3177 AssertMsg(pVM->enmVMState == enmStateOld,
3178 ("%s != %s\n", VMR3GetStateName(pVM->enmVMState), VMR3GetStateName(enmStateOld)));
3179 vmR3SetStateLocked(pVM, pUVM, enmStateNew, pVM->enmVMState);
3180
3181 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
3182}
3183
3184
3185/**
3186 * Tries to perform a state transition.
3187 *
3188 * @returns The 1-based ordinal of the transition that succeeded.
3189 * VERR_VM_INVALID_VM_STATE and Assert+LogRel on failure.
3190 *
3191 * @param pVM The VM handle.
3192 * @param pszWho Who is trying to change it.
3193 * @param cTransitions The number of transitions in the ellipsis.
3194 * @param ... Transition pairs; new, old.
3195 */
3196static int vmR3TrySetState(PVM pVM, const char *pszWho, unsigned cTransitions, ...)
3197{
3198 va_list va;
3199 VMSTATE enmStateNew = VMSTATE_CREATED;
3200 VMSTATE enmStateOld = VMSTATE_CREATED;
3201
3202#ifdef VBOX_STRICT
3203 /*
3204 * Validate the input first.
3205 */
3206 va_start(va, cTransitions);
3207 for (unsigned i = 0; i < cTransitions; i++)
3208 {
3209 enmStateNew = (VMSTATE)va_arg(va, /*VMSTATE*/int);
3210 enmStateOld = (VMSTATE)va_arg(va, /*VMSTATE*/int);
3211 vmR3ValidateStateTransition(enmStateOld, enmStateNew);
3212 }
3213 va_end(va);
3214#endif
3215
3216 /*
3217 * Grab the lock and see if any of the proposed transitions works out.
3218 */
3219 va_start(va, cTransitions);
3220 int rc = VERR_VM_INVALID_VM_STATE;
3221 PUVM pUVM = pVM->pUVM;
3222 RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
3223
3224 VMSTATE enmStateCur = pVM->enmVMState;
3225
3226 for (unsigned i = 0; i < cTransitions; i++)
3227 {
3228 enmStateNew = (VMSTATE)va_arg(va, /*VMSTATE*/int);
3229 enmStateOld = (VMSTATE)va_arg(va, /*VMSTATE*/int);
3230 if (enmStateCur == enmStateOld)
3231 {
3232 vmR3SetStateLocked(pVM, pUVM, enmStateNew, enmStateOld);
3233 rc = i + 1;
3234 break;
3235 }
3236 }
3237
3238 if (RT_FAILURE(rc))
3239 {
3240 /*
3241 * Complain about it.
3242 */
3243 if (cTransitions == 1)
3244 {
3245 LogRel(("%s: %s -> %s failed, because the VM state is actually %s\n",
3246 pszWho, VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew), VMR3GetStateName(enmStateCur)));
3247 VMSetError(pVM, VERR_VM_INVALID_VM_STATE, RT_SRC_POS,
3248 N_("%s failed because the VM state is %s instead of %s"),
3249 pszWho, VMR3GetStateName(enmStateCur), VMR3GetStateName(enmStateOld));
3250 AssertMsgFailed(("%s: %s -> %s failed, because the VM state is actually %s\n",
3251 pszWho, VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew), VMR3GetStateName(enmStateCur)));
3252 }
3253 else
3254 {
3255 va_end(va);
3256 va_start(va, cTransitions);
3257 LogRel(("%s:\n", pszWho));
3258 for (unsigned i = 0; i < cTransitions; i++)
3259 {
3260 enmStateNew = (VMSTATE)va_arg(va, /*VMSTATE*/int);
3261 enmStateOld = (VMSTATE)va_arg(va, /*VMSTATE*/int);
3262 LogRel(("%s%s -> %s",
3263 i ? ", " : " ", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)));
3264 }
3265 LogRel((" failed, because the VM state is actually %s\n", VMR3GetStateName(enmStateCur)));
3266 VMSetError(pVM, VERR_VM_INVALID_VM_STATE, RT_SRC_POS,
3267 N_("%s failed because the current VM state, %s, was not found in the state transition table"),
3268                       pszWho, VMR3GetStateName(enmStateCur));
3269 AssertMsgFailed(("%s - state=%s, see release log for full details. Check the cTransitions passed us.\n",
3270 pszWho, VMR3GetStateName(enmStateCur)));
3271 }
3272 }
3273
3274 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
3275 va_end(va);
3276 Assert(rc > 0 || rc < 0);
3277 return rc;
3278}
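
/*
 * Illustrative sketch (disabled) of how the 1-based ordinal returned by
 * vmR3TrySetState lets a caller tell the matched transition pair apart,
 * mirroring what vmR3Save does above. The transition pairs shown are just an
 * example; error handling is trimmed.
 */
#if 0
    int rc = vmR3TrySetState(pVM, "MyCaller", 2,
                             VMSTATE_SUSPENDING,    VMSTATE_RUNNING,        /* -> rc == 1 */
                             VMSTATE_SUSPENDING_LS, VMSTATE_RUNNING_LS);    /* -> rc == 2 */
    if (rc == 2)
    {
        /* The live-save variant of the transition was taken. */
    }
    else if (rc == 1)
    {
        /* The plain variant was taken. */
    }
    else
        return rc; /* VERR_VM_INVALID_VM_STATE; the error has already been set and logged. */
#endif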
3279
3280
3281/**
3282 * Flag a guru meditation ... a hack.
3283 *
3284 * @param pVM The VM handle
3285 *
3286 * @todo Rewrite this part. The guru meditation should be flagged
3287 * immediately by the VMM and not by VMEmt.cpp when it's all over.
3288 */
3289void vmR3SetGuruMeditation(PVM pVM)
3290{
3291 PUVM pUVM = pVM->pUVM;
3292 RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
3293
3294 VMSTATE enmStateCur = pVM->enmVMState;
3295 if (enmStateCur == VMSTATE_RUNNING)
3296 vmR3SetStateLocked(pVM, pUVM, VMSTATE_GURU_MEDITATION, VMSTATE_RUNNING);
3297 else if (enmStateCur == VMSTATE_RUNNING_LS)
3298 {
3299 vmR3SetStateLocked(pVM, pUVM, VMSTATE_GURU_MEDITATION_LS, VMSTATE_RUNNING_LS);
3300 SSMR3Cancel(pVM);
3301 }
3302
3303 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
3304}
3305
3306
3307/**
3308 * Called by vmR3EmulationThreadWithId just before the VM structure is freed.
3309 *
3310 * @param pVM The VM handle.
3311 */
3312void vmR3SetTerminated(PVM pVM)
3313{
3314 vmR3SetState(pVM, VMSTATE_TERMINATED, VMSTATE_DESTROYING);
3315}
3316
3317
3318/**
3319 * Checks if the VM was teleported and hasn't been fully resumed yet.
3320 *
3321 * This applies to both sides of the teleportation since we may leave a working
3322 * clone behind and the user is allowed to resume this...
3323 *
3324 * @returns true / false.
3325 * @param pVM The VM handle.
3326 * @thread Any thread.
3327 */
3328VMMR3DECL(bool) VMR3TeleportedAndNotFullyResumedYet(PVM pVM)
3329{
3330 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
3331 return pVM->vm.s.fTeleportedAndNotFullyResumedYet;
3332}
3333
3334
3335/**
3336 * Registers a VM state change callback.
3337 *
3338 * You are not allowed to call any function which changes the VM state from a
3339 * state callback.
3340 *
3341 * @returns VBox status code.
3342 * @param pVM VM handle.
3343 * @param pfnAtState Pointer to callback.
3344 * @param pvUser User argument.
3345 * @thread Any.
3346 */
3347VMMR3DECL(int) VMR3AtStateRegister(PVM pVM, PFNVMATSTATE pfnAtState, void *pvUser)
3348{
3349 LogFlow(("VMR3AtStateRegister: pfnAtState=%p pvUser=%p\n", pfnAtState, pvUser));
3350
3351 /*
3352 * Validate input.
3353 */
3354 AssertPtrReturn(pfnAtState, VERR_INVALID_PARAMETER);
3355 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
3356
3357 /*
3358 * Allocate a new record.
3359 */
3360 PUVM pUVM = pVM->pUVM;
3361 PVMATSTATE pNew = (PVMATSTATE)MMR3HeapAllocU(pUVM, MM_TAG_VM, sizeof(*pNew));
3362 if (!pNew)
3363 return VERR_NO_MEMORY;
3364
3365 /* fill */
3366 pNew->pfnAtState = pfnAtState;
3367 pNew->pvUser = pvUser;
3368
3369 /* insert */
3370 RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
3371 pNew->pNext = *pUVM->vm.s.ppAtStateNext;
3372 *pUVM->vm.s.ppAtStateNext = pNew;
3373 pUVM->vm.s.ppAtStateNext = &pNew->pNext;
3374 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
3375
3376 return VINF_SUCCESS;
3377}
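/* Illustrative usage sketch, not part of VM.cpp: a minimal state change
 * listener and its registration. The FNVMATSTATE prototype used below (VM
 * handle, new state, old state, user argument) is an assumption; check
 * VBox/vmm/vmapi.h for the exact typedef in this release. */
static DECLCALLBACK(void) exampleAtState(PVM pVM, VMSTATE enmState, VMSTATE enmOldState, void *pvUser)
{
    /* Only observe; changing the VM state from a state callback is not allowed. */
    LogRel(("example: VM state change %s -> %s\n",
            VMR3GetStateName(enmOldState), VMR3GetStateName(enmState)));
    NOREF(pVM); NOREF(pvUser);
}

static int exampleRegisterStateListener(PVM pVM)
{
    return VMR3AtStateRegister(pVM, exampleAtState, NULL /*pvUser*/);
}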
3378
3379
3380/**
3381 * Deregisters a VM state change callback.
3382 *
3383 * @returns VBox status code.
3384 * @param pVM VM handle.
3385 * @param pfnAtState Pointer to callback.
3386 * @param pvUser User argument.
3387 * @thread Any.
3388 */
3389VMMR3DECL(int) VMR3AtStateDeregister(PVM pVM, PFNVMATSTATE pfnAtState, void *pvUser)
3390{
3391 LogFlow(("VMR3AtStateDeregister: pfnAtState=%p pvUser=%p\n", pfnAtState, pvUser));
3392
3393 /*
3394 * Validate input.
3395 */
3396 AssertPtrReturn(pfnAtState, VERR_INVALID_PARAMETER);
3397 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
3398
3399 PUVM pUVM = pVM->pUVM;
3400 RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
3401
3402 /*
3403 * Search the list for the entry.
3404 */
3405 PVMATSTATE pPrev = NULL;
3406 PVMATSTATE pCur = pUVM->vm.s.pAtState;
3407 while ( pCur
3408 && ( pCur->pfnAtState != pfnAtState
3409 || pCur->pvUser != pvUser))
3410 {
3411 pPrev = pCur;
3412 pCur = pCur->pNext;
3413 }
3414 if (!pCur)
3415 {
3416 AssertMsgFailed(("pfnAtState=%p was not found\n", pfnAtState));
3417 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
3418 return VERR_FILE_NOT_FOUND;
3419 }
3420
3421 /*
3422 * Unlink it.
3423 */
3424 if (pPrev)
3425 {
3426 pPrev->pNext = pCur->pNext;
3427 if (!pCur->pNext)
3428 pUVM->vm.s.ppAtStateNext = &pPrev->pNext;
3429 }
3430 else
3431 {
3432 pUVM->vm.s.pAtState = pCur->pNext;
3433 if (!pCur->pNext)
3434 pUVM->vm.s.ppAtStateNext = &pUVM->vm.s.pAtState;
3435 }
3436
3437 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
3438
3439 /*
3440 * Free it.
3441 */
3442 pCur->pfnAtState = NULL;
3443 pCur->pNext = NULL;
3444 MMR3HeapFree(pCur);
3445
3446 return VINF_SUCCESS;
3447}
3448
3449
3450/**
3451 * Registers a VM error callback.
3452 *
3453 * @returns VBox status code.
3454 * @param pVM The VM handle.
3455 * @param pfnAtError Pointer to callback.
3456 * @param pvUser User argument.
3457 * @thread Any.
3458 */
3459VMMR3DECL(int) VMR3AtErrorRegister(PVM pVM, PFNVMATERROR pfnAtError, void *pvUser)
3460{
3461 return VMR3AtErrorRegisterU(pVM->pUVM, pfnAtError, pvUser);
3462}
3463
3464
3465/**
3466 * Registers a VM error callback.
3467 *
3468 * @returns VBox status code.
3469 * @param pUVM The user mode VM handle.
3470 * @param pfnAtError Pointer to callback.
3471 * @param pvUser User argument.
3472 * @thread Any.
3473 */
3474VMMR3DECL(int) VMR3AtErrorRegisterU(PUVM pUVM, PFNVMATERROR pfnAtError, void *pvUser)
3475{
3476 LogFlow(("VMR3AtErrorRegister: pfnAtError=%p pvUser=%p\n", pfnAtError, pvUser));
3477
3478 /*
3479 * Validate input.
3480 */
3481 AssertPtrReturn(pfnAtError, VERR_INVALID_PARAMETER);
3482 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
3483
3484 /*
3485 * Allocate a new record.
3486 */
3487 PVMATERROR pNew = (PVMATERROR)MMR3HeapAllocU(pUVM, MM_TAG_VM, sizeof(*pNew));
3488 if (!pNew)
3489 return VERR_NO_MEMORY;
3490
3491 /* fill */
3492 pNew->pfnAtError = pfnAtError;
3493 pNew->pvUser = pvUser;
3494
3495 /* insert */
3496 RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
3497 pNew->pNext = *pUVM->vm.s.ppAtErrorNext;
3498 *pUVM->vm.s.ppAtErrorNext = pNew;
3499 pUVM->vm.s.ppAtErrorNext = &pNew->pNext;
3500 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
3501
3502 return VINF_SUCCESS;
3503}
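/* Illustrative usage sketch, not part of VM.cpp: an error listener shaped
 * after the way the callbacks are invoked in this file (pVM, pvUser, rc,
 * source position, format string, va_list). Treat the exact FNVMATERROR
 * prototype as an assumption and check VBox/vmm/vmapi.h. It would be hooked
 * up with e.g. VMR3AtErrorRegisterU(pUVM, exampleAtError, NULL). */
static DECLCALLBACK(void) exampleAtError(PVM pVM, void *pvUser, int rc, RT_SRC_POS_DECL,
                                         const char *pszFormat, va_list va)
{
    va_list vaCopy;                     /* copy so the caller's va_list stays usable */
    va_copy(vaCopy, va);
    LogRel(("example: VMSetError %s(%d) %s; rc=%Rrc: %N\n",
            pszFile, iLine, pszFunction, rc, pszFormat, &vaCopy));
    va_end(vaCopy);
    NOREF(pVM); NOREF(pvUser);
}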
3504
3505
3506/**
3507 * Deregisters a VM error callback.
3508 *
3509 * @returns VBox status code.
3510 * @param pVM The VM handle.
3511 * @param pfnAtError Pointer to callback.
3512 * @param pvUser User argument.
3513 * @thread Any.
3514 */
3515VMMR3DECL(int) VMR3AtErrorDeregister(PVM pVM, PFNVMATERROR pfnAtError, void *pvUser)
3516{
3517 LogFlow(("VMR3AtErrorDeregister: pfnAtError=%p pvUser=%p\n", pfnAtError, pvUser));
3518
3519 /*
3520 * Validate input.
3521 */
3522 AssertPtrReturn(pfnAtError, VERR_INVALID_PARAMETER);
3523 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
3524
3525 PUVM pUVM = pVM->pUVM;
3526 RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
3527
3528 /*
3529 * Search the list for the entry.
3530 */
3531 PVMATERROR pPrev = NULL;
3532 PVMATERROR pCur = pUVM->vm.s.pAtError;
3533 while ( pCur
3534 && ( pCur->pfnAtError != pfnAtError
3535 || pCur->pvUser != pvUser))
3536 {
3537 pPrev = pCur;
3538 pCur = pCur->pNext;
3539 }
3540 if (!pCur)
3541 {
3542 AssertMsgFailed(("pfnAtError=%p was not found\n", pfnAtError));
3543 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
3544 return VERR_FILE_NOT_FOUND;
3545 }
3546
3547 /*
3548 * Unlink it.
3549 */
3550 if (pPrev)
3551 {
3552 pPrev->pNext = pCur->pNext;
3553 if (!pCur->pNext)
3554 pUVM->vm.s.ppAtErrorNext = &pPrev->pNext;
3555 }
3556 else
3557 {
3558 pUVM->vm.s.pAtError = pCur->pNext;
3559 if (!pCur->pNext)
3560 pUVM->vm.s.ppAtErrorNext = &pUVM->vm.s.pAtError;
3561 }
3562
3563 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
3564
3565 /*
3566 * Free it.
3567 */
3568 pCur->pfnAtError = NULL;
3569 pCur->pNext = NULL;
3570 MMR3HeapFree(pCur);
3571
3572 return VINF_SUCCESS;
3573}
3574
3575
3576/**
3577 * Ellipsis to va_list wrapper for calling pfnAtError.
3578 */
3579static void vmR3SetErrorWorkerDoCall(PVM pVM, PVMATERROR pCur, int rc, RT_SRC_POS_DECL, const char *pszFormat, ...)
3580{
3581 va_list va;
3582 va_start(va, pszFormat);
3583 pCur->pfnAtError(pVM, pCur->pvUser, rc, RT_SRC_POS_ARGS, pszFormat, va);
3584 va_end(va);
3585}
3586
3587
3588/**
3589 * This is a worker function for GC and Ring-0 calls to VMSetError and VMSetErrorV.
3590 * The message is found in VMINT.
3591 *
3592 * @param pVM The VM handle.
3593 * @thread EMT.
3594 */
3595VMMR3DECL(void) VMR3SetErrorWorker(PVM pVM)
3596{
3597 VM_ASSERT_EMT(pVM);
3598 AssertReleaseMsgFailed(("And we have a winner! You get to implement Ring-0 and GC VMSetErrorV! Congrats!\n"));
3599
3600 /*
3601 * Unpack the error (if we managed to format one).
3602 */
3603 PVMERROR pErr = pVM->vm.s.pErrorR3;
3604 const char *pszFile = NULL;
3605 const char *pszFunction = NULL;
3606 uint32_t iLine = 0;
3607 const char *pszMessage;
3608 int32_t rc = VERR_MM_HYPER_NO_MEMORY;
3609 if (pErr)
3610 {
3611 AssertCompile(sizeof(const char) == sizeof(uint8_t));
3612 if (pErr->offFile)
3613 pszFile = (const char *)pErr + pErr->offFile;
3614 iLine = pErr->iLine;
3615 if (pErr->offFunction)
3616 pszFunction = (const char *)pErr + pErr->offFunction;
3617 if (pErr->offMessage)
3618 pszMessage = (const char *)pErr + pErr->offMessage;
3619 else
3620 pszMessage = "No message!";
3621 }
3622 else
3623 pszMessage = "No message! (Failed to allocate memory to put the error message in!)";
3624
3625 /*
3626 * Call the at error callbacks.
3627 */
3628 PUVM pUVM = pVM->pUVM;
3629 RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
3630 ASMAtomicIncU32(&pUVM->vm.s.cErrors);
3631 for (PVMATERROR pCur = pUVM->vm.s.pAtError; pCur; pCur = pCur->pNext)
3632 vmR3SetErrorWorkerDoCall(pVM, pCur, rc, RT_SRC_POS_ARGS, "%s", pszMessage);
3633 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
3634}
3635
3636
3637/**
3638 * Gets the number of errors raised via VMSetError.
3639 *
3640 * This can be used to avoid double error messages.
3641 *
3642 * @returns The error count.
3643 * @param pVM The VM handle.
3644 */
3645VMMR3DECL(uint32_t) VMR3GetErrorCount(PVM pVM)
3646{
3647 AssertPtrReturn(pVM, 0);
3648 return VMR3GetErrorCountU(pVM->pUVM);
3649}
3650
3651
3652/**
3653 * Gets the number of errors raised via VMSetError.
3654 *
3655 * This can be used to avoid double error messages.
3656 *
3657 * @returns The error count.
3658 * @param pUVM The user mode VM handle.
3659 */
3660VMMR3DECL(uint32_t) VMR3GetErrorCountU(PUVM pUVM)
3661{
3662 AssertPtrReturn(pUVM, 0);
3663 AssertReturn(pUVM->u32Magic == UVM_MAGIC, 0);
3664 return pUVM->vm.s.cErrors;
3665}
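/* Illustrative sketch, not part of VM.cpp: using the error count to avoid a
 * second, generic message when the failing operation already reported one via
 * VMSetError. vmExampleDoSomething is a hypothetical stand-in for any such
 * operation. */
static int vmExampleDoSomething(PVM pVM)
{
    NOREF(pVM);
    return VINF_SUCCESS;                /* placeholder */
}

static int exampleReportOnce(PVM pVM)
{
    uint32_t const cErrorsBefore = VMR3GetErrorCount(pVM);
    int rc = vmExampleDoSomething(pVM);
    if (RT_FAILURE(rc) && VMR3GetErrorCount(pVM) == cErrorsBefore)
        VMSetError(pVM, rc, RT_SRC_POS, N_("The operation failed"));
    return rc;
}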
3666
3667
3668/**
3669 * Creation time wrapper for vmR3SetErrorUV.
3670 *
3671 * @returns rc.
3672 * @param pUVM Pointer to the user mode VM structure.
3673 * @param rc The VBox status code.
3674 * @param RT_SRC_POS_DECL The source position of this error.
3675 * @param pszFormat Format string.
3676 * @param ... The arguments.
3677 * @thread Any thread.
3678 */
3679static int vmR3SetErrorU(PUVM pUVM, int rc, RT_SRC_POS_DECL, const char *pszFormat, ...)
3680{
3681 va_list va;
3682 va_start(va, pszFormat);
3683 vmR3SetErrorUV(pUVM, rc, pszFile, iLine, pszFunction, pszFormat, &va);
3684 va_end(va);
3685 return rc;
3686}
3687
3688
3689/**
3690 * Worker which calls everyone listening to the VM error messages.
3691 *
3692 * @param pUVM Pointer to the user mode VM structure.
3693 * @param rc The VBox status code.
3694 * @param RT_SRC_POS_DECL The source position of this error.
3695 * @param pszFormat Format string.
3696 * @param pArgs Pointer to the format arguments.
3697 * @thread EMT
3698 */
3699DECLCALLBACK(void) vmR3SetErrorUV(PUVM pUVM, int rc, RT_SRC_POS_DECL, const char *pszFormat, va_list *pArgs)
3700{
3701 /*
3702 * Log the error.
3703 */
3704 va_list va3;
3705 va_copy(va3, *pArgs);
3706 RTLogRelPrintf("VMSetError: %s(%d) %s; rc=%Rrc\n"
3707 "VMSetError: %N\n",
3708 pszFile, iLine, pszFunction, rc,
3709 pszFormat, &va3);
3710 va_end(va3);
3711
3712#ifdef LOG_ENABLED
3713 va_copy(va3, *pArgs);
3714 RTLogPrintf("VMSetError: %s(%d) %s; rc=%Rrc\n"
3715 "%N\n",
3716 pszFile, iLine, pszFunction, rc,
3717 pszFormat, &va3);
3718 va_end(va3);
3719#endif
3720
3721 /*
3722 * Make a copy of the message.
3723 */
3724 if (pUVM->pVM)
3725 vmSetErrorCopy(pUVM->pVM, rc, RT_SRC_POS_ARGS, pszFormat, *pArgs);
3726
3727 /*
3728 * Call the at error callbacks.
3729 */
3730 bool fCalledSomeone = false;
3731 RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
3732 ASMAtomicIncU32(&pUVM->vm.s.cErrors);
3733 for (PVMATERROR pCur = pUVM->vm.s.pAtError; pCur; pCur = pCur->pNext)
3734 {
3735 va_list va2;
3736 va_copy(va2, *pArgs);
3737 pCur->pfnAtError(pUVM->pVM, pCur->pvUser, rc, RT_SRC_POS_ARGS, pszFormat, va2);
3738 va_end(va2);
3739 fCalledSomeone = true;
3740 }
3741 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
3742}
3743
3744
3745/**
3746 * Registers a VM runtime error callback.
3747 *
3748 * @returns VBox status code.
3749 * @param pVM The VM handle.
3750 * @param pfnAtRuntimeError Pointer to callback.
3751 * @param pvUser User argument.
3752 * @thread Any.
3753 */
3754VMMR3DECL(int) VMR3AtRuntimeErrorRegister(PVM pVM, PFNVMATRUNTIMEERROR pfnAtRuntimeError, void *pvUser)
3755{
3756 LogFlow(("VMR3AtRuntimeErrorRegister: pfnAtRuntimeError=%p pvUser=%p\n", pfnAtRuntimeError, pvUser));
3757
3758 /*
3759 * Validate input.
3760 */
3761 AssertPtrReturn(pfnAtRuntimeError, VERR_INVALID_PARAMETER);
3762 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
3763
3764 /*
3765 * Allocate a new record.
3766 */
3767 PUVM pUVM = pVM->pUVM;
3768 PVMATRUNTIMEERROR pNew = (PVMATRUNTIMEERROR)MMR3HeapAllocU(pUVM, MM_TAG_VM, sizeof(*pNew));
3769 if (!pNew)
3770 return VERR_NO_MEMORY;
3771
3772 /* fill */
3773 pNew->pfnAtRuntimeError = pfnAtRuntimeError;
3774 pNew->pvUser = pvUser;
3775
3776 /* insert */
3777 RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
3778 pNew->pNext = *pUVM->vm.s.ppAtRuntimeErrorNext;
3779 *pUVM->vm.s.ppAtRuntimeErrorNext = pNew;
3780 pUVM->vm.s.ppAtRuntimeErrorNext = &pNew->pNext;
3781 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
3782
3783 return VINF_SUCCESS;
3784}
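/* Illustrative sketch, not part of VM.cpp: a runtime error listener shaped
 * after the invocation in vmR3SetRuntimeErrorCommon below (pVM, pvUser,
 * fFlags, error ID, format string, va_list). Treat the exact
 * FNVMATRUNTIMEERROR prototype as an assumption and check VBox/vmm/vmapi.h. */
static DECLCALLBACK(void) exampleAtRuntimeError(PVM pVM, void *pvUser, uint32_t fFlags,
                                                const char *pszErrorId, const char *pszFormat, va_list va)
{
    va_list vaCopy;
    va_copy(vaCopy, va);
    LogRel(("example: runtime error '%s' (fFlags=%#x): %N\n", pszErrorId, fFlags, pszFormat, &vaCopy));
    va_end(vaCopy);
    NOREF(pVM); NOREF(pvUser);
}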
3785
3786
3787/**
3788 * Deregisters a VM runtime error callback.
3789 *
3790 * @returns VBox status code.
3791 * @param pVM The VM handle.
3792 * @param pfnAtRuntimeError Pointer to callback.
3793 * @param pvUser User argument.
3794 * @thread Any.
3795 */
3796VMMR3DECL(int) VMR3AtRuntimeErrorDeregister(PVM pVM, PFNVMATRUNTIMEERROR pfnAtRuntimeError, void *pvUser)
3797{
3798 LogFlow(("VMR3AtRuntimeErrorDeregister: pfnAtRuntimeError=%p pvUser=%p\n", pfnAtRuntimeError, pvUser));
3799
3800 /*
3801 * Validate input.
3802 */
3803 AssertPtrReturn(pfnAtRuntimeError, VERR_INVALID_PARAMETER);
3804 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
3805
3806 PUVM pUVM = pVM->pUVM;
3807 RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
3808
3809 /*
3810 * Search the list for the entry.
3811 */
3812 PVMATRUNTIMEERROR pPrev = NULL;
3813 PVMATRUNTIMEERROR pCur = pUVM->vm.s.pAtRuntimeError;
3814 while ( pCur
3815 && ( pCur->pfnAtRuntimeError != pfnAtRuntimeError
3816 || pCur->pvUser != pvUser))
3817 {
3818 pPrev = pCur;
3819 pCur = pCur->pNext;
3820 }
3821 if (!pCur)
3822 {
3823 AssertMsgFailed(("pfnAtRuntimeError=%p was not found\n", pfnAtRuntimeError));
3824 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
3825 return VERR_FILE_NOT_FOUND;
3826 }
3827
3828 /*
3829 * Unlink it.
3830 */
3831 if (pPrev)
3832 {
3833 pPrev->pNext = pCur->pNext;
3834 if (!pCur->pNext)
3835 pUVM->vm.s.ppAtRuntimeErrorNext = &pPrev->pNext;
3836 }
3837 else
3838 {
3839 pUVM->vm.s.pAtRuntimeError = pCur->pNext;
3840 if (!pCur->pNext)
3841 pUVM->vm.s.ppAtRuntimeErrorNext = &pUVM->vm.s.pAtRuntimeError;
3842 }
3843
3844 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
3845
3846 /*
3847 * Free it.
3848 */
3849 pCur->pfnAtRuntimeError = NULL;
3850 pCur->pNext = NULL;
3851 MMR3HeapFree(pCur);
3852
3853 return VINF_SUCCESS;
3854}
3855
3856
3857/**
3858 * EMT rendezvous worker that vmR3SetRuntimeErrorCommon uses to safely change
3859 * the state to FatalError(LS).
3860 *
3861 * @returns VERR_VM_INVALID_VM_STATE or VINF_EM_SUSPEND. (This is a strict
3862 * return code, see FNVMMEMTRENDEZVOUS.)
3863 *
3864 * @param pVM The VM handle.
3865 * @param pVCpu The VMCPU handle of the EMT.
3866 * @param pvUser Ignored.
3867 */
3868static DECLCALLBACK(VBOXSTRICTRC) vmR3SetRuntimeErrorChangeState(PVM pVM, PVMCPU pVCpu, void *pvUser)
3869{
3870 NOREF(pVCpu);
3871 Assert(!pvUser); NOREF(pvUser);
3872
3873 /*
3874 * The first EMT thru here changes the state.
3875 */
3876 if (pVCpu->idCpu == pVM->cCpus - 1)
3877 {
3878 int rc = vmR3TrySetState(pVM, "VMSetRuntimeError", 2,
3879 VMSTATE_FATAL_ERROR, VMSTATE_RUNNING,
3880 VMSTATE_FATAL_ERROR_LS, VMSTATE_RUNNING_LS);
3881 if (RT_FAILURE(rc))
3882 return rc;
3883 if (rc == 2)
3884 SSMR3Cancel(pVM);
3885
3886 VM_FF_SET(pVM, VM_FF_CHECK_VM_STATE);
3887 }
3888
3889 /* This'll make sure we get out of wherever we are (e.g. REM). */
3890 return VINF_EM_SUSPEND;
3891}
3892
3893
3894/**
3895 * Worker for VMR3SetRuntimeErrorWorker and vmR3SetRuntimeErrorV.
3896 *
3897 * This does the common parts after the error has been saved / retrieved.
3898 *
3899 * @returns VBox status code with modifications, see VMSetRuntimeErrorV.
3900 *
3901 * @param pVM The VM handle.
3902 * @param fFlags The error flags.
3903 * @param pszErrorId Error ID string.
3904 * @param pszFormat Format string.
3905 * @param pVa Pointer to the format arguments.
3906 */
3907static int vmR3SetRuntimeErrorCommon(PVM pVM, uint32_t fFlags, const char *pszErrorId, const char *pszFormat, va_list *pVa)
3908{
3909 LogRel(("VM: Raising runtime error '%s' (fFlags=%#x)\n", pszErrorId, fFlags));
3910
3911 /*
3912 * Take actions before the call.
3913 */
3914 int rc;
3915 if (fFlags & VMSETRTERR_FLAGS_FATAL)
3916 rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
3917 vmR3SetRuntimeErrorChangeState, NULL);
3918 else if (fFlags & VMSETRTERR_FLAGS_SUSPEND)
3919 rc = VMR3Suspend(pVM);
3920 else
3921 rc = VINF_SUCCESS;
3922
3923 /*
3924 * Do the callback round.
3925 */
3926 PUVM pUVM = pVM->pUVM;
3927 RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
3928 ASMAtomicIncU32(&pUVM->vm.s.cRuntimeErrors);
3929 for (PVMATRUNTIMEERROR pCur = pUVM->vm.s.pAtRuntimeError; pCur; pCur = pCur->pNext)
3930 {
3931 va_list va;
3932 va_copy(va, *pVa);
3933 pCur->pfnAtRuntimeError(pVM, pCur->pvUser, fFlags, pszErrorId, pszFormat, va);
3934 va_end(va);
3935 }
3936 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
3937
3938 return rc;
3939}
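/* Illustrative sketch, not part of VM.cpp: how a ring-3 caller would raise a
 * runtime error that ends up in vmR3SetRuntimeErrorCommon above. The flag
 * values are the ones handled there; the "HostDiskFull" ID and message are
 * made-up examples, and the exact VMSetRuntimeError signature should be
 * checked against VBox/vmm/vm.h. */
static int exampleRaiseDiskFullError(PVM pVM)
{
    /* VMSETRTERR_FLAGS_SUSPEND suspends the VM before the listeners run;
       VMSETRTERR_FLAGS_FATAL would move it to the FatalError state instead. */
    return VMSetRuntimeError(pVM, VMSETRTERR_FLAGS_SUSPEND, "HostDiskFull",
                             N_("The host disk is full"));
}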
3940
3941
3942/**
3943 * Ellipsis to va_list wrapper for calling vmR3SetRuntimeErrorCommon.
3944 */
3945static int vmR3SetRuntimeErrorCommonF(PVM pVM, uint32_t fFlags, const char *pszErrorId, const char *pszFormat, ...)
3946{
3947 va_list va;
3948 va_start(va, pszFormat);
3949 int rc = vmR3SetRuntimeErrorCommon(pVM, fFlags, pszErrorId, pszFormat, &va);
3950 va_end(va);
3951 return rc;
3952}
3953
3954
3955/**
3956 * This is a worker function for RC and Ring-0 calls to VMSetRuntimeError and
3957 * VMSetRuntimeErrorV.
3958 *
3959 * The message is found in VMINT.
3960 *
3961 * @returns VBox status code, see VMSetRuntimeError.
3962 * @param pVM The VM handle.
3963 * @thread EMT.
3964 */
3965VMMR3DECL(int) VMR3SetRuntimeErrorWorker(PVM pVM)
3966{
3967 VM_ASSERT_EMT(pVM);
3968 AssertReleaseMsgFailed(("And we have a winner! You get to implement Ring-0 and GC VMSetRuntimeErrorV! Congrats!\n"));
3969
3970 /*
3971 * Unpack the error (if we managed to format one).
3972 */
3973 const char *pszErrorId = "SetRuntimeError";
3974 const char *pszMessage = "No message!";
3975 uint32_t fFlags = VMSETRTERR_FLAGS_FATAL;
3976 PVMRUNTIMEERROR pErr = pVM->vm.s.pRuntimeErrorR3;
3977 if (pErr)
3978 {
3979 AssertCompile(sizeof(const char) == sizeof(uint8_t));
3980 if (pErr->offErrorId)
3981 pszErrorId = (const char *)pErr + pErr->offErrorId;
3982 if (pErr->offMessage)
3983 pszMessage = (const char *)pErr + pErr->offMessage;
3984 fFlags = pErr->fFlags;
3985 }
3986
3987 /*
3988 * Join cause with vmR3SetRuntimeErrorV.
3989 */
3990 return vmR3SetRuntimeErrorCommonF(pVM, fFlags, pszErrorId, "%s", pszMessage);
3991}
3992
3993
3994/**
3995 * Worker for VMSetRuntimeErrorV for doing the job on EMT in ring-3.
3996 *
3997 * @returns VBox status code with modifications, see VMSetRuntimeErrorV.
3998 *
3999 * @param pVM The VM handle.
4000 * @param fFlags The error flags.
4001 * @param pszErrorId Error ID string.
4002 * @param pszMessage The error message residing in the MM heap.
4003 *
4004 * @thread EMT
4005 */
4006DECLCALLBACK(int) vmR3SetRuntimeError(PVM pVM, uint32_t fFlags, const char *pszErrorId, char *pszMessage)
4007{
4008#if 0 /** @todo make copy of the error msg. */
4009 /*
4010 * Make a copy of the message.
4011 */
4012 va_list va2;
4013 va_copy(va2, *pVa);
4014 vmSetRuntimeErrorCopy(pVM, fFlags, pszErrorId, pszFormat, va2);
4015 va_end(va2);
4016#endif
4017
4018 /*
4019 * Join paths with VMR3SetRuntimeErrorWorker.
4020 */
4021 int rc = vmR3SetRuntimeErrorCommonF(pVM, fFlags, pszErrorId, "%s", pszMessage);
4022 MMR3HeapFree(pszMessage);
4023 return rc;
4024}
4025
4026
4027/**
4028 * Worker for VMSetRuntimeErrorV for doing the job on EMT in ring-3.
4029 *
4030 * @returns VBox status code with modifications, see VMSetRuntimeErrorV.
4031 *
4032 * @param pVM The VM handle.
4033 * @param fFlags The error flags.
4034 * @param pszErrorId Error ID string.
4035 * @param pszFormat Format string.
4036 * @param pVa Pointer to the format arguments.
4037 *
4038 * @thread EMT
4039 */
4040DECLCALLBACK(int) vmR3SetRuntimeErrorV(PVM pVM, uint32_t fFlags, const char *pszErrorId, const char *pszFormat, va_list *pVa)
4041{
4042 /*
4043 * Make a copy of the message.
4044 */
4045 va_list va2;
4046 va_copy(va2, *pVa);
4047 vmSetRuntimeErrorCopy(pVM, fFlags, pszErrorId, pszFormat, va2);
4048 va_end(va2);
4049
4050 /*
4051 * Join paths with VMR3SetRuntimeErrorWorker.
4052 */
4053 return vmR3SetRuntimeErrorCommon(pVM, fFlags, pszErrorId, pszFormat, pVa);
4054}
4055
4056
4057/**
4058 * Gets the number of runtime errors raised via VMR3SetRuntimeError.
4059 *
4060 * This can be used to avoid double error messages.
4061 *
4062 * @returns The runtime error count.
4063 * @param pVM The VM handle.
4064 */
4065VMMR3DECL(uint32_t) VMR3GetRuntimeErrorCount(PVM pVM)
4066{
4067 return pVM->pUVM->vm.s.cRuntimeErrors;
4068}
4069
4070
4071/**
4072 * Gets the ID of the virtual CPU associated with the calling thread.
4073 *
4074 * @returns The CPU ID. NIL_VMCPUID if the thread isn't an EMT.
4075 *
4076 * @param pVM The VM handle.
4077 */
4078VMMR3DECL(RTCPUID) VMR3GetVMCPUId(PVM pVM)
4079{
4080 PUVMCPU pUVCpu = (PUVMCPU)RTTlsGet(pVM->pUVM->vm.s.idxTLS);
4081 return pUVCpu
4082 ? pUVCpu->idCpu
4083 : NIL_VMCPUID;
4084}
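/* Illustrative sketch, not part of VM.cpp: telling whether the calling thread
 * is an EMT and, if so, which virtual CPU it drives. */
static bool exampleIsCallerEmt(PVM pVM)
{
    VMCPUID idCpu = VMR3GetVMCPUId(pVM);
    if (idCpu == NIL_VMCPUID)
        return false;                   /* not an emulation thread */
    Log(("example: caller is the EMT of virtual CPU %u\n", idCpu));
    return true;
}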
4085
4086
4087/**
4088 * Returns the native handle of the current EMT VMCPU thread.
4089 *
4090 * @returns Handle if this is an EMT thread; NIL_RTNATIVETHREAD otherwise
4091 * @param pVM The VM handle.
4092 * @thread EMT
4093 */
4094VMMR3DECL(RTNATIVETHREAD) VMR3GetVMCPUNativeThread(PVM pVM)
4095{
4096 PUVMCPU pUVCpu = (PUVMCPU)RTTlsGet(pVM->pUVM->vm.s.idxTLS);
4097
4098 if (!pUVCpu)
4099 return NIL_RTNATIVETHREAD;
4100
4101 return pUVCpu->vm.s.NativeThreadEMT;
4102}
4103
4104
4105/**
4106 * Returns the native handle of the current EMT VMCPU thread.
4107 *
4108 * @returns Handle if this is an EMT thread; NIL_RTNATIVETHREAD otherwise
4109 * @param pUVM The user mode VM handle.
4110 * @thread EMT
4111 */
4112VMMR3DECL(RTNATIVETHREAD) VMR3GetVMCPUNativeThreadU(PUVM pUVM)
4113{
4114 PUVMCPU pUVCpu = (PUVMCPU)RTTlsGet(pUVM->vm.s.idxTLS);
4115
4116 if (!pUVCpu)
4117 return NIL_RTNATIVETHREAD;
4118
4119 return pUVCpu->vm.s.NativeThreadEMT;
4120}
4121
4122
4123/**
4124 * Returns the handle of the current EMT VMCPU thread.
4125 *
4126 * @returns Handle if this is an EMT thread; NIL_RTTHREAD otherwise
4127 * @param pVM The VM handle.
4128 * @thread EMT
4129 */
4130VMMR3DECL(RTTHREAD) VMR3GetVMCPUThread(PVM pVM)
4131{
4132 PUVMCPU pUVCpu = (PUVMCPU)RTTlsGet(pVM->pUVM->vm.s.idxTLS);
4133
4134 if (!pUVCpu)
4135 return NIL_RTTHREAD;
4136
4137 return pUVCpu->vm.s.ThreadEMT;
4138}
4139
4140
4141/**
4142 * Returns the handle of the current EMT VMCPU thread.
4143 *
4144 * @returns Handle if this is an EMT thread; NIL_RTTHREAD otherwise
4145 * @param pUVM The user mode VM handle.
4146 * @thread EMT
4147 */
4148VMMR3DECL(RTTHREAD) VMR3GetVMCPUThreadU(PUVM pUVM)
4149{
4150 PUVMCPU pUVCpu = (PUVMCPU)RTTlsGet(pUVM->vm.s.idxTLS);
4151
4152 if (!pUVCpu)
4153 return NIL_RTTHREAD;
4154
4155 return pUVCpu->vm.s.ThreadEMT;
4156}
4157
4158
4159/**
4160 * Returns the package and core ID of a virtual CPU.
4161 *
4162 * @returns VBox status code.
4163 * @param pVM The VM to operate on.
4164 * @param idCpu Virtual CPU to get the ID from.
4165 * @param pidCpuCore Where to store the core ID of the virtual CPU.
4166 * @param pidCpuPackage Where to store the package ID of the virtual CPU.
4167 *
4168 */
4169VMMR3DECL(int) VMR3GetCpuCoreAndPackageIdFromCpuId(PVM pVM, VMCPUID idCpu, uint32_t *pidCpuCore, uint32_t *pidCpuPackage)
4170{
4171 if (idCpu >= pVM->cCpus)
4172 return VERR_INVALID_CPU_ID;
4173
4174#ifdef VBOX_WITH_MULTI_CORE
4175 *pidCpuCore = idCpu;
4176 *pidCpuPackage = 0;
4177#else
4178 *pidCpuCore = 0;
4179 *pidCpuPackage = idCpu;
4180#endif
4181
4182 return VINF_SUCCESS;
4183}
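/* Illustrative sketch, not part of VM.cpp: logging the core/package topology
 * reported for every virtual CPU, using pVM->cCpus the same way this file
 * does. */
static void exampleLogCpuTopology(PVM pVM)
{
    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
    {
        uint32_t idCore    = UINT32_MAX;
        uint32_t idPackage = UINT32_MAX;
        int rc = VMR3GetCpuCoreAndPackageIdFromCpuId(pVM, idCpu, &idCore, &idPackage);
        if (RT_SUCCESS(rc))
            LogRel(("example: vCPU %u -> core %u, package %u\n", idCpu, idCore, idPackage));
    }
}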
4184
4185
4186/**
4187 * Worker for VMR3HotUnplugCpu.
4188 *
4189 * @returns VINF_EM_WAIT_SIPI (strict status code).
4190 * @param pVM The VM handle.
4191 * @param idCpu The current CPU.
4192 */
4193static DECLCALLBACK(int) vmR3HotUnplugCpu(PVM pVM, VMCPUID idCpu)
4194{
4195 PVMCPU pVCpu = VMMGetCpuById(pVM, idCpu);
4196 VMCPU_ASSERT_EMT(pVCpu);
4197
4198 /*
4199 * Reset per CPU resources.
4200 *
4201 * Actually only needed for VT-x because the CPU seems to be still in some
4202 * paged mode and startup fails after a new hot plug event. SVM works fine
4203 * even without this.
4204 */
4205 Log(("vmR3HotUnplugCpu for VCPU %u\n", idCpu));
4206 PGMR3ResetUnpluggedCpu(pVM, pVCpu);
4207 PDMR3ResetCpu(pVCpu);
4208 TRPMR3ResetCpu(pVCpu);
4209 CPUMR3ResetCpu(pVCpu);
4210 EMR3ResetCpu(pVCpu);
4211 HWACCMR3ResetCpu(pVCpu);
4212 return VINF_EM_WAIT_SIPI;
4213}
4214
4215
4216/**
4217 * Hot-unplugs a CPU from the guest.
4218 *
4219 * @returns VBox status code.
4220 * @param pVM The VM to operate on.
4221 * @param idCpu Virtual CPU to perform the hot unplugging operation on.
4222 */
4223VMMR3DECL(int) VMR3HotUnplugCpu(PVM pVM, VMCPUID idCpu)
4224{
4225 AssertReturn(idCpu < pVM->cCpus, VERR_INVALID_CPU_ID);
4226
4227 /** @todo r=bird: Don't destroy the EMT, it'll break VMMR3EmtRendezvous and
4228 * broadcast requests. Just note down somewhere that the CPU is
4229 * offline and send it to SIPI wait. Maybe modify VMCPUSTATE and push
4230 * it out of the EM loops when offline. */
4231 return VMR3ReqCallNoWaitU(pVM->pUVM, idCpu, (PFNRT)vmR3HotUnplugCpu, 2, pVM, idCpu);
4232}
4233
4234
4235/**
4236 * Hot-plugs a CPU on the guest.
4237 *
4238 * @returns VBox status code.
4239 * @param pVM The VM to operate on.
4240 * @param idCpu Virtual CPU to perform the hot plugging operation on.
4241 */
4242VMMR3DECL(int) VMR3HotPlugCpu(PVM pVM, VMCPUID idCpu)
4243{
4244 AssertReturn(idCpu < pVM->cCpus, VERR_INVALID_CPU_ID);
4245
4246 /** @todo r=bird: Just mark it online and make sure it waits on SIPI. */
4247 return VINF_SUCCESS;
4248}
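/* Illustrative sketch, not part of VM.cpp: taking a virtual CPU offline and
 * back online through the two calls above, as a CPU hot-plug front end might.
 * Real code would coordinate this with the guest (ACPI) rather than cycling
 * the CPU back-to-back. */
static int exampleCycleCpu(PVM pVM, VMCPUID idCpu)
{
    int rc = VMR3HotUnplugCpu(pVM, idCpu);
    if (RT_SUCCESS(rc))
        rc = VMR3HotPlugCpu(pVM, idCpu);
    return rc;
}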
4249
4250
4251/**
4252 * Changes the VMM execution cap.
4253 *
4254 * @returns VBox status code.
4255 * @param pVM The VM to operate on.
4256 * @param ulCpuExecutionCap New CPU execution cap (1-100).
4257 */
4258VMMR3DECL(int) VMR3SetCpuExecutionCap(PVM pVM, unsigned ulCpuExecutionCap)
4259{
4260 AssertReturn(ulCpuExecutionCap > 0 && ulCpuExecutionCap <= 100, VERR_INVALID_PARAMETER);
4261
4262 Log(("VMR3SetCpuExecutionCap: new CPU execution cap = %d\n", ulCpuExecutionCap));
4263 /* Note: not called from EMT. */
4264 pVM->uCpuExecutionCap = ulCpuExecutionCap;
4265 return VINF_SUCCESS;
4266}
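/* Illustrative sketch, not part of VM.cpp: clamping a user supplied value to
 * the 1-100 range enforced above before applying it. */
static int exampleApplyExecutionCap(PVM pVM, unsigned uRequestedCap)
{
    unsigned uCap = uRequestedCap;
    if (uCap == 0)
        uCap = 1;
    else if (uCap > 100)
        uCap = 100;
    return VMR3SetCpuExecutionCap(pVM, uCap);
}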
4267