VirtualBox

source: vbox/trunk/src/VBox/VMM/VM.cpp@ 23107

Last change on this file since 23107 was 23042, checked in by vboxsync, 15 years ago

VM.cpp: Note about ResetLS/ResettingLS.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 125.1 KB
Line 
1/* $Id: VM.cpp 23042 2009-09-15 20:19:17Z vboxsync $ */
2/** @file
3 * VM - Virtual Machine
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22/** @page pg_vm VM API
23 *
24 * This is the encapsulating bit. It provides the APIs that Main and VBoxBFE
25 * use to create a VMM instance for running a guest in. It also provides
26 * facilities for queuing request for execution in EMT (serialization purposes
27 * mostly) and for reporting error back to the VMM user (Main/VBoxBFE).
28 *
29 *
30 * @section sec_vm_design Design Critique / Things To Do
31 *
 * In hindsight this component is a big design mistake; all this stuff really
 * belongs in the VMM component. It just seemed like a kind of OK idea at a
 * time when the VMM bit was kind of vague. 'VM' also happened to be the name
 * of the per-VM instance structure (see vm.h), so it kind of made sense.
 * However, as it turned out, VMM(.cpp) is almost empty; all it provides in
 * ring-3 is some minor functionality and some "routing" services.
38 *
39 * Fixing this is just a matter of some more or less straight forward
40 * refactoring, the question is just when someone will get to it. Moving the EMT
41 * would be a good start.
42 *
43 */
44
45/*******************************************************************************
46* Header Files *
47*******************************************************************************/
48#define LOG_GROUP LOG_GROUP_VM
49#include <VBox/cfgm.h>
50#include <VBox/vmm.h>
51#include <VBox/gvmm.h>
52#include <VBox/mm.h>
53#include <VBox/cpum.h>
54#include <VBox/selm.h>
55#include <VBox/trpm.h>
56#include <VBox/dbgf.h>
57#include <VBox/pgm.h>
58#include <VBox/pdmapi.h>
59#include <VBox/pdmcritsect.h>
60#include <VBox/em.h>
61#include <VBox/rem.h>
62#include <VBox/tm.h>
63#include <VBox/stam.h>
64#include <VBox/patm.h>
65#ifdef VBOX_WITH_VMI
66# include <VBox/parav.h>
67#endif
68#include <VBox/csam.h>
69#include <VBox/iom.h>
70#include <VBox/ssm.h>
71#include <VBox/hwaccm.h>
72#include "VMInternal.h"
73#include <VBox/vm.h>
74#include <VBox/uvm.h>
75
76#include <VBox/sup.h>
77#include <VBox/dbg.h>
78#include <VBox/err.h>
79#include <VBox/param.h>
80#include <VBox/log.h>
81#include <iprt/assert.h>
82#include <iprt/alloc.h>
83#include <iprt/asm.h>
84#include <iprt/env.h>
85#include <iprt/string.h>
86#include <iprt/time.h>
87#include <iprt/semaphore.h>
88#include <iprt/thread.h>
89
90
91/*******************************************************************************
92* Structures and Typedefs *
93*******************************************************************************/
/**
 * VM destruction callback registration record.
 *
 * Records are kept in the singly linked g_pVMAtDtorHead list and invoked
 * when a VM is destroyed (see vmR3AtDtor).
 */
typedef struct VMATDTOR
{
    /** Pointer to the next record in the list. */
    struct VMATDTOR *pNext;
    /** Pointer to the callback function. */
    PFNVMATDTOR pfnAtDtor;
    /** The user argument passed to pfnAtDtor. */
    void *pvUser;
} VMATDTOR;
/** Pointer to a VM destruction callback registration record. */
typedef VMATDTOR *PVMATDTOR;
108
109
110/*******************************************************************************
111* Global Variables *
112*******************************************************************************/
/** Pointer to the head of the list of (user mode) VMs. */
static PUVM g_pUVMsHead = NULL;

/** Pointer to the head of the list of at-VM-destruction callbacks. */
static PVMATDTOR g_pVMAtDtorHead = NULL;
/** Lock the g_pVMAtDtorHead list.
 * NOTE(review): currently expands to nothing, i.e. the list is not actually
 * protected — confirm single-threaded use before relying on it. */
#define VM_ATDTOR_LOCK() do { } while (0)
/** Unlock the g_pVMAtDtorHead list.
 * NOTE(review): currently a no-op, see VM_ATDTOR_LOCK. */
#define VM_ATDTOR_UNLOCK() do { } while (0)
122
123
124/*******************************************************************************
125* Internal Functions *
126*******************************************************************************/
127static int vmR3CreateUVM(uint32_t cCpus, PUVM *ppUVM);
128static int vmR3CreateU(PUVM pUVM, uint32_t cCpus, PFNCFGMCONSTRUCTOR pfnCFGMConstructor, void *pvUserCFGM);
129static int vmR3InitRing3(PVM pVM, PUVM pUVM);
130static int vmR3InitVMCpu(PVM pVM);
131static int vmR3InitRing0(PVM pVM);
132static int vmR3InitGC(PVM pVM);
133static int vmR3InitDoCompleted(PVM pVM, VMINITCOMPLETED enmWhat);
134static DECLCALLBACK(size_t) vmR3LogPrefixCallback(PRTLOGGER pLogger, char *pchBuf, size_t cchBuf, void *pvUser);
135static DECLCALLBACK(int) vmR3PowerOff(PVM pVM);
136static void vmR3DestroyUVM(PUVM pUVM, uint32_t cMilliesEMTWait);
137static void vmR3AtDtor(PVM pVM);
138static bool vmR3ValidateStateTransition(VMSTATE enmStateOld, VMSTATE enmStateNew);
139static void vmR3DoAtState(PVM pVM, PUVM pUVM, VMSTATE enmStateNew, VMSTATE enmStateOld);
140static int vmR3TrySetState(PVM pVM, const char *pszWho, unsigned cTransitions, ...);
141static void vmR3SetStateLocked(PVM pVM, PUVM pUVM, VMSTATE enmStateNew, VMSTATE enmStateOld);
142static void vmR3SetState(PVM pVM, VMSTATE enmStateNew, VMSTATE enmStateOld);
143static int vmR3SetErrorU(PUVM pUVM, int rc, RT_SRC_POS_DECL, const char *pszFormat, ...);
144
145
146/**
147 * Do global VMM init.
148 *
149 * @returns VBox status code.
150 */
151VMMR3DECL(int) VMR3GlobalInit(void)
152{
153 /*
154 * Only once.
155 */
156 static bool volatile s_fDone = false;
157 if (s_fDone)
158 return VINF_SUCCESS;
159
160 /*
161 * We're done.
162 */
163 s_fDone = true;
164 return VINF_SUCCESS;
165}
166
167
168
/**
 * Creates a virtual machine by calling the supplied configuration constructor.
 *
 * On successful return the VM is powered, i.e. VMR3PowerOn() should be
 * called to start the execution.
 *
 * @returns 0 on success.
 * @returns VBox error code on failure.
 * @param   cCpus               Number of virtual CPUs for the new VM.
 * @param   pfnVMAtError        Pointer to callback function for setting VM
 *                              errors. This was added as an implicit call to
 *                              VMR3AtErrorRegister() since there is no way the
 *                              caller can get to the VM handle early enough to
 *                              do this on its own.
 *                              This is called in the context of an EMT.
 * @param   pvUserVM            The user argument passed to pfnVMAtError.
 * @param   pfnCFGMConstructor  Pointer to callback function for constructing the VM configuration tree.
 *                              This is called in the context of an EMT0.
 * @param   pvUserCFGM          The user argument passed to pfnCFGMConstructor.
 * @param   ppVM                Where to store the 'handle' of the created VM.
 */
VMMR3DECL(int) VMR3Create(uint32_t cCpus, PFNVMATERROR pfnVMAtError, void *pvUserVM, PFNCFGMCONSTRUCTOR pfnCFGMConstructor, void *pvUserCFGM, PVM *ppVM)
{
    LogFlow(("VMR3Create: cCpus=%RU32 pfnVMAtError=%p pvUserVM=%p pfnCFGMConstructor=%p pvUserCFGM=%p ppVM=%p\n",
             cCpus, pfnVMAtError, pvUserVM, pfnCFGMConstructor, pvUserCFGM, ppVM));

    /*
     * Because of the current hackiness of the applications
     * we'll have to initialize global stuff from here.
     * Later the applications will take care of this in a proper way.
     */
    static bool fGlobalInitDone = false;
    if (!fGlobalInitDone)
    {
        int rc = VMR3GlobalInit();
        if (RT_FAILURE(rc))
            return rc;
        fGlobalInitDone = true;
    }

    /*
     * Validate input.
     */
    AssertLogRelMsgReturn(cCpus > 0 && cCpus <= VMM_MAX_CPU_COUNT, ("%RU32\n", cCpus), VERR_TOO_MANY_CPUS);

    /*
     * Create the UVM so we can register the at-error callback
     * and consolidate a bit of cleanup code.
     */
    PUVM pUVM = NULL;                   /* shuts up gcc */
    int rc = vmR3CreateUVM(cCpus, &pUVM);
    if (RT_FAILURE(rc))
        return rc;
    if (pfnVMAtError)
        rc = VMR3AtErrorRegisterU(pUVM, pfnVMAtError, pvUserVM);
    if (RT_SUCCESS(rc))
    {
        /*
         * Initialize the support library creating the session for this VM.
         */
        rc = SUPR3Init(&pUVM->vm.s.pSession);
        if (RT_SUCCESS(rc))
        {
            /*
             * Call vmR3CreateU in the EMT thread and wait for it to finish.
             *
             * Note! VMCPUID_ANY is used here because VMR3ReqQueueU would have trouble
             *       submitting a request to a specific VCPU without a pVM. So, to make
             *       sure init is running on EMT(0), vmR3EmulationThreadWithId makes sure
             *       that only EMT(0) is servicing VMCPUID_ANY requests when pVM is NULL.
             */
            PVMREQ pReq;
            rc = VMR3ReqCallU(pUVM, VMCPUID_ANY, &pReq, RT_INDEFINITE_WAIT, VMREQFLAGS_VBOX_STATUS,
                              (PFNRT)vmR3CreateU, 4, pUVM, cCpus, pfnCFGMConstructor, pvUserCFGM);
            if (RT_SUCCESS(rc))
            {
                /* The status of the actual creation work is in the request packet. */
                rc = pReq->iStatus;
                VMR3ReqFree(pReq);
                if (RT_SUCCESS(rc))
                {
                    /*
                     * Success!
                     */
                    *ppVM = pUVM->pVM;
                    LogFlow(("VMR3Create: returns VINF_SUCCESS *ppVM=%p\n", *ppVM));
                    return VINF_SUCCESS;
                }
            }
            else
                AssertMsgFailed(("VMR3ReqCallU failed rc=%Rrc\n", rc));

            /*
             * An error occurred during VM creation. Set the error message directly
             * using the initial callback, as the callback list doesn't exist yet.
             */
            const char *pszError = NULL;    /* all cases below assign it; default covers the rest */
            switch (rc)
            {
                case VERR_VMX_IN_VMX_ROOT_MODE:
#ifdef RT_OS_LINUX
                    pszError = N_("VirtualBox can't operate in VMX root mode. "
                                  "Please disable the KVM kernel extension, recompile your kernel and reboot");
#else
                    pszError = N_("VirtualBox can't operate in VMX root mode. Please close all other virtualization programs.");
#endif
                    break;

                case VERR_VERSION_MISMATCH:
                    pszError = N_("VMMR0 driver version mismatch. Please terminate all VMs, make sure that "
                                  "VBoxNetDHCP is not running and try again. If you still get this error, "
                                  "re-install VirtualBox");
                    break;

#ifdef RT_OS_LINUX
                case VERR_SUPDRV_COMPONENT_NOT_FOUND:
                    pszError = N_("One of the kernel modules was not successfully loaded. Make sure "
                                  "that no kernel modules from an older version of VirtualBox exist. "
                                  "Then try to recompile and reload the kernel modules by executing "
                                  "'/etc/init.d/vboxdrv setup' as root");
                    break;
#endif

                case VERR_RAW_MODE_INVALID_SMP:
                    pszError = N_("VT-x/AMD-V is either not available on your host or disabled. "
                                  "VirtualBox requires this hardware extension to emulate more than one "
                                  "guest CPU");
                    break;

                case VERR_SUPDRV_KERNEL_TOO_OLD_FOR_VTX:
#ifdef RT_OS_LINUX
                    pszError = N_("Because the host kernel is too old, VirtualBox cannot enable the VT-x "
                                  "extension. Either upgrade your kernel to Linux 2.6.13 or later or disable "
                                  "the VT-x extension in the VM settings. Note that without VT-x you have "
                                  "to reduce the number of guest CPUs to one");
#else
                    pszError = N_("Because the host kernel is too old, VirtualBox cannot enable the VT-x "
                                  "extension. Either upgrade your kernel or disable the VT-x extension in the "
                                  "VM settings. Note that without VT-x you have to reduce the number of guest "
                                  "CPUs to one");
#endif
                    break;

                default:
                    pszError = N_("Unknown error creating VM");
                    break;
            }
            vmR3SetErrorU(pUVM, rc, RT_SRC_POS, pszError, rc);
        }
        else
        {
            /*
             * An error occurred at support library initialization time (before the
             * VM could be created). Set the error message directly using the
             * initial callback, as the callback list doesn't exist yet.
             */
            const char *pszError;
            switch (rc)
            {
                case VERR_VM_DRIVER_LOAD_ERROR:
#ifdef RT_OS_LINUX
                    pszError = N_("VirtualBox kernel driver not loaded. The vboxdrv kernel module "
                                  "was either not loaded or /dev/vboxdrv is not set up properly. "
                                  "Re-setup the kernel module by executing "
                                  "'/etc/init.d/vboxdrv setup' as root");
#else
                    pszError = N_("VirtualBox kernel driver not loaded");
#endif
                    break;
                case VERR_VM_DRIVER_OPEN_ERROR:
                    pszError = N_("VirtualBox kernel driver cannot be opened");
                    break;
                case VERR_VM_DRIVER_NOT_ACCESSIBLE:
#ifdef VBOX_WITH_HARDENING
                    /* This should only happen if the executable wasn't hardened - bad code/build. */
                    pszError = N_("VirtualBox kernel driver not accessible, permission problem. "
                                  "Re-install VirtualBox. If you are building it yourself, you "
                                  "should make sure it installed correctly and that the setuid "
                                  "bit is set on the executables calling VMR3Create.");
#else
                    /* This should only happen when mixing builds or with the usual /dev/vboxdrv access issues. */
# if defined(RT_OS_DARWIN)
                    pszError = N_("VirtualBox KEXT is not accessible, permission problem. "
                                  "If you have built VirtualBox yourself, make sure that you do not "
                                  "have the vboxdrv KEXT from a different build or installation loaded.");
# elif defined(RT_OS_LINUX)
                    pszError = N_("VirtualBox kernel driver is not accessible, permission problem. "
                                  "If you have built VirtualBox yourself, make sure that you do "
                                  "not have the vboxdrv kernel module from a different build or "
                                  "installation loaded. Also, make sure the vboxdrv udev rule gives "
                                  "you the permission you need to access the device.");
# elif defined(RT_OS_WINDOWS)
                    pszError = N_("VirtualBox kernel driver is not accessible, permission problem.");
# else /* solaris, freebsd, ++. */
                    pszError = N_("VirtualBox kernel module is not accessible, permission problem. "
                                  "If you have built VirtualBox yourself, make sure that you do "
                                  "not have the vboxdrv kernel module from a different install loaded.");
# endif
#endif
                    break;
                case VERR_INVALID_HANDLE: /** @todo track down and fix this error. */
                case VERR_VM_DRIVER_NOT_INSTALLED:
#ifdef RT_OS_LINUX
                    pszError = N_("VirtualBox kernel driver not installed. The vboxdrv kernel module "
                                  "was either not loaded or /dev/vboxdrv was not created for some "
                                  "reason. Re-setup the kernel module by executing "
                                  "'/etc/init.d/vboxdrv setup' as root");
#else
                    pszError = N_("VirtualBox kernel driver not installed");
#endif
                    break;
                case VERR_NO_MEMORY:
                    pszError = N_("VirtualBox support library out of memory");
                    break;
                case VERR_VERSION_MISMATCH:
                case VERR_VM_DRIVER_VERSION_MISMATCH:
                    pszError = N_("The VirtualBox support driver which is running is from a different "
                                  "version of VirtualBox. You can correct this by stopping all "
                                  "running instances of VirtualBox and reinstalling the software.");
                    break;
                default:
                    pszError = N_("Unknown error initializing kernel driver");
                    AssertMsgFailed(("Add error message for rc=%d (%Rrc)\n", rc, rc));
            }
            vmR3SetErrorU(pUVM, rc, RT_SRC_POS, pszError, rc);
        }
    }

    /* cleanup - destroy the half-created UVM (waits up to 2s for the EMTs). */
    vmR3DestroyUVM(pUVM, 2000);
    LogFlow(("VMR3Create: returns %Rrc\n", rc));
    return rc;
}
401
402
/**
 * Creates the UVM.
 *
 * This will not initialize the support library even if vmR3DestroyUVM
 * will terminate that.
 *
 * @returns VBox status code.
 * @param   cCpus   Number of virtual CPUs.
 * @param   ppUVM   Where to store the UVM pointer on success.
 */
static int vmR3CreateUVM(uint32_t cCpus, PUVM *ppUVM)
{
    uint32_t i;

    /*
     * Create and initialize the UVM.
     *
     * The structure has a variable-sized aCpus tail, hence the
     * RT_OFFSETOF(UVM, aCpus[cCpus]) size calculation.
     */
    PUVM pUVM = (PUVM)RTMemPageAllocZ(RT_OFFSETOF(UVM, aCpus[cCpus]));
    AssertReturn(pUVM, VERR_NO_MEMORY);
    pUVM->u32Magic = UVM_MAGIC;
    pUVM->cCpus = cCpus;

    AssertCompile(sizeof(pUVM->vm.s) <= sizeof(pUVM->vm.padding));

    /* The callback lists are appended to via these 'next' pointers. */
    pUVM->vm.s.ppAtStateNext = &pUVM->vm.s.pAtState;
    pUVM->vm.s.ppAtErrorNext = &pUVM->vm.s.pAtError;
    pUVM->vm.s.ppAtRuntimeErrorNext = &pUVM->vm.s.pAtRuntimeError;

    pUVM->vm.s.enmHaltMethod = VMHALTMETHOD_BOOTSTRAP;

    /* Initialize the VMCPU array in the UVM. */
    for (i = 0; i < cCpus; i++)
    {
        pUVM->aCpus[i].pUVM = pUVM;
        pUVM->aCpus[i].idCpu = i;
    }

    /* Allocate a TLS entry to store the VMINTUSERPERVMCPU pointer. */
    int rc = RTTlsAllocEx(&pUVM->vm.s.idxTLS, NULL);
    AssertRC(rc);
    if (RT_SUCCESS(rc))
    {
        /* Allocate a halt method event semaphore for each VCPU.
           (Pre-set to NIL so the cleanup loop below is safe on partial failure.) */
        for (i = 0; i < cCpus; i++)
            pUVM->aCpus[i].vm.s.EventSemWait = NIL_RTSEMEVENT;
        for (i = 0; i < cCpus; i++)
        {
            rc = RTSemEventCreate(&pUVM->aCpus[i].vm.s.EventSemWait);
            if (RT_FAILURE(rc))
                break;
        }
        if (RT_SUCCESS(rc))
        {
            rc = RTCritSectInit(&pUVM->vm.s.AtStateCritSect);
            if (RT_SUCCESS(rc))
            {
                rc = RTCritSectInit(&pUVM->vm.s.AtErrorCritSect);
                if (RT_SUCCESS(rc))
                {
                    /*
                     * Init fundamental (sub-)components - STAM, MMR3Heap and PDMLdr.
                     */
                    rc = STAMR3InitUVM(pUVM);
                    if (RT_SUCCESS(rc))
                    {
                        rc = MMR3InitUVM(pUVM);
                        if (RT_SUCCESS(rc))
                        {
                            rc = PDMR3InitUVM(pUVM);
                            if (RT_SUCCESS(rc))
                            {
                                /*
                                 * Start the emulation threads for all VMCPUs.
                                 */
                                for (i = 0; i < cCpus; i++)
                                {
                                    rc = RTThreadCreateF(&pUVM->aCpus[i].vm.s.ThreadEMT, vmR3EmulationThread, &pUVM->aCpus[i], _1M,
                                                         RTTHREADTYPE_EMULATION, RTTHREADFLAGS_WAITABLE,
                                                         cCpus > 1 ? "EMT-%u" : "EMT", i);
                                    if (RT_FAILURE(rc))
                                        break;

                                    pUVM->aCpus[i].vm.s.NativeThreadEMT = RTThreadGetNative(pUVM->aCpus[i].vm.s.ThreadEMT);
                                }

                                if (RT_SUCCESS(rc))
                                {
                                    *ppUVM = pUVM;
                                    return VINF_SUCCESS;
                                }

                                /* bail out. (The already-started EMTs are intentionally leaked here.) */
                                while (i-- > 0)
                                {
                                    /** @todo rainy day: terminate the EMTs. */
                                }
                                PDMR3TermUVM(pUVM);
                            }
                            MMR3TermUVM(pUVM);
                        }
                        STAMR3TermUVM(pUVM);
                    }
                    RTCritSectDelete(&pUVM->vm.s.AtErrorCritSect);
                }
                RTCritSectDelete(&pUVM->vm.s.AtStateCritSect);
            }
        }
        /* Destroying a NIL semaphore is harmless, so this loop handles partial creation. */
        for (i = 0; i < cCpus; i++)
        {
            RTSemEventDestroy(pUVM->aCpus[i].vm.s.EventSemWait);
            pUVM->aCpus[i].vm.s.EventSemWait = NIL_RTSEMEVENT;
        }
        RTTlsFree(pUVM->vm.s.idxTLS);
    }
    RTMemPageFree(pUVM);
    return rc;
}
520
521
/**
 * Creates and initializes the VM.
 *
 * @returns VBox status code.
 * @param   pUVM                The user mode VM structure.
 * @param   cCpus               Number of virtual CPUs to create.
 * @param   pfnCFGMConstructor  Callback for constructing the configuration
 *                              tree (passed on to CFGMR3Init).
 * @param   pvUserCFGM          The user argument passed to pfnCFGMConstructor.
 *
 * @thread  EMT (EMT(0) — see the note in VMR3Create about VMCPUID_ANY).
 */
static int vmR3CreateU(PUVM pUVM, uint32_t cCpus, PFNCFGMCONSTRUCTOR pfnCFGMConstructor, void *pvUserCFGM)
{
    int rc = VINF_SUCCESS;

    /*
     * Load the VMMR0.r0 module so that we can call GVMMR0CreateVM.
     */
    rc = PDMR3LdrLoadVMMR0U(pUVM);
    if (RT_FAILURE(rc))
    {
        /** @todo we need a cleaner solution for this (VERR_VMX_IN_VMX_ROOT_MODE).
         * bird: what about moving the message down here? Main picks the first message, right? */
        if (rc == VERR_VMX_IN_VMX_ROOT_MODE)
            return rc; /* proper error message set later on */
        return vmR3SetErrorU(pUVM, rc, RT_SRC_POS, N_("Failed to load VMMR0.r0"));
    }

    /*
     * Request GVMM to create a new VM for us.
     */
    GVMMCREATEVMREQ CreateVMReq;
    CreateVMReq.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
    CreateVMReq.Hdr.cbReq    = sizeof(CreateVMReq);
    CreateVMReq.pSession     = pUVM->vm.s.pSession;
    CreateVMReq.pVMR0        = NIL_RTR0PTR;
    CreateVMReq.pVMR3        = NULL;
    CreateVMReq.cCpus        = cCpus;
    rc = SUPR3CallVMMR0Ex(NIL_RTR0PTR, NIL_VMCPUID, VMMR0_DO_GVMM_CREATE_VM, 0, &CreateVMReq.Hdr);
    if (RT_SUCCESS(rc))
    {
        /* Sanity check what ring-0 handed back before using it. */
        PVM pVM = pUVM->pVM = CreateVMReq.pVMR3;
        AssertRelease(VALID_PTR(pVM));
        AssertRelease(pVM->pVMR0 == CreateVMReq.pVMR0);
        AssertRelease(pVM->pSession == pUVM->vm.s.pSession);
        AssertRelease(pVM->cCpus == cCpus);
        AssertRelease(pVM->offVMCPU == RT_UOFFSETOF(VM, aCpus));

        Log(("VMR3Create: Created pUVM=%p pVM=%p pVMR0=%p hSelf=%#x cCpus=%RU32\n",
             pUVM, pVM, pVM->pVMR0, pVM->hSelf, pVM->cCpus));

        /*
         * Initialize the VM structure and our internal data (VMINT).
         * Cross-link the per-CPU UVM and VM structures.
         */
        pVM->pUVM = pUVM;

        for (VMCPUID i = 0; i < pVM->cCpus; i++)
        {
            pVM->aCpus[i].pUVCpu        = &pUVM->aCpus[i];
            pVM->aCpus[i].idCpu         = i;
            pVM->aCpus[i].hNativeThread = pUVM->aCpus[i].vm.s.NativeThreadEMT;
            Assert(pVM->aCpus[i].hNativeThread != NIL_RTNATIVETHREAD);

            pUVM->aCpus[i].pVCpu        = &pVM->aCpus[i];
            pUVM->aCpus[i].pVM          = pVM;
        }


        /*
         * Init the configuration.
         */
        rc = CFGMR3Init(pVM, pfnCFGMConstructor, pvUserCFGM);
        if (RT_SUCCESS(rc))
        {
            rc = CFGMR3QueryBoolDef(CFGMR3GetRoot(pVM), "HwVirtExtForced", &pVM->fHwVirtExtForced, false);
            if (RT_SUCCESS(rc) && pVM->fHwVirtExtForced)
                pVM->fHWACCMEnabled = true;

            /*
             * If executing in fake suplib mode disable RR3 and RR0 in the config.
             */
            const char *psz = RTEnvGet("VBOX_SUPLIB_FAKE");
            if (psz && !strcmp(psz, "fake"))
            {
                CFGMR3RemoveValue(CFGMR3GetRoot(pVM), "RawR3Enabled");
                CFGMR3InsertInteger(CFGMR3GetRoot(pVM), "RawR3Enabled", 0);
                CFGMR3RemoveValue(CFGMR3GetRoot(pVM), "RawR0Enabled");
                CFGMR3InsertInteger(CFGMR3GetRoot(pVM), "RawR0Enabled", 0);
            }

            /*
             * Make sure the CPU count in the config data matches.
             */
            if (RT_SUCCESS(rc))
            {
                uint32_t cCPUsCfg;
                rc = CFGMR3QueryU32Def(CFGMR3GetRoot(pVM), "NumCPUs", &cCPUsCfg, 1);
                AssertLogRelMsgRC(rc, ("Configuration error: Querying \"NumCPUs\" as integer failed, rc=%Rrc\n", rc));
                if (RT_SUCCESS(rc) && cCPUsCfg != cCpus)
                {
                    AssertLogRelMsgFailed(("Configuration error: \"NumCPUs\"=%RU32 and VMR3CreateVM::cCpus=%RU32 does not match!\n",
                                           cCPUsCfg, cCpus));
                    rc = VERR_INVALID_PARAMETER;
                }
            }
            if (RT_SUCCESS(rc))
            {
                /*
                 * Init the ring-3 components and ring-3 per cpu data, finishing it off
                 * by a relocation round (intermediate context finalization will do this).
                 */
                rc = vmR3InitRing3(pVM, pUVM);
                if (RT_SUCCESS(rc))
                {
                    rc = vmR3InitVMCpu(pVM);
                    if (RT_SUCCESS(rc))
                        rc = PGMR3FinalizeMappings(pVM);
                    if (RT_SUCCESS(rc))
                    {

                        LogFlow(("Ring-3 init succeeded\n"));

                        /*
                         * Init the Ring-0 components.
                         */
                        rc = vmR3InitRing0(pVM);
                        if (RT_SUCCESS(rc))
                        {
                            /* Relocate again, because some switcher fixups depends on R0 init results. */
                            VMR3Relocate(pVM, 0);

#ifdef VBOX_WITH_DEBUGGER
                            /*
                             * Init the tcp debugger console if we're building
                             * with debugger support.
                             * Note: an address-in-use error is tolerated here.
                             */
                            void *pvUser = NULL;
                            rc = DBGCTcpCreate(pVM, &pvUser);
                            if (    RT_SUCCESS(rc)
                                ||  rc == VERR_NET_ADDRESS_IN_USE)
                            {
                                pUVM->vm.s.pvDBGC = pvUser;
#endif
                                /*
                                 * Init the Guest Context components.
                                 */
                                rc = vmR3InitGC(pVM);
                                if (RT_SUCCESS(rc))
                                {
                                    /*
                                     * Now we can safely set the VM halt method to default.
                                     */
                                    rc = vmR3SetHaltMethodU(pUVM, VMHALTMETHOD_DEFAULT);
                                    if (RT_SUCCESS(rc))
                                    {
                                        /*
                                         * Set the state and link into the global list.
                                         */
                                        vmR3SetState(pVM, VMSTATE_CREATED, VMSTATE_CREATING);
                                        pUVM->pNext = g_pUVMsHead;
                                        g_pUVMsHead = pUVM;

#ifdef LOG_ENABLED
                                        /* Register our custom log prefix callback. */
                                        RTLogSetCustomPrefixCallback(NULL, vmR3LogPrefixCallback, pUVM);
#endif
                                        return VINF_SUCCESS;
                                    }
                                }
#ifdef VBOX_WITH_DEBUGGER
                                DBGCTcpTerminate(pVM, pUVM->vm.s.pvDBGC);
                                pUVM->vm.s.pvDBGC = NULL;
                            }
#endif
                            //..
                        }
                    }
                    vmR3Destroy(pVM);
                }
            }
            //..

            /* Clean CFGM. */
            int rc2 = CFGMR3Term(pVM);
            AssertRC(rc2);
        }

        /*
         * Drop all references to VM and the VMCPU structures, then
         * tell GVMM to destroy the VM.
         */
        pUVM->pVM = NULL;
        for (VMCPUID i = 0; i < pUVM->cCpus; i++)
        {
            pUVM->aCpus[i].pVM = NULL;
            pUVM->aCpus[i].pVCpu = NULL;
        }
        Assert(pUVM->vm.s.enmHaltMethod == VMHALTMETHOD_BOOTSTRAP);

        if (pUVM->cCpus > 1)
        {
            /* Poke the other EMTs since they may have stale pVM and pVCpu references
               on the stack (see VMR3WaitU for instance) if they've been awakened after
               VM creation. */
            for (VMCPUID i = 1; i < pUVM->cCpus; i++)
                VMR3NotifyCpuFFU(&pUVM->aCpus[i], 0);
            RTThreadSleep(RT_MIN(100 + 25 *(pUVM->cCpus - 1), 500)); /* very sophisticated */
        }

        int rc2 = SUPR3CallVMMR0Ex(CreateVMReq.pVMR0, 0 /*idCpu*/, VMMR0_DO_GVMM_DESTROY_VM, 0, NULL);
        AssertRC(rc2);
    }
    else
        vmR3SetErrorU(pUVM, rc, RT_SRC_POS, N_("VM creation failed (GVMM)"));

    LogFlow(("vmR3CreateU: returns %Rrc\n", rc));
    return rc;
}
733
734
735/**
736 * Register the calling EMT with GVM.
737 *
738 * @returns VBox status code.
739 * @param pVM The VM handle.
740 * @param idCpu The Virtual CPU ID.
741 */
742static DECLCALLBACK(int) vmR3RegisterEMT(PVM pVM, VMCPUID idCpu)
743{
744 Assert(VMMGetCpuId(pVM) == idCpu);
745 int rc = SUPR3CallVMMR0Ex(pVM->pVMR0, idCpu, VMMR0_DO_GVMM_REGISTER_VMCPU, 0, NULL);
746 if (RT_FAILURE(rc))
747 LogRel(("idCpu=%u rc=%Rrc\n", idCpu, rc));
748 return rc;
749}
750
751
/**
 * Initializes all R3 components of the VM.
 *
 * On failure, every component that was successfully initialized is torn down
 * again in reverse order (except MM and CPUM, see the notes at the bottom).
 *
 * @returns VBox status code.
 * @param   pVM     The VM handle.
 * @param   pUVM    The user mode VM handle.
 */
static int vmR3InitRing3(PVM pVM, PUVM pUVM)
{
    int rc;

    /*
     * Register the other EMTs with GVM.
     */
    for (VMCPUID idCpu = 1; idCpu < pVM->cCpus; idCpu++)
    {
        rc = VMR3ReqCallWaitU(pUVM, idCpu, (PFNRT)vmR3RegisterEMT, 2, pVM, idCpu);
        if (RT_FAILURE(rc))
            return rc;
    }

    /*
     * Init all R3 components, the order here might be important.
     */
    rc = MMR3Init(pVM);
    if (RT_SUCCESS(rc))
    {
        /* Register the per-VM and switcher profiling statistics. */
        STAM_REG(pVM, &pVM->StatTotalInGC, STAMTYPE_PROFILE_ADV, "/PROF/VM/InGC", STAMUNIT_TICKS_PER_CALL, "Profiling the total time spent in GC.");
        STAM_REG(pVM, &pVM->StatSwitcherToGC, STAMTYPE_PROFILE_ADV, "/PROF/VM/SwitchToGC", STAMUNIT_TICKS_PER_CALL, "Profiling switching to GC.");
        STAM_REG(pVM, &pVM->StatSwitcherToHC, STAMTYPE_PROFILE_ADV, "/PROF/VM/SwitchToHC", STAMUNIT_TICKS_PER_CALL, "Profiling switching to HC.");
        STAM_REG(pVM, &pVM->StatSwitcherSaveRegs, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/SaveRegs", STAMUNIT_TICKS_PER_CALL, "Profiling switching to GC.");
        STAM_REG(pVM, &pVM->StatSwitcherSysEnter, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/SysEnter", STAMUNIT_TICKS_PER_CALL, "Profiling switching to GC.");
        STAM_REG(pVM, &pVM->StatSwitcherDebug, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/Debug", STAMUNIT_TICKS_PER_CALL, "Profiling switching to GC.");
        STAM_REG(pVM, &pVM->StatSwitcherCR0, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/CR0", STAMUNIT_TICKS_PER_CALL, "Profiling switching to GC.");
        STAM_REG(pVM, &pVM->StatSwitcherCR4, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/CR4", STAMUNIT_TICKS_PER_CALL, "Profiling switching to GC.");
        STAM_REG(pVM, &pVM->StatSwitcherLgdt, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/Lgdt", STAMUNIT_TICKS_PER_CALL, "Profiling switching to GC.");
        STAM_REG(pVM, &pVM->StatSwitcherLidt, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/Lidt", STAMUNIT_TICKS_PER_CALL, "Profiling switching to GC.");
        STAM_REG(pVM, &pVM->StatSwitcherLldt, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/Lldt", STAMUNIT_TICKS_PER_CALL, "Profiling switching to GC.");
        STAM_REG(pVM, &pVM->StatSwitcherTSS, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/TSS", STAMUNIT_TICKS_PER_CALL, "Profiling switching to GC.");
        STAM_REG(pVM, &pVM->StatSwitcherJmpCR3, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/JmpCR3", STAMUNIT_TICKS_PER_CALL, "Profiling switching to GC.");
        STAM_REG(pVM, &pVM->StatSwitcherRstrRegs, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/RstrRegs", STAMUNIT_TICKS_PER_CALL, "Profiling switching to GC.");

        /* Per-VCPU halt statistics. */
        for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
        {
            rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltYield, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling halted state yielding.", "/PROF/VM/CPU%d/Halt/Yield", idCpu);
            AssertRC(rc);
            rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltBlock, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling halted state blocking.", "/PROF/VM/CPU%d/Halt/Block", idCpu);
            AssertRC(rc);
            rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltTimers, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling halted state timer tasks.", "/PROF/VM/CPU%d/Halt/Timers", idCpu);
            AssertRC(rc);
        }

        /* Request packet allocator statistics. */
        STAM_REG(pVM, &pUVM->vm.s.StatReqAllocNew, STAMTYPE_COUNTER, "/VM/Req/AllocNew", STAMUNIT_OCCURENCES, "Number of VMR3ReqAlloc returning a new packet.");
        STAM_REG(pVM, &pUVM->vm.s.StatReqAllocRaces, STAMTYPE_COUNTER, "/VM/Req/AllocRaces", STAMUNIT_OCCURENCES, "Number of VMR3ReqAlloc causing races.");
        STAM_REG(pVM, &pUVM->vm.s.StatReqAllocRecycled, STAMTYPE_COUNTER, "/VM/Req/AllocRecycled", STAMUNIT_OCCURENCES, "Number of VMR3ReqAlloc returning a recycled packet.");
        STAM_REG(pVM, &pUVM->vm.s.StatReqFree, STAMTYPE_COUNTER, "/VM/Req/Free", STAMUNIT_OCCURENCES, "Number of VMR3ReqFree calls.");
        STAM_REG(pVM, &pUVM->vm.s.StatReqFreeOverflow, STAMTYPE_COUNTER, "/VM/Req/FreeOverflow", STAMUNIT_OCCURENCES, "Number of times the request was actually freed.");

        rc = CPUMR3Init(pVM);
        if (RT_SUCCESS(rc))
        {
            rc = HWACCMR3Init(pVM);
            if (RT_SUCCESS(rc))
            {
                rc = PGMR3Init(pVM);
                if (RT_SUCCESS(rc))
                {
                    rc = REMR3Init(pVM);
                    if (RT_SUCCESS(rc))
                    {
                        rc = MMR3InitPaging(pVM);
                        if (RT_SUCCESS(rc))
                            rc = TMR3Init(pVM);
                        if (RT_SUCCESS(rc))
                        {
                            rc = VMMR3Init(pVM);
                            if (RT_SUCCESS(rc))
                            {
                                rc = SELMR3Init(pVM);
                                if (RT_SUCCESS(rc))
                                {
                                    rc = TRPMR3Init(pVM);
                                    if (RT_SUCCESS(rc))
                                    {
                                        rc = CSAMR3Init(pVM);
                                        if (RT_SUCCESS(rc))
                                        {
                                            rc = PATMR3Init(pVM);
                                            if (RT_SUCCESS(rc))
                                            {
#ifdef VBOX_WITH_VMI
                                                rc = PARAVR3Init(pVM);
                                                if (RT_SUCCESS(rc))
                                                {
#endif
                                                    rc = IOMR3Init(pVM);
                                                    if (RT_SUCCESS(rc))
                                                    {
                                                        rc = EMR3Init(pVM);
                                                        if (RT_SUCCESS(rc))
                                                        {
                                                            rc = DBGFR3Init(pVM);
                                                            if (RT_SUCCESS(rc))
                                                            {
                                                                rc = PDMR3Init(pVM);
                                                                if (RT_SUCCESS(rc))
                                                                {
                                                                    /* Finalization round for the components above. */
                                                                    rc = PGMR3InitDynMap(pVM);
                                                                    if (RT_SUCCESS(rc))
                                                                        rc = MMR3HyperInitFinalize(pVM);
                                                                    if (RT_SUCCESS(rc))
                                                                        rc = PATMR3InitFinalize(pVM);
                                                                    if (RT_SUCCESS(rc))
                                                                        rc = PGMR3InitFinalize(pVM);
                                                                    if (RT_SUCCESS(rc))
                                                                        rc = SELMR3InitFinalize(pVM);
                                                                    if (RT_SUCCESS(rc))
                                                                        rc = TMR3InitFinalize(pVM);
                                                                    if (RT_SUCCESS(rc))
                                                                        rc = VMMR3InitFinalize(pVM);
                                                                    if (RT_SUCCESS(rc))
                                                                        rc = REMR3InitFinalize(pVM);
                                                                    if (RT_SUCCESS(rc))
                                                                        rc = vmR3InitDoCompleted(pVM, VMINITCOMPLETED_RING3);
                                                                    if (RT_SUCCESS(rc))
                                                                    {
                                                                        LogFlow(("vmR3InitRing3: returns %Rrc\n", VINF_SUCCESS));
                                                                        return VINF_SUCCESS;
                                                                    }
                                                                    int rc2 = PDMR3Term(pVM);
                                                                    AssertRC(rc2);
                                                                }
                                                                int rc2 = DBGFR3Term(pVM);
                                                                AssertRC(rc2);
                                                            }
                                                            int rc2 = EMR3Term(pVM);
                                                            AssertRC(rc2);
                                                        }
                                                        int rc2 = IOMR3Term(pVM);
                                                        AssertRC(rc2);
                                                    }
#ifdef VBOX_WITH_VMI
                                                    int rc2 = PARAVR3Term(pVM);
                                                    AssertRC(rc2);
                                                }
#endif
                                                int rc2 = PATMR3Term(pVM);
                                                AssertRC(rc2);
                                            }
                                            int rc2 = CSAMR3Term(pVM);
                                            AssertRC(rc2);
                                        }
                                        int rc2 = TRPMR3Term(pVM);
                                        AssertRC(rc2);
                                    }
                                    int rc2 = SELMR3Term(pVM);
                                    AssertRC(rc2);
                                }
                                int rc2 = VMMR3Term(pVM);
                                AssertRC(rc2);
                            }
                            int rc2 = TMR3Term(pVM);
                            AssertRC(rc2);
                        }
                        int rc2 = REMR3Term(pVM);
                        AssertRC(rc2);
                    }
                    int rc2 = PGMR3Term(pVM);
                    AssertRC(rc2);
                }
                int rc2 = HWACCMR3Term(pVM);
                AssertRC(rc2);
            }
            //int rc2 = CPUMR3Term(pVM);
            //AssertRC(rc2);
        }
        /* MMR3Term is not called here because it'll kill the heap. */
    }

    LogFlow(("vmR3InitRing3: returns %Rrc\n", rc));
    return rc;
}
930
931
/**
 * Initializes all per-VCPU components of the VM.
 *
 * The components are brought up in a fixed order; on failure, the ones that
 * were already initialized successfully are torn down again in the exact
 * reverse order before the error is propagated to the caller.
 *
 * @returns VBox status code.
 * @param   pVM     The VM handle.
 */
static int vmR3InitVMCpu(PVM pVM)
{
    int rc = VINF_SUCCESS;
    int rc2;

    rc = CPUMR3InitCPU(pVM);
    if (RT_SUCCESS(rc))
    {
        rc = HWACCMR3InitCPU(pVM);
        if (RT_SUCCESS(rc))
        {
            rc = PGMR3InitCPU(pVM);
            if (RT_SUCCESS(rc))
            {
                rc = TMR3InitCPU(pVM);
                if (RT_SUCCESS(rc))
                {
                    rc = VMMR3InitCPU(pVM);
                    if (RT_SUCCESS(rc))
                    {
                        rc = EMR3InitCPU(pVM);
                        if (RT_SUCCESS(rc))
                        {
                            /* Everything is up; done. */
                            LogFlow(("vmR3InitVMCpu: returns %Rrc\n", VINF_SUCCESS));
                            return VINF_SUCCESS;
                        }

                        /* Bail out: undo the successful initializations in reverse order. */
                        rc2 = VMMR3TermCPU(pVM);
                        AssertRC(rc2);
                    }
                    rc2 = TMR3TermCPU(pVM);
                    AssertRC(rc2);
                }
                rc2 = PGMR3TermCPU(pVM);
                AssertRC(rc2);
            }
            rc2 = HWACCMR3TermCPU(pVM);
            AssertRC(rc2);
        }
        rc2 = CPUMR3TermCPU(pVM);
        AssertRC(rc2);
    }
    LogFlow(("vmR3InitVMCpu: returns %Rrc\n", rc));
    return rc;
}
980
981
/**
 * Initializes all ring-0 (R0) components of the VM.
 *
 * The ring-0 init is skipped entirely when the VBOX_SUPLIB_FAKE=fake
 * environment variable is set (i.e. running without a real support driver).
 *
 * @returns VBox status code.
 * @param   pVM     The VM handle.
 */
static int vmR3InitRing0(PVM pVM)
{
    LogFlow(("vmR3InitRing0:\n"));

    /*
     * Check for FAKE suplib mode.
     */
    int rc = VINF_SUCCESS;
    const char *psz = RTEnvGet("VBOX_SUPLIB_FAKE");
    if (!psz || strcmp(psz, "fake"))
    {
        /*
         * Call the VMMR0 component and let it do the init.
         */
        rc = VMMR3InitR0(pVM);
    }
    else
        Log(("vmR3InitRing0: skipping because of VBOX_SUPLIB_FAKE=fake\n"));

    /*
     * Do notifications and return.
     */
    if (RT_SUCCESS(rc))
        rc = vmR3InitDoCompleted(pVM, VMINITCOMPLETED_RING0);

    /** @todo move this to the VMINITCOMPLETED_RING0 notification handler once implemented */
    if (RT_SUCCESS(rc))
        rc = HWACCMR3InitFinalizeR0(pVM);

    LogFlow(("vmR3InitRing0: returns %Rrc\n", rc));
    return rc;
}
1017
1018
1019/**
1020 * Initializes all GC components of the VM
1021 */
1022static int vmR3InitGC(PVM pVM)
1023{
1024 LogFlow(("vmR3InitGC:\n"));
1025
1026 /*
1027 * Check for FAKE suplib mode.
1028 */
1029 int rc = VINF_SUCCESS;
1030 const char *psz = RTEnvGet("VBOX_SUPLIB_FAKE");
1031 if (!psz || strcmp(psz, "fake"))
1032 {
1033 /*
1034 * Call the VMMR0 component and let it do the init.
1035 */
1036 rc = VMMR3InitRC(pVM);
1037 }
1038 else
1039 Log(("vmR3InitGC: skipping because of VBOX_SUPLIB_FAKE=fake\n"));
1040
1041 /*
1042 * Do notifications and return.
1043 */
1044 if (RT_SUCCESS(rc))
1045 rc = vmR3InitDoCompleted(pVM, VMINITCOMPLETED_GC);
1046 LogFlow(("vmR3InitGC: returns %Rrc\n", rc));
1047 return rc;
1048}
1049
1050
/**
 * Do init completed notifications.
 *
 * These notifications can fail (hence the status return), although the
 * current implementation is an empty placeholder that always succeeds.
 *
 * @returns VBox status code (currently always VINF_SUCCESS).
 * @param   pVM     The VM handle.
 * @param   enmWhat What's completed.
 */
static int vmR3InitDoCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
{
    /* Nothing to do yet; both parameters are currently unused. */
    return VINF_SUCCESS;
}
1062
1063
1064/**
1065 * Logger callback for inserting a custom prefix.
1066 *
1067 * @returns Number of chars written.
1068 * @param pLogger The logger.
1069 * @param pchBuf The output buffer.
1070 * @param cchBuf The output buffer size.
1071 * @param pvUser Pointer to the UVM structure.
1072 */
1073static DECLCALLBACK(size_t) vmR3LogPrefixCallback(PRTLOGGER pLogger, char *pchBuf, size_t cchBuf, void *pvUser)
1074{
1075 AssertReturn(cchBuf >= 2, 0);
1076 PUVM pUVM = (PUVM)pvUser;
1077 PUVMCPU pUVCpu = (PUVMCPU)RTTlsGet(pUVM->vm.s.idxTLS);
1078 if (pUVCpu)
1079 {
1080 static const char s_szHex[17] = "0123456789abcdef";
1081 VMCPUID const idCpu = pUVCpu->idCpu;
1082 pchBuf[1] = s_szHex[ idCpu & 15];
1083 pchBuf[0] = s_szHex[(idCpu >> 4) & 15];
1084 }
1085 else
1086 {
1087 pchBuf[0] = 'x';
1088 pchBuf[1] = 'y';
1089 }
1090
1091 return 2;
1092}
1093
1094
/**
 * Calls the relocation functions for all VMM components so they can update
 * any GC pointers. When this function is called all the basic VM members
 * have been updated and the actual memory relocation has been done
 * by the PGM/MM.
 *
 * This is used both on init and on runtime relocations.
 *
 * @param   pVM         VM handle.
 * @param   offDelta    Relocation delta relative to old location.
 */
VMMR3DECL(void) VMR3Relocate(PVM pVM, RTGCINTPTR offDelta)
{
    LogFlow(("VMR3Relocate: offDelta=%RGv\n", offDelta));

    /*
     * The order here is very important!  Do not reorder without checking
     * the inter-component dependencies (PGM twice, SELM twice).
     */
    PGMR3Relocate(pVM, offDelta);
    PDMR3LdrRelocateU(pVM->pUVM, offDelta);
    PGMR3Relocate(pVM, 0);              /* Repeat after PDM relocation. */
    CPUMR3Relocate(pVM);
    HWACCMR3Relocate(pVM);
    SELMR3Relocate(pVM);
    VMMR3Relocate(pVM, offDelta);
    SELMR3Relocate(pVM);                /* !hack! fix stack! */
    TRPMR3Relocate(pVM, offDelta);
    PATMR3Relocate(pVM);
    CSAMR3Relocate(pVM, offDelta);
    IOMR3Relocate(pVM, offDelta);
    EMR3Relocate(pVM);
    TMR3Relocate(pVM, offDelta);
    DBGFR3Relocate(pVM, offDelta);
    PDMR3Relocate(pVM, offDelta);
}
1130
1131
1132/**
1133 * Power on the virtual machine.
1134 *
1135 * @returns 0 on success.
1136 * @returns VBox error code on failure.
1137 * @param pVM VM to power on.
1138 * @thread EMT
1139 */
1140static DECLCALLBACK(int) vmR3PowerOn(PVM pVM)
1141{
1142 LogFlow(("vmR3PowerOn: pVM=%p\n", pVM));
1143
1144 /*
1145 * EMT(0) does the actual power on work *before* the other EMTs
1146 * get here, they just need to set their state to STARTED so they
1147 * get out of the EMT loop and into EM.
1148 */
1149 PVMCPU pVCpu = VMMGetCpu(pVM);
1150 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
1151 if (pVCpu->idCpu != 0)
1152 return VINF_SUCCESS;
1153
1154 /*
1155 * Try change the state.
1156 */
1157 int rc = vmR3TrySetState(pVM, "VMR3PowerOff", 1, VMSTATE_POWERING_ON, VMSTATE_CREATED);
1158 if (RT_FAILURE(rc))
1159 return rc;
1160
1161 /*
1162 * Change the state, notify the components and resume the execution.
1163 */
1164 PDMR3PowerOn(pVM);
1165 vmR3SetState(pVM, VMSTATE_RUNNING, VMSTATE_POWERING_ON);
1166
1167 return VINF_SUCCESS;
1168}
1169
1170
1171/**
1172 * Powers on the virtual machine.
1173 *
1174 * @returns VBox status code.
1175 *
1176 * @param pVM The VM to power on.
1177 *
1178 * @thread Any thread.
1179 * @vmstate Created
1180 * @vmstateto PoweringOn, Running
1181 */
1182VMMR3DECL(int) VMR3PowerOn(PVM pVM)
1183{
1184 LogFlow(("VMR3PowerOn: pVM=%p\n", pVM));
1185 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1186
1187 /*
1188 * Forward the request to the EMTs (EMT(0) first as it does all the
1189 * work upfront).
1190 */
1191 int rc = VMR3ReqCallWaitU(pVM->pUVM, VMCPUID_ALL, (PFNRT)vmR3PowerOn, 1, pVM);
1192 LogFlow(("VMR3PowerOn: returns %Rrc\n", rc));
1193 return rc;
1194}
1195
1196
1197/**
1198 * EMT worker for vmR3SuspendCommon.
1199 *
1200 * @returns VBox strict status code.
1201 * @retval VINF_EM_SUSPEND.
1202 * @retval VERR_VM_INVALID_VM_STATE.
1203 *
1204 * @param pVM VM to suspend.
1205 * @param fFatal Whether it's a fatal error or normal suspend.
1206 *
1207 * @thread EMT
1208 */
1209static DECLCALLBACK(int) vmR3Suspend(PVM pVM, bool fFatal)
1210{
1211 LogFlow(("vmR3Suspend: pVM=%p\n", pVM));
1212
1213 /*
1214 * The first EMT switches the state to suspending.
1215 */
1216 PVMCPU pVCpu = VMMGetCpu(pVM);
1217 if (pVCpu->idCpu == pVM->cCpus - 1)
1218 {
1219 int rc = vmR3TrySetState(pVM, "VMR3Suspend", 2,
1220 VMSTATE_SUSPENDING, VMSTATE_RUNNING,
1221 VMSTATE_SUSPENDING_LS, VMSTATE_RUNNING_LS);
1222 if (RT_FAILURE(rc))
1223 return rc;
1224 }
1225
1226 VMSTATE enmVMState = VMR3GetState(pVM);
1227 AssertMsgReturn( enmVMState == VMSTATE_SUSPENDING
1228 || enmVMState == VMSTATE_SUSPENDING_LS,
1229 ("%s\n", VMR3GetStateName(enmVMState)),
1230 VERR_INTERNAL_ERROR_5);
1231
1232 /*
1233 * EMT(0) does the actually suspending *after* all the other CPUs has
1234 * been thru here.
1235 */
1236 if (pVCpu->idCpu == 0)
1237 {
1238 /* Perform suspend notification. */
1239 PDMR3Suspend(pVM);
1240
1241 /*
1242 * Change to the final state. Live saving makes this a wee bit more
1243 * complicated than one would like.
1244 */
1245 PUVM pUVM = pVM->pUVM;
1246 RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
1247 VMSTATE enmVMState = pVM->enmVMState;
1248 if (enmVMState != VMSTATE_SUSPENDING_LS)
1249 vmR3SetStateLocked(pVM, pUVM, fFatal ? VMSTATE_FATAL_ERROR : VMSTATE_SUSPENDED, VMSTATE_SUSPENDING);
1250 else if (!fFatal)
1251 vmR3SetStateLocked(pVM, pUVM, VMSTATE_SUSPENDED_LS, VMSTATE_SUSPENDING_LS);
1252 else
1253 {
1254 vmR3SetStateLocked(pVM, pUVM, VMSTATE_FATAL_ERROR_LS, VMSTATE_SUSPENDING_LS);
1255 SSMR3Cancel(pVM);
1256 }
1257 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
1258 }
1259
1260 return VINF_EM_SUSPEND;
1261}
1262
1263
1264/**
1265 * Common worker for VMR3Suspend and vmR3SetRuntimeErrorCommon.
1266 *
1267 * They both suspends the VM, but the latter ends up in the VMSTATE_FATAL_ERROR
1268 * instead of VMSTATE_SUSPENDED.
1269 *
1270 * @returns VBox strict status code.
1271 * @param pVM The VM handle.
1272 * @param fFatal Whether it's a fatal error or not.
1273 *
1274 * @thread Any thread.
1275 * @vmstate Running or RunningLS
1276 * @vmstateto Suspending + Suspended/FatalError or SuspendingLS +
1277 * SuspendedLS/FatalErrorLS
1278 */
1279static int vmR3SuspendCommon(PVM pVM, bool fFatal)
1280{
1281 /*
1282 * Forward the operation to EMT in reverse order so EMT(0) can do the
1283 * actual suspending after the other ones have stopped running guest code.
1284 */
1285 return VMR3ReqCallWaitU(pVM->pUVM, VMCPUID_ALL_REVERSE, (PFNRT)vmR3Suspend, 2, pVM, fFatal);
1286}
1287
1288
1289/**
1290 * Suspends a running VM.
1291 *
1292 * @returns VBox status code. When called on EMT, this will be a strict status
1293 * code that has to be propagated up the call stack.
1294 *
1295 * @param pVM The VM to suspend.
1296 *
1297 * @thread Any thread.
1298 * @vmstate Running or RunningLS
1299 * @vmstateto Suspending + Suspended or SuspendingLS + SuspendedLS
1300 */
1301VMMR3DECL(int) VMR3Suspend(PVM pVM)
1302{
1303 LogFlow(("VMR3Suspend: pVM=%p\n", pVM));
1304 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1305 int rc = vmR3SuspendCommon(pVM, false /*fFatal*/);
1306 LogFlow(("VMR3Suspend: returns %Rrc\n", rc));
1307 return rc;
1308}
1309
1310
1311/**
1312 * Resume VM execution.
1313 *
1314 * @returns 0 on success.
1315 * @returns VBox error code on failure.
1316 * @param pVM The VM to resume.
1317 * @thread EMT
1318 */
1319static DECLCALLBACK(int) vmR3Resume(PVM pVM)
1320{
1321 LogFlow(("vmR3Resume: pVM=%p\n", pVM));
1322
1323 /*
1324 * EMT(0) does all the work *before* the others wake up.
1325 */
1326 PVMCPU pVCpu = VMMGetCpu(pVM);
1327 if (pVCpu->idCpu == 0)
1328 {
1329 int rc = vmR3TrySetState(pVM, "VMR3Resume", 1, VMSTATE_RESUMING, VMSTATE_SUSPENDED);
1330 if (RT_FAILURE(rc))
1331 return rc;
1332
1333 /* Perform resume notifications. */
1334 PDMR3Resume(pVM);
1335
1336 /* Advance to the final state. */
1337 vmR3SetState(pVM, VMSTATE_RUNNING, VMSTATE_RESUMING);
1338 }
1339
1340 /** @todo there is a race here: Someone could suspend, power off, raise a fatal
1341 * error (both kinds), save the vm, or start a live save operation before
1342 * we get here on all CPUs. Only safe way is a cross call, or to make
1343 * the last thread flip the state from Resuming to Running. While the
1344 * latter seems easy and perhaps more attractive, the former might be
1345 * better wrt TSC/TM... */
1346 AssertMsgReturn(VMR3GetState(pVM) == VMSTATE_RUNNING, ("%s\n", VMR3GetStateName(VMR3GetState(pVM))), VERR_VM_INVALID_VM_STATE);
1347 return VINF_EM_RESUME;
1348}
1349
1350
1351
1352
1353/**
1354 * Resume VM execution.
1355 *
1356 * @returns VBox status code. When called on EMT, this will be a strict status
1357 * code that has to be propagated up the call stack.
1358 *
1359 * @param pVM The VM to resume.
1360 *
1361 * @thread Any thread.
1362 * @vmstate Suspended
1363 * @vmstateto Running
1364 */
1365VMMR3DECL(int) VMR3Resume(PVM pVM)
1366{
1367 LogFlow(("VMR3Resume: pVM=%p\n", pVM));
1368 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1369
1370 /*
1371 * Forward the request to the EMTs (EMT(0) first as it does all the
1372 * work upfront).
1373 */
1374 int rc = VMR3ReqCallWaitU(pVM->pUVM, VMCPUID_ALL, (PFNRT)vmR3Resume, 1, pVM);
1375 LogFlow(("VMR3Resume: returns %Rrc\n", rc));
1376 return rc;
1377}
1378
1379
/**
 * Worker for VMR3Save that validates the state and calls SSMR3Save.
 *
 * @returns VBox status code.
 *
 * @param   pVM             The VM handle.
 * @param   pszFilename     The name of the save state file.
 * @param   enmAfter        What to do afterwards.
 * @param   pfnProgress     Progress callback. Optional.
 * @param   pvUser          User argument for the progress callback.
 * @param   ppSSM           Where to return the saved state handle in case of a
 *                          live snapshot scenario.
 * @thread  EMT
 */
static DECLCALLBACK(int) vmR3Save(PVM pVM, const char *pszFilename, SSMAFTER enmAfter, PFNVMPROGRESS pfnProgress, void *pvUser, PSSMHANDLE *ppSSM)
{
    LogFlow(("vmR3Save: pVM=%p pszFilename=%p:{%s} enmAfter=%d pfnProgress=%p pvUser=%p ppSSM=%p\n", pVM, pszFilename, pszFilename, enmAfter, pfnProgress, pvUser, ppSSM));

    /*
     * Validate input.
     */
    AssertPtr(pszFilename);
    AssertPtr(pVM);
    Assert(enmAfter == SSMAFTER_DESTROY || enmAfter == SSMAFTER_CONTINUE);
    AssertPtr(ppSSM);
    *ppSSM = NULL;

    /*
     * Change the state and perform/start the saving.
     *
     * NOTE(review): the returns 1 and 2 below appear to be the 1-based index
     * of the state-transition pair that matched in vmR3TrySetState -- confirm
     * against that function's implementation.  Also note the second pair
     * reads (Running <- RunningLS); verify this is intentional and not the
     * reverse (RunningLS <- Running) for starting a live save.
     */
    int rc = vmR3TrySetState(pVM, "VMR3Save", 2,
                             VMSTATE_SAVING, VMSTATE_SUSPENDED,
                             VMSTATE_RUNNING, VMSTATE_RUNNING_LS);
    if (rc == 1)
    {
        /* Ordinary save: the VM is suspended, do the whole job synchronously. */
        rc = SSMR3Save(pVM, pszFilename, enmAfter, pfnProgress, pvUser);
        vmR3SetState(pVM, VMSTATE_SUSPENDED, VMSTATE_SAVING);
    }
    else if (rc == 2)
    {
        /* Live save: kick it off here; the caller drives steps 1 and 2. */
        rc = SSMR3LiveToFile(pVM, pszFilename, enmAfter, pfnProgress, pvUser, ppSSM);
        /* (We're not subject to cancellation just yet.) */
    }
    else
        Assert(RT_FAILURE(rc));
    return rc;
}
1427
1428
1429/**
1430 * Worker for VMR3Save to clean up a SSMR3LiveDoStep1 failure.
1431 *
1432 * We failed after hitting the RunningLS state, but before trying to suspend the
1433 * VM before vmR3SaveLiveStep2. There are a number of state transisions in this
1434 * state, some, like ResetLS, that requires some special handling. (ResetLS is
1435 * the excuse for doing this all on EMT(0).
1436 *
1437 * @returns VBox status code.
1438 *
1439 * @param pVM The VM handle.
1440 * @param pSSM The handle of saved state operation. This will be
1441 * closed.
1442 * @thread EMT(0)
1443 */
1444static DECLCALLBACK(int) vmR3SaveLiveStep1Cleanup(PVM pVM, PSSMHANDLE pSSM)
1445{
1446 LogFlow(("vmR3SaveLiveStep2: pVM=%p pSSM=%p\n", pVM, pSSM));
1447 VM_ASSERT_EMT0(pVM);
1448
1449 /*
1450 * Finish the SSM state first (or move the ssmR3SetCancellable call),
1451 * then change the state out of the *LS variants.
1452 */
1453 int rc = SSMR3LiveDone(pSSM);
1454 int rc2 = vmR3TrySetState(pVM, "vmR3SaveLiveStep1Cleanup", 8,
1455 VMSTATE_RUNNING, VMSTATE_RUNNING_LS,
1456 VMSTATE_RUNNING, VMSTATE_RESET_LS,
1457 VMSTATE_SUSPENDING, VMSTATE_SUSPENDING_LS, /* external*/
1458 VMSTATE_GURU_MEDITATION, VMSTATE_GURU_MEDITATION_LS,
1459 VMSTATE_FATAL_ERROR, VMSTATE_FATAL_ERROR_LS,
1460 VMSTATE_POWERING_OFF, VMSTATE_POWERING_OFF_LS,
1461 VMSTATE_OFF, VMSTATE_OFF_LS,
1462 VMSTATE_DEBUGGING, VMSTATE_DEBUGGING_LS);
1463 if (RT_SUCCESS(rc))
1464 {
1465 if (RT_SUCCESS(rc2))
1466 rc = rc2 == 2 /* ResetLS -> Running */ ? VINF_EM_RESUME : VINF_SUCCESS;
1467 else
1468 rc = rc2;
1469 }
1470/** @todo VMR3Reset during live save (ResetLS, ResettingLS) needs to be
1471 * redone. We should suspend the VM after resetting the state, not
1472 * cancelling the save operation. In the live migration scenario we
1473 * would already have transfered most of the state and the little that
1474 * remains after a reset isn't going to be very big and it's not worth
1475 * making special paths for this. In the live snapshot case, there
1476 * would be a gain in that we wouldn't require a potentially large saved
1477 * state file. But that could be handled on VMR3Save return and size
1478 * shouldn't matter much as already mentioned..
1479 *
1480 * Will address this tomorrow. */
1481 return rc;
1482}
1483
1484
1485/**
1486 * Worker for VMR3Save continues a live save on EMT(0).
1487 *
1488 * @returns VBox status code.
1489 *
1490 * @param pVM The VM handle.
1491 * @param pSSM The handle of saved state operation.
1492 * @thread EMT(0)
1493 */
1494static DECLCALLBACK(int) vmR3SaveLiveStep2(PVM pVM, PSSMHANDLE pSSM)
1495{
1496 LogFlow(("vmR3SaveLiveStep2: pVM=%p pSSM=%p\n", pVM, pSSM));
1497 VM_ASSERT_EMT0(pVM);
1498
1499 vmR3SetState(pVM, VMSTATE_SAVING, VMSTATE_SUSPENDED_LS);
1500
1501 int rc = SSMR3LiveDoStep2(pSSM);
1502 int rc2 = SSMR3LiveDone(pSSM);
1503 if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
1504 rc = rc2;
1505
1506 vmR3SetState(pVM, VMSTATE_SUSPENDED, VMSTATE_SAVING);
1507
1508 return rc;
1509}
1510
1511
1512
/**
 * Save current VM state.
 *
 * Can be used for both saving the state and creating snapshots.
 *
 * When called for a VM in the Running state, the saved state is created live
 * and the VM is only suspended when the final part of the saving is performed.
 * The VM state will not be restored to Running in this case and it's up to the
 * caller to call VMR3Resume if this is desirable. (The rationale is that the
 * caller probably wishes to reconfigure the disks before resuming the VM.)
 *
 * @returns VBox status code.
 *
 * @param   pVM                 The VM which state should be saved.
 * @param   pszFilename         The name of the save state file.
 * @param   fContinueAfterwards Whether continue execution afterwards or not.
 *                              When in doubt, set this to true.
 * @param   pfnProgress         Progress callback. Optional.
 * @param   pvUser              User argument for the progress callback.
 *
 * @thread      Non-EMT.
 * @vmstate     Suspended or Running
 * @vmstateto   Saving+Suspended or
 *              RunningLS+SuspendingLS+SuspendedLS+Saving+Suspended.
 */
VMMR3DECL(int) VMR3Save(PVM pVM, const char *pszFilename, bool fContinueAfterwards, PFNVMPROGRESS pfnProgress, void *pvUser)
{
    LogFlow(("VMR3Save: pVM=%p pszFilename=%p:{%s} fContinueAfterwards=%RTbool pfnProgress=%p pvUser=%p\n",
             pVM, pszFilename, pszFilename, fContinueAfterwards, pfnProgress, pvUser));

    /*
     * Validate input.
     */
    VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
    VM_ASSERT_OTHER_THREAD(pVM);
    AssertPtrReturn(pszFilename, VERR_INVALID_POINTER);
    AssertReturn(*pszFilename, VERR_INVALID_PARAMETER);
    AssertPtrNullReturn(pfnProgress, VERR_INVALID_POINTER);

    /*
     * Request the operation in EMT(0).  pSSM is only set (non-NULL) when a
     * live save was started (VM was Running).
     */
    SSMAFTER enmAfter = fContinueAfterwards ? SSMAFTER_CONTINUE : SSMAFTER_DESTROY;
    PSSMHANDLE pSSM;
    int rc = VMR3ReqCallWaitU(pVM->pUVM, 0 /*idDstCpu*/,
                              (PFNRT)vmR3Save, 6, pVM, pszFilename, enmAfter, pfnProgress, pvUser, &pSSM);
    if (    RT_SUCCESS(rc)
        &&  pSSM)
    {
        /*
         * Live snapshot.
         *
         * The state handling here is kind of tricky, doing it on EMT(0)
         * helps a bit. See the VMSTATE diagram for details. The EMT(0) calls
         * consume the pSSM handle and call SSMR3LiveDone.
         */
        rc = SSMR3LiveDoStep1(pSSM);
        if (RT_SUCCESS(rc))
            rc = vmR3SuspendCommon(pVM, false /*fFatal*/); /** @todo this races external VMR3Suspend calls and may cause trouble (goes for any VMCPUID_ALL* calls messing with the state in the handler). */
        if (RT_SUCCESS(rc))
            rc = VMR3ReqCallWaitU(pVM->pUVM, 0 /*idDstCpu*/, (PFNRT)vmR3SaveLiveStep2, 2, pVM, pSSM);
        else
        {
            /* NOTE(review): this is the only request in the file issued via
               VMR3ReqCallWait(pVM, ...) rather than
               VMR3ReqCallWaitU(pVM->pUVM, ...) -- confirm the two are
               equivalent here or make it consistent. */
            int rc2 = VMR3ReqCallWait(pVM, 0 /*idDstCpu*/, (PFNRT)vmR3SaveLiveStep1Cleanup, 2, pVM, pSSM);
            AssertLogRelRC(rc2);
        }
    }

    LogFlow(("VMR3Save: returns %Rrc\n", rc));
    return rc;
}
1584
1585
1586/**
1587 * Loads a new VM state.
1588 *
1589 * To restore a saved state on VM startup, call this function and then
1590 * resume the VM instead of powering it on.
1591 *
1592 * @returns VBox status code.
1593 * @param pVM The VM handle.
1594 * @param pszFilename The name of the save state file.
1595 * @param pfnProgress Progress callback. Optional.
1596 * @param pvUser User argument for the progress callback.
1597 * @thread EMT.
1598 */
1599static DECLCALLBACK(int) vmR3Load(PVM pVM, const char *pszFilename, PFNVMPROGRESS pfnProgress, void *pvUser)
1600{
1601 LogFlow(("vmR3Load: pVM=%p pszFilename=%p:{%s} pfnProgress=%p pvUser=%p\n", pVM, pszFilename, pszFilename, pfnProgress, pvUser));
1602
1603 /*
1604 * Validate input (paranoia).
1605 */
1606 AssertPtr(pVM);
1607 AssertPtr(pszFilename);
1608
1609 /*
1610 * Change the state and perform the load.
1611 *
1612 * Always perform a relocation round afterwards to make sure hypervisor
1613 * selectors and such are correct.
1614 */
1615 int rc = vmR3TrySetState(pVM, "VMR3Load", 2,
1616 VMSTATE_LOADING, VMSTATE_CREATED,
1617 VMSTATE_LOADING, VMSTATE_SUSPENDED);
1618 if (RT_FAILURE(rc))
1619 return rc;
1620
1621 rc = SSMR3Load(pVM, pszFilename, SSMAFTER_RESUME, pfnProgress, pvUser);
1622 if (RT_SUCCESS(rc))
1623 {
1624 VMR3Relocate(pVM, 0 /*offDelta*/);
1625 vmR3SetState(pVM, VMSTATE_SUSPENDED, VMSTATE_LOADING);
1626 }
1627 else
1628 {
1629 vmR3SetState(pVM, VMSTATE_LOAD_FAILURE, VMSTATE_LOADING);
1630 rc = VMSetError(pVM, rc, RT_SRC_POS,
1631 N_("Unable to restore the virtual machine's saved state from '%s'. It may be damaged or from an older version of VirtualBox. Please discard the saved state before starting the virtual machine"),
1632 pszFilename);
1633 }
1634
1635 return rc;
1636}
1637
1638
1639/**
1640 * Loads a VM state into a newly created VM or a one that is suspended.
1641 *
1642 * To restore a saved state on VM startup, call this function and then resume
1643 * the VM instead of powering it on.
1644 *
1645 * @returns VBox status code.
1646 *
1647 * @param pVM The VM handle.
1648 * @param pszFilename The name of the save state file.
1649 * @param pfnProgress Progress callback. Optional.
1650 * @param pvUser User argument for the progress callback.
1651 *
1652 * @thread Any thread.
1653 * @vmstate Created, Suspended
1654 * @vmstateto Loading+Suspended
1655 */
1656VMMR3DECL(int) VMR3Load(PVM pVM, const char *pszFilename, PFNVMPROGRESS pfnProgress, void *pvUser)
1657{
1658 LogFlow(("VMR3Load: pVM=%p pszFilename=%p:{%s} pfnProgress=%p pvUser=%p\n", pVM, pszFilename, pszFilename, pfnProgress, pvUser));
1659
1660 /*
1661 * Validate input.
1662 */
1663 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1664 AssertPtrReturn(pszFilename, VERR_INVALID_POINTER);
1665
1666 /*
1667 * Forward the request to EMT(0).
1668 */
1669 int rc = VMR3ReqCallWaitU(pVM->pUVM, 0 /*idDstCpu*/,
1670 (PFNRT)vmR3Load, 4, pVM, pszFilename, pfnProgress, pvUser);
1671 LogFlow(("VMR3Load: returns %Rrc\n", rc));
1672 return rc;
1673}
1674
1675
/**
 * Worker for VMR3PowerOff that does the actual powering off on EMT(0) after
 * cycling thru the other EMTs first.
 *
 * The last EMT to arrive performs the state transition; EMT(0) then does the
 * notification and final state change after all EMTs have entered STOPPED.
 *
 * @returns VBox strict status code (VINF_EM_OFF on the success paths).
 *
 * @param   pVM     The VM handle.
 *
 * @thread  EMT.
 */
static DECLCALLBACK(int) vmR3PowerOff(PVM pVM)
{
    LogFlow(("vmR3PowerOff: pVM=%p\n", pVM));

    /*
     * The first EMT thru here will change the state to PoweringOff.
     * (Requests are dispatched in reverse order, so the highest-id EMT
     * arrives first.)
     */
    PVMCPU pVCpu = VMMGetCpu(pVM);
    if (pVCpu->idCpu == pVM->cCpus - 1)
    {
        int rc = vmR3TrySetState(pVM, "VMR3PowerOff", 10,
                                 VMSTATE_POWERING_OFF,    VMSTATE_RUNNING,
                                 VMSTATE_POWERING_OFF,    VMSTATE_SUSPENDED,
                                 VMSTATE_POWERING_OFF,    VMSTATE_DEBUGGING,
                                 VMSTATE_POWERING_OFF,    VMSTATE_LOAD_FAILURE,
                                 VMSTATE_POWERING_OFF,    VMSTATE_GURU_MEDITATION,
                                 VMSTATE_POWERING_OFF,    VMSTATE_FATAL_ERROR,
                                 VMSTATE_POWERING_OFF_LS, VMSTATE_RUNNING_LS,
                                 VMSTATE_POWERING_OFF_LS, VMSTATE_DEBUGGING_LS,
                                 VMSTATE_POWERING_OFF_LS, VMSTATE_GURU_MEDITATION_LS,
                                 VMSTATE_POWERING_OFF_LS, VMSTATE_FATAL_ERROR_LS);
        if (RT_FAILURE(rc))
            return rc;
    }

    /*
     * Check the state.
     */
    VMSTATE enmVMState = VMR3GetState(pVM);
    AssertMsgReturn(   enmVMState == VMSTATE_POWERING_OFF
                    || enmVMState == VMSTATE_POWERING_OFF_LS,
                    ("%s\n", VMR3GetStateName(enmVMState)),
                    VERR_VM_INVALID_VM_STATE);

    /*
     * EMT(0) does the actual power off work here *after* all the other EMTs
     * have been thru and entered the STOPPED state.
     */
    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STOPPED);
    if (pVCpu->idCpu == 0)
    {
        /*
         * For debugging purposes, we will log a summary of the guest state at this point.
         */
        if (enmVMState != VMSTATE_GURU_MEDITATION)
        {
            /** @todo SMP support? */
            /* NOTE(review): this declaration shadows the outer pVCpu above;
               both resolve to VMMGetCpu(pVM) so behavior is unaffected, but
               consider renaming to silence -Wshadow. */
            PVMCPU pVCpu = VMMGetCpu(pVM);

            /** @todo make the state dumping at VMR3PowerOff optional. */
            RTLogRelPrintf("****************** Guest state at power off ******************\n");
            DBGFR3Info(pVM, "cpumguest", "verbose", DBGFR3InfoLogRelHlp());
            RTLogRelPrintf("***\n");
            DBGFR3Info(pVM, "mode", NULL, DBGFR3InfoLogRelHlp());
            RTLogRelPrintf("***\n");
            DBGFR3Info(pVM, "activetimers", NULL, DBGFR3InfoLogRelHlp());
            RTLogRelPrintf("***\n");
            DBGFR3Info(pVM, "gdt", NULL, DBGFR3InfoLogRelHlp());
            /** @todo dump guest call stack. */
#if 1 // "temporary" while debugging #1589
            RTLogRelPrintf("***\n");
            uint32_t esp = CPUMGetGuestESP(pVCpu);
            if (    CPUMGetGuestSS(pVCpu) == 0
                &&  esp < _64K)
            {
                /* Dump the 256 bytes of guest stack around a 64-byte aligned
                   start below ss:sp (real-mode style addressing). */
                uint8_t abBuf[PAGE_SIZE];
                RTLogRelPrintf("***\n"
                               "ss:sp=0000:%04x ", esp);
                uint32_t Start = esp & ~(uint32_t)63;
                int rc = PGMPhysSimpleReadGCPhys(pVM, abBuf, Start, 0x100);
                if (RT_SUCCESS(rc))
                    RTLogRelPrintf("0000:%04x TO 0000:%04x:\n"
                                   "%.*Rhxd\n",
                                   Start, Start + 0x100 - 1,
                                   0x100, abBuf);
                else
                    RTLogRelPrintf("rc=%Rrc\n", rc);

                /* grub ... */
                if (esp < 0x2000 && esp > 0x1fc0)
                {
                    rc = PGMPhysSimpleReadGCPhys(pVM, abBuf, 0x8000, 0x800);
                    if (RT_SUCCESS(rc))
                        RTLogRelPrintf("0000:8000 TO 0000:87ff:\n"
                                       "%.*Rhxd\n",
                                       0x800, abBuf);
                }
                /* microsoft cdrom hang ... */
                /* NOTE(review): this reads physical 0x8000 but labels the dump
                   "2000:0000" (linear 0x20000) -- confirm which address was
                   intended. */
                if (true)
                {
                    rc = PGMPhysSimpleReadGCPhys(pVM, abBuf, 0x8000, 0x200);
                    if (RT_SUCCESS(rc))
                        RTLogRelPrintf("2000:0000 TO 2000:01ff:\n"
                                       "%.*Rhxd\n",
                                       0x200, abBuf);
                }
            }
#endif
            RTLogRelPrintf("************** End of Guest state at power off ***************\n");
        }

        /*
         * Perform the power off notifications and advance the state to
         * Off or OffLS.
         */
        PDMR3PowerOff(pVM);

        PUVM pUVM = pVM->pUVM;
        RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
        enmVMState = pVM->enmVMState;
        if (enmVMState == VMSTATE_POWERING_OFF_LS)
            vmR3SetStateLocked(pVM, pUVM, VMSTATE_OFF_LS, VMSTATE_POWERING_OFF_LS);
        else
            vmR3SetStateLocked(pVM, pUVM, VMSTATE_OFF, VMSTATE_POWERING_OFF);
        RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
    }
    return VINF_EM_OFF;
}
1804
1805
1806/**
1807 * Power off the VM.
1808 *
1809 * @returns VBox status code. When called on EMT, this will be a strict status
1810 * code that has to be propagated up the call stack.
1811 *
1812 * @param pVM The handle of the VM to be powered off.
1813 *
1814 * @thread Any thread.
1815 * @vmstate Suspended, Running, Guru Meditation, Load Failure
1816 * @vmstateto Off or OffLS
1817 */
1818VMMR3DECL(int) VMR3PowerOff(PVM pVM)
1819{
1820 LogFlow(("VMR3PowerOff: pVM=%p\n", pVM));
1821 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1822
1823 /*
1824 * Forward the request to the EMTs in reverse order, making all the other
1825 * EMTs stop working before EMT(0) comes and does the actual powering off.
1826 */
1827 int rc = VMR3ReqCallWaitU(pVM->pUVM, VMCPUID_ALL_REVERSE, (PFNRT)vmR3PowerOff, 1, pVM);
1828 LogFlow(("VMR3PowerOff: returns %Rrc\n", rc));
1829 return rc;
1830}
1831
1832
1833/**
1834 * Destroys the VM.
1835 *
1836 * The VM must be powered off (or never really powered on) to call this
1837 * function. The VM handle is destroyed and can no longer be used up successful
1838 * return.
1839 *
1840 * @returns VBox status code.
1841 *
1842 * @param pVM The handle of the VM which should be destroyed.
1843 *
1844 * @thread EMT(0) or any none emulation thread.
1845 * @vmstate Off, Created
1846 * @vmstateto N/A
1847 */
1848VMMR3DECL(int) VMR3Destroy(PVM pVM)
1849{
1850 LogFlow(("VMR3Destroy: pVM=%p\n", pVM));
1851
1852 /*
1853 * Validate input.
1854 */
1855 if (!pVM)
1856 return VERR_INVALID_PARAMETER;
1857 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1858 Assert(VMMGetCpuId(pVM) == 0 || VMMGetCpuId(pVM) == NIL_VMCPUID);
1859
1860 /*
1861 * Change VM state to destroying and unlink the VM.
1862 */
1863 int rc = vmR3TrySetState(pVM, "VMR3Destroy", 1, VMSTATE_DESTROYING, VMSTATE_OFF);
1864 if (RT_FAILURE(rc))
1865 return rc;
1866
1867 /** @todo lock this when we start having multiple machines in a process... */
1868 PUVM pUVM = pVM->pUVM; AssertPtr(pUVM);
1869 if (g_pUVMsHead == pUVM)
1870 g_pUVMsHead = pUVM->pNext;
1871 else
1872 {
1873 PUVM pPrev = g_pUVMsHead;
1874 while (pPrev && pPrev->pNext != pUVM)
1875 pPrev = pPrev->pNext;
1876 AssertMsgReturn(pPrev, ("pUVM=%p / pVM=%p is INVALID!\n", pUVM, pVM), VERR_INVALID_PARAMETER);
1877
1878 pPrev->pNext = pUVM->pNext;
1879 }
1880 pUVM->pNext = NULL;
1881
1882 /*
1883 * Notify registered at destruction listeners.
1884 */
1885 vmR3AtDtor(pVM);
1886
1887 /*
1888 * EMT(0) does the final cleanup, so if we're it calling VMR3Destroy then
1889 * we'll have to postpone parts of it till later. Otherwise, call
1890 * vmR3Destroy on each of the EMTs in ending with EMT(0) doing the bulk
1891 * of the cleanup.
1892 */
1893 if (VMMGetCpuId(pVM) == 0)
1894 {
1895 pUVM->vm.s.fEMTDoesTheCleanup = true;
1896 pUVM->vm.s.fTerminateEMT = true;
1897 VM_FF_SET(pVM, VM_FF_TERMINATE);
1898
1899 /* Terminate the other EMTs. */
1900 for (VMCPUID idCpu = 1; idCpu < pVM->cCpus; idCpu++)
1901 {
1902 int rc = VMR3ReqCallWaitU(pUVM, idCpu, (PFNRT)vmR3Destroy, 1, pVM);
1903 AssertLogRelRC(rc);
1904 }
1905 }
1906 else
1907 {
1908 /* vmR3Destroy on all EMTs, ending with EMT(0). */
1909 int rc = VMR3ReqCallWaitU(pUVM, VMCPUID_ALL_REVERSE, (PFNRT)vmR3Destroy, 1, pVM);
1910 AssertLogRelRC(rc);
1911
1912 /* Wait for EMTs and destroy the UVM. */
1913 vmR3DestroyUVM(pUVM, 30000);
1914 }
1915
1916 LogFlow(("VMR3Destroy: returns VINF_SUCCESS\n"));
1917 return VINF_SUCCESS;
1918}
1919
1920
/**
 * Internal destruction worker.
 *
 * This is either called from VMR3Destroy via VMR3ReqCallU or from
 * vmR3EmulationThreadWithId when EMT(0) terminates after having called
 * VMR3Destroy().
 *
 * When called on EMT(0), it will perform the great bulk of the destruction.
 * When called on the other EMTs, they will do nothing and the whole purpose is
 * to return VINF_EM_TERMINATE so they break out of their run loops.
 *
 * @returns VINF_EM_TERMINATE.
 * @param   pVM     The VM handle.
 */
DECLCALLBACK(int) vmR3Destroy(PVM pVM)
{
    PUVM   pUVM  = pVM->pUVM;
    PVMCPU pVCpu = VMMGetCpu(pVM);
    Assert(pVCpu);
    LogFlow(("vmR3Destroy: pVM=%p pUVM=%p pVCpu=%p idCpu=%u\n", pVM, pUVM, pVCpu, pVCpu->idCpu));

    /*
     * Only VCPU 0 does the full cleanup.
     */
    if (pVCpu->idCpu == 0)
    {

        /*
         * Dump statistics to the log.
         */
#if defined(VBOX_WITH_STATISTICS) || defined(LOG_ENABLED)
        RTLogFlags(NULL, "nodisabled nobuffered");
#endif
#ifdef VBOX_WITH_STATISTICS
        STAMR3Dump(pVM, "*");
#else
        LogRel(("************************* Statistics *************************\n"));
        STAMR3DumpToReleaseLog(pVM, "*");
        LogRel(("********************* End of statistics **********************\n"));
#endif

        /*
         * Destroy the VM components.  Termination is done in (roughly) the
         * reverse order of initialization; do not reorder casually.
         */
        int rc = TMR3Term(pVM);
        AssertRC(rc);
#ifdef VBOX_WITH_DEBUGGER
        rc = DBGCTcpTerminate(pVM, pUVM->vm.s.pvDBGC);
        pUVM->vm.s.pvDBGC = NULL;
#endif
        /* NOTE(review): when VBOX_WITH_DEBUGGER is undefined, this AssertRC
           re-checks the TMR3Term status from above -- harmless, but confirm
           it's intended to sit outside the #ifdef. */
        AssertRC(rc);
        rc = DBGFR3Term(pVM);
        AssertRC(rc);
        rc = PDMR3Term(pVM);
        AssertRC(rc);
        rc = EMR3Term(pVM);
        AssertRC(rc);
        rc = IOMR3Term(pVM);
        AssertRC(rc);
        rc = CSAMR3Term(pVM);
        AssertRC(rc);
        rc = PATMR3Term(pVM);
        AssertRC(rc);
        rc = TRPMR3Term(pVM);
        AssertRC(rc);
        rc = SELMR3Term(pVM);
        AssertRC(rc);
        rc = REMR3Term(pVM);
        AssertRC(rc);
        rc = HWACCMR3Term(pVM);
        AssertRC(rc);
        rc = PGMR3Term(pVM);
        AssertRC(rc);
        rc = VMMR3Term(pVM); /* Terminates the ring-0 code! */
        AssertRC(rc);
        rc = CPUMR3Term(pVM);
        AssertRC(rc);
        SSMR3Term(pVM);
        rc = PDMR3CritSectTerm(pVM);
        AssertRC(rc);
        rc = MMR3Term(pVM);
        AssertRC(rc);

        /*
         * We're done in this thread (EMT).
         */
        ASMAtomicUoWriteBool(&pUVM->vm.s.fTerminateEMT, true);
        ASMAtomicWriteU32(&pVM->fGlobalForcedActions, VM_FF_TERMINATE);
        LogFlow(("vmR3Destroy: returning %Rrc\n", VINF_EM_TERMINATE));
    }
    return VINF_EM_TERMINATE;
}
2013
2014
2015/**
2016 * Called at the end of the EMT procedure to take care of the final cleanup.
2017 *
2018 * Currently only EMT(0) will do work here. It will destroy the shared VM
2019 * structure if it is still around. If EMT(0) was the caller of VMR3Destroy it
2020 * will destroy UVM and nothing will be left behind upon exit. But if some
2021 * other thread is calling VMR3Destroy, they will clean up UVM after all EMTs
2022 * has exitted.
2023 *
2024 * @param pUVM The UVM handle.
2025 * @param idCpu The virtual CPU id.
2026 */
2027void vmR3DestroyFinalBitFromEMT(PUVM pUVM, VMCPUID idCpu)
2028{
2029 /*
2030 * Only EMT(0) has work to do here.
2031 */
2032 if (idCpu != 0)
2033 return;
2034 Assert( !pUVM->pVM
2035 || VMMGetCpuId(pUVM->pVM) == 0);
2036
2037 /*
2038 * If we have a shared VM structure, change its state to Terminated and
2039 * tell GVMM to destroy it.
2040 */
2041 if (pUVM->pVM)
2042 {
2043 vmR3SetState(pUVM->pVM, VMSTATE_TERMINATED, VMSTATE_DESTROYING);
2044 int rc = SUPR3CallVMMR0Ex(pUVM->pVM->pVMR0, 0 /*idCpu*/, VMMR0_DO_GVMM_DESTROY_VM, 0, NULL);
2045 AssertLogRelRC(rc);
2046 pUVM->pVM = NULL;
2047 }
2048
2049 /*
2050 * If EMT(0) called VMR3Destroy, then it will destroy UVM as well.
2051 */
2052 if (pUVM->vm.s.fEMTDoesTheCleanup)
2053 vmR3DestroyUVM(pUVM, 30000);
2054}
2055
2056
2057/**
2058 * Destroys the UVM portion.
2059 *
2060 * This is called as the final step in the VM destruction or as the cleanup
2061 * in case of a creation failure. If EMT(0) called VMR3Destroy, meaning
2062 * VMINTUSERPERVM::fEMTDoesTheCleanup is true, it will call this as
2063 * vmR3DestroyFinalBitFromEMT completes.
2064 *
2065 * @param pVM VM Handle.
2066 * @param cMilliesEMTWait The number of milliseconds to wait for the emulation
2067 * threads.
2068 */
2069static void vmR3DestroyUVM(PUVM pUVM, uint32_t cMilliesEMTWait)
2070{
2071 /*
2072 * Signal termination of each the emulation threads and
2073 * wait for them to complete.
2074 */
2075 /* Signal them. */
2076 ASMAtomicUoWriteBool(&pUVM->vm.s.fTerminateEMT, true);
2077 for (VMCPUID i = 0; i < pUVM->cCpus; i++)
2078 {
2079 ASMAtomicUoWriteBool(&pUVM->aCpus[i].vm.s.fTerminateEMT, true);
2080 if (pUVM->pVM)
2081 VM_FF_SET(pUVM->pVM, VM_FF_TERMINATE);
2082 VMR3NotifyGlobalFFU(pUVM, VMNOTIFYFF_FLAGS_DONE_REM);
2083 RTSemEventSignal(pUVM->aCpus[i].vm.s.EventSemWait);
2084 }
2085
2086 /* Wait for them. */
2087 uint64_t NanoTS = RTTimeNanoTS();
2088 RTTHREAD hSelf = RTThreadSelf();
2089 ASMAtomicUoWriteBool(&pUVM->vm.s.fTerminateEMT, true);
2090 for (VMCPUID i = 0; i < pUVM->cCpus; i++)
2091 {
2092 RTTHREAD hThread = pUVM->aCpus[i].vm.s.ThreadEMT;
2093 if ( hThread != NIL_RTTHREAD
2094 && hThread != hSelf)
2095 {
2096 uint64_t cMilliesElapsed = (RTTimeNanoTS() - NanoTS) / 1000000;
2097 int rc2 = RTThreadWait(hThread,
2098 cMilliesElapsed < cMilliesEMTWait
2099 ? RT_MAX(cMilliesEMTWait - cMilliesElapsed, 2000)
2100 : 2000,
2101 NULL);
2102 if (rc2 == VERR_TIMEOUT) /* avoid the assertion when debugging. */
2103 rc2 = RTThreadWait(hThread, 1000, NULL);
2104 AssertLogRelMsgRC(rc2, ("i=%u rc=%Rrc\n", i, rc2));
2105 if (RT_SUCCESS(rc2))
2106 pUVM->aCpus[0].vm.s.ThreadEMT = NIL_RTTHREAD;
2107 }
2108 }
2109
2110 /* Cleanup the semaphores. */
2111 for (VMCPUID i = 0; i < pUVM->cCpus; i++)
2112 {
2113 RTSemEventDestroy(pUVM->aCpus[i].vm.s.EventSemWait);
2114 pUVM->aCpus[i].vm.s.EventSemWait = NIL_RTSEMEVENT;
2115 }
2116
2117 /*
2118 * Free the event semaphores associated with the request packets.
2119 */
2120 unsigned cReqs = 0;
2121 for (unsigned i = 0; i < RT_ELEMENTS(pUVM->vm.s.apReqFree); i++)
2122 {
2123 PVMREQ pReq = pUVM->vm.s.apReqFree[i];
2124 pUVM->vm.s.apReqFree[i] = NULL;
2125 for (; pReq; pReq = pReq->pNext, cReqs++)
2126 {
2127 pReq->enmState = VMREQSTATE_INVALID;
2128 RTSemEventDestroy(pReq->EventSem);
2129 }
2130 }
2131 Assert(cReqs == pUVM->vm.s.cReqFree); NOREF(cReqs);
2132
2133 /*
2134 * Kill all queued requests. (There really shouldn't be any!)
2135 */
2136 for (unsigned i = 0; i < 10; i++)
2137 {
2138 PVMREQ pReqHead = (PVMREQ)ASMAtomicXchgPtr((void *volatile *)&pUVM->vm.s.pReqs, NULL);
2139 AssertMsg(!pReqHead, ("This isn't supposed to happen! VMR3Destroy caller has to serialize this.\n"));
2140 if (!pReqHead)
2141 break;
2142 for (PVMREQ pReq = pReqHead; pReq; pReq = pReq->pNext)
2143 {
2144 ASMAtomicUoWriteSize(&pReq->iStatus, VERR_INTERNAL_ERROR);
2145 ASMAtomicWriteSize(&pReq->enmState, VMREQSTATE_INVALID);
2146 RTSemEventSignal(pReq->EventSem);
2147 RTThreadSleep(2);
2148 RTSemEventDestroy(pReq->EventSem);
2149 }
2150 /* give them a chance to respond before we free the request memory. */
2151 RTThreadSleep(32);
2152 }
2153
2154 /*
2155 * Now all queued VCPU requests (again, there shouldn't be any).
2156 */
2157 for (VMCPUID i = 0; i < pUVM->cCpus; i++)
2158 {
2159 PUVMCPU pUVCpu = &pUVM->aCpus[i];
2160
2161 for (unsigned i = 0; i < 10; i++)
2162 {
2163 PVMREQ pReqHead = (PVMREQ)ASMAtomicXchgPtr((void *volatile *)&pUVCpu->vm.s.pReqs, NULL);
2164 AssertMsg(!pReqHead, ("This isn't supposed to happen! VMR3Destroy caller has to serialize this.\n"));
2165 if (!pReqHead)
2166 break;
2167 for (PVMREQ pReq = pReqHead; pReq; pReq = pReq->pNext)
2168 {
2169 ASMAtomicUoWriteSize(&pReq->iStatus, VERR_INTERNAL_ERROR);
2170 ASMAtomicWriteSize(&pReq->enmState, VMREQSTATE_INVALID);
2171 RTSemEventSignal(pReq->EventSem);
2172 RTThreadSleep(2);
2173 RTSemEventDestroy(pReq->EventSem);
2174 }
2175 /* give them a chance to respond before we free the request memory. */
2176 RTThreadSleep(32);
2177 }
2178 }
2179
2180 /*
2181 * Make sure the VMMR0.r0 module and whatever else is unloaded.
2182 */
2183 PDMR3TermUVM(pUVM);
2184
2185 /*
2186 * Terminate the support library if initialized.
2187 */
2188 if (pUVM->vm.s.pSession)
2189 {
2190 int rc = SUPR3Term(false /*fForced*/);
2191 AssertRC(rc);
2192 pUVM->vm.s.pSession = NIL_RTR0PTR;
2193 }
2194
2195 /*
2196 * Destroy the MM heap and free the UVM structure.
2197 */
2198 MMR3TermUVM(pUVM);
2199 STAMR3TermUVM(pUVM);
2200
2201#ifdef LOG_ENABLED
2202 RTLogSetCustomPrefixCallback(NULL, NULL, NULL);
2203#endif
2204 RTTlsFree(pUVM->vm.s.idxTLS);
2205
2206 ASMAtomicUoWriteU32(&pUVM->u32Magic, UINT32_MAX);
2207 RTMemPageFree(pUVM);
2208
2209 RTLogFlush(NULL);
2210}
2211
2212
2213/**
2214 * Enumerates the VMs in this process.
2215 *
2216 * @returns Pointer to the next VM.
2217 * @returns NULL when no more VMs.
2218 * @param pVMPrev The previous VM
2219 * Use NULL to start the enumeration.
2220 */
2221VMMR3DECL(PVM) VMR3EnumVMs(PVM pVMPrev)
2222{
2223 /*
2224 * This is quick and dirty. It has issues with VM being
2225 * destroyed during the enumeration.
2226 */
2227 PUVM pNext;
2228 if (pVMPrev)
2229 pNext = pVMPrev->pUVM->pNext;
2230 else
2231 pNext = g_pUVMsHead;
2232 return pNext ? pNext->pVM : NULL;
2233}
2234
2235
2236/**
2237 * Registers an at VM destruction callback.
2238 *
2239 * @returns VBox status code.
2240 * @param pfnAtDtor Pointer to callback.
2241 * @param pvUser User argument.
2242 */
2243VMMR3DECL(int) VMR3AtDtorRegister(PFNVMATDTOR pfnAtDtor, void *pvUser)
2244{
2245 /*
2246 * Check if already registered.
2247 */
2248 VM_ATDTOR_LOCK();
2249 PVMATDTOR pCur = g_pVMAtDtorHead;
2250 while (pCur)
2251 {
2252 if (pfnAtDtor == pCur->pfnAtDtor)
2253 {
2254 VM_ATDTOR_UNLOCK();
2255 AssertMsgFailed(("Already registered at destruction callback %p!\n", pfnAtDtor));
2256 return VERR_INVALID_PARAMETER;
2257 }
2258
2259 /* next */
2260 pCur = pCur->pNext;
2261 }
2262 VM_ATDTOR_UNLOCK();
2263
2264 /*
2265 * Allocate new entry.
2266 */
2267 PVMATDTOR pVMAtDtor = (PVMATDTOR)RTMemAlloc(sizeof(*pVMAtDtor));
2268 if (!pVMAtDtor)
2269 return VERR_NO_MEMORY;
2270
2271 VM_ATDTOR_LOCK();
2272 pVMAtDtor->pfnAtDtor = pfnAtDtor;
2273 pVMAtDtor->pvUser = pvUser;
2274 pVMAtDtor->pNext = g_pVMAtDtorHead;
2275 g_pVMAtDtorHead = pVMAtDtor;
2276 VM_ATDTOR_UNLOCK();
2277
2278 return VINF_SUCCESS;
2279}
2280
2281
2282/**
2283 * Deregisters an at VM destruction callback.
2284 *
2285 * @returns VBox status code.
2286 * @param pfnAtDtor Pointer to callback.
2287 */
2288VMMR3DECL(int) VMR3AtDtorDeregister(PFNVMATDTOR pfnAtDtor)
2289{
2290 /*
2291 * Find it, unlink it and free it.
2292 */
2293 VM_ATDTOR_LOCK();
2294 PVMATDTOR pPrev = NULL;
2295 PVMATDTOR pCur = g_pVMAtDtorHead;
2296 while (pCur)
2297 {
2298 if (pfnAtDtor == pCur->pfnAtDtor)
2299 {
2300 if (pPrev)
2301 pPrev->pNext = pCur->pNext;
2302 else
2303 g_pVMAtDtorHead = pCur->pNext;
2304 pCur->pNext = NULL;
2305 VM_ATDTOR_UNLOCK();
2306
2307 RTMemFree(pCur);
2308 return VINF_SUCCESS;
2309 }
2310
2311 /* next */
2312 pPrev = pCur;
2313 pCur = pCur->pNext;
2314 }
2315 VM_ATDTOR_UNLOCK();
2316
2317 return VERR_INVALID_PARAMETER;
2318}
2319
2320
2321/**
2322 * Walks the list of at VM destructor callbacks.
2323 * @param pVM The VM which is about to be destroyed.
2324 */
2325static void vmR3AtDtor(PVM pVM)
2326{
2327 /*
2328 * Find it, unlink it and free it.
2329 */
2330 VM_ATDTOR_LOCK();
2331 for (PVMATDTOR pCur = g_pVMAtDtorHead; pCur; pCur = pCur->pNext)
2332 pCur->pfnAtDtor(pVM, pCur->pvUser);
2333 VM_ATDTOR_UNLOCK();
2334}
2335
2336
2337/**
2338 * Worker which checks integrity of some internal structures.
2339 * This is yet another attempt to track down that AVL tree crash.
2340 */
2341static void vmR3CheckIntegrity(PVM pVM)
2342{
2343#ifdef VBOX_STRICT
2344 int rc = PGMR3CheckIntegrity(pVM);
2345 AssertReleaseRC(rc);
2346#endif
2347}
2348
2349
2350/**
2351 * Reset request processor.
2352 *
2353 * This is called by the emulation threads as a response to the
2354 * reset request issued by VMR3Reset().
2355 *
2356 * @returns VBox status code.
2357 * @param pVM VM to reset.
2358 */
2359static DECLCALLBACK(int) vmR3Reset(PVM pVM)
2360{
2361 int rcRet = VINF_EM_RESET;
2362 PVMCPU pVCpu = VMMGetCpu(pVM);
2363
2364 /*
2365 * The first EMT will try change the state to resetting.
2366 * We do the live save cancellation inside the state critsect because it
2367 * is cleaner and safer.
2368 */
2369 if (pVCpu->idCpu == pVM->cCpus - 1)
2370 {
2371 PUVM pUVM = pVM->pUVM;
2372 RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
2373 int rc = vmR3TrySetState(pVM, "VMR3Reset", 3,
2374 VMSTATE_RESETTING, VMSTATE_RUNNING,
2375 VMSTATE_RESETTING, VMSTATE_SUSPENDED,
2376 VMSTATE_RESETTING_LS, VMSTATE_RUNNING_LS);
2377 if (rc == 3)
2378 SSMR3Cancel(pVM);
2379 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
2380 if (RT_FAILURE(rc))
2381 return rc;
2382 }
2383
2384 /*
2385 * Check the state.
2386 */
2387 VMSTATE enmVMState = VMR3GetState(pVM);
2388 AssertLogRelMsgReturn( enmVMState == VMSTATE_RESETTING
2389 || enmVMState == VMSTATE_RESETTING_LS,
2390 ("%s\n", VMR3GetStateName(enmVMState)),
2391 VERR_VM_INVALID_VM_STATE);
2392
2393 /*
2394 * EMT(0) does the full cleanup *after* all the other EMTs has been
2395 * thru here and been told to enter the EMSTATE_WAIT_SIPI state.
2396 *
2397 * Because there are per-cpu reset routines and order may/is important,
2398 * the following sequence looks a bit ugly...
2399 */
2400 if (pVCpu->idCpu == 0)
2401 vmR3CheckIntegrity(pVM);
2402
2403 /* Reset the VCpu state. */
2404 VMCPU_ASSERT_STATE(pVCpu, VMCPUSTATE_STARTED);
2405
2406 /* Clear all pending forced actions. */
2407 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_ALL_MASK & ~VMCPU_FF_REQUEST);
2408
2409 /*
2410 * Reset the VM components.
2411 */
2412 if (pVCpu->idCpu == 0)
2413 {
2414 PATMR3Reset(pVM);
2415 CSAMR3Reset(pVM);
2416 PGMR3Reset(pVM); /* We clear VM RAM in PGMR3Reset. It's vital PDMR3Reset is executed
2417 * _afterwards_. E.g. ACPI sets up RAM tables during init/reset. */
2418 MMR3Reset(pVM);
2419 PDMR3Reset(pVM);
2420 SELMR3Reset(pVM);
2421 TRPMR3Reset(pVM);
2422 REMR3Reset(pVM);
2423 IOMR3Reset(pVM);
2424 CPUMR3Reset(pVM);
2425 }
2426 CPUMR3ResetCpu(pVCpu);
2427 if (pVCpu->idCpu == 0)
2428 {
2429 TMR3Reset(pVM);
2430 EMR3Reset(pVM);
2431 HWACCMR3Reset(pVM); /* This must come *after* PATM, CSAM, CPUM, SELM and TRPM. */
2432
2433#ifdef LOG_ENABLED
2434 /*
2435 * Debug logging.
2436 */
2437 RTLogPrintf("\n\nThe VM was reset:\n");
2438 DBGFR3Info(pVM, "cpum", "verbose", NULL);
2439#endif
2440
2441 /*
2442 * Since EMT(0) is the last to go thru here, it will advance the state.
2443 */
2444 PUVM pUVM = pVM->pUVM;
2445 RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
2446 enmVMState = pVM->enmVMState;
2447 if (enmVMState == VMSTATE_RESETTING)
2448 {
2449 if (pUVM->vm.s.enmPrevVMState == VMSTATE_SUSPENDED)
2450 vmR3SetStateLocked(pVM, pUVM, VMSTATE_SUSPENDED, VMSTATE_RESETTING);
2451 else
2452 vmR3SetStateLocked(pVM, pUVM, VMSTATE_RUNNING, VMSTATE_RESETTING);
2453 }
2454 else
2455 {
2456 /** @todo EMT(0) should not execute code if the state is
2457 * VMSTATE_RESETTING_LS... This requires adding
2458 * VINF_EM_RESET_AND_SUSPEND. Can be done later. */
2459 vmR3SetStateLocked(pVM, pUVM, VMSTATE_RESET_LS, VMSTATE_RESETTING_LS);
2460 rcRet = VINF_EM_RESET/*_AND_SUSPEND*/;
2461 }
2462 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
2463
2464 vmR3CheckIntegrity(pVM);
2465 }
2466
2467 return rcRet;
2468}
2469
2470
2471/**
2472 * Reset the current VM.
2473 *
2474 * @returns VBox status code.
2475 * @param pVM VM to reset.
2476 */
2477VMMR3DECL(int) VMR3Reset(PVM pVM)
2478{
2479 LogFlow(("VMR3Reset:\n"));
2480 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
2481
2482 /*
2483 * Forward the query on
2484 * Queue reset request to the emulation thread
2485 * and wait for it to be processed. (in reverse order as VCPU 0 does the real cleanup)
2486 */
2487 int rc = VMR3ReqCallWaitU(pVM->pUVM, VMCPUID_ALL_REVERSE, (PFNRT)vmR3Reset, 1, pVM);
2488 AssertLogRelRC(rc);
2489 return rc;
2490}
2491
2492
2493/**
2494 * Gets the current VM state.
2495 *
2496 * @returns The current VM state.
2497 * @param pVM VM handle.
2498 * @thread Any
2499 */
2500VMMR3DECL(VMSTATE) VMR3GetState(PVM pVM)
2501{
2502 return pVM->enmVMState;
2503}
2504
2505
2506/**
2507 * Gets the state name string for a VM state.
2508 *
2509 * @returns Pointer to the state name. (readonly)
2510 * @param enmState The state.
2511 */
2512VMMR3DECL(const char *) VMR3GetStateName(VMSTATE enmState)
2513{
2514 switch (enmState)
2515 {
2516 case VMSTATE_CREATING: return "CREATING";
2517 case VMSTATE_CREATED: return "CREATED";
2518 case VMSTATE_LOADING: return "LOADING";
2519 case VMSTATE_POWERING_ON: return "POWERING_ON";
2520 case VMSTATE_RESUMING: return "RESUMING";
2521 case VMSTATE_RUNNING: return "RUNNING";
2522 case VMSTATE_RUNNING_LS: return "RUNNING_LS";
2523 case VMSTATE_RESETTING: return "RESETTING";
2524 case VMSTATE_RESETTING_LS: return "RESETTING_LS";
2525 case VMSTATE_RESET_LS: return "RESET_LS";
2526 case VMSTATE_SUSPENDED: return "SUSPENDED";
2527 case VMSTATE_SUSPENDED_LS: return "SUSPENDED_LS";
2528 case VMSTATE_SUSPENDING: return "SUSPENDING";
2529 case VMSTATE_SUSPENDING_LS: return "SUSPENDING_LS";
2530 case VMSTATE_SAVING: return "SAVING";
2531 case VMSTATE_DEBUGGING: return "DEBUGGING";
2532 case VMSTATE_DEBUGGING_LS: return "DEBUGGING_LS";
2533 case VMSTATE_POWERING_OFF: return "POWERING_OFF";
2534 case VMSTATE_POWERING_OFF_LS: return "POWERING_OFF_LS";
2535 case VMSTATE_FATAL_ERROR: return "FATAL_ERROR";
2536 case VMSTATE_FATAL_ERROR_LS: return "FATAL_ERROR_LS";
2537 case VMSTATE_GURU_MEDITATION: return "GURU_MEDITATION";
2538 case VMSTATE_GURU_MEDITATION_LS:return "GURU_MEDITATION_LS";
2539 case VMSTATE_LOAD_FAILURE: return "LOAD_FAILURE";
2540 case VMSTATE_OFF: return "OFF";
2541 case VMSTATE_DESTROYING: return "DESTROYING";
2542 case VMSTATE_TERMINATED: return "TERMINATED";
2543
2544 default:
2545 AssertMsgFailed(("Unknown state %d\n", enmState));
2546 return "Unknown!\n";
2547 }
2548}
2549
2550
2551/**
2552 * Validates the state transition in strict builds.
2553 *
2554 * @returns true if valid, false if not.
2555 *
2556 * @param enmStateOld The old (current) state.
2557 * @param enmStateNew The proposed new state.
2558 *
2559 * @remarks The reference for this is found in doc/vp/VMM.vpp, the VMSTATE
2560 * diagram (under State Machine Diagram).
2561 */
2562static bool vmR3ValidateStateTransition(VMSTATE enmStateOld, VMSTATE enmStateNew)
2563{
2564#ifdef VBOX_STRICT
2565 switch (enmStateOld)
2566 {
2567 case VMSTATE_CREATING:
2568 AssertMsgReturn(enmStateNew == VMSTATE_CREATED, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
2569 break;
2570
2571 case VMSTATE_CREATED:
2572 AssertMsgReturn( enmStateNew == VMSTATE_LOADING
2573 || enmStateNew == VMSTATE_POWERING_ON
2574 || enmStateNew == VMSTATE_POWERING_OFF
2575 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
2576 break;
2577
2578 case VMSTATE_LOADING:
2579 AssertMsgReturn( enmStateNew == VMSTATE_SUSPENDED
2580 || enmStateNew == VMSTATE_LOAD_FAILURE
2581 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
2582 break;
2583
2584 case VMSTATE_POWERING_ON:
2585 AssertMsgReturn( enmStateNew == VMSTATE_RUNNING
2586 || enmStateNew == VMSTATE_FATAL_ERROR /*?*/
2587 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
2588 break;
2589
2590 case VMSTATE_RESUMING:
2591 AssertMsgReturn( enmStateNew == VMSTATE_RUNNING
2592 || enmStateNew == VMSTATE_FATAL_ERROR /*?*/
2593 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
2594 break;
2595
2596 case VMSTATE_RUNNING:
2597 AssertMsgReturn( enmStateNew == VMSTATE_POWERING_OFF
2598 || enmStateNew == VMSTATE_SUSPENDING
2599 || enmStateNew == VMSTATE_RESETTING
2600 || enmStateNew == VMSTATE_RUNNING_LS
2601 || enmStateNew == VMSTATE_DEBUGGING
2602 || enmStateNew == VMSTATE_FATAL_ERROR
2603 || enmStateNew == VMSTATE_GURU_MEDITATION
2604 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
2605 break;
2606
2607 case VMSTATE_RUNNING_LS:
2608 AssertMsgReturn( enmStateNew == VMSTATE_POWERING_OFF_LS
2609 || enmStateNew == VMSTATE_SUSPENDING_LS
2610 || enmStateNew == VMSTATE_RESETTING_LS
2611 || enmStateNew == VMSTATE_RUNNING
2612 || enmStateNew == VMSTATE_DEBUGGING_LS
2613 || enmStateNew == VMSTATE_FATAL_ERROR_LS
2614 || enmStateNew == VMSTATE_GURU_MEDITATION_LS
2615 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
2616 break;
2617
2618 case VMSTATE_RESETTING:
2619 AssertMsgReturn(enmStateNew == VMSTATE_RUNNING, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
2620 break;
2621
2622 case VMSTATE_RESETTING_LS:
2623 AssertMsgReturn( enmStateNew == VMSTATE_RUNNING
2624 || enmStateNew == VMSTATE_RESET_LS
2625 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
2626 break;
2627
2628 case VMSTATE_RESET_LS:
2629 AssertMsgReturn(enmStateNew == VMSTATE_RUNNING, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
2630 break;
2631
2632 case VMSTATE_SUSPENDING:
2633 AssertMsgReturn(enmStateNew == VMSTATE_SUSPENDED, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
2634 break;
2635
2636 case VMSTATE_SUSPENDING_LS:
2637 AssertMsgReturn( enmStateNew == VMSTATE_SUSPENDING
2638 || enmStateNew == VMSTATE_SUSPENDED_LS
2639 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
2640 break;
2641
2642 case VMSTATE_SUSPENDED:
2643 AssertMsgReturn( enmStateNew == VMSTATE_POWERING_OFF
2644 || enmStateNew == VMSTATE_SAVING
2645 || enmStateNew == VMSTATE_RESETTING
2646 || enmStateNew == VMSTATE_RESUMING
2647 || enmStateNew == VMSTATE_LOADING
2648 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
2649 break;
2650
2651 case VMSTATE_SUSPENDED_LS:
2652 AssertMsgReturn( enmStateNew == VMSTATE_SUSPENDED_LS
2653 || enmStateNew == VMSTATE_SUSPENDED
2654 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
2655 break;
2656
2657 case VMSTATE_SAVING:
2658 AssertMsgReturn(enmStateNew == VMSTATE_SUSPENDED, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
2659 break;
2660
2661 case VMSTATE_DEBUGGING:
2662 AssertMsgReturn( enmStateNew == VMSTATE_RUNNING
2663 || enmStateNew == VMSTATE_POWERING_OFF
2664 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
2665 break;
2666
2667 case VMSTATE_DEBUGGING_LS:
2668 AssertMsgReturn( enmStateNew == VMSTATE_DEBUGGING
2669 || enmStateNew == VMSTATE_RUNNING_LS
2670 || enmStateNew == VMSTATE_POWERING_OFF_LS
2671 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
2672 break;
2673
2674 case VMSTATE_POWERING_OFF:
2675 AssertMsgReturn(enmStateNew == VMSTATE_OFF, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
2676 break;
2677
2678 case VMSTATE_POWERING_OFF_LS:
2679 AssertMsgReturn( enmStateNew == VMSTATE_POWERING_OFF
2680 || enmStateNew == VMSTATE_OFF_LS
2681 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
2682 break;
2683
2684 case VMSTATE_OFF:
2685 AssertMsgReturn(enmStateNew == VMSTATE_DESTROYING, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
2686 break;
2687
2688 case VMSTATE_OFF_LS:
2689 AssertMsgReturn(enmStateNew == VMSTATE_OFF, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
2690 break;
2691
2692 case VMSTATE_FATAL_ERROR:
2693 AssertMsgReturn(enmStateNew == VMSTATE_POWERING_OFF, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
2694 break;
2695
2696 case VMSTATE_FATAL_ERROR_LS:
2697 AssertMsgReturn( enmStateNew == VMSTATE_FATAL_ERROR
2698 || enmStateNew == VMSTATE_POWERING_OFF_LS
2699 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
2700 break;
2701
2702 case VMSTATE_GURU_MEDITATION:
2703 AssertMsgReturn( enmStateNew == VMSTATE_DEBUGGING
2704 || enmStateNew == VMSTATE_POWERING_OFF
2705 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
2706 break;
2707
2708 case VMSTATE_GURU_MEDITATION_LS:
2709 AssertMsgReturn( enmStateNew == VMSTATE_GURU_MEDITATION
2710 || enmStateNew == VMSTATE_POWERING_OFF_LS
2711 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
2712 break;
2713
2714 case VMSTATE_LOAD_FAILURE:
2715 AssertMsgReturn(enmStateNew == VMSTATE_POWERING_OFF, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
2716 break;
2717
2718 case VMSTATE_DESTROYING:
2719 AssertMsgReturn(enmStateNew == VMSTATE_TERMINATED, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
2720 break;
2721
2722 case VMSTATE_TERMINATED:
2723 default:
2724 AssertMsgFailedReturn(("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
2725 break;
2726 }
2727#endif /* VBOX_STRICT */
2728 return true;
2729}
2730
2731
2732/**
2733 * Does the state change callouts.
2734 *
2735 * The caller owns the AtStateCritSect.
2736 *
2737 * @param pVM The VM handle.
2738 * @param pUVM The UVM handle.
2739 * @param enmStateNew The New state.
2740 * @param enmStateOld The old state.
2741 */
2742static void vmR3DoAtState(PVM pVM, PUVM pUVM, VMSTATE enmStateNew, VMSTATE enmStateOld)
2743{
2744 LogRel(("Changing the VM state from '%s' to '%s'.\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)));
2745
2746 for (PVMATSTATE pCur = pUVM->vm.s.pAtState; pCur; pCur = pCur->pNext)
2747 {
2748 pCur->pfnAtState(pVM, enmStateNew, enmStateOld, pCur->pvUser);
2749 if ( enmStateNew != VMSTATE_DESTROYING
2750 && pVM->enmVMState == VMSTATE_DESTROYING)
2751 break;
2752 AssertMsg(pVM->enmVMState == enmStateNew,
2753 ("You are not allowed to change the state while in the change callback, except "
2754 "from destroying the VM. There are restrictions in the way the state changes "
2755 "are propagated up to the EM execution loop and it makes the program flow very "
2756 "difficult to follow. (%s, expected %s, old %s)\n",
2757 VMR3GetStateName(pVM->enmVMState), VMR3GetStateName(enmStateNew),
2758 VMR3GetStateName(enmStateOld)));
2759 }
2760}
2761
2762
2763/**
2764 * Sets the current VM state, with the AtStatCritSect already entered.
2765 *
2766 * @param pVM The VM handle.
2767 * @param pUVM The UVM handle.
2768 * @param enmStateNew The new state.
2769 * @param enmStateOld The old state.
2770 */
2771static void vmR3SetStateLocked(PVM pVM, PUVM pUVM, VMSTATE enmStateNew, VMSTATE enmStateOld)
2772{
2773 vmR3ValidateStateTransition(enmStateOld, enmStateNew);
2774
2775 AssertMsg(pVM->enmVMState == enmStateOld,
2776 ("%s != %s\n", VMR3GetStateName(pVM->enmVMState), VMR3GetStateName(enmStateOld)));
2777 pUVM->vm.s.enmPrevVMState = enmStateOld;
2778 pVM->enmVMState = enmStateNew;
2779
2780 vmR3DoAtState(pVM, pUVM, enmStateNew, enmStateOld);
2781}
2782
2783
2784/**
2785 * Sets the current VM state.
2786 *
2787 * @param pVM VM handle.
2788 * @param enmStateNew The new state.
2789 * @param enmStateOld The old state (for asserting only).
2790 */
2791static void vmR3SetState(PVM pVM, VMSTATE enmStateNew, VMSTATE enmStateOld)
2792{
2793 PUVM pUVM = pVM->pUVM;
2794 RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
2795
2796 AssertMsg(pVM->enmVMState == enmStateOld,
2797 ("%s != %s\n", VMR3GetStateName(pVM->enmVMState), VMR3GetStateName(enmStateOld)));
2798 vmR3SetStateLocked(pVM, pUVM, enmStateNew, pVM->enmVMState);
2799
2800 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
2801}
2802
2803
2804/**
2805 * Tries to perform a state transition.
2806 *
2807 * @returns The 1-based ordinal of the succeeding transition.
2808 * VERR_VM_INVALID_VM_STATE and Assert+LogRel on failure.
2809 *
2810 * @param pVM The VM handle.
2811 * @param pszWho Who is trying to change it.
2812 * @param cTransitions The number of transitions in the ellipsis.
2813 * @param ... Transition pairs; new, old.
2814 */
2815static int vmR3TrySetState(PVM pVM, const char *pszWho, unsigned cTransitions, ...)
2816{
2817 va_list va;
2818 VMSTATE enmStateNew = VMSTATE_CREATED;
2819 VMSTATE enmStateOld = VMSTATE_CREATED;
2820
2821#ifdef VBOX_STRICT
2822 /*
2823 * Validate the input first.
2824 */
2825 va_start(va, cTransitions);
2826 for (unsigned i = 0; i < cTransitions; i++)
2827 {
2828 enmStateNew = (VMSTATE)va_arg(va, /*VMSTATE*/int);
2829 enmStateOld = (VMSTATE)va_arg(va, /*VMSTATE*/int);
2830 vmR3ValidateStateTransition(enmStateOld, enmStateNew);
2831 }
2832 va_end(va);
2833#endif
2834
2835 /*
2836 * Grab the lock and see if any of the proposed transisions works out.
2837 */
2838 va_start(va, cTransitions);
2839 int rc = VERR_VM_INVALID_VM_STATE;
2840 PUVM pUVM = pVM->pUVM;
2841 RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
2842
2843 VMSTATE enmStateCur = pVM->enmVMState;
2844
2845 for (unsigned i = 0; i < cTransitions; i++)
2846 {
2847 enmStateNew = (VMSTATE)va_arg(va, /*VMSTATE*/int);
2848 enmStateOld = (VMSTATE)va_arg(va, /*VMSTATE*/int);
2849 if (enmStateCur == enmStateOld)
2850 {
2851 vmR3SetStateLocked(pVM, pUVM, enmStateNew, enmStateOld);
2852 rc = i + 1;
2853 break;
2854 }
2855 }
2856
2857 if (RT_FAILURE(rc))
2858 {
2859 /*
2860 * Complain about it.
2861 */
2862 if (cTransitions == 1)
2863 LogRel(("%s: %s -> %s failed, because the VM state is actually %s\n",
2864 pszWho, VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew), VMR3GetStateName(enmStateCur)));
2865 else
2866 {
2867 va_end(va);
2868 va_start(va, cTransitions);
2869 LogRel(("%s:\n", pszWho));
2870 for (unsigned i = 0; i < cTransitions; i++)
2871 {
2872 enmStateNew = (VMSTATE)va_arg(va, /*VMSTATE*/int);
2873 enmStateOld = (VMSTATE)va_arg(va, /*VMSTATE*/int);
2874 LogRel(("%s%s -> %s",
2875 i ? ", " : " ", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)));
2876 }
2877 LogRel((" failed, because the VM state is actually %s\n", VMR3GetStateName(enmStateCur)));
2878 }
2879
2880 VMSetError(pVM, VERR_VM_INVALID_VM_STATE, RT_SRC_POS,
2881 N_("%s failed because the VM state is %s instead of %s"),
2882 VMR3GetStateName(enmStateCur), VMR3GetStateName(enmStateOld));
2883 AssertMsgFailed(("%s: %s -> %s failed, state is actually %s\n",
2884 pszWho, VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew), VMR3GetStateName(enmStateCur)));
2885 }
2886
2887 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
2888 va_end(va);
2889 Assert(rc > 0 || rc < 0);
2890 return rc;
2891}
2892
2893
2894/**
2895 * Flag a guru meditation ... a hack.
2896 *
2897 * @param pVM The VM handle
2898 *
2899 * @todo Rewrite this part. The guru meditation should be flagged
2900 * immediately by the VMM and not by VMEmt.cpp when it's all over.
2901 */
2902void vmR3SetGuruMeditation(PVM pVM)
2903{
2904 PUVM pUVM = pVM->pUVM;
2905 RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
2906
2907 VMSTATE enmStateCur = pVM->enmVMState;
2908 if (enmStateCur == VMSTATE_RUNNING)
2909 vmR3SetStateLocked(pVM, pUVM, VMSTATE_GURU_MEDITATION, VMSTATE_RUNNING);
2910 else if (enmStateCur == VMSTATE_RUNNING_LS)
2911 {
2912 vmR3SetStateLocked(pVM, pUVM, VMSTATE_GURU_MEDITATION_LS, VMSTATE_RUNNING_LS);
2913 SSMR3Cancel(pVM);
2914 }
2915
2916 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
2917}
2918
2919
2920/**
2921 * Registers a VM state change callback.
2922 *
2923 * You are not allowed to call any function which changes the VM state from a
2924 * state callback, except VMR3Destroy().
2925 *
2926 * @returns VBox status code.
2927 * @param pVM VM handle.
2928 * @param pfnAtState Pointer to callback.
2929 * @param pvUser User argument.
2930 * @thread Any.
2931 */
2932VMMR3DECL(int) VMR3AtStateRegister(PVM pVM, PFNVMATSTATE pfnAtState, void *pvUser)
2933{
2934 LogFlow(("VMR3AtStateRegister: pfnAtState=%p pvUser=%p\n", pfnAtState, pvUser));
2935
2936 /*
2937 * Validate input.
2938 */
2939 AssertPtrReturn(pfnAtState, VERR_INVALID_PARAMETER);
2940 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
2941
2942 /*
2943 * Allocate a new record.
2944 */
2945 PUVM pUVM = pVM->pUVM;
2946 PVMATSTATE pNew = (PVMATSTATE)MMR3HeapAllocU(pUVM, MM_TAG_VM, sizeof(*pNew));
2947 if (!pNew)
2948 return VERR_NO_MEMORY;
2949
2950 /* fill */
2951 pNew->pfnAtState = pfnAtState;
2952 pNew->pvUser = pvUser;
2953
2954 /* insert */
2955 RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
2956 pNew->pNext = *pUVM->vm.s.ppAtStateNext;
2957 *pUVM->vm.s.ppAtStateNext = pNew;
2958 pUVM->vm.s.ppAtStateNext = &pNew->pNext;
2959 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
2960
2961 return VINF_SUCCESS;
2962}
2963
2964
/**
 * Deregisters a VM state change callback.
 *
 * The registration is identified by the (pfnAtState, pvUser) pair, i.e. the
 * same callback may be registered multiple times with different user data.
 *
 * @returns VBox status code.
 * @retval  VERR_FILE_NOT_FOUND if no matching registration was found.
 * @param   pVM             VM handle.
 * @param   pfnAtState      Pointer to callback.
 * @param   pvUser          User argument.
 * @thread  Any.
 */
VMMR3DECL(int) VMR3AtStateDeregister(PVM pVM, PFNVMATSTATE pfnAtState, void *pvUser)
{
    LogFlow(("VMR3AtStateDeregister: pfnAtState=%p pvUser=%p\n", pfnAtState, pvUser));

    /*
     * Validate input.
     */
    AssertPtrReturn(pfnAtState, VERR_INVALID_PARAMETER);
    VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);

    PUVM pUVM = pVM->pUVM;
    RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);

    /*
     * Search the list for the entry; both the callback pointer and the user
     * argument must match.
     */
    PVMATSTATE pPrev = NULL;
    PVMATSTATE pCur = pUVM->vm.s.pAtState;
    while (     pCur
           &&   (   pCur->pfnAtState != pfnAtState
                 || pCur->pvUser != pvUser))
    {
        pPrev = pCur;
        pCur = pCur->pNext;
    }
    if (!pCur)
    {
        AssertMsgFailed(("pfnAtState=%p was not found\n", pfnAtState));
        RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
        return VERR_FILE_NOT_FOUND;
    }

    /*
     * Unlink it, fixing up the tail pointer (ppAtStateNext) when the last
     * entry is being removed.
     */
    if (pPrev)
    {
        pPrev->pNext = pCur->pNext;
        if (!pCur->pNext)
            pUVM->vm.s.ppAtStateNext = &pPrev->pNext;
    }
    else
    {
        pUVM->vm.s.pAtState = pCur->pNext;
        if (!pCur->pNext)
            pUVM->vm.s.ppAtStateNext = &pUVM->vm.s.pAtState;
    }

    RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);

    /*
     * Free it outside the critsect, poisoning the members first as a cheap
     * use-after-free guard.
     */
    pCur->pfnAtState = NULL;
    pCur->pNext = NULL;
    MMR3HeapFree(pCur);

    return VINF_SUCCESS;
}
3033
3034
/**
 * Registers a VM error callback.
 *
 * Thin wrapper that resolves the user mode VM structure and forwards to
 * VMR3AtErrorRegisterU.
 *
 * @returns VBox status code.
 * @param   pVM             The VM handle.
 * @param   pfnAtError      Pointer to callback.
 * @param   pvUser          User argument.
 * @thread  Any.
 */
VMMR3DECL(int) VMR3AtErrorRegister(PVM pVM, PFNVMATERROR pfnAtError, void *pvUser)
{
    return VMR3AtErrorRegisterU(pVM->pUVM, pfnAtError, pvUser);
}
3048
3049
/**
 * Registers a VM error callback.
 *
 * @returns VBox status code.
 * @param   pUVM            Pointer to the user mode VM structure.
 * @param   pfnAtError      Pointer to callback.
 * @param   pvUser          User argument.
 * @thread  Any.
 */
VMMR3DECL(int) VMR3AtErrorRegisterU(PUVM pUVM, PFNVMATERROR pfnAtError, void *pvUser)
{
    LogFlow(("VMR3AtErrorRegister: pfnAtError=%p pvUser=%p\n", pfnAtError, pvUser));

    /*
     * Validate input.
     */
    AssertPtrReturn(pfnAtError, VERR_INVALID_PARAMETER);
    UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);

    /*
     * Allocate a new record.
     */
    PVMATERROR pNew = (PVMATERROR)MMR3HeapAllocU(pUVM, MM_TAG_VM, sizeof(*pNew));
    if (!pNew)
        return VERR_NO_MEMORY;

    /* fill */
    pNew->pfnAtError = pfnAtError;
    pNew->pvUser     = pvUser;

    /* insert at the tail so callbacks fire in registration order */
    RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
    pNew->pNext = *pUVM->vm.s.ppAtErrorNext;
    *pUVM->vm.s.ppAtErrorNext = pNew;
    pUVM->vm.s.ppAtErrorNext = &pNew->pNext;
    RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);

    return VINF_SUCCESS;
}
3089
3090
/**
 * Deregisters a VM error callback.
 *
 * The registration is identified by the (pfnAtError, pvUser) pair.
 *
 * @returns VBox status code.
 * @retval  VERR_FILE_NOT_FOUND if no matching registration was found.
 * @param   pVM             The VM handle.
 * @param   pfnAtError      Pointer to callback.
 * @param   pvUser          User argument.
 * @thread  Any.
 */
VMMR3DECL(int) VMR3AtErrorDeregister(PVM pVM, PFNVMATERROR pfnAtError, void *pvUser)
{
    LogFlow(("VMR3AtErrorDeregister: pfnAtError=%p pvUser=%p\n", pfnAtError, pvUser));

    /*
     * Validate input.
     */
    AssertPtrReturn(pfnAtError, VERR_INVALID_PARAMETER);
    VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);

    PUVM pUVM = pVM->pUVM;
    RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);

    /*
     * Search the list for the entry; both the callback pointer and the user
     * argument must match.
     */
    PVMATERROR pPrev = NULL;
    PVMATERROR pCur = pUVM->vm.s.pAtError;
    while (     pCur
           &&   (   pCur->pfnAtError != pfnAtError
                 || pCur->pvUser != pvUser))
    {
        pPrev = pCur;
        pCur = pCur->pNext;
    }
    if (!pCur)
    {
        AssertMsgFailed(("pfnAtError=%p was not found\n", pfnAtError));
        RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
        return VERR_FILE_NOT_FOUND;
    }

    /*
     * Unlink it, fixing up the tail pointer (ppAtErrorNext) when the last
     * entry is being removed.
     */
    if (pPrev)
    {
        pPrev->pNext = pCur->pNext;
        if (!pCur->pNext)
            pUVM->vm.s.ppAtErrorNext = &pPrev->pNext;
    }
    else
    {
        pUVM->vm.s.pAtError = pCur->pNext;
        if (!pCur->pNext)
            pUVM->vm.s.ppAtErrorNext = &pUVM->vm.s.pAtError;
    }

    RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);

    /*
     * Free it outside the critsect, poisoning the members first as a cheap
     * use-after-free guard.
     */
    pCur->pfnAtError = NULL;
    pCur->pNext = NULL;
    MMR3HeapFree(pCur);

    return VINF_SUCCESS;
}
3159
3160
/**
 * Ellipsis to va_list wrapper for calling pfnAtError.
 *
 * @param   pVM             The VM handle.
 * @param   pCur            The callback registration to invoke.
 * @param   rc              The VBox status code to report.
 * @param   RT_SRC_POS_DECL The source position of the error.
 * @param   pszFormat       Format string.
 * @param   ...             Format arguments.
 */
static void vmR3SetErrorWorkerDoCall(PVM pVM, PVMATERROR pCur, int rc, RT_SRC_POS_DECL, const char *pszFormat, ...)
{
    va_list va;
    va_start(va, pszFormat);
    pCur->pfnAtError(pVM, pCur->pvUser, rc, RT_SRC_POS_ARGS, pszFormat, va);
    va_end(va);
}
3171
3172
3173/**
3174 * This is a worker function for GC and Ring-0 calls to VMSetError and VMSetErrorV.
3175 * The message is found in VMINT.
3176 *
3177 * @param pVM The VM handle.
3178 * @thread EMT.
3179 */
3180VMMR3DECL(void) VMR3SetErrorWorker(PVM pVM)
3181{
3182 VM_ASSERT_EMT(pVM);
3183 AssertReleaseMsgFailed(("And we have a winner! You get to implement Ring-0 and GC VMSetErrorV! Contrats!\n"));
3184
3185 /*
3186 * Unpack the error (if we managed to format one).
3187 */
3188 PVMERROR pErr = pVM->vm.s.pErrorR3;
3189 const char *pszFile = NULL;
3190 const char *pszFunction = NULL;
3191 uint32_t iLine = 0;
3192 const char *pszMessage;
3193 int32_t rc = VERR_MM_HYPER_NO_MEMORY;
3194 if (pErr)
3195 {
3196 AssertCompile(sizeof(const char) == sizeof(uint8_t));
3197 if (pErr->offFile)
3198 pszFile = (const char *)pErr + pErr->offFile;
3199 iLine = pErr->iLine;
3200 if (pErr->offFunction)
3201 pszFunction = (const char *)pErr + pErr->offFunction;
3202 if (pErr->offMessage)
3203 pszMessage = (const char *)pErr + pErr->offMessage;
3204 else
3205 pszMessage = "No message!";
3206 }
3207 else
3208 pszMessage = "No message! (Failed to allocate memory to put the error message in!)";
3209
3210 /*
3211 * Call the at error callbacks.
3212 */
3213 PUVM pUVM = pVM->pUVM;
3214 RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
3215 for (PVMATERROR pCur = pUVM->vm.s.pAtError; pCur; pCur = pCur->pNext)
3216 vmR3SetErrorWorkerDoCall(pVM, pCur, rc, RT_SRC_POS_ARGS, "%s", pszMessage);
3217 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
3218}
3219
3220
/**
 * Creation time wrapper for vmR3SetErrorUV.
 *
 * @returns rc.
 * @param   pUVM            Pointer to the user mode VM structure.
 * @param   rc              The VBox status code.
 * @param   RT_SRC_POS_DECL The source position of this error.
 * @param   pszFormat       Format string.
 * @param   ...             The arguments.
 * @thread  Any thread.
 */
static int vmR3SetErrorU(PUVM pUVM, int rc, RT_SRC_POS_DECL, const char *pszFormat, ...)
{
    va_list va;
    va_start(va, pszFormat);
    /* pszFile, iLine and pszFunction are the parameters RT_SRC_POS_DECL expands to. */
    vmR3SetErrorUV(pUVM, rc, pszFile, iLine, pszFunction, pszFormat, &va);
    va_end(va);
    return rc;
}
3240
3241
/**
 * Worker which calls everyone listening to the VM error messages.
 *
 * Logs the error (when logging is enabled), copies the message into the
 * shared VM structure if one exists, and then invokes every registered
 * at-error callback.  Each callback gets its own va_copy of the arguments
 * since a va_list may only be traversed once.
 *
 * @param   pUVM            Pointer to the user mode VM structure.
 * @param   rc              The VBox status code.
 * @param   RT_SRC_POS_DECL The source position of this error.
 * @param   pszFormat       Format string.
 * @param   pArgs           Pointer to the format arguments.
 * @thread  EMT
 */
DECLCALLBACK(void) vmR3SetErrorUV(PUVM pUVM, int rc, RT_SRC_POS_DECL, const char *pszFormat, va_list *pArgs)
{
#ifdef LOG_ENABLED
    /*
     * Log the error.
     */
    RTLogPrintf("VMSetError: %s(%d) %s\n", pszFile, iLine, pszFunction);
    va_list va3;
    va_copy(va3, *pArgs);
    RTLogPrintfV(pszFormat, va3);
    va_end(va3);
    RTLogPrintf("\n");
#endif

    /*
     * Make a copy of the message (only possible once the shared VM
     * structure has been created).
     */
    if (pUVM->pVM)
        vmSetErrorCopy(pUVM->pVM, rc, RT_SRC_POS_ARGS, pszFormat, *pArgs);

    /*
     * Call the at error callbacks.
     */
    RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
    for (PVMATERROR pCur = pUVM->vm.s.pAtError; pCur; pCur = pCur->pNext)
    {
        va_list va2;
        va_copy(va2, *pArgs);
        pCur->pfnAtError(pUVM->pVM, pCur->pvUser, rc, RT_SRC_POS_ARGS, pszFormat, va2);
        va_end(va2);
    }
    RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
}
3285
3286
/**
 * Registers a VM runtime error callback.
 *
 * Note that the runtime error list is protected by the same critsect as the
 * (plain) error list, AtErrorCritSect.
 *
 * @returns VBox status code.
 * @param   pVM             The VM handle.
 * @param   pfnAtRuntimeError   Pointer to callback.
 * @param   pvUser          User argument.
 * @thread  Any.
 */
VMMR3DECL(int) VMR3AtRuntimeErrorRegister(PVM pVM, PFNVMATRUNTIMEERROR pfnAtRuntimeError, void *pvUser)
{
    LogFlow(("VMR3AtRuntimeErrorRegister: pfnAtRuntimeError=%p pvUser=%p\n", pfnAtRuntimeError, pvUser));

    /*
     * Validate input.
     */
    AssertPtrReturn(pfnAtRuntimeError, VERR_INVALID_PARAMETER);
    VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);

    /*
     * Allocate a new record.
     */
    PUVM pUVM = pVM->pUVM;
    PVMATRUNTIMEERROR pNew = (PVMATRUNTIMEERROR)MMR3HeapAllocU(pUVM, MM_TAG_VM, sizeof(*pNew));
    if (!pNew)
        return VERR_NO_MEMORY;

    /* fill */
    pNew->pfnAtRuntimeError = pfnAtRuntimeError;
    pNew->pvUser            = pvUser;

    /* insert at the tail so callbacks fire in registration order */
    RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
    pNew->pNext = *pUVM->vm.s.ppAtRuntimeErrorNext;
    *pUVM->vm.s.ppAtRuntimeErrorNext = pNew;
    pUVM->vm.s.ppAtRuntimeErrorNext = &pNew->pNext;
    RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);

    return VINF_SUCCESS;
}
3327
3328
/**
 * Deregisters a VM runtime error callback.
 *
 * The registration is identified by the (pfnAtRuntimeError, pvUser) pair.
 *
 * @returns VBox status code.
 * @retval  VERR_FILE_NOT_FOUND if no matching registration was found.
 * @param   pVM             The VM handle.
 * @param   pfnAtRuntimeError   Pointer to callback.
 * @param   pvUser          User argument.
 * @thread  Any.
 */
VMMR3DECL(int) VMR3AtRuntimeErrorDeregister(PVM pVM, PFNVMATRUNTIMEERROR pfnAtRuntimeError, void *pvUser)
{
    LogFlow(("VMR3AtRuntimeErrorDeregister: pfnAtRuntimeError=%p pvUser=%p\n", pfnAtRuntimeError, pvUser));

    /*
     * Validate input.
     */
    AssertPtrReturn(pfnAtRuntimeError, VERR_INVALID_PARAMETER);
    VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);

    /* The runtime error list shares AtErrorCritSect with the error list. */
    PUVM pUVM = pVM->pUVM;
    RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);

    /*
     * Search the list for the entry; both the callback pointer and the user
     * argument must match.
     */
    PVMATRUNTIMEERROR pPrev = NULL;
    PVMATRUNTIMEERROR pCur = pUVM->vm.s.pAtRuntimeError;
    while (     pCur
           &&   (   pCur->pfnAtRuntimeError != pfnAtRuntimeError
                 || pCur->pvUser != pvUser))
    {
        pPrev = pCur;
        pCur = pCur->pNext;
    }
    if (!pCur)
    {
        AssertMsgFailed(("pfnAtRuntimeError=%p was not found\n", pfnAtRuntimeError));
        RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
        return VERR_FILE_NOT_FOUND;
    }

    /*
     * Unlink it, fixing up the tail pointer (ppAtRuntimeErrorNext) when the
     * last entry is being removed.
     */
    if (pPrev)
    {
        pPrev->pNext = pCur->pNext;
        if (!pCur->pNext)
            pUVM->vm.s.ppAtRuntimeErrorNext = &pPrev->pNext;
    }
    else
    {
        pUVM->vm.s.pAtRuntimeError = pCur->pNext;
        if (!pCur->pNext)
            pUVM->vm.s.ppAtRuntimeErrorNext = &pUVM->vm.s.pAtRuntimeError;
    }

    RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);

    /*
     * Free it outside the critsect, poisoning the members first as a cheap
     * use-after-free guard.
     */
    pCur->pfnAtRuntimeError = NULL;
    pCur->pNext = NULL;
    MMR3HeapFree(pCur);

    return VINF_SUCCESS;
}
3397
3398
/**
 * Worker for VMR3SetRuntimeErrorWorker and vmR3SetRuntimeErrorV.
 *
 * This does the common parts after the error has been saved / retrieved:
 * optionally suspends the VM (before notifying anyone), then invokes all
 * registered runtime error callbacks.
 *
 * @returns VBox status code with modifications, see VMSetRuntimeErrorV.
 *
 * @param   pVM             The VM handle.
 * @param   fFlags          The error flags.
 * @param   pszErrorId      Error ID string.
 * @param   pszFormat       Format string.
 * @param   pVa             Pointer to the format arguments.
 */
static int vmR3SetRuntimeErrorCommon(PVM pVM, uint32_t fFlags, const char *pszErrorId, const char *pszFormat, va_list *pVa)
{
    LogRel(("VM: Raising runtime error '%s' (fFlags=%#x)\n", pszErrorId, fFlags));

    /*
     * Take actions before the call: fatal errors suspend unconditionally,
     * VMSETRTERR_FLAGS_SUSPEND requests a plain suspend.
     */
    int rc = VINF_SUCCESS;
    if (fFlags & VMSETRTERR_FLAGS_FATAL)
        rc = vmR3SuspendCommon(pVM, true /*fFatal*/);
    else if (fFlags & VMSETRTERR_FLAGS_SUSPEND)
        rc = vmR3SuspendCommon(pVM, false /*fFatal*/);

    /*
     * Do the callback round.  Each callback gets its own va_copy since a
     * va_list may only be traversed once.
     */
    PUVM pUVM = pVM->pUVM;
    RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
    for (PVMATRUNTIMEERROR pCur = pUVM->vm.s.pAtRuntimeError; pCur; pCur = pCur->pNext)
    {
        va_list va;
        va_copy(va, *pVa);
        pCur->pfnAtRuntimeError(pVM, pCur->pvUser, fFlags, pszErrorId, pszFormat, va);
        va_end(va);
    }
    RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);

    return rc;
}
3441
3442
/**
 * Ellipsis to va_list wrapper for calling vmR3SetRuntimeErrorCommon.
 *
 * @returns VBox status code, see vmR3SetRuntimeErrorCommon.
 * @param   pVM             The VM handle.
 * @param   fFlags          The error flags.
 * @param   pszErrorId      Error ID string.
 * @param   pszFormat       Format string.
 * @param   ...             Format arguments.
 */
static int vmR3SetRuntimeErrorCommonF(PVM pVM, uint32_t fFlags, const char *pszErrorId, const char *pszFormat, ...)
{
    va_list va;
    va_start(va, pszFormat);
    int rc = vmR3SetRuntimeErrorCommon(pVM, fFlags, pszErrorId, pszFormat, &va);
    va_end(va);
    return rc;
}
3454
3455
/**
 * This is a worker function for RC and Ring-0 calls to VMSetError and
 * VMSetErrorV.
 *
 * The message is found in VMINT.  The saved error is unpacked (the offsets
 * in VMRUNTIMEERROR locate the ID and message strings) and forwarded to the
 * common runtime-error dispatch.
 *
 * @returns VBox status code, see VMSetRuntimeError.
 * @param   pVM             The VM handle.
 * @thread  EMT.
 */
VMMR3DECL(int) VMR3SetRuntimeErrorWorker(PVM pVM)
{
    VM_ASSERT_EMT(pVM);
    AssertReleaseMsgFailed(("And we have a winner! You get to implement Ring-0 and GC VMSetRuntimeErrorV! Congrats!\n"));

    /*
     * Unpack the error (if we managed to format one); defaults are used
     * when no error structure could be allocated.
     */
    const char     *pszErrorId = "SetRuntimeError";
    const char     *pszMessage = "No message!";
    uint32_t        fFlags     = VMSETRTERR_FLAGS_FATAL;
    PVMRUNTIMEERROR pErr       = pVM->vm.s.pRuntimeErrorR3;
    if (pErr)
    {
        AssertCompile(sizeof(const char) == sizeof(uint8_t));
        if (pErr->offErrorId)
            pszErrorId = (const char *)pErr + pErr->offErrorId;
        if (pErr->offMessage)
            pszMessage = (const char *)pErr + pErr->offMessage;
        fFlags = pErr->fFlags;
    }

    /*
     * Join cause with vmR3SetRuntimeErrorV.
     */
    return vmR3SetRuntimeErrorCommonF(pVM, fFlags, pszErrorId, "%s", pszMessage);
}
3493
3494
/**
 * Worker for VMSetRuntimeErrorV for doing the job on EMT in ring-3.
 *
 * Takes ownership of pszMessage: it is freed here after the callbacks have
 * been invoked, so callers must not use it afterwards.
 *
 * @returns VBox status code with modifications, see VMSetRuntimeErrorV.
 *
 * @param   pVM             The VM handle.
 * @param   fFlags          The error flags.
 * @param   pszErrorId      Error ID string.
 * @param   pszMessage      The error message residing the MM heap.
 *
 * @thread  EMT
 */
DECLCALLBACK(int) vmR3SetRuntimeError(PVM pVM, uint32_t fFlags, const char *pszErrorId, char *pszMessage)
{
#if 0 /** @todo make copy of the error msg. */
    /*
     * Make a copy of the message.
     */
    va_list va2;
    va_copy(va2, *pVa);
    vmSetRuntimeErrorCopy(pVM, fFlags, pszErrorId, pszFormat, va2);
    va_end(va2);
#endif

    /*
     * Join paths with VMR3SetRuntimeErrorWorker.
     */
    int rc = vmR3SetRuntimeErrorCommonF(pVM, fFlags, pszErrorId, "%s", pszMessage);
    MMR3HeapFree(pszMessage);
    return rc;
}
3526
3527
/**
 * Worker for VMSetRuntimeErrorV for doing the job on EMT in ring-3.
 *
 * Copies the formatted message into the VM structure (via a va_copy, since
 * the original list is needed again below) and then dispatches to the
 * common runtime-error worker.
 *
 * @returns VBox status code with modifications, see VMSetRuntimeErrorV.
 *
 * @param   pVM             The VM handle.
 * @param   fFlags          The error flags.
 * @param   pszErrorId      Error ID string.
 * @param   pszFormat       Format string.
 * @param   pVa             Pointer to the format arguments.
 *
 * @thread  EMT
 */
DECLCALLBACK(int) vmR3SetRuntimeErrorV(PVM pVM, uint32_t fFlags, const char *pszErrorId, const char *pszFormat, va_list *pVa)
{
    /*
     * Make a copy of the message.
     */
    va_list va2;
    va_copy(va2, *pVa);
    vmSetRuntimeErrorCopy(pVM, fFlags, pszErrorId, pszFormat, va2);
    va_end(va2);

    /*
     * Join paths with VMR3SetRuntimeErrorWorker.
     */
    return vmR3SetRuntimeErrorCommon(pVM, fFlags, pszErrorId, pszFormat, pVa);
}
3556
3557
/**
 * Gets the ID of the virtual CPU associated with the calling thread.
 *
 * @returns The CPU ID. NIL_VMCPUID if the thread isn't an EMT.
 *
 * @param   pVM             The VM handle.
 */
VMMR3DECL(RTCPUID) VMR3GetVMCPUId(PVM pVM)
{
    /* EMTs store their per-CPU structure in the VM's TLS slot. */
    PUVMCPU pUVCpu = (PUVMCPU)RTTlsGet(pVM->pUVM->vm.s.idxTLS);
    return pUVCpu
         ? pUVCpu->idCpu
         : NIL_VMCPUID;
}
3572
3573
3574/**
3575 * Returns the native handle of the current EMT VMCPU thread.
3576 *
3577 * @returns Handle if this is an EMT thread; NIL_RTNATIVETHREAD otherwise
3578 * @param pVM The VM handle.
3579 * @thread EMT
3580 */
3581VMMR3DECL(RTNATIVETHREAD) VMR3GetVMCPUNativeThread(PVM pVM)
3582{
3583 PUVMCPU pUVCpu = (PUVMCPU)RTTlsGet(pVM->pUVM->vm.s.idxTLS);
3584
3585 if (!pUVCpu)
3586 return NIL_RTNATIVETHREAD;
3587
3588 return pUVCpu->vm.s.NativeThreadEMT;
3589}
3590
3591
/**
 * Returns the native handle of the current EMT VMCPU thread.
 *
 * @returns Handle if this is an EMT thread; NIL_RTNATIVETHREAD otherwise
 * @param   pUVM            Pointer to the user mode VM structure.
 * @thread  EMT
 */
VMMR3DECL(RTNATIVETHREAD) VMR3GetVMCPUNativeThreadU(PUVM pUVM)
{
    /* Non-EMT threads have nothing in the TLS slot. */
    PUVMCPU pUVCpu = (PUVMCPU)RTTlsGet(pUVM->vm.s.idxTLS);

    if (!pUVCpu)
        return NIL_RTNATIVETHREAD;

    return pUVCpu->vm.s.NativeThreadEMT;
}
3608
3609
3610/**
3611 * Returns the handle of the current EMT VMCPU thread.
3612 *
3613 * @returns Handle if this is an EMT thread; NIL_RTNATIVETHREAD otherwise
3614 * @param pVM The VM handle.
3615 * @thread EMT
3616 */
3617VMMR3DECL(RTTHREAD) VMR3GetVMCPUThread(PVM pVM)
3618{
3619 PUVMCPU pUVCpu = (PUVMCPU)RTTlsGet(pVM->pUVM->vm.s.idxTLS);
3620
3621 if (!pUVCpu)
3622 return NIL_RTTHREAD;
3623
3624 return pUVCpu->vm.s.ThreadEMT;
3625}
3626
3627
/**
 * Returns the handle of the current EMT VMCPU thread.
 *
 * @returns Handle if this is an EMT thread; NIL_RTTHREAD otherwise
 * @param   pUVM            Pointer to the user mode VM structure.
 * @thread  EMT
 */
VMMR3DECL(RTTHREAD) VMR3GetVMCPUThreadU(PUVM pUVM)
{
    /* Non-EMT threads have nothing in the TLS slot. */
    PUVMCPU pUVCpu = (PUVMCPU)RTTlsGet(pUVM->vm.s.idxTLS);

    if (!pUVCpu)
        return NIL_RTTHREAD;

    return pUVCpu->vm.s.ThreadEMT;
}
3644
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette