VirtualBox

source: vbox/trunk/src/VBox/VMM/VMEmt.cpp@ 13791

Last change on this file since 13791 was 13791, checked in by vboxsync, 16 years ago

Moving data around

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 37.8 KB
Line 
1/* $Id: VMEmt.cpp 13791 2008-11-04 16:12:57Z vboxsync $ */
2/** @file
3 * VM - Virtual Machine, The Emulation Thread.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22
23/*******************************************************************************
24* Header Files *
25*******************************************************************************/
26#define LOG_GROUP LOG_GROUP_VM
27#include <VBox/tm.h>
28#include <VBox/dbgf.h>
29#include <VBox/em.h>
30#include <VBox/pdmapi.h>
31#include <VBox/rem.h>
32#include <VBox/tm.h>
33#include "VMInternal.h"
34#include <VBox/vm.h>
35#include <VBox/uvm.h>
36
37#include <VBox/err.h>
38#include <VBox/log.h>
39#include <iprt/assert.h>
40#include <iprt/asm.h>
41#include <iprt/semaphore.h>
42#include <iprt/string.h>
43#include <iprt/thread.h>
44#include <iprt/time.h>
45
46
/**
 * The emulation thread (EMT) main function.
 *
 * Runs the request loop: services execute-in-EMT requests, debugger forced
 * actions and delayed resets, and calls EMR3ExecuteVM() whenever servicing a
 * request transitions the VM into the RUNNING state.  The loop is left when
 * termination is requested or when VMR3WaitForResume() longjmps back here.
 *
 * @returns Thread exit code.
 * @param   ThreadSelf  The handle to the executing thread.
 * @param   pvArgs      Pointer to the per-EMT user mode CPU structure
 *                      (UVMCPU), not the UVM itself.
 */
DECLCALLBACK(int) vmR3EmulationThread(RTTHREAD ThreadSelf, void *pvArgs)
{
    PUVMCPU pUVMCPU = (PUVMCPU)pvArgs;
    PUVM    pUVM    = pUVMCPU->pUVM;
    RTCPUID idCPU   = pUVMCPU->idCPU; /* NOTE(review): not referenced below in this revision - confirm before removing. */
    int     rc = VINF_SUCCESS;

    AssertReleaseMsg(VALID_PTR(pUVM) && pUVM->u32Magic == UVM_MAGIC,
                     ("Invalid arguments to the emulation thread!\n"));

    /* Publish the UVMCPU via TLS so VMR3WaitForResume() can find it with RTTlsGet(). */
    rc = RTTlsSet(pUVM->vm.s.idxTLS, pUVMCPU);
    AssertReleaseMsgReturn(RT_SUCCESS(rc), ("RTTlsSet %x failed with %Rrc\n", pUVM->vm.s.idxTLS, rc), rc);

    /*
     * The request loop.
     */
    volatile VMSTATE enmBefore = VMSTATE_CREATING; /* volatile because of setjmp */
    Log(("vmR3EmulationThread: Emulation thread starting the days work... Thread=%#x pUVM=%p\n", ThreadSelf, pUVM));
    for (;;)
    {
        /* Requested to exit the EMT thread out of sync? (currently only VMR3WaitForResume) */
        if (setjmp(pUVMCPU->vm.s.emtJumpEnv) != 0)
        {
            rc = VINF_SUCCESS;
            break;
        }

        /*
         * During early init there is no pVM, so make a special path
         * for that to keep things clearly separate.
         */
        if (!pUVM->pVM)
        {
            /*
             * Check for termination first.
             */
            if (pUVM->vm.s.fTerminateEMT)
            {
                rc = VINF_EM_TERMINATE;
                break;
            }
            if (pUVM->vm.s.pReqs)
            {
                /*
                 * Service execute in EMT request.
                 */
                rc = VMR3ReqProcessU(pUVM, VMREQDEST_ANY);
                Log(("vmR3EmulationThread: Req rc=%Vrc, VM state %d -> %d\n", rc, enmBefore, pUVM->pVM ? pUVM->pVM->enmVMState : VMSTATE_CREATING));
            }
            else
            {
                /*
                 * Nothing important is pending, so wait for something.
                 */
                rc = VMR3WaitU(pUVM);
                if (VBOX_FAILURE(rc))
                    break;
            }
        }
        else
        {

            /*
             * Pending requests which needs servicing?
             *
             * We check for state changes in addition to status codes when
             * servicing requests. (Look after the ifs.)
             */
            PVM pVM = pUVM->pVM;
            enmBefore = pVM->enmVMState;
            if (    VM_FF_ISSET(pVM, VM_FF_TERMINATE)
                ||  pUVM->vm.s.fTerminateEMT)
            {
                rc = VINF_EM_TERMINATE;
                break;
            }
            if (pUVM->vm.s.pReqs)
            {
                /*
                 * Service execute in EMT request.
                 */
                rc = VMR3ReqProcessU(pUVM, VMREQDEST_ANY);
                Log(("vmR3EmulationThread: Req rc=%Vrc, VM state %d -> %d\n", rc, enmBefore, pVM->enmVMState));
            }
            else if (VM_FF_ISSET(pVM, VM_FF_DBGF))
            {
                /*
                 * Service the debugger request.
                 */
                rc = DBGFR3VMMForcedAction(pVM);
                Log(("vmR3EmulationThread: Dbg rc=%Vrc, VM state %d -> %d\n", rc, enmBefore, pVM->enmVMState));
            }
            else if (VM_FF_ISSET(pVM, VM_FF_RESET))
            {
                /*
                 * Service a delayed reset request.
                 */
                rc = VMR3Reset(pVM);
                VM_FF_CLEAR(pVM, VM_FF_RESET);
                Log(("vmR3EmulationThread: Reset rc=%Vrc, VM state %d -> %d\n", rc, enmBefore, pVM->enmVMState));
            }
            else
            {
                /*
                 * Nothing important is pending, so wait for something.
                 */
                rc = VMR3WaitU(pUVM);
                if (VBOX_FAILURE(rc))
                    break;
            }

            /*
             * Check for termination requests, these have extremely high priority.
             */
            if (    rc == VINF_EM_TERMINATE
                ||  VM_FF_ISSET(pVM, VM_FF_TERMINATE)
                ||  pUVM->vm.s.fTerminateEMT)
                break;
        }

        /*
         * Some requests (both VMR3Req* and the DBGF) can potentially
         * resume or start the VM, in that case we'll get a change in
         * VM status indicating that we're now running.
         */
        if (    VBOX_SUCCESS(rc)
            &&  pUVM->pVM
            &&  enmBefore != pUVM->pVM->enmVMState
            &&  pUVM->pVM->enmVMState == VMSTATE_RUNNING)
        {
            PVM pVM = pUVM->pVM;
            rc = EMR3ExecuteVM(pVM);
            Log(("vmR3EmulationThread: EMR3ExecuteVM() -> rc=%Vrc, enmVMState=%d\n", rc, pVM->enmVMState));
            /* If EM hit guru meditation but the VM state wasn't updated, reflect it now. */
            if (    EMGetState(pVM) == EMSTATE_GURU_MEDITATION
                &&  pVM->enmVMState == VMSTATE_RUNNING)
                vmR3SetState(pVM, VMSTATE_GURU_MEDITATION);
        }

    } /* forever */


    /*
     * Exiting.
     */
    Log(("vmR3EmulationThread: Terminating emulation thread! Thread=%#x pUVM=%p rc=%Vrc enmBefore=%d enmVMState=%d\n",
         ThreadSelf, pUVM, rc, enmBefore, pUVM->pVM ? pUVM->pVM->enmVMState : VMSTATE_TERMINATED));
    if (pUVM->vm.s.fEMTDoesTheCleanup)
    {
        /* The EMT was asked to perform the (delayed) VM destruction itself. */
        Log(("vmR3EmulationThread: executing delayed Destroy\n"));
        Assert(pUVM->pVM);
        vmR3Destroy(pUVM->pVM);
        vmR3DestroyFinalBitFromEMT(pUVM);
    }
    else
    {
        vmR3DestroyFinalBitFromEMT(pUVM);

        /* Mark this EMT as gone. */
        pUVMCPU->vm.s.NativeThreadEMT = NIL_RTNATIVETHREAD;
    }
    Log(("vmR3EmulationThread: EMT is terminated.\n"));
    return rc;
}
216
217
/**
 * Wait for VM to be resumed. Handle events like vmR3EmulationThread does.
 * In case the VM is stopped, clean up and long jump to the main EMT loop.
 *
 * @returns VINF_SUCCESS, or doesn't return at all: on any other exit
 *          condition it longjmps back into vmR3EmulationThread() via the
 *          jump buffer stored in the calling EMT's UVMCPU.
 * @param   pVM     VM handle.
 */
VMMR3DECL(int) VMR3WaitForResume(PVM pVM)
{
    /*
     * The request loop.
     */
    PUVMCPU pUVMCPU;
    PUVM    pUVM = pVM->pUVM;
    VMSTATE enmBefore;
    int     rc;

    /* Look up the calling EMT's UVMCPU; stored in TLS by vmR3EmulationThread(). */
    pUVMCPU = (PUVMCPU)RTTlsGet(pUVM->vm.s.idxTLS);
    AssertReturn(pUVMCPU, VERR_INTERNAL_ERROR);

    for (;;)
    {

        /*
         * Pending requests which needs servicing?
         *
         * We check for state changes in addition to status codes when
         * servicing requests. (Look after the ifs.)
         */
        enmBefore = pVM->enmVMState;
        if (    VM_FF_ISSET(pVM, VM_FF_TERMINATE)
            ||  pUVM->vm.s.fTerminateEMT)
        {
            rc = VINF_EM_TERMINATE;
            break;
        }
        else if (pUVM->vm.s.pReqs)
        {
            /*
             * Service execute in EMT request.
             */
            rc = VMR3ReqProcessU(pUVM, VMREQDEST_ANY);
            Log(("vmR3EmulationThread: Req rc=%Vrc, VM state %d -> %d\n", rc, enmBefore, pVM->enmVMState));
        }
        else if (VM_FF_ISSET(pVM, VM_FF_DBGF))
        {
            /*
             * Service the debugger request.
             */
            rc = DBGFR3VMMForcedAction(pVM);
            Log(("vmR3EmulationThread: Dbg rc=%Vrc, VM state %d -> %d\n", rc, enmBefore, pVM->enmVMState));
        }
        else if (VM_FF_ISSET(pVM, VM_FF_RESET))
        {
            /*
             * Service a delayed reset request.
             */
            rc = VMR3Reset(pVM);
            VM_FF_CLEAR(pVM, VM_FF_RESET);
            Log(("vmR3EmulationThread: Reset rc=%Vrc, VM state %d -> %d\n", rc, enmBefore, pVM->enmVMState));
        }
        else
        {
            /*
             * Nothing important is pending, so wait for something.
             */
            rc = VMR3WaitU(pUVM);
            if (VBOX_FAILURE(rc))
                break;
        }

        /*
         * Check for termination requests, these are extremely high priority.
         */
        if (    rc == VINF_EM_TERMINATE
            ||  VM_FF_ISSET(pVM, VM_FF_TERMINATE)
            ||  pUVM->vm.s.fTerminateEMT)
            break;

        /*
         * Some requests (both VMR3Req* and the DBGF) can potentially
         * resume or start the VM, in that case we'll get a change in
         * VM status indicating that we're now running.
         */
        if (    VBOX_SUCCESS(rc)
            &&  enmBefore != pVM->enmVMState
            &&  pVM->enmVMState == VMSTATE_RUNNING)
        {
            /* Only valid exit reason. */
            return VINF_SUCCESS;
        }

    } /* forever */

    /* Return to the main loop in vmR3EmulationThread, which will clean up for us. */
    longjmp(pUVMCPU->vm.s.emtJumpEnv, 1);
}
315
316
317/**
318 * Gets the name of a halt method.
319 *
320 * @returns Pointer to a read only string.
321 * @param enmMethod The method.
322 */
323static const char *vmR3GetHaltMethodName(VMHALTMETHOD enmMethod)
324{
325 switch (enmMethod)
326 {
327 case VMHALTMETHOD_BOOTSTRAP: return "bootstrap";
328 case VMHALTMETHOD_DEFAULT: return "default";
329 case VMHALTMETHOD_OLD: return "old";
330 case VMHALTMETHOD_1: return "method1";
331 //case VMHALTMETHOD_2: return "method2";
332 case VMHALTMETHOD_GLOBAL_1: return "global1";
333 default: return "unknown";
334 }
335}
336
337
/**
 * The old halt loop.
 *
 * Works the PDM poll callbacks and timer queues each iteration, then decides
 * between spinning, yielding and blocking on the wait semaphore depending on
 * the estimated time to the next timer event.
 *
 * @returns VBox status code (VERR_INTERNAL_ERROR on unexpected wait failure).
 * @param   pUVM    Pointer to the user mode VM structure.
 * @param   fMask   The force action flags that terminate the halt.
 * @param   u64Now  Current virtual time stamp; unused by this method.
 */
static DECLCALLBACK(int) vmR3HaltOldDoHalt(PUVM pUVM, const uint32_t fMask, uint64_t /* u64Now*/)
{
    /*
     * Halt loop.
     */
    PVM pVM = pUVM->pVM;
    int rc = VINF_SUCCESS;
    ASMAtomicWriteBool(&pUVM->vm.s.fWait, true);
    //unsigned cLoops = 0;
    for (;;)
    {
        /*
         * Work the timers and check if we can exit.
         * The poll call gives us the ticks left to the next event in
         * addition to perhaps set an FF.
         */
        STAM_REL_PROFILE_START(&pUVM->vm.s.StatHaltPoll, a);
        PDMR3Poll(pVM);
        STAM_REL_PROFILE_STOP(&pUVM->vm.s.StatHaltPoll, a);
        STAM_REL_PROFILE_START(&pUVM->vm.s.StatHaltTimers, b);
        TMR3TimerQueuesDo(pVM);
        STAM_REL_PROFILE_STOP(&pUVM->vm.s.StatHaltTimers, b);
        if (VM_FF_ISPENDING(pVM, fMask))
            break;
        uint64_t u64NanoTS = TMVirtualToNano(pVM, TMTimerPoll(pVM));
        if (VM_FF_ISPENDING(pVM, fMask))
            break;

        /*
         * Wait for a while. Someone will wake us up or interrupt the call if
         * anything needs our attention.
         */
        if (u64NanoTS < 50000)
        {
            //RTLogPrintf("u64NanoTS=%RI64 cLoops=%d spin\n", u64NanoTS, cLoops++);
            /* spin */;
        }
        else
        {
            VMMR3YieldStop(pVM);
            //uint64_t u64Start = RTTimeNanoTS();
            if (u64NanoTS < 870000) /* this is a bit speculative... works fine on linux. */
            {
                //RTLogPrintf("u64NanoTS=%RI64 cLoops=%d yield", u64NanoTS, cLoops++);
                STAM_REL_PROFILE_START(&pUVM->vm.s.StatHaltYield, a);
                RTThreadYield(); /* this is the best we can do here */
                STAM_REL_PROFILE_STOP(&pUVM->vm.s.StatHaltYield, a);
            }
            else if (u64NanoTS < 2000000)
            {
                //RTLogPrintf("u64NanoTS=%RI64 cLoops=%d sleep 1ms", u64NanoTS, cLoops++);
                STAM_REL_PROFILE_START(&pUVM->vm.s.StatHaltBlock, a);
                rc = RTSemEventWait(pUVM->vm.s.EventSemWait, 1);
                STAM_REL_PROFILE_STOP(&pUVM->vm.s.StatHaltBlock, a);
            }
            else
            {
                //RTLogPrintf("u64NanoTS=%RI64 cLoops=%d sleep %dms", u64NanoTS, cLoops++, (uint32_t)RT_MIN((u64NanoTS - 500000) / 1000000, 15));
                STAM_REL_PROFILE_START(&pUVM->vm.s.StatHaltBlock, a);
                rc = RTSemEventWait(pUVM->vm.s.EventSemWait, RT_MIN((u64NanoTS - 1000000) / 1000000, 15));
                STAM_REL_PROFILE_STOP(&pUVM->vm.s.StatHaltBlock, a);
            }
            //uint64_t u64Slept = RTTimeNanoTS() - u64Start;
            //RTLogPrintf(" -> rc=%Vrc in %RU64 ns / %RI64 ns delta\n", rc, u64Slept, u64NanoTS - u64Slept);
        }
        if (rc == VERR_TIMEOUT)
            rc = VINF_SUCCESS; /* timing out is the normal way out of the wait */
        else if (VBOX_FAILURE(rc))
        {
            /* NOTE(review): AssertRC on a boolean expression looks like a typo for
               Assert(rc != VERR_INTERRUPTED) - confirm before changing. */
            AssertRC(rc != VERR_INTERRUPTED);
            AssertMsgFailed(("RTSemEventWait->%Vrc\n", rc));
            ASMAtomicUoWriteBool(&pUVM->vm.s.fTerminateEMT, true);
            VM_FF_SET(pVM, VM_FF_TERMINATE);
            rc = VERR_INTERNAL_ERROR;
            break;
        }
    }

    ASMAtomicUoWriteBool(&pUVM->vm.s.fWait, false);
    return rc;
}
424
425
/**
 * Initialize the configuration of halt method 1 & 2.
 *
 * Loads hard-coded defaults, then applies any overrides found under the
 * "/VMM/HaltedMethod1" CFGM node.
 *
 * @return VBox status code. Failure on invalid CFGM data.
 * @param  pUVM     Pointer to the user mode VM structure.
 */
static int vmR3HaltMethod12ReadConfigU(PUVM pUVM)
{
    /*
     * The defaults.
     */
#if 1 /* DEBUGGING STUFF - REMOVE LATER */
    pUVM->vm.s.Halt.Method12.u32LagBlockIntervalDivisorCfg = 4;
    pUVM->vm.s.Halt.Method12.u32MinBlockIntervalCfg = 2*1000000;
    pUVM->vm.s.Halt.Method12.u32MaxBlockIntervalCfg = 75*1000000;
    pUVM->vm.s.Halt.Method12.u32StartSpinningCfg = 30*1000000;
    pUVM->vm.s.Halt.Method12.u32StopSpinningCfg = 20*1000000;
#else
    pUVM->vm.s.Halt.Method12.u32LagBlockIntervalDivisorCfg = 4;
    pUVM->vm.s.Halt.Method12.u32MinBlockIntervalCfg = 5*1000000;
    pUVM->vm.s.Halt.Method12.u32MaxBlockIntervalCfg = 200*1000000;
    pUVM->vm.s.Halt.Method12.u32StartSpinningCfg = 20*1000000;
    pUVM->vm.s.Halt.Method12.u32StopSpinningCfg = 2*1000000;
#endif

    /*
     * Query overrides.
     *
     * I don't have time to bother with niceities such as invalid value checks
     * here right now. sorry.
     */
    PCFGMNODE pCfg = CFGMR3GetChild(CFGMR3GetRoot(pUVM->pVM), "/VMM/HaltedMethod1");
    if (pCfg)
    {
        uint32_t u32;
        if (RT_SUCCESS(CFGMR3QueryU32(pCfg, "LagBlockIntervalDivisor", &u32)))
            pUVM->vm.s.Halt.Method12.u32LagBlockIntervalDivisorCfg = u32;
        if (RT_SUCCESS(CFGMR3QueryU32(pCfg, "MinBlockInterval", &u32)))
            pUVM->vm.s.Halt.Method12.u32MinBlockIntervalCfg = u32;
        if (RT_SUCCESS(CFGMR3QueryU32(pCfg, "MaxBlockInterval", &u32)))
            pUVM->vm.s.Halt.Method12.u32MaxBlockIntervalCfg = u32;
        if (RT_SUCCESS(CFGMR3QueryU32(pCfg, "StartSpinning", &u32)))
            pUVM->vm.s.Halt.Method12.u32StartSpinningCfg = u32;
        if (RT_SUCCESS(CFGMR3QueryU32(pCfg, "StopSpinning", &u32)))
            pUVM->vm.s.Halt.Method12.u32StopSpinningCfg = u32;
        LogRel(("HaltedMethod1 config: %d/%d/%d/%d/%d\n",
                pUVM->vm.s.Halt.Method12.u32LagBlockIntervalDivisorCfg,
                pUVM->vm.s.Halt.Method12.u32MinBlockIntervalCfg,
                pUVM->vm.s.Halt.Method12.u32MaxBlockIntervalCfg,
                pUVM->vm.s.Halt.Method12.u32StartSpinningCfg,
                pUVM->vm.s.Halt.Method12.u32StopSpinningCfg));
    }

    return VINF_SUCCESS;
}
481
482
483/**
484 * Initialize halt method 1.
485 *
486 * @return VBox status code.
487 * @param pUVM Pointer to the user mode VM structure.
488 */
489static DECLCALLBACK(int) vmR3HaltMethod1Init(PUVM pUVM)
490{
491 return vmR3HaltMethod12ReadConfigU(pUVM);
492}
493
494
/**
 * Method 1 - Block whenever possible, and when lagging behind
 * switch to spinning for 10-30ms with occational blocking until
 * the lag has been eliminated.
 *
 * @returns VBox status code (VERR_INTERNAL_ERROR on unexpected wait failure).
 * @param   pUVM    Pointer to the user mode VM structure.
 * @param   fMask   The force action flags that terminate the halt.
 * @param   u64Now  Current virtual time stamp, used for the spin/block bookkeeping.
 */
static DECLCALLBACK(int) vmR3HaltMethod1Halt(PUVM pUVM, const uint32_t fMask, uint64_t u64Now)
{
    PVM pVM = pUVM->pVM;

    /*
     * To simplify things, we decide up-front whether we should switch to spinning or
     * not. This makes some ASSUMPTIONS about the cause of the spinning (PIT/RTC/PCNet)
     * and that it will generate interrupts or other events that will cause us to exit
     * the halt loop.
     */
    bool fBlockOnce = false;
    bool fSpinning = false;
    uint32_t u32CatchUpPct = TMVirtualSyncGetCatchUpPct(pVM);
    if (u32CatchUpPct /* non-zero if catching up */)
    {
        if (pUVM->vm.s.Halt.Method12.u64StartSpinTS)
        {
            /* Already spinning: keep going while the lag stays above the stop threshold. */
            fSpinning = TMVirtualSyncGetLag(pVM) >= pUVM->vm.s.Halt.Method12.u32StopSpinningCfg;
            if (fSpinning)
            {
                /* Allow an occasional block; the interval shrinks as the lag grows. */
                uint64_t u64Lag = TMVirtualSyncGetLag(pVM);
                fBlockOnce = u64Now - pUVM->vm.s.Halt.Method12.u64LastBlockTS
                           > RT_MAX(pUVM->vm.s.Halt.Method12.u32MinBlockIntervalCfg,
                                    RT_MIN(u64Lag / pUVM->vm.s.Halt.Method12.u32LagBlockIntervalDivisorCfg,
                                           pUVM->vm.s.Halt.Method12.u32MaxBlockIntervalCfg));
            }
            else
            {
                //RTLogRelPrintf("Stopped spinning (%u ms)\n", (u64Now - pUVM->vm.s.Halt.Method12.u64StartSpinTS) / 1000000);
                pUVM->vm.s.Halt.Method12.u64StartSpinTS = 0;
            }
        }
        else
        {
            /* Not spinning yet: start once the lag exceeds the start threshold. */
            fSpinning = TMVirtualSyncGetLag(pVM) >= pUVM->vm.s.Halt.Method12.u32StartSpinningCfg;
            if (fSpinning)
                pUVM->vm.s.Halt.Method12.u64StartSpinTS = u64Now;
        }
    }
    else if (pUVM->vm.s.Halt.Method12.u64StartSpinTS)
    {
        //RTLogRelPrintf("Stopped spinning (%u ms)\n", (u64Now - pUVM->vm.s.Halt.Method12.u64StartSpinTS) / 1000000);
        pUVM->vm.s.Halt.Method12.u64StartSpinTS = 0;
    }

    /*
     * Halt loop.
     */
    int rc = VINF_SUCCESS;
    ASMAtomicWriteBool(&pUVM->vm.s.fWait, true);
    unsigned cLoops = 0;
    for (;; cLoops++)
    {
        /*
         * Work the timers and check if we can exit.
         */
        STAM_REL_PROFILE_START(&pUVM->vm.s.StatHaltPoll, a);
        PDMR3Poll(pVM);
        STAM_REL_PROFILE_STOP(&pUVM->vm.s.StatHaltPoll, a);
        STAM_REL_PROFILE_START(&pUVM->vm.s.StatHaltTimers, b);
        TMR3TimerQueuesDo(pVM);
        STAM_REL_PROFILE_STOP(&pUVM->vm.s.StatHaltTimers, b);
        if (VM_FF_ISPENDING(pVM, fMask))
            break;

        /*
         * Estimate time left to the next event.
         */
        uint64_t u64NanoTS = TMVirtualToNano(pVM, TMTimerPoll(pVM));
        if (VM_FF_ISPENDING(pVM, fMask))
            break;

        /*
         * Block if we're not spinning and the interval isn't all that small.
         */
        if (    (   !fSpinning
                 || fBlockOnce)
#if 1 /* DEBUGGING STUFF - REMOVE LATER */
            &&  u64NanoTS >= 100000) /* 0.100 ms */
#else
            &&  u64NanoTS >= 250000) /* 0.250 ms */
#endif
        {
            const uint64_t Start = pUVM->vm.s.Halt.Method12.u64LastBlockTS = RTTimeNanoTS();
            VMMR3YieldStop(pVM);

            /* NOTE(review): cMilliSecs (milliseconds) is compared against
               cNSBlockedTooLongAvg, whose name suggests nanoseconds - verify units. */
            uint32_t cMilliSecs = RT_MIN(u64NanoTS / 1000000, 15);
            if (cMilliSecs <= pUVM->vm.s.Halt.Method12.cNSBlockedTooLongAvg)
                cMilliSecs = 1;
            else
                cMilliSecs -= pUVM->vm.s.Halt.Method12.cNSBlockedTooLongAvg;
            //RTLogRelPrintf("u64NanoTS=%RI64 cLoops=%3d sleep %02dms (%7RU64) ", u64NanoTS, cLoops, cMilliSecs, u64NanoTS);
            STAM_REL_PROFILE_START(&pUVM->vm.s.StatHaltBlock, a);
            rc = RTSemEventWait(pUVM->vm.s.EventSemWait, cMilliSecs);
            STAM_REL_PROFILE_STOP(&pUVM->vm.s.StatHaltBlock, a);
            if (rc == VERR_TIMEOUT)
                rc = VINF_SUCCESS;
            else if (VBOX_FAILURE(rc))
            {
                /* NOTE(review): AssertRC on a boolean expression looks like a typo for
                   Assert(rc != VERR_INTERRUPTED) - confirm before changing. */
                AssertRC(rc != VERR_INTERRUPTED);
                AssertMsgFailed(("RTSemEventWait->%Vrc\n", rc));
                ASMAtomicUoWriteBool(&pUVM->vm.s.fTerminateEMT, true);
                VM_FF_SET(pVM, VM_FF_TERMINATE);
                rc = VERR_INTERNAL_ERROR;
                break;
            }

            /*
             * Calc the statistics.
             * Update averages every 16th time, and flush parts of the history every 64th time.
             */
            const uint64_t Elapsed = RTTimeNanoTS() - Start;
            pUVM->vm.s.Halt.Method12.cNSBlocked += Elapsed;
            if (Elapsed > u64NanoTS)
                pUVM->vm.s.Halt.Method12.cNSBlockedTooLong += Elapsed - u64NanoTS;
            pUVM->vm.s.Halt.Method12.cBlocks++;
            if (!(pUVM->vm.s.Halt.Method12.cBlocks & 0xf))
            {
                pUVM->vm.s.Halt.Method12.cNSBlockedTooLongAvg = pUVM->vm.s.Halt.Method12.cNSBlockedTooLong / pUVM->vm.s.Halt.Method12.cBlocks;
                if (!(pUVM->vm.s.Halt.Method12.cBlocks & 0x3f))
                {
                    pUVM->vm.s.Halt.Method12.cNSBlockedTooLong = pUVM->vm.s.Halt.Method12.cNSBlockedTooLongAvg * 0x40;
                    pUVM->vm.s.Halt.Method12.cBlocks = 0x40;
                }
            }
            //RTLogRelPrintf(" -> %7RU64 ns / %7RI64 ns delta%s\n", Elapsed, Elapsed - u64NanoTS, fBlockOnce ? " (block once)" : "");

            /*
             * Clear the block once flag if we actually blocked.
             */
            if (    fBlockOnce
                &&  Elapsed > 100000 /* 0.1 ms */)
                fBlockOnce = false;
        }
    }
    //if (fSpinning) RTLogRelPrintf("spun for %RU64 ns %u loops; lag=%RU64 pct=%d\n", RTTimeNanoTS() - u64Now, cLoops, TMVirtualSyncGetLag(pVM), u32CatchUpPct);

    ASMAtomicUoWriteBool(&pUVM->vm.s.fWait, false);
    return rc;
}
640
641
/**
 * Initialize the global 1 halt method.
 *
 * Nothing to configure here; this method blocks in ring-0 (see
 * vmR3HaltGlobal1Halt) and keeps no per-VM halt state of its own.
 *
 * @return VBox status code.
 * @param  pUVM     Pointer to the user mode VM structure (unused).
 */
static DECLCALLBACK(int) vmR3HaltGlobal1Init(PUVM pUVM)
{
    return VINF_SUCCESS;
}
652
653
/**
 * The global 1 halt method - Block in GMM (ring-0) and let it
 * try take care of the global scheduling of EMT threads.
 *
 * @returns VBox status code (VERR_INTERNAL_ERROR on unexpected ring-0 failure).
 * @param   pUVM    Pointer to the user mode VM structure.
 * @param   fMask   The force action flags that terminate the halt.
 * @param   u64Now  Current virtual time stamp (only used by the commented-out stats).
 */
static DECLCALLBACK(int) vmR3HaltGlobal1Halt(PUVM pUVM, const uint32_t fMask, uint64_t u64Now)
{
    PVM pVM = pUVM->pVM;

    /*
     * Halt loop.
     */
    int rc = VINF_SUCCESS;
    ASMAtomicWriteBool(&pUVM->vm.s.fWait, true);
    unsigned cLoops = 0;
    for (;; cLoops++)
    {
        /*
         * Work the timers and check if we can exit.
         */
        STAM_REL_PROFILE_START(&pUVM->vm.s.StatHaltPoll, a);
        PDMR3Poll(pVM);
        STAM_REL_PROFILE_STOP(&pUVM->vm.s.StatHaltPoll, a);
        STAM_REL_PROFILE_START(&pUVM->vm.s.StatHaltTimers, b);
        TMR3TimerQueuesDo(pVM);
        STAM_REL_PROFILE_STOP(&pUVM->vm.s.StatHaltTimers, b);
        if (VM_FF_ISPENDING(pVM, fMask))
            break;

        /*
         * Estimate time left to the next event.
         */
        uint64_t u64Delta;
        uint64_t u64GipTime = TMTimerPollGIP(pVM, &u64Delta);
        if (VM_FF_ISPENDING(pVM, fMask))
            break;

        /*
         * Block if we're not spinning and the interval isn't all that small.
         */
        if (u64Delta > 50000 /* 0.050ms */)
        {
            VMMR3YieldStop(pVM);
            if (VM_FF_ISPENDING(pVM, fMask))
                break;

            //RTLogRelPrintf("u64NanoTS=%RI64 cLoops=%3d sleep %02dms (%7RU64) ", u64NanoTS, cLoops, cMilliSecs, u64NanoTS);
            STAM_REL_PROFILE_START(&pUVM->vm.s.StatHaltBlock, c);
            rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_GVMM_SCHED_HALT, u64GipTime, NULL);
            STAM_REL_PROFILE_STOP(&pUVM->vm.s.StatHaltBlock, c);
            if (rc == VERR_INTERRUPTED)
                rc = VINF_SUCCESS; /* being woken up early is the normal case */
            else if (VBOX_FAILURE(rc))
            {
                /* A real ring-0 failure: request EMT termination. */
                AssertMsgFailed(("VMMR0_DO_GVMM_SCHED_HALT->%Vrc\n", rc));
                ASMAtomicUoWriteBool(&pUVM->vm.s.fTerminateEMT, true);
                VM_FF_SET(pVM, VM_FF_TERMINATE);
                rc = VERR_INTERNAL_ERROR;
                break;
            }
        }
        /*
         * When spinning call upon the GVMM and do some wakups once
         * in a while, it's not like we're actually busy or anything.
         */
        else if (!(cLoops & 0x1fff))
        {
            STAM_REL_PROFILE_START(&pUVM->vm.s.StatHaltYield, d);
            rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_GVMM_SCHED_POLL, false /* don't yield */, NULL);
            STAM_REL_PROFILE_STOP(&pUVM->vm.s.StatHaltYield, d);
        }
    }
    //if (fSpinning) RTLogRelPrintf("spun for %RU64 ns %u loops; lag=%RU64 pct=%d\n", RTTimeNanoTS() - u64Now, cLoops, TMVirtualSyncGetLag(pVM), u32CatchUpPct);

    ASMAtomicUoWriteBool(&pUVM->vm.s.fWait, false);
    return rc;
}
730
731
732/**
733 * The global 1 halt method - VMR3Wait() worker.
734 *
735 * @returns VBox status code.
736 * @param pUVM Pointer to the user mode VM structure.
737 */
738static DECLCALLBACK(int) vmR3HaltGlobal1Wait(PUVM pUVM)
739{
740 ASMAtomicWriteBool(&pUVM->vm.s.fWait, true);
741
742 PVM pVM = pUVM->pVM;
743 int rc = VINF_SUCCESS;
744 for (;;)
745 {
746 /*
747 * Check Relevant FFs.
748 */
749 if (VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_SUSPENDED_MASK))
750 break;
751
752 /*
753 * Wait for a while. Someone will wake us up or interrupt the call if
754 * anything needs our attention.
755 */
756 rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_GVMM_SCHED_HALT, RTTimeNanoTS() + 1000000000 /* +1s */, NULL);
757 if (rc == VERR_INTERRUPTED)
758 rc = VINF_SUCCESS;
759 else if (VBOX_FAILURE(rc))
760 {
761 AssertMsgFailed(("RTSemEventWait->%Vrc\n", rc));
762 ASMAtomicUoWriteBool(&pUVM->vm.s.fTerminateEMT, true);
763 VM_FF_SET(pVM, VM_FF_TERMINATE);
764 rc = VERR_INTERNAL_ERROR;
765 break;
766 }
767
768 }
769
770 ASMAtomicUoWriteBool(&pUVM->vm.s.fWait, false);
771 return rc;
772}
773
774
775/**
776 * The global 1 halt method - VMR3NotifyFF() worker.
777 *
778 * @param pUVM Pointer to the user mode VM structure.
779 * @param fNotifiedREM See VMR3NotifyFF().
780 */
781static DECLCALLBACK(void) vmR3HaltGlobal1NotifyFF(PUVM pUVM, bool fNotifiedREM)
782{
783 if (pUVM->vm.s.fWait)
784 {
785 int rc = SUPCallVMMR0Ex(pUVM->pVM->pVMR0, VMMR0_DO_GVMM_SCHED_WAKE_UP, 0, NULL);
786 AssertRC(rc);
787 }
788 else if (!fNotifiedREM)
789 REMR3NotifyFF(pUVM->pVM);
790}
791
792
/**
 * Bootstrap VMR3Wait() worker.
 *
 * Waits on the event semaphore in one second chunks until there is a pending
 * request, a pending external suspended-state force action, or termination
 * has been requested.  Usable before pUVM->pVM exists.
 *
 * @returns VBox status code (VERR_INTERNAL_ERROR on unexpected wait failure).
 * @param   pUVM    Pointer to the user mode VM structure.
 */
static DECLCALLBACK(int) vmR3BootstrapWait(PUVM pUVM)
{
    ASMAtomicWriteBool(&pUVM->vm.s.fWait, true);

    int rc = VINF_SUCCESS;
    for (;;)
    {
        /*
         * Check Relevant FFs.
         */
        if (pUVM->vm.s.pReqs)   /* pending execute-in-EMT request? */
            break;
        if (    pUVM->pVM       /* pVM may not exist yet during bootstrap */
            &&  VM_FF_ISPENDING(pUVM->pVM, VM_FF_EXTERNAL_SUSPENDED_MASK))
            break;
        if (pUVM->vm.s.fTerminateEMT)
            break;

        /*
         * Wait for a while. Someone will wake us up or interrupt the call if
         * anything needs our attention.
         */
        rc = RTSemEventWait(pUVM->vm.s.EventSemWait, 1000);
        if (rc == VERR_TIMEOUT)
            rc = VINF_SUCCESS; /* timing out is the normal way out of the wait */
        else if (VBOX_FAILURE(rc))
        {
            AssertMsgFailed(("RTSemEventWait->%Vrc\n", rc));
            ASMAtomicUoWriteBool(&pUVM->vm.s.fTerminateEMT, true);
            if (pUVM->pVM)
                VM_FF_SET(pUVM->pVM, VM_FF_TERMINATE);
            rc = VERR_INTERNAL_ERROR;
            break;
        }

    }

    ASMAtomicUoWriteBool(&pUVM->vm.s.fWait, false);
    return rc;
}
839
840
841/**
842 * Bootstrap VMR3NotifyFF() worker.
843 *
844 * @param pUVM Pointer to the user mode VM structure.
845 * @param fNotifiedREM See VMR3NotifyFF().
846 */
847static DECLCALLBACK(void) vmR3BootstrapNotifyFF(PUVM pUVM, bool fNotifiedREM)
848{
849 if (pUVM->vm.s.fWait)
850 {
851 int rc = RTSemEventSignal(pUVM->vm.s.EventSemWait);
852 AssertRC(rc);
853 }
854}
855
856
/**
 * Default VMR3Wait() worker.
 *
 * Waits on the event semaphore in one second chunks until an external
 * suspended-state force action is pending.
 *
 * @returns VBox status code (VERR_INTERNAL_ERROR on unexpected wait failure).
 * @param   pUVM    Pointer to the user mode VM structure.
 */
static DECLCALLBACK(int) vmR3DefaultWait(PUVM pUVM)
{
    ASMAtomicWriteBool(&pUVM->vm.s.fWait, true);

    PVM pVM = pUVM->pVM;
    int rc = VINF_SUCCESS;
    for (;;)
    {
        /*
         * Check Relevant FFs.
         */
        if (VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_SUSPENDED_MASK))
            break;

        /*
         * Wait for a while. Someone will wake us up or interrupt the call if
         * anything needs our attention.
         */
        rc = RTSemEventWait(pUVM->vm.s.EventSemWait, 1000);
        if (rc == VERR_TIMEOUT)
            rc = VINF_SUCCESS; /* timing out is the normal way out of the wait */
        else if (VBOX_FAILURE(rc))
        {
            AssertMsgFailed(("RTSemEventWait->%Vrc\n", rc));
            ASMAtomicUoWriteBool(&pUVM->vm.s.fTerminateEMT, true);
            VM_FF_SET(pVM, VM_FF_TERMINATE);
            rc = VERR_INTERNAL_ERROR;
            break;
        }

    }

    ASMAtomicUoWriteBool(&pUVM->vm.s.fWait, false);
    return rc;
}
898
899
900/**
901 * Default VMR3NotifyFF() worker.
902 *
903 * @param pUVM Pointer to the user mode VM structure.
904 * @param fNotifiedREM See VMR3NotifyFF().
905 */
906static DECLCALLBACK(void) vmR3DefaultNotifyFF(PUVM pUVM, bool fNotifiedREM)
907{
908 if (pUVM->vm.s.fWait)
909 {
910 int rc = RTSemEventSignal(pUVM->vm.s.EventSemWait);
911 AssertRC(rc);
912 }
913 else if (!fNotifiedREM)
914 REMR3NotifyFF(pUVM->pVM);
915}
916
917
/**
 * Array with halt method descriptors.
 * VMINT::iHaltMethod contains an index into this array.
 */
static const struct VMHALTMETHODDESC
{
    /** The halt method id. */
    VMHALTMETHOD enmHaltMethod;
    /** The init function for loading config and initialize variables.
     * Optional - vmR3SetHaltMethodU() checks for NULL before calling. */
    DECLR3CALLBACKMEMBER(int, pfnInit,(PUVM pUVM));
    /** The term function.
     * Optional - vmR3SetHaltMethodU() checks for NULL before calling. */
    DECLR3CALLBACKMEMBER(void, pfnTerm,(PUVM pUVM));
    /** The halt function.
     * NOTE(review): NULL for the bootstrap entry, yet VMR3WaitHalted() calls
     * pfnHalt unconditionally - presumably halting never happens during
     * bootstrap; confirm. */
    DECLR3CALLBACKMEMBER(int, pfnHalt,(PUVM pUVM, const uint32_t fMask, uint64_t u64Now));
    /** The wait function. Called unconditionally by VMR3WaitU(). */
    DECLR3CALLBACKMEMBER(int, pfnWait,(PUVM pUVM));
    /** The notifyFF function. Called unconditionally by VMR3NotifyFF[U](). */
    DECLR3CALLBACKMEMBER(void, pfnNotifyFF,(PUVM pUVM, bool fNotifiedREM));
} g_aHaltMethods[] =
{
    { VMHALTMETHOD_BOOTSTRAP, NULL,                NULL, NULL,                vmR3BootstrapWait,   vmR3BootstrapNotifyFF },
    { VMHALTMETHOD_OLD,       NULL,                NULL, vmR3HaltOldDoHalt,   vmR3DefaultWait,     vmR3DefaultNotifyFF },
    { VMHALTMETHOD_1,         vmR3HaltMethod1Init, NULL, vmR3HaltMethod1Halt, vmR3DefaultWait,     vmR3DefaultNotifyFF },
    //{ VMHALTMETHOD_2,       vmR3HaltMethod2Init, vmR3HaltMethod2Term, vmR3HaltMethod2DoHalt, vmR3HaltMethod2Wait, vmR3HaltMethod2NotifyFF },
    { VMHALTMETHOD_GLOBAL_1,  vmR3HaltGlobal1Init, NULL, vmR3HaltGlobal1Halt, vmR3HaltGlobal1Wait, vmR3HaltGlobal1NotifyFF },
};
944
945
946/**
947 * Notify the emulation thread (EMT) about pending Forced Action (FF).
948 *
949 * This function is called by thread other than EMT to make
950 * sure EMT wakes up and promptly service an FF request.
951 *
952 * @param pVM VM handle.
953 * @param fNotifiedREM Set if REM have already been notified. If clear the
954 * generic REMR3NotifyFF() method is called.
955 */
956VMMR3DECL(void) VMR3NotifyFF(PVM pVM, bool fNotifiedREM)
957{
958 LogFlow(("VMR3NotifyFF:\n"));
959 PUVM pUVM = pVM->pUVM;
960 g_aHaltMethods[pUVM->vm.s.iHaltMethod].pfnNotifyFF(pUVM, fNotifiedREM);
961}
962
963
964/**
965 * Notify the emulation thread (EMT) about pending Forced Action (FF).
966 *
967 * This function is called by thread other than EMT to make
968 * sure EMT wakes up and promptly service an FF request.
969 *
970 * @param pUVM Pointer to the user mode VM structure.
971 * @param fNotifiedREM Set if REM have already been notified. If clear the
972 * generic REMR3NotifyFF() method is called.
973 */
974VMMR3DECL(void) VMR3NotifyFFU(PUVM pUVM, bool fNotifiedREM)
975{
976 LogFlow(("VMR3NotifyFF:\n"));
977 g_aHaltMethods[pUVM->vm.s.iHaltMethod].pfnNotifyFF(pUVM, fNotifiedREM);
978}
979
980
/**
 * Halted VM Wait.
 * Any external event will unblock the thread.
 *
 * @returns VINF_SUCCESS unless a fatal error occured. In the latter
 *          case an appropriate status code is returned.
 * @param   pVM                 VM handle.
 * @param   fIgnoreInterrupts   If set the VM_FF_INTERRUPT flags is ignored.
 * @thread  The emulation thread.
 */
VMMR3DECL(int) VMR3WaitHalted(PVM pVM, bool fIgnoreInterrupts)
{
    LogFlow(("VMR3WaitHalted: fIgnoreInterrupts=%d\n", fIgnoreInterrupts));

    /*
     * Check Relevant FFs.
     */
    const uint32_t fMask = !fIgnoreInterrupts
        ? VM_FF_EXTERNAL_HALTED_MASK
        : VM_FF_EXTERNAL_HALTED_MASK & ~(VM_FF_INTERRUPT_APIC | VM_FF_INTERRUPT_PIC);
    if (VM_FF_ISPENDING(pVM, fMask))
    {
        LogFlow(("VMR3WaitHalted: returns VINF_SUCCESS (FF %#x)\n", pVM->fForcedActions));
        return VINF_SUCCESS;
    }

    /*
     * The yielder is suspended while we're halting, while TM might have clock(s) running
     * only at certain times and need to be notified..
     */
    VMMR3YieldSuspend(pVM);
    TMNotifyStartOfHalt(pVM);

    /*
     * Record halt averages for the last second.
     */
    PUVM pUVM = pVM->pUVM;
    uint64_t u64Now = RTTimeNanoTS();
    int64_t off = u64Now - pUVM->vm.s.u64HaltsStartTS;
    if (off > 1000000000 /* measurement window: 1 sec */)
    {
        if (off > _4G || !pUVM->vm.s.cHalts)
        {
            /* Window grew too long (or no halts recorded): fall back to defaults. */
            pUVM->vm.s.HaltInterval = 1000000000 /* 1 sec */;
            pUVM->vm.s.HaltFrequency = 1;
        }
        else
        {
            pUVM->vm.s.HaltInterval = (uint32_t)off / pUVM->vm.s.cHalts;
            pUVM->vm.s.HaltFrequency = ASMMultU64ByU32DivByU32(pUVM->vm.s.cHalts, 1000000000, (uint32_t)off);
        }
        pUVM->vm.s.u64HaltsStartTS = u64Now;
        pUVM->vm.s.cHalts = 0;
    }
    pUVM->vm.s.cHalts++;

    /*
     * Do the halt.
     */
    int rc = g_aHaltMethods[pUVM->vm.s.iHaltMethod].pfnHalt(pUVM, fMask, u64Now);

    /*
     * Notify TM and resume the yielder
     */
    TMNotifyEndOfHalt(pVM);
    VMMR3YieldResume(pVM);

    LogFlow(("VMR3WaitHalted: returns %Vrc (FF %#x)\n", rc, pVM->fForcedActions));
    return rc;
}
1051
1052
1053/**
1054 * Suspended VM Wait.
1055 * Only a handful of forced actions will cause the function to
1056 * return to the caller.
1057 *
1058 * @returns VINF_SUCCESS unless a fatal error occured. In the latter
1059 * case an appropriate status code is returned.
1060 * @param pUVM Pointer to the user mode VM structure.
1061 * @thread The emulation thread.
1062 */
1063VMMR3DECL(int) VMR3WaitU(PUVM pUVM)
1064{
1065 LogFlow(("VMR3WaitU:\n"));
1066
1067 /*
1068 * Check Relevant FFs.
1069 */
1070 PVM pVM = pUVM->pVM;
1071 if ( pVM
1072 && VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_SUSPENDED_MASK))
1073 {
1074 LogFlow(("VMR3Wait: returns VINF_SUCCESS (FF %#x)\n", pVM->fForcedActions));
1075 return VINF_SUCCESS;
1076 }
1077
1078 /*
1079 * Do waiting according to the halt method (so VMR3NotifyFF
1080 * doesn't have to special case anything).
1081 */
1082 int rc = g_aHaltMethods[pUVM->vm.s.iHaltMethod].pfnWait(pUVM);
1083 LogFlow(("VMR3WaitU: returns %Vrc (FF %#x)\n", rc, pVM ? pVM->fForcedActions : 0));
1084 return rc;
1085}
1086
1087
1088/**
1089 * Changes the halt method.
1090 *
1091 * @returns VBox status code.
1092 * @param pUVM Pointer to the user mode VM structure.
1093 * @param enmHaltMethod The new halt method.
1094 * @thread EMT.
1095 */
1096int vmR3SetHaltMethodU(PUVM pUVM, VMHALTMETHOD enmHaltMethod)
1097{
1098 PVM pVM = pUVM->pVM; Assert(pVM);
1099 VM_ASSERT_EMT(pVM);
1100 AssertReturn(enmHaltMethod > VMHALTMETHOD_INVALID && enmHaltMethod < VMHALTMETHOD_END, VERR_INVALID_PARAMETER);
1101
1102 /*
1103 * Resolve default (can be overridden in the configuration).
1104 */
1105 if (enmHaltMethod == VMHALTMETHOD_DEFAULT)
1106 {
1107 uint32_t u32;
1108 int rc = CFGMR3QueryU32(CFGMR3GetChild(CFGMR3GetRoot(pVM), "VM"), "HaltMethod", &u32);
1109 if (VBOX_SUCCESS(rc))
1110 {
1111 enmHaltMethod = (VMHALTMETHOD)u32;
1112 if (enmHaltMethod <= VMHALTMETHOD_INVALID || enmHaltMethod >= VMHALTMETHOD_END)
1113 return VMSetError(pVM, VERR_INVALID_PARAMETER, RT_SRC_POS, N_("Invalid VM/HaltMethod value %d"), enmHaltMethod);
1114 }
1115 else if (rc == VERR_CFGM_VALUE_NOT_FOUND || rc == VERR_CFGM_CHILD_NOT_FOUND)
1116 return VMSetError(pVM, rc, RT_SRC_POS, N_("Failed to Query VM/HaltMethod as uint32_t"));
1117 else
1118 enmHaltMethod = VMHALTMETHOD_GLOBAL_1;
1119 //enmHaltMethod = VMHALTMETHOD_1;
1120 //enmHaltMethod = VMHALTMETHOD_OLD;
1121 }
1122 LogRel(("VM: Halt method %s (%d)\n", vmR3GetHaltMethodName(enmHaltMethod), enmHaltMethod));
1123
1124 /*
1125 * Find the descriptor.
1126 */
1127 unsigned i = 0;
1128 while ( i < RT_ELEMENTS(g_aHaltMethods)
1129 && g_aHaltMethods[i].enmHaltMethod != enmHaltMethod)
1130 i++;
1131 AssertReturn(i < RT_ELEMENTS(g_aHaltMethods), VERR_INVALID_PARAMETER);
1132
1133 /*
1134 * Terminate the old one.
1135 */
1136 if ( pUVM->vm.s.enmHaltMethod != VMHALTMETHOD_INVALID
1137 && g_aHaltMethods[pUVM->vm.s.iHaltMethod].pfnTerm)
1138 {
1139 g_aHaltMethods[pUVM->vm.s.iHaltMethod].pfnTerm(pUVM);
1140 pUVM->vm.s.enmHaltMethod = VMHALTMETHOD_INVALID;
1141 }
1142
1143 /*
1144 * Init the new one.
1145 */
1146 memset(&pUVM->vm.s.Halt, 0, sizeof(pUVM->vm.s.Halt));
1147 if (g_aHaltMethods[i].pfnInit)
1148 {
1149 int rc = g_aHaltMethods[i].pfnInit(pUVM);
1150 AssertRCReturn(rc, rc);
1151 }
1152 pUVM->vm.s.enmHaltMethod = enmHaltMethod;
1153
1154 ASMAtomicWriteU32(&pUVM->vm.s.iHaltMethod, i);
1155 return VINF_SUCCESS;
1156}
1157
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette