VirtualBox

source: vbox/trunk/src/VBox/VMM/VMEmt.cpp@ 6854

Last change on this file since 6854 was 6804, checked in by vboxsync, 17 years ago

Fixed VMR3PowerOn regression.

The problem was that the EMT outer loop wasn't checking for state
changes in the init case (pUVM->pVM == NULL). So, if the VMR3PowerOn
request was queued and executed by the initial VMR3ReqProcessU call
we wouldn't call EMR3ExecuteVM when the VM state changed to running.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 37.1 KB
Line 
1/* $Id: VMEmt.cpp 6804 2008-02-05 10:27:19Z vboxsync $ */
2/** @file
3 * VM - Virtual Machine, The Emulation Thread.
4 */
5
6/*
7 * Copyright (C) 2006-2007 innotek GmbH
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_VM
23#include <VBox/tm.h>
24#include <VBox/dbgf.h>
25#include <VBox/em.h>
26#include <VBox/pdmapi.h>
27#include <VBox/rem.h>
28#include "VMInternal.h"
29#include <VBox/vm.h>
30#include <VBox/uvm.h>
31
32#include <VBox/err.h>
33#include <VBox/log.h>
34#include <iprt/assert.h>
35#include <iprt/asm.h>
36#include <iprt/semaphore.h>
37#include <iprt/string.h>
38#include <iprt/thread.h>
39#include <iprt/time.h>
40
41
42
43
/**
 * The emulation thread.
 *
 * Outer EMT loop: services EMT requests, debugger events and delayed resets
 * until the VM state changes to running, at which point it enters
 * EMR3ExecuteVM(). Keeps looping until termination is requested, then
 * optionally performs the delayed VM destruction before exiting.
 *
 * @returns Thread exit code.
 * @param   ThreadSelf  The handle to the executing thread.
 * @param   pvArgs      Pointer to the user mode VM structure (UVM).
 */
DECLCALLBACK(int) vmR3EmulationThread(RTTHREAD ThreadSelf, void *pvArgs)
{
    PUVM pUVM = (PUVM)pvArgs;
    AssertReleaseMsg(VALID_PTR(pUVM) && pUVM->u32Magic == UVM_MAGIC,
                     ("Invalid arguments to the emulation thread!\n"));

    /*
     * Init the native thread member.
     */
    pUVM->vm.s.NativeThreadEMT = RTThreadGetNative(ThreadSelf);

    /*
     * The request loop.
     */
    int     rc        = VINF_SUCCESS;
    VMSTATE enmBefore = VMSTATE_CREATING;
    Log(("vmR3EmulationThread: Emulation thread starting the days work... Thread=%#x pUVM=%p\n", ThreadSelf, pUVM));
    for (;;)
    {
        /* Requested to exit the EMT thread out of sync? (currently only VMR3WaitForResume)
           The jump buffer is re-armed on every iteration; VMR3WaitForResume longjmps here. */
        if (setjmp(pUVM->vm.s.emtJumpEnv) != 0)
        {
            rc = VINF_SUCCESS;
            break;
        }

        /*
         * During early init there is no pVM, so make a special path
         * for that to keep things clearly separate.
         */
        if (!pUVM->pVM)
        {
            /*
             * Check for termination first.
             */
            if (pUVM->vm.s.fTerminateEMT)
            {
                rc = VINF_EM_TERMINATE;
                break;
            }
            if (pUVM->vm.s.pReqs)
            {
                /*
                 * Service execute in EMT request.
                 */
                rc = VMR3ReqProcessU(pUVM);
                Log(("vmR3EmulationThread: Req rc=%Vrc, VM state %d -> %d\n", rc, enmBefore, pUVM->pVM ? pUVM->pVM->enmVMState : VMSTATE_CREATING));
            }
            else
            {
                /*
                 * Nothing important is pending, so wait for something.
                 */
                rc = VMR3WaitU(pUVM);
                if (VBOX_FAILURE(rc))
                    break;
            }
        }
        else
        {

            /*
             * Pending requests which needs servicing?
             *
             * We check for state changes in addition to status codes when
             * servicing requests. (Look after the ifs.)
             */
            PVM pVM = pUVM->pVM;
            enmBefore = pVM->enmVMState;
            if (    VM_FF_ISSET(pVM, VM_FF_TERMINATE)
                ||  pUVM->vm.s.fTerminateEMT)
            {
                rc = VINF_EM_TERMINATE;
                break;
            }
            if (pUVM->vm.s.pReqs)
            {
                /*
                 * Service execute in EMT request.
                 */
                rc = VMR3ReqProcessU(pUVM);
                Log(("vmR3EmulationThread: Req rc=%Vrc, VM state %d -> %d\n", rc, enmBefore, pVM->enmVMState));
            }
            else if (VM_FF_ISSET(pVM, VM_FF_DBGF))
            {
                /*
                 * Service the debugger request.
                 */
                rc = DBGFR3VMMForcedAction(pVM);
                Log(("vmR3EmulationThread: Dbg rc=%Vrc, VM state %d -> %d\n", rc, enmBefore, pVM->enmVMState));
            }
            else if (VM_FF_ISSET(pVM, VM_FF_RESET))
            {
                /*
                 * Service a delayed reset request.
                 */
                rc = VMR3Reset(pVM);
                VM_FF_CLEAR(pVM, VM_FF_RESET);
                Log(("vmR3EmulationThread: Reset rc=%Vrc, VM state %d -> %d\n", rc, enmBefore, pVM->enmVMState));
            }
            else
            {
                /*
                 * Nothing important is pending, so wait for something.
                 */
                rc = VMR3WaitU(pUVM);
                if (VBOX_FAILURE(rc))
                    break;
            }

            /*
             * Check for termination requests, these have extremely high priority.
             */
            if (    rc == VINF_EM_TERMINATE
                ||  VM_FF_ISSET(pVM, VM_FF_TERMINATE)
                ||  pUVM->vm.s.fTerminateEMT)
                break;
        }

        /*
         * Some requests (both VMR3Req* and the DBGF) can potentially
         * resume or start the VM, in that case we'll get a change in
         * VM status indicating that we're now running.
         *
         * Note: this is checked in the init (!pUVM->pVM) case too, so a
         * power-on request executed by the initial VMR3ReqProcessU call
         * still gets us into EMR3ExecuteVM.
         */
        if (    VBOX_SUCCESS(rc)
            &&  pUVM->pVM
            &&  enmBefore != pUVM->pVM->enmVMState
            &&  pUVM->pVM->enmVMState == VMSTATE_RUNNING)
        {
            PVM pVM = pUVM->pVM;
            rc = EMR3ExecuteVM(pVM);
            Log(("vmR3EmulationThread: EMR3ExecuteVM() -> rc=%Vrc, enmVMState=%d\n", rc, pVM->enmVMState));
            if (EMGetState(pVM) == EMSTATE_GURU_MEDITATION)
                vmR3SetState(pVM, VMSTATE_GURU_MEDITATION);
        }

    } /* forever */


    /*
     * Exiting.
     */
    Log(("vmR3EmulationThread: Terminating emulation thread! Thread=%#x pUVM=%p rc=%Vrc enmBefore=%d enmVMState=%d\n",
         ThreadSelf, pUVM, rc, enmBefore, pUVM->pVM ? pUVM->pVM->enmVMState : VMSTATE_TERMINATED));
    if (pUVM->vm.s.fEMTDoesTheCleanup)
    {
        Log(("vmR3EmulationThread: executing delayed Destroy\n"));
        Assert(pUVM->pVM);
        vmR3Destroy(pUVM->pVM);
        vmR3DestroyFinalBitFromEMT(pUVM);
    }
    else
    {
        vmR3DestroyFinalBitFromEMT(pUVM);

        /* we don't reset ThreadEMT here because it's used in waiting. */
        pUVM->vm.s.NativeThreadEMT = NIL_RTNATIVETHREAD;
    }
    Log(("vmR3EmulationThread: EMT is terminated.\n"));
    return rc;
}
212
213
/**
 * Wait for VM to be resumed. Handle events like vmR3EmulationThread does.
 * In case the VM is stopped, clean up and long jump to the main EMT loop.
 *
 * @returns VINF_SUCCESS when the VM state changed to running, or doesn't
 *          return at all (longjmps back into vmR3EmulationThread via
 *          pUVM->vm.s.emtJumpEnv on termination/stop).
 * @param   pVM     VM handle.
 */
VMR3DECL(int) VMR3WaitForResume(PVM pVM)
{
    /*
     * The request loop.
     */
    PUVM    pUVM = pVM->pUVM;
    VMSTATE enmBefore;
    int     rc;
    for (;;)
    {

        /*
         * Pending requests which needs servicing?
         *
         * We check for state changes in addition to status codes when
         * servicing requests. (Look after the ifs.)
         */
        enmBefore = pVM->enmVMState;
        if (    VM_FF_ISSET(pVM, VM_FF_TERMINATE)
            ||  pUVM->vm.s.fTerminateEMT)
        {
            rc = VINF_EM_TERMINATE;
            break;
        }
        else if (pUVM->vm.s.pReqs)
        {
            /*
             * Service execute in EMT request.
             */
            rc = VMR3ReqProcessU(pUVM);
            Log(("vmR3EmulationThread: Req rc=%Vrc, VM state %d -> %d\n", rc, enmBefore, pVM->enmVMState));
        }
        else if (VM_FF_ISSET(pVM, VM_FF_DBGF))
        {
            /*
             * Service the debugger request.
             */
            rc = DBGFR3VMMForcedAction(pVM);
            Log(("vmR3EmulationThread: Dbg rc=%Vrc, VM state %d -> %d\n", rc, enmBefore, pVM->enmVMState));
        }
        else if (VM_FF_ISSET(pVM, VM_FF_RESET))
        {
            /*
             * Service a delayed reset request.
             */
            rc = VMR3Reset(pVM);
            VM_FF_CLEAR(pVM, VM_FF_RESET);
            Log(("vmR3EmulationThread: Reset rc=%Vrc, VM state %d -> %d\n", rc, enmBefore, pVM->enmVMState));
        }
        else
        {
            /*
             * Nothing important is pending, so wait for something.
             */
            rc = VMR3WaitU(pUVM);
            if (VBOX_FAILURE(rc))
                break;
        }

        /*
         * Check for termination requests, these are extremely high priority.
         */
        if (    rc == VINF_EM_TERMINATE
            ||  VM_FF_ISSET(pVM, VM_FF_TERMINATE)
            ||  pUVM->vm.s.fTerminateEMT)
            break;

        /*
         * Some requests (both VMR3Req* and the DBGF) can potentially
         * resume or start the VM, in that case we'll get a change in
         * VM status indicating that we're now running.
         */
        if (    VBOX_SUCCESS(rc)
            &&  enmBefore != pVM->enmVMState
            &&  pVM->enmVMState == VMSTATE_RUNNING)
        {
            /* Only valid exit reason. */
            return VINF_SUCCESS;
        }

    } /* forever */

    /* Return to the main loop in vmR3EmulationThread, which will clean up for us. */
    longjmp(pUVM->vm.s.emtJumpEnv, 1);
}
306
307
308/**
309 * Gets the name of a halt method.
310 *
311 * @returns Pointer to a read only string.
312 * @param enmMethod The method.
313 */
314static const char *vmR3GetHaltMethodName(VMHALTMETHOD enmMethod)
315{
316 switch (enmMethod)
317 {
318 case VMHALTMETHOD_BOOTSTRAP: return "bootstrap";
319 case VMHALTMETHOD_DEFAULT: return "default";
320 case VMHALTMETHOD_OLD: return "old";
321 case VMHALTMETHOD_1: return "method1";
322 //case VMHALTMETHOD_2: return "method2";
323 case VMHALTMETHOD_GLOBAL_1: return "global1";
324 default: return "unknown";
325 }
326}
327
328
/**
 * The old halt loop.
 *
 * Polls devices and timers, then — depending on how far away the next timer
 * event is — spins, yields or sleeps on the wait semaphore until a forced
 * action in @a fMask becomes pending.
 *
 * @returns VBox status code; VERR_INTERNAL_ERROR on a fatal semaphore wait
 *          failure (EMT termination is flagged in that case).
 * @param   pUVM    Pointer to the user mode VM structure.
 * @param   fMask   Mask of forced actions that ends the halt.
 * @param   u64Now  Current virtual time stamp; unused by this method.
 */
static DECLCALLBACK(int) vmR3HaltOldDoHalt(PUVM pUVM, const uint32_t fMask, uint64_t /* u64Now*/)
{
    /*
     * Halt loop.
     */
    PVM pVM = pUVM->pVM;
    int rc = VINF_SUCCESS;
    ASMAtomicWriteBool(&pUVM->vm.s.fWait, true);
    //unsigned cLoops = 0;
    for (;;)
    {
        /*
         * Work the timers and check if we can exit.
         * The poll call gives us the ticks left to the next event in
         * addition to perhaps set an FF.
         */
        STAM_REL_PROFILE_START(&pUVM->vm.s.StatHaltPoll, a);
        PDMR3Poll(pVM);
        STAM_REL_PROFILE_STOP(&pUVM->vm.s.StatHaltPoll, a);
        STAM_REL_PROFILE_START(&pUVM->vm.s.StatHaltTimers, b);
        TMR3TimerQueuesDo(pVM);
        STAM_REL_PROFILE_STOP(&pUVM->vm.s.StatHaltTimers, b);
        if (VM_FF_ISPENDING(pVM, fMask))
            break;
        uint64_t u64NanoTS = TMVirtualToNano(pVM, TMTimerPoll(pVM));
        if (VM_FF_ISPENDING(pVM, fMask))
            break;

        /*
         * Wait for a while. Someone will wake us up or interrupt the call if
         * anything needs our attention.
         */
        if (u64NanoTS < 50000)
        {
            //RTLogPrintf("u64NanoTS=%RI64 cLoops=%d spin\n", u64NanoTS, cLoops++);
            /* Next event is very close (< 0.05 ms): just spin. */
            /* spin */;
        }
        else
        {
            VMMR3YieldStop(pVM);
            //uint64_t u64Start = RTTimeNanoTS();
            if (u64NanoTS < 870000) /* this is a bit speculative... works fine on linux. */
            {
                //RTLogPrintf("u64NanoTS=%RI64 cLoops=%d yield", u64NanoTS, cLoops++);
                STAM_REL_PROFILE_START(&pUVM->vm.s.StatHaltYield, a);
                RTThreadYield(); /* this is the best we can do here */
                STAM_REL_PROFILE_STOP(&pUVM->vm.s.StatHaltYield, a);
            }
            else if (u64NanoTS < 2000000)
            {
                //RTLogPrintf("u64NanoTS=%RI64 cLoops=%d sleep 1ms", u64NanoTS, cLoops++);
                STAM_REL_PROFILE_START(&pUVM->vm.s.StatHaltBlock, a);
                rc = RTSemEventWait(pUVM->vm.s.EventSemWait, 1);
                STAM_REL_PROFILE_STOP(&pUVM->vm.s.StatHaltBlock, a);
            }
            else
            {
                /* Sleep most of the remaining interval, capped at 15 ms. */
                //RTLogPrintf("u64NanoTS=%RI64 cLoops=%d sleep %dms", u64NanoTS, cLoops++, (uint32_t)RT_MIN((u64NanoTS - 500000) / 1000000, 15));
                STAM_REL_PROFILE_START(&pUVM->vm.s.StatHaltBlock, a);
                rc = RTSemEventWait(pUVM->vm.s.EventSemWait, RT_MIN((u64NanoTS - 1000000) / 1000000, 15));
                STAM_REL_PROFILE_STOP(&pUVM->vm.s.StatHaltBlock, a);
            }
            //uint64_t u64Slept = RTTimeNanoTS() - u64Start;
            //RTLogPrintf(" -> rc=%Vrc in %RU64 ns / %RI64 ns delta\n", rc, u64Slept, u64NanoTS - u64Slept);
        }
        if (rc == VERR_TIMEOUT)
            rc = VINF_SUCCESS;
        else if (VBOX_FAILURE(rc))
        {
            /* Fatal wait failure: flag EMT termination and bail out.
               NOTE(review): AssertRC on a boolean expression looks like it
               was meant to be a plain Assert — confirm. */
            AssertRC(rc != VERR_INTERRUPTED);
            AssertMsgFailed(("RTSemEventWait->%Vrc\n", rc));
            ASMAtomicUoWriteBool(&pUVM->vm.s.fTerminateEMT, true);
            VM_FF_SET(pVM, VM_FF_TERMINATE);
            rc = VERR_INTERNAL_ERROR;
            break;
        }
    }

    ASMAtomicUoWriteBool(&pUVM->vm.s.fWait, false);
    return rc;
}
415
416
/**
 * Initialize the configuration of halt method 1 & 2.
 *
 * Sets hard-coded defaults and then applies any overrides found under the
 * "/VMM/HaltedMethod1" CFGM node.
 *
 * @return  VBox status code. Failure on invalid CFGM data.
 * @param   pUVM    Pointer to the user mode VM structure.
 */
static int vmR3HaltMethod12ReadConfigU(PUVM pUVM)
{
    /*
     * The defaults.
     */
#if 1 /* DEBUGGING STUFF - REMOVE LATER */
    pUVM->vm.s.Halt.Method12.u32LagBlockIntervalDivisorCfg = 4;
    pUVM->vm.s.Halt.Method12.u32MinBlockIntervalCfg =   2*1000000;
    pUVM->vm.s.Halt.Method12.u32MaxBlockIntervalCfg =  75*1000000;
    pUVM->vm.s.Halt.Method12.u32StartSpinningCfg    =  30*1000000;
    pUVM->vm.s.Halt.Method12.u32StopSpinningCfg     =  20*1000000;
#else
    pUVM->vm.s.Halt.Method12.u32LagBlockIntervalDivisorCfg = 4;
    pUVM->vm.s.Halt.Method12.u32MinBlockIntervalCfg =   5*1000000;
    pUVM->vm.s.Halt.Method12.u32MaxBlockIntervalCfg = 200*1000000;
    pUVM->vm.s.Halt.Method12.u32StartSpinningCfg    =  20*1000000;
    pUVM->vm.s.Halt.Method12.u32StopSpinningCfg     =   2*1000000;
#endif

    /*
     * Query overrides.
     *
     * I don't have time to bother with niceities such as invalid value checks
     * here right now. sorry.
     */
    PCFGMNODE pCfg = CFGMR3GetChild(CFGMR3GetRoot(pUVM->pVM), "/VMM/HaltedMethod1");
    if (pCfg)
    {
        uint32_t u32;
        if (RT_SUCCESS(CFGMR3QueryU32(pCfg, "LagBlockIntervalDivisor", &u32)))
            pUVM->vm.s.Halt.Method12.u32LagBlockIntervalDivisorCfg = u32;
        if (RT_SUCCESS(CFGMR3QueryU32(pCfg, "MinBlockInterval", &u32)))
            pUVM->vm.s.Halt.Method12.u32MinBlockIntervalCfg = u32;
        if (RT_SUCCESS(CFGMR3QueryU32(pCfg, "MaxBlockInterval", &u32)))
            pUVM->vm.s.Halt.Method12.u32MaxBlockIntervalCfg = u32;
        if (RT_SUCCESS(CFGMR3QueryU32(pCfg, "StartSpinning", &u32)))
            pUVM->vm.s.Halt.Method12.u32StartSpinningCfg = u32;
        if (RT_SUCCESS(CFGMR3QueryU32(pCfg, "StopSpinning", &u32)))
            pUVM->vm.s.Halt.Method12.u32StopSpinningCfg = u32;
        LogRel(("HaltedMethod1 config: %d/%d/%d/%d/%d\n",
                pUVM->vm.s.Halt.Method12.u32LagBlockIntervalDivisorCfg,
                pUVM->vm.s.Halt.Method12.u32MinBlockIntervalCfg,
                pUVM->vm.s.Halt.Method12.u32MaxBlockIntervalCfg,
                pUVM->vm.s.Halt.Method12.u32StartSpinningCfg,
                pUVM->vm.s.Halt.Method12.u32StopSpinningCfg));
    }

    return VINF_SUCCESS;
}
472
473
474/**
475 * Initialize halt method 1.
476 *
477 * @return VBox status code.
478 * @param pUVM Pointer to the user mode VM structure.
479 */
480static DECLCALLBACK(int) vmR3HaltMethod1Init(PUVM pUVM)
481{
482 return vmR3HaltMethod12ReadConfigU(pUVM);
483}
484
485
/**
 * Method 1 - Block whenever possible, and when lagging behind
 * switch to spinning for 10-30ms with occational blocking until
 * the lag has been eliminated.
 *
 * @returns VBox status code; VERR_INTERNAL_ERROR on a fatal semaphore wait
 *          failure (EMT termination is flagged in that case).
 * @param   pUVM    Pointer to the user mode VM structure.
 * @param   fMask   Mask of forced actions that ends the halt.
 * @param   u64Now  Current time stamp, used for the spin/block bookkeeping.
 */
static DECLCALLBACK(int) vmR3HaltMethod1Halt(PUVM pUVM, const uint32_t fMask, uint64_t u64Now)
{
    PVM pVM = pUVM->pVM;

    /*
     * To simplify things, we decide up-front whether we should switch to spinning or
     * not. This makes some ASSUMPTIONS about the cause of the spinning (PIT/RTC/PCNet)
     * and that it will generate interrupts or other events that will cause us to exit
     * the halt loop.
     */
    bool fBlockOnce = false;
    bool fSpinning = false;
    uint32_t u32CatchUpPct = TMVirtualSyncGetCatchUpPct(pVM);
    if (u32CatchUpPct /* non-zero if catching up */)
    {
        if (pUVM->vm.s.Halt.Method12.u64StartSpinTS)
        {
            /* Already spinning: keep going while the lag exceeds the stop threshold. */
            fSpinning = TMVirtualSyncGetLag(pVM) >= pUVM->vm.s.Halt.Method12.u32StopSpinningCfg;
            if (fSpinning)
            {
                /* While spinning, block once in a while; the interval grows
                   with the lag, clamped between the min/max config values. */
                uint64_t u64Lag = TMVirtualSyncGetLag(pVM);
                fBlockOnce = u64Now - pUVM->vm.s.Halt.Method12.u64LastBlockTS
                           > RT_MAX(pUVM->vm.s.Halt.Method12.u32MinBlockIntervalCfg,
                                    RT_MIN(u64Lag / pUVM->vm.s.Halt.Method12.u32LagBlockIntervalDivisorCfg,
                                           pUVM->vm.s.Halt.Method12.u32MaxBlockIntervalCfg));
            }
            else
            {
                //RTLogRelPrintf("Stopped spinning (%u ms)\n", (u64Now - pUVM->vm.s.Halt.Method12.u64StartSpinTS) / 1000000);
                pUVM->vm.s.Halt.Method12.u64StartSpinTS = 0;
            }
        }
        else
        {
            /* Not spinning yet: start when the lag exceeds the start threshold. */
            fSpinning = TMVirtualSyncGetLag(pVM) >= pUVM->vm.s.Halt.Method12.u32StartSpinningCfg;
            if (fSpinning)
                pUVM->vm.s.Halt.Method12.u64StartSpinTS = u64Now;
        }
    }
    else if (pUVM->vm.s.Halt.Method12.u64StartSpinTS)
    {
        //RTLogRelPrintf("Stopped spinning (%u ms)\n", (u64Now - pUVM->vm.s.Halt.Method12.u64StartSpinTS) / 1000000);
        pUVM->vm.s.Halt.Method12.u64StartSpinTS = 0;
    }

    /*
     * Halt loop.
     */
    int rc = VINF_SUCCESS;
    ASMAtomicWriteBool(&pUVM->vm.s.fWait, true);
    unsigned cLoops = 0;
    for (;; cLoops++)
    {
        /*
         * Work the timers and check if we can exit.
         */
        STAM_REL_PROFILE_START(&pUVM->vm.s.StatHaltPoll, a);
        PDMR3Poll(pVM);
        STAM_REL_PROFILE_STOP(&pUVM->vm.s.StatHaltPoll, a);
        STAM_REL_PROFILE_START(&pUVM->vm.s.StatHaltTimers, b);
        TMR3TimerQueuesDo(pVM);
        STAM_REL_PROFILE_STOP(&pUVM->vm.s.StatHaltTimers, b);
        if (VM_FF_ISPENDING(pVM, fMask))
            break;

        /*
         * Estimate time left to the next event.
         */
        uint64_t u64NanoTS = TMVirtualToNano(pVM, TMTimerPoll(pVM));
        if (VM_FF_ISPENDING(pVM, fMask))
            break;

        /*
         * Block if we're not spinning and the interval isn't all that small.
         */
        if (    (   !fSpinning
                 || fBlockOnce)
#if 1 /* DEBUGGING STUFF - REMOVE LATER */
            &&  u64NanoTS >= 100000) /* 0.100 ms */
#else
            &&  u64NanoTS >= 250000) /* 0.250 ms */
#endif
        {
            const uint64_t Start = pUVM->vm.s.Halt.Method12.u64LastBlockTS = RTTimeNanoTS();
            VMMR3YieldStop(pVM);

            /* Cap the sleep at 15 ms and shave off the average oversleep.
               NOTE(review): cNSBlockedTooLongAvg is a nanosecond quantity but
               is compared/subtracted against milliseconds here — confirm. */
            uint32_t cMilliSecs = RT_MIN(u64NanoTS / 1000000, 15);
            if (cMilliSecs <= pUVM->vm.s.Halt.Method12.cNSBlockedTooLongAvg)
                cMilliSecs = 1;
            else
                cMilliSecs -= pUVM->vm.s.Halt.Method12.cNSBlockedTooLongAvg;
            //RTLogRelPrintf("u64NanoTS=%RI64 cLoops=%3d sleep %02dms (%7RU64) ", u64NanoTS, cLoops, cMilliSecs, u64NanoTS);
            STAM_REL_PROFILE_START(&pUVM->vm.s.StatHaltBlock, a);
            rc = RTSemEventWait(pUVM->vm.s.EventSemWait, cMilliSecs);
            STAM_REL_PROFILE_STOP(&pUVM->vm.s.StatHaltBlock, a);
            if (rc == VERR_TIMEOUT)
                rc = VINF_SUCCESS;
            else if (VBOX_FAILURE(rc))
            {
                /* Fatal wait failure: flag EMT termination and bail out. */
                AssertRC(rc != VERR_INTERRUPTED);
                AssertMsgFailed(("RTSemEventWait->%Vrc\n", rc));
                ASMAtomicUoWriteBool(&pUVM->vm.s.fTerminateEMT, true);
                VM_FF_SET(pVM, VM_FF_TERMINATE);
                rc = VERR_INTERNAL_ERROR;
                break;
            }

            /*
             * Calc the statistics.
             * Update averages every 16th time, and flush parts of the history every 64th time.
             */
            const uint64_t Elapsed = RTTimeNanoTS() - Start;
            pUVM->vm.s.Halt.Method12.cNSBlocked += Elapsed;
            if (Elapsed > u64NanoTS)
                pUVM->vm.s.Halt.Method12.cNSBlockedTooLong += Elapsed - u64NanoTS;
            pUVM->vm.s.Halt.Method12.cBlocks++;
            if (!(pUVM->vm.s.Halt.Method12.cBlocks & 0xf))
            {
                pUVM->vm.s.Halt.Method12.cNSBlockedTooLongAvg = pUVM->vm.s.Halt.Method12.cNSBlockedTooLong / pUVM->vm.s.Halt.Method12.cBlocks;
                if (!(pUVM->vm.s.Halt.Method12.cBlocks & 0x3f))
                {
                    pUVM->vm.s.Halt.Method12.cNSBlockedTooLong = pUVM->vm.s.Halt.Method12.cNSBlockedTooLongAvg * 0x40;
                    pUVM->vm.s.Halt.Method12.cBlocks = 0x40;
                }
            }
            //RTLogRelPrintf(" -> %7RU64 ns / %7RI64 ns delta%s\n", Elapsed, Elapsed - u64NanoTS, fBlockOnce ? " (block once)" : "");

            /*
             * Clear the block once flag if we actually blocked.
             */
            if (    fBlockOnce
                &&  Elapsed > 100000 /* 0.1 ms */)
                fBlockOnce = false;
        }
    }
    //if (fSpinning) RTLogRelPrintf("spun for %RU64 ns %u loops; lag=%RU64 pct=%d\n", RTTimeNanoTS() - u64Now, cLoops, TMVirtualSyncGetLag(pVM), u32CatchUpPct);

    ASMAtomicUoWriteBool(&pUVM->vm.s.fWait, false);
    return rc;
}
631
632
633/**
634 * Initialize the global 1 halt method.
635 *
636 * @return VBox status code.
637 * @param pUVM Pointer to the user mode VM structure.
638 */
639static DECLCALLBACK(int) vmR3HaltGlobal1Init(PUVM pUVM)
640{
641 return VINF_SUCCESS;
642}
643
644
/**
 * The global 1 halt method - Block in GMM (ring-0) and let it
 * try take care of the global scheduling of EMT threads.
 *
 * @returns VBox status code; VERR_INTERNAL_ERROR on a fatal ring-0 call
 *          failure (EMT termination is flagged in that case).
 * @param   pUVM    Pointer to the user mode VM structure.
 * @param   fMask   Mask of forced actions that ends the halt.
 * @param   u64Now  Current time stamp; unused by this method.
 */
static DECLCALLBACK(int) vmR3HaltGlobal1Halt(PUVM pUVM, const uint32_t fMask, uint64_t u64Now)
{
    PVM pVM = pUVM->pVM;

    /*
     * Halt loop.
     */
    int rc = VINF_SUCCESS;
    ASMAtomicWriteBool(&pUVM->vm.s.fWait, true);
    unsigned cLoops = 0;
    for (;; cLoops++)
    {
        /*
         * Work the timers and check if we can exit.
         */
        STAM_REL_PROFILE_START(&pUVM->vm.s.StatHaltPoll, a);
        PDMR3Poll(pVM);
        STAM_REL_PROFILE_STOP(&pUVM->vm.s.StatHaltPoll, a);
        STAM_REL_PROFILE_START(&pUVM->vm.s.StatHaltTimers, b);
        TMR3TimerQueuesDo(pVM);
        STAM_REL_PROFILE_STOP(&pUVM->vm.s.StatHaltTimers, b);
        if (VM_FF_ISPENDING(pVM, fMask))
            break;

        /*
         * Estimate time left to the next event.
         */
        uint64_t u64Delta;
        uint64_t u64GipTime = TMTimerPollGIP(pVM, &u64Delta);
        if (VM_FF_ISPENDING(pVM, fMask))
            break;

        /*
         * Block if we're not spinning and the interval isn't all that small.
         */
        if (u64Delta > 50000 /* 0.050ms */)
        {
            VMMR3YieldStop(pVM);
            if (VM_FF_ISPENDING(pVM, fMask))
                break;

            /* Let GVMM in ring-0 do the blocking until u64GipTime. */
            //RTLogRelPrintf("u64NanoTS=%RI64 cLoops=%3d sleep %02dms (%7RU64) ", u64NanoTS, cLoops, cMilliSecs, u64NanoTS);
            STAM_REL_PROFILE_START(&pUVM->vm.s.StatHaltBlock, c);
            rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_GVMM_SCHED_HALT, u64GipTime, NULL);
            STAM_REL_PROFILE_STOP(&pUVM->vm.s.StatHaltBlock, c);
            if (rc == VERR_INTERRUPTED)
                rc = VINF_SUCCESS;
            else if (VBOX_FAILURE(rc))
            {
                /* Fatal ring-0 failure: flag EMT termination and bail out. */
                AssertMsgFailed(("VMMR0_DO_GVMM_SCHED_HALT->%Vrc\n", rc));
                ASMAtomicUoWriteBool(&pUVM->vm.s.fTerminateEMT, true);
                VM_FF_SET(pVM, VM_FF_TERMINATE);
                rc = VERR_INTERNAL_ERROR;
                break;
            }
        }
        /*
         * When spinning call upon the GVMM and do some wakups once
         * in a while, it's not like we're actually busy or anything.
         */
        else if (!(cLoops & 0x1fff))
        {
            STAM_REL_PROFILE_START(&pUVM->vm.s.StatHaltYield, d);
            rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_GVMM_SCHED_POLL, false /* don't yield */, NULL);
            STAM_REL_PROFILE_STOP(&pUVM->vm.s.StatHaltYield, d);
        }
    }
    //if (fSpinning) RTLogRelPrintf("spun for %RU64 ns %u loops; lag=%RU64 pct=%d\n", RTTimeNanoTS() - u64Now, cLoops, TMVirtualSyncGetLag(pVM), u32CatchUpPct);

    ASMAtomicUoWriteBool(&pUVM->vm.s.fWait, false);
    return rc;
}
721
722
/**
 * The global 1 halt method - VMR3Wait() worker.
 *
 * Blocks in ring-0 (GVMM) with a one second wake-up interval until one of
 * the external-suspended forced actions becomes pending.
 *
 * @returns VBox status code; VERR_INTERNAL_ERROR on a fatal ring-0 call
 *          failure (EMT termination is flagged in that case).
 * @param   pUVM    Pointer to the user mode VM structure.
 */
static DECLCALLBACK(int) vmR3HaltGlobal1Wait(PUVM pUVM)
{
    ASMAtomicWriteBool(&pUVM->vm.s.fWait, true);

    PVM pVM = pUVM->pVM;
    int rc = VINF_SUCCESS;
    for (;;)
    {
        /*
         * Check Relevant FFs.
         */
        if (VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_SUSPENDED_MASK))
            break;

        /*
         * Wait for a while. Someone will wake us up or interrupt the call if
         * anything needs our attention.
         */
        rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_GVMM_SCHED_HALT, RTTimeNanoTS() + 1000000000 /* +1s */, NULL);
        if (rc == VERR_INTERRUPTED)
            rc = VINF_SUCCESS;
        else if (VBOX_FAILURE(rc))
        {
            /* Fatal ring-0 failure: flag EMT termination and bail out. */
            AssertMsgFailed(("RTSemEventWait->%Vrc\n", rc));
            ASMAtomicUoWriteBool(&pUVM->vm.s.fTerminateEMT, true);
            VM_FF_SET(pVM, VM_FF_TERMINATE);
            rc = VERR_INTERNAL_ERROR;
            break;
        }

    }

    ASMAtomicUoWriteBool(&pUVM->vm.s.fWait, false);
    return rc;
}
764
765
766/**
767 * The global 1 halt method - VMR3NotifyFF() worker.
768 *
769 * @param pUVM Pointer to the user mode VM structure.
770 * @param fNotifiedREM See VMR3NotifyFF().
771 */
772static DECLCALLBACK(void) vmR3HaltGlobal1NotifyFF(PUVM pUVM, bool fNotifiedREM)
773{
774 if (pUVM->vm.s.fWait)
775 {
776 int rc = SUPCallVMMR0Ex(pUVM->pVM->pVMR0, VMMR0_DO_GVMM_SCHED_WAKE_UP, 0, NULL);
777 AssertRC(rc);
778 }
779 else if (!fNotifiedREM)
780 REMR3NotifyFF(pUVM->pVM);
781}
782
783
/**
 * Bootstrap VMR3Wait() worker.
 *
 * Used before/while pUVM->pVM may not exist yet: waits on the event
 * semaphore in one second intervals until a request, a relevant forced
 * action (when a VM exists) or EMT termination is pending.
 *
 * @returns VBox status code; VERR_INTERNAL_ERROR on a fatal semaphore wait
 *          failure (EMT termination is flagged in that case).
 * @param   pUVM    Pointer to the user mode VM structure.
 */
static DECLCALLBACK(int) vmR3BootstrapWait(PUVM pUVM)
{
    ASMAtomicWriteBool(&pUVM->vm.s.fWait, true);

    int rc = VINF_SUCCESS;
    for (;;)
    {
        /*
         * Check Relevant FFs.
         */
        if (pUVM->vm.s.pReqs)
            break;
        if (    pUVM->pVM
            &&  VM_FF_ISPENDING(pUVM->pVM, VM_FF_EXTERNAL_SUSPENDED_MASK))
            break;
        if (pUVM->vm.s.fTerminateEMT)
            break;

        /*
         * Wait for a while. Someone will wake us up or interrupt the call if
         * anything needs our attention.
         */
        rc = RTSemEventWait(pUVM->vm.s.EventSemWait, 1000);
        if (rc == VERR_TIMEOUT)
            rc = VINF_SUCCESS;
        else if (VBOX_FAILURE(rc))
        {
            /* Fatal wait failure: flag EMT termination and bail out. */
            AssertMsgFailed(("RTSemEventWait->%Vrc\n", rc));
            ASMAtomicUoWriteBool(&pUVM->vm.s.fTerminateEMT, true);
            if (pUVM->pVM)
                VM_FF_SET(pUVM->pVM, VM_FF_TERMINATE);
            rc = VERR_INTERNAL_ERROR;
            break;
        }

    }

    ASMAtomicUoWriteBool(&pUVM->vm.s.fWait, false);
    return rc;
}
830
831
832/**
833 * Bootstrap VMR3NotifyFF() worker.
834 *
835 * @param pUVM Pointer to the user mode VM structure.
836 * @param fNotifiedREM See VMR3NotifyFF().
837 */
838static DECLCALLBACK(void) vmR3BootstrapNotifyFF(PUVM pUVM, bool fNotifiedREM)
839{
840 if (pUVM->vm.s.fWait)
841 {
842 int rc = RTSemEventSignal(pUVM->vm.s.EventSemWait);
843 AssertRC(rc);
844 }
845}
846
847
848
/**
 * Default VMR3Wait() worker.
 *
 * Waits on the event semaphore in one second intervals until one of the
 * external-suspended forced actions becomes pending.
 *
 * @returns VBox status code; VERR_INTERNAL_ERROR on a fatal semaphore wait
 *          failure (EMT termination is flagged in that case).
 * @param   pUVM    Pointer to the user mode VM structure.
 */
static DECLCALLBACK(int) vmR3DefaultWait(PUVM pUVM)
{
    ASMAtomicWriteBool(&pUVM->vm.s.fWait, true);

    PVM pVM = pUVM->pVM;
    int rc = VINF_SUCCESS;
    for (;;)
    {
        /*
         * Check Relevant FFs.
         */
        if (VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_SUSPENDED_MASK))
            break;

        /*
         * Wait for a while. Someone will wake us up or interrupt the call if
         * anything needs our attention.
         */
        rc = RTSemEventWait(pUVM->vm.s.EventSemWait, 1000);
        if (rc == VERR_TIMEOUT)
            rc = VINF_SUCCESS;
        else if (VBOX_FAILURE(rc))
        {
            /* Fatal wait failure: flag EMT termination and bail out. */
            AssertMsgFailed(("RTSemEventWait->%Vrc\n", rc));
            ASMAtomicUoWriteBool(&pUVM->vm.s.fTerminateEMT, true);
            VM_FF_SET(pVM, VM_FF_TERMINATE);
            rc = VERR_INTERNAL_ERROR;
            break;
        }

    }

    ASMAtomicUoWriteBool(&pUVM->vm.s.fWait, false);
    return rc;
}
890
891
892/**
893 * Default VMR3NotifyFF() worker.
894 *
895 * @param pUVM Pointer to the user mode VM structure.
896 * @param fNotifiedREM See VMR3NotifyFF().
897 */
898static DECLCALLBACK(void) vmR3DefaultNotifyFF(PUVM pUVM, bool fNotifiedREM)
899{
900 if (pUVM->vm.s.fWait)
901 {
902 int rc = RTSemEventSignal(pUVM->vm.s.EventSemWait);
903 AssertRC(rc);
904 }
905 else if (!fNotifiedREM)
906 REMR3NotifyFF(pUVM->pVM);
907}
908
909
/**
 * Array with halt method descriptors.
 * VMINT::iHaltMethod contains an index into this array.
 *
 * Each entry bundles the callbacks for one halt strategy; NULL entries mean
 * the method has no work to do for that operation.
 */
static const struct VMHALTMETHODDESC
{
    /** The halt method id. */
    VMHALTMETHOD enmHaltMethod;
    /** The init function for loading config and initialize variables. */
    DECLR3CALLBACKMEMBER(int, pfnInit,(PUVM pUVM));
    /** The term function. */
    DECLR3CALLBACKMEMBER(void, pfnTerm,(PUVM pUVM));
    /** The halt function. */
    DECLR3CALLBACKMEMBER(int, pfnHalt,(PUVM pUVM, const uint32_t fMask, uint64_t u64Now));
    /** The wait function. */
    DECLR3CALLBACKMEMBER(int, pfnWait,(PUVM pUVM));
    /** The notifyFF function. */
    DECLR3CALLBACKMEMBER(void, pfnNotifyFF,(PUVM pUVM, bool fNotifiedREM));
} g_aHaltMethods[] =
{
    { VMHALTMETHOD_BOOTSTRAP, NULL,                NULL,   NULL,                vmR3BootstrapWait,   vmR3BootstrapNotifyFF },
    { VMHALTMETHOD_OLD,       NULL,                NULL,   vmR3HaltOldDoHalt,   vmR3DefaultWait,     vmR3DefaultNotifyFF },
    { VMHALTMETHOD_1,         vmR3HaltMethod1Init, NULL,   vmR3HaltMethod1Halt, vmR3DefaultWait,     vmR3DefaultNotifyFF },
    //{ VMHALTMETHOD_2,         vmR3HaltMethod2Init, vmR3HaltMethod2Term, vmR3HaltMethod2DoHalt, vmR3HaltMethod2Wait, vmR3HaltMethod2NotifyFF },
    { VMHALTMETHOD_GLOBAL_1,  vmR3HaltGlobal1Init, NULL,   vmR3HaltGlobal1Halt, vmR3HaltGlobal1Wait, vmR3HaltGlobal1NotifyFF },
};
936
937
938/**
939 * Notify the emulation thread (EMT) about pending Forced Action (FF).
940 *
941 * This function is called by thread other than EMT to make
942 * sure EMT wakes up and promptly service an FF request.
943 *
944 * @param pVM VM handle.
945 * @param fNotifiedREM Set if REM have already been notified. If clear the
946 * generic REMR3NotifyFF() method is called.
947 */
948VMR3DECL(void) VMR3NotifyFF(PVM pVM, bool fNotifiedREM)
949{
950 LogFlow(("VMR3NotifyFF:\n"));
951 PUVM pUVM = pVM->pUVM;
952 g_aHaltMethods[pUVM->vm.s.iHaltMethod].pfnNotifyFF(pUVM, fNotifiedREM);
953}
954
955
956/**
957 * Notify the emulation thread (EMT) about pending Forced Action (FF).
958 *
959 * This function is called by thread other than EMT to make
960 * sure EMT wakes up and promptly service an FF request.
961 *
962 * @param pUVM Pointer to the user mode VM structure.
963 * @param fNotifiedREM Set if REM have already been notified. If clear the
964 * generic REMR3NotifyFF() method is called.
965 */
966VMR3DECL(void) VMR3NotifyFFU(PUVM pUVM, bool fNotifiedREM)
967{
968 LogFlow(("VMR3NotifyFF:\n"));
969 g_aHaltMethods[pUVM->vm.s.iHaltMethod].pfnNotifyFF(pUVM, fNotifiedREM);
970}
971
972
/**
 * Halted VM Wait.
 * Any external event will unblock the thread.
 *
 * Records halt-frequency statistics, then dispatches to the halt worker of
 * the currently selected halt method. The VMM yielder is suspended for the
 * duration of the halt.
 *
 * @returns VINF_SUCCESS unless a fatal error occured. In the latter
 *          case an appropriate status code is returned.
 * @param   pVM                 VM handle.
 * @param   fIgnoreInterrupts   If set the VM_FF_INTERRUPT flags is ignored.
 * @thread  The emulation thread.
 */
VMR3DECL(int) VMR3WaitHalted(PVM pVM, bool fIgnoreInterrupts)
{
    LogFlow(("VMR3WaitHalted: fIgnoreInterrupts=%d\n", fIgnoreInterrupts));

    /*
     * Check Relevant FFs.
     */
    const uint32_t fMask = !fIgnoreInterrupts
        ? VM_FF_EXTERNAL_HALTED_MASK
        : VM_FF_EXTERNAL_HALTED_MASK & ~(VM_FF_INTERRUPT_APIC | VM_FF_INTERRUPT_PIC);
    if (VM_FF_ISPENDING(pVM, fMask))
    {
        LogFlow(("VMR3WaitHalted: returns VINF_SUCCESS (FF %#x)\n", pVM->fForcedActions));
        return VINF_SUCCESS;
    }

    /*
     * The yielder is suspended while we're halting.
     */
    VMMR3YieldSuspend(pVM);

    /*
     * Record halt averages for the last second.
     */
    PUVM pUVM = pVM->pUVM;
    uint64_t u64Now = RTTimeNanoTS();
    int64_t off = u64Now - pUVM->vm.s.u64HaltsStartTS;
    if (off > 1000000000)
    {
        /* More than a second since the window started: recompute the
           interval/frequency averages and restart the window. */
        if (off > _4G || !pUVM->vm.s.cHalts)
        {
            pUVM->vm.s.HaltInterval = 1000000000 /* 1 sec */;
            pUVM->vm.s.HaltFrequency = 1;
        }
        else
        {
            pUVM->vm.s.HaltInterval = (uint32_t)off / pUVM->vm.s.cHalts;
            pUVM->vm.s.HaltFrequency = ASMMultU64ByU32DivByU32(pUVM->vm.s.cHalts, 1000000000, (uint32_t)off);
        }
        pUVM->vm.s.u64HaltsStartTS = u64Now;
        pUVM->vm.s.cHalts = 0;
    }
    pUVM->vm.s.cHalts++;

    /*
     * Do the halt.
     */
    int rc = g_aHaltMethods[pUVM->vm.s.iHaltMethod].pfnHalt(pUVM, fMask, u64Now);

    /*
     * Resume the yielder.
     */
    VMMR3YieldResume(pVM);

    LogFlow(("VMR3WaitHalted: returns %Vrc (FF %#x)\n", rc, pVM->fForcedActions));
    return rc;
}
1040
1041
1042/**
1043 * Suspended VM Wait.
1044 * Only a handful of forced actions will cause the function to
1045 * return to the caller.
1046 *
1047 * @returns VINF_SUCCESS unless a fatal error occured. In the latter
1048 * case an appropriate status code is returned.
1049 * @param pUVM Pointer to the user mode VM structure.
1050 * @thread The emulation thread.
1051 */
1052VMR3DECL(int) VMR3WaitU(PUVM pUVM)
1053{
1054 LogFlow(("VMR3WaitU:\n"));
1055
1056 /*
1057 * Check Relevant FFs.
1058 */
1059 PVM pVM = pUVM->pVM;
1060 if ( pVM
1061 && VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_SUSPENDED_MASK))
1062 {
1063 LogFlow(("VMR3Wait: returns VINF_SUCCESS (FF %#x)\n", pVM->fForcedActions));
1064 return VINF_SUCCESS;
1065 }
1066
1067 /*
1068 * Do waiting according to the halt method (so VMR3NotifyFF
1069 * doesn't have to special case anything).
1070 */
1071 int rc = g_aHaltMethods[pUVM->vm.s.iHaltMethod].pfnWait(pUVM);
1072 LogFlow(("VMR3WaitU: returns %Vrc (FF %#x)\n", rc, pVM ? pVM->fForcedActions : 0));
1073 return rc;
1074}
1075
1076
/**
 * Changes the halt method.
 *
 * Resolves VMHALTMETHOD_DEFAULT via the "VM/HaltMethod" CFGM value, locates
 * the matching descriptor in g_aHaltMethods, terminates the previously
 * active method and initializes the new one.
 *
 * @returns VBox status code.
 * @param   pUVM            Pointer to the user mode VM structure.
 * @param   enmHaltMethod   The new halt method.
 * @thread  EMT.
 */
int vmR3SetHaltMethodU(PUVM pUVM, VMHALTMETHOD enmHaltMethod)
{
    PVM pVM = pUVM->pVM; Assert(pVM);
    VM_ASSERT_EMT(pVM);
    AssertReturn(enmHaltMethod > VMHALTMETHOD_INVALID && enmHaltMethod < VMHALTMETHOD_END, VERR_INVALID_PARAMETER);

    /*
     * Resolve default (can be overridden in the configuration).
     */
    if (enmHaltMethod == VMHALTMETHOD_DEFAULT)
    {
        uint32_t u32;
        int rc = CFGMR3QueryU32(CFGMR3GetChild(CFGMR3GetRoot(pVM), "VM"), "HaltMethod", &u32);
        if (VBOX_SUCCESS(rc))
        {
            enmHaltMethod = (VMHALTMETHOD)u32;
            if (enmHaltMethod <= VMHALTMETHOD_INVALID || enmHaltMethod >= VMHALTMETHOD_END)
                return VMSetError(pVM, VERR_INVALID_PARAMETER, RT_SRC_POS, N_("Invalid VM/HaltMethod value %d"), enmHaltMethod);
        }
        else if (rc == VERR_CFGM_VALUE_NOT_FOUND || rc == VERR_CFGM_CHILD_NOT_FOUND)
            return VMSetError(pVM, rc, RT_SRC_POS, N_("Failed to Query VM/HaltMethod as uint32_t"));
        else
            /* No config override: fall back to the global 1 method. */
            enmHaltMethod = VMHALTMETHOD_GLOBAL_1;
        //enmHaltMethod = VMHALTMETHOD_1;
        //enmHaltMethod = VMHALTMETHOD_OLD;
    }
    LogRel(("VM: Halt method %s (%d)\n", vmR3GetHaltMethodName(enmHaltMethod), enmHaltMethod));

    /*
     * Find the descriptor.
     */
    unsigned i = 0;
    while (     i < RT_ELEMENTS(g_aHaltMethods)
           &&   g_aHaltMethods[i].enmHaltMethod != enmHaltMethod)
        i++;
    AssertReturn(i < RT_ELEMENTS(g_aHaltMethods), VERR_INVALID_PARAMETER);

    /*
     * Terminate the old one.
     */
    if (    pUVM->vm.s.enmHaltMethod != VMHALTMETHOD_INVALID
        &&  g_aHaltMethods[pUVM->vm.s.iHaltMethod].pfnTerm)
    {
        g_aHaltMethods[pUVM->vm.s.iHaltMethod].pfnTerm(pUVM);
        pUVM->vm.s.enmHaltMethod = VMHALTMETHOD_INVALID;
    }

    /*
     * Init the new one.
     */
    memset(&pUVM->vm.s.Halt, 0, sizeof(pUVM->vm.s.Halt));
    if (g_aHaltMethods[i].pfnInit)
    {
        int rc = g_aHaltMethods[i].pfnInit(pUVM);
        AssertRCReturn(rc, rc);
    }
    pUVM->vm.s.enmHaltMethod = enmHaltMethod;

    /* Publish the index last (atomically) since VMR3NotifyFF and friends use it. */
    ASMAtomicWriteU32(&pUVM->vm.s.iHaltMethod, i);
    return VINF_SUCCESS;
}
1146
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette