VirtualBox

source: vbox/trunk/src/VBox/VMM/VMEmt.cpp@ 4013

Last change on this file since 4013 was 4013, checked in by vboxsync, 17 years ago

pdm.h = include pdm*.h; pdmapi.h = only the 'core' pdm APIs.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 25.2 KB
Line 
1/* $Id: VMEmt.cpp 4013 2007-08-03 00:11:38Z vboxsync $ */
2/** @file
3 * VM - Virtual Machine, The Emulation Thread.
4 */
5
6/*
7 * Copyright (C) 2006-2007 innotek GmbH
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License as published by the Free Software Foundation,
13 * in version 2 as it comes in the "COPYING" file of the VirtualBox OSE
14 * distribution. VirtualBox OSE is distributed in the hope that it will
15 * be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * If you received this file as part of a commercial VirtualBox
18 * distribution, then only the terms of your commercial VirtualBox
19 * license agreement apply instead of the previous paragraph.
20 */
21
22
23/*******************************************************************************
24* Header Files *
25*******************************************************************************/
26#define LOG_GROUP LOG_GROUP_VM
27#include <VBox/tm.h>
28#include <VBox/dbgf.h>
29#include <VBox/em.h>
30#include <VBox/pdmapi.h>
31#include <VBox/rem.h>
32#include "VMInternal.h"
33#include <VBox/vm.h>
34
35#include <VBox/err.h>
36#include <VBox/log.h>
37#include <iprt/assert.h>
38#include <iprt/asm.h>
39#include <iprt/semaphore.h>
40#include <iprt/string.h>
41#include <iprt/thread.h>
42#include <iprt/time.h>
43
44
45
46
/**
 * The emulation thread (EMT) main procedure.
 *
 * Runs the EMT request loop: services execute-in-EMT requests, debugger
 * forced actions and delayed resets, waits when nothing is pending, and
 * hands control to EMR3ExecuteVM() when the VM state becomes RUNNING.
 * The loop exits on a termination request or when VMR3WaitForResume()
 * longjmps back here through pVM->vm.s.emtJumpEnv.
 *
 * @returns Thread exit code.
 * @param ThreadSelf The handle to the executing thread.
 * @param pvArgs Pointer to a VMEMULATIONTHREADARGS structure.
 */
DECLCALLBACK(int) vmR3EmulationThread(RTTHREAD ThreadSelf, void *pvArgs)
{
    PVMEMULATIONTHREADARGS pArgs = (PVMEMULATIONTHREADARGS)pvArgs;
    AssertReleaseMsg(pArgs && pArgs->pVM, ("Invalid arguments to the emulation thread!\n"));

    /*
     * Init the native thread member.
     */
    PVM pVM = pArgs->pVM;
    pVM->NativeThreadEMT = RTThreadGetNative(ThreadSelf);

    /*
     * The request loop.
     */
    VMSTATE enmBefore;  /* VM state snapshot taken before servicing each request. */
    int rc;
    Log(("vmR3EmulationThread: Emulation thread starting the days work... Thread=%#x pVM=%p\n", ThreadSelf, pVM));
    for (;;)
    {
        /* Requested to exit the EMT thread out of sync? (currently only VMR3WaitForResume) */
        /* NOTE(review): setjmp returns non-zero only via the longjmp in
           VMR3WaitForResume; enmBefore/rc are not volatile, so after a longjmp
           their values are only used by the exit Log below — verify acceptable. */
        if (setjmp(pVM->vm.s.emtJumpEnv) != 0)
        {
            rc = VINF_SUCCESS;
            break;
        }

        /*
         * Pending requests which needs servicing?
         *
         * We check for state changes in addition to status codes when
         * servicing requests. (Look after the ifs.)
         */
        enmBefore = pVM->enmVMState;
        if (VM_FF_ISSET(pVM, VM_FF_TERMINATE))
        {
            rc = VINF_EM_TERMINATE;
            break;
        }
        else if (pVM->vm.s.pReqs)
        {
            /*
             * Service execute in EMT request.
             */
            rc = VMR3ReqProcess(pVM);
            Log(("vmR3EmulationThread: Req rc=%Vrc, VM state %d -> %d\n", rc, enmBefore, pVM->enmVMState));
        }
        else if (VM_FF_ISSET(pVM, VM_FF_DBGF))
        {
            /*
             * Service the debugger request.
             */
            rc = DBGFR3VMMForcedAction(pVM);
            Log(("vmR3EmulationThread: Dbg rc=%Vrc, VM state %d -> %d\n", rc, enmBefore, pVM->enmVMState));
        }
        else if (VM_FF_ISSET(pVM, VM_FF_RESET))
        {
            /*
             * Service a delayed reset request.
             */
            rc = VMR3Reset(pVM);
            VM_FF_CLEAR(pVM, VM_FF_RESET);
            Log(("vmR3EmulationThread: Reset rc=%Vrc, VM state %d -> %d\n", rc, enmBefore, pVM->enmVMState));
        }
        else
        {
            /*
             * Nothing important is pending, so wait for something.
             */
            rc = VMR3Wait(pVM);
            if (VBOX_FAILURE(rc))
                break;
        }

        /*
         * Check for termination requests, these are extremely high priority.
         */
        if (    rc == VINF_EM_TERMINATE
            ||  VM_FF_ISSET(pVM, VM_FF_TERMINATE))
            break;

        /*
         * Some requests (both VMR3Req* and the DBGF) can potentially
         * resume or start the VM, in that case we'll get a change in
         * VM status indicating that we're now running.
         */
        if (    VBOX_SUCCESS(rc)
            &&  enmBefore != pVM->enmVMState
            &&  (pVM->enmVMState == VMSTATE_RUNNING))
        {
            rc = EMR3ExecuteVM(pVM);
            Log(("vmR3EmulationThread: EMR3ExecuteVM() -> rc=%Vrc, enmVMState=%d\n", rc, pVM->enmVMState));
        }

    } /* forever */


    /*
     * Exiting.
     */
    Log(("vmR3EmulationThread: Terminating emulation thread! Thread=%#x pVM=%p rc=%Vrc enmBefore=%d enmVMState=%d\n",
         ThreadSelf, pVM, rc, enmBefore, pVM->enmVMState));
    if (pVM->vm.s.fEMTDoesTheCleanup)
    {
        /* Delayed destruction: EMT itself tears the VM down before exiting. */
        Log(("vmR3EmulationThread: executing delayed Destroy\n"));
        vmR3Destroy(pVM);
        vmR3DestroyFinalBit(pVM);
        Log(("vmR3EmulationThread: EMT is terminated.\n"));
    }
    else
    {
        /* we don't reset ThreadEMT here because it's used in waiting. */
        pVM->NativeThreadEMT = NIL_RTNATIVETHREAD;
    }
    return rc;
}
169
170
/**
 * Wait for VM to be resumed. Handle events like vmR3EmulationThread does.
 * In case the VM is stopped, clean up and long jump to the main EMT loop.
 *
 * This duplicates the request-servicing logic of vmR3EmulationThread; the
 * Log prefixes below deliberately reuse the "vmR3EmulationThread" tag.
 * The only normal exit is a state change to RUNNING; every other exit path
 * longjmps back into vmR3EmulationThread and does not return.
 *
 * @returns VINF_SUCCESS or doesn't return
 * @param pVM VM handle.
 */
VMR3DECL(int) VMR3WaitForResume(PVM pVM)
{
    /*
     * The request loop.
     */
    VMSTATE enmBefore;  /* VM state snapshot taken before servicing each request. */
    int rc;
    for (;;)
    {

        /*
         * Pending requests which needs servicing?
         *
         * We check for state changes in addition to status codes when
         * servicing requests. (Look after the ifs.)
         */
        enmBefore = pVM->enmVMState;
        if (VM_FF_ISSET(pVM, VM_FF_TERMINATE))
        {
            rc = VINF_EM_TERMINATE;
            break;
        }
        else if (pVM->vm.s.pReqs)
        {
            /*
             * Service execute in EMT request.
             */
            rc = VMR3ReqProcess(pVM);
            Log(("vmR3EmulationThread: Req rc=%Vrc, VM state %d -> %d\n", rc, enmBefore, pVM->enmVMState));
        }
        else if (VM_FF_ISSET(pVM, VM_FF_DBGF))
        {
            /*
             * Service the debugger request.
             */
            rc = DBGFR3VMMForcedAction(pVM);
            Log(("vmR3EmulationThread: Dbg rc=%Vrc, VM state %d -> %d\n", rc, enmBefore, pVM->enmVMState));
        }
        else if (VM_FF_ISSET(pVM, VM_FF_RESET))
        {
            /*
             * Service a delayed reset request.
             */
            rc = VMR3Reset(pVM);
            VM_FF_CLEAR(pVM, VM_FF_RESET);
            Log(("vmR3EmulationThread: Reset rc=%Vrc, VM state %d -> %d\n", rc, enmBefore, pVM->enmVMState));
        }
        else
        {
            /*
             * Nothing important is pending, so wait for something.
             */
            rc = VMR3Wait(pVM);
            if (VBOX_FAILURE(rc))
                break;
        }

        /*
         * Check for termination requests, these are extremely high priority.
         */
        if (    rc == VINF_EM_TERMINATE
            ||  VM_FF_ISSET(pVM, VM_FF_TERMINATE))
            break;

        /*
         * Some requests (both VMR3Req* and the DBGF) can potentially
         * resume or start the VM, in that case we'll get a change in
         * VM status indicating that we're now running.
         */
        if (    VBOX_SUCCESS(rc)
            &&  enmBefore != pVM->enmVMState
            &&  (pVM->enmVMState == VMSTATE_RUNNING))
        {
            /* Only valid exit reason. */
            return VINF_SUCCESS;
        }

    } /* forever */

    /* Return to the main loop in vmR3EmulationThread, which will clean up for us. */
    longjmp(pVM->vm.s.emtJumpEnv, 1);
}
260
261
/**
 * The old halt loop.
 *
 * Polls PDM and runs timer queues, then sleeps in increasing steps (spin /
 * yield / 1 ms wait / longer wait) chosen from the nanoseconds left to the
 * next timer event, until one of the forced actions in fMask is pending.
 *
 * Note: sets pVM->vm.s.fWait to 1 but does not clear it; the caller
 * (VMR3WaitHalted) clears it after the halt method returns.
 *
 * @returns VBox status code (VINF_SUCCESS, or VERR_INTERNAL_ERROR on a
 *          fatal semaphore failure, in which case VM_FF_TERMINATE is set).
 * @param pVM The VM handle.
 * @param fMask Mask of forced actions that end the halt.
 * @param u64Now Current virtual time stamp (unused by this method).
 */
static DECLCALLBACK(int) vmR3HaltOldDoHalt(PVM pVM, const uint32_t fMask, uint64_t /* u64Now*/)
{
    /*
     * Halt loop.
     */
    int rc = VINF_SUCCESS;
    ASMAtomicXchgU32(&pVM->vm.s.fWait, 1);
    //unsigned cLoops = 0;
    for (;;)
    {
        /*
         * Work the timers and check if we can exit.
         * The poll call gives us the ticks left to the next event in
         * addition to perhaps set an FF.
         */
        STAM_REL_PROFILE_START(&pVM->vm.s.StatHaltPoll, a);
        PDMR3Poll(pVM);
        STAM_REL_PROFILE_STOP(&pVM->vm.s.StatHaltPoll, a);
        STAM_REL_PROFILE_START(&pVM->vm.s.StatHaltTimers, b);
        TMR3TimerQueuesDo(pVM);
        STAM_REL_PROFILE_STOP(&pVM->vm.s.StatHaltTimers, b);
        if (VM_FF_ISPENDING(pVM, fMask))
            break;
        uint64_t u64NanoTS = TMVirtualToNano(pVM, TMTimerPoll(pVM));
        if (VM_FF_ISPENDING(pVM, fMask))
            break;

        /*
         * Wait for a while. Someone will wake us up or interrupt the call if
         * anything needs our attention.
         */
        if (u64NanoTS < 50000)
        {
            //RTLogPrintf("u64NanoTS=%RI64 cLoops=%d spin\n", u64NanoTS, cLoops++);
            /* spin */;
        }
        else
        {
            VMMR3YieldStop(pVM);
            //uint64_t u64Start = RTTimeNanoTS();
            if (u64NanoTS < 870000) /* this is a bit speculative... works fine on linux. */
            {
                //RTLogPrintf("u64NanoTS=%RI64 cLoops=%d yield", u64NanoTS, cLoops++);
                STAM_REL_PROFILE_START(&pVM->vm.s.StatHaltYield, a);
                RTThreadYield(); /* this is the best we can do here */
                STAM_REL_PROFILE_STOP(&pVM->vm.s.StatHaltYield, a);
            }
            else if (u64NanoTS < 2000000)
            {
                //RTLogPrintf("u64NanoTS=%RI64 cLoops=%d sleep 1ms", u64NanoTS, cLoops++);
                STAM_REL_PROFILE_START(&pVM->vm.s.StatHaltBlock, a);
                rc = RTSemEventWait(pVM->vm.s.EventSemWait, 1);
                STAM_REL_PROFILE_STOP(&pVM->vm.s.StatHaltBlock, a);
            }
            else
            {
                //RTLogPrintf("u64NanoTS=%RI64 cLoops=%d sleep %dms", u64NanoTS, cLoops++, (uint32_t)RT_MIN((u64NanoTS - 500000) / 1000000, 15));
                /* NOTE(review): the commented-out log above computes the wait with
                   (u64NanoTS - 500000) while the code uses (u64NanoTS - 1000000) —
                   verify which offset is intended. */
                STAM_REL_PROFILE_START(&pVM->vm.s.StatHaltBlock, a);
                rc = RTSemEventWait(pVM->vm.s.EventSemWait, RT_MIN((u64NanoTS - 1000000) / 1000000, 15));
                STAM_REL_PROFILE_STOP(&pVM->vm.s.StatHaltBlock, a);
            }
            //uint64_t u64Slept = RTTimeNanoTS() - u64Start;
            //RTLogPrintf(" -> rc=%Vrc in %RU64 ns / %RI64 ns delta\n", rc, u64Slept, u64NanoTS - u64Slept);
        }
        /* A timeout is the expected wake-up; any other failure is fatal. */
        if (rc == VERR_TIMEOUT)
            rc = VINF_SUCCESS;
        else if (VBOX_FAILURE(rc))
        {
            AssertRC(rc != VERR_INTERRUPTED);
            AssertMsgFailed(("RTSemEventWait->%Vrc\n", rc));
            VM_FF_SET(pVM, VM_FF_TERMINATE);
            rc = VERR_INTERNAL_ERROR;
            break;
        }
    }

    return rc;
}
343
344
345/**
346 * Initialize the configuration of halt method 1 & 2.
347 *
348 * @return VBox status code. Failure on invalid CFGM data.
349 * @param pVM The VM handle.
350 */
351static int vmR3HaltMethod12ReadConfig(PVM pVM)
352{
353 /*
354 * The defaults.
355 */
356 pVM->vm.s.Halt.Method12.u32LagBlockIntervalDivisorCfg = 4;
357 pVM->vm.s.Halt.Method12.u32MinBlockIntervalCfg = 5*1000000;
358 pVM->vm.s.Halt.Method12.u32MaxBlockIntervalCfg = 200*1000000;
359 pVM->vm.s.Halt.Method12.u32StartSpinningCfg = 20*1000000;
360 pVM->vm.s.Halt.Method12.u32StopSpinningCfg = 2*1000000;
361
362 /*
363 * Query overrides.
364 */
365 PCFGMNODE pCfg = CFGMR3GetChild(CFGMR3GetRoot(pVM), "/VMM/HaltedMethod1");
366 if (pCfg)
367 {
368
369 }
370
371 return VINF_SUCCESS;
372}
373
374
375/**
376 * Initialize halt method 1.
377 *
378 * @return VBox status code.
379 * @param pVM The VM handle.
380 */
381static DECLCALLBACK(int) vmR3HaltMethod1Init(PVM pVM)
382{
383 return vmR3HaltMethod12ReadConfig(pVM);
384}
385
386
/**
 * Method 1 - Block whenever possible, and when lagging behind
 * switch to spinning for 10-30ms with occational blocking until
 * the lag has been eliminated.
 *
 * @returns VBox status code (VINF_SUCCESS, or VERR_INTERNAL_ERROR on a
 *          fatal semaphore failure, in which case VM_FF_TERMINATE is set).
 * @param pVM The VM handle.
 * @param fMask Mask of forced actions that end the halt.
 * @param u64Now Current timestamp, used for the spin/block bookkeeping.
 */
static DECLCALLBACK(int) vmR3HaltMethod1DoHalt(PVM pVM, const uint32_t fMask, uint64_t u64Now)
{
    /*
     * To simplify things, we decide up-front whether we should switch to spinning or
     * not. This makes some ASSUMPTIONS about the cause of the spinning (PIT/RTC/PCNet)
     * and that it will generate interrupts or other events that will cause us to exit
     * the halt loop.
     */
    bool fBlockOnce = false;    /* force one block this round even while spinning */
    bool fSpinning = false;     /* busy-loop instead of blocking this round */
    uint32_t u32CatchUpPct = TMVirtualSyncGetCatchUpPct(pVM);
    if (u32CatchUpPct /* non-zero if catching up */)
    {
        if (pVM->vm.s.Halt.Method12.u64StartSpinTS)
        {
            /* Already spinning: keep going while lag exceeds the stop threshold. */
            fSpinning = TMVirtualSyncGetLag(pVM) >= pVM->vm.s.Halt.Method12.u32StopSpinningCfg;
            if (fSpinning)
            {
                /* Block once if we haven't blocked for a while; the interval
                   scales with the lag, clamped to [min, max] from the config. */
                uint64_t u64Lag = TMVirtualSyncGetLag(pVM);
                fBlockOnce = u64Now - pVM->vm.s.Halt.Method12.u64LastBlockTS
                           > RT_MAX(pVM->vm.s.Halt.Method12.u32MinBlockIntervalCfg,
                                    RT_MIN(u64Lag / pVM->vm.s.Halt.Method12.u32LagBlockIntervalDivisorCfg,
                                           pVM->vm.s.Halt.Method12.u32MaxBlockIntervalCfg));
            }
            else
            {
                //RTLogRelPrintf("Stopped spinning (%u ms)\n", (u64Now - pVM->vm.s.Halt.Method12.u64StartSpinTS) / 1000000);
                pVM->vm.s.Halt.Method12.u64StartSpinTS = 0;
            }
        }
        else
        {
            /* Not spinning yet: start when lag crosses the start threshold. */
            fSpinning = TMVirtualSyncGetLag(pVM) >= pVM->vm.s.Halt.Method12.u32StartSpinningCfg;
            if (fSpinning)
                pVM->vm.s.Halt.Method12.u64StartSpinTS = u64Now;
        }
    }
    else if (pVM->vm.s.Halt.Method12.u64StartSpinTS)
    {
        /* No longer catching up: end any spinning episode. */
        //RTLogRelPrintf("Stopped spinning (%u ms)\n", (u64Now - pVM->vm.s.Halt.Method12.u64StartSpinTS) / 1000000);
        pVM->vm.s.Halt.Method12.u64StartSpinTS = 0;
    }

    /*
     * Halt loop.
     */
    int rc = VINF_SUCCESS;
    ASMAtomicXchgU32(&pVM->vm.s.fWait, 1);
    unsigned cLoops = 0;
    for (;; cLoops++)
    {
        /*
         * Work the timers and check if we can exit.
         */
        STAM_REL_PROFILE_START(&pVM->vm.s.StatHaltPoll, a);
        PDMR3Poll(pVM);
        STAM_REL_PROFILE_STOP(&pVM->vm.s.StatHaltPoll, a);
        STAM_REL_PROFILE_START(&pVM->vm.s.StatHaltTimers, b);
        TMR3TimerQueuesDo(pVM);
        STAM_REL_PROFILE_STOP(&pVM->vm.s.StatHaltTimers, b);
        if (VM_FF_ISPENDING(pVM, fMask))
            break;

        /*
         * Estimate time left to the next event.
         */
        uint64_t u64NanoTS = TMVirtualToNano(pVM, TMTimerPoll(pVM));
        if (VM_FF_ISPENDING(pVM, fMask))
            break;

        /*
         * Block if we're not spinning and the interval isn't all that small.
         */
        if (    (   !fSpinning
                 || fBlockOnce)
            &&  u64NanoTS >= 250000) /* 0.250 ms */
        {
            const uint64_t Start = pVM->vm.s.Halt.Method12.u64LastBlockTS = RTTimeNanoTS();
            VMMR3YieldStop(pVM);

            /* Wait time is capped at 15 ms and shortened by the average
               oversleep observed so far (cNSBlockedTooLongAvg). */
            uint32_t cMilliSecs = RT_MIN(u64NanoTS / 1000000, 15);
            if (cMilliSecs <= pVM->vm.s.Halt.Method12.cNSBlockedTooLongAvg)
                cMilliSecs = 1;
            else
                cMilliSecs -= pVM->vm.s.Halt.Method12.cNSBlockedTooLongAvg;
            //RTLogRelPrintf("u64NanoTS=%RI64 cLoops=%3d sleep %02dms (%7RU64) ", u64NanoTS, cLoops, cMilliSecs, u64NanoTS);
            STAM_REL_PROFILE_START(&pVM->vm.s.StatHaltBlock, a);
            rc = RTSemEventWait(pVM->vm.s.EventSemWait, cMilliSecs);
            STAM_REL_PROFILE_STOP(&pVM->vm.s.StatHaltBlock, a);
            if (rc == VERR_TIMEOUT)
                rc = VINF_SUCCESS;
            else if (VBOX_FAILURE(rc))
            {
                AssertRC(rc != VERR_INTERRUPTED);
                AssertMsgFailed(("RTSemEventWait->%Vrc\n", rc));
                VM_FF_SET(pVM, VM_FF_TERMINATE);
                rc = VERR_INTERNAL_ERROR;
                break;
            }

            /*
             * Calc the statistics.
             * Update averages every 16th time, and flush parts of the history every 64th time.
             */
            const uint64_t Elapsed = RTTimeNanoTS() - Start;
            pVM->vm.s.Halt.Method12.cNSBlocked += Elapsed;
            if (Elapsed > u64NanoTS)
                pVM->vm.s.Halt.Method12.cNSBlockedTooLong += Elapsed - u64NanoTS;
            pVM->vm.s.Halt.Method12.cBlocks++;
            if (!(pVM->vm.s.Halt.Method12.cBlocks & 0xf))
            {
                pVM->vm.s.Halt.Method12.cNSBlockedTooLongAvg = pVM->vm.s.Halt.Method12.cNSBlockedTooLong / pVM->vm.s.Halt.Method12.cBlocks;
                if (!(pVM->vm.s.Halt.Method12.cBlocks & 0x3f))
                {
                    /* Flush: rescale the history to 64 blocks worth of average. */
                    pVM->vm.s.Halt.Method12.cNSBlockedTooLong = pVM->vm.s.Halt.Method12.cNSBlockedTooLongAvg * 0x40;
                    pVM->vm.s.Halt.Method12.cBlocks = 0x40;
                }
            }
            //RTLogRelPrintf(" -> %7RU64 ns / %7RI64 ns delta%s\n", Elapsed, Elapsed - u64NanoTS, fBlockOnce ? " (block once)" : "");

            /*
             * Clear the block once flag if we actually blocked.
             */
            if (    fBlockOnce
                &&  Elapsed > 100000 /* 0.1 ms */)
                fBlockOnce = false;
        }
    }
    //if (fSpinning) RTLogRelPrintf("spun for %RU64 ns %u loops; lag=%RU64 pct=%d\n", RTTimeNanoTS() - u64Now, cLoops, TMVirtualSyncGetLag(pVM), u32CatchUpPct);

    return rc;
}
524
525
526/**
527 * Default VMR3Wait() worker.
528 *
529 * @returns VBox status code.
530 * @param pVM The VM handle.
531 */
532static DECLCALLBACK(int) vmR3DefaultWait(PVM pVM)
533{
534 int rc = VINF_SUCCESS;
535 ASMAtomicXchgU32(&pVM->vm.s.fWait, 1);
536 for (;;)
537 {
538 /*
539 * Check Relevant FFs.
540 */
541 if (VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_SUSPENDED_MASK))
542 break;
543
544 /*
545 * Wait for a while. Someone will wake us up or interrupt the call if
546 * anything needs our attention.
547 */
548 rc = RTSemEventWait(pVM->vm.s.EventSemWait, 1000);
549 if (rc == VERR_TIMEOUT)
550 rc = VINF_SUCCESS;
551 else if (VBOX_FAILURE(rc))
552 {
553 AssertMsgFailed(("RTSemEventWait->%Vrc\n", rc));
554 VM_FF_SET(pVM, VM_FF_TERMINATE);
555 rc = VERR_INTERNAL_ERROR;
556 break;
557 }
558
559 }
560 ASMAtomicXchgU32(&pVM->vm.s.fWait, 0);
561 return rc;
562}
563
564
565/**
566 * Default VMR3NotifyFF() worker.
567 *
568 * @param pVM The VM handle.
569 * @param fNotifiedREM Se VMR3NotifyFF().
570 */
571static DECLCALLBACK(void) vmR3DefaultNotifyFF(PVM pVM, bool fNotifiedREM)
572{
573 if (pVM->vm.s.fWait)
574 {
575 int rc = RTSemEventSignal(pVM->vm.s.EventSemWait);
576 AssertRC(rc);
577 }
578 else if (!fNotifiedREM)
579 REMR3NotifyFF(pVM);
580}
581
582
/**
 * Array with halt method descriptors.
 * VMINT::iHaltMethod contains an index into this array.
 *
 * Entries without an init/term need (the old method) use NULL callbacks;
 * callers must check for NULL before invoking pfnInit/pfnTerm.
 */
static const struct VMHALTMETHODDESC
{
    /** The halt method id. */
    VMHALTMETHOD enmHaltMethod;
    /** The init function for loading config and initialize variables. (optional, may be NULL) */
    DECLR3CALLBACKMEMBER(int, pfnInit,(PVM pVM));
    /** The term function. (optional, may be NULL) */
    DECLR3CALLBACKMEMBER(void, pfnTerm,(PVM pVM));
    /** The halt function. */
    DECLR3CALLBACKMEMBER(int, pfnHalt,(PVM pVM, const uint32_t fMask, uint64_t u64Now));
    /** The wait function. */
    DECLR3CALLBACKMEMBER(int, pfnWait,(PVM pVM));
    /** The notifyFF function. */
    DECLR3CALLBACKMEMBER(void, pfnNotifyFF,(PVM pVM, bool fNotifiedREM));
} g_aHaltMethods[] =
{
    { VMHALTMETHOD_OLD, NULL, NULL, vmR3HaltOldDoHalt, vmR3DefaultWait, vmR3DefaultNotifyFF },
    { VMHALTMETHOD_1, vmR3HaltMethod1Init, NULL, vmR3HaltMethod1DoHalt, vmR3DefaultWait, vmR3DefaultNotifyFF },
    //{ VMHALTMETHOD_2, vmR3HaltMethod2Init, vmR3HaltMethod2Term, vmR3HaltMethod2DoWait, vmR3HaltMethod2Wait, vmR3HaltMethod2NotifyFF },
};
607
608
609/**
610 * Notify the emulation thread (EMT) about pending Forced Action (FF).
611 *
612 * This function is called by thread other than EMT to make
613 * sure EMT wakes up and promptly service an FF request.
614 *
615 * @param pVM VM handle.
616 * @param fNotifiedREM Set if REM have already been notified. If clear the
617 * generic REMR3NotifyFF() method is called.
618 */
619VMR3DECL(void) VMR3NotifyFF(PVM pVM, bool fNotifiedREM)
620{
621 LogFlow(("VMR3NotifyFF:\n"));
622 g_aHaltMethods[pVM->vm.s.iHaltMethod].pfnNotifyFF(pVM, fNotifiedREM);
623}
624
625
/**
 * Halted VM Wait.
 * Any external event will unblock the thread.
 *
 * Maintains per-second halt frequency/interval statistics and dispatches the
 * actual blocking to the currently selected halt method.
 *
 * @returns VINF_SUCCESS unless a fatal error occured. In the latter
 *          case an appropriate status code is returned.
 * @param pVM VM handle.
 * @param fIgnoreInterrupts If set the VM_FF_INTERRUPT flags is ignored.
 * @thread The emulation thread.
 */
VMR3DECL(int) VMR3WaitHalted(PVM pVM, bool fIgnoreInterrupts)
{
    LogFlow(("VMR3WaitHalted: fIgnoreInterrupts=%d\n", fIgnoreInterrupts));

    /*
     * Check Relevant FFs.
     */
    const uint32_t fMask = !fIgnoreInterrupts
        ? VM_FF_EXTERNAL_HALTED_MASK
        : VM_FF_EXTERNAL_HALTED_MASK & ~(VM_FF_INTERRUPT_APIC | VM_FF_INTERRUPT_PIC);
    if (VM_FF_ISPENDING(pVM, fMask))
    {
        LogFlow(("VMR3WaitHalted: returns VINF_SUCCESS (FF %#x)\n", pVM->fForcedActions));
        return VINF_SUCCESS;
    }

    /*
     * The yielder is suspended while we're halting.
     */
    VMMR3YieldSuspend(pVM);

    /*
     * Record halt averages for the last second.
     */
    uint64_t u64Now = RTTimeNanoTS();
    int64_t off = u64Now - pVM->vm.s.u64HaltsStartTS;
    if (off > 1000000000)
    {
        /* Reset the stats if the window is absurdly long (>4s worth of ns
           fits the _4G check) or there were no halts to average over. */
        if (off > _4G || !pVM->vm.s.cHalts)
        {
            pVM->vm.s.HaltInterval = 1000000000 /* 1 sec */;
            pVM->vm.s.HaltFrequency = 1;
        }
        else
        {
            pVM->vm.s.HaltInterval = (uint32_t)off / pVM->vm.s.cHalts;
            pVM->vm.s.HaltFrequency = ASMMultU64ByU32DivByU32(pVM->vm.s.cHalts, 1000000000, (uint32_t)off);
        }
        pVM->vm.s.u64HaltsStartTS = u64Now;
        pVM->vm.s.cHalts = 0;
    }
    pVM->vm.s.cHalts++;

    /*
     * Do the halt.
     */
    int rc = g_aHaltMethods[pVM->vm.s.iHaltMethod].pfnHalt(pVM, fMask, u64Now);

    /*
     * Resume the yielder and tell the world we're not blocking.
     */
    ASMAtomicXchgU32(&pVM->vm.s.fWait, 0);
    VMMR3YieldResume(pVM);

    LogFlow(("VMR3WaitHalted: returns %Vrc (FF %#x)\n", rc, pVM->fForcedActions));
    return rc;
}
693
694
695/**
696 * Suspended VM Wait.
697 * Only a handful of forced actions will cause the function to
698 * return to the caller.
699 *
700 * @returns VINF_SUCCESS unless a fatal error occured. In the latter
701 * case an appropriate status code is returned.
702 * @param pVM VM handle.
703 * @thread The emulation thread.
704 */
705VMR3DECL(int) VMR3Wait(PVM pVM)
706{
707 LogFlow(("VMR3Wait:\n"));
708
709 /*
710 * Check Relevant FFs.
711 */
712 if (VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_SUSPENDED_MASK))
713 {
714 LogFlow(("VMR3Wait: returns VINF_SUCCESS (FF %#x)\n", pVM->fForcedActions));
715 return VINF_SUCCESS;
716 }
717
718 /*
719 * Do waiting according to the halt method (so VMR3NotifyFF
720 * doesn't have to special case anything).
721 */
722 int rc = g_aHaltMethods[pVM->vm.s.iHaltMethod].pfnWait(pVM);
723 LogFlow(("VMR3Wait: returns %Vrc (FF %#x)\n", rc, pVM->fForcedActions));
724 return rc;
725}
726
727
728/**
729 * Changes the halt method.
730 *
731 * @returns VBox status code.
732 * @param pVM The VM handle.
733 * @param enmHaltMethod The new halt method.
734 * @thread EMT.
735 */
736int vmR3SetHaltMethod(PVM pVM, VMHALTMETHOD enmHaltMethod)
737{
738 VM_ASSERT_EMT(pVM);
739 AssertReturn(enmHaltMethod > VMHALTMETHOD_INVALID && enmHaltMethod < VMHALTMETHOD_END, VERR_INVALID_PARAMETER);
740
741 /*
742 * Resolve default (can be overridden in the configuration).
743 */
744 if (enmHaltMethod == VMHALTMETHOD_DEFAULT)
745 {
746 uint32_t u32;
747 int rc = CFGMR3QueryU32(CFGMR3GetChild(CFGMR3GetRoot(pVM), "VM"), "HaltMethod", &u32);
748 if (VBOX_SUCCESS(rc))
749 {
750 enmHaltMethod = (VMHALTMETHOD)u32;
751 if (enmHaltMethod <= VMHALTMETHOD_INVALID || enmHaltMethod >= VMHALTMETHOD_END)
752 return VMSetError(pVM, VERR_INVALID_PARAMETER, RT_SRC_POS, N_("Invalid VM/HaltMethod value %d."), enmHaltMethod);
753 }
754 else if (rc == VERR_CFGM_VALUE_NOT_FOUND || rc == VERR_CFGM_CHILD_NOT_FOUND)
755 return VMSetError(pVM, rc, RT_SRC_POS, N_("Failed to Query VM/HaltMethod as uint32_t."));
756 else
757 enmHaltMethod = VMHALTMETHOD_1;
758 //enmHaltMethod = VMHALTMETHOD_OLD;
759 }
760
761 /*
762 * Find the descriptor.
763 */
764 unsigned i = 0;
765 while ( i < RT_ELEMENTS(g_aHaltMethods)
766 && g_aHaltMethods[i].enmHaltMethod != enmHaltMethod)
767 i++;
768 AssertReturn(i < RT_ELEMENTS(g_aHaltMethods), VERR_INVALID_PARAMETER);
769
770 /*
771 * Terminate the old one.
772 */
773 if ( pVM->vm.s.enmHaltMethod != VMHALTMETHOD_INVALID
774 && g_aHaltMethods[pVM->vm.s.iHaltMethod].pfnTerm)
775 {
776 g_aHaltMethods[pVM->vm.s.iHaltMethod].pfnTerm(pVM);
777 pVM->vm.s.enmHaltMethod = VMHALTMETHOD_INVALID;
778 }
779
780 /*
781 * Init the new one.
782 */
783 memset(&pVM->vm.s.Halt, 0, sizeof(pVM->vm.s.Halt));
784 if (g_aHaltMethods[i].pfnInit)
785 {
786 int rc = g_aHaltMethods[i].pfnInit(pVM);
787 AssertRCReturn(rc, rc);
788 }
789 pVM->vm.s.enmHaltMethod = enmHaltMethod;
790 ASMAtomicXchgU32(&pVM->vm.s.iHaltMethod, i);
791 return VINF_SUCCESS;
792}
793
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette