VirtualBox

source: vbox/trunk/src/VBox/VMM/VMEmt.cpp@19395

Last change on this file since 19395 was 19395, checked in by vboxsync, 16 years ago

GVMM,VM: Register the other EMTs or we assert painfully in gvmmR0ByVMAndEMT. A couple of todos and stuff.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 43.1 KB
/* $Id: VMEmt.cpp 19395 2009-05-05 20:28:42Z vboxsync $ */
/** @file
 * VM - Virtual Machine, The Emulation Thread.
 */

/*
 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
 * Clara, CA 95054 USA or visit http://www.sun.com if you need
 * additional information or have any questions.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_VM
#include <VBox/tm.h>
#include <VBox/dbgf.h>
#include <VBox/em.h>
#include <VBox/pdmapi.h>
#include <VBox/rem.h>
#include <VBox/tm.h>
#include "VMInternal.h"
#include <VBox/vm.h>
#include <VBox/uvm.h>

#include <VBox/err.h>
#include <VBox/log.h>
#include <iprt/assert.h>
#include <iprt/asm.h>
#include <iprt/semaphore.h>
#include <iprt/string.h>
#include <iprt/thread.h>
#include <iprt/time.h>

/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
int vmR3EmulationThreadWithId(RTTHREAD ThreadSelf, PUVMCPU pUVCpu, VMCPUID idCpu);


/**
 * The emulation thread main function.
 *
 * @returns Thread exit code.
 * @param   ThreadSelf  The handle to the executing thread.
 * @param   pvArgs      Pointer to the user mode per-VCpu structure (UVMCPU).
 */
DECLCALLBACK(int) vmR3EmulationThread(RTTHREAD ThreadSelf, void *pvArgs)
{
    PUVMCPU pUVCpu = (PUVMCPU)pvArgs;
    return vmR3EmulationThreadWithId(ThreadSelf, pUVCpu, pUVCpu->idCpu);
}


/**
 * The emulation thread main function, with Virtual CPU ID for debugging.
 *
 * @returns Thread exit code.
 * @param   ThreadSelf  The handle to the executing thread.
 * @param   pUVCpu      Pointer to the user mode per-VCpu structure.
 * @param   idCpu       The virtual CPU ID, for backtrace purposes.
 */
int vmR3EmulationThreadWithId(RTTHREAD ThreadSelf, PUVMCPU pUVCpu, VMCPUID idCpu)
{
    PUVM    pUVM = pUVCpu->pUVM;
    int     rc;

    AssertReleaseMsg(VALID_PTR(pUVM) && pUVM->u32Magic == UVM_MAGIC,
                     ("Invalid arguments to the emulation thread!\n"));

    rc = RTTlsSet(pUVM->vm.s.idxTLS, pUVCpu);
    AssertReleaseMsgRCReturn(rc, ("RTTlsSet %x failed with %Rrc\n", pUVM->vm.s.idxTLS, rc), rc);

    /*
     * The request loop.
     */
    rc = VINF_SUCCESS;
    volatile VMSTATE enmBefore = VMSTATE_CREATING; /* volatile because of setjmp */
    Log(("vmR3EmulationThread: Emulation thread starting the days work... Thread=%#x pUVM=%p\n", ThreadSelf, pUVM));
    for (;;)
    {
        /* Requested to exit the EMT thread out of sync? (currently only VMR3WaitForResume) */
        if (setjmp(pUVCpu->vm.s.emtJumpEnv) != 0)
        {
            rc = VINF_SUCCESS;
            break;
        }

        /*
         * During early init there is no pVM, so make a special path
         * for that to keep things clearly separate.
         */
        if (!pUVM->pVM)
        {
            /*
             * Check for termination first.
             */
            if (pUVM->vm.s.fTerminateEMT)
            {
                rc = VINF_EM_TERMINATE;
                break;
            }

            /*
             * Only the first VCPU may initialize the VM during early init
             * and must therefore service all VMCPUID_ANY requests.
             * See also VMR3Create.
             */
            if (    pUVM->vm.s.pReqs
                &&  pUVCpu->idCpu == 0)
            {
                /*
                 * Service execute in any EMT request.
                 */
                rc = VMR3ReqProcessU(pUVM, VMCPUID_ANY);
                Log(("vmR3EmulationThread: Req rc=%Rrc, VM state %d -> %d\n", rc, enmBefore, pUVM->pVM ? pUVM->pVM->enmVMState : VMSTATE_CREATING));
            }
            else if (pUVCpu->vm.s.pReqs)
            {
                /*
                 * Service execute in specific EMT request.
                 */
                rc = VMR3ReqProcessU(pUVM, pUVCpu->idCpu);
                Log(("vmR3EmulationThread: Req (cpu=%u) rc=%Rrc, VM state %d -> %d\n", pUVCpu->idCpu, rc, enmBefore, pUVM->pVM ? pUVM->pVM->enmVMState : VMSTATE_CREATING));
            }
            else
            {
                /*
                 * Nothing important is pending, so wait for something.
                 */
                rc = VMR3WaitU(pUVCpu);
                if (RT_FAILURE(rc))
                    break;
            }
        }
        else
        {

            /*
             * Pending requests that need servicing?
             *
             * We check for state changes in addition to status codes when
             * servicing requests. (Look after the ifs.)
             */
            PVM pVM = pUVM->pVM;
            enmBefore = pVM->enmVMState;
            if (    VM_FF_ISSET(pVM, VM_FF_TERMINATE)
                ||  pUVM->vm.s.fTerminateEMT)
            {
                rc = VINF_EM_TERMINATE;
                break;
            }
            if (pUVM->vm.s.pReqs)
            {
                /*
                 * Service execute in any EMT request.
                 */
                rc = VMR3ReqProcessU(pUVM, VMCPUID_ANY);
                Log(("vmR3EmulationThread: Req rc=%Rrc, VM state %d -> %d\n", rc, enmBefore, pVM->enmVMState));
            }
            else if (pUVCpu->vm.s.pReqs)
            {
                /*
                 * Service execute in specific EMT request.
                 */
                rc = VMR3ReqProcessU(pUVM, pUVCpu->idCpu);
                Log(("vmR3EmulationThread: Req (cpu=%u) rc=%Rrc, VM state %d -> %d\n", pUVCpu->idCpu, rc, enmBefore, pVM->enmVMState));
            }
            else if (VM_FF_ISSET(pVM, VM_FF_DBGF))
            {
                /*
                 * Service the debugger request.
                 */
                rc = DBGFR3VMMForcedAction(pVM);
                Log(("vmR3EmulationThread: Dbg rc=%Rrc, VM state %d -> %d\n", rc, enmBefore, pVM->enmVMState));
            }
            else if (VM_FF_ISSET(pVM, VM_FF_RESET))
            {
                /*
                 * Service a delayed reset request.
                 */
                rc = VMR3Reset(pVM);
                VM_FF_CLEAR(pVM, VM_FF_RESET);
                Log(("vmR3EmulationThread: Reset rc=%Rrc, VM state %d -> %d\n", rc, enmBefore, pVM->enmVMState));
            }
            else
            {
                /*
                 * Nothing important is pending, so wait for something.
                 */
                rc = VMR3WaitU(pUVCpu);
                if (RT_FAILURE(rc))
                    break;
            }

            /*
             * Check for termination requests, these have extremely high priority.
             */
            if (    rc == VINF_EM_TERMINATE
                ||  VM_FF_ISSET(pVM, VM_FF_TERMINATE)
                ||  pUVM->vm.s.fTerminateEMT)
                break;
        }

        /*
         * Some requests (both VMR3Req* and the DBGF) can potentially
         * resume or start the VM, in that case we'll get a change in
         * VM status indicating that we're now running.
         */
        if (    RT_SUCCESS(rc)
            &&  pUVM->pVM
            &&  enmBefore != pUVM->pVM->enmVMState
            &&  pUVM->pVM->enmVMState == VMSTATE_RUNNING)
        {
            PVM     pVM   = pUVM->pVM;
            PVMCPU  pVCpu = &pVM->aCpus[idCpu];

            rc = EMR3ExecuteVM(pVM, pVCpu);
            Log(("vmR3EmulationThread: EMR3ExecuteVM() -> rc=%Rrc, enmVMState=%d\n", rc, pVM->enmVMState));
            if (    EMGetState(pVCpu) == EMSTATE_GURU_MEDITATION
                &&  pVM->enmVMState == VMSTATE_RUNNING)
                vmR3SetState(pVM, VMSTATE_GURU_MEDITATION);
        }

    } /* forever */


    /*
     * Exiting.
     */
    Log(("vmR3EmulationThread: Terminating emulation thread! Thread=%#x pUVM=%p rc=%Rrc enmBefore=%d enmVMState=%d\n",
         ThreadSelf, pUVM, rc, enmBefore, pUVM->pVM ? pUVM->pVM->enmVMState : VMSTATE_TERMINATED));
    if (pUVM->vm.s.fEMTDoesTheCleanup)
    {
        Log(("vmR3EmulationThread: executing delayed Destroy\n"));
        Assert(pUVM->pVM);
        vmR3Destroy(pUVM->pVM);
        vmR3DestroyFinalBitFromEMT(pUVM);
    }
    else
    {
        vmR3DestroyFinalBitFromEMT(pUVM);

        pUVCpu->vm.s.NativeThreadEMT = NIL_RTNATIVETHREAD;
    }
    Log(("vmR3EmulationThread: EMT is terminated.\n"));
    return rc;
}
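
/*
 * Illustrative sketch (not part of the original file): this is roughly how
 * the per-VCPU structure reaches vmR3EmulationThread as pvArgs. RTThreadCreate
 * and the IPRT types are real; the helper name, the ThreadEMT field path and
 * the _1M stack size are assumptions for illustration - see the EMT creation
 * code in VM.cpp for the real call site.
 */
#if 0 /* illustrative sketch */
static int vmR3SpawnEmtSketch(PUVM pUVM, VMCPUID idCpu)
{
    PUVMCPU pUVCpu = &pUVM->aCpus[idCpu];
    char    szName[16];
    RTStrPrintf(szName, sizeof(szName), "EMT-%u", idCpu);
    /* The entry point above casts pvArgs back to PUVMCPU. */
    return RTThreadCreate(&pUVCpu->vm.s.ThreadEMT, vmR3EmulationThread, pUVCpu,
                          _1M /* cbStack */, RTTHREADTYPE_EMULATION,
                          RTTHREADFLAGS_WAITABLE, szName);
}
#endif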


#if 0 /* not used */
/**
 * Wait for VM to be resumed. Handle events like vmR3EmulationThread does.
 * In case the VM is stopped, clean up and long jump to the main EMT loop.
 *
 * @returns VINF_SUCCESS or doesn't return.
 * @param   pVM     VM handle.
 */
VMMR3DECL(int) VMR3WaitForResume(PVM pVM)
{
    /*
     * The request loop.
     */
    PUVMCPU pUVCpu;
    PUVM    pUVM = pVM->pUVM;
    VMSTATE enmBefore;
    int     rc;

    pUVCpu = (PUVMCPU)RTTlsGet(pUVM->vm.s.idxTLS);
    AssertReturn(pUVCpu, VERR_INTERNAL_ERROR);

    for (;;)
    {

        /*
         * Pending requests that need servicing?
         *
         * We check for state changes in addition to status codes when
         * servicing requests. (Look after the ifs.)
         */
        enmBefore = pVM->enmVMState;
        if (    VM_FF_ISSET(pVM, VM_FF_TERMINATE)
            ||  pUVM->vm.s.fTerminateEMT)
        {
            rc = VINF_EM_TERMINATE;
            break;
        }
        else if (pUVM->vm.s.pReqs)
        {
            /*
             * Service execute in any EMT request.
             */
            rc = VMR3ReqProcessU(pUVM, VMCPUID_ANY);
            Log(("vmR3EmulationThread: Req rc=%Rrc, VM state %d -> %d\n", rc, enmBefore, pVM->enmVMState));
        }
        else if (pUVCpu->vm.s.pReqs)
        {
            /*
             * Service execute in specific EMT request.
             */
            rc = VMR3ReqProcessU(pUVM, pUVCpu->idCpu);
            Log(("vmR3EmulationThread: Req (cpu=%u) rc=%Rrc, VM state %d -> %d\n", pUVCpu->idCpu, rc, enmBefore, pVM->enmVMState));
        }
        else if (VM_FF_ISSET(pVM, VM_FF_DBGF))
        {
            /*
             * Service the debugger request.
             */
            rc = DBGFR3VMMForcedAction(pVM);
            Log(("vmR3EmulationThread: Dbg rc=%Rrc, VM state %d -> %d\n", rc, enmBefore, pVM->enmVMState));
        }
        else if (VM_FF_ISSET(pVM, VM_FF_RESET))
        {
            /*
             * Service a delayed reset request.
             */
            rc = VMR3Reset(pVM);
            VM_FF_CLEAR(pVM, VM_FF_RESET);
            Log(("vmR3EmulationThread: Reset rc=%Rrc, VM state %d -> %d\n", rc, enmBefore, pVM->enmVMState));
        }
        else
        {
            /*
             * Nothing important is pending, so wait for something.
             */
            rc = VMR3WaitU(pUVCpu);
            if (RT_FAILURE(rc))
                break;
        }

        /*
         * Check for termination requests, these are extremely high priority.
         */
        if (    rc == VINF_EM_TERMINATE
            ||  VM_FF_ISSET(pVM, VM_FF_TERMINATE)
            ||  pUVM->vm.s.fTerminateEMT)
            break;

        /*
         * Some requests (both VMR3Req* and the DBGF) can potentially
         * resume or start the VM, in that case we'll get a change in
         * VM status indicating that we're now running.
         */
        if (    RT_SUCCESS(rc)
            &&  enmBefore != pVM->enmVMState
            &&  pVM->enmVMState == VMSTATE_RUNNING)
        {
            /* Only valid exit reason. */
            return VINF_SUCCESS;
        }

    } /* forever */

    /* Return to the main loop in vmR3EmulationThread, which will clean up for us. */
    longjmp(pUVCpu->vm.s.emtJumpEnv, 1);
}
#endif

/**
 * Gets the name of a halt method.
 *
 * @returns Pointer to a read only string.
 * @param   enmMethod   The method.
 */
static const char *vmR3GetHaltMethodName(VMHALTMETHOD enmMethod)
{
    switch (enmMethod)
    {
        case VMHALTMETHOD_BOOTSTRAP:    return "bootstrap";
        case VMHALTMETHOD_DEFAULT:      return "default";
        case VMHALTMETHOD_OLD:          return "old";
        case VMHALTMETHOD_1:            return "method1";
        //case VMHALTMETHOD_2:          return "method2";
        case VMHALTMETHOD_GLOBAL_1:     return "global1";
        default:                        return "unknown";
    }
}


/**
 * The old halt loop.
 */
static DECLCALLBACK(int) vmR3HaltOldDoHalt(PUVMCPU pUVCpu, const uint32_t fMask, uint64_t /* u64Now*/)
{
    /*
     * Halt loop.
     */
    PVM    pVM   = pUVCpu->pVM;
    PVMCPU pVCpu = pUVCpu->pVCpu;

    int rc = VINF_SUCCESS;
    ASMAtomicWriteBool(&pUVCpu->vm.s.fWait, true);
    //unsigned cLoops = 0;
    for (;;)
    {
        /*
         * Work the timers and check if we can exit.
         * The poll call gives us the ticks left to the next event in
         * addition to perhaps set an FF.
         */
        STAM_REL_PROFILE_START(&pUVCpu->vm.s.StatHaltTimers, b);
        TMR3TimerQueuesDo(pVM);
        STAM_REL_PROFILE_STOP(&pUVCpu->vm.s.StatHaltTimers, b);
        if (    VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK)
            ||  VMCPU_FF_ISPENDING(pVCpu, fMask))
            break;
        uint64_t u64NanoTS = TMVirtualToNano(pVM, TMTimerPoll(pVM));
        if (    VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK)
            ||  VMCPU_FF_ISPENDING(pVCpu, fMask))
            break;

        /*
         * Wait for a while. Someone will wake us up or interrupt the call if
         * anything needs our attention.
         */
        if (u64NanoTS < 50000)
        {
            //RTLogPrintf("u64NanoTS=%RI64 cLoops=%d spin\n", u64NanoTS, cLoops++);
            /* spin */;
        }
        else
        {
            VMMR3YieldStop(pVM);
            //uint64_t u64Start = RTTimeNanoTS();
            if (u64NanoTS < 870000) /* this is a bit speculative... works fine on linux. */
            {
                //RTLogPrintf("u64NanoTS=%RI64 cLoops=%d yield", u64NanoTS, cLoops++);
                STAM_REL_PROFILE_START(&pUVCpu->vm.s.StatHaltYield, a);
                RTThreadYield(); /* this is the best we can do here */
                STAM_REL_PROFILE_STOP(&pUVCpu->vm.s.StatHaltYield, a);
            }
            else if (u64NanoTS < 2000000)
            {
                //RTLogPrintf("u64NanoTS=%RI64 cLoops=%d sleep 1ms", u64NanoTS, cLoops++);
                STAM_REL_PROFILE_START(&pUVCpu->vm.s.StatHaltBlock, a);
                rc = RTSemEventWait(pUVCpu->vm.s.EventSemWait, 1);
                STAM_REL_PROFILE_STOP(&pUVCpu->vm.s.StatHaltBlock, a);
            }
            else
            {
                //RTLogPrintf("u64NanoTS=%RI64 cLoops=%d sleep %dms", u64NanoTS, cLoops++, (uint32_t)RT_MIN((u64NanoTS - 500000) / 1000000, 15));
                STAM_REL_PROFILE_START(&pUVCpu->vm.s.StatHaltBlock, a);
                rc = RTSemEventWait(pUVCpu->vm.s.EventSemWait, RT_MIN((u64NanoTS - 1000000) / 1000000, 15));
                STAM_REL_PROFILE_STOP(&pUVCpu->vm.s.StatHaltBlock, a);
            }
            //uint64_t u64Slept = RTTimeNanoTS() - u64Start;
            //RTLogPrintf(" -> rc=%Rrc in %RU64 ns / %RI64 ns delta\n", rc, u64Slept, u64NanoTS - u64Slept);
        }
        if (rc == VERR_TIMEOUT)
            rc = VINF_SUCCESS;
        else if (RT_FAILURE(rc))
        {
            AssertRC(rc != VERR_INTERRUPTED);
            AssertMsgFailed(("RTSemEventWait->%Rrc\n", rc));
            ASMAtomicUoWriteBool(&pUVCpu->vm.s.fTerminateEMT, true);
            VM_FF_SET(pVM, VM_FF_TERMINATE);
            rc = VERR_INTERNAL_ERROR;
            break;
        }
    }

    ASMAtomicUoWriteBool(&pUVCpu->vm.s.fWait, false);
    return rc;
}
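
/*
 * Illustrative sketch (not part of the original file): the wait tiers used
 * above, factored out for clarity. For example, with 1.2 ms to the next
 * timer event the loop does a 1 ms semaphore wait; with 8 ms to go it waits
 * min((8 - 1), 15) = 7 ms. The enum and helper names are made up here.
 */
#if 0 /* illustrative sketch */
typedef enum HALTOLDACTION { HALTOLD_SPIN, HALTOLD_YIELD, HALTOLD_SLEEP } HALTOLDACTION;

static HALTOLDACTION vmR3HaltOldPickActionSketch(uint64_t cNsToNext, uint32_t *pcMsSleep)
{
    if (cNsToNext < 50000)          /* < 0.05 ms: just spin. */
        return HALTOLD_SPIN;
    if (cNsToNext < 870000)         /* < 0.87 ms: RTThreadYield. */
        return HALTOLD_YIELD;
    if (cNsToNext < 2000000)        /* < 2 ms: minimum 1 ms wait. */
        *pcMsSleep = 1;
    else                            /* otherwise 1..15 ms. */
        *pcMsSleep = (uint32_t)RT_MIN((cNsToNext - 1000000) / 1000000, 15);
    return HALTOLD_SLEEP;
}
#endif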


/**
 * Initialize the configuration of halt method 1 & 2.
 *
 * @return VBox status code. Failure on invalid CFGM data.
 * @param   pUVM    Pointer to the user mode VM structure.
 */
static int vmR3HaltMethod12ReadConfigU(PUVM pUVM)
{
    /*
     * The defaults.
     */
#if 1 /* DEBUGGING STUFF - REMOVE LATER */
    pUVM->vm.s.Halt.Method12.u32LagBlockIntervalDivisorCfg = 4;
    pUVM->vm.s.Halt.Method12.u32MinBlockIntervalCfg        =   2*1000000;
    pUVM->vm.s.Halt.Method12.u32MaxBlockIntervalCfg        =  75*1000000;
    pUVM->vm.s.Halt.Method12.u32StartSpinningCfg           =  30*1000000;
    pUVM->vm.s.Halt.Method12.u32StopSpinningCfg            =  20*1000000;
#else
    pUVM->vm.s.Halt.Method12.u32LagBlockIntervalDivisorCfg = 4;
    pUVM->vm.s.Halt.Method12.u32MinBlockIntervalCfg        =   5*1000000;
    pUVM->vm.s.Halt.Method12.u32MaxBlockIntervalCfg        = 200*1000000;
    pUVM->vm.s.Halt.Method12.u32StartSpinningCfg           =  20*1000000;
    pUVM->vm.s.Halt.Method12.u32StopSpinningCfg            =   2*1000000;
#endif

    /*
     * Query overrides.
     *
     * I don't have time to bother with niceties such as invalid value checks
     * here right now. Sorry.
     */
    PCFGMNODE pCfg = CFGMR3GetChild(CFGMR3GetRoot(pUVM->pVM), "/VMM/HaltedMethod1");
    if (pCfg)
    {
        uint32_t u32;
        if (RT_SUCCESS(CFGMR3QueryU32(pCfg, "LagBlockIntervalDivisor", &u32)))
            pUVM->vm.s.Halt.Method12.u32LagBlockIntervalDivisorCfg = u32;
        if (RT_SUCCESS(CFGMR3QueryU32(pCfg, "MinBlockInterval", &u32)))
            pUVM->vm.s.Halt.Method12.u32MinBlockIntervalCfg = u32;
        if (RT_SUCCESS(CFGMR3QueryU32(pCfg, "MaxBlockInterval", &u32)))
            pUVM->vm.s.Halt.Method12.u32MaxBlockIntervalCfg = u32;
        if (RT_SUCCESS(CFGMR3QueryU32(pCfg, "StartSpinning", &u32)))
            pUVM->vm.s.Halt.Method12.u32StartSpinningCfg = u32;
        if (RT_SUCCESS(CFGMR3QueryU32(pCfg, "StopSpinning", &u32)))
            pUVM->vm.s.Halt.Method12.u32StopSpinningCfg = u32;
        LogRel(("HaltedMethod1 config: %d/%d/%d/%d/%d\n",
                pUVM->vm.s.Halt.Method12.u32LagBlockIntervalDivisorCfg,
                pUVM->vm.s.Halt.Method12.u32MinBlockIntervalCfg,
                pUVM->vm.s.Halt.Method12.u32MaxBlockIntervalCfg,
                pUVM->vm.s.Halt.Method12.u32StartSpinningCfg,
                pUVM->vm.s.Halt.Method12.u32StopSpinningCfg));
    }

    return VINF_SUCCESS;
}
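
/*
 * Illustrative sketch (not part of the original file): one way the overrides
 * queried above could be injected from VM construction code. CFGMR3InsertNode
 * and CFGMR3InsertInteger are the regular CFGM primitives; the helper name
 * and the assumption that a VMM node already exists are illustrative only,
 * and the values are examples rather than recommendations.
 */
#if 0 /* illustrative sketch */
static int vmR3HaltMethod1ConfigSketch(PVM pVM)
{
    PCFGMNODE pVmm = CFGMR3GetChild(CFGMR3GetRoot(pVM), "VMM");
    PCFGMNODE pCfg;
    int rc = CFGMR3InsertNode(pVmm, "HaltedMethod1", &pCfg);
    if (RT_SUCCESS(rc))
        rc = CFGMR3InsertInteger(pCfg, "MinBlockInterval", 5*1000000 /* ns */);
    if (RT_SUCCESS(rc))
        rc = CFGMR3InsertInteger(pCfg, "StartSpinning", 20*1000000 /* ns of lag */);
    return rc;
}
#endif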


/**
 * Initialize halt method 1.
 *
 * @return VBox status code.
 * @param   pUVM    Pointer to the user mode VM structure.
 */
static DECLCALLBACK(int) vmR3HaltMethod1Init(PUVM pUVM)
{
    return vmR3HaltMethod12ReadConfigU(pUVM);
}


/**
 * Method 1 - Block whenever possible, and when lagging behind
 * switch to spinning for 10-30ms with occasional blocking until
 * the lag has been eliminated.
 */
static DECLCALLBACK(int) vmR3HaltMethod1Halt(PUVMCPU pUVCpu, const uint32_t fMask, uint64_t u64Now)
{
    PUVM    pUVM  = pUVCpu->pUVM;
    PVMCPU  pVCpu = pUVCpu->pVCpu;
    PVM     pVM   = pUVCpu->pVM;

    /*
     * To simplify things, we decide up-front whether we should switch to spinning or
     * not. This makes some ASSUMPTIONS about the cause of the spinning (PIT/RTC/PCNet)
     * and that it will generate interrupts or other events that will cause us to exit
     * the halt loop.
     */
    bool fBlockOnce = false;
    bool fSpinning = false;
    uint32_t u32CatchUpPct = TMVirtualSyncGetCatchUpPct(pVM);
    if (u32CatchUpPct /* non-zero if catching up */)
    {
        if (pUVCpu->vm.s.Halt.Method12.u64StartSpinTS)
        {
            fSpinning = TMVirtualSyncGetLag(pVM) >= pUVM->vm.s.Halt.Method12.u32StopSpinningCfg;
            if (fSpinning)
            {
                uint64_t u64Lag = TMVirtualSyncGetLag(pVM);
                fBlockOnce = u64Now - pUVCpu->vm.s.Halt.Method12.u64LastBlockTS
                           > RT_MAX(pUVM->vm.s.Halt.Method12.u32MinBlockIntervalCfg,
                                    RT_MIN(u64Lag / pUVM->vm.s.Halt.Method12.u32LagBlockIntervalDivisorCfg,
                                           pUVM->vm.s.Halt.Method12.u32MaxBlockIntervalCfg));
            }
            else
            {
                //RTLogRelPrintf("Stopped spinning (%u ms)\n", (u64Now - pUVCpu->vm.s.Halt.Method12.u64StartSpinTS) / 1000000);
                pUVCpu->vm.s.Halt.Method12.u64StartSpinTS = 0;
            }
        }
        else
        {
            fSpinning = TMVirtualSyncGetLag(pVM) >= pUVM->vm.s.Halt.Method12.u32StartSpinningCfg;
            if (fSpinning)
                pUVCpu->vm.s.Halt.Method12.u64StartSpinTS = u64Now;
        }
    }
    else if (pUVCpu->vm.s.Halt.Method12.u64StartSpinTS)
    {
        //RTLogRelPrintf("Stopped spinning (%u ms)\n", (u64Now - pUVCpu->vm.s.Halt.Method12.u64StartSpinTS) / 1000000);
        pUVCpu->vm.s.Halt.Method12.u64StartSpinTS = 0;
    }

    /*
     * Halt loop.
     */
    int rc = VINF_SUCCESS;
    ASMAtomicWriteBool(&pUVCpu->vm.s.fWait, true);
    unsigned cLoops = 0;
    for (;; cLoops++)
    {
        /*
         * Work the timers and check if we can exit.
         */
        STAM_REL_PROFILE_START(&pUVCpu->vm.s.StatHaltTimers, b);
        TMR3TimerQueuesDo(pVM);
        STAM_REL_PROFILE_STOP(&pUVCpu->vm.s.StatHaltTimers, b);
        if (    VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK)
            ||  VMCPU_FF_ISPENDING(pVCpu, fMask))
            break;

        /*
         * Estimate time left to the next event.
         */
        uint64_t u64NanoTS = TMVirtualToNano(pVM, TMTimerPoll(pVM));
        if (    VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK)
            ||  VMCPU_FF_ISPENDING(pVCpu, fMask))
            break;

        /*
         * Block if we're not spinning and the interval isn't all that small.
         */
        if (    (   !fSpinning
                 || fBlockOnce)
#if 1 /* DEBUGGING STUFF - REMOVE LATER */
            &&  u64NanoTS >= 100000) /* 0.100 ms */
#else
            &&  u64NanoTS >= 250000) /* 0.250 ms */
#endif
        {
            const uint64_t Start = pUVCpu->vm.s.Halt.Method12.u64LastBlockTS = RTTimeNanoTS();
            VMMR3YieldStop(pVM);

            uint32_t cMilliSecs = RT_MIN(u64NanoTS / 1000000, 15);
            if (cMilliSecs <= pUVCpu->vm.s.Halt.Method12.cNSBlockedTooLongAvg)
                cMilliSecs = 1;
            else
                cMilliSecs -= pUVCpu->vm.s.Halt.Method12.cNSBlockedTooLongAvg;
            //RTLogRelPrintf("u64NanoTS=%RI64 cLoops=%3d sleep %02dms (%7RU64) ", u64NanoTS, cLoops, cMilliSecs, u64NanoTS);
            STAM_REL_PROFILE_START(&pUVCpu->vm.s.StatHaltBlock, a);
            rc = RTSemEventWait(pUVCpu->vm.s.EventSemWait, cMilliSecs);
            STAM_REL_PROFILE_STOP(&pUVCpu->vm.s.StatHaltBlock, a);
            if (rc == VERR_TIMEOUT)
                rc = VINF_SUCCESS;
            else if (RT_FAILURE(rc))
            {
                AssertRC(rc != VERR_INTERRUPTED);
                AssertMsgFailed(("RTSemEventWait->%Rrc\n", rc));
                ASMAtomicUoWriteBool(&pUVCpu->vm.s.fTerminateEMT, true);
                VM_FF_SET(pVM, VM_FF_TERMINATE);
                rc = VERR_INTERNAL_ERROR;
                break;
            }

            /*
             * Calc the statistics.
             * Update averages every 16th time, and flush parts of the history every 64th time.
             */
            const uint64_t Elapsed = RTTimeNanoTS() - Start;
            pUVCpu->vm.s.Halt.Method12.cNSBlocked += Elapsed;
            if (Elapsed > u64NanoTS)
                pUVCpu->vm.s.Halt.Method12.cNSBlockedTooLong += Elapsed - u64NanoTS;
            pUVCpu->vm.s.Halt.Method12.cBlocks++;
            if (!(pUVCpu->vm.s.Halt.Method12.cBlocks & 0xf))
            {
                pUVCpu->vm.s.Halt.Method12.cNSBlockedTooLongAvg = pUVCpu->vm.s.Halt.Method12.cNSBlockedTooLong / pUVCpu->vm.s.Halt.Method12.cBlocks;
                if (!(pUVCpu->vm.s.Halt.Method12.cBlocks & 0x3f))
                {
                    pUVCpu->vm.s.Halt.Method12.cNSBlockedTooLong = pUVCpu->vm.s.Halt.Method12.cNSBlockedTooLongAvg * 0x40;
                    pUVCpu->vm.s.Halt.Method12.cBlocks = 0x40;
                }
            }
            //RTLogRelPrintf(" -> %7RU64 ns / %7RI64 ns delta%s\n", Elapsed, Elapsed - u64NanoTS, fBlockOnce ? " (block once)" : "");

            /*
             * Clear the block once flag if we actually blocked.
             */
            if (    fBlockOnce
                &&  Elapsed > 100000 /* 0.1 ms */)
                fBlockOnce = false;
        }
    }
    //if (fSpinning) RTLogRelPrintf("spun for %RU64 ns %u loops; lag=%RU64 pct=%d\n", RTTimeNanoTS() - u64Now, cLoops, TMVirtualSyncGetLag(pVM), u32CatchUpPct);

    ASMAtomicUoWriteBool(&pUVCpu->vm.s.fWait, false);
    return rc;
}
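
/*
 * Illustrative sketch (not part of the original file): the forced-block
 * interval used while spinning, written as a standalone expression. With the
 * debug defaults above (divisor 4, min 2 ms, max 75 ms) a virtual sync lag of
 * 40 ms gives 40/4 = 10 ms between forced blocks, while a 1 s lag clamps to
 * 75 ms. The helper name is made up.
 */
#if 0 /* illustrative sketch */
static uint64_t vmR3Method1BlockIntervalSketch(PUVM pUVM, uint64_t u64Lag)
{
    return RT_MAX(pUVM->vm.s.Halt.Method12.u32MinBlockIntervalCfg,
                  RT_MIN(u64Lag / pUVM->vm.s.Halt.Method12.u32LagBlockIntervalDivisorCfg,
                         pUVM->vm.s.Halt.Method12.u32MaxBlockIntervalCfg));
}
#endif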


/**
 * Initialize the global 1 halt method.
 *
 * @return VBox status code.
 * @param   pUVM    Pointer to the user mode VM structure.
 */
static DECLCALLBACK(int) vmR3HaltGlobal1Init(PUVM pUVM)
{
    return VINF_SUCCESS;
}


/**
 * The global 1 halt method - Block in GVMM (ring-0) and let it
 * try to take care of the global scheduling of EMT threads.
 */
static DECLCALLBACK(int) vmR3HaltGlobal1Halt(PUVMCPU pUVCpu, const uint32_t fMask, uint64_t u64Now)
{
    PUVM    pUVM  = pUVCpu->pUVM;
    PVMCPU  pVCpu = pUVCpu->pVCpu;
    PVM     pVM   = pUVCpu->pVM;
    Assert(VMMGetCpu(pVM) == pVCpu);

    /*
     * Halt loop.
     */
    int rc = VINF_SUCCESS;
    ASMAtomicWriteBool(&pUVCpu->vm.s.fWait, true);
    unsigned cLoops = 0;
    for (;; cLoops++)
    {
        /*
         * Work the timers and check if we can exit.
         */
        STAM_REL_PROFILE_START(&pUVCpu->vm.s.StatHaltTimers, b);
        TMR3TimerQueuesDo(pVM);
        STAM_REL_PROFILE_STOP(&pUVCpu->vm.s.StatHaltTimers, b);
        if (    VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK)
            ||  VMCPU_FF_ISPENDING(pVCpu, fMask))
            break;

        /*
         * Estimate time left to the next event.
         */
        uint64_t u64Delta;
        uint64_t u64GipTime = TMTimerPollGIP(pVM, &u64Delta);
        if (    VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK)
            ||  VMCPU_FF_ISPENDING(pVCpu, fMask))
            break;

        /*
         * Block if we're not spinning and the interval isn't all that small.
         */
        if (u64Delta > 50000 /* 0.050ms */)
        {
            VMMR3YieldStop(pVM);
            if (    VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK)
                ||  VMCPU_FF_ISPENDING(pVCpu, fMask))
                break;

            //RTLogRelPrintf("u64NanoTS=%RI64 cLoops=%3d sleep %02dms (%7RU64) ", u64NanoTS, cLoops, cMilliSecs, u64NanoTS);
            STAM_REL_PROFILE_START(&pUVCpu->vm.s.StatHaltBlock, c);
            rc = SUPCallVMMR0Ex(pVM->pVMR0, pVCpu->idCpu, VMMR0_DO_GVMM_SCHED_HALT, u64GipTime, NULL);
            STAM_REL_PROFILE_STOP(&pUVCpu->vm.s.StatHaltBlock, c);
            if (rc == VERR_INTERRUPTED)
                rc = VINF_SUCCESS;
            else if (RT_FAILURE(rc))
            {
                AssertMsgFailed(("VMMR0_DO_GVMM_SCHED_HALT->%Rrc\n", rc));
                ASMAtomicUoWriteBool(&pUVCpu->vm.s.fTerminateEMT, true);
                VM_FF_SET(pVM, VM_FF_TERMINATE);
                rc = VERR_INTERNAL_ERROR;
                break;
            }
        }
        /*
         * When spinning, call upon the GVMM and do some wake-ups once
         * in a while; it's not like we're actually busy or anything.
         */
        else if (!(cLoops & 0x1fff))
        {
            STAM_REL_PROFILE_START(&pUVCpu->vm.s.StatHaltYield, d);
            rc = SUPCallVMMR0Ex(pVM->pVMR0, pVCpu->idCpu, VMMR0_DO_GVMM_SCHED_POLL, false /* don't yield */, NULL);
            STAM_REL_PROFILE_STOP(&pUVCpu->vm.s.StatHaltYield, d);
        }
    }
    //if (fSpinning) RTLogRelPrintf("spun for %RU64 ns %u loops; lag=%RU64 pct=%d\n", RTTimeNanoTS() - u64Now, cLoops, TMVirtualSyncGetLag(pVM), u32CatchUpPct);

    ASMAtomicUoWriteBool(&pUVCpu->vm.s.fWait, false);
    return rc;
}


/**
 * The global 1 halt method - VMR3Wait() worker.
 *
 * @returns VBox status code.
 * @param   pUVCpu  Pointer to the user mode VMCPU structure.
 */
static DECLCALLBACK(int) vmR3HaltGlobal1Wait(PUVMCPU pUVCpu)
{
    ASMAtomicWriteBool(&pUVCpu->vm.s.fWait, true);

    PVM    pVM   = pUVCpu->pUVM->pVM;
    PVMCPU pVCpu = VMMGetCpu(pVM);
    Assert(pVCpu->idCpu == pUVCpu->idCpu);

    int rc = VINF_SUCCESS;
    for (;;)
    {
        /*
         * Check Relevant FFs.
         */
        if (    VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_SUSPENDED_MASK)
            ||  VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_EXTERNAL_SUSPENDED_MASK))
            break;

        /*
         * Wait for a while. Someone will wake us up or interrupt the call if
         * anything needs our attention.
         */
        rc = SUPCallVMMR0Ex(pVM->pVMR0, pVCpu->idCpu, VMMR0_DO_GVMM_SCHED_HALT, RTTimeNanoTS() + 1000000000 /* +1s */, NULL);
        if (rc == VERR_INTERRUPTED)
            rc = VINF_SUCCESS;
        else if (RT_FAILURE(rc))
        {
            AssertMsgFailed(("VMMR0_DO_GVMM_SCHED_HALT->%Rrc\n", rc));
            ASMAtomicUoWriteBool(&pUVCpu->vm.s.fTerminateEMT, true);
            VM_FF_SET(pVM, VM_FF_TERMINATE);
            rc = VERR_INTERNAL_ERROR;
            break;
        }

    }

    ASMAtomicUoWriteBool(&pUVCpu->vm.s.fWait, false);
    return rc;
}


/**
 * The global 1 halt method - VMR3NotifyFF() worker.
 *
 * @param   pUVCpu          Pointer to the user mode VMCPU structure.
 * @param   fNotifiedREM    See VMR3NotifyFF().
 */
static DECLCALLBACK(void) vmR3HaltGlobal1NotifyFF(PUVMCPU pUVCpu, bool fNotifiedREM)
{
    if (pUVCpu->vm.s.fWait)
    {
        int rc = SUPCallVMMR0Ex(pUVCpu->pVM->pVMR0, pUVCpu->idCpu, VMMR0_DO_GVMM_SCHED_WAKE_UP, 0, NULL);
        AssertRC(rc);
    }
    else if (!fNotifiedREM)
        REMR3NotifyFF(pUVCpu->pVM);
}


/**
 * Bootstrap VMR3Wait() worker.
 *
 * @returns VBox status code.
 * @param   pUVCpu  Pointer to the user mode VMCPU structure.
 */
static DECLCALLBACK(int) vmR3BootstrapWait(PUVMCPU pUVCpu)
{
    PUVM pUVM = pUVCpu->pUVM;

    ASMAtomicWriteBool(&pUVCpu->vm.s.fWait, true);

    int rc = VINF_SUCCESS;
    for (;;)
    {
        /*
         * Check Relevant FFs.
         */
        if (pUVM->vm.s.pReqs)   /* global requests pending? */
            break;
        if (pUVCpu->vm.s.pReqs) /* local requests pending? */
            break;

        if (    pUVCpu->pVM
            &&  (   VM_FF_ISPENDING(pUVCpu->pVM, VM_FF_EXTERNAL_SUSPENDED_MASK)
                 || VMCPU_FF_ISPENDING(VMMGetCpu(pUVCpu->pVM), VMCPU_FF_EXTERNAL_SUSPENDED_MASK)
                )
           )
            break;
        if (pUVCpu->vm.s.fTerminateEMT)
            break;

        /*
         * Wait for a while. Someone will wake us up or interrupt the call if
         * anything needs our attention.
         */
        rc = RTSemEventWait(pUVCpu->vm.s.EventSemWait, 1000);
        if (rc == VERR_TIMEOUT)
            rc = VINF_SUCCESS;
        else if (RT_FAILURE(rc))
        {
            AssertMsgFailed(("RTSemEventWait->%Rrc\n", rc));
            ASMAtomicUoWriteBool(&pUVCpu->vm.s.fTerminateEMT, true);
            if (pUVCpu->pVM)
                VM_FF_SET(pUVCpu->pVM, VM_FF_TERMINATE);
            rc = VERR_INTERNAL_ERROR;
            break;
        }

    }

    ASMAtomicUoWriteBool(&pUVCpu->vm.s.fWait, false);
    return rc;
}


/**
 * Bootstrap VMR3NotifyFF() worker.
 *
 * @param   pUVCpu          Pointer to the user mode VMCPU structure.
 * @param   fNotifiedREM    See VMR3NotifyFF().
 */
static DECLCALLBACK(void) vmR3BootstrapNotifyFF(PUVMCPU pUVCpu, bool fNotifiedREM)
{
    if (pUVCpu->vm.s.fWait)
    {
        int rc = RTSemEventSignal(pUVCpu->vm.s.EventSemWait);
        AssertRC(rc);
    }
}


/**
 * Default VMR3Wait() worker.
 *
 * @returns VBox status code.
 * @param   pUVCpu  Pointer to the user mode VMCPU structure.
 */
static DECLCALLBACK(int) vmR3DefaultWait(PUVMCPU pUVCpu)
{
    ASMAtomicWriteBool(&pUVCpu->vm.s.fWait, true);

    PVM    pVM   = pUVCpu->pVM;
    PVMCPU pVCpu = pUVCpu->pVCpu;
    int    rc    = VINF_SUCCESS;
    for (;;)
    {
        /*
         * Check Relevant FFs.
         */
        if (    VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_SUSPENDED_MASK)
            ||  VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_EXTERNAL_SUSPENDED_MASK))
            break;

        /*
         * Wait for a while. Someone will wake us up or interrupt the call if
         * anything needs our attention.
         */
        rc = RTSemEventWait(pUVCpu->vm.s.EventSemWait, 1000);
        if (rc == VERR_TIMEOUT)
            rc = VINF_SUCCESS;
        else if (RT_FAILURE(rc))
        {
            AssertMsgFailed(("RTSemEventWait->%Rrc\n", rc));
            ASMAtomicUoWriteBool(&pUVCpu->vm.s.fTerminateEMT, true);
            VM_FF_SET(pVM, VM_FF_TERMINATE);
            rc = VERR_INTERNAL_ERROR;
            break;
        }

    }

    ASMAtomicUoWriteBool(&pUVCpu->vm.s.fWait, false);
    return rc;
}


/**
 * Default VMR3NotifyFF() worker.
 *
 * @param   pUVCpu          Pointer to the user mode VMCPU structure.
 * @param   fNotifiedREM    See VMR3NotifyFF().
 */
static DECLCALLBACK(void) vmR3DefaultNotifyFF(PUVMCPU pUVCpu, bool fNotifiedREM)
{
    if (pUVCpu->vm.s.fWait)
    {
        int rc = RTSemEventSignal(pUVCpu->vm.s.EventSemWait);
        AssertRC(rc);
    }
    else if (!fNotifiedREM)
        REMR3NotifyFF(pUVCpu->pVM);
}


/**
 * Array with halt method descriptors.
 * VMINT::iHaltMethod contains an index into this array.
 */
static const struct VMHALTMETHODDESC
{
    /** The halt method id. */
    VMHALTMETHOD enmHaltMethod;
    /** The init function for loading config and initialize variables. */
    DECLR3CALLBACKMEMBER(int,  pfnInit,(PUVM pUVM));
    /** The term function. */
    DECLR3CALLBACKMEMBER(void, pfnTerm,(PUVM pUVM));
    /** The halt function. */
    DECLR3CALLBACKMEMBER(int,  pfnHalt,(PUVMCPU pUVCpu, const uint32_t fMask, uint64_t u64Now));
    /** The wait function. */
    DECLR3CALLBACKMEMBER(int,  pfnWait,(PUVMCPU pUVCpu));
    /** The notifyFF function. */
    DECLR3CALLBACKMEMBER(void, pfnNotifyFF,(PUVMCPU pUVCpu, bool fNotifiedREM));
} g_aHaltMethods[] =
{
    { VMHALTMETHOD_BOOTSTRAP, NULL,                NULL, NULL,                vmR3BootstrapWait,   vmR3BootstrapNotifyFF },
    { VMHALTMETHOD_OLD,       NULL,                NULL, vmR3HaltOldDoHalt,   vmR3DefaultWait,     vmR3DefaultNotifyFF },
    { VMHALTMETHOD_1,         vmR3HaltMethod1Init, NULL, vmR3HaltMethod1Halt, vmR3DefaultWait,     vmR3DefaultNotifyFF },
    { VMHALTMETHOD_GLOBAL_1,  vmR3HaltGlobal1Init, NULL, vmR3HaltGlobal1Halt, vmR3HaltGlobal1Wait, vmR3HaltGlobal1NotifyFF },
};
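
/*
 * Illustrative sketch (not part of the original file): the linear lookup that
 * vmR3SetHaltMethodU (below) performs on this table, written as a helper
 * returning -1 when no descriptor matches. The helper name is made up.
 */
#if 0 /* illustrative sketch */
static int vmR3FindHaltMethodSketch(VMHALTMETHOD enmHaltMethod)
{
    for (unsigned i = 0; i < RT_ELEMENTS(g_aHaltMethods); i++)
        if (g_aHaltMethods[i].enmHaltMethod == enmHaltMethod)
            return (int)i;
    return -1;
}
#endif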


/**
 * Notify the emulation thread (EMT) about pending Forced Action (FF).
 *
 * This function is called by threads other than EMT to make
 * sure EMT wakes up and promptly services an FF request.
 *
 * @param   pVM             VM handle.
 * @param   fNotifiedREM    Set if REM has already been notified. If clear the
 *                          generic REMR3NotifyFF() method is called.
 */
VMMR3DECL(void) VMR3NotifyGlobalFF(PVM pVM, bool fNotifiedREM)
{
    PUVM pUVM = pVM->pUVM;

    LogFlow(("VMR3NotifyFF:\n"));
    /** @todo might want to have a 2nd look at this (SMP) */
    for (unsigned iCpu = 0; iCpu < pVM->cCPUs; iCpu++)
    {
        PUVMCPU pUVCpu = pVM->aCpus[iCpu].pUVCpu;
        g_aHaltMethods[pUVM->vm.s.iHaltMethod].pfnNotifyFF(pUVCpu, fNotifiedREM);
    }
}

/**
 * Notify the emulation thread (EMT) about pending Forced Action (FF).
 *
 * This function is called by threads other than EMT to make
 * sure EMT wakes up and promptly services an FF request.
 *
 * @param   pVCpu           VMCPU handle.
 * @param   fNotifiedREM    Set if REM has already been notified. If clear the
 *                          generic REMR3NotifyFF() method is called.
 */
VMMR3DECL(void) VMR3NotifyCpuFF(PVMCPU pVCpu, bool fNotifiedREM)
{
    PUVMCPU pUVCpu = pVCpu->pUVCpu;
    PUVM    pUVM   = pUVCpu->pUVM;

    LogFlow(("VMR3NotifyCpuFF:\n"));
    g_aHaltMethods[pUVM->vm.s.iHaltMethod].pfnNotifyFF(pUVCpu, fNotifiedREM);
}


/**
 * Notify the emulation thread (EMT) about pending Forced Action (FF).
 *
 * This function is called by threads other than EMT to make
 * sure EMT wakes up and promptly services an FF request.
 *
 * @param   pUVM            Pointer to the user mode VM structure.
 * @param   fNotifiedREM    Set if REM has already been notified. If clear the
 *                          generic REMR3NotifyFF() method is called.
 */
VMMR3DECL(void) VMR3NotifyGlobalFFU(PUVM pUVM, bool fNotifiedREM)
{
    LogFlow(("VMR3NotifyGlobalFFU:\n"));
    /** @todo might want to have a 2nd look at this (SMP) */
    for (unsigned iCpu = 0; iCpu < pUVM->cCpus; iCpu++)
    {
        PUVMCPU pUVCpu = &pUVM->aCpus[iCpu];
        g_aHaltMethods[pUVM->vm.s.iHaltMethod].pfnNotifyFF(pUVCpu, fNotifiedREM);
    }
}

/**
 * Notify the emulation thread (EMT) about pending Forced Action (FF).
 *
 * This function is called by threads other than EMT to make
 * sure EMT wakes up and promptly services an FF request.
 *
 * @param   pUVCpu          Pointer to the user mode VMCPU structure.
 * @param   fNotifiedREM    Set if REM has already been notified. If clear the
 *                          generic REMR3NotifyFF() method is called.
 */
VMMR3DECL(void) VMR3NotifyCpuFFU(PUVMCPU pUVCpu, bool fNotifiedREM)
{
    PUVM pUVM = pUVCpu->pUVM;

    LogFlow(("VMR3NotifyCpuFFU:\n"));
    g_aHaltMethods[pUVM->vm.s.iHaltMethod].pfnNotifyFF(pUVCpu, fNotifiedREM);
}


/**
 * Halted VM Wait.
 * Any external event will unblock the thread.
 *
 * @returns VINF_SUCCESS unless a fatal error occurred. In the latter
 *          case an appropriate status code is returned.
 * @param   pVM                 VM handle.
 * @param   pVCpu               VMCPU handle.
 * @param   fIgnoreInterrupts   If set, the VMCPU_FF_INTERRUPT_APIC and
 *                              VMCPU_FF_INTERRUPT_PIC flags are ignored.
 * @thread  The emulation thread.
 */
VMMR3DECL(int) VMR3WaitHalted(PVM pVM, PVMCPU pVCpu, bool fIgnoreInterrupts)
{
    LogFlow(("VMR3WaitHalted: fIgnoreInterrupts=%d\n", fIgnoreInterrupts));

    /*
     * Check Relevant FFs.
     */
    const uint32_t fMask = !fIgnoreInterrupts
        ? VMCPU_FF_EXTERNAL_HALTED_MASK
        : VMCPU_FF_EXTERNAL_HALTED_MASK & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC);
    if (    VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK)
        ||  VMCPU_FF_ISPENDING(pVCpu, fMask))
    {
        LogFlow(("VMR3WaitHalted: returns VINF_SUCCESS (FF %#x FFCPU %#x)\n", pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions));
        return VINF_SUCCESS;
    }

    /*
     * The yielder is suspended while we're halting; TM might have clock(s) running
     * only at certain times and needs to be notified.
     */
    VMMR3YieldSuspend(pVM);
    TMNotifyStartOfHalt(pVCpu);

    /*
     * Record halt averages for the last second.
     */
    PUVMCPU  pUVCpu = pVCpu->pUVCpu;
    uint64_t u64Now = RTTimeNanoTS();
    int64_t  off    = u64Now - pUVCpu->vm.s.u64HaltsStartTS;
    if (off > 1000000000)
    {
        if (off > _4G || !pUVCpu->vm.s.cHalts)
        {
            pUVCpu->vm.s.HaltInterval = 1000000000 /* 1 sec */;
            pUVCpu->vm.s.HaltFrequency = 1;
        }
        else
        {
            pUVCpu->vm.s.HaltInterval = (uint32_t)off / pUVCpu->vm.s.cHalts;
            pUVCpu->vm.s.HaltFrequency = ASMMultU64ByU32DivByU32(pUVCpu->vm.s.cHalts, 1000000000, (uint32_t)off);
        }
        pUVCpu->vm.s.u64HaltsStartTS = u64Now;
        pUVCpu->vm.s.cHalts = 0;
    }
    pUVCpu->vm.s.cHalts++;

    /*
     * Do the halt.
     */
    PUVM pUVM = pUVCpu->pUVM;
    int rc = g_aHaltMethods[pUVM->vm.s.iHaltMethod].pfnHalt(pUVCpu, fMask, u64Now);

    /*
     * Notify TM and resume the yielder.
     */
    TMNotifyEndOfHalt(pVCpu);
    VMMR3YieldResume(pVM);

    LogFlow(("VMR3WaitHalted: returns %Rrc (FF %#x)\n", rc, pVM->fGlobalForcedActions));
    return rc;
}
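
/*
 * Illustrative sketch (not part of the original file): the averaging above
 * with concrete numbers. If a VCPU halted 500 times over the last
 * 1250000000 ns, HaltInterval becomes 1250000000 / 500 = 2500000 ns and
 * HaltFrequency becomes 500 * 1000000000 / 1250000000 = 400 halts/sec;
 * ASMMultU64ByU32DivByU32 computes the latter without overflowing the
 * intermediate product. The helper name is made up.
 */
#if 0 /* illustrative sketch */
static uint64_t vmR3HaltFrequencySketch(uint32_t cHalts, uint32_t cNsElapsed)
{
    return ASMMultU64ByU32DivByU32(cHalts, 1000000000, cNsElapsed);
}
#endif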


/**
 * Suspended VM Wait.
 * Only a handful of forced actions will cause the function to
 * return to the caller.
 *
 * @returns VINF_SUCCESS unless a fatal error occurred. In the latter
 *          case an appropriate status code is returned.
 * @param   pUVCpu  Pointer to the user mode VMCPU structure.
 * @thread  The emulation thread.
 */
VMMR3DECL(int) VMR3WaitU(PUVMCPU pUVCpu)
{
    LogFlow(("VMR3WaitU:\n"));

    /*
     * Check Relevant FFs.
     */
    PVM    pVM   = pUVCpu->pVM;
    PVMCPU pVCpu = pUVCpu->pVCpu;

    if (    pVM
        &&  (   VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_SUSPENDED_MASK)
             || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_EXTERNAL_SUSPENDED_MASK)
            )
       )
    {
        LogFlow(("VMR3Wait: returns VINF_SUCCESS (FF %#x)\n", pVM->fGlobalForcedActions));
        return VINF_SUCCESS;
    }

    /*
     * Do waiting according to the halt method (so VMR3NotifyFF
     * doesn't have to special case anything).
     */
    PUVM pUVM = pUVCpu->pUVM;
    int rc = g_aHaltMethods[pUVM->vm.s.iHaltMethod].pfnWait(pUVCpu);
    LogFlow(("VMR3WaitU: returns %Rrc (FF %#x)\n", rc, pVM ? pVM->fGlobalForcedActions : 0));
    return rc;
}


/**
 * Changes the halt method.
 *
 * @returns VBox status code.
 * @param   pUVM            Pointer to the user mode VM structure.
 * @param   enmHaltMethod   The new halt method.
 * @thread  EMT.
 */
int vmR3SetHaltMethodU(PUVM pUVM, VMHALTMETHOD enmHaltMethod)
{
    PVM pVM = pUVM->pVM; Assert(pVM);
    VM_ASSERT_EMT(pVM);
    AssertReturn(enmHaltMethod > VMHALTMETHOD_INVALID && enmHaltMethod < VMHALTMETHOD_END, VERR_INVALID_PARAMETER);

    /*
     * Resolve default (can be overridden in the configuration).
     */
    if (enmHaltMethod == VMHALTMETHOD_DEFAULT)
    {
        uint32_t u32;
        int rc = CFGMR3QueryU32(CFGMR3GetChild(CFGMR3GetRoot(pVM), "VM"), "HaltMethod", &u32);
        if (RT_SUCCESS(rc))
        {
            enmHaltMethod = (VMHALTMETHOD)u32;
            if (enmHaltMethod <= VMHALTMETHOD_INVALID || enmHaltMethod >= VMHALTMETHOD_END)
                return VMSetError(pVM, VERR_INVALID_PARAMETER, RT_SRC_POS, N_("Invalid VM/HaltMethod value %d"), enmHaltMethod);
        }
        else if (rc == VERR_CFGM_VALUE_NOT_FOUND || rc == VERR_CFGM_CHILD_NOT_FOUND)
            return VMSetError(pVM, rc, RT_SRC_POS, N_("Failed to Query VM/HaltMethod as uint32_t"));
        else
            enmHaltMethod = VMHALTMETHOD_GLOBAL_1;
        //enmHaltMethod = VMHALTMETHOD_1;
        //enmHaltMethod = VMHALTMETHOD_OLD;
    }
    LogRel(("VM: Halt method %s (%d)\n", vmR3GetHaltMethodName(enmHaltMethod), enmHaltMethod));

    /*
     * Find the descriptor.
     */
    unsigned i = 0;
    while (     i < RT_ELEMENTS(g_aHaltMethods)
           &&   g_aHaltMethods[i].enmHaltMethod != enmHaltMethod)
        i++;
    AssertReturn(i < RT_ELEMENTS(g_aHaltMethods), VERR_INVALID_PARAMETER);

    /*
     * Terminate the old one.
     */
    if (    pUVM->vm.s.enmHaltMethod != VMHALTMETHOD_INVALID
        &&  g_aHaltMethods[pUVM->vm.s.iHaltMethod].pfnTerm)
    {
        g_aHaltMethods[pUVM->vm.s.iHaltMethod].pfnTerm(pUVM);
        pUVM->vm.s.enmHaltMethod = VMHALTMETHOD_INVALID;
    }

    /*
     * Init the new one.
     */
    memset(&pUVM->vm.s.Halt, 0, sizeof(pUVM->vm.s.Halt));
    if (g_aHaltMethods[i].pfnInit)
    {
        int rc = g_aHaltMethods[i].pfnInit(pUVM);
        AssertRCReturn(rc, rc);
    }
    pUVM->vm.s.enmHaltMethod = enmHaltMethod;

    ASMAtomicWriteU32(&pUVM->vm.s.iHaltMethod, i);
    return VINF_SUCCESS;
}
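
/*
 * Illustrative sketch (not part of the original file): overriding the default
 * halt method through the "VM/HaltMethod" value read above. Using the
 * VMHALTMETHOD enum constant avoids guessing its integer encoding; the helper
 * name and the assumption that the "VM" node does not exist yet are
 * illustrative only.
 */
#if 0 /* illustrative sketch */
static int vmR3SelectHaltMethodSketch(PVM pVM)
{
    PCFGMNODE pNode;
    int rc = CFGMR3InsertNode(CFGMR3GetRoot(pVM), "VM", &pNode);
    if (RT_SUCCESS(rc))
        rc = CFGMR3InsertInteger(pNode, "HaltMethod", VMHALTMETHOD_GLOBAL_1);
    return rc;
}
#endif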