VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/EMHwaccm.cpp@ 39895

Last change on this file since 39895 was 39685, checked in by vboxsync, 13 years ago

doc updates.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 20.3 KB
Line 
1/* $Id: EMHwaccm.cpp 39685 2011-12-30 01:29:33Z vboxsync $ */
2/** @file
3 * EM - Execution Monitor / Manager - hardware virtualization
4 */
5
6/*
7 * Copyright (C) 2006-2011 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/*******************************************************************************
19* Header Files *
20*******************************************************************************/
21#define LOG_GROUP LOG_GROUP_EM
22#include <VBox/vmm/em.h>
23#include <VBox/vmm/vmm.h>
24#include <VBox/vmm/csam.h>
25#include <VBox/vmm/selm.h>
26#include <VBox/vmm/trpm.h>
27#include <VBox/vmm/iom.h>
28#include <VBox/vmm/dbgf.h>
29#include <VBox/vmm/pgm.h>
30#include <VBox/vmm/rem.h>
31#include <VBox/vmm/tm.h>
32#include <VBox/vmm/mm.h>
33#include <VBox/vmm/ssm.h>
34#include <VBox/vmm/pdmapi.h>
35#include <VBox/vmm/pdmcritsect.h>
36#include <VBox/vmm/pdmqueue.h>
37#include <VBox/vmm/hwaccm.h>
38#include "EMInternal.h"
39#include "internal/em.h"
40#include <VBox/vmm/vm.h>
41#include <VBox/vmm/cpumdis.h>
42#include <VBox/dis.h>
43#include <VBox/disopcode.h>
44#include <VBox/vmm/dbgf.h>
45
46#include <iprt/asm.h>
47
48
49/*******************************************************************************
50* Defined Constants And Macros *
51*******************************************************************************/
52#if 0 /* Disabled till after 2.1.0 when we've time to test it. */
53#define EM_NOTIFY_HWACCM
54#endif
55
56
57/*******************************************************************************
58* Internal Functions *
59*******************************************************************************/
60DECLINLINE(int) emR3ExecuteInstruction(PVM pVM, PVMCPU pVCpu, const char *pszPrefix, int rcGC = VINF_SUCCESS);
61static int emR3ExecuteIOInstruction(PVM pVM, PVMCPU pVCpu);
62static int emR3HwaccmForcedActions(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
63
64#define EMHANDLERC_WITH_HWACCM
65#include "EMHandleRCTmpl.h"
66
67
68#if defined(DEBUG) && defined(SOME_UNUSED_FUNCTIONS)
69
70/**
71 * Steps hardware accelerated mode.
72 *
73 * @returns VBox status code.
74 * @param pVM The VM handle.
75 * @param pVCpu The VMCPU handle.
76 */
/**
 * Steps hardware accelerated mode.
 *
 * Executes a single guest instruction by setting the trap flag (TF) in the
 * guest EFLAGS and re-entering hardware assisted execution until something
 * other than plain success or a host interrupt comes back.
 *
 * @returns VBox status code.
 * @param   pVM     The VM handle.
 * @param   pVCpu   The VMCPU handle.
 */
static int emR3HwAccStep(PVM pVM, PVMCPU pVCpu)
{
    Assert(pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_HWACC);

    int      rc;
    PCPUMCTX pCtx = pVCpu->em.s.pCtx;
    /* Selector/IDT sync flags are raw-mode concerns and have no meaning when
       running with hardware virtualization; clear them so they don't trigger
       the forced-action check below. */
    VMCPU_FF_CLEAR(pVCpu, (VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT | VMCPU_FF_TRPM_SYNC_IDT | VMCPU_FF_SELM_SYNC_TSS));

    /*
     * Check vital forced actions, but ignore pending interrupts and timers.
     */
    if (    VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_RAW_MASK)
        ||  VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK))
    {
        rc = emR3HwaccmForcedActions(pVM, pVCpu, pCtx);
        if (rc != VINF_SUCCESS)
            return rc;
    }

    /*
     * Set flags for single stepping.
     * X86_EFL_RF suppresses a spurious debug exception on the very first
     * instruction; X86_EFL_TF raises #DB after it has executed.
     */
    CPUMSetGuestEFlags(pVCpu, CPUMGetGuestEFlags(pVCpu) | X86_EFL_TF | X86_EFL_RF);

    /*
     * Single step.
     * We do not start time or anything, if anything we should just do a few nanoseconds.
     * VINF_EM_RAW_INTERRUPT merely means a host interrupt forced us out of
     * guest context, so resume until the step actually completes.
     */
    do
    {
        rc = VMMR3HwAccRunGC(pVM, pVCpu);
    } while (   rc == VINF_SUCCESS
             || rc == VINF_EM_RAW_INTERRUPT);
    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_RESUME_GUEST_MASK);

    /*
     * Make sure the trap flag is cleared.
     * (Too bad if the guest is trying to single step too.)
     */
    CPUMSetGuestEFlags(pVCpu, CPUMGetGuestEFlags(pVCpu) & ~X86_EFL_TF);

    /*
     * Deal with the return codes.
     */
    rc = emR3HighPriorityPostForcedActions(pVM, pVCpu, rc);
    rc = emR3HwaccmHandleRC(pVM, pVCpu, pCtx, rc);
    return rc;
}
124
125
126static int emR3SingleStepExecHwAcc(PVM pVM, PVMCPU pVCpu, uint32_t cIterations)
127{
128 int rc = VINF_SUCCESS;
129 EMSTATE enmOldState = pVCpu->em.s.enmState;
130 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_HWACC;
131
132 Log(("Single step BEGIN:\n"));
133 for (uint32_t i = 0; i < cIterations; i++)
134 {
135 DBGFR3PrgStep(pVCpu);
136 DBGFR3DisasInstrCurrentLog(pVCpu, "RSS: ");
137 rc = emR3HwAccStep(pVM, pVCpu);
138 if ( rc != VINF_SUCCESS
139 || !HWACCMR3CanExecuteGuest(pVM, pVCpu->em.s.pCtx))
140 break;
141 }
142 Log(("Single step END: rc=%Rrc\n", rc));
143 CPUMSetGuestEFlags(pVCpu, CPUMGetGuestEFlags(pVCpu) & ~X86_EFL_TF);
144 pVCpu->em.s.enmState = enmOldState;
145 return rc == VINF_SUCCESS ? VINF_EM_RESCHEDULE_REM : rc;
146}
147
#endif /* defined(DEBUG) && defined(SOME_UNUSED_FUNCTIONS) */
149
150
151/**
152 * Executes one (or perhaps a few more) instruction(s).
153 *
154 * @returns VBox status code suitable for EM.
155 *
156 * @param pVM VM handle.
157 * @param pVCpu VMCPU handle
158 * @param rcRC Return code from RC.
159 * @param pszPrefix Disassembly prefix. If not NULL we'll disassemble the
160 * instruction and prefix the log output with this text.
161 */
162#ifdef LOG_ENABLED
#ifdef LOG_ENABLED
static int emR3ExecuteInstructionWorker(PVM pVM, PVMCPU pVCpu, int rcRC, const char *pszPrefix)
#else
static int emR3ExecuteInstructionWorker(PVM pVM, PVMCPU pVCpu, int rcRC)
#endif
{
#ifdef LOG_ENABLED
    /* pCtx is only needed for the disassembly / logging below; the Log()
       macro compiles to nothing in non-logging builds. */
    PCPUMCTX pCtx = pVCpu->em.s.pCtx;
#endif
    int      rc;
    NOREF(rcRC);

    /*
     *
     * The simple solution is to use the recompiler.
     * The better solution is to disassemble the current instruction and
     * try handle as many as possible without using REM.
     *
     */

#ifdef LOG_ENABLED
    /*
     * Disassemble the instruction if requested.
     */
    if (pszPrefix)
    {
        DBGFR3InfoLog(pVM, "cpumguest", pszPrefix);
        DBGFR3DisasInstrCurrentLog(pVCpu, pszPrefix);
    }
#endif /* LOG_ENABLED */

#if 0
    /* Try our own instruction emulator before falling back to the recompiler. */
    DISCPUSTATE Cpu;
    rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCtx->rip, &Cpu, "GEN EMU");
    if (RT_SUCCESS(rc))
    {
        uint32_t size;

        switch (Cpu.pCurInstr->opcode)
        {
            /* @todo we can do more now */
            case OP_MOV:
            case OP_AND:
            case OP_OR:
            case OP_XOR:
            case OP_POP:
            case OP_INC:
            case OP_DEC:
            case OP_XCHG:
                STAM_PROFILE_START(&pVCpu->em.s.StatMiscEmu, a);
                rc = EMInterpretInstructionCPU(pVM, pVCpu, &Cpu, CPUMCTX2CORE(pCtx), 0, &size);
                if (RT_SUCCESS(rc))
                {
                    pCtx->rip += Cpu.opsize;
#ifdef EM_NOTIFY_HWACCM
                    if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_HWACC)
                        HWACCMR3NotifyEmulated(pVCpu);
#endif
                    STAM_PROFILE_STOP(&pVCpu->em.s.StatMiscEmu, a);
                    return rc;
                }
                if (rc != VERR_EM_INTERPRETER)
                    AssertMsgFailedReturn(("rc=%Rrc\n", rc), rc);
                STAM_PROFILE_STOP(&pVCpu->em.s.StatMiscEmu, a);
                break;
        }
    }
#endif /* 0 */
    STAM_PROFILE_START(&pVCpu->em.s.StatREMEmu, a);
    Log(("EMINS: %04x:%RGv RSP=%RGv\n", pCtx->cs, (RTGCPTR)pCtx->rip, (RTGCPTR)pCtx->rsp));
    EMRemLock(pVM);
    /* Flush the recompiler TLB if the VCPU has changed. */
    if (pVM->em.s.idLastRemCpu != pVCpu->idCpu)
        CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
    pVM->em.s.idLastRemCpu = pVCpu->idCpu;

    /* Hand the single instruction to the recompiler under the REM lock. */
    rc = REMR3EmulateInstruction(pVM, pVCpu);
    EMRemUnlock(pVM);
    STAM_PROFILE_STOP(&pVCpu->em.s.StatREMEmu, a);

#ifdef EM_NOTIFY_HWACCM
    if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_HWACC)
        HWACCMR3NotifyEmulated(pVCpu);
#endif
    return rc;
}
249
250
251/**
252 * Executes one (or perhaps a few more) instruction(s).
253 * This is just a wrapper for discarding pszPrefix in non-logging builds.
254 *
255 * @returns VBox status code suitable for EM.
256 * @param pVM VM handle.
257 * @param pVCpu VMCPU handle.
258 * @param pszPrefix Disassembly prefix. If not NULL we'll disassemble the
259 * instruction and prefix the log output with this text.
260 * @param rcGC GC return code
261 */
262DECLINLINE(int) emR3ExecuteInstruction(PVM pVM, PVMCPU pVCpu, const char *pszPrefix, int rcGC)
263{
264#ifdef LOG_ENABLED
265 return emR3ExecuteInstructionWorker(pVM, pVCpu, rcGC, pszPrefix);
266#else
267 return emR3ExecuteInstructionWorker(pVM, pVCpu, rcGC);
268#endif
269}
270
271/**
272 * Executes one (or perhaps a few more) IO instruction(s).
273 *
274 * @returns VBox status code suitable for EM.
275 * @param pVM VM handle.
276 * @param pVCpu VMCPU handle.
277 */
/**
 * Executes one (or perhaps a few more) IO instruction(s).
 *
 * First retries any I/O instruction ring-0 refused to complete, then tries to
 * interpret the current IN/OUT/INS/OUTS instruction via IOM, and finally falls
 * back to single-instruction emulation for anything it could not handle.
 *
 * @returns VBox status code suitable for EM.
 * @param   pVM     VM handle.
 * @param   pVCpu   VMCPU handle.
 */
static int emR3ExecuteIOInstruction(PVM pVM, PVMCPU pVCpu)
{
    PCPUMCTX pCtx = pVCpu->em.s.pCtx;

    STAM_PROFILE_START(&pVCpu->em.s.StatIOEmu, a);

    /* Try to restart the io instruction that was refused in ring-0. */
    VBOXSTRICTRC rcStrict = HWACCMR3RestartPendingIOInstr(pVM, pVCpu, pCtx);
    if (IOM_SUCCESS(rcStrict))
    {
        STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->StatIoRestarted);
        STAM_PROFILE_STOP(&pVCpu->em.s.StatIOEmu, a);
        return VBOXSTRICTRC_TODO(rcStrict);     /* rip already updated. */
    }
    /* VERR_NOT_FOUND simply means there was no pending I/O instruction to
       restart; anything else here is an unexpected status. */
    AssertMsgReturn(rcStrict == VERR_NOT_FOUND, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)),
                    RT_SUCCESS_NP(rcStrict) ? VERR_IPE_UNEXPECTED_INFO_STATUS : VBOXSTRICTRC_TODO(rcStrict));

    /** @todo probably we should fall back to the recompiler; otherwise we'll go back and forth between HC & GC
     * as io instructions tend to come in packages of more than one
     */
    DISCPUSTATE Cpu;
    int rc2 = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCtx->rip, &Cpu, "IO EMU");
    if (RT_SUCCESS(rc2))
    {
        /* Default: let the full emulation path below deal with it. */
        rcStrict = VINF_EM_RAW_EMULATE_INSTR;

        if (!(Cpu.prefix & (PREFIX_REP | PREFIX_REPNE)))
        {
            /* Plain (non-repeated) port I/O. */
            switch (Cpu.pCurInstr->opcode)
            {
                case OP_IN:
                {
                    STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->StatIn);
                    rcStrict = IOMInterpretIN(pVM, CPUMCTX2CORE(pCtx), &Cpu);
                    break;
                }

                case OP_OUT:
                {
                    STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->StatOut);
                    rcStrict = IOMInterpretOUT(pVM, CPUMCTX2CORE(pCtx), &Cpu);
                    break;
                }
            }
        }
        else if (Cpu.prefix & PREFIX_REP)
        {
            /* REP-prefixed string I/O.  (REPNE-prefixed I/O falls through to
               the emulation default above.) */
            switch (Cpu.pCurInstr->opcode)
            {
                case OP_INSB:
                case OP_INSWD:
                {
                    STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->StatIn);
                    rcStrict = IOMInterpretINS(pVM, CPUMCTX2CORE(pCtx), &Cpu);
                    break;
                }

                case OP_OUTSB:
                case OP_OUTSWD:
                {
                    STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->StatOut);
                    rcStrict = IOMInterpretOUTS(pVM, CPUMCTX2CORE(pCtx), &Cpu);
                    break;
                }
            }
        }

        /*
         * Handled the I/O return codes.
         * (The unhandled cases end up with rcStrict == VINF_EM_RAW_EMULATE_INSTR.)
         */
        if (IOM_SUCCESS(rcStrict))
        {
            /* IOM interpreted it but did not advance rip; do that here. */
            pCtx->rip += Cpu.opsize;
            STAM_PROFILE_STOP(&pVCpu->em.s.StatIOEmu, a);
            return VBOXSTRICTRC_TODO(rcStrict);
        }

        if (rcStrict == VINF_EM_RAW_GUEST_TRAP)
        {
            /* The active trap will be dispatched. */
            Assert(TRPMHasTrap(pVCpu));
            STAM_PROFILE_STOP(&pVCpu->em.s.StatIOEmu, a);
            return VINF_SUCCESS;
        }
        AssertMsg(rcStrict != VINF_TRPM_XCPT_DISPATCHED, ("Handle VINF_TRPM_XCPT_DISPATCHED\n"));

        if (RT_FAILURE(rcStrict))
        {
            STAM_PROFILE_STOP(&pVCpu->em.s.StatIOEmu, a);
            return VBOXSTRICTRC_TODO(rcStrict);
        }
        AssertMsg(rcStrict == VINF_EM_RAW_EMULATE_INSTR || rcStrict == VINF_EM_RESCHEDULE_REM, ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
    }

    /* Fall back to full single-instruction emulation. */
    STAM_PROFILE_STOP(&pVCpu->em.s.StatIOEmu, a);
    return emR3ExecuteInstruction(pVM, pVCpu, "IO: ");
}
376
377
378/**
379 * Process raw-mode specific forced actions.
380 *
381 * This function is called when any FFs in the VM_FF_HIGH_PRIORITY_PRE_RAW_MASK is pending.
382 *
383 * @returns VBox status code. May return VINF_EM_NO_MEMORY but none of the other
384 * EM statuses.
385 * @param pVM The VM handle.
386 * @param pVCpu The VMCPU handle.
387 * @param pCtx The guest CPUM register context.
388 */
/**
 * Process raw-mode specific forced actions.
 *
 * This function is called when any FFs in the VM_FF_HIGH_PRIORITY_PRE_RAW_MASK is pending.
 *
 * @returns VBox status code. May return VINF_EM_NO_MEMORY but none of the other
 *          EM statuses.
 * @param   pVM     The VM handle.
 * @param   pVCpu   The VMCPU handle.
 * @param   pCtx    The guest CPUM register context.
 */
static int emR3HwaccmForcedActions(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
{
    /*
     * Sync page directory.
     */
    if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
    {
        Assert(pVCpu->em.s.enmState != EMSTATE_WAIT_SIPI);
        int rc = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4, VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
        if (RT_FAILURE(rc))
            return rc;

        Assert(!VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT));

        /* Prefetch pages for EIP and ESP. */
        /** @todo This is rather expensive. Should investigate if it really helps at all. */
        rc = PGMPrefetchPage(pVCpu, SELMToFlat(pVM, DIS_SELREG_CS, CPUMCTX2CORE(pCtx), pCtx->rip));
        if (rc == VINF_SUCCESS)
            rc = PGMPrefetchPage(pVCpu, SELMToFlat(pVM, DIS_SELREG_SS, CPUMCTX2CORE(pCtx), pCtx->rsp));
        if (rc != VINF_SUCCESS)
        {
            /* VINF_PGM_SYNC_CR3 from the prefetch means the page tables went
               stale again and need another sync; any other non-failure status
               here is unexpected. */
            if (rc != VINF_PGM_SYNC_CR3)
            {
                AssertLogRelMsgReturn(RT_FAILURE(rc), ("%Rrc\n", rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
                return rc;
            }
            rc = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4, VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
            if (RT_FAILURE(rc))
                return rc;
        }
        /** @todo maybe prefetch the supervisor stack page as well */
        Assert(!VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT));
    }

    /*
     * Allocate handy pages (just in case the above actions have consumed some pages).
     */
    if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PGM_NEED_HANDY_PAGES, VM_FF_PGM_NO_MEMORY))
    {
        int rc = PGMR3PhysAllocateHandyPages(pVM);
        if (RT_FAILURE(rc))
            return rc;
    }

    /*
     * Check whether we're out of memory now.
     *
     * This may stem from some of the above actions or operations that has been executed
     * since we ran FFs. The allocate handy pages must for instance always be followed by
     * this check.
     */
    if (VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY))
        return VINF_EM_NO_MEMORY;

    return VINF_SUCCESS;
}
445
446
447/**
448 * Executes hardware accelerated raw code. (Intel VT-x & AMD-V)
449 *
450 * This function contains the raw-mode version of the inner
451 * execution loop (the outer loop being in EMR3ExecuteVM()).
452 *
453 * @returns VBox status code. The most important ones are: VINF_EM_RESCHEDULE, VINF_EM_RESCHEDULE_RAW,
454 * VINF_EM_RESCHEDULE_REM, VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
455 *
456 * @param pVM VM handle.
457 * @param pVCpu VMCPU handle.
458 * @param pfFFDone Where to store an indicator telling whether or not
459 * FFs were done before returning.
460 */
/**
 * Executes hardware accelerated raw code. (Intel VT-x & AMD-V)
 *
 * This function contains the raw-mode version of the inner
 * execution loop (the outer loop being in EMR3ExecuteVM()).
 *
 * @returns VBox status code. The most important ones are: VINF_EM_RESCHEDULE, VINF_EM_RESCHEDULE_RAW,
 *          VINF_EM_RESCHEDULE_REM, VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
 *
 * @param   pVM         VM handle.
 * @param   pVCpu       VMCPU handle.
 * @param   pfFFDone    Where to store an indicator telling whether or not
 *                      FFs were done before returning.
 */
int emR3HwAccExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
{
    int      rc   = VERR_IPE_UNINITIALIZED_STATUS;
    PCPUMCTX pCtx = pVCpu->em.s.pCtx;

    LogFlow(("emR3HwAccExecute%d: (cs:eip=%04x:%RGv)\n", pVCpu->idCpu, pCtx->cs, (RTGCPTR)pCtx->rip));
    *pfFFDone = false;

    STAM_COUNTER_INC(&pVCpu->em.s.StatHwAccExecuteEntry);

#ifdef EM_NOTIFY_HWACCM
    HWACCMR3NotifyScheduled(pVCpu);
#endif

    /*
     * Spin till we get a forced action which returns anything but VINF_SUCCESS.
     */
    for (;;)
    {
        STAM_PROFILE_ADV_START(&pVCpu->em.s.StatHwAccEntry, a);

        /* Check if a forced reschedule is pending. */
        if (HWACCMR3IsRescheduleRequired(pVM, pCtx))
        {
            rc = VINF_EM_RESCHEDULE;
            break;
        }

        /*
         * Process high priority pre-execution raw-mode FFs.
         */
        VMCPU_FF_CLEAR(pVCpu, (VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT | VMCPU_FF_TRPM_SYNC_IDT | VMCPU_FF_SELM_SYNC_TSS)); /* not relevant in HWACCM mode; shouldn't be set really. */
        if (    VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_RAW_MASK)
            ||  VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK))
        {
            rc = emR3HwaccmForcedActions(pVM, pVCpu, pCtx);
            if (rc != VINF_SUCCESS)
                break;
        }

#ifdef LOG_ENABLED
        /*
         * Log important stuff before entering GC.
         */
        if (TRPMHasTrap(pVCpu))
            Log(("CPU%d: Pending hardware interrupt=0x%x cs:rip=%04X:%RGv\n", pVCpu->idCpu, TRPMGetTrapNo(pVCpu), pCtx->cs, (RTGCPTR)pCtx->rip));

        uint32_t cpl = CPUMGetGuestCPL(pVCpu, CPUMCTX2CORE(pCtx));

        /* One log line per guest mode (V86 / 64-bit / 32-bit), with the CPU id
           added when more than one VCPU is configured. */
        if (pVM->cCpus == 1)
        {
            if (pCtx->eflags.Bits.u1VM)
                Log(("HWV86: %08X IF=%d\n", pCtx->eip, pCtx->eflags.Bits.u1IF));
            else if (CPUMIsGuestIn64BitCodeEx(pCtx))
                Log(("HWR%d: %04X:%RGv ESP=%RGv IF=%d IOPL=%d CR0=%x CR4=%x EFER=%x\n", cpl, pCtx->cs, (RTGCPTR)pCtx->rip, pCtx->rsp, pCtx->eflags.Bits.u1IF, pCtx->eflags.Bits.u2IOPL, (uint32_t)pCtx->cr0, (uint32_t)pCtx->cr4, (uint32_t)pCtx->msrEFER));
            else
                Log(("HWR%d: %04X:%08X ESP=%08X IF=%d IOPL=%d CR0=%x CR4=%x EFER=%x\n", cpl, pCtx->cs, pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF, pCtx->eflags.Bits.u2IOPL, (uint32_t)pCtx->cr0, (uint32_t)pCtx->cr4, (uint32_t)pCtx->msrEFER));
        }
        else
        {
            if (pCtx->eflags.Bits.u1VM)
                Log(("HWV86-CPU%d: %08X IF=%d\n", pVCpu->idCpu, pCtx->eip, pCtx->eflags.Bits.u1IF));
            else if (CPUMIsGuestIn64BitCodeEx(pCtx))
                Log(("HWR%d-CPU%d: %04X:%RGv ESP=%RGv IF=%d IOPL=%d CR0=%x CR4=%x EFER=%x\n", cpl, pVCpu->idCpu, pCtx->cs, (RTGCPTR)pCtx->rip, pCtx->rsp, pCtx->eflags.Bits.u1IF, pCtx->eflags.Bits.u2IOPL, (uint32_t)pCtx->cr0, (uint32_t)pCtx->cr4, (uint32_t)pCtx->msrEFER));
            else
                Log(("HWR%d-CPU%d: %04X:%08X ESP=%08X IF=%d IOPL=%d CR0=%x CR4=%x EFER=%x\n", cpl, pVCpu->idCpu, pCtx->cs, pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF, pCtx->eflags.Bits.u2IOPL, (uint32_t)pCtx->cr0, (uint32_t)pCtx->cr4, (uint32_t)pCtx->msrEFER));
        }
#endif /* LOG_ENABLED */

        /*
         * Execute the code.
         */
        STAM_PROFILE_ADV_STOP(&pVCpu->em.s.StatHwAccEntry, a);

        if (RT_LIKELY(EMR3IsExecutionAllowed(pVM, pVCpu)))
        {
            STAM_PROFILE_START(&pVCpu->em.s.StatHwAccExec, x);
            rc = VMMR3HwAccRunGC(pVM, pVCpu);
            STAM_PROFILE_STOP(&pVCpu->em.s.StatHwAccExec, x);
        }
        else
        {
            /* Give up this time slice; virtual time continues */
            STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatCapped, u);
            RTThreadSleep(5);
            STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatCapped, u);
            rc = VINF_SUCCESS;
        }


        /*
         * Deal with high priority post execution FFs before doing anything else.
         */
        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_RESUME_GUEST_MASK);
        if (    VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_POST_MASK)
            ||  VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_MASK))
            rc = emR3HighPriorityPostForcedActions(pVM, pVCpu, rc);

        /*
         * Process the returned status code.
         * All EM scheduling statuses are passed straight up to the outer loop.
         */
        if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
            break;

        rc = emR3HwaccmHandleRC(pVM, pVCpu, pCtx, rc);
        if (rc != VINF_SUCCESS)
            break;

        /*
         * Check and execute forced actions.
         */
#ifdef VBOX_HIGH_RES_TIMERS_HACK
        TMTimerPollVoid(pVM, pVCpu);
#endif
        if (    VM_FF_ISPENDING(pVM, VM_FF_ALL_MASK)
            ||  VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_ALL_MASK))
        {
            rc = emR3ForcedActions(pVM, pVCpu, rc);
            /* VINF_EM_RESCHEDULE_HWACC means "keep doing what we're doing",
               so only leave the loop for other non-success statuses. */
            if (    rc != VINF_SUCCESS
                &&  rc != VINF_EM_RESCHEDULE_HWACC)
            {
                *pfFFDone = true;
                break;
            }
        }
    }

    /*
     * Return to outer loop.
     */
#if defined(LOG_ENABLED) && defined(DEBUG)
    RTLogFlush(NULL);
#endif
    return rc;
}
596
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette