VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/EMHM.cpp@ 47421

Last change on this file since 47421 was 47421, checked in by vboxsync, 12 years ago

VMM: Use IEM for I/O that's been deferred to ring-3.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 20.9 KB
Line 
1/* $Id: EMHM.cpp 47421 2013-07-26 12:15:44Z vboxsync $ */
2/** @file
3 * EM - Execution Monitor / Manager - hardware virtualization
4 */
5
6/*
7 * Copyright (C) 2006-2013 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/*******************************************************************************
19* Header Files *
20*******************************************************************************/
21#define LOG_GROUP LOG_GROUP_EM
22#include <VBox/vmm/em.h>
23#include <VBox/vmm/vmm.h>
24#include <VBox/vmm/csam.h>
25#include <VBox/vmm/selm.h>
26#include <VBox/vmm/trpm.h>
27#include <VBox/vmm/iem.h>
28#include <VBox/vmm/iom.h>
29#include <VBox/vmm/dbgf.h>
30#include <VBox/vmm/pgm.h>
31#ifdef VBOX_WITH_REM
32# include <VBox/vmm/rem.h>
33#endif
34#include <VBox/vmm/tm.h>
35#include <VBox/vmm/mm.h>
36#include <VBox/vmm/ssm.h>
37#include <VBox/vmm/pdmapi.h>
38#include <VBox/vmm/pdmcritsect.h>
39#include <VBox/vmm/pdmqueue.h>
40#include <VBox/vmm/hm.h>
41#include "EMInternal.h"
42#include <VBox/vmm/vm.h>
43#include <VBox/vmm/cpumdis.h>
44#include <VBox/dis.h>
45#include <VBox/disopcode.h>
46#include <VBox/vmm/dbgf.h>
47#include "VMMTracing.h"
48
49#include <iprt/asm.h>
50
51
52/*******************************************************************************
53* Defined Constants And Macros *
54*******************************************************************************/
55#if 0 /* Disabled till after 2.1.0 when we've time to test it. */
56#define EM_NOTIFY_HM
57#endif
58
59
60/*******************************************************************************
61* Internal Functions *
62*******************************************************************************/
63DECLINLINE(int) emR3ExecuteInstruction(PVM pVM, PVMCPU pVCpu, const char *pszPrefix, int rcGC = VINF_SUCCESS);
64static int emR3ExecuteIOInstruction(PVM pVM, PVMCPU pVCpu);
65static int emR3HmForcedActions(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
66
67#define EMHANDLERC_WITH_HM
68#include "EMHandleRCTmpl.h"
69
70
#if defined(DEBUG) && defined(SOME_UNUSED_FUNCTIONS)

/**
 * Steps hardware accelerated mode.
 *
 * Executes exactly one guest instruction by setting EFLAGS.TF (and RF so a
 * pending instruction breakpoint doesn't re-trigger) and running the guest
 * until something other than plain success / raw interrupt comes back.
 *
 * @returns VBox status code.
 * @param   pVM     Pointer to the VM.
 * @param   pVCpu   Pointer to the VMCPU.
 */
static int emR3HmStep(PVM pVM, PVMCPU pVCpu)
{
    Assert(pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_HM);

    int rc;
    PCPUMCTX pCtx = pVCpu->em.s.pCtx;
# ifdef VBOX_WITH_RAW_MODE
    /* Raw-mode-only selector/IDT sync FFs must never be pending in HM mode. */
    Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT | VMCPU_FF_TRPM_SYNC_IDT | VMCPU_FF_SELM_SYNC_TSS));
# endif

    /*
     * Check vital forced actions, but ignore pending interrupts and timers.
     */
    if (   VM_FF_IS_PENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_RAW_MASK)
        || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK))
    {
        rc = emR3HmForcedActions(pVM, pVCpu, pCtx);
        if (rc != VINF_SUCCESS)
            return rc;
    }
    /*
     * Set flags for single stepping.
     */
    CPUMSetGuestEFlags(pVCpu, CPUMGetGuestEFlags(pVCpu) | X86_EFL_TF | X86_EFL_RF);

    /*
     * Single step.
     * We do not start time or anything, if anything we should just do a few nanoseconds.
     */
    do
    {
        rc = VMMR3HmRunGC(pVM, pVCpu);
    } while (   rc == VINF_SUCCESS
             || rc == VINF_EM_RAW_INTERRUPT);
    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_RESUME_GUEST_MASK);

    /*
     * Make sure the trap flag is cleared.
     * (Too bad if the guest is trying to single step too.)
     */
    CPUMSetGuestEFlags(pVCpu, CPUMGetGuestEFlags(pVCpu) & ~X86_EFL_TF);

    /*
     * Deal with the return codes.
     */
    rc = emR3HighPriorityPostForcedActions(pVM, pVCpu, rc);
    rc = emR3HmHandleRC(pVM, pVCpu, pCtx, rc);
    return rc;
}


/**
 * Single-steps the guest in HM mode for up to @a cIterations instructions,
 * logging a disassembly of each one (debugger aid).
 *
 * @returns VBox status code; VINF_EM_RESCHEDULE_REM when all iterations
 *          completed successfully, otherwise the failing step's status.
 * @param   pVM         Pointer to the VM.
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   cIterations Maximum number of instructions to step.
 */
static int emR3SingleStepExecHm(PVM pVM, PVMCPU pVCpu, uint32_t cIterations)
{
    int     rc          = VINF_SUCCESS;
    EMSTATE enmOldState = pVCpu->em.s.enmState;
    pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_HM;

    Log(("Single step BEGIN:\n"));
    for (uint32_t i = 0; i < cIterations; i++)
    {
        DBGFR3PrgStep(pVCpu);
        DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, "RSS");
        rc = emR3HmStep(pVM, pVCpu);
        /* Stop early if stepping failed or the guest state can no longer be
           executed by HM (e.g. real mode on older VT-x without unrestricted guest). */
        if (   rc != VINF_SUCCESS
            || !HMR3CanExecuteGuest(pVM, pVCpu->em.s.pCtx))
            break;
    }
    Log(("Single step END: rc=%Rrc\n", rc));
    /* Clear TF again in case emR3HmStep bailed out before doing so. */
    CPUMSetGuestEFlags(pVCpu, CPUMGetGuestEFlags(pVCpu) & ~X86_EFL_TF);
    pVCpu->em.s.enmState = enmOldState;
    return rc == VINF_SUCCESS ? VINF_EM_RESCHEDULE_REM : rc;
}

#endif /* DEBUG && SOME_UNUSED_FUNCTIONS */
154
155
/**
 * Executes one (or perhaps a few more) instruction(s).
 *
 * Emulates the current instruction using REM when available, otherwise IEM.
 * Note that the signature differs between logging and non-logging builds:
 * pszPrefix only exists when LOG_ENABLED is defined.
 *
 * @returns VBox status code suitable for EM.
 *
 * @param   pVM         Pointer to the VM.
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   rcRC        Return code from RC.  Currently unused (NOREF'ed below).
 * @param   pszPrefix   Disassembly prefix. If not NULL we'll disassemble the
 *                      instruction and prefix the log output with this text.
 */
#ifdef LOG_ENABLED
static int emR3ExecuteInstructionWorker(PVM pVM, PVMCPU pVCpu, int rcRC, const char *pszPrefix)
#else
static int emR3ExecuteInstructionWorker(PVM pVM, PVMCPU pVCpu, int rcRC)
#endif
{
#ifdef LOG_ENABLED
    /* pCtx is only needed by the Log statements; Log compiles away without LOG_ENABLED. */
    PCPUMCTX pCtx = pVCpu->em.s.pCtx;
#endif
    int rc;
    NOREF(rcRC);

    /*
     *
     * The simple solution is to use the recompiler.
     * The better solution is to disassemble the current instruction and
     * try handle as many as possible without using REM.
     *
     */

#ifdef LOG_ENABLED
    /*
     * Disassemble the instruction if requested.
     */
    if (pszPrefix)
    {
        DBGFR3_INFO_LOG(pVM, "cpumguest", pszPrefix);
        DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, pszPrefix);
    }
#endif /* LOG_ENABLED */

#if 0
    /* Try our own instruction emulator before falling back to the recompiler. */
    DISCPUSTATE Cpu;
    rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCtx->rip, &Cpu, "GEN EMU");
    if (RT_SUCCESS(rc))
    {
        switch (Cpu.pCurInstr->uOpcode)
        {
        /* @todo we can do more now */
        case OP_MOV:
        case OP_AND:
        case OP_OR:
        case OP_XOR:
        case OP_POP:
        case OP_INC:
        case OP_DEC:
        case OP_XCHG:
            STAM_PROFILE_START(&pVCpu->em.s.StatMiscEmu, a);
            rc = EMInterpretInstructionCpuUpdtPC(pVM, pVCpu, &Cpu, CPUMCTX2CORE(pCtx), 0);
            if (RT_SUCCESS(rc))
            {
#ifdef EM_NOTIFY_HM
                if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_HM)
                    HMR3NotifyEmulated(pVCpu);
#endif
                STAM_PROFILE_STOP(&pVCpu->em.s.StatMiscEmu, a);
                return rc;
            }
            if (rc != VERR_EM_INTERPRETER)
                AssertMsgFailedReturn(("rc=%Rrc\n", rc), rc);
            STAM_PROFILE_STOP(&pVCpu->em.s.StatMiscEmu, a);
            break;
        }
    }
#endif /* 0 */
    STAM_PROFILE_START(&pVCpu->em.s.StatREMEmu, a);
    Log(("EMINS: %04x:%RGv RSP=%RGv\n", pCtx->cs.Sel, (RTGCPTR)pCtx->rip, (RTGCPTR)pCtx->rsp));
#ifdef VBOX_WITH_REM
    EMRemLock(pVM);
    /* Flush the recompiler TLB if the VCPU has changed. */
    if (pVM->em.s.idLastRemCpu != pVCpu->idCpu)
        CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
    pVM->em.s.idLastRemCpu = pVCpu->idCpu;

    rc = REMR3EmulateInstruction(pVM, pVCpu);
    EMRemUnlock(pVM);
#else
    rc = VBOXSTRICTRC_TODO(IEMExecOne(pVCpu)); NOREF(pVM);
#endif
    STAM_PROFILE_STOP(&pVCpu->em.s.StatREMEmu, a);

#ifdef EM_NOTIFY_HM
    if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_HM)
        HMR3NotifyEmulated(pVCpu);
#endif
    return rc;
}
255
256
257/**
258 * Executes one (or perhaps a few more) instruction(s).
259 * This is just a wrapper for discarding pszPrefix in non-logging builds.
260 *
261 * @returns VBox status code suitable for EM.
262 * @param pVM Pointer to the VM.
263 * @param pVCpu Pointer to the VMCPU.
264 * @param pszPrefix Disassembly prefix. If not NULL we'll disassemble the
265 * instruction and prefix the log output with this text.
266 * @param rcGC GC return code
267 */
268DECLINLINE(int) emR3ExecuteInstruction(PVM pVM, PVMCPU pVCpu, const char *pszPrefix, int rcGC)
269{
270#ifdef LOG_ENABLED
271 return emR3ExecuteInstructionWorker(pVM, pVCpu, rcGC, pszPrefix);
272#else
273 return emR3ExecuteInstructionWorker(pVM, pVCpu, rcGC);
274#endif
275}
276
/**
 * Executes one (or perhaps a few more) IO instruction(s).
 *
 * First tries to restart an I/O instruction that ring-0 refused and deferred
 * to ring-3; failing that (VERR_NOT_FOUND), emulates the instruction at RIP
 * via IEM (or the older IOM interpreters when VBOX_WITH_FIRST_IEM_STEP is
 * not defined).
 *
 * @returns VBox status code suitable for EM.
 * @param   pVM     Pointer to the VM.
 * @param   pVCpu   Pointer to the VMCPU.
 */
static int emR3ExecuteIOInstruction(PVM pVM, PVMCPU pVCpu)
{
    PCPUMCTX pCtx = pVCpu->em.s.pCtx;

    STAM_PROFILE_START(&pVCpu->em.s.StatIOEmu, a);

    /* Try to restart the io instruction that was refused in ring-0. */
    VBOXSTRICTRC rcStrict = HMR3RestartPendingIOInstr(pVM, pVCpu, pCtx);
    if (IOM_SUCCESS(rcStrict))
    {
        STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->StatIoRestarted);
        STAM_PROFILE_STOP(&pVCpu->em.s.StatIOEmu, a);
        return VBOXSTRICTRC_TODO(rcStrict);     /* rip already updated. */
    }
    /* VERR_NOT_FOUND simply means there was no pending I/O to restart; anything
       else (including unexpected informational statuses) is a bug. */
    AssertMsgReturn(rcStrict == VERR_NOT_FOUND, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)),
                    RT_SUCCESS_NP(rcStrict) ? VERR_IPE_UNEXPECTED_INFO_STATUS : VBOXSTRICTRC_TODO(rcStrict));

#ifdef VBOX_WITH_FIRST_IEM_STEP
    /* Hand it over to the interpreter. */
    rcStrict = IEMExecOne(pVCpu);
    LogFlow(("emR3ExecuteIOInstruction: %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
    STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->StatIoIem);
    STAM_PROFILE_STOP(&pVCpu->em.s.StatIOEmu, a);
    return VBOXSTRICTRC_TODO(rcStrict);

#else
    /** @todo probably we should fall back to the recompiler; otherwise we'll go back and forth between HC & GC
     * as io instructions tend to come in packages of more than one
     */
    DISCPUSTATE Cpu;
    int rc2 = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCtx->rip, &Cpu, "IO EMU");
    if (RT_SUCCESS(rc2))
    {
        /* Default: cases not handled below fall through to full emulation. */
        rcStrict = VINF_EM_RAW_EMULATE_INSTR;

        if (!(Cpu.fPrefix & (DISPREFIX_REP | DISPREFIX_REPNE)))
        {
            switch (Cpu.pCurInstr->uOpcode)
            {
                case OP_IN:
                {
                    STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->StatIn);
                    rcStrict = IOMInterpretIN(pVM, pVCpu, CPUMCTX2CORE(pCtx), &Cpu);
                    break;
                }

                case OP_OUT:
                {
                    STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->StatOut);
                    rcStrict = IOMInterpretOUT(pVM, pVCpu, CPUMCTX2CORE(pCtx), &Cpu);
                    break;
                }
            }
        }
        else if (Cpu.fPrefix & DISPREFIX_REP)
        {
            switch (Cpu.pCurInstr->uOpcode)
            {
                case OP_INSB:
                case OP_INSWD:
                {
                    STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->StatIn);
                    rcStrict = IOMInterpretINS(pVM, pVCpu, CPUMCTX2CORE(pCtx), &Cpu);
                    break;
                }

                case OP_OUTSB:
                case OP_OUTSWD:
                {
                    STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->StatOut);
                    rcStrict = IOMInterpretOUTS(pVM, pVCpu, CPUMCTX2CORE(pCtx), &Cpu);
                    break;
                }
            }
        }

        /*
         * Handle the I/O return codes.
         * (The unhandled cases end up with rcStrict == VINF_EM_RAW_EMULATE_INSTR.)
         */
        if (IOM_SUCCESS(rcStrict))
        {
            /* The IOM interpreters do not advance RIP themselves. */
            pCtx->rip += Cpu.cbInstr;
            STAM_PROFILE_STOP(&pVCpu->em.s.StatIOEmu, a);
            return VBOXSTRICTRC_TODO(rcStrict);
        }

        if (rcStrict == VINF_EM_RAW_GUEST_TRAP)
        {
            /* The active trap will be dispatched. */
            Assert(TRPMHasTrap(pVCpu));
            STAM_PROFILE_STOP(&pVCpu->em.s.StatIOEmu, a);
            return VINF_SUCCESS;
        }
        AssertMsg(rcStrict != VINF_TRPM_XCPT_DISPATCHED, ("Handle VINF_TRPM_XCPT_DISPATCHED\n"));

        if (RT_FAILURE(rcStrict))
        {
            STAM_PROFILE_STOP(&pVCpu->em.s.StatIOEmu, a);
            return VBOXSTRICTRC_TODO(rcStrict);
        }
        AssertMsg(rcStrict == VINF_EM_RAW_EMULATE_INSTR || rcStrict == VINF_EM_RESCHEDULE_REM, ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
    }

    /* Fall back to generic instruction emulation (disassembly failed or
       the opcode wasn't one of the fast-path I/O instructions above). */
    STAM_PROFILE_STOP(&pVCpu->em.s.StatIOEmu, a);
    return emR3ExecuteInstruction(pVM, pVCpu, "IO: ");
#endif
}
392
393
/**
 * Process raw-mode specific forced actions.
 *
 * This function is called when any FFs in the VM_FF_HIGH_PRIORITY_PRE_RAW_MASK is pending.
 *
 * The order matters: CR3 sync first, then EIP/ESP page prefetch (which may
 * itself demand another CR3 sync), then handy-page allocation, and finally
 * the out-of-memory check which must always follow the allocation.
 *
 * @returns VBox status code. May return VINF_EM_NO_MEMORY but none of the other
 *          EM statuses.
 * @param   pVM     Pointer to the VM.
 * @param   pVCpu   Pointer to the VMCPU.
 * @param   pCtx    Pointer to the guest CPU context.
 */
static int emR3HmForcedActions(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
{
    /*
     * Sync page directory.
     */
    if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
    {
        Assert(pVCpu->em.s.enmState != EMSTATE_WAIT_SIPI);
        int rc = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4, VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
        if (RT_FAILURE(rc))
            return rc;

#ifdef VBOX_WITH_RAW_MODE
        Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT));
#endif

        /* Prefetch pages for EIP and ESP. */
        /** @todo This is rather expensive. Should investigate if it really helps at all. */
        rc = PGMPrefetchPage(pVCpu, SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), pCtx->rip));
        if (rc == VINF_SUCCESS)
            rc = PGMPrefetchPage(pVCpu, SELMToFlat(pVM, DISSELREG_SS, CPUMCTX2CORE(pCtx), pCtx->rsp));
        if (rc != VINF_SUCCESS)
        {
            /* VINF_PGM_SYNC_CR3 means the prefetch invalidated mappings and a
               re-sync is required; any other informational status is unexpected. */
            if (rc != VINF_PGM_SYNC_CR3)
            {
                AssertLogRelMsgReturn(RT_FAILURE(rc), ("%Rrc\n", rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
                return rc;
            }
            rc = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4, VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
            if (RT_FAILURE(rc))
                return rc;
        }
        /** @todo maybe prefetch the supervisor stack page as well */
#ifdef VBOX_WITH_RAW_MODE
        Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT));
#endif
    }

    /*
     * Allocate handy pages (just in case the above actions have consumed some pages).
     */
    if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PGM_NEED_HANDY_PAGES, VM_FF_PGM_NO_MEMORY))
    {
        int rc = PGMR3PhysAllocateHandyPages(pVM);
        if (RT_FAILURE(rc))
            return rc;
    }

    /*
     * Check whether we're out of memory now.
     *
     * This may stem from some of the above actions or operations that has been executed
     * since we ran FFs. The allocate handy pages must for instance always be followed by
     * this check.
     */
    if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
        return VINF_EM_NO_MEMORY;

    return VINF_SUCCESS;
}
465
466
/**
 * Executes hardware accelerated raw code. (Intel VT-x & AMD-V)
 *
 * This function contains the raw-mode version of the inner
 * execution loop (the outer loop being in EMR3ExecuteVM()).
 *
 * @returns VBox status code. The most important ones are: VINF_EM_RESCHEDULE, VINF_EM_RESCHEDULE_RAW,
 *          VINF_EM_RESCHEDULE_REM, VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
 *
 * @param   pVM         Pointer to the VM.
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   pfFFDone    Where to store an indicator telling whether or not
 *                      FFs were done before returning.
 */
int emR3HmExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
{
    int      rc   = VERR_IPE_UNINITIALIZED_STATUS;
    PCPUMCTX pCtx = pVCpu->em.s.pCtx;

    LogFlow(("emR3HmExecute%d: (cs:eip=%04x:%RGv)\n", pVCpu->idCpu, pCtx->cs.Sel, (RTGCPTR)pCtx->rip));
    *pfFFDone = false;

    STAM_COUNTER_INC(&pVCpu->em.s.StatHmExecuteEntry);

#ifdef EM_NOTIFY_HM
    HMR3NotifyScheduled(pVCpu);
#endif

    /*
     * Spin till we get a forced action which returns anything but VINF_SUCCESS.
     */
    for (;;)
    {
        STAM_PROFILE_ADV_START(&pVCpu->em.s.StatHmEntry, a);

        /* Check if a forced reschedule is pending. */
        if (HMR3IsRescheduleRequired(pVM, pCtx))
        {
            rc = VINF_EM_RESCHEDULE;
            break;
        }

        /*
         * Process high priority pre-execution raw-mode FFs.
         */
#ifdef VBOX_WITH_RAW_MODE
        /* Raw-mode-only selector sync FFs must never be pending in HM mode. */
        Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_SELM_SYNC_TSS | VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT));
#endif
        if (   VM_FF_IS_PENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_RAW_MASK)
            || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK))
        {
            rc = emR3HmForcedActions(pVM, pVCpu, pCtx);
            if (rc != VINF_SUCCESS)
                break;
        }

#ifdef LOG_ENABLED
        /*
         * Log important stuff before entering GC.
         */
        if (TRPMHasTrap(pVCpu))
            Log(("CPU%d: Pending hardware interrupt=0x%x cs:rip=%04X:%RGv\n", pVCpu->idCpu, TRPMGetTrapNo(pVCpu), pCtx->cs.Sel, (RTGCPTR)pCtx->rip));

        uint32_t cpl = CPUMGetGuestCPL(pVCpu);

        if (pVM->cCpus == 1)
        {
            if (pCtx->eflags.Bits.u1VM)
                Log(("HWV86: %08X IF=%d\n", pCtx->eip, pCtx->eflags.Bits.u1IF));
            else if (CPUMIsGuestIn64BitCodeEx(pCtx))
                Log(("HWR%d: %04X:%RGv ESP=%RGv IF=%d IOPL=%d CR0=%x CR4=%x EFER=%x\n", cpl, pCtx->cs.Sel, (RTGCPTR)pCtx->rip, pCtx->rsp, pCtx->eflags.Bits.u1IF, pCtx->eflags.Bits.u2IOPL, (uint32_t)pCtx->cr0, (uint32_t)pCtx->cr4, (uint32_t)pCtx->msrEFER));
            else
                Log(("HWR%d: %04X:%08X ESP=%08X IF=%d IOPL=%d CR0=%x CR4=%x EFER=%x\n", cpl, pCtx->cs.Sel, pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF, pCtx->eflags.Bits.u2IOPL, (uint32_t)pCtx->cr0, (uint32_t)pCtx->cr4, (uint32_t)pCtx->msrEFER));
        }
        else
        {
            if (pCtx->eflags.Bits.u1VM)
                Log(("HWV86-CPU%d: %08X IF=%d\n", pVCpu->idCpu, pCtx->eip, pCtx->eflags.Bits.u1IF));
            else if (CPUMIsGuestIn64BitCodeEx(pCtx))
                Log(("HWR%d-CPU%d: %04X:%RGv ESP=%RGv IF=%d IOPL=%d CR0=%x CR4=%x EFER=%x\n", cpl, pVCpu->idCpu, pCtx->cs.Sel, (RTGCPTR)pCtx->rip, pCtx->rsp, pCtx->eflags.Bits.u1IF, pCtx->eflags.Bits.u2IOPL, (uint32_t)pCtx->cr0, (uint32_t)pCtx->cr4, (uint32_t)pCtx->msrEFER));
            else
                Log(("HWR%d-CPU%d: %04X:%08X ESP=%08X IF=%d IOPL=%d CR0=%x CR4=%x EFER=%x\n", cpl, pVCpu->idCpu, pCtx->cs.Sel, pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF, pCtx->eflags.Bits.u2IOPL, (uint32_t)pCtx->cr0, (uint32_t)pCtx->cr4, (uint32_t)pCtx->msrEFER));
        }
#endif /* LOG_ENABLED */

        /*
         * Execute the code.
         */
        STAM_PROFILE_ADV_STOP(&pVCpu->em.s.StatHmEntry, a);

        if (RT_LIKELY(emR3IsExecutionAllowed(pVM, pVCpu)))
        {
            STAM_PROFILE_START(&pVCpu->em.s.StatHmExec, x);
            rc = VMMR3HmRunGC(pVM, pVCpu);
            STAM_PROFILE_STOP(&pVCpu->em.s.StatHmExec, x);
        }
        else
        {
            /* Give up this time slice; virtual time continues (CPU execution cap hit). */
            STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatCapped, u);
            RTThreadSleep(5);
            STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatCapped, u);
            rc = VINF_SUCCESS;
        }


        /*
         * Deal with high priority post execution FFs before doing anything else.
         */
        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_RESUME_GUEST_MASK);
        if (   VM_FF_IS_PENDING(pVM, VM_FF_HIGH_PRIORITY_POST_MASK)
            || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_MASK))
            rc = emR3HighPriorityPostForcedActions(pVM, pVCpu, rc);

        /*
         * Process the returned status code.
         */
        /* Any EM scheduling status is handled by the outer loop, not here. */
        if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
            break;

        rc = emR3HmHandleRC(pVM, pVCpu, pCtx, rc);
        if (rc != VINF_SUCCESS)
            break;

        /*
         * Check and execute forced actions.
         */
#ifdef VBOX_HIGH_RES_TIMERS_HACK
        TMTimerPollVoid(pVM, pVCpu);
#endif
        if (   VM_FF_IS_PENDING(pVM, VM_FF_ALL_MASK)
            || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_ALL_MASK))
        {
            rc = emR3ForcedActions(pVM, pVCpu, rc);
            VBOXVMM_EM_FF_ALL_RET(pVCpu, rc);
            if (   rc != VINF_SUCCESS
                && rc != VINF_EM_RESCHEDULE_HM)
            {
                *pfFFDone = true;
                break;
            }
        }
    }

    /*
     * Return to outer loop.
     */
#if defined(LOG_ENABLED) && defined(DEBUG)
    RTLogFlush(NULL);
#endif
    return rc;
}
619
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette