VirtualBox

source: vbox/trunk/src/VBox/VMM/EM.cpp@ 12866

Last change on this file since 12866 was 12831, checked in by vboxsync, 16 years ago

Deal with CR0 updates inside patch code that force us to go to the recompiler.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 147.2 KB
Line 
1/* $Id: EM.cpp 12831 2008-09-30 13:44:47Z vboxsync $ */
2/** @file
3 * EM - Execution Monitor/Manager.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22/** @page pg_em EM - The Execution Monitor/Manager
23 *
24 * The Execution Monitor/Manager is responsible for running the VM, scheduling
25 * the right kind of execution (Raw-mode, Hardware Assisted, Recompiled or
26 * Interpreted), and keeping the CPU states in sync. The function
27 * EMR3ExecuteVM() is the 'main-loop' of the VM, while each of the execution
28 * modes has different inner loops (emR3RawExecute, emR3HwAccExecute, and
29 * emR3RemExecute).
30 *
31 * The interpreted execution is only used to avoid switching between
32 * raw-mode/hwaccm and the recompiler when fielding virtualization traps/faults.
33 * The interpretation is thus implemented as part of EM.
34 */
35
36/*******************************************************************************
37* Header Files *
38*******************************************************************************/
39#define LOG_GROUP LOG_GROUP_EM
40#include <VBox/em.h>
41#include <VBox/vmm.h>
42#include <VBox/patm.h>
43#include <VBox/csam.h>
44#include <VBox/selm.h>
45#include <VBox/trpm.h>
46#include <VBox/iom.h>
47#include <VBox/dbgf.h>
48#include <VBox/pgm.h>
49#include <VBox/rem.h>
50#include <VBox/tm.h>
51#include <VBox/mm.h>
52#include <VBox/ssm.h>
53#include <VBox/pdmapi.h>
54#include <VBox/pdmcritsect.h>
55#include <VBox/pdmqueue.h>
56#include <VBox/hwaccm.h>
57#include <VBox/patm.h>
58#include "EMInternal.h"
59#include <VBox/vm.h>
60#include <VBox/cpumdis.h>
61#include <VBox/dis.h>
62#include <VBox/disopcode.h>
63#include <VBox/dbgf.h>
64
65#include <VBox/log.h>
66#include <iprt/thread.h>
67#include <iprt/assert.h>
68#include <iprt/asm.h>
69#include <iprt/semaphore.h>
70#include <iprt/string.h>
71#include <iprt/avl.h>
72#include <iprt/stream.h>
73#include <VBox/param.h>
74#include <VBox/err.h>
75
76
77/*******************************************************************************
78* Internal Functions *
79*******************************************************************************/
/* Forward declarations for this translation unit's internal helpers. */
/** SSM save-state callback; registered via SSMR3RegisterInternal() in EMR3Init(). */
80static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM);
/** SSM load-state callback; registered via SSMR3RegisterInternal() in EMR3Init(). */
81static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version);
/* NOTE(review): purposes below are inferred from naming only — confirm against the definitions further down. */
82static int emR3Debug(PVM pVM, int rc);
/* Presumably single-steps the VM in the recompiler (REM). */
83static int emR3RemStep(PVM pVM);
/** Inner execution loop for recompiled (REM) execution; see the @page comment above. */
84static int emR3RemExecute(PVM pVM, bool *pfFFDone);
85static int emR3RawResumeHyper(PVM pVM);
86static int emR3RawStep(PVM pVM);
/* Handlers for status codes / force flags coming out of raw-mode execution. */
87DECLINLINE(int) emR3RawHandleRC(PVM pVM, PCPUMCTX pCtx, int rc);
88DECLINLINE(int) emR3RawUpdateForceFlag(PVM pVM, PCPUMCTX pCtx, int rc);
89static int emR3RawForcedActions(PVM pVM, PCPUMCTX pCtx);
/** Inner execution loop for raw-mode execution; see the @page comment above. */
90static int emR3RawExecute(PVM pVM, bool *pfFFDone);
/* C++ default argument — this file is C++ despite the C-style conventions. */
91DECLINLINE(int) emR3RawExecuteInstruction(PVM pVM, const char *pszPrefix, int rcGC = VINF_SUCCESS);
92static int emR3HighPriorityPostForcedActions(PVM pVM, int rc);
93static int emR3ForcedActions(PVM pVM, int rc);
94static int emR3RawGuestTrap(PVM pVM);
95
96
97/**
98 * Initializes the EM.
99 *
100 * @returns VBox status code.
101 * @param pVM The VM to operate on.
102 */
103EMR3DECL(int) EMR3Init(PVM pVM)
104{
105 LogFlow(("EMR3Init\n"));
106 /*
107 * Assert alignment and sizes.
108 */
109 AssertRelease(!(RT_OFFSETOF(VM, em.s) & 31));
110 AssertRelease(sizeof(pVM->em.s) <= sizeof(pVM->em.padding));
111 AssertReleaseMsg(sizeof(pVM->em.s.u.FatalLongJump) <= sizeof(pVM->em.s.u.achPaddingFatalLongJump),
112 ("%d bytes, padding %d\n", sizeof(pVM->em.s.u.FatalLongJump), sizeof(pVM->em.s.u.achPaddingFatalLongJump)));
113
114 /*
115 * Init the structure.
116 */
117 pVM->em.s.offVM = RT_OFFSETOF(VM, em.s);
118 int rc = CFGMR3QueryBool(CFGMR3GetRoot(pVM), "RawR3Enabled", &pVM->fRawR3Enabled);
119 if (VBOX_FAILURE(rc))
120 pVM->fRawR3Enabled = true;
121 rc = CFGMR3QueryBool(CFGMR3GetRoot(pVM), "RawR0Enabled", &pVM->fRawR0Enabled);
122 if (VBOX_FAILURE(rc))
123 pVM->fRawR0Enabled = true;
124 Log(("EMR3Init: fRawR3Enabled=%d fRawR0Enabled=%d\n", pVM->fRawR3Enabled, pVM->fRawR0Enabled));
125 pVM->em.s.enmState = EMSTATE_NONE;
126 pVM->em.s.fForceRAW = false;
127
128 rc = CPUMQueryGuestCtxPtr(pVM, &pVM->em.s.pCtx);
129 AssertMsgRC(rc, ("CPUMQueryGuestCtxPtr -> %Vrc\n", rc));
130 pVM->em.s.pPatmGCState = PATMR3QueryGCStateHC(pVM);
131 AssertMsg(pVM->em.s.pPatmGCState, ("PATMR3QueryGCStateHC failed!\n"));
132
133 /*
134 * Saved state.
135 */
136 rc = SSMR3RegisterInternal(pVM, "em", 0, EM_SAVED_STATE_VERSION, 16,
137 NULL, emR3Save, NULL,
138 NULL, emR3Load, NULL);
139 if (VBOX_FAILURE(rc))
140 return rc;
141
142 /*
143 * Statistics.
144 */
145#ifdef VBOX_WITH_STATISTICS
146 PEMSTATS pStats;
147 rc = MMHyperAlloc(pVM, sizeof(*pStats), 0, MM_TAG_EM, (void **)&pStats);
148 if (VBOX_FAILURE(rc))
149 return rc;
150 pVM->em.s.pStatsR3 = pStats;
151 pVM->em.s.pStatsR0 = MMHyperR3ToR0(pVM, pStats);
152 pVM->em.s.pStatsRC = MMHyperR3ToRC(pVM, pStats);
153
154 STAM_REG(pVM, &pStats->StatRZEmulate, STAMTYPE_PROFILE, "/EM/RZ/Interpret", STAMUNIT_TICKS_PER_CALL, "Profiling of EMInterpretInstruction.");
155 STAM_REG(pVM, &pStats->StatR3Emulate, STAMTYPE_PROFILE, "/EM/R3/Interpret", STAMUNIT_TICKS_PER_CALL, "Profiling of EMInterpretInstruction.");
156
157 STAM_REG(pVM, &pStats->StatRZInterpretSucceeded, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Success", STAMUNIT_OCCURENCES, "The number of times an instruction was successfully interpreted.");
158 STAM_REG(pVM, &pStats->StatR3InterpretSucceeded, STAMTYPE_COUNTER, "/EM/R3/Interpret/Success", STAMUNIT_OCCURENCES, "The number of times an instruction was successfully interpreted.");
159
160 STAM_REG_USED(pVM, &pStats->StatRZAnd, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Success/And", STAMUNIT_OCCURENCES, "The number of times AND was successfully interpreted.");
161 STAM_REG_USED(pVM, &pStats->StatR3And, STAMTYPE_COUNTER, "/EM/R3/Interpret/Success/And", STAMUNIT_OCCURENCES, "The number of times AND was successfully interpreted.");
162 STAM_REG_USED(pVM, &pStats->StatRZAdd, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Success/Add", STAMUNIT_OCCURENCES, "The number of times ADD was successfully interpreted.");
163 STAM_REG_USED(pVM, &pStats->StatR3Add, STAMTYPE_COUNTER, "/EM/R3/Interpret/Success/Add", STAMUNIT_OCCURENCES, "The number of times ADD was successfully interpreted.");
164 STAM_REG_USED(pVM, &pStats->StatRZAdc, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Success/Adc", STAMUNIT_OCCURENCES, "The number of times ADC was successfully interpreted.");
165 STAM_REG_USED(pVM, &pStats->StatR3Adc, STAMTYPE_COUNTER, "/EM/R3/Interpret/Success/Adc", STAMUNIT_OCCURENCES, "The number of times ADC was successfully interpreted.");
166 STAM_REG_USED(pVM, &pStats->StatRZSub, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Success/Sub", STAMUNIT_OCCURENCES, "The number of times SUB was successfully interpreted.");
167 STAM_REG_USED(pVM, &pStats->StatR3Sub, STAMTYPE_COUNTER, "/EM/R3/Interpret/Success/Sub", STAMUNIT_OCCURENCES, "The number of times SUB was successfully interpreted.");
168 STAM_REG_USED(pVM, &pStats->StatRZCpuId, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Success/CpuId", STAMUNIT_OCCURENCES, "The number of times CPUID was successfully interpreted.");
169 STAM_REG_USED(pVM, &pStats->StatR3CpuId, STAMTYPE_COUNTER, "/EM/R3/Interpret/Success/CpuId", STAMUNIT_OCCURENCES, "The number of times CPUID was successfully interpreted.");
170 STAM_REG_USED(pVM, &pStats->StatRZDec, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Success/Dec", STAMUNIT_OCCURENCES, "The number of times DEC was successfully interpreted.");
171 STAM_REG_USED(pVM, &pStats->StatR3Dec, STAMTYPE_COUNTER, "/EM/R3/Interpret/Success/Dec", STAMUNIT_OCCURENCES, "The number of times DEC was successfully interpreted.");
172 STAM_REG_USED(pVM, &pStats->StatRZHlt, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Success/Hlt", STAMUNIT_OCCURENCES, "The number of times HLT was successfully interpreted.");
173 STAM_REG_USED(pVM, &pStats->StatR3Hlt, STAMTYPE_COUNTER, "/EM/R3/Interpret/Success/Hlt", STAMUNIT_OCCURENCES, "The number of times HLT was successfully interpreted.");
174 STAM_REG_USED(pVM, &pStats->StatRZInc, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Success/Inc", STAMUNIT_OCCURENCES, "The number of times INC was successfully interpreted.");
175 STAM_REG_USED(pVM, &pStats->StatR3Inc, STAMTYPE_COUNTER, "/EM/R3/Interpret/Success/Inc", STAMUNIT_OCCURENCES, "The number of times INC was successfully interpreted.");
176 STAM_REG_USED(pVM, &pStats->StatRZInvlPg, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Success/Invlpg", STAMUNIT_OCCURENCES, "The number of times INVLPG was successfully interpreted.");
177 STAM_REG_USED(pVM, &pStats->StatR3InvlPg, STAMTYPE_COUNTER, "/EM/R3/Interpret/Success/Invlpg", STAMUNIT_OCCURENCES, "The number of times INVLPG was successfully interpreted.");
178 STAM_REG_USED(pVM, &pStats->StatRZIret, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Success/Iret", STAMUNIT_OCCURENCES, "The number of times IRET was successfully interpreted.");
179 STAM_REG_USED(pVM, &pStats->StatR3Iret, STAMTYPE_COUNTER, "/EM/R3/Interpret/Success/Iret", STAMUNIT_OCCURENCES, "The number of times IRET was successfully interpreted.");
180 STAM_REG_USED(pVM, &pStats->StatRZLLdt, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Success/LLdt", STAMUNIT_OCCURENCES, "The number of times LLDT was successfully interpreted.");
181 STAM_REG_USED(pVM, &pStats->StatR3LLdt, STAMTYPE_COUNTER, "/EM/R3/Interpret/Success/LLdt", STAMUNIT_OCCURENCES, "The number of times LLDT was successfully interpreted.");
182 STAM_REG_USED(pVM, &pStats->StatRZLIdt, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Success/LIdt", STAMUNIT_OCCURENCES, "The number of times LIDT was successfully interpreted.");
183 STAM_REG_USED(pVM, &pStats->StatR3LIdt, STAMTYPE_COUNTER, "/EM/R3/Interpret/Success/LIdt", STAMUNIT_OCCURENCES, "The number of times LIDT was successfully interpreted.");
184 STAM_REG_USED(pVM, &pStats->StatRZLGdt, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Success/LGdt", STAMUNIT_OCCURENCES, "The number of times LGDT was successfully interpreted.");
185 STAM_REG_USED(pVM, &pStats->StatR3LGdt, STAMTYPE_COUNTER, "/EM/R3/Interpret/Success/LGdt", STAMUNIT_OCCURENCES, "The number of times LGDT was successfully interpreted.");
186 STAM_REG_USED(pVM, &pStats->StatRZMov, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Success/Mov", STAMUNIT_OCCURENCES, "The number of times MOV was successfully interpreted.");
187 STAM_REG_USED(pVM, &pStats->StatR3Mov, STAMTYPE_COUNTER, "/EM/R3/Interpret/Success/Mov", STAMUNIT_OCCURENCES, "The number of times MOV was successfully interpreted.");
188 STAM_REG_USED(pVM, &pStats->StatRZMovCRx, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Success/MovCRx", STAMUNIT_OCCURENCES, "The number of times MOV CRx was successfully interpreted.");
189 STAM_REG_USED(pVM, &pStats->StatR3MovCRx, STAMTYPE_COUNTER, "/EM/R3/Interpret/Success/MovCRx", STAMUNIT_OCCURENCES, "The number of times MOV CRx was successfully interpreted.");
190 STAM_REG_USED(pVM, &pStats->StatRZMovDRx, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Success/MovDRx", STAMUNIT_OCCURENCES, "The number of times MOV DRx was successfully interpreted.");
191 STAM_REG_USED(pVM, &pStats->StatR3MovDRx, STAMTYPE_COUNTER, "/EM/R3/Interpret/Success/MovDRx", STAMUNIT_OCCURENCES, "The number of times MOV DRx was successfully interpreted.");
192 STAM_REG_USED(pVM, &pStats->StatRZOr, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Success/Or", STAMUNIT_OCCURENCES, "The number of times OR was successfully interpreted.");
193 STAM_REG_USED(pVM, &pStats->StatR3Or, STAMTYPE_COUNTER, "/EM/R3/Interpret/Success/Or", STAMUNIT_OCCURENCES, "The number of times OR was successfully interpreted.");
194 STAM_REG_USED(pVM, &pStats->StatRZPop, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Success/Pop", STAMUNIT_OCCURENCES, "The number of times POP was successfully interpreted.");
195 STAM_REG_USED(pVM, &pStats->StatR3Pop, STAMTYPE_COUNTER, "/EM/R3/Interpret/Success/Pop", STAMUNIT_OCCURENCES, "The number of times POP was successfully interpreted.");
196 STAM_REG_USED(pVM, &pStats->StatRZRdtsc, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Success/Rdtsc", STAMUNIT_OCCURENCES, "The number of times RDTSC was successfully interpreted.");
197 STAM_REG_USED(pVM, &pStats->StatR3Rdtsc, STAMTYPE_COUNTER, "/EM/R3/Interpret/Success/Rdtsc", STAMUNIT_OCCURENCES, "The number of times RDTSC was successfully interpreted.");
198 STAM_REG_USED(pVM, &pStats->StatRZSti, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Success/Sti", STAMUNIT_OCCURENCES, "The number of times STI was successfully interpreted.");
199 STAM_REG_USED(pVM, &pStats->StatR3Sti, STAMTYPE_COUNTER, "/EM/R3/Interpret/Success/Sti", STAMUNIT_OCCURENCES, "The number of times STI was successfully interpreted.");
200 STAM_REG_USED(pVM, &pStats->StatRZXchg, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Success/Xchg", STAMUNIT_OCCURENCES, "The number of times XCHG was successfully interpreted.");
201 STAM_REG_USED(pVM, &pStats->StatR3Xchg, STAMTYPE_COUNTER, "/EM/R3/Interpret/Success/Xchg", STAMUNIT_OCCURENCES, "The number of times XCHG was successfully interpreted.");
202 STAM_REG_USED(pVM, &pStats->StatRZXor, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Success/Xor", STAMUNIT_OCCURENCES, "The number of times XOR was successfully interpreted.");
203 STAM_REG_USED(pVM, &pStats->StatR3Xor, STAMTYPE_COUNTER, "/EM/R3/Interpret/Success/Xor", STAMUNIT_OCCURENCES, "The number of times XOR was successfully interpreted.");
204 STAM_REG_USED(pVM, &pStats->StatRZMonitor, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Success/Monitor", STAMUNIT_OCCURENCES, "The number of times MONITOR was successfully interpreted.");
205 STAM_REG_USED(pVM, &pStats->StatR3Monitor, STAMTYPE_COUNTER, "/EM/R3/Interpret/Success/Monitor", STAMUNIT_OCCURENCES, "The number of times MONITOR was successfully interpreted.");
206 STAM_REG_USED(pVM, &pStats->StatRZMWait, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Success/MWait", STAMUNIT_OCCURENCES, "The number of times MWAIT was successfully interpreted.");
207 STAM_REG_USED(pVM, &pStats->StatR3MWait, STAMTYPE_COUNTER, "/EM/R3/Interpret/Success/MWait", STAMUNIT_OCCURENCES, "The number of times MWAIT was successfully interpreted.");
208 STAM_REG_USED(pVM, &pStats->StatRZBtr, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Success/Btr", STAMUNIT_OCCURENCES, "The number of times BTR was successfully interpreted.");
209 STAM_REG_USED(pVM, &pStats->StatR3Btr, STAMTYPE_COUNTER, "/EM/R3/Interpret/Success/Btr", STAMUNIT_OCCURENCES, "The number of times BTR was successfully interpreted.");
210 STAM_REG_USED(pVM, &pStats->StatRZBts, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Success/Bts", STAMUNIT_OCCURENCES, "The number of times BTS was successfully interpreted.");
211 STAM_REG_USED(pVM, &pStats->StatR3Bts, STAMTYPE_COUNTER, "/EM/R3/Interpret/Success/Bts", STAMUNIT_OCCURENCES, "The number of times BTS was successfully interpreted.");
212 STAM_REG_USED(pVM, &pStats->StatRZBtc, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Success/Btc", STAMUNIT_OCCURENCES, "The number of times BTC was successfully interpreted.");
213 STAM_REG_USED(pVM, &pStats->StatR3Btc, STAMTYPE_COUNTER, "/EM/R3/Interpret/Success/Btc", STAMUNIT_OCCURENCES, "The number of times BTC was successfully interpreted.");
214 STAM_REG_USED(pVM, &pStats->StatRZCmpXchg, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Success/CmpXchg", STAMUNIT_OCCURENCES, "The number of times CMPXCHG was successfully interpreted.");
215 STAM_REG_USED(pVM, &pStats->StatR3CmpXchg, STAMTYPE_COUNTER, "/EM/R3/Interpret/Success/CmpXchg", STAMUNIT_OCCURENCES, "The number of times CMPXCHG was successfully interpreted.");
216 STAM_REG_USED(pVM, &pStats->StatRZCmpXchg8b, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Success/CmpXchg8b", STAMUNIT_OCCURENCES, "The number of times CMPXCHG8B was successfully interpreted.");
217 STAM_REG_USED(pVM, &pStats->StatR3CmpXchg8b, STAMTYPE_COUNTER, "/EM/R3/Interpret/Success/CmpXchg8b", STAMUNIT_OCCURENCES, "The number of times CMPXCHG8B was successfully interpreted.");
218 STAM_REG_USED(pVM, &pStats->StatRZXAdd, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Success/XAdd", STAMUNIT_OCCURENCES, "The number of times XADD was successfully interpreted.");
219 STAM_REG_USED(pVM, &pStats->StatR3XAdd, STAMTYPE_COUNTER, "/EM/R3/Interpret/Success/XAdd", STAMUNIT_OCCURENCES, "The number of times XADD was successfully interpreted.");
220 STAM_REG_USED(pVM, &pStats->StatR3Rdmsr, STAMTYPE_COUNTER, "/EM/R3/Interpret/Success/Rdmsr", STAMUNIT_OCCURENCES, "The number of times RDMSR was not interpreted.");
221 STAM_REG_USED(pVM, &pStats->StatRZRdmsr, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Success/Rdmsr", STAMUNIT_OCCURENCES, "The number of times RDMSR was not interpreted.");
222 STAM_REG_USED(pVM, &pStats->StatR3Wrmsr, STAMTYPE_COUNTER, "/EM/R3/Interpret/Success/Wrmsr", STAMUNIT_OCCURENCES, "The number of times WRMSR was not interpreted.");
223 STAM_REG_USED(pVM, &pStats->StatRZWrmsr, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Success/Wrmsr", STAMUNIT_OCCURENCES, "The number of times WRMSR was not interpreted.");
224 STAM_REG_USED(pVM, &pStats->StatR3StosWD, STAMTYPE_COUNTER, "/EM/R3/Interpret/Success/Stoswd", STAMUNIT_OCCURENCES, "The number of times STOSWD was not interpreted.");
225 STAM_REG_USED(pVM, &pStats->StatRZStosWD, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Success/Stoswd", STAMUNIT_OCCURENCES, "The number of times STOSWD was not interpreted.");
226
227 STAM_REG(pVM, &pStats->StatRZInterpretFailed, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Failed", STAMUNIT_OCCURENCES, "The number of times an instruction was not interpreted.");
228 STAM_REG(pVM, &pStats->StatR3InterpretFailed, STAMTYPE_COUNTER, "/EM/R3/Interpret/Failed", STAMUNIT_OCCURENCES, "The number of times an instruction was not interpreted.");
229
230 STAM_REG_USED(pVM, &pStats->StatRZFailedAnd, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Failed/And", STAMUNIT_OCCURENCES, "The number of times AND was not interpreted.");
231 STAM_REG_USED(pVM, &pStats->StatR3FailedAnd, STAMTYPE_COUNTER, "/EM/R3/Interpret/Failed/And", STAMUNIT_OCCURENCES, "The number of times AND was not interpreted.");
232 STAM_REG_USED(pVM, &pStats->StatRZFailedCpuId, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Failed/CpuId", STAMUNIT_OCCURENCES, "The number of times CPUID was not interpreted.");
233 STAM_REG_USED(pVM, &pStats->StatR3FailedCpuId, STAMTYPE_COUNTER, "/EM/R3/Interpret/Failed/CpuId", STAMUNIT_OCCURENCES, "The number of times CPUID was not interpreted.");
234 STAM_REG_USED(pVM, &pStats->StatRZFailedDec, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Failed/Dec", STAMUNIT_OCCURENCES, "The number of times DEC was not interpreted.");
235 STAM_REG_USED(pVM, &pStats->StatR3FailedDec, STAMTYPE_COUNTER, "/EM/R3/Interpret/Failed/Dec", STAMUNIT_OCCURENCES, "The number of times DEC was not interpreted.");
236 STAM_REG_USED(pVM, &pStats->StatRZFailedHlt, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Failed/Hlt", STAMUNIT_OCCURENCES, "The number of times HLT was not interpreted.");
237 STAM_REG_USED(pVM, &pStats->StatR3FailedHlt, STAMTYPE_COUNTER, "/EM/R3/Interpret/Failed/Hlt", STAMUNIT_OCCURENCES, "The number of times HLT was not interpreted.");
238 STAM_REG_USED(pVM, &pStats->StatRZFailedInc, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Failed/Inc", STAMUNIT_OCCURENCES, "The number of times INC was not interpreted.");
239 STAM_REG_USED(pVM, &pStats->StatR3FailedInc, STAMTYPE_COUNTER, "/EM/R3/Interpret/Failed/Inc", STAMUNIT_OCCURENCES, "The number of times INC was not interpreted.");
240 STAM_REG_USED(pVM, &pStats->StatRZFailedInvlPg, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Failed/InvlPg", STAMUNIT_OCCURENCES, "The number of times INVLPG was not interpreted.");
241 STAM_REG_USED(pVM, &pStats->StatR3FailedInvlPg, STAMTYPE_COUNTER, "/EM/R3/Interpret/Failed/InvlPg", STAMUNIT_OCCURENCES, "The number of times INVLPG was not interpreted.");
242 STAM_REG_USED(pVM, &pStats->StatRZFailedIret, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Failed/Iret", STAMUNIT_OCCURENCES, "The number of times IRET was not interpreted.");
243 STAM_REG_USED(pVM, &pStats->StatR3FailedIret, STAMTYPE_COUNTER, "/EM/R3/Interpret/Failed/Iret", STAMUNIT_OCCURENCES, "The number of times IRET was not interpreted.");
244 STAM_REG_USED(pVM, &pStats->StatRZFailedLLdt, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Failed/LLdt", STAMUNIT_OCCURENCES, "The number of times LLDT was not interpreted.");
245 STAM_REG_USED(pVM, &pStats->StatR3FailedLLdt, STAMTYPE_COUNTER, "/EM/R3/Interpret/Failed/LLdt", STAMUNIT_OCCURENCES, "The number of times LLDT was not interpreted.");
246 STAM_REG_USED(pVM, &pStats->StatRZFailedLIdt, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Failed/LIdt", STAMUNIT_OCCURENCES, "The number of times LIDT was not interpreted.");
247 STAM_REG_USED(pVM, &pStats->StatR3FailedLIdt, STAMTYPE_COUNTER, "/EM/R3/Interpret/Failed/LIdt", STAMUNIT_OCCURENCES, "The number of times LIDT was not interpreted.");
248 STAM_REG_USED(pVM, &pStats->StatRZFailedLGdt, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Failed/LGdt", STAMUNIT_OCCURENCES, "The number of times LGDT was not interpreted.");
249 STAM_REG_USED(pVM, &pStats->StatR3FailedLGdt, STAMTYPE_COUNTER, "/EM/R3/Interpret/Failed/LGdt", STAMUNIT_OCCURENCES, "The number of times LGDT was not interpreted.");
250 STAM_REG_USED(pVM, &pStats->StatRZFailedMov, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Failed/Mov", STAMUNIT_OCCURENCES, "The number of times MOV was not interpreted.");
251 STAM_REG_USED(pVM, &pStats->StatR3FailedMov, STAMTYPE_COUNTER, "/EM/R3/Interpret/Failed/Mov", STAMUNIT_OCCURENCES, "The number of times MOV was not interpreted.");
252 STAM_REG_USED(pVM, &pStats->StatRZFailedMovCRx, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Failed/MovCRx", STAMUNIT_OCCURENCES, "The number of times MOV CRx was not interpreted.");
253 STAM_REG_USED(pVM, &pStats->StatR3FailedMovCRx, STAMTYPE_COUNTER, "/EM/R3/Interpret/Failed/MovCRx", STAMUNIT_OCCURENCES, "The number of times MOV CRx was not interpreted.");
254 STAM_REG_USED(pVM, &pStats->StatRZFailedMovDRx, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Failed/MovDRx", STAMUNIT_OCCURENCES, "The number of times MOV DRx was not interpreted.");
255 STAM_REG_USED(pVM, &pStats->StatR3FailedMovDRx, STAMTYPE_COUNTER, "/EM/R3/Interpret/Failed/MovDRx", STAMUNIT_OCCURENCES, "The number of times MOV DRx was not interpreted.");
256 STAM_REG_USED(pVM, &pStats->StatRZFailedOr, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Failed/Or", STAMUNIT_OCCURENCES, "The number of times OR was not interpreted.");
257 STAM_REG_USED(pVM, &pStats->StatR3FailedOr, STAMTYPE_COUNTER, "/EM/R3/Interpret/Failed/Or", STAMUNIT_OCCURENCES, "The number of times OR was not interpreted.");
258 STAM_REG_USED(pVM, &pStats->StatRZFailedPop, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Failed/Pop", STAMUNIT_OCCURENCES, "The number of times POP was not interpreted.");
259 STAM_REG_USED(pVM, &pStats->StatR3FailedPop, STAMTYPE_COUNTER, "/EM/R3/Interpret/Failed/Pop", STAMUNIT_OCCURENCES, "The number of times POP was not interpreted.");
260 STAM_REG_USED(pVM, &pStats->StatRZFailedSti, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Failed/Sti", STAMUNIT_OCCURENCES, "The number of times STI was not interpreted.");
261 STAM_REG_USED(pVM, &pStats->StatR3FailedSti, STAMTYPE_COUNTER, "/EM/R3/Interpret/Failed/Sti", STAMUNIT_OCCURENCES, "The number of times STI was not interpreted.");
262 STAM_REG_USED(pVM, &pStats->StatRZFailedXchg, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Failed/Xchg", STAMUNIT_OCCURENCES, "The number of times XCHG was not interpreted.");
263 STAM_REG_USED(pVM, &pStats->StatR3FailedXchg, STAMTYPE_COUNTER, "/EM/R3/Interpret/Failed/Xchg", STAMUNIT_OCCURENCES, "The number of times XCHG was not interpreted.");
264 STAM_REG_USED(pVM, &pStats->StatRZFailedXor, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Failed/Xor", STAMUNIT_OCCURENCES, "The number of times XOR was not interpreted.");
265 STAM_REG_USED(pVM, &pStats->StatR3FailedXor, STAMTYPE_COUNTER, "/EM/R3/Interpret/Failed/Xor", STAMUNIT_OCCURENCES, "The number of times XOR was not interpreted.");
266 STAM_REG_USED(pVM, &pStats->StatRZFailedMonitor, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Failed/Monitor", STAMUNIT_OCCURENCES, "The number of times MONITOR was not interpreted.");
267 STAM_REG_USED(pVM, &pStats->StatR3FailedMonitor, STAMTYPE_COUNTER, "/EM/R3/Interpret/Failed/Monitor", STAMUNIT_OCCURENCES, "The number of times MONITOR was not interpreted.");
268 STAM_REG_USED(pVM, &pStats->StatRZFailedMWait, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Failed/MWait", STAMUNIT_OCCURENCES, "The number of times MONITOR was not interpreted.");
269 STAM_REG_USED(pVM, &pStats->StatR3FailedMWait, STAMTYPE_COUNTER, "/EM/R3/Interpret/Failed/MWait", STAMUNIT_OCCURENCES, "The number of times MONITOR was not interpreted.");
270 STAM_REG_USED(pVM, &pStats->StatRZFailedRdtsc, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Failed/Rdtsc", STAMUNIT_OCCURENCES, "The number of times RDTSC was not interpreted.");
271 STAM_REG_USED(pVM, &pStats->StatR3FailedRdtsc, STAMTYPE_COUNTER, "/EM/R3/Interpret/Failed/Rdtsc", STAMUNIT_OCCURENCES, "The number of times RDTSC was not interpreted.");
272 STAM_REG_USED(pVM, &pStats->StatRZFailedRdmsr, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Failed/Rdmsr", STAMUNIT_OCCURENCES, "The number of times RDMSR was not interpreted.");
273 STAM_REG_USED(pVM, &pStats->StatR3FailedRdmsr, STAMTYPE_COUNTER, "/EM/R3/Interpret/Failed/Rdmsr", STAMUNIT_OCCURENCES, "The number of times RDMSR was not interpreted.");
274 STAM_REG_USED(pVM, &pStats->StatRZFailedWrmsr, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Failed/Wrmsr", STAMUNIT_OCCURENCES, "The number of times WRMSR was not interpreted.");
275 STAM_REG_USED(pVM, &pStats->StatR3FailedWrmsr, STAMTYPE_COUNTER, "/EM/R3/Interpret/Failed/Wrmsr", STAMUNIT_OCCURENCES, "The number of times WRMSR was not interpreted.");
276
277 STAM_REG_USED(pVM, &pStats->StatRZFailedMisc, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Failed/Misc", STAMUNIT_OCCURENCES, "The number of times some misc instruction was encountered.");
278 STAM_REG_USED(pVM, &pStats->StatR3FailedMisc, STAMTYPE_COUNTER, "/EM/R3/Interpret/Failed/Misc", STAMUNIT_OCCURENCES, "The number of times some misc instruction was encountered.");
279 STAM_REG_USED(pVM, &pStats->StatRZFailedAdd, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Failed/Add", STAMUNIT_OCCURENCES, "The number of times ADD was not interpreted.");
280 STAM_REG_USED(pVM, &pStats->StatR3FailedAdd, STAMTYPE_COUNTER, "/EM/R3/Interpret/Failed/Add", STAMUNIT_OCCURENCES, "The number of times ADD was not interpreted.");
281 STAM_REG_USED(pVM, &pStats->StatRZFailedAdc, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Failed/Adc", STAMUNIT_OCCURENCES, "The number of times ADC was not interpreted.");
282 STAM_REG_USED(pVM, &pStats->StatR3FailedAdc, STAMTYPE_COUNTER, "/EM/R3/Interpret/Failed/Adc", STAMUNIT_OCCURENCES, "The number of times ADC was not interpreted.");
283 STAM_REG_USED(pVM, &pStats->StatRZFailedBtr, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Failed/Btr", STAMUNIT_OCCURENCES, "The number of times BTR was not interpreted.");
284 STAM_REG_USED(pVM, &pStats->StatR3FailedBtr, STAMTYPE_COUNTER, "/EM/R3/Interpret/Failed/Btr", STAMUNIT_OCCURENCES, "The number of times BTR was not interpreted.");
285 STAM_REG_USED(pVM, &pStats->StatRZFailedBts, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Failed/Bts", STAMUNIT_OCCURENCES, "The number of times BTS was not interpreted.");
286 STAM_REG_USED(pVM, &pStats->StatR3FailedBts, STAMTYPE_COUNTER, "/EM/R3/Interpret/Failed/Bts", STAMUNIT_OCCURENCES, "The number of times BTS was not interpreted.");
287 STAM_REG_USED(pVM, &pStats->StatRZFailedBtc, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Failed/Btc", STAMUNIT_OCCURENCES, "The number of times BTC was not interpreted.");
288 STAM_REG_USED(pVM, &pStats->StatR3FailedBtc, STAMTYPE_COUNTER, "/EM/R3/Interpret/Failed/Btc", STAMUNIT_OCCURENCES, "The number of times BTC was not interpreted.");
289 STAM_REG_USED(pVM, &pStats->StatRZFailedCli, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Failed/Cli", STAMUNIT_OCCURENCES, "The number of times CLI was not interpreted.");
290 STAM_REG_USED(pVM, &pStats->StatR3FailedCli, STAMTYPE_COUNTER, "/EM/R3/Interpret/Failed/Cli", STAMUNIT_OCCURENCES, "The number of times CLI was not interpreted.");
291 STAM_REG_USED(pVM, &pStats->StatRZFailedCmpXchg, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Failed/CmpXchg", STAMUNIT_OCCURENCES, "The number of times CMPXCHG was not interpreted.");
292 STAM_REG_USED(pVM, &pStats->StatR3FailedCmpXchg, STAMTYPE_COUNTER, "/EM/R3/Interpret/Failed/CmpXchg", STAMUNIT_OCCURENCES, "The number of times CMPXCHG was not interpreted.");
293 STAM_REG_USED(pVM, &pStats->StatRZFailedCmpXchg8b, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Failed/CmpXchg8b", STAMUNIT_OCCURENCES, "The number of times CMPXCHG8B was not interpreted.");
294 STAM_REG_USED(pVM, &pStats->StatR3FailedCmpXchg8b, STAMTYPE_COUNTER, "/EM/R3/Interpret/Failed/CmpXchg8b", STAMUNIT_OCCURENCES, "The number of times CMPXCHG8B was not interpreted.");
295 STAM_REG_USED(pVM, &pStats->StatRZFailedXAdd, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Failed/XAdd", STAMUNIT_OCCURENCES, "The number of times XADD was not interpreted.");
296 STAM_REG_USED(pVM, &pStats->StatR3FailedXAdd, STAMTYPE_COUNTER, "/EM/R3/Interpret/Failed/XAdd", STAMUNIT_OCCURENCES, "The number of times XADD was not interpreted.");
297 STAM_REG_USED(pVM, &pStats->StatRZFailedMovNTPS, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Failed/MovNTPS", STAMUNIT_OCCURENCES, "The number of times MOVNTPS was not interpreted.");
298 STAM_REG_USED(pVM, &pStats->StatR3FailedMovNTPS, STAMTYPE_COUNTER, "/EM/R3/Interpret/Failed/MovNTPS", STAMUNIT_OCCURENCES, "The number of times MOVNTPS was not interpreted.");
299 STAM_REG_USED(pVM, &pStats->StatRZFailedStosWD, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Failed/StosWD", STAMUNIT_OCCURENCES, "The number of times STOSWD was not interpreted.");
300 STAM_REG_USED(pVM, &pStats->StatR3FailedStosWD, STAMTYPE_COUNTER, "/EM/R3/Interpret/Failed/StosWD", STAMUNIT_OCCURENCES, "The number of times STOSWD was not interpreted.");
301 STAM_REG_USED(pVM, &pStats->StatRZFailedSub, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Failed/Sub", STAMUNIT_OCCURENCES, "The number of times SUB was not interpreted.");
302 STAM_REG_USED(pVM, &pStats->StatR3FailedSub, STAMTYPE_COUNTER, "/EM/R3/Interpret/Failed/Sub", STAMUNIT_OCCURENCES, "The number of times SUB was not interpreted.");
303 STAM_REG_USED(pVM, &pStats->StatRZFailedWbInvd, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Failed/WbInvd", STAMUNIT_OCCURENCES, "The number of times WBINVD was not interpreted.");
304 STAM_REG_USED(pVM, &pStats->StatR3FailedWbInvd, STAMTYPE_COUNTER, "/EM/R3/Interpret/Failed/WbInvd", STAMUNIT_OCCURENCES, "The number of times WBINVD was not interpreted.");
305
306 STAM_REG_USED(pVM, &pStats->StatRZFailedUserMode, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Failed/UserMode", STAMUNIT_OCCURENCES, "The number of rejections because of CPL.");
307 STAM_REG_USED(pVM, &pStats->StatR3FailedUserMode, STAMTYPE_COUNTER, "/EM/R3/Interpret/Failed/UserMode", STAMUNIT_OCCURENCES, "The number of rejections because of CPL.");
308 STAM_REG_USED(pVM, &pStats->StatRZFailedPrefix, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Failed/Prefix", STAMUNIT_OCCURENCES, "The number of rejections because of prefix .");
309 STAM_REG_USED(pVM, &pStats->StatR3FailedPrefix, STAMTYPE_COUNTER, "/EM/R3/Interpret/Failed/Prefix", STAMUNIT_OCCURENCES, "The number of rejections because of prefix .");
310
311 STAM_REG_USED(pVM, &pStats->StatCli, STAMTYPE_COUNTER, "/EM/R3/PrivInst/Cli", STAMUNIT_OCCURENCES, "Number of cli instructions.");
312 STAM_REG_USED(pVM, &pStats->StatSti, STAMTYPE_COUNTER, "/EM/R3/PrivInst/Sti", STAMUNIT_OCCURENCES, "Number of sli instructions.");
313 STAM_REG_USED(pVM, &pStats->StatIn, STAMTYPE_COUNTER, "/EM/R3/PrivInst/In", STAMUNIT_OCCURENCES, "Number of in instructions.");
314 STAM_REG_USED(pVM, &pStats->StatOut, STAMTYPE_COUNTER, "/EM/R3/PrivInst/Out", STAMUNIT_OCCURENCES, "Number of out instructions.");
315 STAM_REG_USED(pVM, &pStats->StatHlt, STAMTYPE_COUNTER, "/EM/R3/PrivInst/Hlt", STAMUNIT_OCCURENCES, "Number of hlt instructions not handled in GC because of PATM.");
316 STAM_REG_USED(pVM, &pStats->StatInvlpg, STAMTYPE_COUNTER, "/EM/R3/PrivInst/Invlpg", STAMUNIT_OCCURENCES, "Number of invlpg instructions.");
317 STAM_REG_USED(pVM, &pStats->StatMisc, STAMTYPE_COUNTER, "/EM/R3/PrivInst/Misc", STAMUNIT_OCCURENCES, "Number of misc. instructions.");
318 STAM_REG_USED(pVM, &pStats->StatMovWriteCR[0], STAMTYPE_COUNTER, "/EM/R3/PrivInst/Mov CR0, X", STAMUNIT_OCCURENCES, "Number of mov CR0 read instructions.");
319 STAM_REG_USED(pVM, &pStats->StatMovWriteCR[1], STAMTYPE_COUNTER, "/EM/R3/PrivInst/Mov CR1, X", STAMUNIT_OCCURENCES, "Number of mov CR1 read instructions.");
320 STAM_REG_USED(pVM, &pStats->StatMovWriteCR[2], STAMTYPE_COUNTER, "/EM/R3/PrivInst/Mov CR2, X", STAMUNIT_OCCURENCES, "Number of mov CR2 read instructions.");
321 STAM_REG_USED(pVM, &pStats->StatMovWriteCR[3], STAMTYPE_COUNTER, "/EM/R3/PrivInst/Mov CR3, X", STAMUNIT_OCCURENCES, "Number of mov CR3 read instructions.");
322 STAM_REG_USED(pVM, &pStats->StatMovWriteCR[4], STAMTYPE_COUNTER, "/EM/R3/PrivInst/Mov CR4, X", STAMUNIT_OCCURENCES, "Number of mov CR4 read instructions.");
323 STAM_REG_USED(pVM, &pStats->StatMovReadCR[0], STAMTYPE_COUNTER, "/EM/R3/PrivInst/Mov X, CR0", STAMUNIT_OCCURENCES, "Number of mov CR0 write instructions.");
324 STAM_REG_USED(pVM, &pStats->StatMovReadCR[1], STAMTYPE_COUNTER, "/EM/R3/PrivInst/Mov X, CR1", STAMUNIT_OCCURENCES, "Number of mov CR1 write instructions.");
325 STAM_REG_USED(pVM, &pStats->StatMovReadCR[2], STAMTYPE_COUNTER, "/EM/R3/PrivInst/Mov X, CR2", STAMUNIT_OCCURENCES, "Number of mov CR2 write instructions.");
326 STAM_REG_USED(pVM, &pStats->StatMovReadCR[3], STAMTYPE_COUNTER, "/EM/R3/PrivInst/Mov X, CR3", STAMUNIT_OCCURENCES, "Number of mov CR3 write instructions.");
327 STAM_REG_USED(pVM, &pStats->StatMovReadCR[4], STAMTYPE_COUNTER, "/EM/R3/PrivInst/Mov X, CR4", STAMUNIT_OCCURENCES, "Number of mov CR4 write instructions.");
328 STAM_REG_USED(pVM, &pStats->StatMovDRx, STAMTYPE_COUNTER, "/EM/R3/PrivInst/MovDRx", STAMUNIT_OCCURENCES, "Number of mov DRx instructions.");
329 STAM_REG_USED(pVM, &pStats->StatIret, STAMTYPE_COUNTER, "/EM/R3/PrivInst/Iret", STAMUNIT_OCCURENCES, "Number of iret instructions.");
330 STAM_REG_USED(pVM, &pStats->StatMovLgdt, STAMTYPE_COUNTER, "/EM/R3/PrivInst/Lgdt", STAMUNIT_OCCURENCES, "Number of lgdt instructions.");
331 STAM_REG_USED(pVM, &pStats->StatMovLidt, STAMTYPE_COUNTER, "/EM/R3/PrivInst/Lidt", STAMUNIT_OCCURENCES, "Number of lidt instructions.");
332 STAM_REG_USED(pVM, &pStats->StatMovLldt, STAMTYPE_COUNTER, "/EM/R3/PrivInst/Lldt", STAMUNIT_OCCURENCES, "Number of lldt instructions.");
333 STAM_REG_USED(pVM, &pStats->StatSysEnter, STAMTYPE_COUNTER, "/EM/R3/PrivInst/Sysenter", STAMUNIT_OCCURENCES, "Number of sysenter instructions.");
334 STAM_REG_USED(pVM, &pStats->StatSysExit, STAMTYPE_COUNTER, "/EM/R3/PrivInst/Sysexit", STAMUNIT_OCCURENCES, "Number of sysexit instructions.");
335 STAM_REG_USED(pVM, &pStats->StatSysCall, STAMTYPE_COUNTER, "/EM/R3/PrivInst/Syscall", STAMUNIT_OCCURENCES, "Number of syscall instructions.");
336 STAM_REG_USED(pVM, &pStats->StatSysRet, STAMTYPE_COUNTER, "/EM/R3/PrivInst/Sysret", STAMUNIT_OCCURENCES, "Number of sysret instructions.");
337
338 STAM_REG(pVM, &pVM->em.s.StatTotalClis, STAMTYPE_COUNTER, "/EM/Cli/Total", STAMUNIT_OCCURENCES, "Total number of cli instructions executed.");
339 pVM->em.s.pCliStatTree = 0;
340#endif /* VBOX_WITH_STATISTICS */
341
342 /* these should be considered for release statistics. */
343 STAM_REL_REG(pVM, &pVM->em.s.StatForcedActions, STAMTYPE_PROFILE, "/PROF/EM/ForcedActions", STAMUNIT_TICKS_PER_CALL, "Profiling forced action execution.");
344 STAM_REG(pVM, &pVM->em.s.StatIOEmu, STAMTYPE_PROFILE, "/PROF/EM/Emulation/IO", STAMUNIT_TICKS_PER_CALL, "Profiling of emR3RawExecuteIOInstruction.");
345 STAM_REG(pVM, &pVM->em.s.StatPrivEmu, STAMTYPE_PROFILE, "/PROF/EM/Emulation/Priv", STAMUNIT_TICKS_PER_CALL, "Profiling of emR3RawPrivileged.");
346 STAM_REG(pVM, &pVM->em.s.StatMiscEmu, STAMTYPE_PROFILE, "/PROF/EM/Emulation/Misc", STAMUNIT_TICKS_PER_CALL, "Profiling of emR3RawExecuteInstruction.");
347
348 STAM_REL_REG(pVM, &pVM->em.s.StatHalted, STAMTYPE_PROFILE, "/PROF/EM/Halted", STAMUNIT_TICKS_PER_CALL, "Profiling halted state (VMR3WaitHalted).");
349 STAM_REG(pVM, &pVM->em.s.StatHwAccEntry, STAMTYPE_PROFILE, "/PROF/EM/HwAccEnter", STAMUNIT_TICKS_PER_CALL, "Profiling Hardware Accelerated Mode entry overhead.");
350 STAM_REG(pVM, &pVM->em.s.StatHwAccExec, STAMTYPE_PROFILE, "/PROF/EM/HwAccExec", STAMUNIT_TICKS_PER_CALL, "Profiling Hardware Accelerated Mode execution.");
351 STAM_REG(pVM, &pVM->em.s.StatREMEmu, STAMTYPE_PROFILE, "/PROF/EM/REMEmuSingle", STAMUNIT_TICKS_PER_CALL, "Profiling single instruction REM execution.");
352 STAM_REG(pVM, &pVM->em.s.StatREMExec, STAMTYPE_PROFILE, "/PROF/EM/REMExec", STAMUNIT_TICKS_PER_CALL, "Profiling REM execution.");
353 STAM_REG(pVM, &pVM->em.s.StatREMSync, STAMTYPE_PROFILE, "/PROF/EM/REMSync", STAMUNIT_TICKS_PER_CALL, "Profiling REM context syncing.");
354 STAM_REL_REG(pVM, &pVM->em.s.StatREMTotal, STAMTYPE_PROFILE, "/PROF/EM/REMTotal", STAMUNIT_TICKS_PER_CALL, "Profiling emR3RemExecute (excluding FFs).");
355 STAM_REG(pVM, &pVM->em.s.StatRAWEntry, STAMTYPE_PROFILE, "/PROF/EM/RAWEnter", STAMUNIT_TICKS_PER_CALL, "Profiling Raw Mode entry overhead.");
356 STAM_REG(pVM, &pVM->em.s.StatRAWExec, STAMTYPE_PROFILE, "/PROF/EM/RAWExec", STAMUNIT_TICKS_PER_CALL, "Profiling Raw Mode execution.");
357 STAM_REG(pVM, &pVM->em.s.StatRAWTail, STAMTYPE_PROFILE, "/PROF/EM/RAWTail", STAMUNIT_TICKS_PER_CALL, "Profiling Raw Mode tail overhead.");
358 STAM_REL_REG(pVM, &pVM->em.s.StatRAWTotal, STAMTYPE_PROFILE, "/PROF/EM/RAWTotal", STAMUNIT_TICKS_PER_CALL, "Profiling emR3RawExecute (excluding FFs).");
359 STAM_REL_REG(pVM, &pVM->em.s.StatTotal, STAMTYPE_PROFILE_ADV, "/PROF/EM/Total", STAMUNIT_TICKS_PER_CALL, "Profiling EMR3ExecuteVM.");
360
361
362 return VINF_SUCCESS;
363}
364
365
366/**
367 * Applies relocations to data and code managed by this
368 * component. This function will be called at init and
369 * whenever the VMM need to relocate it self inside the GC.
370 *
371 * @param pVM The VM.
372 */
373EMR3DECL(void) EMR3Relocate(PVM pVM)
374{
375 LogFlow(("EMR3Relocate\n"));
376 if (pVM->em.s.pStatsR3)
377 pVM->em.s.pStatsRC = MMHyperR3ToRC(pVM, pVM->em.s.pStatsR3);
378}
379
380
381/**
382 * Reset notification.
383 *
384 * @param pVM
385 */
386EMR3DECL(void) EMR3Reset(PVM pVM)
387{
388 LogFlow(("EMR3Reset: \n"));
389 pVM->em.s.fForceRAW = false;
390}
391
392
393/**
394 * Terminates the EM.
395 *
396 * Termination means cleaning up and freeing all resources,
397 * the VM it self is at this point powered off or suspended.
398 *
399 * @returns VBox status code.
400 * @param pVM The VM to operate on.
401 */
402EMR3DECL(int) EMR3Term(PVM pVM)
403{
404 AssertMsg(pVM->em.s.offVM, ("bad init order!\n"));
405
406 return VINF_SUCCESS;
407}
408
409
410/**
411 * Execute state save operation.
412 *
413 * @returns VBox status code.
414 * @param pVM VM Handle.
415 * @param pSSM SSM operation handle.
416 */
417static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM)
418{
419 return SSMR3PutBool(pSSM, pVM->em.s.fForceRAW);
420}
421
422
423/**
424 * Execute state load operation.
425 *
426 * @returns VBox status code.
427 * @param pVM VM Handle.
428 * @param pSSM SSM operation handle.
429 * @param u32Version Data layout version.
430 */
431static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version)
432{
433 /*
434 * Validate version.
435 */
436 if (u32Version != EM_SAVED_STATE_VERSION)
437 {
438 AssertMsgFailed(("emR3Load: Invalid version u32Version=%d (current %d)!\n", u32Version, EM_SAVED_STATE_VERSION));
439 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
440 }
441
442 /*
443 * Load the saved state.
444 */
445 int rc = SSMR3GetBool(pSSM, &pVM->em.s.fForceRAW);
446 if (VBOX_FAILURE(rc))
447 pVM->em.s.fForceRAW = false;
448
449 Assert(!pVM->em.s.pCliStatTree);
450 return rc;
451}
452
453
454/**
455 * Enables or disables a set of raw-mode execution modes.
456 *
457 * @returns VINF_SUCCESS on success.
458 * @returns VINF_RESCHEDULE if a rescheduling might be required.
459 * @returns VERR_INVALID_PARAMETER on an invalid enmMode value.
460 *
461 * @param pVM The VM to operate on.
462 * @param enmMode The execution mode change.
463 * @thread The emulation thread.
464 */
465EMR3DECL(int) EMR3RawSetMode(PVM pVM, EMRAWMODE enmMode)
466{
467 switch (enmMode)
468 {
469 case EMRAW_NONE:
470 pVM->fRawR3Enabled = false;
471 pVM->fRawR0Enabled = false;
472 break;
473 case EMRAW_RING3_ENABLE:
474 pVM->fRawR3Enabled = true;
475 break;
476 case EMRAW_RING3_DISABLE:
477 pVM->fRawR3Enabled = false;
478 break;
479 case EMRAW_RING0_ENABLE:
480 pVM->fRawR0Enabled = true;
481 break;
482 case EMRAW_RING0_DISABLE:
483 pVM->fRawR0Enabled = false;
484 break;
485 default:
486 AssertMsgFailed(("Invalid enmMode=%d\n", enmMode));
487 return VERR_INVALID_PARAMETER;
488 }
489 Log(("EMR3SetRawMode: fRawR3Enabled=%RTbool fRawR0Enabled=%RTbool\n",
490 pVM->fRawR3Enabled, pVM->fRawR0Enabled));
491 return pVM->em.s.enmState == EMSTATE_RAW ? VINF_EM_RESCHEDULE : VINF_SUCCESS;
492}
493
494
495/**
496 * Raise a fatal error.
497 *
498 * Safely terminate the VM with full state report and stuff. This function
499 * will naturally never return.
500 *
501 * @param pVM VM handle.
502 * @param rc VBox status code.
503 */
504EMR3DECL(void) EMR3FatalError(PVM pVM, int rc)
505{
506 longjmp(pVM->em.s.u.FatalLongJump, rc);
507 AssertReleaseMsgFailed(("longjmp returned!\n"));
508}
509
510
511/**
512 * Gets the EM state name.
513 *
514 * @returns pointer to read only state name,
515 * @param enmState The state.
516 */
517EMR3DECL(const char *) EMR3GetStateName(EMSTATE enmState)
518{
519 switch (enmState)
520 {
521 case EMSTATE_NONE: return "EMSTATE_NONE";
522 case EMSTATE_RAW: return "EMSTATE_RAW";
523 case EMSTATE_HWACC: return "EMSTATE_HWACC";
524 case EMSTATE_REM: return "EMSTATE_REM";
525 case EMSTATE_HALTED: return "EMSTATE_HALTED";
526 case EMSTATE_SUSPENDED: return "EMSTATE_SUSPENDED";
527 case EMSTATE_TERMINATING: return "EMSTATE_TERMINATING";
528 case EMSTATE_DEBUG_GUEST_RAW: return "EMSTATE_DEBUG_GUEST_RAW";
529 case EMSTATE_DEBUG_GUEST_REM: return "EMSTATE_DEBUG_GUEST_REM";
530 case EMSTATE_DEBUG_HYPER: return "EMSTATE_DEBUG_HYPER";
531 case EMSTATE_GURU_MEDITATION: return "EMSTATE_GURU_MEDITATION";
532 default: return "Unknown!";
533 }
534}
535
536
537#ifdef VBOX_WITH_STATISTICS
538/**
539 * Just a braindead function to keep track of cli addresses.
540 * @param pVM VM handle.
541 * @param pInstrGC The EIP of the cli instruction.
542 */
543static void emR3RecordCli(PVM pVM, RTGCPTR pInstrGC)
544{
545 PCLISTAT pRec;
546
547 pRec = (PCLISTAT)RTAvlPVGet(&pVM->em.s.pCliStatTree, (AVLPVKEY)pInstrGC);
548 if (!pRec)
549 {
550 /* New cli instruction; insert into the tree. */
551 pRec = (PCLISTAT)MMR3HeapAllocZ(pVM, MM_TAG_EM, sizeof(*pRec));
552 Assert(pRec);
553 if (!pRec)
554 return;
555 pRec->Core.Key = (AVLPVKEY)pInstrGC;
556
557 char szCliStatName[32];
558 RTStrPrintf(szCliStatName, sizeof(szCliStatName), "/EM/Cli/0x%VGv", pInstrGC);
559 STAM_REG(pVM, &pRec->Counter, STAMTYPE_COUNTER, szCliStatName, STAMUNIT_OCCURENCES, "Number of times cli was executed.");
560
561 bool fRc = RTAvlPVInsert(&pVM->em.s.pCliStatTree, &pRec->Core);
562 Assert(fRc); NOREF(fRc);
563 }
564 STAM_COUNTER_INC(&pRec->Counter);
565 STAM_COUNTER_INC(&pVM->em.s.StatTotalClis);
566}
567#endif /* VBOX_WITH_STATISTICS */
568
569
570/**
571 * Debug loop.
572 *
573 * @returns VBox status code for EM.
574 * @param pVM VM handle.
575 * @param rc Current EM VBox status code..
576 */
577static int emR3Debug(PVM pVM, int rc)
578{
579 for (;;)
580 {
581 Log(("emR3Debug: rc=%Vrc\n", rc));
582 const int rcLast = rc;
583
584 /*
585 * Debug related RC.
586 */
587 switch (rc)
588 {
589 /*
590 * Single step an instruction.
591 */
592 case VINF_EM_DBG_STEP:
593 if ( pVM->em.s.enmState == EMSTATE_DEBUG_GUEST_RAW
594 || pVM->em.s.enmState == EMSTATE_DEBUG_HYPER
595 || pVM->em.s.fForceRAW /* paranoia */)
596 rc = emR3RawStep(pVM);
597 else
598 {
599 Assert(pVM->em.s.enmState == EMSTATE_DEBUG_GUEST_REM);
600 rc = emR3RemStep(pVM);
601 }
602 break;
603
604 /*
605 * Simple events: stepped, breakpoint, stop/assertion.
606 */
607 case VINF_EM_DBG_STEPPED:
608 rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED);
609 break;
610
611 case VINF_EM_DBG_BREAKPOINT:
612 rc = DBGFR3EventBreakpoint(pVM, DBGFEVENT_BREAKPOINT);
613 break;
614
615 case VINF_EM_DBG_STOP:
616 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, NULL, 0, NULL, NULL);
617 break;
618
619 case VINF_EM_DBG_HYPER_STEPPED:
620 rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED_HYPER);
621 break;
622
623 case VINF_EM_DBG_HYPER_BREAKPOINT:
624 rc = DBGFR3EventBreakpoint(pVM, DBGFEVENT_BREAKPOINT_HYPER);
625 break;
626
627 case VINF_EM_DBG_HYPER_ASSERTION:
628 RTPrintf("\nVINF_EM_DBG_HYPER_ASSERTION:\n%s%s\n", VMMR3GetGCAssertMsg1(pVM), VMMR3GetGCAssertMsg2(pVM));
629 rc = DBGFR3EventAssertion(pVM, DBGFEVENT_ASSERTION_HYPER, VMMR3GetGCAssertMsg1(pVM), VMMR3GetGCAssertMsg2(pVM));
630 break;
631
632 /*
633 * Guru meditation.
634 */
635 case VERR_REM_TOO_MANY_TRAPS: /** @todo Make a guru meditation event! */
636 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, "VERR_REM_TOO_MANY_TRAPS", 0, NULL, NULL);
637 break;
638
639 default: /** @todo don't use default for guru, but make special errors code! */
640 rc = DBGFR3Event(pVM, DBGFEVENT_FATAL_ERROR);
641 break;
642 }
643
644 /*
645 * Process the result.
646 */
647 do
648 {
649 switch (rc)
650 {
651 /*
652 * Continue the debugging loop.
653 */
654 case VINF_EM_DBG_STEP:
655 case VINF_EM_DBG_STOP:
656 case VINF_EM_DBG_STEPPED:
657 case VINF_EM_DBG_BREAKPOINT:
658 case VINF_EM_DBG_HYPER_STEPPED:
659 case VINF_EM_DBG_HYPER_BREAKPOINT:
660 case VINF_EM_DBG_HYPER_ASSERTION:
661 break;
662
663 /*
664 * Resuming execution (in some form) has to be done here if we got
665 * a hypervisor debug event.
666 */
667 case VINF_SUCCESS:
668 case VINF_EM_RESUME:
669 case VINF_EM_SUSPEND:
670 case VINF_EM_RESCHEDULE:
671 case VINF_EM_RESCHEDULE_RAW:
672 case VINF_EM_RESCHEDULE_REM:
673 case VINF_EM_HALT:
674 if (pVM->em.s.enmState == EMSTATE_DEBUG_HYPER)
675 {
676 rc = emR3RawResumeHyper(pVM);
677 if (rc != VINF_SUCCESS && VBOX_SUCCESS(rc))
678 continue;
679 }
680 if (rc == VINF_SUCCESS)
681 rc = VINF_EM_RESCHEDULE;
682 return rc;
683
684 /*
685 * The debugger isn't attached.
686 * We'll simply turn the thing off since that's the easiest thing to do.
687 */
688 case VERR_DBGF_NOT_ATTACHED:
689 switch (rcLast)
690 {
691 case VINF_EM_DBG_HYPER_ASSERTION:
692 case VINF_EM_DBG_HYPER_STEPPED:
693 case VINF_EM_DBG_HYPER_BREAKPOINT:
694 return rcLast;
695 }
696 return VINF_EM_OFF;
697
698 /*
699 * Status codes terminating the VM in one or another sense.
700 */
701 case VINF_EM_TERMINATE:
702 case VINF_EM_OFF:
703 case VINF_EM_RESET:
704 case VINF_EM_RAW_STALE_SELECTOR:
705 case VINF_EM_RAW_IRET_TRAP:
706 case VERR_TRPM_PANIC:
707 case VERR_TRPM_DONT_PANIC:
708 case VERR_INTERNAL_ERROR:
709 return rc;
710
711 /*
712 * The rest is unexpected, and will keep us here.
713 */
714 default:
715 AssertMsgFailed(("Unxpected rc %Vrc!\n", rc));
716 break;
717 }
718 } while (false);
719 } /* debug for ever */
720}
721
722
723/**
724 * Steps recompiled code.
725 *
726 * @returns VBox status code. The most important ones are: VINF_EM_STEP_EVENT,
727 * VINF_EM_RESCHEDULE, VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
728 *
729 * @param pVM VM handle.
730 */
731static int emR3RemStep(PVM pVM)
732{
733 LogFlow(("emR3RemStep: cs:eip=%04x:%08x\n", CPUMGetGuestCS(pVM), CPUMGetGuestEIP(pVM)));
734
735 /*
736 * Switch to REM, step instruction, switch back.
737 */
738 int rc = REMR3State(pVM, pVM->em.s.fREMFlushTBs);
739 if (VBOX_SUCCESS(rc))
740 {
741 rc = REMR3Step(pVM);
742 REMR3StateBack(pVM);
743 pVM->em.s.fREMFlushTBs = false;
744 }
745 LogFlow(("emR3RemStep: returns %Vrc cs:eip=%04x:%08x\n", rc, CPUMGetGuestCS(pVM), CPUMGetGuestEIP(pVM)));
746 return rc;
747}
748
749
750/**
751 * Executes recompiled code.
752 *
753 * This function contains the recompiler version of the inner
754 * execution loop (the outer loop being in EMR3ExecuteVM()).
755 *
756 * @returns VBox status code. The most important ones are: VINF_EM_RESCHEDULE,
757 * VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
758 *
759 * @param pVM VM handle.
760 * @param pfFFDone Where to store an indicator telling wheter or not
761 * FFs were done before returning.
762 *
763 */
764static int emR3RemExecute(PVM pVM, bool *pfFFDone)
765{
766#ifdef LOG_ENABLED
767 PCPUMCTX pCtx = pVM->em.s.pCtx;
768 uint32_t cpl = CPUMGetGuestCPL(pVM, CPUMCTX2CORE(pCtx));
769
770 if (pCtx->eflags.Bits.u1VM)
771 Log(("EMV86: %04X:%08X IF=%d\n", pCtx->cs, pCtx->eip, pCtx->eflags.Bits.u1IF));
772 else
773 Log(("EMR%d: %08X ESP=%08X IF=%d CR0=%x\n", cpl, pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF, (uint32_t)pCtx->cr0));
774#endif
775 STAM_REL_PROFILE_ADV_START(&pVM->em.s.StatREMTotal, a);
776
777#if defined(VBOX_STRICT) && defined(DEBUG_bird)
778 AssertMsg( VM_FF_ISPENDING(pVM, VM_FF_PGM_SYNC_CR3|VM_FF_PGM_SYNC_CR3_NON_GLOBAL)
779 || !MMHyperIsInsideArea(pVM, CPUMGetGuestEIP(pVM)), /** @todo #1419 - get flat address. */
780 ("cs:eip=%RX16:%RX32\n", CPUMGetGuestCS(pVM), CPUMGetGuestEIP(pVM)));
781#endif
782
783 /*
784 * Spin till we get a forced action which returns anything but VINF_SUCCESS
785 * or the REM suggests raw-mode execution.
786 */
787 *pfFFDone = false;
788 bool fInREMState = false;
789 int rc = VINF_SUCCESS;
790 for (;;)
791 {
792 /*
793 * Update REM state if not already in sync.
794 */
795 if (!fInREMState)
796 {
797 STAM_PROFILE_START(&pVM->em.s.StatREMSync, b);
798 rc = REMR3State(pVM, pVM->em.s.fREMFlushTBs);
799 STAM_PROFILE_STOP(&pVM->em.s.StatREMSync, b);
800 if (VBOX_FAILURE(rc))
801 break;
802 fInREMState = true;
803 pVM->em.s.fREMFlushTBs = false;
804
805 /*
806 * We might have missed the raising of VMREQ, TIMER and some other
807 * imporant FFs while we were busy switching the state. So, check again.
808 */
809 if (VM_FF_ISPENDING(pVM, VM_FF_REQUEST | VM_FF_TIMER | VM_FF_PDM_QUEUES | VM_FF_DBGF | VM_FF_TERMINATE | VM_FF_RESET))
810 {
811 LogFlow(("emR3RemExecute: Skipping run, because FF is set. %#x\n", pVM->fForcedActions));
812 goto l_REMDoForcedActions;
813 }
814 }
815
816
817 /*
818 * Execute REM.
819 */
820 STAM_PROFILE_START(&pVM->em.s.StatREMExec, c);
821 rc = REMR3Run(pVM);
822 STAM_PROFILE_STOP(&pVM->em.s.StatREMExec, c);
823
824
825 /*
826 * Deal with high priority post execution FFs before doing anything else.
827 */
828 if (VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_POST_MASK))
829 rc = emR3HighPriorityPostForcedActions(pVM, rc);
830
831 /*
832 * Process the returned status code.
833 * (Try keep this short! Call functions!)
834 */
835 if (rc != VINF_SUCCESS)
836 {
837 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
838 break;
839 if (rc != VINF_REM_INTERRUPED_FF)
840 {
841 /*
842 * Anything which is not known to us means an internal error
843 * and the termination of the VM!
844 */
845 AssertMsg(rc == VERR_REM_TOO_MANY_TRAPS, ("Unknown GC return code: %Vra\n", rc));
846 break;
847 }
848 }
849
850
851 /*
852 * Check and execute forced actions.
853 * Sync back the VM state before calling any of these.
854 */
855#ifdef VBOX_HIGH_RES_TIMERS_HACK
856 TMTimerPoll(pVM);
857#endif
858 if (VM_FF_ISPENDING(pVM, VM_FF_ALL_BUT_RAW_MASK & ~(VM_FF_CSAM_PENDING_ACTION | VM_FF_CSAM_SCAN_PAGE)))
859 {
860l_REMDoForcedActions:
861 if (fInREMState)
862 {
863 STAM_PROFILE_START(&pVM->em.s.StatREMSync, d);
864 REMR3StateBack(pVM);
865 STAM_PROFILE_STOP(&pVM->em.s.StatREMSync, d);
866 fInREMState = false;
867 }
868 STAM_REL_PROFILE_ADV_SUSPEND(&pVM->em.s.StatREMTotal, a);
869 rc = emR3ForcedActions(pVM, rc);
870 STAM_REL_PROFILE_ADV_RESUME(&pVM->em.s.StatREMTotal, a);
871 if ( rc != VINF_SUCCESS
872 && rc != VINF_EM_RESCHEDULE_REM)
873 {
874 *pfFFDone = true;
875 break;
876 }
877 }
878
879 } /* The Inner Loop, recompiled execution mode version. */
880
881
882 /*
883 * Returning. Sync back the VM state if required.
884 */
885 if (fInREMState)
886 {
887 STAM_PROFILE_START(&pVM->em.s.StatREMSync, e);
888 REMR3StateBack(pVM);
889 STAM_PROFILE_STOP(&pVM->em.s.StatREMSync, e);
890 }
891
892 STAM_REL_PROFILE_ADV_STOP(&pVM->em.s.StatREMTotal, a);
893 return rc;
894}
895
896
897/**
898 * Resumes executing hypervisor after a debug event.
899 *
900 * This is kind of special since our current guest state is
901 * potentially out of sync.
902 *
903 * @returns VBox status code.
904 * @param pVM The VM handle.
905 */
906static int emR3RawResumeHyper(PVM pVM)
907{
908 int rc;
909 PCPUMCTX pCtx = pVM->em.s.pCtx;
910 Assert(pVM->em.s.enmState == EMSTATE_DEBUG_HYPER);
911 Log(("emR3RawResumeHyper: cs:eip=%RTsel:%RGr efl=%RGr\n", pCtx->cs, pCtx->eip, pCtx->eflags));
912
913 /*
914 * Resume execution.
915 */
916 CPUMRawEnter(pVM, NULL);
917 CPUMSetHyperEFlags(pVM, CPUMGetHyperEFlags(pVM) | X86_EFL_RF);
918 rc = VMMR3ResumeHyper(pVM);
919 Log(("emR3RawStep: cs:eip=%RTsel:%RGr efl=%RGr - returned from GC with rc=%Vrc\n", pCtx->cs, pCtx->eip, pCtx->eflags, rc));
920 rc = CPUMRawLeave(pVM, NULL, rc);
921 VM_FF_CLEAR(pVM, VM_FF_RESUME_GUEST_MASK);
922
923 /*
924 * Deal with the return code.
925 */
926 rc = emR3HighPriorityPostForcedActions(pVM, rc);
927 rc = emR3RawHandleRC(pVM, pCtx, rc);
928 rc = emR3RawUpdateForceFlag(pVM, pCtx, rc);
929 return rc;
930}
931
932
933/**
934 * Steps rawmode.
935 *
936 * @returns VBox status code.
937 * @param pVM The VM handle.
938 */
939static int emR3RawStep(PVM pVM)
940{
941 Assert( pVM->em.s.enmState == EMSTATE_DEBUG_HYPER
942 || pVM->em.s.enmState == EMSTATE_DEBUG_GUEST_RAW
943 || pVM->em.s.enmState == EMSTATE_DEBUG_GUEST_REM);
944 int rc;
945 PCPUMCTX pCtx = pVM->em.s.pCtx;
946 bool fGuest = pVM->em.s.enmState != EMSTATE_DEBUG_HYPER;
947#ifndef DEBUG_sandervl
948 Log(("emR3RawStep: cs:eip=%RTsel:%RGr efl=%RGr\n", fGuest ? CPUMGetGuestCS(pVM) : CPUMGetHyperCS(pVM),
949 fGuest ? CPUMGetGuestEIP(pVM) : CPUMGetHyperEIP(pVM), fGuest ? CPUMGetGuestEFlags(pVM) : CPUMGetHyperEFlags(pVM)));
950#endif
951 if (fGuest)
952 {
953 /*
954 * Check vital forced actions, but ignore pending interrupts and timers.
955 */
956 if (VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_RAW_MASK))
957 {
958 rc = emR3RawForcedActions(pVM, pCtx);
959 if (VBOX_FAILURE(rc))
960 return rc;
961 }
962
963 /*
964 * Set flags for single stepping.
965 */
966 CPUMSetGuestEFlags(pVM, CPUMGetGuestEFlags(pVM) | X86_EFL_TF | X86_EFL_RF);
967 }
968 else
969 CPUMSetHyperEFlags(pVM, CPUMGetHyperEFlags(pVM) | X86_EFL_TF | X86_EFL_RF);
970
971 /*
972 * Single step.
973 * We do not start time or anything, if anything we should just do a few nanoseconds.
974 */
975 CPUMRawEnter(pVM, NULL);
976 do
977 {
978 if (pVM->em.s.enmState == EMSTATE_DEBUG_HYPER)
979 rc = VMMR3ResumeHyper(pVM);
980 else
981 rc = VMMR3RawRunGC(pVM);
982#ifndef DEBUG_sandervl
983 Log(("emR3RawStep: cs:eip=%RTsel:%RGr efl=%RGr - GC rc %Vrc\n", fGuest ? CPUMGetGuestCS(pVM) : CPUMGetHyperCS(pVM),
984 fGuest ? CPUMGetGuestEIP(pVM) : CPUMGetHyperEIP(pVM), fGuest ? CPUMGetGuestEFlags(pVM) : CPUMGetHyperEFlags(pVM), rc));
985#endif
986 } while ( rc == VINF_SUCCESS
987 || rc == VINF_EM_RAW_INTERRUPT);
988 rc = CPUMRawLeave(pVM, NULL, rc);
989 VM_FF_CLEAR(pVM, VM_FF_RESUME_GUEST_MASK);
990
991 /*
992 * Make sure the trap flag is cleared.
993 * (Too bad if the guest is trying to single step too.)
994 */
995 if (fGuest)
996 CPUMSetGuestEFlags(pVM, CPUMGetGuestEFlags(pVM) & ~X86_EFL_TF);
997 else
998 CPUMSetHyperEFlags(pVM, CPUMGetHyperEFlags(pVM) & ~X86_EFL_TF);
999
1000 /*
1001 * Deal with the return codes.
1002 */
1003 rc = emR3HighPriorityPostForcedActions(pVM, rc);
1004 rc = emR3RawHandleRC(pVM, pCtx, rc);
1005 rc = emR3RawUpdateForceFlag(pVM, pCtx, rc);
1006 return rc;
1007}
1008
1009
1010#ifdef DEBUG
1011
1012/**
1013 * Steps hardware accelerated mode.
1014 *
1015 * @returns VBox status code.
1016 * @param pVM The VM handle.
1017 */
1018static int emR3HwAccStep(PVM pVM)
1019{
1020 Assert(pVM->em.s.enmState == EMSTATE_DEBUG_GUEST_HWACC);
1021
1022 int rc;
1023 PCPUMCTX pCtx = pVM->em.s.pCtx;
1024 VM_FF_CLEAR(pVM, (VM_FF_SELM_SYNC_GDT | VM_FF_SELM_SYNC_LDT | VM_FF_TRPM_SYNC_IDT | VM_FF_SELM_SYNC_TSS));
1025
1026 /*
1027 * Check vital forced actions, but ignore pending interrupts and timers.
1028 */
1029 if (VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_RAW_MASK))
1030 {
1031 rc = emR3RawForcedActions(pVM, pCtx);
1032 if (VBOX_FAILURE(rc))
1033 return rc;
1034 }
1035 /*
1036 * Set flags for single stepping.
1037 */
1038 CPUMSetGuestEFlags(pVM, CPUMGetGuestEFlags(pVM) | X86_EFL_TF | X86_EFL_RF);
1039
1040 /*
1041 * Single step.
1042 * We do not start time or anything, if anything we should just do a few nanoseconds.
1043 */
1044 do
1045 {
1046 rc = VMMR3HwAccRunGC(pVM);
1047 } while ( rc == VINF_SUCCESS
1048 || rc == VINF_EM_RAW_INTERRUPT);
1049 VM_FF_CLEAR(pVM, VM_FF_RESUME_GUEST_MASK);
1050
1051 /*
1052 * Make sure the trap flag is cleared.
1053 * (Too bad if the guest is trying to single step too.)
1054 */
1055 CPUMSetGuestEFlags(pVM, CPUMGetGuestEFlags(pVM) & ~X86_EFL_TF);
1056
1057 /*
1058 * Deal with the return codes.
1059 */
1060 rc = emR3HighPriorityPostForcedActions(pVM, rc);
1061 rc = emR3RawHandleRC(pVM, pCtx, rc);
1062 rc = emR3RawUpdateForceFlag(pVM, pCtx, rc);
1063 return rc;
1064}
1065
1066
1067void emR3SingleStepExecRaw(PVM pVM, uint32_t cIterations)
1068{
1069 EMSTATE enmOldState = pVM->em.s.enmState;
1070
1071 pVM->em.s.enmState = EMSTATE_DEBUG_GUEST_RAW;
1072
1073 Log(("Single step BEGIN:\n"));
1074 for (uint32_t i = 0; i < cIterations; i++)
1075 {
1076 DBGFR3PrgStep(pVM);
1077 DBGFR3DisasInstrCurrentLog(pVM, "RSS: ");
1078 emR3RawStep(pVM);
1079 }
1080 Log(("Single step END:\n"));
1081 CPUMSetGuestEFlags(pVM, CPUMGetGuestEFlags(pVM) & ~X86_EFL_TF);
1082 pVM->em.s.enmState = enmOldState;
1083}
1084
1085
1086void emR3SingleStepExecHwAcc(PVM pVM, uint32_t cIterations)
1087{
1088 EMSTATE enmOldState = pVM->em.s.enmState;
1089
1090 pVM->em.s.enmState = EMSTATE_DEBUG_GUEST_HWACC;
1091
1092 Log(("Single step BEGIN:\n"));
1093 for (uint32_t i = 0; i < cIterations; i++)
1094 {
1095 DBGFR3PrgStep(pVM);
1096 DBGFR3DisasInstrCurrentLog(pVM, "RSS: ");
1097 emR3HwAccStep(pVM);
1098 }
1099 Log(("Single step END:\n"));
1100 CPUMSetGuestEFlags(pVM, CPUMGetGuestEFlags(pVM) & ~X86_EFL_TF);
1101 pVM->em.s.enmState = enmOldState;
1102}
1103
1104
1105void emR3SingleStepExecRem(PVM pVM, uint32_t cIterations)
1106{
1107 EMSTATE enmOldState = pVM->em.s.enmState;
1108
1109 pVM->em.s.enmState = EMSTATE_DEBUG_GUEST_REM;
1110
1111 Log(("Single step BEGIN:\n"));
1112 for (uint32_t i = 0; i < cIterations; i++)
1113 {
1114 DBGFR3PrgStep(pVM);
1115 DBGFR3DisasInstrCurrentLog(pVM, "RSS: ");
1116 emR3RemStep(pVM);
1117 }
1118 Log(("Single step END:\n"));
1119 CPUMSetGuestEFlags(pVM, CPUMGetGuestEFlags(pVM) & ~X86_EFL_TF);
1120 pVM->em.s.enmState = enmOldState;
1121}
1122
1123#endif /* DEBUG */
1124
1125
1126/**
1127 * Executes one (or perhaps a few more) instruction(s).
1128 *
1129 * @returns VBox status code suitable for EM.
1130 *
1131 * @param pVM VM handle.
1132 * @param rcGC GC return code
1133 * @param pszPrefix Disassembly prefix. If not NULL we'll disassemble the
1134 * instruction and prefix the log output with this text.
1135 */
1136#ifdef LOG_ENABLED
1137static int emR3RawExecuteInstructionWorker(PVM pVM, int rcGC, const char *pszPrefix)
1138#else
1139static int emR3RawExecuteInstructionWorker(PVM pVM, int rcGC)
1140#endif
1141{
1142 PCPUMCTX pCtx = pVM->em.s.pCtx;
1143 int rc;
1144
1145 /*
1146 *
1147 * The simple solution is to use the recompiler.
1148 * The better solution is to disassemble the current instruction and
1149 * try handle as many as possible without using REM.
1150 *
1151 */
1152
1153#ifdef LOG_ENABLED
1154 /*
1155 * Disassemble the instruction if requested.
1156 */
1157 if (pszPrefix)
1158 {
1159 DBGFR3InfoLog(pVM, "cpumguest", pszPrefix);
1160 DBGFR3DisasInstrCurrentLog(pVM, pszPrefix);
1161 }
1162#endif /* LOG_ENABLED */
1163
1164 /*
1165 * PATM is making life more interesting.
1166 * We cannot hand anything to REM which has an EIP inside patch code. So, we'll
1167 * tell PATM there is a trap in this code and have it take the appropriate actions
1168 * to allow us execute the code in REM.
1169 */
1170 if (PATMIsPatchGCAddr(pVM, pCtx->eip))
1171 {
1172 Log(("emR3RawExecuteInstruction: In patch block. eip=%VRv\n", pCtx->eip));
1173
1174 RTGCPTR pNewEip;
1175 rc = PATMR3HandleTrap(pVM, pCtx, pCtx->eip, &pNewEip);
1176 switch (rc)
1177 {
1178 /*
1179 * It's not very useful to emulate a single instruction and then go back to raw
1180 * mode; just execute the whole block until IF is set again.
1181 */
1182 case VINF_SUCCESS:
1183 Log(("emR3RawExecuteInstruction: Executing instruction starting at new address %VGv IF=%d VMIF=%x\n",
1184 pNewEip, pCtx->eflags.Bits.u1IF, pVM->em.s.pPatmGCState->uVMFlags));
1185 pCtx->eip = pNewEip;
1186 Assert(pCtx->eip);
1187
1188 if (pCtx->eflags.Bits.u1IF)
1189 {
1190 /*
1191 * The last instruction in the patch block needs to be executed!! (sti/sysexit for example)
1192 */
1193 Log(("PATCH: IF=1 -> emulate last instruction as it can't be interrupted!!\n"));
1194 return emR3RawExecuteInstruction(pVM, "PATCHIR");
1195 }
1196 else if (rcGC == VINF_PATM_PENDING_IRQ_AFTER_IRET)
1197 {
1198 /* special case: iret, that sets IF, detected a pending irq/event */
1199 return emR3RawExecuteInstruction(pVM, "PATCHIRET");
1200 }
1201 return VINF_EM_RESCHEDULE_REM;
1202
1203 /*
1204 * One instruction.
1205 */
1206 case VINF_PATCH_EMULATE_INSTR:
1207 Log(("emR3RawExecuteInstruction: Emulate patched instruction at %VGv IF=%d VMIF=%x\n",
1208 pNewEip, pCtx->eflags.Bits.u1IF, pVM->em.s.pPatmGCState->uVMFlags));
1209 pCtx->eip = pNewEip;
1210 return emR3RawExecuteInstruction(pVM, "PATCHIR");
1211
1212 /*
1213 * The patch was disabled, hand it to the REM.
1214 */
1215 case VERR_PATCH_DISABLED:
1216 Log(("emR3RawExecuteInstruction: Disabled patch -> new eip %VGv IF=%d VMIF=%x\n",
1217 pNewEip, pCtx->eflags.Bits.u1IF, pVM->em.s.pPatmGCState->uVMFlags));
1218 pCtx->eip = pNewEip;
1219 if (pCtx->eflags.Bits.u1IF)
1220 {
1221 /*
1222 * The last instruction in the patch block needs to be executed!! (sti/sysexit for example)
1223 */
1224 Log(("PATCH: IF=1 -> emulate last instruction as it can't be interrupted!!\n"));
1225 return emR3RawExecuteInstruction(pVM, "PATCHIR");
1226 }
1227 return VINF_EM_RESCHEDULE_REM;
1228
1229 /* Force continued patch exection; usually due to write monitored stack. */
1230 case VINF_PATCH_CONTINUE:
1231 return VINF_SUCCESS;
1232
1233 default:
1234 AssertReleaseMsgFailed(("Unknown return code %Vrc from PATMR3HandleTrap\n", rc));
1235 return VERR_INTERNAL_ERROR;
1236 }
1237 }
1238
1239#if 0
1240 /* Try our own instruction emulator before falling back to the recompiler. */
1241 DISCPUSTATE Cpu;
1242 rc = CPUMR3DisasmInstrCPU(pVM, pCtx, pCtx->rip, &Cpu, "GEN EMU");
1243 if (VBOX_SUCCESS(rc))
1244 {
1245 uint32_t size;
1246
1247 switch (Cpu.pCurInstr->opcode)
1248 {
1249 /* @todo we can do more now */
1250 case OP_MOV:
1251 case OP_AND:
1252 case OP_OR:
1253 case OP_XOR:
1254 case OP_POP:
1255 case OP_INC:
1256 case OP_DEC:
1257 case OP_XCHG:
1258 STAM_PROFILE_START(&pVM->em.s.StatMiscEmu, a);
1259 rc = EMInterpretInstructionCPU(pVM, &Cpu, CPUMCTX2CORE(pCtx), 0, &size);
1260 if (VBOX_SUCCESS(rc))
1261 {
1262 pCtx->rip += Cpu.opsize;
1263 STAM_PROFILE_STOP(&pVM->em.s.StatMiscEmu, a);
1264 return rc;
1265 }
1266 if (rc != VERR_EM_INTERPRETER)
1267 AssertMsgFailedReturn(("rc=%Vrc\n", rc), rc);
1268 STAM_PROFILE_STOP(&pVM->em.s.StatMiscEmu, a);
1269 break;
1270 }
1271 }
1272#endif /* 0 */
1273 STAM_PROFILE_START(&pVM->em.s.StatREMEmu, a);
1274 rc = REMR3EmulateInstruction(pVM);
1275 STAM_PROFILE_STOP(&pVM->em.s.StatREMEmu, a);
1276
1277 return rc;
1278}
1279
1280
/**
 * Executes one (or perhaps a few more) instruction(s).
 * This is just a wrapper for discarding pszPrefix in non-logging builds.
 *
 * NOTE(review): most callers in this file invoke a two-argument form
 * (pVM, pszPrefix); presumably an overload or default-argument variant
 * supplying rcGC exists elsewhere in the file — TODO confirm.
 *
 * @returns VBox status code suitable for EM.
 * @param   pVM         VM handle.
 * @param   pszPrefix   Disassembly prefix. If not NULL we'll disassemble the
 *                      instruction and prefix the log output with this text.
 * @param   rcGC        GC return code forwarded to the worker.
 */
DECLINLINE(int) emR3RawExecuteInstruction(PVM pVM, const char *pszPrefix, int rcGC)
{
#ifdef LOG_ENABLED
    /* Logging build: the worker takes the prefix for disassembly output. */
    return emR3RawExecuteInstructionWorker(pVM, rcGC, pszPrefix);
#else
    /* Non-logging build: the worker has no pszPrefix parameter, drop it. */
    return emR3RawExecuteInstructionWorker(pVM, rcGC);
#endif
}
1299
1300/**
1301 * Executes one (or perhaps a few more) IO instruction(s).
1302 *
1303 * @returns VBox status code suitable for EM.
1304 * @param pVM VM handle.
1305 */
1306int emR3RawExecuteIOInstruction(PVM pVM)
1307{
1308 int rc;
1309 PCPUMCTX pCtx = pVM->em.s.pCtx;
1310
1311 STAM_PROFILE_START(&pVM->em.s.StatIOEmu, a);
1312
1313 /** @todo probably we should fall back to the recompiler; otherwise we'll go back and forth between HC & GC
1314 * as io instructions tend to come in packages of more than one
1315 */
1316 DISCPUSTATE Cpu;
1317 rc = CPUMR3DisasmInstrCPU(pVM, pCtx, pCtx->rip, &Cpu, "IO EMU");
1318 if (VBOX_SUCCESS(rc))
1319 {
1320 rc = VINF_EM_RAW_EMULATE_INSTR;
1321
1322 if (!(Cpu.prefix & (PREFIX_REP | PREFIX_REPNE)))
1323 {
1324 switch (Cpu.pCurInstr->opcode)
1325 {
1326 case OP_IN:
1327 {
1328 STAM_COUNTER_INC(&pVM->em.s.CTX_SUFF(pStats)->StatIn);
1329 rc = IOMInterpretIN(pVM, CPUMCTX2CORE(pCtx), &Cpu);
1330 break;
1331 }
1332
1333 case OP_OUT:
1334 {
1335 STAM_COUNTER_INC(&pVM->em.s.CTX_SUFF(pStats)->StatOut);
1336 rc = IOMInterpretOUT(pVM, CPUMCTX2CORE(pCtx), &Cpu);
1337 break;
1338 }
1339 }
1340 }
1341 else if (Cpu.prefix & PREFIX_REP)
1342 {
1343 switch (Cpu.pCurInstr->opcode)
1344 {
1345 case OP_INSB:
1346 case OP_INSWD:
1347 {
1348 STAM_COUNTER_INC(&pVM->em.s.CTX_SUFF(pStats)->StatIn);
1349 rc = IOMInterpretINS(pVM, CPUMCTX2CORE(pCtx), &Cpu);
1350 break;
1351 }
1352
1353 case OP_OUTSB:
1354 case OP_OUTSWD:
1355 {
1356 STAM_COUNTER_INC(&pVM->em.s.CTX_SUFF(pStats)->StatOut);
1357 rc = IOMInterpretOUTS(pVM, CPUMCTX2CORE(pCtx), &Cpu);
1358 break;
1359 }
1360 }
1361 }
1362
1363 /*
1364 * Handled the I/O return codes.
1365 * (The unhandled cases end up with rc == VINF_EM_RAW_EMULATE_INSTR.)
1366 */
1367 if (IOM_SUCCESS(rc))
1368 {
1369 pCtx->rip += Cpu.opsize;
1370 STAM_PROFILE_STOP(&pVM->em.s.StatIOEmu, a);
1371 return rc;
1372 }
1373
1374 if (rc == VINF_EM_RAW_GUEST_TRAP)
1375 {
1376 STAM_PROFILE_STOP(&pVM->em.s.StatIOEmu, a);
1377 rc = emR3RawGuestTrap(pVM);
1378 return rc;
1379 }
1380 AssertMsg(rc != VINF_TRPM_XCPT_DISPATCHED, ("Handle VINF_TRPM_XCPT_DISPATCHED\n"));
1381
1382 if (VBOX_FAILURE(rc))
1383 {
1384 STAM_PROFILE_STOP(&pVM->em.s.StatIOEmu, a);
1385 return rc;
1386 }
1387 AssertMsg(rc == VINF_EM_RAW_EMULATE_INSTR || rc == VINF_EM_RESCHEDULE_REM, ("rc=%Vrc\n", rc));
1388 }
1389 STAM_PROFILE_STOP(&pVM->em.s.StatIOEmu, a);
1390 return emR3RawExecuteInstruction(pVM, "IO: ");
1391}
1392
1393
1394/**
1395 * Handle a guest context trap.
1396 *
1397 * @returns VBox status code suitable for EM.
1398 * @param pVM VM handle.
1399 */
1400static int emR3RawGuestTrap(PVM pVM)
1401{
1402 PCPUMCTX pCtx = pVM->em.s.pCtx;
1403
1404 /*
1405 * Get the trap info.
1406 */
1407 uint8_t u8TrapNo;
1408 TRPMEVENT enmType;
1409 RTGCUINT uErrorCode;
1410 RTGCUINTPTR uCR2;
1411 int rc = TRPMQueryTrapAll(pVM, &u8TrapNo, &enmType, &uErrorCode, &uCR2);
1412 if (VBOX_FAILURE(rc))
1413 {
1414 AssertReleaseMsgFailed(("No trap! (rc=%Vrc)\n", rc));
1415 return rc;
1416 }
1417
1418 /* Traps can be directly forwarded in hardware accelerated mode. */
1419 if (HWACCMR3IsActive(pVM))
1420 {
1421#ifdef LOGGING_ENABLED
1422 DBGFR3InfoLog(pVM, "cpumguest", "Guest trap");
1423 DBGFR3DisasInstrCurrentLog(pVM, "Guest trap");
1424#endif
1425 return VINF_EM_RESCHEDULE_HWACC;
1426 }
1427
1428 /** Scan kernel code that traps; we might not get another chance. */
1429 if ( (pCtx->ss & X86_SEL_RPL) <= 1
1430 && !pCtx->eflags.Bits.u1VM)
1431 {
1432 Assert(!PATMIsPatchGCAddr(pVM, pCtx->eip));
1433 CSAMR3CheckCodeEx(pVM, CPUMCTX2CORE(pCtx), pCtx->eip);
1434 }
1435
1436 if (u8TrapNo == 6) /* (#UD) Invalid opcode. */
1437 {
1438 DISCPUSTATE cpu;
1439
1440 /* If MONITOR & MWAIT are supported, then interpret them here. */
1441 rc = CPUMR3DisasmInstrCPU(pVM, pCtx, pCtx->rip, &cpu, "Guest Trap (#UD): ");
1442 if ( VBOX_SUCCESS(rc)
1443 && (cpu.pCurInstr->opcode == OP_MONITOR || cpu.pCurInstr->opcode == OP_MWAIT))
1444 {
1445 uint32_t u32Dummy, u32Features, u32ExtFeatures, size;
1446
1447 CPUMGetGuestCpuId(pVM, 1, &u32Dummy, &u32Dummy, &u32ExtFeatures, &u32Features);
1448
1449 if (u32ExtFeatures & X86_CPUID_FEATURE_ECX_MONITOR)
1450 {
1451 rc = TRPMResetTrap(pVM);
1452 AssertRC(rc);
1453
1454 rc = EMInterpretInstructionCPU(pVM, &cpu, CPUMCTX2CORE(pCtx), 0, &size);
1455 if (VBOX_SUCCESS(rc))
1456 {
1457 pCtx->rip += cpu.opsize;
1458 return rc;
1459 }
1460 return emR3RawExecuteInstruction(pVM, "Monitor: ");
1461 }
1462 }
1463 }
1464 else if (u8TrapNo == 13) /* (#GP) Privileged exception */
1465 {
1466 DISCPUSTATE cpu;
1467
1468 rc = CPUMR3DisasmInstrCPU(pVM, pCtx, pCtx->rip, &cpu, "Guest Trap: ");
1469 if (VBOX_SUCCESS(rc) && (cpu.pCurInstr->optype & OPTYPE_PORTIO))
1470 {
1471 /*
1472 * We should really check the TSS for the IO bitmap, but it's not like this
1473 * lazy approach really makes things worse.
1474 */
1475 rc = TRPMResetTrap(pVM);
1476 AssertRC(rc);
1477 return emR3RawExecuteInstruction(pVM, "IO Guest Trap: ");
1478 }
1479 }
1480
1481#ifdef LOG_ENABLED
1482 DBGFR3InfoLog(pVM, "cpumguest", "Guest trap");
1483 DBGFR3DisasInstrCurrentLog(pVM, "Guest trap");
1484
1485 /* Get guest page information. */
1486 uint64_t fFlags = 0;
1487 RTGCPHYS GCPhys = 0;
1488 int rc2 = PGMGstGetPage(pVM, uCR2, &fFlags, &GCPhys);
1489 Log(("emR3RawGuestTrap: cs:eip=%04x:%08x: trap=%02x err=%08x cr2=%08x cr0=%08x%s: Phys=%VGp fFlags=%08llx %s %s %s%s rc2=%d\n",
1490 pCtx->cs, pCtx->eip, u8TrapNo, uErrorCode, uCR2, (uint32_t)pCtx->cr0, (enmType == TRPM_SOFTWARE_INT) ? " software" : "", GCPhys, fFlags,
1491 fFlags & X86_PTE_P ? "P " : "NP", fFlags & X86_PTE_US ? "U" : "S",
1492 fFlags & X86_PTE_RW ? "RW" : "R0", fFlags & X86_PTE_G ? " G" : "", rc2));
1493#endif
1494
1495 /*
1496 * #PG has CR2.
1497 * (Because of stuff like above we must set CR2 in a delayed fashion.)
1498 */
1499 if (u8TrapNo == 14 /* #PG */)
1500 pCtx->cr2 = uCR2;
1501
1502 return VINF_EM_RESCHEDULE_REM;
1503}
1504
1505
1506/**
1507 * Handle a ring switch trap.
1508 * Need to do statistics and to install patches. The result is going to REM.
1509 *
1510 * @returns VBox status code suitable for EM.
1511 * @param pVM VM handle.
1512 */
1513int emR3RawRingSwitch(PVM pVM)
1514{
1515 int rc;
1516 DISCPUSTATE Cpu;
1517 PCPUMCTX pCtx = pVM->em.s.pCtx;
1518
1519 /*
1520 * sysenter, syscall & callgate
1521 */
1522 rc = CPUMR3DisasmInstrCPU(pVM, pCtx, pCtx->rip, &Cpu, "RSWITCH: ");
1523 if (VBOX_SUCCESS(rc))
1524 {
1525 if (Cpu.pCurInstr->opcode == OP_SYSENTER)
1526 {
1527 if (pCtx->SysEnter.cs != 0)
1528 {
1529 rc = PATMR3InstallPatch(pVM, SELMToFlat(pVM, DIS_SELREG_CS, CPUMCTX2CORE(pCtx), pCtx->eip),
1530 (SELMGetCpuModeFromSelector(pVM, pCtx->eflags, pCtx->cs, &pCtx->csHid) == CPUMODE_32BIT) ? PATMFL_CODE32 : 0);
1531 if (VBOX_SUCCESS(rc))
1532 {
1533 DBGFR3DisasInstrCurrentLog(pVM, "Patched sysenter instruction");
1534 return VINF_EM_RESCHEDULE_RAW;
1535 }
1536 }
1537 }
1538
1539#ifdef VBOX_WITH_STATISTICS
1540 switch (Cpu.pCurInstr->opcode)
1541 {
1542 case OP_SYSENTER:
1543 STAM_COUNTER_INC(&pVM->em.s.CTX_SUFF(pStats)->StatSysEnter);
1544 break;
1545 case OP_SYSEXIT:
1546 STAM_COUNTER_INC(&pVM->em.s.CTX_SUFF(pStats)->StatSysExit);
1547 break;
1548 case OP_SYSCALL:
1549 STAM_COUNTER_INC(&pVM->em.s.CTX_SUFF(pStats)->StatSysCall);
1550 break;
1551 case OP_SYSRET:
1552 STAM_COUNTER_INC(&pVM->em.s.CTX_SUFF(pStats)->StatSysRet);
1553 break;
1554 }
1555#endif
1556 }
1557 else
1558 AssertRC(rc);
1559
1560 /* go to the REM to emulate a single instruction */
1561 return emR3RawExecuteInstruction(pVM, "RSWITCH: ");
1562}
1563
/**
 * Handle a trap (\#PF or \#GP) in patch code
 *
 * Figures out the trap number (either from the PATM status code or from TRPM),
 * optionally logs IRET stack frames for diagnosis, and asks PATMR3HandleTrap
 * how to proceed: re-execute the original instruction, emulate one
 * instruction, hand the block to REM, or continue in patch code.
 *
 * @returns VBox status code suitable for EM.
 * @param   pVM     VM handle.
 * @param   pCtx    CPU context
 * @param   gcret   GC return code (VINF_PATM_PATCH_INT3,
 *                  VINF_PATM_PATCH_TRAP_GP, or another raw-mode status that
 *                  implies a pending TRPM trap).
 */
int emR3PatchTrap(PVM pVM, PCPUMCTX pCtx, int gcret)
{
    uint8_t    u8TrapNo;
    int        rc;
    TRPMEVENT  enmType;
    RTGCUINT   uErrorCode;
    RTGCUINTPTR uCR2;

    /* Callers guarantee EIP is inside patch code. */
    Assert(PATMIsPatchGCAddr(pVM, pCtx->eip));

    if (gcret == VINF_PATM_PATCH_INT3)
    {
        /* Int3 patch hit; no TRPM trap is pending for this one. */
        u8TrapNo = 3;
        uCR2 = 0;
        uErrorCode = 0;
    }
    else if (gcret == VINF_PATM_PATCH_TRAP_GP)
    {
        /* No active trap in this case. Kind of ugly. */
        u8TrapNo = X86_XCPT_GP;
        uCR2 = 0;
        uErrorCode = 0;
    }
    else
    {
        rc = TRPMQueryTrapAll(pVM, &u8TrapNo, &enmType, &uErrorCode, &uCR2);
        if (VBOX_FAILURE(rc))
        {
            AssertReleaseMsgFailed(("emR3PatchTrap: no trap! (rc=%Vrc) gcret=%Vrc\n", rc, gcret));
            return rc;
        }
        /* Reset the trap as we'll execute the original instruction again. */
        TRPMResetTrap(pVM);
    }

    /*
     * Deal with traps inside patch code.
     * (This code won't run outside GC.)
     */
    if (u8TrapNo != 1)
    {
#ifdef LOG_ENABLED
        DBGFR3InfoLog(pVM, "cpumguest", "Trap in patch code");
        DBGFR3DisasInstrCurrentLog(pVM, "Patch code");

        DISCPUSTATE Cpu;
        int rc;  /* NOTE(review): intentionally shadows the outer rc; only used for the log dump below. */

        rc = CPUMR3DisasmInstrCPU(pVM, pCtx, pCtx->eip, &Cpu, "Patch code: ");
        if (    VBOX_SUCCESS(rc)
            &&  Cpu.pCurInstr->opcode == OP_IRET)
        {
            uint32_t eip, selCS, uEFlags;

            /* Iret crashes are bad as we have already changed the flags on the stack */
            rc = PGMPhysReadGCPtr(pVM, &eip, pCtx->esp, 4);
            rc |= PGMPhysReadGCPtr(pVM, &selCS, pCtx->esp+4, 4);
            rc |= PGMPhysReadGCPtr(pVM, &uEFlags, pCtx->esp+8, 4);
            if (rc == VINF_SUCCESS)
            {
                /* Return to v86 mode or to ring 3: the frame additionally holds ss:esp. */
                if (    (uEFlags & X86_EFL_VM)
                    ||  (selCS & X86_SEL_RPL) == 3)
                {
                    uint32_t selSS, esp;

                    rc |= PGMPhysReadGCPtr(pVM, &esp, pCtx->esp + 12, 4);
                    rc |= PGMPhysReadGCPtr(pVM, &selSS, pCtx->esp + 16, 4);

                    if (uEFlags & X86_EFL_VM)
                    {
                        /* v86 frames also carry the data segment registers. */
                        uint32_t selDS, selES, selFS, selGS;
                        rc = PGMPhysReadGCPtr(pVM, &selES, pCtx->esp + 20, 4);
                        rc |= PGMPhysReadGCPtr(pVM, &selDS, pCtx->esp + 24, 4);
                        rc |= PGMPhysReadGCPtr(pVM, &selFS, pCtx->esp + 28, 4);
                        rc |= PGMPhysReadGCPtr(pVM, &selGS, pCtx->esp + 32, 4);
                        if (rc == VINF_SUCCESS)
                        {
                            Log(("Patch code: IRET->VM stack frame: return address %04X:%VGv eflags=%08x ss:esp=%04X:%VGv\n", selCS, eip, uEFlags, selSS, esp));
                            Log(("Patch code: IRET->VM stack frame: DS=%04X ES=%04X FS=%04X GS=%04X\n", selDS, selES, selFS, selGS));
                        }
                    }
                    else
                        Log(("Patch code: IRET stack frame: return address %04X:%VGv eflags=%08x ss:esp=%04X:%VGv\n", selCS, eip, uEFlags, selSS, esp));
                }
                else
                    Log(("Patch code: IRET stack frame: return address %04X:%VGv eflags=%08x\n", selCS, eip, uEFlags));
            }
        }
#endif /* LOG_ENABLED */
        Log(("emR3PatchTrap: in patch: eip=%08x: trap=%02x err=%08x cr2=%08x cr0=%08x\n",
             pCtx->eip, u8TrapNo, uErrorCode, uCR2, (uint32_t)pCtx->cr0));

        RTGCPTR pNewEip;
        rc = PATMR3HandleTrap(pVM, pCtx, pCtx->eip, &pNewEip);
        switch (rc)
        {
            /*
             * Execute the faulting instruction.
             */
            case VINF_SUCCESS:
            {
                /** @todo execute a whole block */
                Log(("emR3PatchTrap: Executing faulting instruction at new address %VGv\n", pNewEip));
                if (!(pVM->em.s.pPatmGCState->uVMFlags & X86_EFL_IF))
                    Log(("emR3PatchTrap: Virtual IF flag disabled!!\n"));

                pCtx->eip = pNewEip;
                AssertRelease(pCtx->eip);

                if (pCtx->eflags.Bits.u1IF)
                {
                    /* Windows XP lets irets fault intentionally and then takes action based on the opcode; an
                     * int3 patch overwrites it and leads to blue screens. Remove the patch in this case.
                     */
                    if (    u8TrapNo == X86_XCPT_GP
                        &&  PATMIsInt3Patch(pVM, pCtx->eip, NULL, NULL))
                    {
                        /** @todo move to PATMR3HandleTrap */
                        Log(("Possible Windows XP iret fault at %VGv\n", pCtx->eip));
                        PATMR3RemovePatch(pVM, pCtx->eip);
                    }

                    /** @todo Knoppix 5 regression when returning VINF_SUCCESS here and going back to raw mode. */
                    /* Note: possibly because a reschedule is required (e.g. iret to V86 code) */

                    return emR3RawExecuteInstruction(pVM, "PATCHIR");
                    /* Interrupts are enabled; just go back to the original instruction.
                    return VINF_SUCCESS; */
                }
                return VINF_EM_RESCHEDULE_REM;
            }

            /*
             * One instruction.
             */
            case VINF_PATCH_EMULATE_INSTR:
                Log(("emR3PatchTrap: Emulate patched instruction at %VGv IF=%d VMIF=%x\n",
                     pNewEip, pCtx->eflags.Bits.u1IF, pVM->em.s.pPatmGCState->uVMFlags));
                pCtx->eip = pNewEip;
                AssertRelease(pCtx->eip);
                return emR3RawExecuteInstruction(pVM, "PATCHEMUL: ");

            /*
             * The patch was disabled, hand it to the REM.
             */
            case VERR_PATCH_DISABLED:
                if (!(pVM->em.s.pPatmGCState->uVMFlags & X86_EFL_IF))
                    Log(("emR3PatchTrap: Virtual IF flag disabled!!\n"));
                pCtx->eip = pNewEip;
                AssertRelease(pCtx->eip);

                if (pCtx->eflags.Bits.u1IF)
                {
                    /*
                     * The last instruction in the patch block needs to be executed!! (sti/sysexit for example)
                     */
                    Log(("PATCH: IF=1 -> emulate last instruction as it can't be interrupted!!\n"));
                    return emR3RawExecuteInstruction(pVM, "PATCHIR");
                }
                return VINF_EM_RESCHEDULE_REM;

            /* Force continued patch execution; usually due to write monitored stack. */
            case VINF_PATCH_CONTINUE:
                return VINF_SUCCESS;

            /*
             * Anything else is *fatal*.
             */
            default:
                AssertReleaseMsgFailed(("Unknown return code %Vrc from PATMR3HandleTrap!\n", rc));
                return VERR_INTERNAL_ERROR;
        }
    }
    /* Trap 1 (#DB) in patch code: nothing to do, just continue. */
    return VINF_SUCCESS;
}
1747
1748
/**
 * Handle a privileged instruction.
 *
 * Tries to install a PATM patch for the privileged instruction first; failing
 * that, gathers statistics and, for ring-0 32-bit code, interprets
 * cli/sti/hlt/mov cr/mov dr in place. Everything else ends up in the
 * recompiler (or in emR3PatchTrap when faulting inside patch code).
 *
 * @returns VBox status code suitable for EM.
 * @param   pVM     VM handle.
 */
int emR3RawPrivileged(PVM pVM)
{
    STAM_PROFILE_START(&pVM->em.s.StatPrivEmu, a);
    PCPUMCTX pCtx = pVM->em.s.pCtx;

    Assert(!pCtx->eflags.Bits.u1VM);

    if (PATMIsEnabled(pVM))
    {
        /*
         * Check if in patch code.
         */
        if (PATMR3IsInsidePatchJump(pVM, pCtx->eip, NULL))
        {
#ifdef LOG_ENABLED
            DBGFR3InfoLog(pVM, "cpumguest", "PRIV");
#endif
            AssertMsgFailed(("FATAL ERROR: executing random instruction inside generated patch jump %08X\n", pCtx->eip));
            return VERR_EM_RAW_PATCH_CONFLICT;
        }
        /* Ring-0, non-v86, unpatched guest code: try installing a patch for it. */
        if (    (pCtx->ss & X86_SEL_RPL) == 0
            &&  !pCtx->eflags.Bits.u1VM
            &&  !PATMIsPatchGCAddr(pVM, pCtx->eip))
        {
            int rc = PATMR3InstallPatch(pVM, SELMToFlat(pVM, DIS_SELREG_CS, CPUMCTX2CORE(pCtx), pCtx->eip),
                                        (SELMGetCpuModeFromSelector(pVM, pCtx->eflags, pCtx->cs, &pCtx->csHid) == CPUMODE_32BIT) ? PATMFL_CODE32 : 0);
            if (VBOX_SUCCESS(rc))
            {
#ifdef LOG_ENABLED
                DBGFR3InfoLog(pVM, "cpumguest", "PRIV");
#endif
                DBGFR3DisasInstrCurrentLog(pVM, "Patched privileged instruction");
                return VINF_SUCCESS;
            }
        }
    }

#ifdef LOG_ENABLED
    if (!PATMIsPatchGCAddr(pVM, pCtx->eip))
    {
        DBGFR3InfoLog(pVM, "cpumguest", "PRIV");
        DBGFR3DisasInstrCurrentLog(pVM, "Privileged instr: ");
    }
#endif

    /*
     * Instruction statistics and logging.
     */
    DISCPUSTATE Cpu;
    int rc;

    rc = CPUMR3DisasmInstrCPU(pVM, pCtx, pCtx->rip, &Cpu, "PRIV: ");
    if (VBOX_SUCCESS(rc))
    {
#ifdef VBOX_WITH_STATISTICS
        PEMSTATS pStats = pVM->em.s.CTX_SUFF(pStats);
        switch (Cpu.pCurInstr->opcode)
        {
            case OP_INVLPG:
                STAM_COUNTER_INC(&pStats->StatInvlpg);
                break;
            case OP_IRET:
                STAM_COUNTER_INC(&pStats->StatIret);
                break;
            case OP_CLI:
                STAM_COUNTER_INC(&pStats->StatCli);
                emR3RecordCli(pVM, pCtx->rip);
                break;
            case OP_STI:
                STAM_COUNTER_INC(&pStats->StatSti);
                break;
            case OP_INSB:
            case OP_INSWD:
            case OP_IN:
            case OP_OUTSB:
            case OP_OUTSWD:
            case OP_OUT:
                AssertMsgFailed(("Unexpected privileged exception due to port IO\n"));
                break;

            case OP_MOV_CR:
                if (Cpu.param1.flags & USE_REG_GEN32)
                {
                    //read
                    Assert(Cpu.param2.flags & USE_REG_CR);
                    Assert(Cpu.param2.base.reg_ctrl <= USE_REG_CR4);
                    STAM_COUNTER_INC(&pStats->StatMovReadCR[Cpu.param2.base.reg_ctrl]);
                }
                else
                {
                    //write
                    Assert(Cpu.param1.flags & USE_REG_CR);
                    Assert(Cpu.param1.base.reg_ctrl <= USE_REG_CR4);
                    STAM_COUNTER_INC(&pStats->StatMovWriteCR[Cpu.param1.base.reg_ctrl]);
                }
                break;

            case OP_MOV_DR:
                STAM_COUNTER_INC(&pStats->StatMovDRx);
                break;
            case OP_LLDT:
                STAM_COUNTER_INC(&pStats->StatMovLldt);
                break;
            case OP_LIDT:
                STAM_COUNTER_INC(&pStats->StatMovLidt);
                break;
            case OP_LGDT:
                STAM_COUNTER_INC(&pStats->StatMovLgdt);
                break;
            case OP_SYSENTER:
                STAM_COUNTER_INC(&pStats->StatSysEnter);
                break;
            case OP_SYSEXIT:
                STAM_COUNTER_INC(&pStats->StatSysExit);
                break;
            case OP_SYSCALL:
                STAM_COUNTER_INC(&pStats->StatSysCall);
                break;
            case OP_SYSRET:
                STAM_COUNTER_INC(&pStats->StatSysRet);
                break;
            case OP_HLT:
                STAM_COUNTER_INC(&pStats->StatHlt);
                break;
            default:
                STAM_COUNTER_INC(&pStats->StatMisc);
                Log4(("emR3RawPrivileged: opcode=%d\n", Cpu.pCurInstr->opcode));
                break;
        }
#endif /* VBOX_WITH_STATISTICS */
        /* Only interpret in-place for ring-0, non-v86, 32-bit code. */
        if (    (pCtx->ss & X86_SEL_RPL) == 0
            &&  !pCtx->eflags.Bits.u1VM
            &&  SELMGetCpuModeFromSelector(pVM, pCtx->eflags, pCtx->cs, &pCtx->csHid) == CPUMODE_32BIT)
        {
            uint32_t size;

            /* NOTE(review): StatPrivEmu was already started at function entry;
               this second START in a nested scope looks redundant — confirm intended. */
            STAM_PROFILE_START(&pVM->em.s.StatPrivEmu, a);
            switch (Cpu.pCurInstr->opcode)
            {
                case OP_CLI:
                    pCtx->eflags.u32 &= ~X86_EFL_IF;
                    Assert(Cpu.opsize == 1);
                    pCtx->rip += Cpu.opsize;
                    STAM_PROFILE_STOP(&pVM->em.s.StatPrivEmu, a);
                    return VINF_EM_RESCHEDULE_REM; /* must go to the recompiler now! */

                case OP_STI:
                    pCtx->eflags.u32 |= X86_EFL_IF;
                    /* Interrupts are inhibited for one instruction after sti. */
                    EMSetInhibitInterruptsPC(pVM, pCtx->rip + Cpu.opsize);
                    Assert(Cpu.opsize == 1);
                    pCtx->rip += Cpu.opsize;
                    STAM_PROFILE_STOP(&pVM->em.s.StatPrivEmu, a);
                    return VINF_SUCCESS;

                case OP_HLT:
                    if (PATMIsPatchGCAddr(pVM, (RTGCPTR)pCtx->eip))
                    {
                        /* hlt inside patch code: translate EIP back to the original guest address first. */
                        PATMTRANSSTATE enmState;
                        RTGCPTR pOrgInstrGC = PATMR3PatchToGCPtr(pVM, pCtx->eip, &enmState);

                        if (enmState == PATMTRANS_OVERWRITTEN)
                        {
                            rc = PATMR3DetectConflict(pVM, pOrgInstrGC, pOrgInstrGC);
                            Assert(rc == VERR_PATCH_DISABLED);
                            /* Conflict detected, patch disabled */
                            Log(("emR3RawPrivileged: detected conflict -> disabled patch at %VGv\n", pCtx->eip));

                            enmState = PATMTRANS_SAFE;
                        }

                        /* The translation had better be successful. Otherwise we can't recover. */
                        AssertReleaseMsg(pOrgInstrGC && enmState != PATMTRANS_OVERWRITTEN, ("Unable to translate instruction address at %VGv\n", pCtx->eip));
                        if (enmState != PATMTRANS_OVERWRITTEN)
                            pCtx->eip = pOrgInstrGC;
                    }
                    /* no break; we could just return VINF_EM_HALT here */

                case OP_MOV_CR:
                case OP_MOV_DR:
#ifdef LOG_ENABLED
                    if (PATMIsPatchGCAddr(pVM, pCtx->eip))
                    {
                        DBGFR3InfoLog(pVM, "cpumguest", "PRIV");
                        DBGFR3DisasInstrCurrentLog(pVM, "Privileged instr: ");
                    }
#endif

                    rc = EMInterpretInstructionCPU(pVM, &Cpu, CPUMCTX2CORE(pCtx), 0, &size);
                    if (VBOX_SUCCESS(rc))
                    {
                        pCtx->rip += Cpu.opsize;
                        STAM_PROFILE_STOP(&pVM->em.s.StatPrivEmu, a);

                        if (    Cpu.pCurInstr->opcode == OP_MOV_CR
                            &&  Cpu.param1.flags == USE_REG_CR /* write */
                           )
                        {
                            /* Deal with CR0 updates inside patch code that force
                             * us to go to the recompiler.
                             */
                            if (    PATMIsPatchGCAddr(pVM, pCtx->rip)
                                &&  (pCtx->cr0 & (X86_CR0_WP|X86_CR0_PG|X86_CR0_PE)) != (X86_CR0_WP|X86_CR0_PG|X86_CR0_PE))
                            {
                                PATMTRANSSTATE enmState;
                                RTGCPTR pOrgInstrGC = PATMR3PatchToGCPtr(pVM, pCtx->rip, &enmState);

                                Assert(pCtx->eflags.Bits.u1IF == 0);
                                Log(("Force recompiler switch due to cr0 (%VGp) update\n", pCtx->cr0));
                                if (enmState == PATMTRANS_OVERWRITTEN)
                                {
                                    rc = PATMR3DetectConflict(pVM, pOrgInstrGC, pOrgInstrGC);
                                    Assert(rc == VERR_PATCH_DISABLED);
                                    /* Conflict detected, patch disabled */
                                    Log(("emR3RawPrivileged: detected conflict -> disabled patch at %VGv\n", pCtx->rip));
                                    enmState = PATMTRANS_SAFE;
                                }
                                /* The translation had better be successful. Otherwise we can't recover. */
                                AssertReleaseMsg(pOrgInstrGC && enmState != PATMTRANS_OVERWRITTEN, ("Unable to translate instruction address at %VGv\n", pCtx->rip));
                                if (enmState != PATMTRANS_OVERWRITTEN)
                                    pCtx->rip = pOrgInstrGC;
                            }

                            /* Reschedule is necessary as the execution/paging mode might have changed. */
                            return VINF_EM_RESCHEDULE;
                        }
                        return rc; /* can return VINF_EM_HALT as well. */
                    }
                    AssertMsgReturn(rc == VERR_EM_INTERPRETER, ("%Vrc\n", rc), rc);
                    break; /* fall back to the recompiler */
            }
            STAM_PROFILE_STOP(&pVM->em.s.StatPrivEmu, a);
        }
    }

    /* Faulting inside patch code is handled by the patch trap path. */
    if (PATMIsPatchGCAddr(pVM, pCtx->eip))
        return emR3PatchTrap(pVM, pCtx, VINF_PATM_PATCH_TRAP_GP);

    return emR3RawExecuteInstruction(pVM, "PRIV");
}
1994
1995
1996/**
1997 * Update the forced rawmode execution modifier.
1998 *
1999 * This function is called when we're returning from the raw-mode loop(s). If we're
2000 * in patch code, it will set a flag forcing execution to be resumed in raw-mode,
2001 * if not in patch code, the flag will be cleared.
2002 *
2003 * We should never interrupt patch code while it's being executed. Cli patches can
2004 * contain big code blocks, but they are always executed with IF=0. Other patches
2005 * replace single instructions and should be atomic.
2006 *
2007 * @returns Updated rc.
2008 *
2009 * @param pVM The VM handle.
2010 * @param pCtx The guest CPU context.
2011 * @param rc The result code.
2012 */
2013DECLINLINE(int) emR3RawUpdateForceFlag(PVM pVM, PCPUMCTX pCtx, int rc)
2014{
2015 if (PATMIsPatchGCAddr(pVM, pCtx->eip)) /** @todo check cs selector base/type */
2016 {
2017 /* ignore reschedule attempts. */
2018 switch (rc)
2019 {
2020 case VINF_EM_RESCHEDULE:
2021 case VINF_EM_RESCHEDULE_REM:
2022 rc = VINF_SUCCESS;
2023 break;
2024 }
2025 pVM->em.s.fForceRAW = true;
2026 }
2027 else
2028 pVM->em.s.fForceRAW = false;
2029 return rc;
2030}
2031
2032
/**
 * Process a subset of the raw-mode return code.
 *
 * Since we have to share this with raw-mode single stepping, this inline
 * function has been created to avoid code duplication.
 *
 * @returns VINF_SUCCESS if it's ok to continue raw mode.
 * @returns VBox status code to return to the EM main loop.
 *
 * @param   pVM     The VM handle
 * @param   rc      The return code.
 * @param   pCtx    The guest cpu context.
 */
DECLINLINE(int) emR3RawHandleRC(PVM pVM, PCPUMCTX pCtx, int rc)
{
    switch (rc)
    {
        /*
         * Common & simple ones.
         */
        case VINF_SUCCESS:
            break;
        case VINF_EM_RESCHEDULE_RAW:
        case VINF_EM_RESCHEDULE_HWACC:
        case VINF_EM_RAW_INTERRUPT:
        case VINF_EM_RAW_TO_R3:
        case VINF_EM_RAW_TIMER_PENDING:
        case VINF_EM_PENDING_REQUEST:
            rc = VINF_SUCCESS;
            break;

        /*
         * Privileged instruction.
         */
        case VINF_EM_RAW_EXCEPTION_PRIVILEGED:
        case VINF_PATM_PATCH_TRAP_GP:
            rc = emR3RawPrivileged(pVM);
            break;

        /*
         * Got a trap which needs dispatching.
         */
        case VINF_EM_RAW_GUEST_TRAP:
            if (PATMR3IsInsidePatchJump(pVM, pCtx->eip, NULL))
            {
                AssertReleaseMsgFailed(("FATAL ERROR: executing random instruction inside generated patch jump %08X\n", CPUMGetGuestEIP(pVM)));
                rc = VERR_EM_RAW_PATCH_CONFLICT;
                break;
            }

            Assert(TRPMHasTrap(pVM));
            Assert(!PATMIsPatchGCAddr(pVM, (RTGCPTR)pCtx->eip));

            if (TRPMHasTrap(pVM))
            {
                uint8_t         u8Interrupt;
                RTGCUINT        uErrorCode;
                TRPMERRORCODE   enmError = TRPM_TRAP_NO_ERRORCODE;

                rc = TRPMQueryTrapAll(pVM, &u8Interrupt, NULL, &uErrorCode, NULL);
                AssertRC(rc);

                /* ~0U is TRPM's "no error code" marker. */
                if (uErrorCode != ~0U)
                    enmError = TRPM_TRAP_HAS_ERRORCODE;

                /* If the guest gate is marked unpatched, then we will check again if we can patch it. */
                if (TRPMR3GetGuestTrapHandler(pVM, u8Interrupt) == TRPM_INVALID_HANDLER)
                {
                    CSAMR3CheckGates(pVM, u8Interrupt, 1);
                    Log(("emR3RawHandleRC: recheck gate %x -> valid=%d\n", u8Interrupt, TRPMR3GetGuestTrapHandler(pVM, u8Interrupt) != TRPM_INVALID_HANDLER));

                    /** If it was successful, then we could go back to raw mode. */
                    if (TRPMR3GetGuestTrapHandler(pVM, u8Interrupt) != TRPM_INVALID_HANDLER)
                    {
                        /* Must check pending forced actions as our IDT or GDT might be out of sync */
                        EMR3CheckRawForcedActions(pVM);

                        rc = TRPMForwardTrap(pVM, CPUMCTX2CORE(pCtx), u8Interrupt, uErrorCode, enmError, TRPM_TRAP, -1);
                        if (rc == VINF_SUCCESS /* Don't use VBOX_SUCCESS */)
                        {
                            TRPMResetTrap(pVM);
                            return VINF_EM_RESCHEDULE_RAW;
                        }
                    }
                }
            }
            /* Gate not patchable or forwarding failed: take the generic trap path. */
            rc = emR3RawGuestTrap(pVM);
            break;

        /*
         * Trap in patch code.
         */
        case VINF_PATM_PATCH_TRAP_PF:
        case VINF_PATM_PATCH_INT3:
            rc = emR3PatchTrap(pVM, pCtx, rc);
            break;

        case VINF_PATM_DUPLICATE_FUNCTION:
            Assert(PATMIsPatchGCAddr(pVM, (RTGCPTR)pCtx->eip));
            rc = PATMR3DuplicateFunctionRequest(pVM, pCtx);
            AssertRC(rc);
            rc = VINF_SUCCESS;
            break;

        case VINF_PATM_CHECK_PATCH_PAGE:
            rc = PATMR3HandleMonitoredPage(pVM);
            AssertRC(rc);
            rc = VINF_SUCCESS;
            break;

        /*
         * Patch manager.
         */
        case VERR_EM_RAW_PATCH_CONFLICT:
            AssertReleaseMsgFailed(("%Vrc handling is not yet implemented\n", rc));
            break;

        /*
         * Memory mapped I/O access - attempt to patch the instruction
         */
        case VINF_PATM_HC_MMIO_PATCH_READ:
            rc = PATMR3InstallPatch(pVM, SELMToFlat(pVM, DIS_SELREG_CS, CPUMCTX2CORE(pCtx), pCtx->eip),
                                    PATMFL_MMIO_ACCESS | ((SELMGetCpuModeFromSelector(pVM, pCtx->eflags, pCtx->cs, &pCtx->csHid) == CPUMODE_32BIT) ? PATMFL_CODE32 : 0));
            if (VBOX_FAILURE(rc))
                rc = emR3RawExecuteInstruction(pVM, "MMIO");
            break;

        case VINF_PATM_HC_MMIO_PATCH_WRITE:
            AssertFailed(); /* not yet implemented. */
            rc = emR3RawExecuteInstruction(pVM, "MMIO");
            break;

        /*
         * Conflict or out of page tables.
         *
         * VM_FF_PGM_SYNC_CR3 is set by the hypervisor and all we need to
         * do here is to execute the pending forced actions.
         */
        case VINF_PGM_SYNC_CR3:
            AssertMsg(VM_FF_ISPENDING(pVM, VM_FF_PGM_SYNC_CR3 | VM_FF_PGM_SYNC_CR3_NON_GLOBAL),
                      ("VINF_PGM_SYNC_CR3 and no VM_FF_PGM_SYNC_CR3*!\n"));
            rc = VINF_SUCCESS;
            break;

        /*
         * Paging mode change.
         */
        case VINF_PGM_CHANGE_MODE:
            rc = PGMChangeMode(pVM, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
            if (VBOX_SUCCESS(rc))
                rc = VINF_EM_RESCHEDULE;
            break;

        /*
         * CSAM wants to perform a task in ring-3. It has set an FF action flag.
         */
        case VINF_CSAM_PENDING_ACTION:
            rc = VINF_SUCCESS;
            break;

        /*
         * Invoked Interrupt gate - must directly (!) go to the recompiler.
         */
        case VINF_EM_RAW_INTERRUPT_PENDING:
        case VINF_EM_RAW_RING_SWITCH_INT:
        {
            uint8_t u8Interrupt;

            Assert(TRPMHasTrap(pVM));
            Assert(!PATMIsPatchGCAddr(pVM, (RTGCPTR)pCtx->eip));

            if (TRPMHasTrap(pVM))
            {
                u8Interrupt = TRPMGetTrapNo(pVM);

                /* If the guest gate is marked unpatched, then we will check again if we can patch it. */
                if (TRPMR3GetGuestTrapHandler(pVM, u8Interrupt) == TRPM_INVALID_HANDLER)
                {
                    CSAMR3CheckGates(pVM, u8Interrupt, 1);
                    Log(("emR3RawHandleRC: recheck gate %x -> valid=%d\n", u8Interrupt, TRPMR3GetGuestTrapHandler(pVM, u8Interrupt) != TRPM_INVALID_HANDLER));
                    /* Note: If it was successful, then we could go back to raw mode, but let's keep things simple for now. */
                }
            }
            rc = VINF_EM_RESCHEDULE_REM;
            break;
        }

        /*
         * Other ring switch types.
         */
        case VINF_EM_RAW_RING_SWITCH:
            rc = emR3RawRingSwitch(pVM);
            break;

        /*
         * REMGCNotifyInvalidatePage() failed because of overflow.
         */
        case VERR_REM_FLUSHED_PAGES_OVERFLOW:
            Assert((pCtx->ss & X86_SEL_RPL) != 1);
            REMR3ReplayInvalidatedPages(pVM);
            rc = VINF_SUCCESS;
            break;

        /*
         * I/O Port access - emulate the instruction.
         */
        case VINF_IOM_HC_IOPORT_READ:
        case VINF_IOM_HC_IOPORT_WRITE:
            rc = emR3RawExecuteIOInstruction(pVM);
            break;

        /*
         * Memory mapped I/O access - emulate the instruction.
         */
        case VINF_IOM_HC_MMIO_READ:
        case VINF_IOM_HC_MMIO_WRITE:
        case VINF_IOM_HC_MMIO_READ_WRITE:
            rc = emR3RawExecuteInstruction(pVM, "MMIO");
            break;

        /*
         * Execute instruction.
         */
        case VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT:
            rc = emR3RawExecuteInstruction(pVM, "LDT FAULT: ");
            break;
        case VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT:
            rc = emR3RawExecuteInstruction(pVM, "GDT FAULT: ");
            break;
        case VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT:
            rc = emR3RawExecuteInstruction(pVM, "IDT FAULT: ");
            break;
        case VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT:
            rc = emR3RawExecuteInstruction(pVM, "TSS FAULT: ");
            break;
        case VINF_EM_RAW_EMULATE_INSTR_PD_FAULT:
            rc = emR3RawExecuteInstruction(pVM, "PD FAULT: ");
            break;

        case VINF_EM_RAW_EMULATE_INSTR_HLT:
            /** @todo skip instruction and go directly to the halt state. (see REM for implementation details) */
            rc = emR3RawPrivileged(pVM);
            break;

        case VINF_PATM_PENDING_IRQ_AFTER_IRET:
            rc = emR3RawExecuteInstruction(pVM, "EMUL: ", VINF_PATM_PENDING_IRQ_AFTER_IRET);
            break;

        case VINF_EM_RAW_EMULATE_INSTR:
        case VINF_PATCH_EMULATE_INSTR:
            rc = emR3RawExecuteInstruction(pVM, "EMUL: ");
            break;

        /*
         * Stale selector and iret traps => REM.
         */
        case VINF_EM_RAW_STALE_SELECTOR:
        case VINF_EM_RAW_IRET_TRAP:
            /* We will not go to the recompiler if EIP points to patch code. */
            if (PATMIsPatchGCAddr(pVM, pCtx->eip))
            {
                pCtx->eip = PATMR3PatchToGCPtr(pVM, (RTGCPTR)pCtx->eip, 0);
            }
            LogFlow(("emR3RawHandleRC: %Vrc -> %Vrc\n", rc, VINF_EM_RESCHEDULE_REM));
            rc = VINF_EM_RESCHEDULE_REM;
            break;

        /*
         * Up a level.
         */
        case VINF_EM_TERMINATE:
        case VINF_EM_OFF:
        case VINF_EM_RESET:
        case VINF_EM_SUSPEND:
        case VINF_EM_HALT:
        case VINF_EM_RESUME:
        case VINF_EM_RESCHEDULE:
        case VINF_EM_RESCHEDULE_REM:
            break;

        /*
         * Up a level and invoke the debugger.
         */
        case VINF_EM_DBG_STEPPED:
        case VINF_EM_DBG_BREAKPOINT:
        case VINF_EM_DBG_STEP:
        case VINF_EM_DBG_HYPER_ASSERTION:
        case VINF_EM_DBG_HYPER_BREAKPOINT:
        case VINF_EM_DBG_HYPER_STEPPED:
        case VINF_EM_DBG_STOP:
            break;

        /*
         * Up a level, dump and debug.
         */
        case VERR_TRPM_DONT_PANIC:
        case VERR_TRPM_PANIC:
            break;

        case VERR_VMX_INVALID_VMCS_FIELD:
        case VERR_VMX_INVALID_VMCS_PTR:
        case VERR_VMX_INVALID_VMXON_PTR:
        case VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_CODE:
        case VERR_VMX_UNEXPECTED_EXCEPTION:
        case VERR_VMX_UNEXPECTED_EXIT_CODE:
        case VERR_VMX_INVALID_GUEST_STATE:
            HWACCMR3CheckError(pVM, rc);
            break;
        /*
         * Anything which is not known to us means an internal error
         * and the termination of the VM!
         */
        default:
            AssertMsgFailed(("Unknown GC return code: %Vra\n", rc));
            break;
    }
    return rc;
}
2351
2352
2353/**
2354 * Check for pending raw actions
2355 *
2356 * @returns VBox status code.
2357 * @param pVM The VM to operate on.
2358 */
2359EMR3DECL(int) EMR3CheckRawForcedActions(PVM pVM)
2360{
2361 return emR3RawForcedActions(pVM, pVM->em.s.pCtx);
2362}
2363
2364
2365/**
2366 * Process raw-mode specific forced actions.
2367 *
2368 * This function is called when any FFs in the VM_FF_HIGH_PRIORITY_PRE_RAW_MASK is pending.
2369 *
2370 * @returns VBox status code.
2371 * Only the normal success/failure stuff, no VINF_EM_*.
2372 * @param pVM The VM handle.
2373 * @param pCtx The guest CPUM register context.
2374 */
2375static int emR3RawForcedActions(PVM pVM, PCPUMCTX pCtx)
2376{
2377 /*
2378 * Note that the order is *vitally* important!
2379 * Also note that SELMR3UpdateFromCPUM may trigger VM_FF_SELM_SYNC_TSS.
2380 */
2381
2382
2383 /*
2384 * Sync selector tables.
2385 */
2386 if (VM_FF_ISPENDING(pVM, VM_FF_SELM_SYNC_GDT | VM_FF_SELM_SYNC_LDT))
2387 {
2388 int rc = SELMR3UpdateFromCPUM(pVM);
2389 if (VBOX_FAILURE(rc))
2390 return rc;
2391 }
2392
2393 /*
2394 * Sync IDT.
2395 */
2396 if (VM_FF_ISSET(pVM, VM_FF_TRPM_SYNC_IDT))
2397 {
2398 int rc = TRPMR3SyncIDT(pVM);
2399 if (VBOX_FAILURE(rc))
2400 return rc;
2401 }
2402
2403 /*
2404 * Sync TSS.
2405 */
2406 if (VM_FF_ISSET(pVM, VM_FF_SELM_SYNC_TSS))
2407 {
2408 int rc = SELMR3SyncTSS(pVM);
2409 if (VBOX_FAILURE(rc))
2410 return rc;
2411 }
2412
2413 /*
2414 * Sync page directory.
2415 */
2416 if (VM_FF_ISPENDING(pVM, VM_FF_PGM_SYNC_CR3 | VM_FF_PGM_SYNC_CR3_NON_GLOBAL))
2417 {
2418 int rc = PGMSyncCR3(pVM, pCtx->cr0, pCtx->cr3, pCtx->cr4, VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3));
2419 if (VBOX_FAILURE(rc))
2420 return rc;
2421
2422 Assert(!VM_FF_ISPENDING(pVM, VM_FF_SELM_SYNC_GDT | VM_FF_SELM_SYNC_LDT));
2423
2424 /* Prefetch pages for EIP and ESP */
2425 /** @todo This is rather expensive. Should investigate if it really helps at all. */
2426 rc = PGMPrefetchPage(pVM, SELMToFlat(pVM, DIS_SELREG_CS, CPUMCTX2CORE(pCtx), pCtx->rip));
2427 if (rc == VINF_SUCCESS)
2428 rc = PGMPrefetchPage(pVM, SELMToFlat(pVM, DIS_SELREG_SS, CPUMCTX2CORE(pCtx), pCtx->rsp));
2429 if (rc != VINF_SUCCESS)
2430 {
2431 if (rc != VINF_PGM_SYNC_CR3)
2432 return rc;
2433 rc = PGMSyncCR3(pVM, pCtx->cr0, pCtx->cr3, pCtx->cr4, VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3));
2434 if (VBOX_FAILURE(rc))
2435 return rc;
2436 }
2437 /** @todo maybe prefetch the supervisor stack page as well */
2438 }
2439
2440 /*
2441 * Allocate handy pages (just in case the above actions have consumed some pages).
2442 */
2443 if (VM_FF_ISSET(pVM, VM_FF_PGM_NEED_HANDY_PAGES))
2444 {
2445 int rc = PGMR3PhysAllocateHandyPages(pVM);
2446 if (VBOX_FAILURE(rc))
2447 return rc;
2448 }
2449
2450 return VINF_SUCCESS;
2451}
2452
2453
/**
 * Executes raw code.
 *
 * This function contains the raw-mode version of the inner
 * execution loop (the outer loop being in EMR3ExecuteVM()).
 *
 * @returns VBox status code. The most important ones are: VINF_EM_RESCHEDULE,
 *          VINF_EM_RESCHEDULE_REM, VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
 *
 * @param   pVM         VM handle.
 * @param   pfFFDone    Where to store an indicator telling whether or not
 *                      FFs were done before returning.
 */
static int emR3RawExecute(PVM pVM, bool *pfFFDone)
{
    STAM_REL_PROFILE_ADV_START(&pVM->em.s.StatRAWTotal, a);

    int rc = VERR_INTERNAL_ERROR;
    PCPUMCTX pCtx = pVM->em.s.pCtx;
    LogFlow(("emR3RawExecute: (cs:eip=%04x:%08x)\n", pCtx->cs, pCtx->eip));
    /* Clear the force-raw flag; it only applies until we actually enter raw mode. */
    pVM->em.s.fForceRAW = false;
    *pfFFDone = false;


    /*
     *
     * Spin till we get a forced action or raw mode status code resulting
     * in anything but VINF_SUCCESS or VINF_EM_RESCHEDULE_RAW.
     *
     */
    for (;;)
    {
        STAM_PROFILE_ADV_START(&pVM->em.s.StatRAWEntry, b);

        /*
         * Check various preconditions.
         */
#ifdef VBOX_STRICT
        Assert(REMR3QueryPendingInterrupt(pVM) == REM_NO_PENDING_IRQ);
        Assert(pCtx->eflags.Bits.u1VM || (pCtx->ss & X86_SEL_RPL) == 3 || (pCtx->ss & X86_SEL_RPL) == 0);
        AssertMsg(   (pCtx->eflags.u32 & X86_EFL_IF)
                  || PATMShouldUseRawMode(pVM, (RTGCPTR)pCtx->eip),
                  ("Tried to execute code with IF at EIP=%08x!\n", pCtx->eip));
        if (    !VM_FF_ISPENDING(pVM, VM_FF_PGM_SYNC_CR3 | VM_FF_PGM_SYNC_CR3_NON_GLOBAL)
            &&  PGMR3MapHasConflicts(pVM, pCtx->cr3, pVM->fRawR0Enabled))
        {
            AssertMsgFailed(("We should not get conflicts any longer!!!\n"));
            return VERR_INTERNAL_ERROR;
        }
#endif /* VBOX_STRICT */

        /*
         * Process high priority pre-execution raw-mode FFs.
         */
        if (VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_RAW_MASK))
        {
            rc = emR3RawForcedActions(pVM, pCtx);
            if (VBOX_FAILURE(rc))
                break;
        }

        /*
         * If we're going to execute ring-0 code, the guest state needs to
         * be modified a bit and some of the state components (IF, SS/CS RPL,
         * and perhaps EIP) needs to be stored with PATM.
         */
        rc = CPUMRawEnter(pVM, NULL);
        if (rc != VINF_SUCCESS)
        {
            STAM_PROFILE_ADV_STOP(&pVM->em.s.StatRAWEntry, b);
            break;
        }

        /*
         * Scan code before executing it. Don't bother with user mode or V86 code
         */
        if (    (pCtx->ss & X86_SEL_RPL) <= 1
            &&  !pCtx->eflags.Bits.u1VM
            &&  !PATMIsPatchGCAddr(pVM, pCtx->eip))
        {
            STAM_PROFILE_ADV_SUSPEND(&pVM->em.s.StatRAWEntry, b);
            CSAMR3CheckCodeEx(pVM, CPUMCTX2CORE(pCtx), pCtx->eip);
            STAM_PROFILE_ADV_RESUME(&pVM->em.s.StatRAWEntry, b);
        }

#ifdef LOG_ENABLED
        /*
         * Log important stuff before entering GC.
         */
        /* NOTE(review): pGCState is only declared under LOG_ENABLED; the LogFlow
         * statements below that reference it rely on the Log* macros compiling
         * their arguments away when logging is disabled — confirm if touching. */
        PPATMGCSTATE pGCState = PATMR3QueryGCStateHC(pVM);
        if (pCtx->eflags.Bits.u1VM)
            Log(("RV86: %04X:%08X IF=%d VMFlags=%x\n", pCtx->cs, pCtx->eip, pCtx->eflags.Bits.u1IF, pGCState->uVMFlags));
        else if ((pCtx->ss & X86_SEL_RPL) == 1)
        {
            bool fCSAMScanned = CSAMIsPageScanned(pVM, (RTGCPTR)pCtx->eip);
            Log(("RR0: %08X ESP=%08X IF=%d VMFlags=%x PIF=%d CPL=%d (Scanned=%d)\n", pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF, pGCState->uVMFlags, pGCState->fPIF, (pCtx->ss & X86_SEL_RPL), fCSAMScanned));
        }
        else if ((pCtx->ss & X86_SEL_RPL) == 3)
            Log(("RR3: %08X ESP=%08X IF=%d VMFlags=%x\n", pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF, pGCState->uVMFlags));
#endif /* LOG_ENABLED */



        /*
         * Execute the code.
         */
        STAM_PROFILE_ADV_STOP(&pVM->em.s.StatRAWEntry, b);
        STAM_PROFILE_START(&pVM->em.s.StatRAWExec, c);
        /* Drop the VMM lock while running guest code in GC. */
        VMMR3Unlock(pVM);
        rc = VMMR3RawRunGC(pVM);
        VMMR3Lock(pVM);
        STAM_PROFILE_STOP(&pVM->em.s.StatRAWExec, c);
        STAM_PROFILE_ADV_START(&pVM->em.s.StatRAWTail, d);

        LogFlow(("RR0-E: %08X ESP=%08X IF=%d VMFlags=%x PIF=%d CPL=%d\n", pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF, pGCState->uVMFlags, pGCState->fPIF, (pCtx->ss & X86_SEL_RPL)));
        LogFlow(("VMMR3RawRunGC returned %Vrc\n", rc));



        /*
         * Restore the real CPU state and deal with high priority post
         * execution FFs before doing anything else.
         */
        rc = CPUMRawLeave(pVM, NULL, rc);
        VM_FF_CLEAR(pVM, VM_FF_RESUME_GUEST_MASK);
        if (VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_POST_MASK))
            rc = emR3HighPriorityPostForcedActions(pVM, rc);

#ifdef VBOX_STRICT
        /*
         * Assert TSS consistency & rc vs patch code.
         */
        if (    !VM_FF_ISPENDING(pVM, VM_FF_SELM_SYNC_TSS | VM_FF_SELM_SYNC_GDT) /* GDT implies TSS at the moment. */
            &&  EMIsRawRing0Enabled(pVM))
            SELMR3CheckTSS(pVM);
        switch (rc)
        {
            /* These status codes are expected while EIP is inside patch code;
             * anything else interrupting patch code gets logged below. */
            case VINF_SUCCESS:
            case VINF_EM_RAW_INTERRUPT:
            case VINF_PATM_PATCH_TRAP_PF:
            case VINF_PATM_PATCH_TRAP_GP:
            case VINF_PATM_PATCH_INT3:
            case VINF_PATM_CHECK_PATCH_PAGE:
            case VINF_EM_RAW_EXCEPTION_PRIVILEGED:
            case VINF_EM_RAW_GUEST_TRAP:
            case VINF_EM_RESCHEDULE_RAW:
                break;

            default:
                if (PATMIsPatchGCAddr(pVM, pCtx->eip) && !(pCtx->eflags.u32 & X86_EFL_TF))
                    LogIt(NULL, 0, LOG_GROUP_PATM, ("Patch code interrupted at %VRv for reason %Vrc\n", (RTRCPTR)CPUMGetGuestEIP(pVM), rc));
                break;
        }
        /*
         * Let's go paranoid!
         */
        if (    !VM_FF_ISPENDING(pVM, VM_FF_PGM_SYNC_CR3 | VM_FF_PGM_SYNC_CR3_NON_GLOBAL)
            &&  PGMR3MapHasConflicts(pVM, pCtx->cr3, pVM->fRawR0Enabled))
        {
            AssertMsgFailed(("We should not get conflicts any longer!!!\n"));
            return VERR_INTERNAL_ERROR;
        }
#endif /* VBOX_STRICT */

        /*
         * Process the returned status code.
         */
        /* Any VINF_EM_* code goes straight up to the outer loop. */
        if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
        {
            STAM_PROFILE_ADV_STOP(&pVM->em.s.StatRAWTail, d);
            break;
        }
        rc = emR3RawHandleRC(pVM, pCtx, rc);
        if (rc != VINF_SUCCESS)
        {
            rc = emR3RawUpdateForceFlag(pVM, pCtx, rc);
            if (rc != VINF_SUCCESS)
            {
                STAM_PROFILE_ADV_STOP(&pVM->em.s.StatRAWTail, d);
                break;
            }
        }

        /*
         * Check and execute forced actions.
         */
#ifdef VBOX_HIGH_RES_TIMERS_HACK
        TMTimerPoll(pVM);
#endif
        STAM_PROFILE_ADV_STOP(&pVM->em.s.StatRAWTail, d);
        if (VM_FF_ISPENDING(pVM, ~VM_FF_HIGH_PRIORITY_PRE_RAW_MASK))
        {
            Assert(pCtx->eflags.Bits.u1VM || (pCtx->ss & X86_SEL_RPL) != 1);

            STAM_REL_PROFILE_ADV_SUSPEND(&pVM->em.s.StatRAWTotal, a);
            rc = emR3ForcedActions(pVM, rc);
            STAM_REL_PROFILE_ADV_RESUME(&pVM->em.s.StatRAWTotal, a);
            if (    rc != VINF_SUCCESS
                &&  rc != VINF_EM_RESCHEDULE_RAW)
            {
                rc = emR3RawUpdateForceFlag(pVM, pCtx, rc);
                if (rc != VINF_SUCCESS)
                {
                    *pfFFDone = true;
                    break;
                }
            }
        }
    }

    /*
     * Return to outer loop.
     */
#if defined(LOG_ENABLED) && defined(DEBUG)
    RTLogFlush(NULL);
#endif
    STAM_REL_PROFILE_ADV_STOP(&pVM->em.s.StatRAWTotal, a);
    return rc;
}
2673
2674
/**
 * Executes hardware accelerated raw code. (Intel VMX & AMD SVM)
 *
 * This function contains the raw-mode version of the inner
 * execution loop (the outer loop being in EMR3ExecuteVM()).
 *
 * @returns VBox status code. The most important ones are: VINF_EM_RESCHEDULE, VINF_EM_RESCHEDULE_RAW,
 *          VINF_EM_RESCHEDULE_REM, VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
 *
 * @param   pVM         VM handle.
 * @param   pfFFDone    Where to store an indicator telling whether or not
 *                      FFs were done before returning.
 */
static int emR3HwAccExecute(PVM pVM, bool *pfFFDone)
{
    int rc = VERR_INTERNAL_ERROR;
    PCPUMCTX pCtx = pVM->em.s.pCtx;

    LogFlow(("emR3HwAccExecute: (cs:eip=%04x:%VGv)\n", pCtx->cs, pCtx->rip));
    *pfFFDone = false;

    STAM_COUNTER_INC(&pVM->em.s.StatHwAccExecuteEntry);

    /*
     * Spin till we get a forced action which returns anything but VINF_SUCCESS.
     */
    for (;;)
    {
        STAM_PROFILE_ADV_START(&pVM->em.s.StatHwAccEntry, a);

        /*
         * Check various preconditions.
         */
        /* NOTE(review): the raw-mode selector/IDT/TSS sync flags are simply
         * cleared here rather than serviced — presumably they are irrelevant
         * when running with hardware virtualization; confirm before changing. */
        VM_FF_CLEAR(pVM, (VM_FF_SELM_SYNC_GDT | VM_FF_SELM_SYNC_LDT | VM_FF_TRPM_SYNC_IDT | VM_FF_SELM_SYNC_TSS));

        /*
         * Process high priority pre-execution raw-mode FFs.
         */
        if (VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_RAW_MASK))
        {
            rc = emR3RawForcedActions(pVM, pCtx);
            if (VBOX_FAILURE(rc))
                break;
        }

#ifdef LOG_ENABLED
        /*
         * Log important stuff before entering GC.
         */
        if (TRPMHasTrap(pVM))
            Log(("Pending hardware interrupt=0x%x cs:eip=%04X:%VGv\n", TRPMGetTrapNo(pVM), pCtx->cs, pCtx->rip));

        uint32_t cpl = CPUMGetGuestCPL(pVM, CPUMCTX2CORE(pCtx));
        if (pCtx->eflags.Bits.u1VM)
            Log(("HWV86: %08X IF=%d\n", pCtx->eip, pCtx->eflags.Bits.u1IF));
        else if (CPUMIsGuestIn64BitCode(pVM, CPUMCTX2CORE(pCtx)))
            Log(("HWR%d: %04X:%VGv ESP=%VGv IF=%d CR0=%x CR4=%x EFER=%x\n", cpl, pCtx->cs, pCtx->rip, pCtx->rsp, pCtx->eflags.Bits.u1IF, (uint32_t)pCtx->cr0, (uint32_t)pCtx->cr4, (uint32_t)pCtx->msrEFER));
        else
            Log(("HWR%d: %04X:%08X ESP=%08X IF=%d CR0=%x CR4=%x EFER=%x\n", cpl, pCtx->cs, pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF, (uint32_t)pCtx->cr0, (uint32_t)pCtx->cr4, (uint32_t)pCtx->msrEFER));
#endif /* LOG_ENABLED */

        /*
         * Execute the code.
         */
        STAM_PROFILE_ADV_STOP(&pVM->em.s.StatHwAccEntry, a);
        STAM_PROFILE_START(&pVM->em.s.StatHwAccExec, x);
        /* Drop the VMM lock while running guest code. */
        VMMR3Unlock(pVM);
        rc = VMMR3HwAccRunGC(pVM);
        VMMR3Lock(pVM);
        STAM_PROFILE_STOP(&pVM->em.s.StatHwAccExec, x);

        /*
         * Deal with high priority post execution FFs before doing anything else.
         */
        VM_FF_CLEAR(pVM, VM_FF_RESUME_GUEST_MASK);
        if (VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_POST_MASK))
            rc = emR3HighPriorityPostForcedActions(pVM, rc);

        /*
         * Process the returned status code.
         */
        /* Any VINF_EM_* code goes straight up to the outer loop. */
        if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
            break;

        rc = emR3RawHandleRC(pVM, pCtx, rc);
        if (rc != VINF_SUCCESS)
            break;

        /*
         * Check and execute forced actions.
         */
#ifdef VBOX_HIGH_RES_TIMERS_HACK
        TMTimerPoll(pVM);
#endif
        if (VM_FF_ISPENDING(pVM, VM_FF_ALL_MASK))
        {
            rc = emR3ForcedActions(pVM, rc);
            if (    rc != VINF_SUCCESS
                &&  rc != VINF_EM_RESCHEDULE_HWACC)
            {
                *pfFFDone = true;
                break;
            }
        }
    }
    /*
     * Return to outer loop.
     */
#if defined(LOG_ENABLED) && defined(DEBUG)
    RTLogFlush(NULL);
#endif
    return rc;
}
2788
2789
2790/**
2791 * Decides whether to execute RAW, HWACC or REM.
2792 *
2793 * @returns new EM state
2794 * @param pVM The VM.
2795 * @param pCtx The CPU context.
2796 */
2797DECLINLINE(EMSTATE) emR3Reschedule(PVM pVM, PCPUMCTX pCtx)
2798{
2799 /*
2800 * When forcing raw-mode execution, things are simple.
2801 */
2802 if (pVM->em.s.fForceRAW)
2803 return EMSTATE_RAW;
2804
2805 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
2806 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
2807 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
2808
2809 X86EFLAGS EFlags = pCtx->eflags;
2810 if (HWACCMIsEnabled(pVM))
2811 {
2812 /* Hardware accelerated raw-mode:
2813 *
2814 * Typically only 32-bits protected mode, with paging enabled, code is allowed here.
2815 */
2816 if (HWACCMR3CanExecuteGuest(pVM, pCtx) == true)
2817 return EMSTATE_HWACC;
2818
2819 /* Note: Raw mode and hw accelerated mode are incompatible. The latter turns
2820 * off monitoring features essential for raw mode! */
2821 return EMSTATE_REM;
2822 }
2823
2824 /*
2825 * Standard raw-mode:
2826 *
2827 * Here we only support 16 & 32 bits protected mode ring 3 code that has no IO privileges
2828 * or 32 bits protected mode ring 0 code
2829 *
2830 * The tests are ordered by the likelyhood of being true during normal execution.
2831 */
2832 if (EFlags.u32 & (X86_EFL_TF /* | HF_INHIBIT_IRQ_MASK*/))
2833 {
2834 Log2(("raw mode refused: EFlags=%#x\n", EFlags.u32));
2835 return EMSTATE_REM;
2836 }
2837
2838#ifndef VBOX_RAW_V86
2839 if (EFlags.u32 & X86_EFL_VM) {
2840 Log2(("raw mode refused: VM_MASK\n"));
2841 return EMSTATE_REM;
2842 }
2843#endif
2844
2845 /** @todo check up the X86_CR0_AM flag in respect to raw mode!!! We're probably not emulating it right! */
2846 uint32_t u32CR0 = pCtx->cr0;
2847 if ((u32CR0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
2848 {
2849 //Log2(("raw mode refused: %s%s%s\n", (u32CR0 & X86_CR0_PG) ? "" : " !PG", (u32CR0 & X86_CR0_PE) ? "" : " !PE", (u32CR0 & X86_CR0_AM) ? "" : " !AM"));
2850 return EMSTATE_REM;
2851 }
2852
2853 if (pCtx->cr4 & X86_CR4_PAE)
2854 {
2855 uint32_t u32Dummy, u32Features;
2856
2857 CPUMGetGuestCpuId(pVM, 1, &u32Dummy, &u32Dummy, &u32Dummy, &u32Features);
2858 if (!(u32Features & X86_CPUID_FEATURE_EDX_PAE))
2859 return EMSTATE_REM;
2860 }
2861
2862 unsigned uSS = pCtx->ss;
2863 if ( pCtx->eflags.Bits.u1VM
2864 || (uSS & X86_SEL_RPL) == 3)
2865 {
2866 if (!EMIsRawRing3Enabled(pVM))
2867 return EMSTATE_REM;
2868
2869 if (!(EFlags.u32 & X86_EFL_IF))
2870 {
2871 Log2(("raw mode refused: IF (RawR3)\n"));
2872 return EMSTATE_REM;
2873 }
2874
2875 if (!(u32CR0 & X86_CR0_WP) && EMIsRawRing0Enabled(pVM))
2876 {
2877 Log2(("raw mode refused: CR0.WP + RawR0\n"));
2878 return EMSTATE_REM;
2879 }
2880 }
2881 else
2882 {
2883 if (!EMIsRawRing0Enabled(pVM))
2884 return EMSTATE_REM;
2885
2886 /* Only ring 0 supervisor code. */
2887 if ((uSS & X86_SEL_RPL) != 0)
2888 {
2889 Log2(("raw r0 mode refused: CPL %d\n", uSS & X86_SEL_RPL));
2890 return EMSTATE_REM;
2891 }
2892
2893 // Let's start with pure 32 bits ring 0 code first
2894 /** @todo What's pure 32-bit mode? flat? */
2895 if ( !(pCtx->ssHid.Attr.n.u1DefBig)
2896 || !(pCtx->csHid.Attr.n.u1DefBig))
2897 {
2898 Log2(("raw r0 mode refused: SS/CS not 32bit\n"));
2899 return EMSTATE_REM;
2900 }
2901
2902 /* Write protection muts be turned on, or else the guest can overwrite our hypervisor code and data. */
2903 if (!(u32CR0 & X86_CR0_WP))
2904 {
2905 Log2(("raw r0 mode refused: CR0.WP=0!\n"));
2906 return EMSTATE_REM;
2907 }
2908
2909 if (PATMShouldUseRawMode(pVM, (RTGCPTR)pCtx->eip))
2910 {
2911 Log2(("raw r0 mode forced: patch code\n"));
2912 return EMSTATE_RAW;
2913 }
2914
2915#if !defined(VBOX_ALLOW_IF0) && !defined(VBOX_RUN_INTERRUPT_GATE_HANDLERS)
2916 if (!(EFlags.u32 & X86_EFL_IF))
2917 {
2918 ////Log2(("R0: IF=0 VIF=%d %08X\n", eip, pVMeflags));
2919 //Log2(("RR0: Interrupts turned off; fall back to emulation\n"));
2920 return EMSTATE_REM;
2921 }
2922#endif
2923
2924 /** @todo still necessary??? */
2925 if (EFlags.Bits.u2IOPL != 0)
2926 {
2927 Log2(("raw r0 mode refused: IOPL %d\n", EFlags.Bits.u2IOPL));
2928 return EMSTATE_REM;
2929 }
2930 }
2931
2932 Assert(PGMPhysIsA20Enabled(pVM));
2933 return EMSTATE_RAW;
2934}
2935
2936
2937/**
2938 * Executes all high priority post execution force actions.
2939 *
2940 * @returns rc or a fatal status code.
2941 *
2942 * @param pVM VM handle.
2943 * @param rc The current rc.
2944 */
2945static int emR3HighPriorityPostForcedActions(PVM pVM, int rc)
2946{
2947 if (VM_FF_ISSET(pVM, VM_FF_PDM_CRITSECT))
2948 PDMR3CritSectFF(pVM);
2949
2950 if (VM_FF_ISSET(pVM, VM_FF_CSAM_PENDING_ACTION))
2951 CSAMR3DoPendingAction(pVM);
2952
2953 return rc;
2954}
2955
2956
/**
 * Executes all pending forced actions.
 *
 * Forced actions can cause execution delays and execution
 * rescheduling. The first we deal with using action priority, so
 * that for instance pending timers aren't scheduled and ran until
 * right before execution. The rescheduling we deal with using
 * return codes. The same goes for VM termination, only in that case
 * we exit everything.
 *
 * @returns VBox status code of equal or greater importance/severity than rc.
 *          The most important ones are: VINF_EM_RESCHEDULE,
 *          VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
 *
 * @param   pVM         VM handle.
 * @param   rc          The current rc.
 *
 */
static int emR3ForcedActions(PVM pVM, int rc)
{
    STAM_REL_PROFILE_START(&pVM->em.s.StatForcedActions, a);
#ifdef VBOX_STRICT
    int rcIrq = VINF_SUCCESS;
#endif
    int rc2;
    /* UPDATE_RC() merges rc2 into rc: rc2 is discarded when it is VINF_SUCCESS
     * or when rc already holds a failure status; otherwise the numerically
     * lower (= more important) VINF_EM_* code wins. */
#define UPDATE_RC() \
        do { \
            AssertMsg(rc2 <= 0 || (rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST), ("Invalid FF return code: %Vra\n", rc2)); \
            if (rc2 == VINF_SUCCESS || rc < VINF_SUCCESS) \
                break; \
            if (!rc || rc2 < rc) \
                rc = rc2; \
        } while (0)

    /*
     * Post execution chunk first.
     */
    if (VM_FF_ISPENDING(pVM, VM_FF_NORMAL_PRIORITY_POST_MASK))
    {
        /*
         * Termination request.
         */
        if (VM_FF_ISSET(pVM, VM_FF_TERMINATE))
        {
            Log2(("emR3ForcedActions: returns VINF_EM_TERMINATE\n"));
            STAM_REL_PROFILE_STOP(&pVM->em.s.StatForcedActions, a);
            return VINF_EM_TERMINATE;
        }

        /*
         * Debugger Facility polling.
         */
        if (VM_FF_ISSET(pVM, VM_FF_DBGF))
        {
            rc2 = DBGFR3VMMForcedAction(pVM);
            UPDATE_RC();
        }

        /*
         * Postponed reset request.
         */
        if (VM_FF_ISSET(pVM, VM_FF_RESET))
        {
            rc2 = VMR3Reset(pVM);
            UPDATE_RC();
            VM_FF_CLEAR(pVM, VM_FF_RESET);
        }

        /*
         * CSAM page scanning.
         */
        if (VM_FF_ISSET(pVM, VM_FF_CSAM_SCAN_PAGE))
        {
            PCPUMCTX pCtx = pVM->em.s.pCtx;

            /** @todo: check for 16 or 32 bits code! (D bit in the code selector) */
            Log(("Forced action VM_FF_CSAM_SCAN_PAGE\n"));

            CSAMR3CheckCodeEx(pVM, CPUMCTX2CORE(pCtx), pCtx->eip);
            VM_FF_CLEAR(pVM, VM_FF_CSAM_SCAN_PAGE);
        }

        /* check that we got them all */
        Assert(!(VM_FF_NORMAL_PRIORITY_POST_MASK & ~(VM_FF_TERMINATE | VM_FF_DBGF | VM_FF_RESET | VM_FF_CSAM_SCAN_PAGE)));
    }

    /*
     * Normal priority then.
     * (Executed in no particular order.)
     */
    if (VM_FF_ISPENDING(pVM, VM_FF_NORMAL_PRIORITY_MASK))
    {
        /*
         * PDM Queues are pending.
         */
        if (VM_FF_ISSET(pVM, VM_FF_PDM_QUEUES))
            PDMR3QueueFlushAll(pVM);

        /*
         * PDM DMA transfers are pending.
         */
        if (VM_FF_ISSET(pVM, VM_FF_PDM_DMA))
            PDMR3DmaRun(pVM);

        /*
         * Requests from other threads.
         */
        if (VM_FF_ISSET(pVM, VM_FF_REQUEST))
        {
            rc2 = VMR3ReqProcessU(pVM->pUVM);
            /* Off/terminate requests bypass the UPDATE_RC merging and return immediately. */
            if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE)
            {
                Log2(("emR3ForcedActions: returns %Vrc\n", rc2));
                STAM_REL_PROFILE_STOP(&pVM->em.s.StatForcedActions, a);
                return rc2;
            }
            UPDATE_RC();
        }

        /* Replay the handler notification changes. */
        if (VM_FF_ISSET(pVM, VM_FF_REM_HANDLER_NOTIFY))
            REMR3ReplayHandlerNotifications(pVM);

        /* check that we got them all */
        Assert(!(VM_FF_NORMAL_PRIORITY_MASK & ~(VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA | VM_FF_REM_HANDLER_NOTIFY)));
    }

    /*
     * Execute polling function ever so often.
     * THIS IS A HACK, IT WILL BE *REPLACED* BY PROPER ASYNC NETWORKING "SOON"!
     */
    /* Static counter: polls on every 4th call. Single EMT assumed. */
    static unsigned cLast = 0;
    if (!((++cLast) % 4))
        PDMR3Poll(pVM);

    /*
     * High priority pre execution chunk last.
     * (Executed in ascending priority order.)
     */
    if (VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_MASK))
    {
        /*
         * Timers before interrupts.
         */
        if (VM_FF_ISSET(pVM, VM_FF_TIMER))
            TMR3TimerQueuesDo(pVM);

        /*
         * The instruction following an emulated STI should *always* be executed!
         */
        if (VM_FF_ISSET(pVM, VM_FF_INHIBIT_INTERRUPTS))
        {
            Log(("VM_FF_EMULATED_STI at %VGv successor %VGv\n", (RTGCPTR)CPUMGetGuestRIP(pVM), EMGetInhibitInterruptsPC(pVM)));
            if (CPUMGetGuestEIP(pVM) != EMGetInhibitInterruptsPC(pVM))
            {
                /* Note: we intentionally don't clear VM_FF_INHIBIT_INTERRUPTS here if the eip is the same as the inhibited instr address.
                 * Before we are able to execute this instruction in raw mode (iret to guest code) an external interrupt might
                 * force a world switch again. Possibly allowing a guest interrupt to be dispatched in the process. This could
                 * break the guest. Sounds very unlikely, but such timing sensitive problem are not as rare as you might think.
                 */
                VM_FF_CLEAR(pVM, VM_FF_INHIBIT_INTERRUPTS);
            }
            if (HWACCMR3IsActive(pVM))
                rc2 = VINF_EM_RESCHEDULE_HWACC;
            else
                rc2 = PATMAreInterruptsEnabled(pVM) ? VINF_EM_RESCHEDULE_RAW : VINF_EM_RESCHEDULE_REM;

            UPDATE_RC();
        }

        /*
         * Interrupts.
         */
        if (    !VM_FF_ISSET(pVM, VM_FF_INHIBIT_INTERRUPTS)
            &&  (!rc || rc >= VINF_EM_RESCHEDULE_RAW)
            &&  !TRPMHasTrap(pVM) /* an interrupt could already be scheduled for dispatching in the recompiler. */
            &&  PATMAreInterruptsEnabled(pVM)
            &&  !HWACCMR3IsEventPending(pVM))
        {
            if (VM_FF_ISPENDING(pVM, VM_FF_INTERRUPT_APIC | VM_FF_INTERRUPT_PIC))
            {
                /* Note: it's important to make sure the return code from TRPMR3InjectEvent isn't ignored! */
                /** @todo this really isn't nice, should properly handle this */
                rc2 = TRPMR3InjectEvent(pVM, TRPM_HARDWARE_INT);
#ifdef VBOX_STRICT
                rcIrq = rc2;
#endif
                UPDATE_RC();
            }
            /** @todo really ugly; if we entered the hlt state when exiting the recompiler and an interrupt was pending, we previously got stuck in the halted state. */
            else if (REMR3QueryPendingInterrupt(pVM) != REM_NO_PENDING_IRQ)
            {
                rc2 = VINF_EM_RESCHEDULE_REM;
                UPDATE_RC();
            }
        }

        /*
         * Allocate handy pages.
         */
        if (VM_FF_ISSET(pVM, VM_FF_PGM_NEED_HANDY_PAGES))
        {
            rc2 = PGMR3PhysAllocateHandyPages(pVM);
            UPDATE_RC();
        }

        /*
         * Debugger Facility request.
         */
        if (VM_FF_ISSET(pVM, VM_FF_DBGF))
        {
            rc2 = DBGFR3VMMForcedAction(pVM);
            UPDATE_RC();
        }

        /*
         * Termination request.
         */
        if (VM_FF_ISSET(pVM, VM_FF_TERMINATE))
        {
            Log2(("emR3ForcedActions: returns VINF_EM_TERMINATE\n"));
            STAM_REL_PROFILE_STOP(&pVM->em.s.StatForcedActions, a);
            return VINF_EM_TERMINATE;
        }

#ifdef DEBUG
        /*
         * Debug, pause the VM.
         */
        if (VM_FF_ISSET(pVM, VM_FF_DEBUG_SUSPEND))
        {
            VM_FF_CLEAR(pVM, VM_FF_DEBUG_SUSPEND);
            Log(("emR3ForcedActions: returns VINF_EM_SUSPEND\n"));
            return VINF_EM_SUSPEND;
        }

#endif
        /* check that we got them all */
        Assert(!(VM_FF_HIGH_PRIORITY_PRE_MASK & ~(VM_FF_TIMER | VM_FF_INTERRUPT_APIC | VM_FF_INTERRUPT_PIC | VM_FF_DBGF | VM_FF_PGM_SYNC_CR3 | VM_FF_PGM_SYNC_CR3_NON_GLOBAL | VM_FF_SELM_SYNC_TSS | VM_FF_TRPM_SYNC_IDT | VM_FF_SELM_SYNC_GDT | VM_FF_SELM_SYNC_LDT | VM_FF_TERMINATE | VM_FF_DEBUG_SUSPEND | VM_FF_INHIBIT_INTERRUPTS | VM_FF_PGM_NEED_HANDY_PAGES)));
    }

#undef UPDATE_RC
    Log2(("emR3ForcedActions: returns %Vrc\n", rc));
    STAM_REL_PROFILE_STOP(&pVM->em.s.StatForcedActions, a);
    /* rcIrq is only assigned in strict builds; Assert compiles away otherwise. */
    Assert(rcIrq == VINF_SUCCESS || rcIrq == rc);
    return rc;
}
3204
3205
3206/**
3207 * Execute VM.
3208 *
3209 * This function is the main loop of the VM. The emulation thread
3210 * calls this function when the VM has been successfully constructed
3211 * and we're ready for executing the VM.
3212 *
3213 * Returning from this function means that the VM is turned off or
3214 * suspended (state already saved) and deconstruction in next in line.
3215 *
3216 * All interaction from other thread are done using forced actions
3217 * and signaling of the wait object.
3218 *
3219 * @returns VBox status code.
3220 * @param pVM The VM to operate on.
3221 */
3222EMR3DECL(int) EMR3ExecuteVM(PVM pVM)
3223{
3224 LogFlow(("EMR3ExecuteVM: pVM=%p enmVMState=%d enmState=%d (%s) fForceRAW=%d\n", pVM, pVM->enmVMState,
3225 pVM->em.s.enmState, EMR3GetStateName(pVM->em.s.enmState), pVM->em.s.fForceRAW));
3226 VM_ASSERT_EMT(pVM);
3227 Assert(pVM->em.s.enmState == EMSTATE_NONE || pVM->em.s.enmState == EMSTATE_SUSPENDED);
3228
3229 VMMR3Lock(pVM);
3230
3231 int rc = setjmp(pVM->em.s.u.FatalLongJump);
3232 if (rc == 0)
3233 {
3234 /*
3235 * Start the virtual time.
3236 */
3237 rc = TMVirtualResume(pVM);
3238 Assert(rc == VINF_SUCCESS);
3239 rc = TMCpuTickResume(pVM);
3240 Assert(rc == VINF_SUCCESS);
3241
3242 /*
3243 * The Outer Main Loop.
3244 */
3245 bool fFFDone = false;
3246 rc = VINF_EM_RESCHEDULE;
3247 pVM->em.s.enmState = EMSTATE_REM;
3248 STAM_REL_PROFILE_ADV_START(&pVM->em.s.StatTotal, x);
3249 for (;;)
3250 {
3251 /*
3252 * Before we can schedule anything (we're here because
3253 * scheduling is required) we must service any pending
3254 * forced actions to avoid any pending action causing
3255 * immediate rescheduling upon entering an inner loop
3256 *
3257 * Do forced actions.
3258 */
3259 if ( !fFFDone
3260 && rc != VINF_EM_TERMINATE
3261 && rc != VINF_EM_OFF
3262 && VM_FF_ISPENDING(pVM, VM_FF_ALL_BUT_RAW_MASK))
3263 {
3264 rc = emR3ForcedActions(pVM, rc);
3265 if ( ( rc == VINF_EM_RESCHEDULE_REM
3266 || rc == VINF_EM_RESCHEDULE_HWACC)
3267 && pVM->em.s.fForceRAW)
3268 rc = VINF_EM_RESCHEDULE_RAW;
3269 }
3270 else if (fFFDone)
3271 fFFDone = false;
3272
3273 /*
3274 * Now what to do?
3275 */
3276 Log2(("EMR3ExecuteVM: rc=%Vrc\n", rc));
3277 switch (rc)
3278 {
3279 /*
3280 * Keep doing what we're currently doing.
3281 */
3282 case VINF_SUCCESS:
3283 break;
3284
3285 /*
3286 * Reschedule - to raw-mode execution.
3287 */
3288 case VINF_EM_RESCHEDULE_RAW:
3289 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_RAW: %d -> %d (EMSTATE_RAW)\n", pVM->em.s.enmState, EMSTATE_RAW));
3290 pVM->em.s.enmState = EMSTATE_RAW;
3291 break;
3292
3293 /*
3294 * Reschedule - to hardware accelerated raw-mode execution.
3295 */
3296 case VINF_EM_RESCHEDULE_HWACC:
3297 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_HWACC: %d -> %d (EMSTATE_HWACC)\n", pVM->em.s.enmState, EMSTATE_HWACC));
3298 Assert(!pVM->em.s.fForceRAW);
3299 pVM->em.s.enmState = EMSTATE_HWACC;
3300 break;
3301
3302 /*
3303 * Reschedule - to recompiled execution.
3304 */
3305 case VINF_EM_RESCHEDULE_REM:
3306 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_REM: %d -> %d (EMSTATE_REM)\n", pVM->em.s.enmState, EMSTATE_REM));
3307 pVM->em.s.enmState = EMSTATE_REM;
3308 break;
3309
3310 /*
3311 * Resume.
3312 */
3313 case VINF_EM_RESUME:
3314 Log2(("EMR3ExecuteVM: VINF_EM_RESUME: %d -> VINF_EM_RESCHEDULE\n", pVM->em.s.enmState));
3315 /* fall through and get scheduled. */
3316
3317 /*
3318 * Reschedule.
3319 */
3320 case VINF_EM_RESCHEDULE:
3321 {
3322 EMSTATE enmState = emR3Reschedule(pVM, pVM->em.s.pCtx);
3323 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE: %d -> %d (%s)\n", pVM->em.s.enmState, enmState, EMR3GetStateName(enmState)));
3324 pVM->em.s.enmState = enmState;
3325 break;
3326 }
3327
3328 /*
3329 * Halted.
3330 */
3331 case VINF_EM_HALT:
3332 Log2(("EMR3ExecuteVM: VINF_EM_HALT: %d -> %d\n", pVM->em.s.enmState, EMSTATE_HALTED));
3333 pVM->em.s.enmState = EMSTATE_HALTED;
3334 break;
3335
3336 /*
3337 * Suspend.
3338 */
3339 case VINF_EM_SUSPEND:
3340 Log2(("EMR3ExecuteVM: VINF_EM_SUSPEND: %d -> %d\n", pVM->em.s.enmState, EMSTATE_SUSPENDED));
3341 pVM->em.s.enmState = EMSTATE_SUSPENDED;
3342 break;
3343
3344 /*
3345 * Reset.
3346 * We might end up doing a double reset for now, we'll have to clean up the mess later.
3347 */
3348 case VINF_EM_RESET:
3349 Log2(("EMR3ExecuteVM: VINF_EM_RESET: %d -> %d\n", pVM->em.s.enmState, EMSTATE_REM));
3350 pVM->em.s.enmState = EMSTATE_REM;
3351 break;
3352
3353 /*
3354 * Power Off.
3355 */
3356 case VINF_EM_OFF:
3357 pVM->em.s.enmState = EMSTATE_TERMINATING;
3358 Log2(("EMR3ExecuteVM: returns VINF_EM_OFF (%d -> %d)\n", pVM->em.s.enmState, EMSTATE_TERMINATING));
3359 TMVirtualPause(pVM);
3360 TMCpuTickPause(pVM);
3361 VMMR3Unlock(pVM);
3362 STAM_REL_PROFILE_ADV_STOP(&pVM->em.s.StatTotal, x);
3363 return rc;
3364
3365 /*
3366 * Terminate the VM.
3367 */
3368 case VINF_EM_TERMINATE:
3369 pVM->em.s.enmState = EMSTATE_TERMINATING;
3370 Log(("EMR3ExecuteVM returns VINF_EM_TERMINATE (%d -> %d)\n", pVM->em.s.enmState, EMSTATE_TERMINATING));
3371 TMVirtualPause(pVM);
3372 TMCpuTickPause(pVM);
3373 STAM_REL_PROFILE_ADV_STOP(&pVM->em.s.StatTotal, x);
3374 return rc;
3375
3376 /*
3377 * Guest debug events.
3378 */
3379 case VINF_EM_DBG_STEPPED:
3380 AssertMsgFailed(("VINF_EM_DBG_STEPPED cannot be here!"));
3381 case VINF_EM_DBG_STOP:
3382 case VINF_EM_DBG_BREAKPOINT:
3383 case VINF_EM_DBG_STEP:
3384 if (pVM->em.s.enmState == EMSTATE_RAW)
3385 {
3386 Log2(("EMR3ExecuteVM: %Vrc: %d -> %d\n", rc, pVM->em.s.enmState, EMSTATE_DEBUG_GUEST_RAW));
3387 pVM->em.s.enmState = EMSTATE_DEBUG_GUEST_RAW;
3388 }
3389 else
3390 {
3391 Log2(("EMR3ExecuteVM: %Vrc: %d -> %d\n", rc, pVM->em.s.enmState, EMSTATE_DEBUG_GUEST_REM));
3392 pVM->em.s.enmState = EMSTATE_DEBUG_GUEST_REM;
3393 }
3394 break;
3395
3396 /*
3397 * Hypervisor debug events.
3398 */
3399 case VINF_EM_DBG_HYPER_STEPPED:
3400 case VINF_EM_DBG_HYPER_BREAKPOINT:
3401 case VINF_EM_DBG_HYPER_ASSERTION:
3402 Log2(("EMR3ExecuteVM: %Vrc: %d -> %d\n", rc, pVM->em.s.enmState, EMSTATE_DEBUG_HYPER));
3403 pVM->em.s.enmState = EMSTATE_DEBUG_HYPER;
3404 break;
3405
3406 /*
3407 * Any error code showing up here other than the ones we
3408 * know and process above are considered to be FATAL.
3409 *
3410 * Unknown warnings and informational status codes are also
3411 * included in this.
3412 */
3413 default:
3414 if (VBOX_SUCCESS(rc))
3415 {
3416 AssertMsgFailed(("Unexpected warning or informational status code %Vra!\n", rc));
3417 rc = VERR_EM_INTERNAL_ERROR;
3418 }
3419 pVM->em.s.enmState = EMSTATE_GURU_MEDITATION;
3420 Log(("EMR3ExecuteVM returns %d\n", rc));
3421 break;
3422 }
3423
3424
3425 /*
3426 * Any waiters can now be woken up
3427 */
3428 VMMR3Unlock(pVM);
3429 VMMR3Lock(pVM);
3430
3431 STAM_PROFILE_ADV_STOP(&pVM->em.s.StatTotal, x); /* (skip this in release) */
3432 STAM_PROFILE_ADV_START(&pVM->em.s.StatTotal, x);
3433
3434 /*
3435 * Act on the state.
3436 */
3437 switch (pVM->em.s.enmState)
3438 {
3439 /*
3440 * Execute raw.
3441 */
3442 case EMSTATE_RAW:
3443 rc = emR3RawExecute(pVM, &fFFDone);
3444 break;
3445
3446 /*
3447 * Execute hardware accelerated raw.
3448 */
3449 case EMSTATE_HWACC:
3450 rc = emR3HwAccExecute(pVM, &fFFDone);
3451 break;
3452
3453 /*
3454 * Execute recompiled.
3455 */
3456 case EMSTATE_REM:
3457 rc = emR3RemExecute(pVM, &fFFDone);
3458 Log2(("EMR3ExecuteVM: emR3RemExecute -> %Vrc\n", rc));
3459 break;
3460
3461 /*
3462 * hlt - execution halted until interrupt.
3463 */
3464 case EMSTATE_HALTED:
3465 {
3466 STAM_REL_PROFILE_START(&pVM->em.s.StatHalted, y);
3467 rc = VMR3WaitHalted(pVM, !(CPUMGetGuestEFlags(pVM) & X86_EFL_IF));
3468 STAM_REL_PROFILE_STOP(&pVM->em.s.StatHalted, y);
3469 break;
3470 }
3471
3472 /*
3473 * Suspended - return to VM.cpp.
3474 */
3475 case EMSTATE_SUSPENDED:
3476 TMVirtualPause(pVM);
3477 TMCpuTickPause(pVM);
3478 VMMR3Unlock(pVM);
3479 STAM_REL_PROFILE_ADV_STOP(&pVM->em.s.StatTotal, x);
3480 return VINF_EM_SUSPEND;
3481
3482 /*
3483 * Debugging in the guest.
3484 */
3485 case EMSTATE_DEBUG_GUEST_REM:
3486 case EMSTATE_DEBUG_GUEST_RAW:
3487 TMVirtualPause(pVM);
3488 TMCpuTickPause(pVM);
3489 rc = emR3Debug(pVM, rc);
3490 TMVirtualResume(pVM);
3491 TMCpuTickResume(pVM);
3492 Log2(("EMR3ExecuteVM: enmr3Debug -> %Vrc (state %d)\n", rc, pVM->em.s.enmState));
3493 break;
3494
3495 /*
3496 * Debugging in the hypervisor.
3497 */
3498 case EMSTATE_DEBUG_HYPER:
3499 {
3500 TMVirtualPause(pVM);
3501 TMCpuTickPause(pVM);
3502 STAM_REL_PROFILE_ADV_STOP(&pVM->em.s.StatTotal, x);
3503
3504 rc = emR3Debug(pVM, rc);
3505 Log2(("EMR3ExecuteVM: enmr3Debug -> %Vrc (state %d)\n", rc, pVM->em.s.enmState));
3506 if (rc != VINF_SUCCESS)
3507 {
3508 /* switch to guru meditation mode */
3509 pVM->em.s.enmState = EMSTATE_GURU_MEDITATION;
3510 VMMR3FatalDump(pVM, rc);
3511 return rc;
3512 }
3513
3514 STAM_REL_PROFILE_ADV_START(&pVM->em.s.StatTotal, x);
3515 TMVirtualResume(pVM);
3516 TMCpuTickResume(pVM);
3517 break;
3518 }
3519
3520 /*
3521 * Guru meditation takes place in the debugger.
3522 */
3523 case EMSTATE_GURU_MEDITATION:
3524 {
3525 TMVirtualPause(pVM);
3526 TMCpuTickPause(pVM);
3527 VMMR3FatalDump(pVM, rc);
3528 emR3Debug(pVM, rc);
3529 VMMR3Unlock(pVM);
3530 STAM_REL_PROFILE_ADV_STOP(&pVM->em.s.StatTotal, x);
3531 return rc;
3532 }
3533
3534 /*
3535 * The states we don't expect here.
3536 */
3537 case EMSTATE_NONE:
3538 case EMSTATE_TERMINATING:
3539 default:
3540 AssertMsgFailed(("EMR3ExecuteVM: Invalid state %d!\n", pVM->em.s.enmState));
3541 pVM->em.s.enmState = EMSTATE_GURU_MEDITATION;
3542 TMVirtualPause(pVM);
3543 TMCpuTickPause(pVM);
3544 VMMR3Unlock(pVM);
3545 STAM_REL_PROFILE_ADV_STOP(&pVM->em.s.StatTotal, x);
3546 return VERR_EM_INTERNAL_ERROR;
3547 }
3548 } /* The Outer Main Loop */
3549 }
3550 else
3551 {
3552 /*
3553 * Fatal error.
3554 */
3555 LogFlow(("EMR3ExecuteVM: returns %Vrc (longjmp / fatal error)\n", rc));
3556 TMVirtualPause(pVM);
3557 TMCpuTickPause(pVM);
3558 VMMR3FatalDump(pVM, rc);
3559 emR3Debug(pVM, rc);
3560 VMMR3Unlock(pVM);
3561 STAM_REL_PROFILE_ADV_STOP(&pVM->em.s.StatTotal, x);
3562 /** @todo change the VM state! */
3563 return rc;
3564 }
3565
3566 /* (won't ever get here). */
3567 AssertFailed();
3568}
3569
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette