VirtualBox

source: vbox/trunk/src/VBox/VMM/EM.cpp@19454

Last change on this file since 19454 was 19442, checked in by vboxsync, 15 years ago

Don't reschedule in the halted or wait for SIPI case.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 162.1 KB
/* $Id: EM.cpp 19442 2009-05-06 15:34:20Z vboxsync $ */
/** @file
 * EM - Execution Monitor / Manager.
 */

/*
 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
 * Clara, CA 95054 USA or visit http://www.sun.com if you need
 * additional information or have any questions.
 */

/** @page pg_em EM - The Execution Monitor / Manager
 *
 * The Execution Monitor/Manager is responsible for running the VM, scheduling
 * the right kind of execution (Raw-mode, Hardware Assisted, Recompiled or
 * Interpreted), and keeping the CPU states in sync. The function
 * EMR3ExecuteVM() is the 'main-loop' of the VM, while each of the execution
 * modes has different inner loops (emR3RawExecute, emR3HwAccExecute, and
 * emR3RemExecute).
 *
 * The interpreted execution is only used to avoid switching between
 * raw-mode/hwaccm and the recompiler when fielding virtualization traps/faults.
 * The interpretation is thus implemented as part of EM.
 *
 * @see grp_em
 */
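
/* A rough, illustrative sketch of the relationship described above (the names
 * are the ones used in this file; the HWACC inner loop lives further down):
 *
 *      EMR3ExecuteVM(pVM, pVCpu)       - the outer 'main-loop'
 *          emR3Reschedule()            - picks RAW / HWACC / REM
 *          emR3RawExecute()            - raw-mode inner loop
 *          emR3HwAccExecute()          - hardware-assisted inner loop
 *          emR3RemExecute()            - recompiler inner loop
 */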

/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_EM
#include <VBox/em.h>
#include <VBox/vmm.h>
#ifdef VBOX_WITH_VMI
# include <VBox/parav.h>
#endif
#include <VBox/patm.h>
#include <VBox/csam.h>
#include <VBox/selm.h>
#include <VBox/trpm.h>
#include <VBox/iom.h>
#include <VBox/dbgf.h>
#include <VBox/pgm.h>
#include <VBox/rem.h>
#include <VBox/tm.h>
#include <VBox/mm.h>
#include <VBox/ssm.h>
#include <VBox/pdmapi.h>
#include <VBox/pdmcritsect.h>
#include <VBox/pdmqueue.h>
#include <VBox/hwaccm.h>
#include <VBox/patm.h>
#include "EMInternal.h"
#include <VBox/vm.h>
#include <VBox/cpumdis.h>
#include <VBox/dis.h>
#include <VBox/disopcode.h>
#include <VBox/dbgf.h>

#include <VBox/log.h>
#include <iprt/thread.h>
#include <iprt/assert.h>
#include <iprt/asm.h>
#include <iprt/semaphore.h>
#include <iprt/string.h>
#include <iprt/avl.h>
#include <iprt/stream.h>
#include <VBox/param.h>
#include <VBox/err.h>


/*******************************************************************************
*   Defined Constants And Macros                                               *
*******************************************************************************/
#if 0 /* Disabled till after 2.1.0 when we've time to test it. */
#define EM_NOTIFY_HWACCM
#endif


/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM);
static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version);
static int emR3Debug(PVM pVM, PVMCPU pVCpu, int rc);
static int emR3RemStep(PVM pVM, PVMCPU pVCpu);
static int emR3RemExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone);
static int emR3RawResumeHyper(PVM pVM, PVMCPU pVCpu);
static int emR3RawStep(PVM pVM, PVMCPU pVCpu);
DECLINLINE(int) emR3RawHandleRC(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, int rc);
DECLINLINE(int) emR3RawUpdateForceFlag(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, int rc);
static int emR3RawForcedActions(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
static int emR3RawExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone);
DECLINLINE(int) emR3RawExecuteInstruction(PVM pVM, PVMCPU pVCpu, const char *pszPrefix, int rcGC = VINF_SUCCESS);
static int emR3HighPriorityPostForcedActions(PVM pVM, PVMCPU pVCpu, int rc);
static int emR3ForcedActions(PVM pVM, PVMCPU pVCpu, int rc);
static int emR3RawGuestTrap(PVM pVM, PVMCPU pVCpu);
static int emR3PatchTrap(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, int gcret);
static int emR3SingleStepExecRem(PVM pVM, uint32_t cIterations);
static EMSTATE emR3Reschedule(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
static void emR3RemLock(PVM pVM);
static void emR3RemUnlock(PVM pVM);

/**
 * Initializes the EM.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 */
VMMR3DECL(int) EMR3Init(PVM pVM)
{
    LogFlow(("EMR3Init\n"));
    /*
     * Assert alignment and sizes.
     */
    AssertCompileMemberAlignment(VM, em.s, 32);
    AssertCompile(sizeof(pVM->em.s) <= sizeof(pVM->em.padding));
    AssertReleaseMsg(sizeof(pVM->aCpus[0].em.s.u.FatalLongJump) <= sizeof(pVM->aCpus[0].em.s.u.achPaddingFatalLongJump),
                     ("%d bytes, padding %d\n", sizeof(pVM->aCpus[0].em.s.u.FatalLongJump), sizeof(pVM->aCpus[0].em.s.u.achPaddingFatalLongJump)));

    /*
     * Init the structure.
     */
    pVM->em.s.offVM = RT_OFFSETOF(VM, em.s);
    int rc = CFGMR3QueryBool(CFGMR3GetRoot(pVM), "RawR3Enabled", &pVM->fRawR3Enabled);
    if (RT_FAILURE(rc))
        pVM->fRawR3Enabled = true;
    rc = CFGMR3QueryBool(CFGMR3GetRoot(pVM), "RawR0Enabled", &pVM->fRawR0Enabled);
    if (RT_FAILURE(rc))
        pVM->fRawR0Enabled = true;
    Log(("EMR3Init: fRawR3Enabled=%d fRawR0Enabled=%d\n", pVM->fRawR3Enabled, pVM->fRawR0Enabled));

    /*
     * Initialize the REM critical section.
     */
    rc = PDMR3CritSectInit(pVM, &pVM->em.s.CritSectREM, "EM-REM");
    AssertRCReturn(rc, rc);

    /*
     * Saved state.
     */
    rc = SSMR3RegisterInternal(pVM, "em", 0, EM_SAVED_STATE_VERSION, 16,
                               NULL, emR3Save, NULL,
                               NULL, emR3Load, NULL);
    if (RT_FAILURE(rc))
        return rc;

    for (unsigned i = 0; i < pVM->cCPUs; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];

        pVCpu->em.s.offVMCPU = RT_OFFSETOF(VMCPU, em.s);

        pVCpu->em.s.enmState     = (i == 0) ? EMSTATE_NONE : EMSTATE_WAIT_SIPI;
        pVCpu->em.s.enmPrevState = EMSTATE_NONE;
        pVCpu->em.s.fForceRAW    = false;

        pVCpu->em.s.pCtx         = CPUMQueryGuestCtxPtr(pVCpu);
        pVCpu->em.s.pPatmGCState = PATMR3QueryGCStateHC(pVM);
        AssertMsg(pVCpu->em.s.pPatmGCState, ("PATMR3QueryGCStateHC failed!\n"));

# define EM_REG_COUNTER(a, b, c) \
        rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, c, b, i); \
        AssertRC(rc);

# define EM_REG_COUNTER_USED(a, b, c) \
        rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, c, b, i); \
        AssertRC(rc);

# define EM_REG_PROFILE(a, b, c) \
        rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, i); \
        AssertRC(rc);

# define EM_REG_PROFILE_ADV(a, b, c) \
        rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE_ADV, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, i); \
        AssertRC(rc);

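        /* In the helpers above, 'a' is the address of the sample to register,
           'b' the STAM path format string (its %d is filled in with the CPU
           id 'i'), and 'c' the description shown by the statistics viewer. */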
        /*
         * Statistics.
         */
#ifdef VBOX_WITH_STATISTICS
        PEMSTATS pStats;
        rc = MMHyperAlloc(pVM, sizeof(*pStats), 0, MM_TAG_EM, (void **)&pStats);
        if (RT_FAILURE(rc))
            return rc;

        pVCpu->em.s.pStatsR3 = pStats;
        pVCpu->em.s.pStatsR0 = MMHyperR3ToR0(pVM, pStats);
        pVCpu->em.s.pStatsRC = MMHyperR3ToRC(pVM, pStats);

        EM_REG_PROFILE(&pStats->StatRZEmulate, "/EM/CPU%d/RZ/Interpret", "Profiling of EMInterpretInstruction.");
        EM_REG_PROFILE(&pStats->StatR3Emulate, "/EM/CPU%d/R3/Interpret", "Profiling of EMInterpretInstruction.");

        EM_REG_PROFILE(&pStats->StatRZInterpretSucceeded, "/EM/CPU%d/RZ/Interpret/Success", "The number of times an instruction was successfully interpreted.");
        EM_REG_PROFILE(&pStats->StatR3InterpretSucceeded, "/EM/CPU%d/R3/Interpret/Success", "The number of times an instruction was successfully interpreted.");

        EM_REG_COUNTER_USED(&pStats->StatRZAnd, "/EM/CPU%d/RZ/Interpret/Success/And", "The number of times AND was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3And, "/EM/CPU%d/R3/Interpret/Success/And", "The number of times AND was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZAdd, "/EM/CPU%d/RZ/Interpret/Success/Add", "The number of times ADD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Add, "/EM/CPU%d/R3/Interpret/Success/Add", "The number of times ADD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZAdc, "/EM/CPU%d/RZ/Interpret/Success/Adc", "The number of times ADC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Adc, "/EM/CPU%d/R3/Interpret/Success/Adc", "The number of times ADC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZSub, "/EM/CPU%d/RZ/Interpret/Success/Sub", "The number of times SUB was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Sub, "/EM/CPU%d/R3/Interpret/Success/Sub", "The number of times SUB was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZCpuId, "/EM/CPU%d/RZ/Interpret/Success/CpuId", "The number of times CPUID was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3CpuId, "/EM/CPU%d/R3/Interpret/Success/CpuId", "The number of times CPUID was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZDec, "/EM/CPU%d/RZ/Interpret/Success/Dec", "The number of times DEC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Dec, "/EM/CPU%d/R3/Interpret/Success/Dec", "The number of times DEC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZHlt, "/EM/CPU%d/RZ/Interpret/Success/Hlt", "The number of times HLT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Hlt, "/EM/CPU%d/R3/Interpret/Success/Hlt", "The number of times HLT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZInc, "/EM/CPU%d/RZ/Interpret/Success/Inc", "The number of times INC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Inc, "/EM/CPU%d/R3/Interpret/Success/Inc", "The number of times INC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZInvlPg, "/EM/CPU%d/RZ/Interpret/Success/Invlpg", "The number of times INVLPG was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3InvlPg, "/EM/CPU%d/R3/Interpret/Success/Invlpg", "The number of times INVLPG was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZIret, "/EM/CPU%d/RZ/Interpret/Success/Iret", "The number of times IRET was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Iret, "/EM/CPU%d/R3/Interpret/Success/Iret", "The number of times IRET was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZLLdt, "/EM/CPU%d/RZ/Interpret/Success/LLdt", "The number of times LLDT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3LLdt, "/EM/CPU%d/R3/Interpret/Success/LLdt", "The number of times LLDT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZLIdt, "/EM/CPU%d/RZ/Interpret/Success/LIdt", "The number of times LIDT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3LIdt, "/EM/CPU%d/R3/Interpret/Success/LIdt", "The number of times LIDT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZLGdt, "/EM/CPU%d/RZ/Interpret/Success/LGdt", "The number of times LGDT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3LGdt, "/EM/CPU%d/R3/Interpret/Success/LGdt", "The number of times LGDT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZMov, "/EM/CPU%d/RZ/Interpret/Success/Mov", "The number of times MOV was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Mov, "/EM/CPU%d/R3/Interpret/Success/Mov", "The number of times MOV was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZMovCRx, "/EM/CPU%d/RZ/Interpret/Success/MovCRx", "The number of times MOV CRx was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3MovCRx, "/EM/CPU%d/R3/Interpret/Success/MovCRx", "The number of times MOV CRx was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZMovDRx, "/EM/CPU%d/RZ/Interpret/Success/MovDRx", "The number of times MOV DRx was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3MovDRx, "/EM/CPU%d/R3/Interpret/Success/MovDRx", "The number of times MOV DRx was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZOr, "/EM/CPU%d/RZ/Interpret/Success/Or", "The number of times OR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Or, "/EM/CPU%d/R3/Interpret/Success/Or", "The number of times OR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZPop, "/EM/CPU%d/RZ/Interpret/Success/Pop", "The number of times POP was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Pop, "/EM/CPU%d/R3/Interpret/Success/Pop", "The number of times POP was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZRdtsc, "/EM/CPU%d/RZ/Interpret/Success/Rdtsc", "The number of times RDTSC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Rdtsc, "/EM/CPU%d/R3/Interpret/Success/Rdtsc", "The number of times RDTSC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZRdpmc, "/EM/CPU%d/RZ/Interpret/Success/Rdpmc", "The number of times RDPMC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Rdpmc, "/EM/CPU%d/R3/Interpret/Success/Rdpmc", "The number of times RDPMC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZSti, "/EM/CPU%d/RZ/Interpret/Success/Sti", "The number of times STI was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Sti, "/EM/CPU%d/R3/Interpret/Success/Sti", "The number of times STI was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZXchg, "/EM/CPU%d/RZ/Interpret/Success/Xchg", "The number of times XCHG was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Xchg, "/EM/CPU%d/R3/Interpret/Success/Xchg", "The number of times XCHG was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZXor, "/EM/CPU%d/RZ/Interpret/Success/Xor", "The number of times XOR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Xor, "/EM/CPU%d/R3/Interpret/Success/Xor", "The number of times XOR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZMonitor, "/EM/CPU%d/RZ/Interpret/Success/Monitor", "The number of times MONITOR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Monitor, "/EM/CPU%d/R3/Interpret/Success/Monitor", "The number of times MONITOR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZMWait, "/EM/CPU%d/RZ/Interpret/Success/MWait", "The number of times MWAIT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3MWait, "/EM/CPU%d/R3/Interpret/Success/MWait", "The number of times MWAIT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZBtr, "/EM/CPU%d/RZ/Interpret/Success/Btr", "The number of times BTR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Btr, "/EM/CPU%d/R3/Interpret/Success/Btr", "The number of times BTR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZBts, "/EM/CPU%d/RZ/Interpret/Success/Bts", "The number of times BTS was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Bts, "/EM/CPU%d/R3/Interpret/Success/Bts", "The number of times BTS was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZBtc, "/EM/CPU%d/RZ/Interpret/Success/Btc", "The number of times BTC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Btc, "/EM/CPU%d/R3/Interpret/Success/Btc", "The number of times BTC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZCmpXchg, "/EM/CPU%d/RZ/Interpret/Success/CmpXchg", "The number of times CMPXCHG was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3CmpXchg, "/EM/CPU%d/R3/Interpret/Success/CmpXchg", "The number of times CMPXCHG was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZCmpXchg8b, "/EM/CPU%d/RZ/Interpret/Success/CmpXchg8b", "The number of times CMPXCHG8B was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3CmpXchg8b, "/EM/CPU%d/R3/Interpret/Success/CmpXchg8b", "The number of times CMPXCHG8B was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZXAdd, "/EM/CPU%d/RZ/Interpret/Success/XAdd", "The number of times XADD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3XAdd, "/EM/CPU%d/R3/Interpret/Success/XAdd", "The number of times XADD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Rdmsr, "/EM/CPU%d/R3/Interpret/Success/Rdmsr", "The number of times RDMSR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZRdmsr, "/EM/CPU%d/RZ/Interpret/Success/Rdmsr", "The number of times RDMSR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Wrmsr, "/EM/CPU%d/R3/Interpret/Success/Wrmsr", "The number of times WRMSR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZWrmsr, "/EM/CPU%d/RZ/Interpret/Success/Wrmsr", "The number of times WRMSR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3StosWD, "/EM/CPU%d/R3/Interpret/Success/Stoswd", "The number of times STOSWD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZStosWD, "/EM/CPU%d/RZ/Interpret/Success/Stoswd", "The number of times STOSWD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZWbInvd, "/EM/CPU%d/RZ/Interpret/Success/WbInvd", "The number of times WBINVD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3WbInvd, "/EM/CPU%d/R3/Interpret/Success/WbInvd", "The number of times WBINVD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZLmsw, "/EM/CPU%d/RZ/Interpret/Success/Lmsw", "The number of times LMSW was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Lmsw, "/EM/CPU%d/R3/Interpret/Success/Lmsw", "The number of times LMSW was successfully interpreted.");

        EM_REG_COUNTER(&pStats->StatRZInterpretFailed, "/EM/CPU%d/RZ/Interpret/Failed", "The number of times an instruction was not interpreted.");
        EM_REG_COUNTER(&pStats->StatR3InterpretFailed, "/EM/CPU%d/R3/Interpret/Failed", "The number of times an instruction was not interpreted.");

        EM_REG_COUNTER_USED(&pStats->StatRZFailedAnd, "/EM/CPU%d/RZ/Interpret/Failed/And", "The number of times AND was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedAnd, "/EM/CPU%d/R3/Interpret/Failed/And", "The number of times AND was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedCpuId, "/EM/CPU%d/RZ/Interpret/Failed/CpuId", "The number of times CPUID was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedCpuId, "/EM/CPU%d/R3/Interpret/Failed/CpuId", "The number of times CPUID was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedDec, "/EM/CPU%d/RZ/Interpret/Failed/Dec", "The number of times DEC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedDec, "/EM/CPU%d/R3/Interpret/Failed/Dec", "The number of times DEC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedHlt, "/EM/CPU%d/RZ/Interpret/Failed/Hlt", "The number of times HLT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedHlt, "/EM/CPU%d/R3/Interpret/Failed/Hlt", "The number of times HLT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedInc, "/EM/CPU%d/RZ/Interpret/Failed/Inc", "The number of times INC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedInc, "/EM/CPU%d/R3/Interpret/Failed/Inc", "The number of times INC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedInvlPg, "/EM/CPU%d/RZ/Interpret/Failed/InvlPg", "The number of times INVLPG was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedInvlPg, "/EM/CPU%d/R3/Interpret/Failed/InvlPg", "The number of times INVLPG was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedIret, "/EM/CPU%d/RZ/Interpret/Failed/Iret", "The number of times IRET was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedIret, "/EM/CPU%d/R3/Interpret/Failed/Iret", "The number of times IRET was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedLLdt, "/EM/CPU%d/RZ/Interpret/Failed/LLdt", "The number of times LLDT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedLLdt, "/EM/CPU%d/R3/Interpret/Failed/LLdt", "The number of times LLDT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedLIdt, "/EM/CPU%d/RZ/Interpret/Failed/LIdt", "The number of times LIDT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedLIdt, "/EM/CPU%d/R3/Interpret/Failed/LIdt", "The number of times LIDT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedLGdt, "/EM/CPU%d/RZ/Interpret/Failed/LGdt", "The number of times LGDT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedLGdt, "/EM/CPU%d/R3/Interpret/Failed/LGdt", "The number of times LGDT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedMov, "/EM/CPU%d/RZ/Interpret/Failed/Mov", "The number of times MOV was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMov, "/EM/CPU%d/R3/Interpret/Failed/Mov", "The number of times MOV was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedMovCRx, "/EM/CPU%d/RZ/Interpret/Failed/MovCRx", "The number of times MOV CRx was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMovCRx, "/EM/CPU%d/R3/Interpret/Failed/MovCRx", "The number of times MOV CRx was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedMovDRx, "/EM/CPU%d/RZ/Interpret/Failed/MovDRx", "The number of times MOV DRx was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMovDRx, "/EM/CPU%d/R3/Interpret/Failed/MovDRx", "The number of times MOV DRx was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedOr, "/EM/CPU%d/RZ/Interpret/Failed/Or", "The number of times OR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedOr, "/EM/CPU%d/R3/Interpret/Failed/Or", "The number of times OR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedPop, "/EM/CPU%d/RZ/Interpret/Failed/Pop", "The number of times POP was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedPop, "/EM/CPU%d/R3/Interpret/Failed/Pop", "The number of times POP was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedSti, "/EM/CPU%d/RZ/Interpret/Failed/Sti", "The number of times STI was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedSti, "/EM/CPU%d/R3/Interpret/Failed/Sti", "The number of times STI was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedXchg, "/EM/CPU%d/RZ/Interpret/Failed/Xchg", "The number of times XCHG was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedXchg, "/EM/CPU%d/R3/Interpret/Failed/Xchg", "The number of times XCHG was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedXor, "/EM/CPU%d/RZ/Interpret/Failed/Xor", "The number of times XOR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedXor, "/EM/CPU%d/R3/Interpret/Failed/Xor", "The number of times XOR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedMonitor, "/EM/CPU%d/RZ/Interpret/Failed/Monitor", "The number of times MONITOR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMonitor, "/EM/CPU%d/R3/Interpret/Failed/Monitor", "The number of times MONITOR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedMWait, "/EM/CPU%d/RZ/Interpret/Failed/MWait", "The number of times MWAIT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMWait, "/EM/CPU%d/R3/Interpret/Failed/MWait", "The number of times MWAIT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedRdtsc, "/EM/CPU%d/RZ/Interpret/Failed/Rdtsc", "The number of times RDTSC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedRdtsc, "/EM/CPU%d/R3/Interpret/Failed/Rdtsc", "The number of times RDTSC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedRdpmc, "/EM/CPU%d/RZ/Interpret/Failed/Rdpmc", "The number of times RDPMC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedRdpmc, "/EM/CPU%d/R3/Interpret/Failed/Rdpmc", "The number of times RDPMC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedRdmsr, "/EM/CPU%d/RZ/Interpret/Failed/Rdmsr", "The number of times RDMSR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedRdmsr, "/EM/CPU%d/R3/Interpret/Failed/Rdmsr", "The number of times RDMSR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedWrmsr, "/EM/CPU%d/RZ/Interpret/Failed/Wrmsr", "The number of times WRMSR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedWrmsr, "/EM/CPU%d/R3/Interpret/Failed/Wrmsr", "The number of times WRMSR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedLmsw, "/EM/CPU%d/RZ/Interpret/Failed/Lmsw", "The number of times LMSW was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedLmsw, "/EM/CPU%d/R3/Interpret/Failed/Lmsw", "The number of times LMSW was not interpreted.");

        EM_REG_COUNTER_USED(&pStats->StatRZFailedMisc, "/EM/CPU%d/RZ/Interpret/Failed/Misc", "The number of times some misc instruction was encountered.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMisc, "/EM/CPU%d/R3/Interpret/Failed/Misc", "The number of times some misc instruction was encountered.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedAdd, "/EM/CPU%d/RZ/Interpret/Failed/Add", "The number of times ADD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedAdd, "/EM/CPU%d/R3/Interpret/Failed/Add", "The number of times ADD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedAdc, "/EM/CPU%d/RZ/Interpret/Failed/Adc", "The number of times ADC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedAdc, "/EM/CPU%d/R3/Interpret/Failed/Adc", "The number of times ADC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedBtr, "/EM/CPU%d/RZ/Interpret/Failed/Btr", "The number of times BTR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedBtr, "/EM/CPU%d/R3/Interpret/Failed/Btr", "The number of times BTR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedBts, "/EM/CPU%d/RZ/Interpret/Failed/Bts", "The number of times BTS was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedBts, "/EM/CPU%d/R3/Interpret/Failed/Bts", "The number of times BTS was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedBtc, "/EM/CPU%d/RZ/Interpret/Failed/Btc", "The number of times BTC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedBtc, "/EM/CPU%d/R3/Interpret/Failed/Btc", "The number of times BTC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedCli, "/EM/CPU%d/RZ/Interpret/Failed/Cli", "The number of times CLI was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedCli, "/EM/CPU%d/R3/Interpret/Failed/Cli", "The number of times CLI was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedCmpXchg, "/EM/CPU%d/RZ/Interpret/Failed/CmpXchg", "The number of times CMPXCHG was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedCmpXchg, "/EM/CPU%d/R3/Interpret/Failed/CmpXchg", "The number of times CMPXCHG was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedCmpXchg8b, "/EM/CPU%d/RZ/Interpret/Failed/CmpXchg8b", "The number of times CMPXCHG8B was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedCmpXchg8b, "/EM/CPU%d/R3/Interpret/Failed/CmpXchg8b", "The number of times CMPXCHG8B was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedXAdd, "/EM/CPU%d/RZ/Interpret/Failed/XAdd", "The number of times XADD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedXAdd, "/EM/CPU%d/R3/Interpret/Failed/XAdd", "The number of times XADD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedMovNTPS, "/EM/CPU%d/RZ/Interpret/Failed/MovNTPS", "The number of times MOVNTPS was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMovNTPS, "/EM/CPU%d/R3/Interpret/Failed/MovNTPS", "The number of times MOVNTPS was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedStosWD, "/EM/CPU%d/RZ/Interpret/Failed/StosWD", "The number of times STOSWD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedStosWD, "/EM/CPU%d/R3/Interpret/Failed/StosWD", "The number of times STOSWD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedSub, "/EM/CPU%d/RZ/Interpret/Failed/Sub", "The number of times SUB was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedSub, "/EM/CPU%d/R3/Interpret/Failed/Sub", "The number of times SUB was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedWbInvd, "/EM/CPU%d/RZ/Interpret/Failed/WbInvd", "The number of times WBINVD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedWbInvd, "/EM/CPU%d/R3/Interpret/Failed/WbInvd", "The number of times WBINVD was not interpreted.");

        EM_REG_COUNTER_USED(&pStats->StatRZFailedUserMode, "/EM/CPU%d/RZ/Interpret/Failed/UserMode", "The number of rejections because of CPL.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedUserMode, "/EM/CPU%d/R3/Interpret/Failed/UserMode", "The number of rejections because of CPL.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedPrefix, "/EM/CPU%d/RZ/Interpret/Failed/Prefix", "The number of rejections because of a prefix.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedPrefix, "/EM/CPU%d/R3/Interpret/Failed/Prefix", "The number of rejections because of a prefix.");

        EM_REG_COUNTER_USED(&pStats->StatCli, "/EM/CPU%d/R3/PrivInst/Cli", "Number of cli instructions.");
        EM_REG_COUNTER_USED(&pStats->StatSti, "/EM/CPU%d/R3/PrivInst/Sti", "Number of sti instructions.");
        EM_REG_COUNTER_USED(&pStats->StatIn, "/EM/CPU%d/R3/PrivInst/In", "Number of in instructions.");
        EM_REG_COUNTER_USED(&pStats->StatOut, "/EM/CPU%d/R3/PrivInst/Out", "Number of out instructions.");
        EM_REG_COUNTER_USED(&pStats->StatHlt, "/EM/CPU%d/R3/PrivInst/Hlt", "Number of hlt instructions not handled in GC because of PATM.");
        EM_REG_COUNTER_USED(&pStats->StatInvlpg, "/EM/CPU%d/R3/PrivInst/Invlpg", "Number of invlpg instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMisc, "/EM/CPU%d/R3/PrivInst/Misc", "Number of misc. instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[0], "/EM/CPU%d/R3/PrivInst/Mov CR0, X", "Number of mov CR0 write instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[1], "/EM/CPU%d/R3/PrivInst/Mov CR1, X", "Number of mov CR1 write instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[2], "/EM/CPU%d/R3/PrivInst/Mov CR2, X", "Number of mov CR2 write instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[3], "/EM/CPU%d/R3/PrivInst/Mov CR3, X", "Number of mov CR3 write instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[4], "/EM/CPU%d/R3/PrivInst/Mov CR4, X", "Number of mov CR4 write instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovReadCR[0], "/EM/CPU%d/R3/PrivInst/Mov X, CR0", "Number of mov CR0 read instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovReadCR[1], "/EM/CPU%d/R3/PrivInst/Mov X, CR1", "Number of mov CR1 read instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovReadCR[2], "/EM/CPU%d/R3/PrivInst/Mov X, CR2", "Number of mov CR2 read instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovReadCR[3], "/EM/CPU%d/R3/PrivInst/Mov X, CR3", "Number of mov CR3 read instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovReadCR[4], "/EM/CPU%d/R3/PrivInst/Mov X, CR4", "Number of mov CR4 read instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovDRx, "/EM/CPU%d/R3/PrivInst/MovDRx", "Number of mov DRx instructions.");
        EM_REG_COUNTER_USED(&pStats->StatIret, "/EM/CPU%d/R3/PrivInst/Iret", "Number of iret instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovLgdt, "/EM/CPU%d/R3/PrivInst/Lgdt", "Number of lgdt instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovLidt, "/EM/CPU%d/R3/PrivInst/Lidt", "Number of lidt instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovLldt, "/EM/CPU%d/R3/PrivInst/Lldt", "Number of lldt instructions.");
        EM_REG_COUNTER_USED(&pStats->StatSysEnter, "/EM/CPU%d/R3/PrivInst/Sysenter", "Number of sysenter instructions.");
        EM_REG_COUNTER_USED(&pStats->StatSysExit, "/EM/CPU%d/R3/PrivInst/Sysexit", "Number of sysexit instructions.");
        EM_REG_COUNTER_USED(&pStats->StatSysCall, "/EM/CPU%d/R3/PrivInst/Syscall", "Number of syscall instructions.");
        EM_REG_COUNTER_USED(&pStats->StatSysRet, "/EM/CPU%d/R3/PrivInst/Sysret", "Number of sysret instructions.");

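        /* The per-address cli statistics tree is built lazily by
           emR3RecordCli() the first time a given cli address is seen;
           only the grand total is registered up front. */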
        EM_REG_COUNTER(&pVCpu->em.s.StatTotalClis, "/EM/CPU%d/Cli/Total", "Total number of cli instructions executed.");
        pVCpu->em.s.pCliStatTree = 0;

        /* these should be considered for release statistics. */
        EM_REG_COUNTER(&pVCpu->em.s.StatIOEmu, "/PROF/CPU%d/EM/Emulation/IO", "Profiling of emR3RawExecuteIOInstruction.");
        EM_REG_COUNTER(&pVCpu->em.s.StatPrivEmu, "/PROF/CPU%d/EM/Emulation/Priv", "Profiling of emR3RawPrivileged.");
        EM_REG_COUNTER(&pVCpu->em.s.StatMiscEmu, "/PROF/CPU%d/EM/Emulation/Misc", "Profiling of emR3RawExecuteInstruction.");
        EM_REG_PROFILE(&pVCpu->em.s.StatHwAccEntry, "/PROF/CPU%d/EM/HwAccEnter", "Profiling Hardware Accelerated Mode entry overhead.");
        EM_REG_PROFILE(&pVCpu->em.s.StatHwAccExec, "/PROF/CPU%d/EM/HwAccExec", "Profiling Hardware Accelerated Mode execution.");
        EM_REG_PROFILE(&pVCpu->em.s.StatREMEmu, "/PROF/CPU%d/EM/REMEmuSingle", "Profiling single instruction REM execution.");
        EM_REG_PROFILE(&pVCpu->em.s.StatREMExec, "/PROF/CPU%d/EM/REMExec", "Profiling REM execution.");
        EM_REG_PROFILE(&pVCpu->em.s.StatREMSync, "/PROF/CPU%d/EM/REMSync", "Profiling REM context syncing.");
        EM_REG_PROFILE(&pVCpu->em.s.StatRAWEntry, "/PROF/CPU%d/EM/RAWEnter", "Profiling Raw Mode entry overhead.");
        EM_REG_PROFILE(&pVCpu->em.s.StatRAWExec, "/PROF/CPU%d/EM/RAWExec", "Profiling Raw Mode execution.");
        EM_REG_PROFILE(&pVCpu->em.s.StatRAWTail, "/PROF/CPU%d/EM/RAWTail", "Profiling Raw Mode tail overhead.");

#endif /* VBOX_WITH_STATISTICS */

        EM_REG_COUNTER(&pVCpu->em.s.StatForcedActions, "/PROF/CPU%d/EM/ForcedActions", "Profiling forced action execution.");
        EM_REG_COUNTER(&pVCpu->em.s.StatHalted, "/PROF/CPU%d/EM/Halted", "Profiling halted state (VMR3WaitHalted).");
        EM_REG_COUNTER(&pVCpu->em.s.StatREMTotal, "/PROF/CPU%d/EM/REMTotal", "Profiling emR3RemExecute (excluding FFs).");
        EM_REG_COUNTER(&pVCpu->em.s.StatRAWTotal, "/PROF/CPU%d/EM/RAWTotal", "Profiling emR3RawExecute (excluding FFs).");

        EM_REG_PROFILE_ADV(&pVCpu->em.s.StatTotal, "/PROF/CPU%d/EM/Total", "Profiling EMR3ExecuteVM.");
    }

    return VINF_SUCCESS;
}


/**
 * Initializes the per-VCPU EM.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 */
VMMR3DECL(int) EMR3InitCPU(PVM pVM)
{
    LogFlow(("EMR3InitCPU\n"));
    return VINF_SUCCESS;
}
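/* Currently a stub: the per-VCPU state is set up in the loop in EMR3Init()
 * above, so there is nothing left to do here. */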


/**
 * Applies relocations to data and code managed by this
 * component. This function will be called at init and
 * whenever the VMM needs to relocate itself inside the GC.
 *
 * @param   pVM     The VM.
 */
VMMR3DECL(void) EMR3Relocate(PVM pVM)
{
    LogFlow(("EMR3Relocate\n"));
    for (unsigned i = 0; i < pVM->cCPUs; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];

        if (pVCpu->em.s.pStatsR3)
            pVCpu->em.s.pStatsRC = MMHyperR3ToRC(pVM, pVCpu->em.s.pStatsR3);
    }
}


/**
 * Reset notification.
 *
 * @param   pVM     The VM handle.
 */
VMMR3DECL(void) EMR3Reset(PVM pVM)
{
    LogFlow(("EMR3Reset: \n"));
    for (unsigned i = 0; i < pVM->cCPUs; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];

        pVCpu->em.s.fForceRAW = false;
    }
}


/**
 * Terminates the EM.
 *
 * Termination means cleaning up and freeing all resources;
 * the VM itself is at this point powered off or suspended.
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 */
VMMR3DECL(int) EMR3Term(PVM pVM)
{
    AssertMsg(pVM->em.s.offVM, ("bad init order!\n"));

    PDMR3CritSectDelete(&pVM->em.s.CritSectREM);
    return VINF_SUCCESS;
}

/**
 * Terminates the per-VCPU EM.
 *
 * Termination means cleaning up and freeing all resources;
 * the VM itself is at this point powered off or suspended.
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 */
VMMR3DECL(int) EMR3TermCPU(PVM pVM)
{
    return VINF_SUCCESS;
}

/**
 * Execute state save operation.
 *
 * @returns VBox status code.
 * @param   pVM     VM Handle.
 * @param   pSSM    SSM operation handle.
 */
static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM)
{
    for (unsigned i = 0; i < pVM->cCPUs; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];

        int rc = SSMR3PutBool(pSSM, pVCpu->em.s.fForceRAW);
        AssertRCReturn(rc, rc);
    }
    return VINF_SUCCESS;
}


/**
 * Execute state load operation.
 *
 * @returns VBox status code.
 * @param   pVM         VM Handle.
 * @param   pSSM        SSM operation handle.
 * @param   u32Version  Data layout version.
 */
static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version)
{
    int rc = VINF_SUCCESS;

    /*
     * Validate version.
     */
    if (u32Version != EM_SAVED_STATE_VERSION)
    {
        AssertMsgFailed(("emR3Load: Invalid version u32Version=%d (current %d)!\n", u32Version, EM_SAVED_STATE_VERSION));
        return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
    }

    /*
     * Load the saved state.
     */
    for (unsigned i = 0; i < pVM->cCPUs; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];

        rc = SSMR3GetBool(pSSM, &pVCpu->em.s.fForceRAW);
        if (RT_FAILURE(rc))
            pVCpu->em.s.fForceRAW = false;

        Assert(!pVCpu->em.s.pCliStatTree);
    }
    return rc;
}
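/* Note: the "em" saved-state unit thus carries exactly one bool (fForceRAW)
 * per VCPU; emR3Save() and emR3Load() above must be kept in sync. */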


/**
 * Enables or disables a set of raw-mode execution modes.
 *
 * @returns VINF_SUCCESS on success.
 * @returns VINF_EM_RESCHEDULE if a rescheduling might be required.
 * @returns VERR_INVALID_PARAMETER on an invalid enmMode value.
 *
 * @param   pVM         The VM to operate on.
 * @param   enmMode     The execution mode change.
 * @thread  The emulation thread.
 */
VMMR3DECL(int) EMR3RawSetMode(PVM pVM, EMRAWMODE enmMode)
{
    switch (enmMode)
    {
        case EMRAW_NONE:
            pVM->fRawR3Enabled = false;
            pVM->fRawR0Enabled = false;
            break;
        case EMRAW_RING3_ENABLE:
            pVM->fRawR3Enabled = true;
            break;
        case EMRAW_RING3_DISABLE:
            pVM->fRawR3Enabled = false;
            break;
        case EMRAW_RING0_ENABLE:
            pVM->fRawR0Enabled = true;
            break;
        case EMRAW_RING0_DISABLE:
            pVM->fRawR0Enabled = false;
            break;
        default:
            AssertMsgFailed(("Invalid enmMode=%d\n", enmMode));
            return VERR_INVALID_PARAMETER;
    }
    Log(("EMR3RawSetMode: fRawR3Enabled=%RTbool fRawR0Enabled=%RTbool\n",
         pVM->fRawR3Enabled, pVM->fRawR0Enabled));
    return pVM->aCpus[0].em.s.enmState == EMSTATE_RAW ? VINF_EM_RESCHEDULE : VINF_SUCCESS;
}
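/* Illustrative use (hypothetical call site): a caller could force the
 * scheduler away from ring-3 raw mode and then honour a possible reschedule
 * request:
 *
 *     int rc = EMR3RawSetMode(pVM, EMRAW_RING3_DISABLE);
 *     // rc may be VINF_EM_RESCHEDULE when CPU 0 is currently in EMSTATE_RAW
 */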


/**
 * Raise a fatal error.
 *
 * Safely terminate the VM with full state report and stuff. This function
 * will naturally never return.
 *
 * @param   pVCpu       VMCPU handle.
 * @param   rc          VBox status code.
 */
VMMR3DECL(void) EMR3FatalError(PVMCPU pVCpu, int rc)
{
    longjmp(pVCpu->em.s.u.FatalLongJump, rc);
    AssertReleaseMsgFailed(("longjmp returned!\n"));
}
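/* Note: the FatalLongJump buffer is assumed to have been armed with setjmp()
 * by the outer execution loop before guest code was first run, so the
 * longjmp() above unwinds straight back into EMR3ExecuteVM(). */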


/**
 * Gets the EM state name.
 *
 * @returns Pointer to the read-only state name.
 * @param   enmState    The state.
 */
VMMR3DECL(const char *) EMR3GetStateName(EMSTATE enmState)
{
    switch (enmState)
    {
        case EMSTATE_NONE:              return "EMSTATE_NONE";
        case EMSTATE_RAW:               return "EMSTATE_RAW";
        case EMSTATE_HWACC:             return "EMSTATE_HWACC";
        case EMSTATE_REM:               return "EMSTATE_REM";
        case EMSTATE_PARAV:             return "EMSTATE_PARAV";
        case EMSTATE_HALTED:            return "EMSTATE_HALTED";
        case EMSTATE_WAIT_SIPI:         return "EMSTATE_WAIT_SIPI";
        case EMSTATE_SUSPENDED:         return "EMSTATE_SUSPENDED";
        case EMSTATE_TERMINATING:       return "EMSTATE_TERMINATING";
        case EMSTATE_DEBUG_GUEST_RAW:   return "EMSTATE_DEBUG_GUEST_RAW";
        case EMSTATE_DEBUG_GUEST_REM:   return "EMSTATE_DEBUG_GUEST_REM";
        case EMSTATE_DEBUG_HYPER:       return "EMSTATE_DEBUG_HYPER";
        case EMSTATE_GURU_MEDITATION:   return "EMSTATE_GURU_MEDITATION";
        default:                        return "Unknown!";
    }
}
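/* Illustrative use (hypothetical call site): the name is mostly handy when
 * logging state transitions, e.g.:
 *
 *     Log(("EM: %s -> %s\n", EMR3GetStateName(enmOldState),
 *          EMR3GetStateName(enmNewState)));
 */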


#ifdef VBOX_WITH_STATISTICS
/**
 * Just a braindead function to keep track of cli addresses.
 * @param   pVM         VM handle.
 * @param   pVCpu       VMCPU handle.
 * @param   GCPtrInstr  The EIP of the cli instruction.
 */
static void emR3RecordCli(PVM pVM, PVMCPU pVCpu, RTGCPTR GCPtrInstr)
{
    PCLISTAT pRec;

    pRec = (PCLISTAT)RTAvlPVGet(&pVCpu->em.s.pCliStatTree, (AVLPVKEY)GCPtrInstr);
    if (!pRec)
    {
        /* New cli instruction; insert into the tree. */
        pRec = (PCLISTAT)MMR3HeapAllocZ(pVM, MM_TAG_EM, sizeof(*pRec));
        Assert(pRec);
        if (!pRec)
            return;
        pRec->Core.Key = (AVLPVKEY)GCPtrInstr;

        char szCliStatName[32];
        RTStrPrintf(szCliStatName, sizeof(szCliStatName), "/EM/Cli/0x%RGv", GCPtrInstr);
        STAM_REG(pVM, &pRec->Counter, STAMTYPE_COUNTER, szCliStatName, STAMUNIT_OCCURENCES, "Number of times cli was executed.");

        bool fRc = RTAvlPVInsert(&pVCpu->em.s.pCliStatTree, &pRec->Core);
        Assert(fRc); NOREF(fRc);
    }
    STAM_COUNTER_INC(&pRec->Counter);
    STAM_COUNTER_INC(&pVCpu->em.s.StatTotalClis);
}
#endif /* VBOX_WITH_STATISTICS */


/**
 * Debug loop.
 *
 * @returns VBox status code for EM.
 * @param   pVM         VM handle.
 * @param   pVCpu       VMCPU handle.
 * @param   rc          Current EM VBox status code.
 */
static int emR3Debug(PVM pVM, PVMCPU pVCpu, int rc)
{
    for (;;)
    {
        Log(("emR3Debug: rc=%Rrc\n", rc));
        const int rcLast = rc;

        /*
         * Debug related RC.
         */
        switch (rc)
        {
            /*
             * Single step an instruction.
             */
            case VINF_EM_DBG_STEP:
                if (    pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_RAW
                    ||  pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER
                    ||  pVCpu->em.s.fForceRAW /* paranoia */)
                    rc = emR3RawStep(pVM, pVCpu);
                else
                {
                    Assert(pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_REM);
                    rc = emR3RemStep(pVM, pVCpu);
                }
                break;

            /*
             * Simple events: stepped, breakpoint, stop/assertion.
             */
            case VINF_EM_DBG_STEPPED:
                rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED);
                break;

            case VINF_EM_DBG_BREAKPOINT:
                rc = DBGFR3EventBreakpoint(pVM, DBGFEVENT_BREAKPOINT);
                break;

            case VINF_EM_DBG_STOP:
                rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, NULL, 0, NULL, NULL);
                break;

            case VINF_EM_DBG_HYPER_STEPPED:
                rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED_HYPER);
                break;

            case VINF_EM_DBG_HYPER_BREAKPOINT:
                rc = DBGFR3EventBreakpoint(pVM, DBGFEVENT_BREAKPOINT_HYPER);
                break;

            case VINF_EM_DBG_HYPER_ASSERTION:
                RTPrintf("\nVINF_EM_DBG_HYPER_ASSERTION:\n%s%s\n", VMMR3GetRZAssertMsg1(pVM), VMMR3GetRZAssertMsg2(pVM));
                rc = DBGFR3EventAssertion(pVM, DBGFEVENT_ASSERTION_HYPER, VMMR3GetRZAssertMsg1(pVM), VMMR3GetRZAssertMsg2(pVM));
                break;

            /*
             * Guru meditation.
             */
            case VERR_VMM_RING0_ASSERTION: /** @todo Make a guru meditation event! */
                rc = DBGFR3EventSrc(pVM, DBGFEVENT_FATAL_ERROR, "VERR_VMM_RING0_ASSERTION", 0, NULL, NULL);
                break;
            case VERR_REM_TOO_MANY_TRAPS: /** @todo Make a guru meditation event! */
                rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, "VERR_REM_TOO_MANY_TRAPS", 0, NULL, NULL);
                break;

            default: /** @todo don't use default for guru, but make special errors code! */
                rc = DBGFR3Event(pVM, DBGFEVENT_FATAL_ERROR);
                break;
        }

        /*
         * Process the result.
         */
        do
        {
            switch (rc)
            {
                /*
                 * Continue the debugging loop.
                 */
                case VINF_EM_DBG_STEP:
                case VINF_EM_DBG_STOP:
                case VINF_EM_DBG_STEPPED:
                case VINF_EM_DBG_BREAKPOINT:
                case VINF_EM_DBG_HYPER_STEPPED:
                case VINF_EM_DBG_HYPER_BREAKPOINT:
                case VINF_EM_DBG_HYPER_ASSERTION:
                    break;

                /*
                 * Resuming execution (in some form) has to be done here if we got
                 * a hypervisor debug event.
                 */
                case VINF_SUCCESS:
                case VINF_EM_RESUME:
                case VINF_EM_SUSPEND:
                case VINF_EM_RESCHEDULE:
                case VINF_EM_RESCHEDULE_RAW:
                case VINF_EM_RESCHEDULE_REM:
                case VINF_EM_HALT:
                    if (pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER)
                    {
                        rc = emR3RawResumeHyper(pVM, pVCpu);
                        if (rc != VINF_SUCCESS && RT_SUCCESS(rc))
                            continue;
                    }
                    if (rc == VINF_SUCCESS)
                        rc = VINF_EM_RESCHEDULE;
                    return rc;

                /*
                 * The debugger isn't attached.
                 * We'll simply turn the thing off since that's the easiest thing to do.
                 */
                case VERR_DBGF_NOT_ATTACHED:
                    switch (rcLast)
                    {
                        case VINF_EM_DBG_HYPER_STEPPED:
                        case VINF_EM_DBG_HYPER_BREAKPOINT:
                        case VINF_EM_DBG_HYPER_ASSERTION:
                        case VERR_TRPM_PANIC:
                        case VERR_TRPM_DONT_PANIC:
                        case VERR_VMM_RING0_ASSERTION:
                            return rcLast;
                    }
                    return VINF_EM_OFF;

                /*
                 * Status codes terminating the VM in one or another sense.
                 */
                case VINF_EM_TERMINATE:
                case VINF_EM_OFF:
                case VINF_EM_RESET:
                case VINF_EM_NO_MEMORY:
                case VINF_EM_RAW_STALE_SELECTOR:
                case VINF_EM_RAW_IRET_TRAP:
                case VERR_TRPM_PANIC:
                case VERR_TRPM_DONT_PANIC:
                case VERR_VMM_RING0_ASSERTION:
                case VERR_INTERNAL_ERROR:
                case VERR_INTERNAL_ERROR_2:
                case VERR_INTERNAL_ERROR_3:
                case VERR_INTERNAL_ERROR_4:
                case VERR_INTERNAL_ERROR_5:
                case VERR_IPE_UNEXPECTED_STATUS:
                case VERR_IPE_UNEXPECTED_INFO_STATUS:
                case VERR_IPE_UNEXPECTED_ERROR_STATUS:
                    return rc;

                /*
                 * The rest is unexpected, and will keep us here.
                 */
                default:
                    AssertMsgFailed(("Unexpected rc %Rrc!\n", rc));
                    break;
            }
        } while (false);
    } /* debug for ever */
}

/**
 * Locks REM execution to a single VCpu
 *
 * @param   pVM         VM handle.
 */
static void emR3RemLock(PVM pVM)
{
    int rc = PDMCritSectEnter(&pVM->em.s.CritSectREM, VERR_SEM_BUSY);
    AssertMsg(rc == VINF_SUCCESS, ("%Rrc\n", rc));
}

/**
 * Unlocks REM execution
 *
 * @param   pVM         VM handle.
 */
static void emR3RemUnlock(PVM pVM)
{
    PDMCritSectLeave(&pVM->em.s.CritSectREM);
}
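/* Callers pair emR3RemLock/emR3RemUnlock around REMR3State(), REMR3Run() and
 * REMR3StateBack(); see emR3RemStep() and emR3RemExecute() below. */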

/**
 * Steps recompiled code.
 *
 * @returns VBox status code. The most important ones are: VINF_EM_STEP_EVENT,
 *          VINF_EM_RESCHEDULE, VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
 *
 * @param   pVM         VM handle.
 * @param   pVCpu       VMCPU handle.
 */
static int emR3RemStep(PVM pVM, PVMCPU pVCpu)
{
    LogFlow(("emR3RemStep: cs:eip=%04x:%08x\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));

    emR3RemLock(pVM);

    /*
     * Switch to REM, step instruction, switch back.
     */
    int rc = REMR3State(pVM, pVCpu);
    if (RT_SUCCESS(rc))
    {
        rc = REMR3Step(pVM, pVCpu);
        REMR3StateBack(pVM, pVCpu);
    }
    emR3RemUnlock(pVM);

    LogFlow(("emR3RemStep: returns %Rrc cs:eip=%04x:%08x\n", rc, CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
    return rc;
}


/**
 * Executes recompiled code.
 *
 * This function contains the recompiler version of the inner
 * execution loop (the outer loop being in EMR3ExecuteVM()).
 *
 * @returns VBox status code. The most important ones are: VINF_EM_RESCHEDULE,
 *          VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
 *
 * @param   pVM         VM handle.
 * @param   pVCpu       VMCPU handle.
 * @param   pfFFDone    Where to store an indicator telling whether or not
 *                      FFs were done before returning.
 *
 */
static int emR3RemExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
{
#ifdef LOG_ENABLED
    PCPUMCTX pCtx = pVCpu->em.s.pCtx;
    uint32_t cpl = CPUMGetGuestCPL(pVCpu, CPUMCTX2CORE(pCtx));

    if (pCtx->eflags.Bits.u1VM)
        Log(("EMV86: %04X:%08X IF=%d\n", pCtx->cs, pCtx->eip, pCtx->eflags.Bits.u1IF));
    else
        Log(("EMR%d: %04X:%08X ESP=%08X IF=%d CR0=%x\n", cpl, pCtx->cs, pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF, (uint32_t)pCtx->cr0));
#endif
    STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatREMTotal, a);

#if defined(VBOX_STRICT) && defined(DEBUG_bird)
    AssertMsg(   VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)
              || !MMHyperIsInsideArea(pVM, CPUMGetGuestEIP(pVCpu)),  /** @todo #1419 - get flat address. */
              ("cs:eip=%RX16:%RX32\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
#endif

    /* Big lock, but you are not supposed to own any lock when coming in here. */
    emR3RemLock(pVM);

    /*
     * Spin till we get a forced action which returns anything but VINF_SUCCESS
     * or the REM suggests raw-mode execution.
     */
    *pfFFDone = false;
    bool    fInREMState = false;
    int     rc          = VINF_SUCCESS;

    /* Flush the recompiler TLB if the VCPU has changed. */
    if (pVM->em.s.idLastRemCpu != pVCpu->idCpu)
        REMFlushTBs(pVM);
    pVM->em.s.idLastRemCpu = pVCpu->idCpu;

    for (;;)
    {
        /*
         * Update REM state if not already in sync.
         */
        if (!fInREMState)
        {
            STAM_PROFILE_START(&pVCpu->em.s.StatREMSync, b);
            rc = REMR3State(pVM, pVCpu);
            STAM_PROFILE_STOP(&pVCpu->em.s.StatREMSync, b);
            if (RT_FAILURE(rc))
                break;
            fInREMState = true;

            /*
             * We might have missed the raising of VMREQ, TIMER and some other
             * important FFs while we were busy switching the state. So, check again.
             */
            if (    VM_FF_ISPENDING(pVM, VM_FF_REQUEST | VM_FF_TIMER | VM_FF_PDM_QUEUES | VM_FF_DBGF | VM_FF_TERMINATE | VM_FF_RESET)
                ||  VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_REQUEST))
            {
                LogFlow(("emR3RemExecute: Skipping run, because FF is set. %#x\n", pVM->fGlobalForcedActions));
                goto l_REMDoForcedActions;
            }
        }


        /*
         * Execute REM.
         */
        STAM_PROFILE_START(&pVCpu->em.s.StatREMExec, c);
        rc = REMR3Run(pVM, pVCpu);
        STAM_PROFILE_STOP(&pVCpu->em.s.StatREMExec, c);


        /*
         * Deal with high priority post execution FFs before doing anything else.
         */
        if (    VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_POST_MASK)
            ||  VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_MASK))
            rc = emR3HighPriorityPostForcedActions(pVM, pVCpu, rc);

        /*
         * Process the returned status code.
         * (Try keep this short! Call functions!)
         */
        if (rc != VINF_SUCCESS)
        {
            if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
                break;
            if (rc != VINF_REM_INTERRUPED_FF)
            {
                /*
                 * Anything which is not known to us means an internal error
                 * and the termination of the VM!
                 */
                AssertMsg(rc == VERR_REM_TOO_MANY_TRAPS, ("Unknown GC return code: %Rra\n", rc));
                break;
            }
        }


        /*
         * Check and execute forced actions.
         * Sync back the VM state before calling any of these.
         */
#ifdef VBOX_HIGH_RES_TIMERS_HACK
        TMTimerPoll(pVM);
#endif
        if (    VM_FF_ISPENDING(pVM, VM_FF_ALL_BUT_RAW_MASK)
            ||  VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_ALL_BUT_RAW_MASK & ~(VMCPU_FF_CSAM_PENDING_ACTION | VMCPU_FF_CSAM_SCAN_PAGE)))
        {
l_REMDoForcedActions:
            if (fInREMState)
            {
                STAM_PROFILE_START(&pVCpu->em.s.StatREMSync, d);
                REMR3StateBack(pVM, pVCpu);
                STAM_PROFILE_STOP(&pVCpu->em.s.StatREMSync, d);
                fInREMState = false;
            }
            STAM_REL_PROFILE_ADV_SUSPEND(&pVCpu->em.s.StatREMTotal, a);
            rc = emR3ForcedActions(pVM, pVCpu, rc);
            STAM_REL_PROFILE_ADV_RESUME(&pVCpu->em.s.StatREMTotal, a);
            if (    rc != VINF_SUCCESS
                &&  rc != VINF_EM_RESCHEDULE_REM)
            {
                *pfFFDone = true;
                break;
            }
        }

    } /* The Inner Loop, recompiled execution mode version. */


    /*
     * Returning. Sync back the VM state if required.
     */
    if (fInREMState)
    {
        STAM_PROFILE_START(&pVCpu->em.s.StatREMSync, e);
        REMR3StateBack(pVM, pVCpu);
        STAM_PROFILE_STOP(&pVCpu->em.s.StatREMSync, e);
    }
    emR3RemUnlock(pVM);

    STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatREMTotal, a);
    return rc;
}


/**
 * Resumes executing hypervisor after a debug event.
 *
 * This is kind of special since our current guest state is
 * potentially out of sync.
 *
 * @returns VBox status code.
 * @param   pVM     The VM handle.
 * @param   pVCpu   The VMCPU handle.
 */
static int emR3RawResumeHyper(PVM pVM, PVMCPU pVCpu)
{
    int         rc;
    PCPUMCTX    pCtx = pVCpu->em.s.pCtx;
    Assert(pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER);
    Log(("emR3RawResumeHyper: cs:eip=%RTsel:%RGr efl=%RGr\n", pCtx->cs, pCtx->eip, pCtx->eflags));

    /*
     * Resume execution.
     */
    CPUMRawEnter(pVCpu, NULL);
    CPUMSetHyperEFlags(pVCpu, CPUMGetHyperEFlags(pVCpu) | X86_EFL_RF);
    rc = VMMR3ResumeHyper(pVM, pVCpu);
    Log(("emR3RawResumeHyper: cs:eip=%RTsel:%RGr efl=%RGr - returned from GC with rc=%Rrc\n", pCtx->cs, pCtx->eip, pCtx->eflags, rc));
    rc = CPUMRawLeave(pVCpu, NULL, rc);
    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_RESUME_GUEST_MASK);

    /*
     * Deal with the return code.
     */
    rc = emR3HighPriorityPostForcedActions(pVM, pVCpu, rc);
    rc = emR3RawHandleRC(pVM, pVCpu, pCtx, rc);
    rc = emR3RawUpdateForceFlag(pVM, pVCpu, pCtx, rc);
    return rc;
}


/**
 * Steps rawmode.
 *
 * @returns VBox status code.
 * @param   pVM     The VM handle.
 * @param   pVCpu   The VMCPU handle.
 */
static int emR3RawStep(PVM pVM, PVMCPU pVCpu)
{
    Assert(   pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER
           || pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_RAW
           || pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_REM);
    int         rc;
    PCPUMCTX    pCtx   = pVCpu->em.s.pCtx;
    bool        fGuest = pVCpu->em.s.enmState != EMSTATE_DEBUG_HYPER;
#ifndef DEBUG_sandervl
    Log(("emR3RawStep: cs:eip=%RTsel:%RGr efl=%RGr\n", fGuest ? CPUMGetGuestCS(pVCpu) : CPUMGetHyperCS(pVCpu),
         fGuest ? CPUMGetGuestEIP(pVCpu) : CPUMGetHyperEIP(pVCpu), fGuest ? CPUMGetGuestEFlags(pVCpu) : CPUMGetHyperEFlags(pVCpu)));
#endif
    if (fGuest)
    {
        /*
         * Check vital forced actions, but ignore pending interrupts and timers.
         */
        if (    VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_RAW_MASK)
            ||  VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK))
        {
            rc = emR3RawForcedActions(pVM, pVCpu, pCtx);
            if (rc != VINF_SUCCESS)
                return rc;
        }

        /*
         * Set flags for single stepping.
         */
        CPUMSetGuestEFlags(pVCpu, CPUMGetGuestEFlags(pVCpu) | X86_EFL_TF | X86_EFL_RF);
    }
    else
        CPUMSetHyperEFlags(pVCpu, CPUMGetHyperEFlags(pVCpu) | X86_EFL_TF | X86_EFL_RF);

    /*
     * Single step.
     * We do not start time or anything; if anything we should just do a few nanoseconds.
1143 */
1144 CPUMRawEnter(pVCpu, NULL);
1145 do
1146 {
1147 if (pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER)
1148 rc = VMMR3ResumeHyper(pVM, pVCpu);
1149 else
1150 rc = VMMR3RawRunGC(pVM, pVCpu);
1151#ifndef DEBUG_sandervl
1152 Log(("emR3RawStep: cs:eip=%RTsel:%RGr efl=%RGr - GC rc %Rrc\n", fGuest ? CPUMGetGuestCS(pVCpu) : CPUMGetHyperCS(pVCpu),
1153 fGuest ? CPUMGetGuestEIP(pVCpu) : CPUMGetHyperEIP(pVCpu), fGuest ? CPUMGetGuestEFlags(pVCpu) : CPUMGetHyperEFlags(pVCpu), rc));
1154#endif
1155 } while ( rc == VINF_SUCCESS
1156 || rc == VINF_EM_RAW_INTERRUPT);
1157 rc = CPUMRawLeave(pVCpu, NULL, rc);
1158 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_RESUME_GUEST_MASK);
1159
1160 /*
1161 * Make sure the trap flag is cleared.
1162 * (Too bad if the guest is trying to single step too.)
1163 */
1164 if (fGuest)
1165 CPUMSetGuestEFlags(pVCpu, CPUMGetGuestEFlags(pVCpu) & ~X86_EFL_TF);
1166 else
1167 CPUMSetHyperEFlags(pVCpu, CPUMGetHyperEFlags(pVCpu) & ~X86_EFL_TF);
1168
1169 /*
1170 * Deal with the return codes.
1171 */
1172 rc = emR3HighPriorityPostForcedActions(pVM, pVCpu, rc);
1173 rc = emR3RawHandleRC(pVM, pVCpu, pCtx, rc);
1174 rc = emR3RawUpdateForceFlag(pVM, pVCpu, pCtx, rc);
1175 return rc;
1176}
1177
1178
1179#ifdef DEBUG
1180
1181/**
1182 * Single steps hardware accelerated mode.
1183 *
1184 * @returns VBox status code.
1185 * @param pVM The VM handle.
1186 * @param pVCpu The VMCPU handle.
1187 */
1188static int emR3HwAccStep(PVM pVM, PVMCPU pVCpu)
1189{
1190 Assert(pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_HWACC);
1191
1192 int rc;
1193 PCPUMCTX pCtx = pVCpu->em.s.pCtx;
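/* These selector/IDT sync FFs aren't relevant in hardware accelerated mode (cf. emR3HwAccExecute); clear them before stepping. */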
1194 VMCPU_FF_CLEAR(pVCpu, (VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT | VMCPU_FF_TRPM_SYNC_IDT | VMCPU_FF_SELM_SYNC_TSS));
1195
1196 /*
1197 * Check vital forced actions, but ignore pending interrupts and timers.
1198 */
1199 if ( VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_RAW_MASK)
1200 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK))
1201 {
1202 rc = emR3RawForcedActions(pVM, pVCpu, pCtx);
1203 if (rc != VINF_SUCCESS)
1204 return rc;
1205 }
1206 /*
1207 * Set flags for single stepping.
1208 */
1209 CPUMSetGuestEFlags(pVCpu, CPUMGetGuestEFlags(pVCpu) | X86_EFL_TF | X86_EFL_RF);
1210
1211 /*
1212 * Single step.
1213 * We do not start the timers or anything; if anything, we should only let a few nanoseconds pass.
1214 */
1215 do
1216 {
1217 rc = VMMR3HwAccRunGC(pVM, pVCpu);
1218 } while ( rc == VINF_SUCCESS
1219 || rc == VINF_EM_RAW_INTERRUPT);
1220 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_RESUME_GUEST_MASK);
1221
1222 /*
1223 * Make sure the trap flag is cleared.
1224 * (Too bad if the guest is trying to single step too.)
1225 */
1226 CPUMSetGuestEFlags(pVCpu, CPUMGetGuestEFlags(pVCpu) & ~X86_EFL_TF);
1227
1228 /*
1229 * Deal with the return codes.
1230 */
1231 rc = emR3HighPriorityPostForcedActions(pVM, pVCpu, rc);
1232 rc = emR3RawHandleRC(pVM, pVCpu, pCtx, rc);
1233 rc = emR3RawUpdateForceFlag(pVM, pVCpu, pCtx, rc);
1234 return rc;
1235}
1236
1237
1238int emR3SingleStepExecRaw(PVM pVM, PVMCPU pVCpu, uint32_t cIterations)
1239{
1240 int rc = VINF_SUCCESS;
1241 EMSTATE enmOldState = pVCpu->em.s.enmState;
1242 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_RAW;
1243
1244 Log(("Single step BEGIN:\n"));
1245 for (uint32_t i = 0; i < cIterations; i++)
1246 {
1247 DBGFR3PrgStep(pVCpu);
1248 DBGFR3DisasInstrCurrentLog(pVM, "RSS: ");
1249 rc = emR3RawStep(pVM, pVCpu);
1250 if (rc != VINF_SUCCESS)
1251 break;
1252 }
1253 Log(("Single step END: rc=%Rrc\n", rc));
1254 CPUMSetGuestEFlags(pVCpu, CPUMGetGuestEFlags(pVCpu) & ~X86_EFL_TF);
1255 pVCpu->em.s.enmState = enmOldState;
1256 return rc;
1257}
1258
1259
1260static int emR3SingleStepExecHwAcc(PVM pVM, PVMCPU pVCpu, uint32_t cIterations)
1261{
1262 int rc = VINF_SUCCESS;
1263 EMSTATE enmOldState = pVCpu->em.s.enmState;
1264 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_HWACC;
1265
1266 Log(("Single step BEGIN:\n"));
1267 for (uint32_t i = 0; i < cIterations; i++)
1268 {
1269 DBGFR3PrgStep(pVCpu);
1270 DBGFR3DisasInstrCurrentLog(pVM, "RSS: ");
1271 rc = emR3HwAccStep(pVM, pVCpu);
1272 if ( rc != VINF_SUCCESS
1273 || !HWACCMR3CanExecuteGuest(pVM, pVCpu->em.s.pCtx))
1274 break;
1275 }
1276 Log(("Single step END: rc=%Rrc\n", rc));
1277 CPUMSetGuestEFlags(pVCpu, CPUMGetGuestEFlags(pVCpu) & ~X86_EFL_TF);
1278 pVCpu->em.s.enmState = enmOldState;
1279 return rc == VINF_SUCCESS ? VINF_EM_RESCHEDULE_REM : rc;
1280}
1281
1282
1283static int emR3SingleStepExecRem(PVM pVM, PVMCPU pVCpu, uint32_t cIterations)
1284{
1285 EMSTATE enmOldState = pVCpu->em.s.enmState;
1286
1287 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_REM;
1288
1289 Log(("Single step BEGIN:\n"));
1290 for (uint32_t i = 0; i < cIterations; i++)
1291 {
1292 DBGFR3PrgStep(pVCpu);
1293 DBGFR3DisasInstrCurrentLog(pVM, "RSS: ");
1294 emR3RemStep(pVM, pVCpu);
1295 if (emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx) != EMSTATE_REM)
1296 break;
1297 }
1298 Log(("Single step END:\n"));
1299 CPUMSetGuestEFlags(pVCpu, CPUMGetGuestEFlags(pVCpu) & ~X86_EFL_TF);
1300 pVCpu->em.s.enmState = enmOldState;
1301 return VINF_EM_RESCHEDULE;
1302}
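
/*
 * Illustrative sketch (not built): how a debugger path might pick between the
 * three single-step helpers above based on the current scheduling decision.
 * The wrapper name emR3SingleStepExample is hypothetical.
 */
#if 0
static int emR3SingleStepExample(PVM pVM, PVMCPU pVCpu, uint32_t cIterations)
{
    switch (emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx))
    {
        case EMSTATE_RAW:   return emR3SingleStepExecRaw(pVM, pVCpu, cIterations);
        case EMSTATE_HWACC: return emR3SingleStepExecHwAcc(pVM, pVCpu, cIterations);
        default:            return emR3SingleStepExecRem(pVM, pVCpu, cIterations);
    }
}
#endif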
1303
1304#endif /* DEBUG */
1305
1306
1307/**
1308 * Executes one (or perhaps a few more) instruction(s).
1309 *
1310 * @returns VBox status code suitable for EM.
1311 *
1312 * @param pVM VM handle.
1313 * @param pVCpu VMCPU handle
1314 * @param rcGC GC return code
1315 * @param pszPrefix Disassembly prefix. If not NULL we'll disassemble the
1316 * instruction and prefix the log output with this text.
1317 */
1318#ifdef LOG_ENABLED
1319static int emR3RawExecuteInstructionWorker(PVM pVM, PVMCPU pVCpu, int rcGC, const char *pszPrefix)
1320#else
1321static int emR3RawExecuteInstructionWorker(PVM pVM, PVMCPU pVCpu, int rcGC)
1322#endif
1323{
1324 PCPUMCTX pCtx = pVCpu->em.s.pCtx;
1325 int rc;
1326
1327 /*
1328 *
1329 * The simple solution is to use the recompiler.
1330 * The better solution is to disassemble the current instruction and
1331 * try handle as many as possible without using REM.
1332 *
1333 */
1334
1335#ifdef LOG_ENABLED
1336 /*
1337 * Disassemble the instruction if requested.
1338 */
1339 if (pszPrefix)
1340 {
1341 DBGFR3InfoLog(pVM, "cpumguest", pszPrefix);
1342 DBGFR3DisasInstrCurrentLog(pVM, pszPrefix);
1343 }
1344#endif /* LOG_ENABLED */
1345
1346 /*
1347 * PATM is making life more interesting.
1348 * We cannot hand anything to REM which has an EIP inside patch code. So, we'll
1349 * tell PATM there is a trap in this code and have it take the appropriate actions
1350 * to allow us to execute the code in REM.
1351 */
1352 if (PATMIsPatchGCAddr(pVM, pCtx->eip))
1353 {
1354 Log(("emR3RawExecuteInstruction: In patch block. eip=%RRv\n", (RTRCPTR)pCtx->eip));
1355
1356 RTGCPTR pNewEip;
1357 rc = PATMR3HandleTrap(pVM, pCtx, pCtx->eip, &pNewEip);
1358 switch (rc)
1359 {
1360 /*
1361 * It's not very useful to emulate a single instruction and then go back to raw
1362 * mode; just execute the whole block until IF is set again.
1363 */
1364 case VINF_SUCCESS:
1365 Log(("emR3RawExecuteInstruction: Executing instruction starting at new address %RGv IF=%d VMIF=%x\n",
1366 pNewEip, pCtx->eflags.Bits.u1IF, pVCpu->em.s.pPatmGCState->uVMFlags));
1367 pCtx->eip = pNewEip;
1368 Assert(pCtx->eip);
1369
1370 if (pCtx->eflags.Bits.u1IF)
1371 {
1372 /*
1373 * The last instruction in the patch block needs to be executed!! (sti/sysexit for example)
1374 */
1375 Log(("PATCH: IF=1 -> emulate last instruction as it can't be interrupted!!\n"));
1376 return emR3RawExecuteInstruction(pVM, pVCpu, "PATCHIR");
1377 }
1378 else if (rcGC == VINF_PATM_PENDING_IRQ_AFTER_IRET)
1379 {
1380 /* special case: iret, that sets IF, detected a pending irq/event */
1381 return emR3RawExecuteInstruction(pVM, pVCpu, "PATCHIRET");
1382 }
1383 return VINF_EM_RESCHEDULE_REM;
1384
1385 /*
1386 * One instruction.
1387 */
1388 case VINF_PATCH_EMULATE_INSTR:
1389 Log(("emR3RawExecuteInstruction: Emulate patched instruction at %RGv IF=%d VMIF=%x\n",
1390 pNewEip, pCtx->eflags.Bits.u1IF, pVCpu->em.s.pPatmGCState->uVMFlags));
1391 pCtx->eip = pNewEip;
1392 return emR3RawExecuteInstruction(pVM, pVCpu, "PATCHIR");
1393
1394 /*
1395 * The patch was disabled, hand it to the REM.
1396 */
1397 case VERR_PATCH_DISABLED:
1398 Log(("emR3RawExecuteInstruction: Disabled patch -> new eip %RGv IF=%d VMIF=%x\n",
1399 pNewEip, pCtx->eflags.Bits.u1IF, pVCpu->em.s.pPatmGCState->uVMFlags));
1400 pCtx->eip = pNewEip;
1401 if (pCtx->eflags.Bits.u1IF)
1402 {
1403 /*
1404 * The last instruction in the patch block needs to be executed!! (sti/sysexit for example)
1405 */
1406 Log(("PATCH: IF=1 -> emulate last instruction as it can't be interrupted!!\n"));
1407 return emR3RawExecuteInstruction(pVM, pVCpu, "PATCHIR");
1408 }
1409 return VINF_EM_RESCHEDULE_REM;
1410
1411 /* Force continued patch execution; usually due to a write-monitored stack. */
1412 case VINF_PATCH_CONTINUE:
1413 return VINF_SUCCESS;
1414
1415 default:
1416 AssertReleaseMsgFailed(("Unknown return code %Rrc from PATMR3HandleTrap\n", rc));
1417 return VERR_IPE_UNEXPECTED_STATUS;
1418 }
1419 }
1420
1421#if 0
1422 /* Try our own instruction emulator before falling back to the recompiler. */
1423 DISCPUSTATE Cpu;
1424 rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCtx->rip, &Cpu, "GEN EMU");
1425 if (RT_SUCCESS(rc))
1426 {
1427 uint32_t size;
1428
1429 switch (Cpu.pCurInstr->opcode)
1430 {
1431 /** @todo we can do more now. */
1432 case OP_MOV:
1433 case OP_AND:
1434 case OP_OR:
1435 case OP_XOR:
1436 case OP_POP:
1437 case OP_INC:
1438 case OP_DEC:
1439 case OP_XCHG:
1440 STAM_PROFILE_START(&pVCpu->em.s.StatMiscEmu, a);
1441 rc = EMInterpretInstructionCPU(pVM, pVCpu, &Cpu, CPUMCTX2CORE(pCtx), 0, &size);
1442 if (RT_SUCCESS(rc))
1443 {
1444 pCtx->rip += Cpu.opsize;
1445#ifdef EM_NOTIFY_HWACCM
1446 if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_HWACC)
1447 HWACCMR3NotifyEmulated(pVCpu);
1448#endif
1449 STAM_PROFILE_STOP(&pVCpu->em.s.StatMiscEmu, a);
1450 return rc;
1451 }
1452 if (rc != VERR_EM_INTERPRETER)
1453 AssertMsgFailedReturn(("rc=%Rrc\n", rc), rc);
1454 STAM_PROFILE_STOP(&pVCpu->em.s.StatMiscEmu, a);
1455 break;
1456 }
1457 }
1458#endif /* 0 */
1459 STAM_PROFILE_START(&pVCpu->em.s.StatREMEmu, a);
1460 Log(("EMINS: %04x:%RGv RSP=%RGv\n", pCtx->cs, (RTGCPTR)pCtx->rip, (RTGCPTR)pCtx->rsp));
1461 emR3RemLock(pVM);
1462 rc = REMR3EmulateInstruction(pVM, pVCpu);
1463 emR3RemUnlock(pVM);
1464 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMEmu, a);
1465
1466#ifdef EM_NOTIFY_HWACCM
1467 if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_HWACC)
1468 HWACCMR3NotifyEmulated(pVCpu);
1469#endif
1470 return rc;
1471}
1472
1473
1474/**
1475 * Executes one (or perhaps a few more) instruction(s).
1476 * This is just a wrapper for discarding pszPrefix in non-logging builds.
1477 *
1478 * @returns VBox status code suitable for EM.
1479 * @param pVM VM handle.
1480 * @param pVCpu VMCPU handle.
1481 * @param pszPrefix Disassembly prefix. If not NULL we'll disassemble the
1482 * instruction and prefix the log output with this text.
1483 * @param rcGC GC return code
1484 */
1485DECLINLINE(int) emR3RawExecuteInstruction(PVM pVM, PVMCPU pVCpu, const char *pszPrefix, int rcGC)
1486{
1487#ifdef LOG_ENABLED
1488 return emR3RawExecuteInstructionWorker(pVM, pVCpu, rcGC, pszPrefix);
1489#else
1490 return emR3RawExecuteInstructionWorker(pVM, pVCpu, rcGC);
1491#endif
1492}
1493
1494/**
1495 * Executes one (or perhaps a few more) IO instruction(s).
1496 *
1497 * @returns VBox status code suitable for EM.
1498 * @param pVM VM handle.
1499 * @param pVCpu VMCPU handle.
1500 */
1501int emR3RawExecuteIOInstruction(PVM pVM, PVMCPU pVCpu)
1502{
1503 int rc;
1504 PCPUMCTX pCtx = pVCpu->em.s.pCtx;
1505
1506 STAM_PROFILE_START(&pVCpu->em.s.StatIOEmu, a);
1507
1508 /** @todo probably we should fall back to the recompiler; otherwise we'll go back and forth between HC & GC
1509 * as I/O instructions tend to come in batches of more than one.
1510 */
1511 DISCPUSTATE Cpu;
1512 rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCtx->rip, &Cpu, "IO EMU");
1513 if (RT_SUCCESS(rc))
1514 {
1515 rc = VINF_EM_RAW_EMULATE_INSTR;
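/* Default: any case not handled below stays VINF_EM_RAW_EMULATE_INSTR and falls through to full instruction emulation at the bottom. */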
1516
1517 if (!(Cpu.prefix & (PREFIX_REP | PREFIX_REPNE)))
1518 {
1519 switch (Cpu.pCurInstr->opcode)
1520 {
1521 case OP_IN:
1522 {
1523 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->StatIn);
1524 rc = IOMInterpretIN(pVM, CPUMCTX2CORE(pCtx), &Cpu);
1525 break;
1526 }
1527
1528 case OP_OUT:
1529 {
1530 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->StatOut);
1531 rc = IOMInterpretOUT(pVM, CPUMCTX2CORE(pCtx), &Cpu);
1532 break;
1533 }
1534 }
1535 }
1536 else if (Cpu.prefix & PREFIX_REP)
1537 {
1538 switch (Cpu.pCurInstr->opcode)
1539 {
1540 case OP_INSB:
1541 case OP_INSWD:
1542 {
1543 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->StatIn);
1544 rc = IOMInterpretINS(pVM, CPUMCTX2CORE(pCtx), &Cpu);
1545 break;
1546 }
1547
1548 case OP_OUTSB:
1549 case OP_OUTSWD:
1550 {
1551 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->StatOut);
1552 rc = IOMInterpretOUTS(pVM, CPUMCTX2CORE(pCtx), &Cpu);
1553 break;
1554 }
1555 }
1556 }
1557
1558 /*
1559 * Handle the I/O return codes.
1560 * (The unhandled cases end up with rc == VINF_EM_RAW_EMULATE_INSTR.)
1561 */
1562 if (IOM_SUCCESS(rc))
1563 {
1564 pCtx->rip += Cpu.opsize;
1565 STAM_PROFILE_STOP(&pVCpu->em.s.StatIOEmu, a);
1566 return rc;
1567 }
1568
1569 if (rc == VINF_EM_RAW_GUEST_TRAP)
1570 {
1571 STAM_PROFILE_STOP(&pVCpu->em.s.StatIOEmu, a);
1572 rc = emR3RawGuestTrap(pVM, pVCpu);
1573 return rc;
1574 }
1575 AssertMsg(rc != VINF_TRPM_XCPT_DISPATCHED, ("Handle VINF_TRPM_XCPT_DISPATCHED\n"));
1576
1577 if (RT_FAILURE(rc))
1578 {
1579 STAM_PROFILE_STOP(&pVCpu->em.s.StatIOEmu, a);
1580 return rc;
1581 }
1582 AssertMsg(rc == VINF_EM_RAW_EMULATE_INSTR || rc == VINF_EM_RESCHEDULE_REM, ("rc=%Rrc\n", rc));
1583 }
1584 STAM_PROFILE_STOP(&pVCpu->em.s.StatIOEmu, a);
1585 return emR3RawExecuteInstruction(pVM, pVCpu, "IO: ");
1586}
1587
1588
1589/**
1590 * Handle a guest context trap.
1591 *
1592 * @returns VBox status code suitable for EM.
1593 * @param pVM VM handle.
1594 * @param pVCpu VMCPU handle.
1595 */
1596static int emR3RawGuestTrap(PVM pVM, PVMCPU pVCpu)
1597{
1598 PCPUMCTX pCtx = pVCpu->em.s.pCtx;
1599
1600 /*
1601 * Get the trap info.
1602 */
1603 uint8_t u8TrapNo;
1604 TRPMEVENT enmType;
1605 RTGCUINT uErrorCode;
1606 RTGCUINTPTR uCR2;
1607 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrorCode, &uCR2);
1608 if (RT_FAILURE(rc))
1609 {
1610 AssertReleaseMsgFailed(("No trap! (rc=%Rrc)\n", rc));
1611 return rc;
1612 }
1613
1614 /*
1615 * Traps can be directly forwarded in hardware accelerated mode.
1616 */
1617 if (HWACCMR3IsActive(pVM))
1618 {
1619#ifdef LOG_ENABLED
1620 DBGFR3InfoLog(pVM, "cpumguest", "Guest trap");
1621 DBGFR3DisasInstrCurrentLog(pVM, "Guest trap");
1622#endif
1623 return VINF_EM_RESCHEDULE_HWACC;
1624 }
1625
1626#if 1 /* Experimental: Review, disable if it causes trouble. */
1627 /*
1628 * Handle traps in patch code first.
1629 *
1630 * We catch a few of these cases in RC before returning to R3 (#PF, #GP, #BP)
1631 * but several traps aren't handled specially by TRPM in RC and we end up here
1632 * instead. One example is #DE.
1633 */
1634 uint32_t uCpl = CPUMGetGuestCPL(pVCpu, CPUMCTX2CORE(pCtx));
1635 if ( uCpl == 0
1636 && PATMIsPatchGCAddr(pVM, (RTGCPTR)pCtx->eip))
1637 {
1638 LogFlow(("emR3RawGuestTrap: trap %#x in patch code; eip=%08x\n", u8TrapNo, pCtx->eip));
1639 return emR3PatchTrap(pVM, pVCpu, pCtx, rc);
1640 }
1641#endif
1642
1643 /*
1644 * If the guest gate is marked unpatched, then we will check again if we can patch it.
1645 * (This assumes that we've already tried and failed to dispatch the trap in
1646 * RC for the gates that have already been patched. This is true for most high-
1647 * volume traps, because those are handled specially, but not for odd ones like #DE.)
1648 */
1649 if (TRPMR3GetGuestTrapHandler(pVM, u8TrapNo) == TRPM_INVALID_HANDLER)
1650 {
1651 CSAMR3CheckGates(pVM, u8TrapNo, 1);
1652 Log(("emR3RawHandleRC: recheck gate %x -> valid=%d\n", u8TrapNo, TRPMR3GetGuestTrapHandler(pVM, u8TrapNo) != TRPM_INVALID_HANDLER));
1653
1654 /* If it was successful, then we could go back to raw mode. */
1655 if (TRPMR3GetGuestTrapHandler(pVM, u8TrapNo) != TRPM_INVALID_HANDLER)
1656 {
1657 /* Must check pending forced actions as our IDT or GDT might be out of sync. */
1658 rc = EMR3CheckRawForcedActions(pVM, pVCpu);
1659 AssertRCReturn(rc, rc);
1660
1661 TRPMERRORCODE enmError = uErrorCode != ~0U
1662 ? TRPM_TRAP_HAS_ERRORCODE
1663 : TRPM_TRAP_NO_ERRORCODE;
1664 rc = TRPMForwardTrap(pVCpu, CPUMCTX2CORE(pCtx), u8TrapNo, uErrorCode, enmError, TRPM_TRAP, -1);
1665 if (rc == VINF_SUCCESS /* Don't use RT_SUCCESS */)
1666 {
1667 TRPMResetTrap(pVCpu);
1668 return VINF_EM_RESCHEDULE_RAW;
1669 }
1670 AssertMsg(rc == VINF_EM_RAW_GUEST_TRAP, ("%Rrc\n", rc));
1671 }
1672 }
1673
1674 /*
1675 * Scan kernel code that traps; we might not get another chance.
1676 */
1677 /** @todo move this up before the dispatching? */
1678 if ( (pCtx->ss & X86_SEL_RPL) <= 1
1679 && !pCtx->eflags.Bits.u1VM)
1680 {
1681 Assert(!PATMIsPatchGCAddr(pVM, pCtx->eip));
1682 CSAMR3CheckCodeEx(pVM, CPUMCTX2CORE(pCtx), pCtx->eip);
1683 }
1684
1685 /*
1686 * Trap specific handling.
1687 */
1688 if (u8TrapNo == 6) /* (#UD) Invalid opcode. */
1689 {
1690 /*
1691 * If MONITOR & MWAIT are supported, then interpret them here.
1692 */
1693 DISCPUSTATE cpu;
1694 rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCtx->rip, &cpu, "Guest Trap (#UD): ");
1695 if ( RT_SUCCESS(rc)
1696 && (cpu.pCurInstr->opcode == OP_MONITOR || cpu.pCurInstr->opcode == OP_MWAIT))
1697 {
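/* MONITOR/MWAIT support is advertised in CPUID leaf 1, ECX (the X86_CPUID_FEATURE_ECX_MONITOR bit checked below). */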
1698 uint32_t u32Dummy, u32Features, u32ExtFeatures;
1699 CPUMGetGuestCpuId(pVCpu, 1, &u32Dummy, &u32Dummy, &u32ExtFeatures, &u32Features);
1700 if (u32ExtFeatures & X86_CPUID_FEATURE_ECX_MONITOR)
1701 {
1702 rc = TRPMResetTrap(pVCpu);
1703 AssertRC(rc);
1704
1705 uint32_t opsize;
1706 rc = EMInterpretInstructionCPU(pVM, pVCpu, &cpu, CPUMCTX2CORE(pCtx), 0, &opsize);
1707 if (RT_SUCCESS(rc))
1708 {
1709 pCtx->rip += cpu.opsize;
1710#ifdef EM_NOTIFY_HWACCM
1711 if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_HWACC)
1712 HWACCMR3NotifyEmulated(pVCpu);
1713#endif
1714 return rc;
1715 }
1716 return emR3RawExecuteInstruction(pVM, pVCpu, "Monitor: ");
1717 }
1718 }
1719 }
1720 else if (u8TrapNo == 13) /* (#GP) Privileged exception */
1721 {
1722 /*
1723 * Handle I/O bitmap?
1724 */
1725 /** @todo We're not supposed to be here with a false guest trap concerning
1726 * I/O access. We can easily handle those in RC. */
1727 DISCPUSTATE cpu;
1728 rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCtx->rip, &cpu, "Guest Trap: ");
1729 if ( RT_SUCCESS(rc)
1730 && (cpu.pCurInstr->optype & OPTYPE_PORTIO))
1731 {
1732 /*
1733 * We should really check the TSS for the IO bitmap, but it's not like this
1734 * lazy approach really makes things worse.
1735 */
1736 rc = TRPMResetTrap(pVCpu);
1737 AssertRC(rc);
1738 return emR3RawExecuteInstruction(pVM, pVCpu, "IO Guest Trap: ");
1739 }
1740 }
1741
1742#ifdef LOG_ENABLED
1743 DBGFR3InfoLog(pVM, "cpumguest", "Guest trap");
1744 DBGFR3DisasInstrCurrentLog(pVM, "Guest trap");
1745
1746 /* Get guest page information. */
1747 uint64_t fFlags = 0;
1748 RTGCPHYS GCPhys = 0;
1749 int rc2 = PGMGstGetPage(pVCpu, uCR2, &fFlags, &GCPhys);
1750 Log(("emR3RawGuestTrap: cs:eip=%04x:%08x: trap=%02x err=%08x cr2=%08x cr0=%08x%s: Phys=%RGp fFlags=%08llx %s %s %s%s rc2=%d\n",
1751 pCtx->cs, pCtx->eip, u8TrapNo, uErrorCode, uCR2, (uint32_t)pCtx->cr0, (enmType == TRPM_SOFTWARE_INT) ? " software" : "", GCPhys, fFlags,
1752 fFlags & X86_PTE_P ? "P " : "NP", fFlags & X86_PTE_US ? "U" : "S",
1753 fFlags & X86_PTE_RW ? "RW" : "R0", fFlags & X86_PTE_G ? " G" : "", rc2));
1754#endif
1755
1756 /*
1757 * #PF has CR2.
1758 * (Because of stuff like above we must set CR2 in a delayed fashion.)
1759 */
1760 if (u8TrapNo == 14 /* #PF */)
1761 pCtx->cr2 = uCR2;
1762
1763 return VINF_EM_RESCHEDULE_REM;
1764}
1765
1766
1767/**
1768 * Handle a ring switch trap.
1769 * Needs to update statistics and install patches. The result goes to REM.
1770 *
1771 * @returns VBox status code suitable for EM.
1772 * @param pVM VM handle.
1773 * @param pVCpu VMCPU handle.
1774 */
1775int emR3RawRingSwitch(PVM pVM, PVMCPU pVCpu)
1776{
1777 int rc;
1778 DISCPUSTATE Cpu;
1779 PCPUMCTX pCtx = pVCpu->em.s.pCtx;
1780
1781 /*
1782 * sysenter, syscall & callgate
1783 */
1784 rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCtx->rip, &Cpu, "RSWITCH: ");
1785 if (RT_SUCCESS(rc))
1786 {
1787 if (Cpu.pCurInstr->opcode == OP_SYSENTER)
1788 {
1789 if (pCtx->SysEnter.cs != 0)
1790 {
1791 rc = PATMR3InstallPatch(pVM, SELMToFlat(pVM, DIS_SELREG_CS, CPUMCTX2CORE(pCtx), pCtx->eip),
1792 (SELMGetCpuModeFromSelector(pVM, pCtx->eflags, pCtx->cs, &pCtx->csHid) == CPUMODE_32BIT) ? PATMFL_CODE32 : 0);
1793 if (RT_SUCCESS(rc))
1794 {
1795 DBGFR3DisasInstrCurrentLog(pVM, "Patched sysenter instruction");
1796 return VINF_EM_RESCHEDULE_RAW;
1797 }
1798 }
1799 }
1800
1801#ifdef VBOX_WITH_STATISTICS
1802 switch (Cpu.pCurInstr->opcode)
1803 {
1804 case OP_SYSENTER:
1805 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->StatSysEnter);
1806 break;
1807 case OP_SYSEXIT:
1808 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->StatSysExit);
1809 break;
1810 case OP_SYSCALL:
1811 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->StatSysCall);
1812 break;
1813 case OP_SYSRET:
1814 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->StatSysRet);
1815 break;
1816 }
1817#endif
1818 }
1819 else
1820 AssertRC(rc);
1821
1822 /* go to the REM to emulate a single instruction */
1823 return emR3RawExecuteInstruction(pVM, pVCpu, "RSWITCH: ");
1824}
1825
1826
1827/**
1828 * Handle a trap (\#PF or \#GP) in patch code
1829 *
1830 * @returns VBox status code suitable for EM.
1831 * @param pVM VM handle.
1832 * @param pVCpu VMCPU handle.
1833 * @param pCtx CPU context
1834 * @param gcret GC return code
1835 */
1836static int emR3PatchTrap(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, int gcret)
1837{
1838 uint8_t u8TrapNo;
1839 int rc;
1840 TRPMEVENT enmType;
1841 RTGCUINT uErrorCode;
1842 RTGCUINTPTR uCR2;
1843
1844 Assert(PATMIsPatchGCAddr(pVM, pCtx->eip));
1845
1846 if (gcret == VINF_PATM_PATCH_INT3)
1847 {
1848 u8TrapNo = 3;
1849 uCR2 = 0;
1850 uErrorCode = 0;
1851 }
1852 else if (gcret == VINF_PATM_PATCH_TRAP_GP)
1853 {
1854 /* No active trap in this case. Kind of ugly. */
1855 u8TrapNo = X86_XCPT_GP;
1856 uCR2 = 0;
1857 uErrorCode = 0;
1858 }
1859 else
1860 {
1861 rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrorCode, &uCR2);
1862 if (RT_FAILURE(rc))
1863 {
1864 AssertReleaseMsgFailed(("emR3PatchTrap: no trap! (rc=%Rrc) gcret=%Rrc\n", rc, gcret));
1865 return rc;
1866 }
1867 /* Reset the trap as we'll execute the original instruction again. */
1868 TRPMResetTrap(pVCpu);
1869 }
1870
1871 /*
1872 * Deal with traps inside patch code.
1873 * (This code won't run outside GC.)
1874 */
1875 if (u8TrapNo != 1)
1876 {
1877#ifdef LOG_ENABLED
1878 DBGFR3InfoLog(pVM, "cpumguest", "Trap in patch code");
1879 DBGFR3DisasInstrCurrentLog(pVM, "Patch code");
1880
1881 DISCPUSTATE Cpu;
1882 int rc;
1883
1884 rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCtx->eip, &Cpu, "Patch code: ");
1885 if ( RT_SUCCESS(rc)
1886 && Cpu.pCurInstr->opcode == OP_IRET)
1887 {
1888 uint32_t eip, selCS, uEFlags;
1889
1890 /* Iret crashes are bad as we have already changed the flags on the stack */
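/* IRET frame layout (32-bit values from esp): EIP, CS, EFLAGS; plus ESP and SS
on a privilege change; plus ES, DS, FS and GS when returning to V86 mode.
The reads below follow that layout. */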
1891 rc = PGMPhysSimpleReadGCPtr(pVCpu, &eip, pCtx->esp, 4);
1892 rc |= PGMPhysSimpleReadGCPtr(pVCpu, &selCS, pCtx->esp+4, 4);
1893 rc |= PGMPhysSimpleReadGCPtr(pVCpu, &uEFlags, pCtx->esp+8, 4);
1894 if (rc == VINF_SUCCESS)
1895 {
1896 if ( (uEFlags & X86_EFL_VM)
1897 || (selCS & X86_SEL_RPL) == 3)
1898 {
1899 uint32_t selSS, esp;
1900
1901 rc |= PGMPhysSimpleReadGCPtr(pVCpu, &esp, pCtx->esp + 12, 4);
1902 rc |= PGMPhysSimpleReadGCPtr(pVCpu, &selSS, pCtx->esp + 16, 4);
1903
1904 if (uEFlags & X86_EFL_VM)
1905 {
1906 uint32_t selDS, selES, selFS, selGS;
1907 rc = PGMPhysSimpleReadGCPtr(pVCpu, &selES, pCtx->esp + 20, 4);
1908 rc |= PGMPhysSimpleReadGCPtr(pVCpu, &selDS, pCtx->esp + 24, 4);
1909 rc |= PGMPhysSimpleReadGCPtr(pVCpu, &selFS, pCtx->esp + 28, 4);
1910 rc |= PGMPhysSimpleReadGCPtr(pVCpu, &selGS, pCtx->esp + 32, 4);
1911 if (rc == VINF_SUCCESS)
1912 {
1913 Log(("Patch code: IRET->VM stack frame: return address %04X:%08RX32 eflags=%08x ss:esp=%04X:%08RX32\n", selCS, eip, uEFlags, selSS, esp));
1914 Log(("Patch code: IRET->VM stack frame: DS=%04X ES=%04X FS=%04X GS=%04X\n", selDS, selES, selFS, selGS));
1915 }
1916 }
1917 else
1918 Log(("Patch code: IRET stack frame: return address %04X:%08RX32 eflags=%08x ss:esp=%04X:%08RX32\n", selCS, eip, uEFlags, selSS, esp));
1919 }
1920 else
1921 Log(("Patch code: IRET stack frame: return address %04X:%08RX32 eflags=%08x\n", selCS, eip, uEFlags));
1922 }
1923 }
1924#endif /* LOG_ENABLED */
1925 Log(("emR3PatchTrap: in patch: eip=%08x: trap=%02x err=%08x cr2=%08x cr0=%08x\n",
1926 pCtx->eip, u8TrapNo, uErrorCode, uCR2, (uint32_t)pCtx->cr0));
1927
1928 RTGCPTR pNewEip;
1929 rc = PATMR3HandleTrap(pVM, pCtx, pCtx->eip, &pNewEip);
1930 switch (rc)
1931 {
1932 /*
1933 * Execute the faulting instruction.
1934 */
1935 case VINF_SUCCESS:
1936 {
1937 /** @todo execute a whole block */
1938 Log(("emR3PatchTrap: Executing faulting instruction at new address %RGv\n", pNewEip));
1939 if (!(pVCpu->em.s.pPatmGCState->uVMFlags & X86_EFL_IF))
1940 Log(("emR3PatchTrap: Virtual IF flag disabled!!\n"));
1941
1942 pCtx->eip = pNewEip;
1943 AssertRelease(pCtx->eip);
1944
1945 if (pCtx->eflags.Bits.u1IF)
1946 {
1947 /* Windows XP lets irets fault intentionally and then takes action based on the opcode; an
1948 * int3 patch overwrites it and leads to blue screens. Remove the patch in this case.
1949 */
1950 if ( u8TrapNo == X86_XCPT_GP
1951 && PATMIsInt3Patch(pVM, pCtx->eip, NULL, NULL))
1952 {
1953 /** @todo move to PATMR3HandleTrap */
1954 Log(("Possible Windows XP iret fault at %08RX32\n", pCtx->eip));
1955 PATMR3RemovePatch(pVM, pCtx->eip);
1956 }
1957
1958 /** @todo Knoppix 5 regression when returning VINF_SUCCESS here and going back to raw mode. */
1959 /* Note: possibly because a reschedule is required (e.g. iret to V86 code) */
1960
1961 return emR3RawExecuteInstruction(pVM, pVCpu, "PATCHIR");
1962 /* Interrupts are enabled; just go back to the original instruction.
1963 return VINF_SUCCESS; */
1964 }
1965 return VINF_EM_RESCHEDULE_REM;
1966 }
1967
1968 /*
1969 * One instruction.
1970 */
1971 case VINF_PATCH_EMULATE_INSTR:
1972 Log(("emR3PatchTrap: Emulate patched instruction at %RGv IF=%d VMIF=%x\n",
1973 pNewEip, pCtx->eflags.Bits.u1IF, pVCpu->em.s.pPatmGCState->uVMFlags));
1974 pCtx->eip = pNewEip;
1975 AssertRelease(pCtx->eip);
1976 return emR3RawExecuteInstruction(pVM, pVCpu, "PATCHEMUL: ");
1977
1978 /*
1979 * The patch was disabled, hand it to the REM.
1980 */
1981 case VERR_PATCH_DISABLED:
1982 if (!(pVCpu->em.s.pPatmGCState->uVMFlags & X86_EFL_IF))
1983 Log(("emR3PatchTrap: Virtual IF flag disabled!!\n"));
1984 pCtx->eip = pNewEip;
1985 AssertRelease(pCtx->eip);
1986
1987 if (pCtx->eflags.Bits.u1IF)
1988 {
1989 /*
1990 * The last instruction in the patch block needs to be executed!! (sti/sysexit for example)
1991 */
1992 Log(("PATCH: IF=1 -> emulate last instruction as it can't be interrupted!!\n"));
1993 return emR3RawExecuteInstruction(pVM, pVCpu, "PATCHIR");
1994 }
1995 return VINF_EM_RESCHEDULE_REM;
1996
1997 /* Force continued patch execution; usually due to a write-monitored stack. */
1998 case VINF_PATCH_CONTINUE:
1999 return VINF_SUCCESS;
2000
2001 /*
2002 * Anything else is *fatal*.
2003 */
2004 default:
2005 AssertReleaseMsgFailed(("Unknown return code %Rrc from PATMR3HandleTrap!\n", rc));
2006 return VERR_IPE_UNEXPECTED_STATUS;
2007 }
2008 }
2009 return VINF_SUCCESS;
2010}
2011
2012
2013/**
2014 * Handle a privileged instruction.
2015 *
2016 * @returns VBox status code suitable for EM.
2017 * @param pVM VM handle.
2018 * @param pVCpu VMCPU handle.
2019 */
2020int emR3RawPrivileged(PVM pVM, PVMCPU pVCpu)
2021{
2022 STAM_PROFILE_START(&pVCpu->em.s.StatPrivEmu, a);
2023 PCPUMCTX pCtx = pVCpu->em.s.pCtx;
2024
2025 Assert(!pCtx->eflags.Bits.u1VM);
2026
2027 if (PATMIsEnabled(pVM))
2028 {
2029 /*
2030 * Check if in patch code.
2031 */
2032 if (PATMR3IsInsidePatchJump(pVM, pCtx->eip, NULL))
2033 {
2034#ifdef LOG_ENABLED
2035 DBGFR3InfoLog(pVM, "cpumguest", "PRIV");
2036#endif
2037 AssertMsgFailed(("FATAL ERROR: executing random instruction inside generated patch jump %08X\n", pCtx->eip));
2038 return VERR_EM_RAW_PATCH_CONFLICT;
2039 }
2040 if ( (pCtx->ss & X86_SEL_RPL) == 0
2041 && !pCtx->eflags.Bits.u1VM
2042 && !PATMIsPatchGCAddr(pVM, pCtx->eip))
2043 {
2044 int rc = PATMR3InstallPatch(pVM, SELMToFlat(pVM, DIS_SELREG_CS, CPUMCTX2CORE(pCtx), pCtx->eip),
2045 (SELMGetCpuModeFromSelector(pVM, pCtx->eflags, pCtx->cs, &pCtx->csHid) == CPUMODE_32BIT) ? PATMFL_CODE32 : 0);
2046 if (RT_SUCCESS(rc))
2047 {
2048#ifdef LOG_ENABLED
2049 DBGFR3InfoLog(pVM, "cpumguest", "PRIV");
2050#endif
2051 DBGFR3DisasInstrCurrentLog(pVM, "Patched privileged instruction");
2052 return VINF_SUCCESS;
2053 }
2054 }
2055 }
2056
2057#ifdef LOG_ENABLED
2058 if (!PATMIsPatchGCAddr(pVM, pCtx->eip))
2059 {
2060 DBGFR3InfoLog(pVM, "cpumguest", "PRIV");
2061 DBGFR3DisasInstrCurrentLog(pVM, "Privileged instr: ");
2062 }
2063#endif
2064
2065 /*
2066 * Instruction statistics and logging.
2067 */
2068 DISCPUSTATE Cpu;
2069 int rc;
2070
2071 rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCtx->rip, &Cpu, "PRIV: ");
2072 if (RT_SUCCESS(rc))
2073 {
2074#ifdef VBOX_WITH_STATISTICS
2075 PEMSTATS pStats = pVCpu->em.s.CTX_SUFF(pStats);
2076 switch (Cpu.pCurInstr->opcode)
2077 {
2078 case OP_INVLPG:
2079 STAM_COUNTER_INC(&pStats->StatInvlpg);
2080 break;
2081 case OP_IRET:
2082 STAM_COUNTER_INC(&pStats->StatIret);
2083 break;
2084 case OP_CLI:
2085 STAM_COUNTER_INC(&pStats->StatCli);
2086 emR3RecordCli(pVM, pVCpu, pCtx->rip);
2087 break;
2088 case OP_STI:
2089 STAM_COUNTER_INC(&pStats->StatSti);
2090 break;
2091 case OP_INSB:
2092 case OP_INSWD:
2093 case OP_IN:
2094 case OP_OUTSB:
2095 case OP_OUTSWD:
2096 case OP_OUT:
2097 AssertMsgFailed(("Unexpected privileged exception due to port IO\n"));
2098 break;
2099
2100 case OP_MOV_CR:
2101 if (Cpu.param1.flags & USE_REG_GEN32)
2102 {
2103 //read
2104 Assert(Cpu.param2.flags & USE_REG_CR);
2105 Assert(Cpu.param2.base.reg_ctrl <= USE_REG_CR4);
2106 STAM_COUNTER_INC(&pStats->StatMovReadCR[Cpu.param2.base.reg_ctrl]);
2107 }
2108 else
2109 {
2110 //write
2111 Assert(Cpu.param1.flags & USE_REG_CR);
2112 Assert(Cpu.param1.base.reg_ctrl <= USE_REG_CR4);
2113 STAM_COUNTER_INC(&pStats->StatMovWriteCR[Cpu.param1.base.reg_ctrl]);
2114 }
2115 break;
2116
2117 case OP_MOV_DR:
2118 STAM_COUNTER_INC(&pStats->StatMovDRx);
2119 break;
2120 case OP_LLDT:
2121 STAM_COUNTER_INC(&pStats->StatMovLldt);
2122 break;
2123 case OP_LIDT:
2124 STAM_COUNTER_INC(&pStats->StatMovLidt);
2125 break;
2126 case OP_LGDT:
2127 STAM_COUNTER_INC(&pStats->StatMovLgdt);
2128 break;
2129 case OP_SYSENTER:
2130 STAM_COUNTER_INC(&pStats->StatSysEnter);
2131 break;
2132 case OP_SYSEXIT:
2133 STAM_COUNTER_INC(&pStats->StatSysExit);
2134 break;
2135 case OP_SYSCALL:
2136 STAM_COUNTER_INC(&pStats->StatSysCall);
2137 break;
2138 case OP_SYSRET:
2139 STAM_COUNTER_INC(&pStats->StatSysRet);
2140 break;
2141 case OP_HLT:
2142 STAM_COUNTER_INC(&pStats->StatHlt);
2143 break;
2144 default:
2145 STAM_COUNTER_INC(&pStats->StatMisc);
2146 Log4(("emR3RawPrivileged: opcode=%d\n", Cpu.pCurInstr->opcode));
2147 break;
2148 }
2149#endif /* VBOX_WITH_STATISTICS */
2150 if ( (pCtx->ss & X86_SEL_RPL) == 0
2151 && !pCtx->eflags.Bits.u1VM
2152 && SELMGetCpuModeFromSelector(pVM, pCtx->eflags, pCtx->cs, &pCtx->csHid) == CPUMODE_32BIT)
2153 {
2154 uint32_t size;
2155
2156 STAM_PROFILE_START(&pVCpu->em.s.StatPrivEmu, a);
2157 switch (Cpu.pCurInstr->opcode)
2158 {
2159 case OP_CLI:
2160 pCtx->eflags.u32 &= ~X86_EFL_IF;
2161 Assert(Cpu.opsize == 1);
2162 pCtx->rip += Cpu.opsize;
2163 STAM_PROFILE_STOP(&pVCpu->em.s.StatPrivEmu, a);
2164 return VINF_EM_RESCHEDULE_REM; /* must go to the recompiler now! */
2165
2166 case OP_STI:
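/* STI inhibits interrupts until the following instruction completes; record that boundary so a pending interrupt isn't delivered one instruction too early. */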
2167 pCtx->eflags.u32 |= X86_EFL_IF;
2168 EMSetInhibitInterruptsPC(pVCpu, pCtx->rip + Cpu.opsize);
2169 Assert(Cpu.opsize == 1);
2170 pCtx->rip += Cpu.opsize;
2171 STAM_PROFILE_STOP(&pVCpu->em.s.StatPrivEmu, a);
2172 return VINF_SUCCESS;
2173
2174 case OP_HLT:
2175 if (PATMIsPatchGCAddr(pVM, (RTGCPTR)pCtx->eip))
2176 {
2177 PATMTRANSSTATE enmState;
2178 RTGCPTR pOrgInstrGC = PATMR3PatchToGCPtr(pVM, pCtx->eip, &enmState);
2179
2180 if (enmState == PATMTRANS_OVERWRITTEN)
2181 {
2182 rc = PATMR3DetectConflict(pVM, pOrgInstrGC, pOrgInstrGC);
2183 Assert(rc == VERR_PATCH_DISABLED);
2184 /* Conflict detected, patch disabled */
2185 Log(("emR3RawPrivileged: detected conflict -> disabled patch at %08RX32\n", pCtx->eip));
2186
2187 enmState = PATMTRANS_SAFE;
2188 }
2189
2190 /* The translation had better be successful. Otherwise we can't recover. */
2191 AssertReleaseMsg(pOrgInstrGC && enmState != PATMTRANS_OVERWRITTEN, ("Unable to translate instruction address at %08RX32\n", pCtx->eip));
2192 if (enmState != PATMTRANS_OVERWRITTEN)
2193 pCtx->eip = pOrgInstrGC;
2194 }
2195 /* no break; we could just return VINF_EM_HALT here */
2196
2197 case OP_MOV_CR:
2198 case OP_MOV_DR:
2199#ifdef LOG_ENABLED
2200 if (PATMIsPatchGCAddr(pVM, pCtx->eip))
2201 {
2202 DBGFR3InfoLog(pVM, "cpumguest", "PRIV");
2203 DBGFR3DisasInstrCurrentLog(pVM, "Privileged instr: ");
2204 }
2205#endif
2206
2207 rc = EMInterpretInstructionCPU(pVM, pVCpu, &Cpu, CPUMCTX2CORE(pCtx), 0, &size);
2208 if (RT_SUCCESS(rc))
2209 {
2210 pCtx->rip += Cpu.opsize;
2211#ifdef EM_NOTIFY_HWACCM
2212 if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_HWACC)
2213 HWACCMR3NotifyEmulated(pVCpu);
2214#endif
2215 STAM_PROFILE_STOP(&pVCpu->em.s.StatPrivEmu, a);
2216
2217 if ( Cpu.pCurInstr->opcode == OP_MOV_CR
2218 && Cpu.param1.flags == USE_REG_CR /* write */
2219 )
2220 {
2221 /* Deal with CR0 updates inside patch code that force
2222 * us to go to the recompiler.
2223 */
2224 if ( PATMIsPatchGCAddr(pVM, pCtx->rip)
2225 && (pCtx->cr0 & (X86_CR0_WP|X86_CR0_PG|X86_CR0_PE)) != (X86_CR0_WP|X86_CR0_PG|X86_CR0_PE))
2226 {
2227 PATMTRANSSTATE enmState;
2228 RTGCPTR pOrgInstrGC = PATMR3PatchToGCPtr(pVM, pCtx->rip, &enmState);
2229
2230 Log(("Force recompiler switch due to cr0 (%RGp) update rip=%RGv -> %RGv (enmState=%d)\n", pCtx->cr0, pCtx->rip, pOrgInstrGC, enmState));
2231 if (enmState == PATMTRANS_OVERWRITTEN)
2232 {
2233 rc = PATMR3DetectConflict(pVM, pOrgInstrGC, pOrgInstrGC);
2234 Assert(rc == VERR_PATCH_DISABLED);
2235 /* Conflict detected, patch disabled */
2236 Log(("emR3RawPrivileged: detected conflict -> disabled patch at %RGv\n", (RTGCPTR)pCtx->rip));
2237 enmState = PATMTRANS_SAFE;
2238 }
2239 /* The translation had better be successful. Otherwise we can't recover. */
2240 AssertReleaseMsg(pOrgInstrGC && enmState != PATMTRANS_OVERWRITTEN, ("Unable to translate instruction address at %RGv\n", (RTGCPTR)pCtx->rip));
2241 if (enmState != PATMTRANS_OVERWRITTEN)
2242 pCtx->rip = pOrgInstrGC;
2243 }
2244
2245 /* Reschedule is necessary as the execution/paging mode might have changed. */
2246 return VINF_EM_RESCHEDULE;
2247 }
2248 return rc; /* can return VINF_EM_HALT as well. */
2249 }
2250 AssertMsgReturn(rc == VERR_EM_INTERPRETER, ("%Rrc\n", rc), rc);
2251 break; /* fall back to the recompiler */
2252 }
2253 STAM_PROFILE_STOP(&pVCpu->em.s.StatPrivEmu, a);
2254 }
2255 }
2256
2257 if (PATMIsPatchGCAddr(pVM, pCtx->eip))
2258 return emR3PatchTrap(pVM, pVCpu, pCtx, VINF_PATM_PATCH_TRAP_GP);
2259
2260 return emR3RawExecuteInstruction(pVM, pVCpu, "PRIV");
2261}
2262
2263
2264/**
2265 * Update the forced rawmode execution modifier.
2266 *
2267 * This function is called when we're returning from the raw-mode loop(s). If we're
2268 * in patch code, it will set a flag forcing execution to be resumed in raw mode;
2269 * if not in patch code, the flag will be cleared.
2270 *
2271 * We should never interrupt patch code while it's being executed. Cli patches can
2272 * contain big code blocks, but they are always executed with IF=0. Other patches
2273 * replace single instructions and should be atomic.
2274 *
2275 * @returns Updated rc.
2276 *
2277 * @param pVM The VM handle.
2278 * @param pVCpu The VMCPU handle.
2279 * @param pCtx The guest CPU context.
2280 * @param rc The result code.
2281 */
2282DECLINLINE(int) emR3RawUpdateForceFlag(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, int rc)
2283{
2284 if (PATMIsPatchGCAddr(pVM, pCtx->eip)) /** @todo check cs selector base/type */
2285 {
2286 /* ignore reschedule attempts. */
2287 switch (rc)
2288 {
2289 case VINF_EM_RESCHEDULE:
2290 case VINF_EM_RESCHEDULE_REM:
2291 LogFlow(("emR3RawUpdateForceFlag: patch address -> force raw reschedule\n"));
2292 rc = VINF_SUCCESS;
2293 break;
2294 }
2295 pVCpu->em.s.fForceRAW = true;
2296 }
2297 else
2298 pVCpu->em.s.fForceRAW = false;
2299 return rc;
2300}
2301
2302
2303/**
2304 * Process a subset of the raw-mode return code.
2305 *
2306 * Since we have to share this with raw-mode single stepping, this inline
2307 * function has been created to avoid code duplication.
2308 *
2309 * @returns VINF_SUCCESS if it's ok to continue raw mode.
2310 * @returns VBox status code to return to the EM main loop.
2311 *
2312 * @param pVM The VM handle
2313 * @param pVCpu The VMCPU handle
2314 * @param rc The return code.
2315 * @param pCtx The guest cpu context.
2316 */
2317DECLINLINE(int) emR3RawHandleRC(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, int rc)
2318{
2319 switch (rc)
2320 {
2321 /*
2322 * Common & simple ones.
2323 */
2324 case VINF_SUCCESS:
2325 break;
2326 case VINF_EM_RESCHEDULE_RAW:
2327 case VINF_EM_RESCHEDULE_HWACC:
2328 case VINF_EM_RAW_INTERRUPT:
2329 case VINF_EM_RAW_TO_R3:
2330 case VINF_EM_RAW_TIMER_PENDING:
2331 case VINF_EM_PENDING_REQUEST:
2332 rc = VINF_SUCCESS;
2333 break;
2334
2335 /*
2336 * Privileged instruction.
2337 */
2338 case VINF_EM_RAW_EXCEPTION_PRIVILEGED:
2339 case VINF_PATM_PATCH_TRAP_GP:
2340 rc = emR3RawPrivileged(pVM, pVCpu);
2341 break;
2342
2343 /*
2344 * Got a trap which needs dispatching.
2345 */
2346 case VINF_EM_RAW_GUEST_TRAP:
2347 if (PATMR3IsInsidePatchJump(pVM, pCtx->eip, NULL))
2348 {
2349 AssertReleaseMsgFailed(("FATAL ERROR: executing random instruction inside generated patch jump %08X\n", CPUMGetGuestEIP(pVCpu)));
2350 rc = VERR_EM_RAW_PATCH_CONFLICT;
2351 break;
2352 }
2353 rc = emR3RawGuestTrap(pVM, pVCpu);
2354 break;
2355
2356 /*
2357 * Trap in patch code.
2358 */
2359 case VINF_PATM_PATCH_TRAP_PF:
2360 case VINF_PATM_PATCH_INT3:
2361 rc = emR3PatchTrap(pVM, pVCpu, pCtx, rc);
2362 break;
2363
2364 case VINF_PATM_DUPLICATE_FUNCTION:
2365 Assert(PATMIsPatchGCAddr(pVM, (RTGCPTR)pCtx->eip));
2366 rc = PATMR3DuplicateFunctionRequest(pVM, pCtx);
2367 AssertRC(rc);
2368 rc = VINF_SUCCESS;
2369 break;
2370
2371 case VINF_PATM_CHECK_PATCH_PAGE:
2372 rc = PATMR3HandleMonitoredPage(pVM);
2373 AssertRC(rc);
2374 rc = VINF_SUCCESS;
2375 break;
2376
2377 /*
2378 * Patch manager.
2379 */
2380 case VERR_EM_RAW_PATCH_CONFLICT:
2381 AssertReleaseMsgFailed(("%Rrc handling is not yet implemented\n", rc));
2382 break;
2383
2384#ifdef VBOX_WITH_VMI
2385 /*
2386 * PARAV function.
2387 */
2388 case VINF_EM_RESCHEDULE_PARAV:
2389 rc = PARAVCallFunction(pVM);
2390 break;
2391#endif
2392
2393 /*
2394 * Memory mapped I/O access - attempt to patch the instruction
2395 */
2396 case VINF_PATM_HC_MMIO_PATCH_READ:
2397 rc = PATMR3InstallPatch(pVM, SELMToFlat(pVM, DIS_SELREG_CS, CPUMCTX2CORE(pCtx), pCtx->eip),
2398 PATMFL_MMIO_ACCESS | ((SELMGetCpuModeFromSelector(pVM, pCtx->eflags, pCtx->cs, &pCtx->csHid) == CPUMODE_32BIT) ? PATMFL_CODE32 : 0));
2399 if (RT_FAILURE(rc))
2400 rc = emR3RawExecuteInstruction(pVM, pVCpu, "MMIO");
2401 break;
2402
2403 case VINF_PATM_HC_MMIO_PATCH_WRITE:
2404 AssertFailed(); /* not yet implemented. */
2405 rc = emR3RawExecuteInstruction(pVM, pVCpu, "MMIO");
2406 break;
2407
2408 /*
2409 * Conflict or out of page tables.
2410 *
2411 * VM_FF_PGM_SYNC_CR3 is set by the hypervisor and all we need to
2412 * do here is to execute the pending forced actions.
2413 */
2414 case VINF_PGM_SYNC_CR3:
2415 AssertMsg(VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL),
2416 ("VINF_PGM_SYNC_CR3 and no VMCPU_FF_PGM_SYNC_CR3*!\n"));
2417 rc = VINF_SUCCESS;
2418 break;
2419
2420 /*
2421 * Paging mode change.
2422 */
2423 case VINF_PGM_CHANGE_MODE:
2424 rc = PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
2425 if (rc == VINF_SUCCESS)
2426 rc = VINF_EM_RESCHEDULE;
2427 AssertMsg(RT_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST), ("%Rrc\n", rc));
2428 break;
2429
2430 /*
2431 * CSAM wants to perform a task in ring-3. It has set an FF action flag.
2432 */
2433 case VINF_CSAM_PENDING_ACTION:
2434 rc = VINF_SUCCESS;
2435 break;
2436
2437 /*
2438 * Invoked Interrupt gate - must directly (!) go to the recompiler.
2439 */
2440 case VINF_EM_RAW_INTERRUPT_PENDING:
2441 case VINF_EM_RAW_RING_SWITCH_INT:
2442 Assert(TRPMHasTrap(pVCpu));
2443 Assert(!PATMIsPatchGCAddr(pVM, (RTGCPTR)pCtx->eip));
2444
2445 if (TRPMHasTrap(pVCpu))
2446 {
2447 /* If the guest gate is marked unpatched, then we will check again if we can patch it. */
2448 uint8_t u8Interrupt = TRPMGetTrapNo(pVCpu);
2449 if (TRPMR3GetGuestTrapHandler(pVM, u8Interrupt) == TRPM_INVALID_HANDLER)
2450 {
2451 CSAMR3CheckGates(pVM, u8Interrupt, 1);
2452 Log(("emR3RawHandleRC: recheck gate %x -> valid=%d\n", u8Interrupt, TRPMR3GetGuestTrapHandler(pVM, u8Interrupt) != TRPM_INVALID_HANDLER));
2453 /* Note: If it was successful, then we could go back to raw mode, but let's keep things simple for now. */
2454 }
2455 }
2456 rc = VINF_EM_RESCHEDULE_REM;
2457 break;
2458
2459 /*
2460 * Other ring switch types.
2461 */
2462 case VINF_EM_RAW_RING_SWITCH:
2463 rc = emR3RawRingSwitch(pVM, pVCpu);
2464 break;
2465
2466 /*
2467 * REMGCNotifyInvalidatePage() failed because of overflow.
2468 */
2469 case VERR_REM_FLUSHED_PAGES_OVERFLOW:
2470 Assert((pCtx->ss & X86_SEL_RPL) != 1);
2471 emR3RemLock(pVM);
2472 REMR3ReplayInvalidatedPages(pVM, pVCpu);
2473 emR3RemUnlock(pVM);
2474 rc = VINF_SUCCESS;
2475 break;
2476
2477 /*
2478 * I/O Port access - emulate the instruction.
2479 */
2480 case VINF_IOM_HC_IOPORT_READ:
2481 case VINF_IOM_HC_IOPORT_WRITE:
2482 rc = emR3RawExecuteIOInstruction(pVM, pVCpu);
2483 break;
2484
2485 /*
2486 * Memory mapped I/O access - emulate the instruction.
2487 */
2488 case VINF_IOM_HC_MMIO_READ:
2489 case VINF_IOM_HC_MMIO_WRITE:
2490 case VINF_IOM_HC_MMIO_READ_WRITE:
2491 rc = emR3RawExecuteInstruction(pVM, pVCpu, "MMIO");
2492 break;
2493
2494 /*
2495 * (MM)IO intensive code block detected; fall back to the recompiler for better performance
2496 */
2497 case VINF_EM_RAW_EMULATE_IO_BLOCK:
2498 rc = HWACCMR3EmulateIoBlock(pVM, pCtx);
2499 break;
2500
2501 /*
2502 * Execute instruction.
2503 */
2504 case VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT:
2505 rc = emR3RawExecuteInstruction(pVM, pVCpu, "LDT FAULT: ");
2506 break;
2507 case VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT:
2508 rc = emR3RawExecuteInstruction(pVM, pVCpu, "GDT FAULT: ");
2509 break;
2510 case VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT:
2511 rc = emR3RawExecuteInstruction(pVM, pVCpu, "IDT FAULT: ");
2512 break;
2513 case VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT:
2514 rc = emR3RawExecuteInstruction(pVM, pVCpu, "TSS FAULT: ");
2515 break;
2516 case VINF_EM_RAW_EMULATE_INSTR_PD_FAULT:
2517 rc = emR3RawExecuteInstruction(pVM, pVCpu, "PD FAULT: ");
2518 break;
2519
2520 case VINF_EM_RAW_EMULATE_INSTR_HLT:
2521 /** @todo skip instruction and go directly to the halt state. (see REM for implementation details) */
2522 rc = emR3RawPrivileged(pVM, pVCpu);
2523 break;
2524
2525 case VINF_PATM_PENDING_IRQ_AFTER_IRET:
2526 rc = emR3RawExecuteInstruction(pVM, pVCpu, "EMUL: ", VINF_PATM_PENDING_IRQ_AFTER_IRET);
2527 break;
2528
2529 case VINF_EM_RAW_EMULATE_INSTR:
2530 case VINF_PATCH_EMULATE_INSTR:
2531 rc = emR3RawExecuteInstruction(pVM, pVCpu, "EMUL: ");
2532 break;
2533
2534 /*
2535 * Stale selector and iret traps => REM.
2536 */
2537 case VINF_EM_RAW_STALE_SELECTOR:
2538 case VINF_EM_RAW_IRET_TRAP:
2539 /* We will not go to the recompiler if EIP points to patch code. */
2540 if (PATMIsPatchGCAddr(pVM, pCtx->eip))
2541 {
2542 pCtx->eip = PATMR3PatchToGCPtr(pVM, (RTGCPTR)pCtx->eip, 0);
2543 }
2544 LogFlow(("emR3RawHandleRC: %Rrc -> %Rrc\n", rc, VINF_EM_RESCHEDULE_REM));
2545 rc = VINF_EM_RESCHEDULE_REM;
2546 break;
2547
2548 /*
2549 * Up a level.
2550 */
2551 case VINF_EM_TERMINATE:
2552 case VINF_EM_OFF:
2553 case VINF_EM_RESET:
2554 case VINF_EM_SUSPEND:
2555 case VINF_EM_HALT:
2556 case VINF_EM_RESUME:
2557 case VINF_EM_NO_MEMORY:
2558 case VINF_EM_RESCHEDULE:
2559 case VINF_EM_RESCHEDULE_REM:
2560 break;
2561
2562 /*
2563 * Up a level and invoke the debugger.
2564 */
2565 case VINF_EM_DBG_STEPPED:
2566 case VINF_EM_DBG_BREAKPOINT:
2567 case VINF_EM_DBG_STEP:
2568 case VINF_EM_DBG_HYPER_BREAKPOINT:
2569 case VINF_EM_DBG_HYPER_STEPPED:
2570 case VINF_EM_DBG_HYPER_ASSERTION:
2571 case VINF_EM_DBG_STOP:
2572 break;
2573
2574 /*
2575 * Up a level, dump and debug.
2576 */
2577 case VERR_TRPM_DONT_PANIC:
2578 case VERR_TRPM_PANIC:
2579 case VERR_VMM_RING0_ASSERTION:
2580 break;
2581
2582 /*
2583 * Up a level, after HWACCM has done some release logging.
2584 */
2585 case VERR_VMX_INVALID_VMCS_FIELD:
2586 case VERR_VMX_INVALID_VMCS_PTR:
2587 case VERR_VMX_INVALID_VMXON_PTR:
2588 case VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_CODE:
2589 case VERR_VMX_UNEXPECTED_EXCEPTION:
2590 case VERR_VMX_UNEXPECTED_EXIT_CODE:
2591 case VERR_VMX_INVALID_GUEST_STATE:
2592 case VERR_VMX_UNABLE_TO_START_VM:
2593 case VERR_VMX_UNABLE_TO_RESUME_VM:
2594 HWACCMR3CheckError(pVM, rc);
2595 break;
2596 /*
2597 * Anything which is not known to us means an internal error
2598 * and the termination of the VM!
2599 */
2600 default:
2601 AssertMsgFailed(("Unknown GC return code: %Rra\n", rc));
2602 break;
2603 }
2604 return rc;
2605}
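
/*
 * Illustrative sketch (not built): the canonical sequence the raw-mode paths
 * in this file run a GC status code through. emR3RawStep, emR3RawResumeHyper
 * and emR3RawExecute all funnel rc through these three helpers in this order.
 */
#if 0
    rc = emR3HighPriorityPostForcedActions(pVM, pVCpu, rc); /* high priority post-execution FFs */
    rc = emR3RawHandleRC(pVM, pVCpu, pCtx, rc);             /* dispatch traps, emulate, reschedule */
    rc = emR3RawUpdateForceFlag(pVM, pVCpu, pCtx, rc);      /* force raw mode while inside patch code */
#endif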
2606
2607
2608/**
2609 * Check for pending raw actions.
2610 *
2611 * @returns VBox status code. May return VINF_EM_NO_MEMORY but none of the other
2612 * EM statuses.
2613 * @param pVM The VM to operate on.
2614 * @param pVCpu The VMCPU handle.
2615 */
2616VMMR3DECL(int) EMR3CheckRawForcedActions(PVM pVM, PVMCPU pVCpu)
2617{
2618 return emR3RawForcedActions(pVM, pVCpu, pVCpu->em.s.pCtx);
2619}
2620
2621
2622/**
2623 * Process raw-mode specific forced actions.
2624 *
2625 * This function is called when any of the FFs in VM_FF_HIGH_PRIORITY_PRE_RAW_MASK are pending.
2626 *
2627 * @returns VBox status code. May return VINF_EM_NO_MEMORY but none of the other
2628 * EM statuses.
2629 * @param pVM The VM handle.
2630 * @param pVCpu The VMCPU handle.
2631 * @param pCtx The guest CPUM register context.
2632 */
2633static int emR3RawForcedActions(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
2634{
2635 /*
2636 * Note that the order is *vitally* important!
2637 * Also note that SELMR3UpdateFromCPUM may trigger VM_FF_SELM_SYNC_TSS.
2638 */
2639
2640
2641 /*
2642 * Sync selector tables.
2643 */
2644 if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT))
2645 {
2646 int rc = SELMR3UpdateFromCPUM(pVM, pVCpu);
2647 if (RT_FAILURE(rc))
2648 return rc;
2649 }
2650
2651 /*
2652 * Sync IDT.
2653 *
2654 * The CSAMR3CheckGates call in TRPMR3SyncIDT may call PGMPrefetchPage
2655 * and PGMShwModifyPage, so we're in for trouble if for instance a
2656 * PGMSyncCR3+pgmPoolClearAll is pending.
2657 */
2658 if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_TRPM_SYNC_IDT))
2659 {
2660 if ( VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3)
2661 && EMIsRawRing0Enabled(pVM)
2662 && CSAMIsEnabled(pVM))
2663 {
2664 int rc = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4, VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
2665 if (RT_FAILURE(rc))
2666 return rc;
2667 }
2668
2669 int rc = TRPMR3SyncIDT(pVM, pVCpu);
2670 if (RT_FAILURE(rc))
2671 return rc;
2672 }
2673
2674 /*
2675 * Sync TSS.
2676 */
2677 if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_SELM_SYNC_TSS))
2678 {
2679 int rc = SELMR3SyncTSS(pVM, pVCpu);
2680 if (RT_FAILURE(rc))
2681 return rc;
2682 }
2683
2684 /*
2685 * Sync page directory.
2686 */
2687 if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
2688 {
2689 int rc = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4, VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
2690 if (RT_FAILURE(rc))
2691 return rc;
2692
2693 Assert(!VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT));
2694
2695 /* Prefetch pages for EIP and ESP. */
2696 /** @todo This is rather expensive. Should investigate if it really helps at all. */
2697 rc = PGMPrefetchPage(pVCpu, SELMToFlat(pVM, DIS_SELREG_CS, CPUMCTX2CORE(pCtx), pCtx->rip));
2698 if (rc == VINF_SUCCESS)
2699 rc = PGMPrefetchPage(pVCpu, SELMToFlat(pVM, DIS_SELREG_SS, CPUMCTX2CORE(pCtx), pCtx->rsp));
2700 if (rc != VINF_SUCCESS)
2701 {
2702 if (rc != VINF_PGM_SYNC_CR3)
2703 {
2704 AssertLogRelMsgReturn(RT_FAILURE(rc), ("%Rrc\n", rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
2705 return rc;
2706 }
2707 rc = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4, VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
2708 if (RT_FAILURE(rc))
2709 return rc;
2710 }
2711 /** @todo maybe prefetch the supervisor stack page as well */
2712 Assert(!VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT));
2713 }
2714
2715 /*
2716 * Allocate handy pages (just in case the above actions have consumed some pages).
2717 */
2718 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PGM_NEED_HANDY_PAGES, VM_FF_PGM_NO_MEMORY))
2719 {
2720 int rc = PGMR3PhysAllocateHandyPages(pVM);
2721 if (RT_FAILURE(rc))
2722 return rc;
2723 }
2724
2725 /*
2726 * Check whether we're out of memory now.
2727 *
2728 * This may stem from some of the above actions or from operations that have been
2729 * executed since we last ran the FFs. Allocating handy pages, for instance, must
2730 * always be followed by this check.
2731 */
2732 if (VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY))
2733 return VINF_EM_NO_MEMORY;
2734
2735 return VINF_SUCCESS;
2736}
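
/*
 * Summary of the order-sensitive steps in emR3RawForcedActions above:
 *   1. SELMR3UpdateFromCPUM - sync GDT/LDT (may raise VMCPU_FF_SELM_SYNC_TSS).
 *   2. TRPMR3SyncIDT        - sync the IDT, possibly after a CR3 sync.
 *   3. SELMR3SyncTSS        - sync the TSS.
 *   4. PGMSyncCR3           - sync the page directory, then prefetch EIP/ESP pages.
 *   5. PGMR3PhysAllocateHandyPages, followed by the final out-of-memory check.
 */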
2737
2738
2739/**
2740 * Executes raw code.
2741 *
2742 * This function contains the raw-mode version of the inner
2743 * execution loop (the outer loop being in EMR3ExecuteVM()).
2744 *
2745 * @returns VBox status code. The most important ones are: VINF_EM_RESCHEDULE,
2746 * VINF_EM_RESCHEDULE_REM, VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
2747 *
2748 * @param pVM VM handle.
2749 * @param pVCpu VMCPU handle.
2750 * @param pfFFDone Where to store an indicator telling whether or not
2751 * FFs were done before returning.
2752 */
2753static int emR3RawExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
2754{
2755 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatRAWTotal, a);
2756
2757 int rc = VERR_INTERNAL_ERROR;
2758 PCPUMCTX pCtx = pVCpu->em.s.pCtx;
2759 LogFlow(("emR3RawExecute: (cs:eip=%04x:%08x)\n", pCtx->cs, pCtx->eip));
2760 pVCpu->em.s.fForceRAW = false;
2761 *pfFFDone = false;
2762
2763
2764 /*
2765 *
2766 * Spin till we get a forced action or raw mode status code resulting in
2767 * anything but VINF_SUCCESS or VINF_EM_RESCHEDULE_RAW.
2768 *
2769 */
2770 for (;;)
2771 {
2772 STAM_PROFILE_ADV_START(&pVCpu->em.s.StatRAWEntry, b);
2773
2774 /*
2775 * Check various preconditions.
2776 */
2777#ifdef VBOX_STRICT
2778 Assert(REMR3QueryPendingInterrupt(pVM, pVCpu) == REM_NO_PENDING_IRQ);
2779 Assert(pCtx->eflags.Bits.u1VM || (pCtx->ss & X86_SEL_RPL) == 3 || (pCtx->ss & X86_SEL_RPL) == 0);
2780 AssertMsg( (pCtx->eflags.u32 & X86_EFL_IF)
2781 || PATMShouldUseRawMode(pVM, (RTGCPTR)pCtx->eip),
2782 ("Tried to execute code with IF at EIP=%08x!\n", pCtx->eip));
2783 if ( !VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)
2784 && PGMMapHasConflicts(pVM))
2785 {
2786 PGMMapCheck(pVM);
2787 AssertMsgFailed(("We should not get conflicts any longer!!!\n"));
2788 return VERR_INTERNAL_ERROR;
2789 }
2790#endif /* VBOX_STRICT */
2791
2792 /*
2793 * Process high priority pre-execution raw-mode FFs.
2794 */
2795 if ( VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_RAW_MASK)
2796 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK))
2797 {
2798 rc = emR3RawForcedActions(pVM, pVCpu, pCtx);
2799 if (rc != VINF_SUCCESS)
2800 break;
2801 }
2802
2803 /*
2804 * If we're going to execute ring-0 code, the guest state needs to
2805 * be modified a bit and some of the state components (IF, SS/CS RPL,
2806 * and perhaps EIP) need to be stored with PATM.
2807 */
2808 rc = CPUMRawEnter(pVCpu, NULL);
2809 if (rc != VINF_SUCCESS)
2810 {
2811 STAM_PROFILE_ADV_STOP(&pVCpu->em.s.StatRAWEntry, b);
2812 break;
2813 }
2814
2815 /*
2816 * Scan code before executing it. Don't bother with user-mode or V86 code.
2817 */
2818 if ( (pCtx->ss & X86_SEL_RPL) <= 1
2819 && !pCtx->eflags.Bits.u1VM
2820 && !PATMIsPatchGCAddr(pVM, pCtx->eip))
2821 {
2822 STAM_PROFILE_ADV_SUSPEND(&pVCpu->em.s.StatRAWEntry, b);
2823 CSAMR3CheckCodeEx(pVM, CPUMCTX2CORE(pCtx), pCtx->eip);
2824 STAM_PROFILE_ADV_RESUME(&pVCpu->em.s.StatRAWEntry, b);
2825 if ( VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_RAW_MASK)
2826 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK))
2827 {
2828 rc = emR3RawForcedActions(pVM, pVCpu, pCtx);
2829 if (rc != VINF_SUCCESS)
2830 {
2831 rc = CPUMRawLeave(pVCpu, NULL, rc);
2832 break;
2833 }
2834 }
2835 }
2836
2837#ifdef LOG_ENABLED
2838 /*
2839 * Log important stuff before entering GC.
2840 */
2841 PPATMGCSTATE pGCState = PATMR3QueryGCStateHC(pVM);
2842 if (pCtx->eflags.Bits.u1VM)
2843 Log(("RV86: %04X:%08X IF=%d VMFlags=%x\n", pCtx->cs, pCtx->eip, pCtx->eflags.Bits.u1IF, pGCState->uVMFlags));
2844 else if ((pCtx->ss & X86_SEL_RPL) == 1)
2845 {
2846 bool fCSAMScanned = CSAMIsPageScanned(pVM, (RTGCPTR)pCtx->eip);
2847 Log(("RR0: %08X ESP=%08X IF=%d VMFlags=%x PIF=%d CPL=%d (Scanned=%d)\n", pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF, pGCState->uVMFlags, pGCState->fPIF, (pCtx->ss & X86_SEL_RPL), fCSAMScanned));
2848 }
2849 else if ((pCtx->ss & X86_SEL_RPL) == 3)
2850 Log(("RR3: %08X ESP=%08X IF=%d VMFlags=%x\n", pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF, pGCState->uVMFlags));
2851#endif /* LOG_ENABLED */
2852
2853
2854
2855 /*
2856 * Execute the code.
2857 */
2858 STAM_PROFILE_ADV_STOP(&pVCpu->em.s.StatRAWEntry, b);
2859 STAM_PROFILE_START(&pVCpu->em.s.StatRAWExec, c);
2860 rc = VMMR3RawRunGC(pVM, pVCpu);
2861 STAM_PROFILE_STOP(&pVCpu->em.s.StatRAWExec, c);
2862 STAM_PROFILE_ADV_START(&pVCpu->em.s.StatRAWTail, d);
2863
2864 LogFlow(("RR0-E: %08X ESP=%08X IF=%d VMFlags=%x PIF=%d CPL=%d\n", pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF, pGCState->uVMFlags, pGCState->fPIF, (pCtx->ss & X86_SEL_RPL)));
2865 LogFlow(("VMMR3RawRunGC returned %Rrc\n", rc));
2866
2867
2868
2869 /*
2870 * Restore the real CPU state and deal with high priority post
2871 * execution FFs before doing anything else.
2872 */
2873 rc = CPUMRawLeave(pVCpu, NULL, rc);
2874 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_RESUME_GUEST_MASK);
2875 if ( VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_POST_MASK)
2876 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_MASK))
2877 rc = emR3HighPriorityPostForcedActions(pVM, pVCpu, rc);
2878
2879#ifdef VBOX_STRICT
2880 /*
2881 * Assert TSS consistency & rc vs patch code.
2882 */
2883 if ( !VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_SELM_SYNC_TSS | VMCPU_FF_SELM_SYNC_GDT) /* GDT implies TSS at the moment. */
2884 && EMIsRawRing0Enabled(pVM))
2885 SELMR3CheckTSS(pVM);
2886 switch (rc)
2887 {
2888 case VINF_SUCCESS:
2889 case VINF_EM_RAW_INTERRUPT:
2890 case VINF_PATM_PATCH_TRAP_PF:
2891 case VINF_PATM_PATCH_TRAP_GP:
2892 case VINF_PATM_PATCH_INT3:
2893 case VINF_PATM_CHECK_PATCH_PAGE:
2894 case VINF_EM_RAW_EXCEPTION_PRIVILEGED:
2895 case VINF_EM_RAW_GUEST_TRAP:
2896 case VINF_EM_RESCHEDULE_RAW:
2897 break;
2898
2899 default:
2900 if (PATMIsPatchGCAddr(pVM, pCtx->eip) && !(pCtx->eflags.u32 & X86_EFL_TF))
2901 LogIt(NULL, 0, LOG_GROUP_PATM, ("Patch code interrupted at %RRv for reason %Rrc\n", (RTRCPTR)CPUMGetGuestEIP(pVCpu), rc));
2902 break;
2903 }
2904 /*
2905 * Let's go paranoid!
2906 */
2907 if ( !VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)
2908 && PGMMapHasConflicts(pVM))
2909 {
2910 PGMMapCheck(pVM);
2911 AssertMsgFailed(("We should not get conflicts any longer!!! rc=%Rrc\n", rc));
2912 return VERR_INTERNAL_ERROR;
2913 }
2914#endif /* VBOX_STRICT */
2915
2916 /*
2917 * Process the returned status code.
2918 */
2919 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
2920 {
2921 STAM_PROFILE_ADV_STOP(&pVCpu->em.s.StatRAWTail, d);
2922 break;
2923 }
2924 rc = emR3RawHandleRC(pVM, pVCpu, pCtx, rc);
2925 if (rc != VINF_SUCCESS)
2926 {
2927 rc = emR3RawUpdateForceFlag(pVM, pVCpu, pCtx, rc);
2928 if (rc != VINF_SUCCESS)
2929 {
2930 STAM_PROFILE_ADV_STOP(&pVCpu->em.s.StatRAWTail, d);
2931 break;
2932 }
2933 }
2934
2935 /*
2936 * Check and execute forced actions.
2937 */
2938#ifdef VBOX_HIGH_RES_TIMERS_HACK
2939 TMTimerPoll(pVM);
2940#endif
2941 STAM_PROFILE_ADV_STOP(&pVCpu->em.s.StatRAWTail, d);
2942 if ( VM_FF_ISPENDING(pVM, ~VM_FF_HIGH_PRIORITY_PRE_RAW_MASK | VM_FF_PGM_NO_MEMORY)
2943 || VMCPU_FF_ISPENDING(pVCpu, ~VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK))
2944 {
2945 Assert(pCtx->eflags.Bits.u1VM || (pCtx->ss & X86_SEL_RPL) != 1);
2946
2947 STAM_REL_PROFILE_ADV_SUSPEND(&pVCpu->em.s.StatRAWTotal, a);
2948 rc = emR3ForcedActions(pVM, pVCpu, rc);
2949 STAM_REL_PROFILE_ADV_RESUME(&pVCpu->em.s.StatRAWTotal, a);
2950 if ( rc != VINF_SUCCESS
2951 && rc != VINF_EM_RESCHEDULE_RAW)
2952 {
2953 rc = emR3RawUpdateForceFlag(pVM, pVCpu, pCtx, rc);
2954 if (rc != VINF_SUCCESS)
2955 {
2956 *pfFFDone = true;
2957 break;
2958 }
2959 }
2960 }
2961 }
2962
2963 /*
2964 * Return to outer loop.
2965 */
2966#if defined(LOG_ENABLED) && defined(DEBUG)
2967 RTLogFlush(NULL);
2968#endif
2969 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatRAWTotal, a);
2970 return rc;
2971}
2972
2973
2974/**
2975 * Executes hardware accelerated raw code. (Intel VMX & AMD SVM)
2976 *
2977 * This function contains the hardware accelerated raw-mode version of the
2978 * inner execution loop (the outer loop being in EMR3ExecuteVM()).
2979 *
2980 * @returns VBox status code. The most important ones are: VINF_EM_RESCHEDULE, VINF_EM_RESCHEDULE_RAW,
2981 * VINF_EM_RESCHEDULE_REM, VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
2982 *
2983 * @param pVM VM handle.
2984 * @param pVCpu VMCPU handle.
2985 * @param pfFFDone Where to store an indicator telling whether or not
2986 * FFs were done before returning.
2987 */
2988static int emR3HwAccExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
2989{
2990 int rc = VERR_INTERNAL_ERROR;
2991 PCPUMCTX pCtx = pVCpu->em.s.pCtx;
2992
2993 LogFlow(("emR3HwAccExecute%d: (cs:eip=%04x:%RGv)\n", pVCpu->idCpu, pCtx->cs, (RTGCPTR)pCtx->rip));
2994 *pfFFDone = false;
2995
2996 STAM_COUNTER_INC(&pVCpu->em.s.StatHwAccExecuteEntry);
2997
2998#ifdef EM_NOTIFY_HWACCM
2999 HWACCMR3NotifyScheduled(pVCpu);
3000#endif
3001
3002 /*
3003 * Spin till we get a forced action which returns anything but VINF_SUCCESS.
3004 */
3005 for (;;)
3006 {
3007 STAM_PROFILE_ADV_START(&pVCpu->em.s.StatHwAccEntry, a);
3008
3009 /*
3010 * Process high priority pre-execution raw-mode FFs.
3011 */
3012 VMCPU_FF_CLEAR(pVCpu, (VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT | VMCPU_FF_TRPM_SYNC_IDT | VMCPU_FF_SELM_SYNC_TSS)); /* not relevant in HWACCM mode; shouldn't be set really. */
3013 if ( VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_RAW_MASK)
3014 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK))
3015 {
3016 rc = emR3RawForcedActions(pVM, pVCpu, pCtx);
3017 if (rc != VINF_SUCCESS)
3018 break;
3019 }
3020
3021#ifdef LOG_ENABLED
3022 /*
3023 * Log important stuff before entering GC.
3024 */
3025 if (TRPMHasTrap(pVCpu))
3026 Log(("Pending hardware interrupt=0x%x cs:rip=%04X:%RGv\n", TRPMGetTrapNo(pVCpu), pCtx->cs, (RTGCPTR)pCtx->rip));
3027
3028 uint32_t cpl = CPUMGetGuestCPL(pVCpu, CPUMCTX2CORE(pCtx));
3029 if (pCtx->eflags.Bits.u1VM)
3030 Log(("HWV86: %08X IF=%d\n", pCtx->eip, pCtx->eflags.Bits.u1IF));
3031 else if (CPUMIsGuestIn64BitCodeEx(pCtx))
3032 Log(("HWR%d: %04X:%RGv ESP=%RGv IF=%d IOPL=%d CR0=%x CR4=%x EFER=%x\n", cpl, pCtx->cs, (RTGCPTR)pCtx->rip, pCtx->rsp, pCtx->eflags.Bits.u1IF, pCtx->eflags.Bits.u2IOPL, (uint32_t)pCtx->cr0, (uint32_t)pCtx->cr4, (uint32_t)pCtx->msrEFER));
3033 else
3034 Log(("HWR%d: %04X:%08X ESP=%08X IF=%d IOPL=%d CR0=%x CR4=%x EFER=%x\n", cpl, pCtx->cs, pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF, pCtx->eflags.Bits.u2IOPL, (uint32_t)pCtx->cr0, (uint32_t)pCtx->cr4, (uint32_t)pCtx->msrEFER));
3035#endif /* LOG_ENABLED */
3036
3037 /*
3038 * Execute the code.
3039 */
3040 STAM_PROFILE_ADV_STOP(&pVCpu->em.s.StatHwAccEntry, a);
3041 STAM_PROFILE_START(&pVCpu->em.s.StatHwAccExec, x);
3042 rc = VMMR3HwAccRunGC(pVM, pVCpu);
3043 STAM_PROFILE_STOP(&pVCpu->em.s.StatHwAccExec, x);
3044
3045 /*
3046 * Deal with high priority post execution FFs before doing anything else.
3047 */
3048 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_RESUME_GUEST_MASK);
3049 if ( VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_POST_MASK)
3050 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_MASK))
3051 rc = emR3HighPriorityPostForcedActions(pVM, pVCpu, rc);
3052
3053 /*
3054 * Process the returned status code.
3055 */
3056 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
3057 break;
3058
3059 rc = emR3RawHandleRC(pVM, pVCpu, pCtx, rc);
3060 if (rc != VINF_SUCCESS)
3061 break;
3062
3063 /*
3064 * Check and execute forced actions.
3065 */
3066#ifdef VBOX_HIGH_RES_TIMERS_HACK
3067 TMTimerPoll(pVM);
3068#endif
3069 if (VM_FF_ISPENDING(pVM, VM_FF_ALL_MASK))
3070 {
3071 rc = emR3ForcedActions(pVM, pVCpu, rc);
3072 if ( rc != VINF_SUCCESS
3073 && rc != VINF_EM_RESCHEDULE_HWACC)
3074 {
3075 *pfFFDone = true;
3076 break;
3077 }
3078 }
3079 }
3080
3081 /*
3082 * Return to outer loop.
3083 */
3084#if defined(LOG_ENABLED) && defined(DEBUG)
3085 RTLogFlush(NULL);
3086#endif
3087 return rc;
3088}
3089
3090
3091/**
3092 * Decides whether to execute RAW, HWACC or REM.
3093 *
3094 * @returns new EM state
3095 * @param pVM The VM.
3096 * @param pVCpu The VMCPU handle.
3097 * @param pCtx The CPU context.
3098 */
3099static EMSTATE emR3Reschedule(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
3100{
3101 /*
3102 * When forcing raw-mode execution, things are simple.
3103 */
3104 if (pVCpu->em.s.fForceRAW)
3105 return EMSTATE_RAW;
3106
3107 /*
3108 * We stay in the wait for SIPI state unless explicitly told otherwise.
3109 */
3110 if (pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI)
3111 return EMSTATE_WAIT_SIPI;
3112
3113 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
3114 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
3115 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
3116
3117 X86EFLAGS EFlags = pCtx->eflags;
3118 if (HWACCMIsEnabled(pVM))
3119 {
3120 /* Hardware accelerated raw-mode:
3121 *
3122 * Typically only 32-bit protected mode code, with paging enabled, is allowed here.
3123 */
3124 if (HWACCMR3CanExecuteGuest(pVM, pCtx))
3125 return EMSTATE_HWACC;
3126
3127 /* Note: Raw mode and hw accelerated mode are incompatible. The latter turns
3128 * off monitoring features essential for raw mode! */
3129 return EMSTATE_REM;
3130 }
3131
3132 /*
3133 * Standard raw-mode:
3134 *
3135 * Here we only support 16 & 32 bit protected mode ring 3 code with no IO privileges,
3136 * or 32 bit protected mode ring 0 code.
3137 *
3138 * The tests are ordered by the likelihood of being true during normal execution.
3139 */
3140 if (EFlags.u32 & (X86_EFL_TF /* | HF_INHIBIT_IRQ_MASK*/))
3141 {
3142 Log2(("raw mode refused: EFlags=%#x\n", EFlags.u32));
3143 return EMSTATE_REM;
3144 }
3145
3146#ifndef VBOX_RAW_V86
3147 if (EFlags.u32 & X86_EFL_VM) {
3148 Log2(("raw mode refused: VM_MASK\n"));
3149 return EMSTATE_REM;
3150 }
3151#endif
3152
3153 /** @todo check up the X86_CR0_AM flag in respect to raw mode!!! We're probably not emulating it right! */
3154 uint32_t u32CR0 = pCtx->cr0;
3155 if ((u32CR0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
3156 {
3157 //Log2(("raw mode refused: %s%s%s\n", (u32CR0 & X86_CR0_PG) ? "" : " !PG", (u32CR0 & X86_CR0_PE) ? "" : " !PE", (u32CR0 & X86_CR0_AM) ? "" : " !AM"));
3158 return EMSTATE_REM;
3159 }
3160
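 /* Raw mode can only run a PAE guest if the CPUID we expose to the guest
  * reports PAE support; otherwise fall back to the recompiler. */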
3161 if (pCtx->cr4 & X86_CR4_PAE)
3162 {
3163 uint32_t u32Dummy, u32Features;
3164
3165 CPUMGetGuestCpuId(pVCpu, 1, &u32Dummy, &u32Dummy, &u32Dummy, &u32Features);
3166 if (!(u32Features & X86_CPUID_FEATURE_EDX_PAE))
3167 return EMSTATE_REM;
3168 }
3169
3170 unsigned uSS = pCtx->ss;
3171 if ( pCtx->eflags.Bits.u1VM
3172 || (uSS & X86_SEL_RPL) == 3)
3173 {
3174 if (!EMIsRawRing3Enabled(pVM))
3175 return EMSTATE_REM;
3176
3177 if (!(EFlags.u32 & X86_EFL_IF))
3178 {
3179 Log2(("raw mode refused: IF (RawR3)\n"));
3180 return EMSTATE_REM;
3181 }
3182
3183 if (!(u32CR0 & X86_CR0_WP) && EMIsRawRing0Enabled(pVM))
3184 {
3185 Log2(("raw mode refused: CR0.WP + RawR0\n"));
3186 return EMSTATE_REM;
3187 }
3188 }
3189 else
3190 {
3191 if (!EMIsRawRing0Enabled(pVM))
3192 return EMSTATE_REM;
3193
3194 /* Only ring 0 supervisor code. */
3195 if ((uSS & X86_SEL_RPL) != 0)
3196 {
3197 Log2(("raw r0 mode refused: CPL %d\n", uSS & X86_SEL_RPL));
3198 return EMSTATE_REM;
3199 }
3200
3201 // Let's start with pure 32 bits ring 0 code first
3202 /** @todo What's pure 32-bit mode? flat? */
3203 if ( !(pCtx->ssHid.Attr.n.u1DefBig)
3204 || !(pCtx->csHid.Attr.n.u1DefBig))
3205 {
3206 Log2(("raw r0 mode refused: SS/CS not 32bit\n"));
3207 return EMSTATE_REM;
3208 }
3209
3210 /* Write protection must be turned on, or else the guest can overwrite our hypervisor code and data. */
3211 if (!(u32CR0 & X86_CR0_WP))
3212 {
3213 Log2(("raw r0 mode refused: CR0.WP=0!\n"));
3214 return EMSTATE_REM;
3215 }
3216
3217 if (PATMShouldUseRawMode(pVM, (RTGCPTR)pCtx->eip))
3218 {
3219 Log2(("raw r0 mode forced: patch code\n"));
3220 return EMSTATE_RAW;
3221 }
3222
3223#if !defined(VBOX_ALLOW_IF0) && !defined(VBOX_RUN_INTERRUPT_GATE_HANDLERS)
3224 if (!(EFlags.u32 & X86_EFL_IF))
3225 {
3226 ////Log2(("R0: IF=0 VIF=%d %08X\n", eip, pVMeflags));
3227 //Log2(("RR0: Interrupts turned off; fall back to emulation\n"));
3228 return EMSTATE_REM;
3229 }
3230#endif
3231
3232 /** @todo still necessary??? */
3233 if (EFlags.Bits.u2IOPL != 0)
3234 {
3235 Log2(("raw r0 mode refused: IOPL %d\n", EFlags.Bits.u2IOPL));
3236 return EMSTATE_REM;
3237 }
3238 }
3239
3240 Assert(PGMPhysIsA20Enabled(pVCpu));
3241 return EMSTATE_RAW;
3242}
3243
3244
3245/**
3246 * Executes all high priority post execution force actions.
3247 *
3248 * @returns rc or a fatal status code.
3249 *
3250 * @param pVM VM handle.
3251 * @param pVCpu VMCPU handle.
3252 * @param rc The current rc.
3253 */
3254static int emR3HighPriorityPostForcedActions(PVM pVM, PVMCPU pVCpu, int rc)
3255{
3256 if (VM_FF_ISPENDING(pVM, VM_FF_PDM_CRITSECT))
3257 PDMR3CritSectFF(pVM);
3258
3259 if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_CSAM_PENDING_ACTION))
3260 CSAMR3DoPendingAction(pVM, pVCpu);
3261
3262 if (VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY))
3263 {
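 /* Downgrade any less important EM status code to VINF_EM_NO_MEMORY so the
  * out-of-memory condition is dealt with before anything else. */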
3264 if ( rc > VINF_EM_NO_MEMORY
3265 && rc <= VINF_EM_LAST)
3266 rc = VINF_EM_NO_MEMORY;
3267 }
3268
3269 return rc;
3270}
3271
3272
3273/**
3274 * Executes all pending forced actions.
3275 *
3276 * Forced actions can cause execution delays and execution
3277 * rescheduling. The former we deal with using action priority, so
3278 * that for instance pending timers aren't scheduled and run until
3279 * right before execution. The rescheduling we deal with using
3280 * return codes. The same goes for VM termination, only in that case
3281 * we exit everything.
3282 *
3283 * @returns VBox status code of equal or greater importance/severity than rc.
3284 * The most important ones are: VINF_EM_RESCHEDULE,
3285 * VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
3286 *
3287 * @param pVM VM handle.
3288 * @param pVCpu VMCPU handle.
3289 * @param rc The current rc.
3290 *
3291 */
3292static int emR3ForcedActions(PVM pVM, PVMCPU pVCpu, int rc)
3293{
3294 STAM_REL_PROFILE_START(&pVCpu->em.s.StatForcedActions, a);
3295#ifdef VBOX_STRICT
3296 int rcIrq = VINF_SUCCESS;
3297#endif
3298 int rc2;
3299#define UPDATE_RC() \
3300 do { \
3301 AssertMsg(rc2 <= 0 || (rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST), ("Invalid FF return code: %Rra\n", rc2)); \
3302 if (rc2 == VINF_SUCCESS || rc < VINF_SUCCESS) \
3303 break; \
3304 if (!rc || rc2 < rc) \
3305 rc = rc2; \
3306 } while (0)
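 /* Note: UPDATE_RC() picks the more important of rc and rc2. The VINF_EM_*
  * codes are ordered so that a lower (non-zero) value means higher priority,
  * and an rc that already signals an error (< VINF_SUCCESS) is never
  * overwritten. */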
3307
3308 /*
3309 * Post execution chunk first.
3310 */
3311 if ( VM_FF_ISPENDING(pVM, VM_FF_NORMAL_PRIORITY_POST_MASK)
3312 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_NORMAL_PRIORITY_POST_MASK))
3313 {
3314 /*
3315 * Termination request.
3316 */
3317 if (VM_FF_ISPENDING(pVM, VM_FF_TERMINATE))
3318 {
3319 Log2(("emR3ForcedActions: returns VINF_EM_TERMINATE\n"));
3320 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
3321 return VINF_EM_TERMINATE;
3322 }
3323
3324 /*
3325 * Debugger Facility polling.
3326 */
3327 if (VM_FF_ISPENDING(pVM, VM_FF_DBGF))
3328 {
3329 rc2 = DBGFR3VMMForcedAction(pVM);
3330 UPDATE_RC();
3331 }
3332
3333 /*
3334 * Postponed reset request.
3335 */
3336 if (VM_FF_TESTANDCLEAR(pVM, VM_FF_RESET_BIT))
3337 {
3338 rc2 = VMR3Reset(pVM);
3339 UPDATE_RC();
3340 }
3341
3342 /*
3343 * CSAM page scanning.
3344 */
3345 if ( !VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY)
3346 && VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_CSAM_SCAN_PAGE))
3347 {
3348 PCPUMCTX pCtx = pVCpu->em.s.pCtx;
3349
3350 /** @todo check for 16 or 32 bit code! (D bit in the code selector) */
3351 Log(("Forced action VMCPU_FF_CSAM_SCAN_PAGE\n"));
3352
3353 CSAMR3CheckCodeEx(pVM, CPUMCTX2CORE(pCtx), pCtx->eip);
3354 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_CSAM_SCAN_PAGE);
3355 }
3356
3357 /*
3358 * Out of memory? Putting this after CSAM as it may in theory cause us to run out of memory.
3359 */
3360 if (VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY))
3361 {
3362 rc2 = PGMR3PhysAllocateHandyPages(pVM);
3363 UPDATE_RC();
3364 if (rc == VINF_EM_NO_MEMORY)
3365 return rc;
3366 }
3367
3368 /* check that we got them all */
3369 Assert(!(VM_FF_NORMAL_PRIORITY_POST_MASK & ~(VM_FF_TERMINATE | VM_FF_DBGF | VM_FF_RESET | VM_FF_PGM_NO_MEMORY)));
3370 Assert(!(VMCPU_FF_NORMAL_PRIORITY_POST_MASK & ~(VMCPU_FF_CSAM_SCAN_PAGE)));
3371 }
3372
3373 /*
3374 * Normal priority then.
3375 * (Executed in no particular order.)
3376 */
3377 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_NORMAL_PRIORITY_MASK, VM_FF_PGM_NO_MEMORY))
3378 {
3379 /*
3380 * PDM Queues are pending.
3381 */
3382 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PDM_QUEUES, VM_FF_PGM_NO_MEMORY))
3383 PDMR3QueueFlushAll(pVM);
3384
3385 /*
3386 * PDM DMA transfers are pending.
3387 */
3388 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PDM_DMA, VM_FF_PGM_NO_MEMORY))
3389 PDMR3DmaRun(pVM);
3390
3391 /*
3392 * Requests from other threads.
3393 */
3394 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_REQUEST, VM_FF_PGM_NO_MEMORY))
3395 {
3396 rc2 = VMR3ReqProcessU(pVM->pUVM, VMCPUID_ANY);
3397 if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE)
3398 {
3399 Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
3400 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
3401 return rc2;
3402 }
3403 UPDATE_RC();
3404 }
3405
3406 /* Replay the handler notification changes. */
3407 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_REM_HANDLER_NOTIFY, VM_FF_PGM_NO_MEMORY))
3408 {
3409 emR3RemLock(pVM);
3410 REMR3ReplayHandlerNotifications(pVM);
3411 emR3RemUnlock(pVM);
3412 }
3413
3414 /* check that we got them all */
3415 Assert(!(VM_FF_NORMAL_PRIORITY_MASK & ~(VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA | VM_FF_REM_HANDLER_NOTIFY)));
3416 }
3417
3418 /*
3419 * Normal priority then. (per-VCPU)
3420 * (Executed in no particular order.)
3421 */
3422 if ( !VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY)
3423 && VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_NORMAL_PRIORITY_MASK))
3424 {
3425 /*
3426 * Requests from other threads.
3427 */
3428 if (VMCPU_FF_IS_PENDING_EXCEPT(pVCpu, VMCPU_FF_REQUEST, VM_FF_PGM_NO_MEMORY))
3429 {
3430 rc2 = VMR3ReqProcessU(pVM->pUVM, pVCpu->idCpu);
3431 if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE)
3432 {
3433 Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
3434 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
3435 return rc2;
3436 }
3437 UPDATE_RC();
3438 }
3439
3440 /* check that we got them all */
3441 Assert(!(VMCPU_FF_NORMAL_PRIORITY_MASK & ~(VMCPU_FF_REQUEST)));
3442 }
3443
3444 /*
3445 * High priority pre execution chunk last.
3446 * (Executed in ascending priority order.)
3447 */
3448 if ( VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_MASK)
3449 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_MASK))
3450 {
3451 /*
3452 * Timers before interrupts.
3453 */
3454 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_TIMER, VM_FF_PGM_NO_MEMORY))
3455 TMR3TimerQueuesDo(pVM);
3456
3457 /*
3458 * The instruction following an emulated STI should *always* be executed!
3459 */
3460 if ( !VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY)
3461 && VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
3462 {
3463 Log(("VM_FF_EMULATED_STI at %RGv successor %RGv\n", (RTGCPTR)CPUMGetGuestRIP(pVCpu), EMGetInhibitInterruptsPC(pVCpu)));
3464 if (CPUMGetGuestEIP(pVCpu) != EMGetInhibitInterruptsPC(pVCpu))
3465 {
3466 /* Note: we intentionally don't clear VM_FF_INHIBIT_INTERRUPTS here if the eip is the same as the inhibited instr address.
3467 * Before we are able to execute this instruction in raw mode (iret to guest code) an external interrupt might
3468 * force a world switch again. Possibly allowing a guest interrupt to be dispatched in the process. This could
3469 * break the guest. Sounds very unlikely, but such timing sensitive problems are not as rare as you might think.
3470 */
3471 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
3472 }
3473 if (HWACCMR3IsActive(pVM))
3474 rc2 = VINF_EM_RESCHEDULE_HWACC;
3475 else
3476 rc2 = PATMAreInterruptsEnabled(pVM) ? VINF_EM_RESCHEDULE_RAW : VINF_EM_RESCHEDULE_REM;
3477
3478 UPDATE_RC();
3479 }
3480
3481 /*
3482 * Interrupts.
3483 */
3484 if ( !VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY)
3485 && !VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
3486 && (!rc || rc >= VINF_EM_RESCHEDULE_HWACC)
3487 && !TRPMHasTrap(pVCpu) /* an interrupt could already be scheduled for dispatching in the recompiler. */
3488 && PATMAreInterruptsEnabled(pVM)
3489 && !HWACCMR3IsEventPending(pVM))
3490 {
3491 if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
3492 {
3493 /* Note: it's important to make sure the return code from TRPMR3InjectEvent isn't ignored! */
3494 /** @todo this really isn't nice, should properly handle this */
3495 rc2 = TRPMR3InjectEvent(pVM, pVCpu, TRPM_HARDWARE_INT);
3496#ifdef VBOX_STRICT
3497 rcIrq = rc2;
3498#endif
3499 UPDATE_RC();
3500 }
3501 /** @todo really ugly; if we entered the hlt state when exiting the recompiler and an interrupt was pending, we previously got stuck in the halted state. */
3502 else if (REMR3QueryPendingInterrupt(pVM, pVCpu) != REM_NO_PENDING_IRQ)
3503 {
3504 rc2 = VINF_EM_RESCHEDULE_REM;
3505 UPDATE_RC();
3506 }
3507 }
3508
3509 /*
3510 * Allocate handy pages.
3511 */
3512 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PGM_NEED_HANDY_PAGES, VM_FF_PGM_NO_MEMORY))
3513 {
3514 rc2 = PGMR3PhysAllocateHandyPages(pVM);
3515 UPDATE_RC();
3516 }
3517
3518 /*
3519 * Debugger Facility request.
3520 */
3521 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_DBGF, VM_FF_PGM_NO_MEMORY))
3522 {
3523 rc2 = DBGFR3VMMForcedAction(pVM);
3524 UPDATE_RC();
3525 }
3526
3527 /*
3528 * Termination request.
3529 */
3530 if (VM_FF_ISPENDING(pVM, VM_FF_TERMINATE))
3531 {
3532 Log2(("emR3ForcedActions: returns VINF_EM_TERMINATE\n"));
3533 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
3534 return VINF_EM_TERMINATE;
3535 }
3536
3537 /*
3538 * Out of memory? Since most of our fellow high priority actions may cause us
3539 * to run out of memory, we're employing VM_FF_IS_PENDING_EXCEPT and putting this
3540 * at the end rather than the start. Also, VM_FF_TERMINATE has higher priority
3541 * than us since we can terminate without allocating more memory.
3542 */
3543 if (VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY))
3544 {
3545 rc2 = PGMR3PhysAllocateHandyPages(pVM);
3546 UPDATE_RC();
3547 if (rc == VINF_EM_NO_MEMORY)
3548 return rc;
3549 }
3550
3551#ifdef DEBUG
3552 /*
3553 * Debug, pause the VM.
3554 */
3555 if (VM_FF_ISPENDING(pVM, VM_FF_DEBUG_SUSPEND))
3556 {
3557 VM_FF_CLEAR(pVM, VM_FF_DEBUG_SUSPEND);
3558 Log(("emR3ForcedActions: returns VINF_EM_SUSPEND\n"));
3559 return VINF_EM_SUSPEND;
3560 }
3561
3562#endif
3563 /* check that we got them all */
3564 Assert(!(VM_FF_HIGH_PRIORITY_PRE_MASK & ~(VM_FF_TIMER | VM_FF_DBGF | VM_FF_TERMINATE | VM_FF_DEBUG_SUSPEND | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY)));
3565 Assert(!(VMCPU_FF_HIGH_PRIORITY_PRE_MASK & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_SELM_SYNC_TSS | VMCPU_FF_TRPM_SYNC_IDT | VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT | VMCPU_FF_INHIBIT_INTERRUPTS)));
3566 }
3567
3568#undef UPDATE_RC
3569 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
3570 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
3571 Assert(rcIrq == VINF_SUCCESS || rcIrq == rc);
3572 return rc;
3573}
3574
3575
3576/**
3577 * Execute VM.
3578 *
3579 * This function is the main loop of the VM. The emulation thread
3580 * calls this function when the VM has been successfully constructed
3581 * and we're ready for executing the VM.
3582 *
3583 * Returning from this function means that the VM is turned off or
3584 * suspended (state already saved) and deconstruction is next in line.
3585 *
3586 * All interaction from other threads is done using forced actions
3587 * and signaling of the wait object.
3588 *
3589 * @returns VBox status code; informational status codes may indicate failure.
3590 * @param pVM The VM to operate on.
3591 * @param pVCpu The VMCPU to operate on.
3592 */
3593VMMR3DECL(int) EMR3ExecuteVM(PVM pVM, PVMCPU pVCpu)
3594{
3595 LogFlow(("EMR3ExecuteVM: pVM=%p enmVMState=%d enmState=%d (%s) fForceRAW=%d\n", pVM, pVM->enmVMState,
3596 pVCpu->em.s.enmState, EMR3GetStateName(pVCpu->em.s.enmState), pVCpu->em.s.fForceRAW));
3597 VM_ASSERT_EMT(pVM);
3598 Assert(pVCpu->em.s.enmState == EMSTATE_NONE || pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI || pVCpu->em.s.enmState == EMSTATE_SUSPENDED);
3599
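 /* Fatal failures deep inside the execution loops longjmp back to this point
  * with a VBox status code; the error path at the bottom of this function
  * then dumps state and returns. */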
3600 int rc = setjmp(pVCpu->em.s.u.FatalLongJump);
3601 if (rc == 0)
3602 {
3603 /*
3604 * Start the virtual time.
3605 */
3606 rc = TMVirtualResume(pVM);
3607 Assert(rc == VINF_SUCCESS);
3608 rc = TMCpuTickResume(pVCpu);
3609 Assert(rc == VINF_SUCCESS);
3610
3611 /*
3612 * The Outer Main Loop.
3613 */
3614 bool fFFDone = false;
3615
3616 /* Reschedule right away to start in the right state. */
3617 rc = VINF_SUCCESS;
3618
3619 /** @todo doesn't work for the save/restore case */
3620 if ( pVCpu->em.s.enmState == EMSTATE_SUSPENDED
3621 && ( pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
3622 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED))
3623 {
3624 /* Pause->Resume: Restore the old wait state or else we'll start executing code. */
3625 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
3626 }
3627 else
3628 pVCpu->em.s.enmState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
3629
3630 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
3631 for (;;)
3632 {
3633 /*
3634 * Before we can schedule anything (we're here because
3635 * scheduling is required) we must service any pending
3636 * forced actions to avoid any pending action causing
3637 * immediate rescheduling upon entering an inner loop.
3638 *
3639 * Do forced actions.
3640 */
3641 if ( !fFFDone
3642 && rc != VINF_EM_TERMINATE
3643 && rc != VINF_EM_OFF
3644 && ( VM_FF_ISPENDING(pVM, VM_FF_ALL_BUT_RAW_MASK)
3645 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_ALL_BUT_RAW_MASK)))
3646 {
3647 rc = emR3ForcedActions(pVM, pVCpu, rc);
3648 if ( ( rc == VINF_EM_RESCHEDULE_REM
3649 || rc == VINF_EM_RESCHEDULE_HWACC)
3650 && pVCpu->em.s.fForceRAW)
3651 rc = VINF_EM_RESCHEDULE_RAW;
3652 }
3653 else if (fFFDone)
3654 fFFDone = false;
3655
3656 /*
3657 * Now what to do?
3658 */
3659 Log2(("EMR3ExecuteVM: rc=%Rrc\n", rc));
3660 switch (rc)
3661 {
3662 /*
3663 * Keep doing what we're currently doing.
3664 */
3665 case VINF_SUCCESS:
3666 break;
3667
3668 /*
3669 * Reschedule - to raw-mode execution.
3670 */
3671 case VINF_EM_RESCHEDULE_RAW:
3672 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_RAW: %d -> %d (EMSTATE_RAW)\n", pVCpu->em.s.enmState, EMSTATE_RAW));
3673 pVCpu->em.s.enmState = EMSTATE_RAW;
3674 break;
3675
3676 /*
3677 * Reschedule - to hardware accelerated raw-mode execution.
3678 */
3679 case VINF_EM_RESCHEDULE_HWACC:
3680 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_HWACC: %d -> %d (EMSTATE_HWACC)\n", pVCpu->em.s.enmState, EMSTATE_HWACC));
3681 Assert(!pVCpu->em.s.fForceRAW);
3682 pVCpu->em.s.enmState = EMSTATE_HWACC;
3683 break;
3684
3685 /*
3686 * Reschedule - to recompiled execution.
3687 */
3688 case VINF_EM_RESCHEDULE_REM:
3689 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_REM: %d -> %d (EMSTATE_REM)\n", pVCpu->em.s.enmState, EMSTATE_REM));
3690 pVCpu->em.s.enmState = EMSTATE_REM;
3691 break;
3692
3693#ifdef VBOX_WITH_VMI
3694 /*
3695 * Reschedule - parav call.
3696 */
3697 case VINF_EM_RESCHEDULE_PARAV:
3698 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_PARAV: %d -> %d (EMSTATE_PARAV)\n", pVCpu->em.s.enmState, EMSTATE_PARAV));
3699 pVCpu->em.s.enmState = EMSTATE_PARAV;
3700 break;
3701#endif
3702
3703 /*
3704 * Resume.
3705 */
3706 case VINF_EM_RESUME:
3707 Log2(("EMR3ExecuteVM: VINF_EM_RESUME: %d -> VINF_EM_RESCHEDULE\n", pVCpu->em.s.enmState));
3708 /* Don't reschedule in the halted or wait for SIPI case. */
3709 if ( pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
3710 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED)
3711 break;
3712 /* fall through and get scheduled. */
3713
3714 /*
3715 * Reschedule.
3716 */
3717 case VINF_EM_RESCHEDULE:
3718 {
3719 EMSTATE enmState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
3720 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE: %d -> %d (%s)\n", pVCpu->em.s.enmState, enmState, EMR3GetStateName(enmState)));
3721 pVCpu->em.s.enmState = enmState;
3722 break;
3723 }
3724
3725 /*
3726 * Halted.
3727 */
3728 case VINF_EM_HALT:
3729 Log2(("EMR3ExecuteVM: VINF_EM_HALT: %d -> %d\n", pVCpu->em.s.enmState, EMSTATE_HALTED));
3730 pVCpu->em.s.enmState = EMSTATE_HALTED;
3731 break;
3732
3733 /*
3734 * Suspend.
3735 */
3736 case VINF_EM_SUSPEND:
3737 Log2(("EMR3ExecuteVM: VINF_EM_SUSPEND: %d -> %d\n", pVCpu->em.s.enmState, EMSTATE_SUSPENDED));
3738 pVCpu->em.s.enmPrevState = pVCpu->em.s.enmState;
3739 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
3740 break;
3741
3742 /*
3743 * Reset.
3744 * We might end up doing a double reset for now; we'll have to clean up the mess later.
3745 */
3746 case VINF_EM_RESET:
3747 {
3748 if (pVCpu->idCpu == 0)
3749 {
3750 EMSTATE enmState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
3751 Log2(("EMR3ExecuteVM: VINF_EM_RESET: %d -> %d (%s)\n", pVCpu->em.s.enmState, enmState, EMR3GetStateName(enmState)));
3752 pVCpu->em.s.enmState = enmState;
3753 }
3754 else
3755 {
3756 /* All other VCPUs go into the wait for SIPI state. */
3757 pVCpu->em.s.enmState = EMSTATE_WAIT_SIPI;
3758 }
3759 break;
3760 }
3761
3762 /*
3763 * Power Off.
3764 */
3765 case VINF_EM_OFF:
3766 Log2(("EMR3ExecuteVM: returns VINF_EM_OFF (%d -> %d)\n", pVCpu->em.s.enmState, EMSTATE_TERMINATING));
3767 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
3768 TMVirtualPause(pVM);
3769 TMCpuTickPause(pVCpu);
3770 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
3771 return rc;
3772
3773 /*
3774 * Terminate the VM.
3775 */
3776 case VINF_EM_TERMINATE:
3777 Log(("EMR3ExecuteVM returns VINF_EM_TERMINATE (%d -> %d)\n", pVCpu->em.s.enmState, EMSTATE_TERMINATING));
3778 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
3779 TMVirtualPause(pVM);
3780 TMCpuTickPause(pVCpu);
3781 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
3782 return rc;
3783
3784
3785 /*
3786 * Out of memory, suspend the VM and stuff.
3787 */
3788 case VINF_EM_NO_MEMORY:
3789 Log2(("EMR3ExecuteVM: VINF_EM_NO_MEMORY: %d -> %d\n", pVCpu->em.s.enmState, EMSTATE_SUSPENDED));
3790 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
3791 TMVirtualPause(pVM);
3792 TMCpuTickPause(pVCpu);
3793 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
3794
3795 rc = VMSetRuntimeError(pVM, VMSETRTERR_FLAGS_SUSPEND, "HostMemoryLow",
3796 N_("Unable to allocate and lock memory. The virtual machine will be paused. Please close applications to free up memory or close the VM"));
3797 if (rc != VINF_EM_SUSPEND)
3798 {
3799 if (RT_SUCCESS_NP(rc))
3800 {
3801 AssertLogRelMsgFailed(("%Rrc\n", rc));
3802 rc = VERR_EM_INTERNAL_ERROR;
3803 }
3804 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
3805 }
3806 return rc;
3807
3808 /*
3809 * Guest debug events.
3810 */
3811 case VINF_EM_DBG_STEPPED:
3812 AssertMsgFailed(("VINF_EM_DBG_STEPPED cannot be here!"));
3813 case VINF_EM_DBG_STOP:
3814 case VINF_EM_DBG_BREAKPOINT:
3815 case VINF_EM_DBG_STEP:
3816 if (pVCpu->em.s.enmState == EMSTATE_RAW)
3817 {
3818 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, pVCpu->em.s.enmState, EMSTATE_DEBUG_GUEST_RAW));
3819 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_RAW;
3820 }
3821 else
3822 {
3823 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, pVCpu->em.s.enmState, EMSTATE_DEBUG_GUEST_REM));
3824 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_REM;
3825 }
3826 break;
3827
3828 /*
3829 * Hypervisor debug events.
3830 */
3831 case VINF_EM_DBG_HYPER_STEPPED:
3832 case VINF_EM_DBG_HYPER_BREAKPOINT:
3833 case VINF_EM_DBG_HYPER_ASSERTION:
3834 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, pVCpu->em.s.enmState, EMSTATE_DEBUG_HYPER));
3835 pVCpu->em.s.enmState = EMSTATE_DEBUG_HYPER;
3836 break;
3837
3838 /*
3839 * Guru meditations.
3840 */
3841 case VERR_VMM_RING0_ASSERTION:
3842 Log(("EMR3ExecuteVM: %Rrc: %d -> %d (EMSTATE_GURU_MEDITATION)\n", rc, pVCpu->em.s.enmState, EMSTATE_GURU_MEDITATION));
3843 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
3844 break;
3845
3846 /*
3847 * Any error code showing up here other than the ones we
3848 * know and process above are considered to be FATAL.
3849 *
3850 * Unknown warnings and informational status codes are also
3851 * included in this.
3852 */
3853 default:
3854 if (RT_SUCCESS_NP(rc))
3855 {
3856 AssertMsgFailed(("Unexpected warning or informational status code %Rra!\n", rc));
3857 rc = VERR_EM_INTERNAL_ERROR;
3858 }
3859 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
3860 Log(("EMR3ExecuteVM returns %d\n", rc));
3861 break;
3862 }
3863
3864 STAM_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x); /* (skip this in release) */
3865 STAM_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
3866
3867 /*
3868 * Act on the state.
3869 */
3870 switch (pVCpu->em.s.enmState)
3871 {
3872 /*
3873 * Execute raw.
3874 */
3875 case EMSTATE_RAW:
3876 rc = emR3RawExecute(pVM, pVCpu, &fFFDone);
3877 break;
3878
3879 /*
3880 * Execute hardware accelerated raw.
3881 */
3882 case EMSTATE_HWACC:
3883 rc = emR3HwAccExecute(pVM, pVCpu, &fFFDone);
3884 break;
3885
3886 /*
3887 * Execute recompiled.
3888 */
3889 case EMSTATE_REM:
3890 rc = emR3RemExecute(pVM, pVCpu, &fFFDone);
3891 Log2(("EMR3ExecuteVM: emR3RemExecute -> %Rrc\n", rc));
3892 break;
3893
3894#ifdef VBOX_WITH_VMI
3895 /*
3896 * Execute PARAV function.
3897 */
3898 case EMSTATE_PARAV:
3899 rc = PARAVCallFunction(pVM);
3900 pVCpu->em.s.enmState = EMSTATE_REM;
3901 break;
3902#endif
3903
3904 /*
3905 * Application processor execution halted until SIPI.
3906 */
3907 case EMSTATE_WAIT_SIPI:
3908 Assert(!(CPUMGetGuestEFlags(pVCpu) & X86_EFL_IF));
3909 /* no break */
3910 /*
3911 * hlt - execution halted until interrupt.
3912 */
3913 case EMSTATE_HALTED:
3914 {
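 /* Block until something wakes us up; the last argument is set when the
  * guest's IF is clear, so pending interrupts won't end the halt. */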
3915 STAM_REL_PROFILE_START(&pVCpu->em.s.StatHalted, y);
3916 rc = VMR3WaitHalted(pVM, pVCpu, !(CPUMGetGuestEFlags(pVCpu) & X86_EFL_IF));
3917 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatHalted, y);
3918 break;
3919 }
3920
3921 /*
3922 * Suspended - return to VM.cpp.
3923 */
3924 case EMSTATE_SUSPENDED:
3925 TMVirtualPause(pVM);
3926 TMCpuTickPause(pVCpu);
3927 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
3928 return VINF_EM_SUSPEND;
3929
3930 /*
3931 * Debugging in the guest.
3932 */
3933 case EMSTATE_DEBUG_GUEST_REM:
3934 case EMSTATE_DEBUG_GUEST_RAW:
3935 TMVirtualPause(pVM);
3936 TMCpuTickPause(pVCpu);
3937 rc = emR3Debug(pVM, pVCpu, rc);
3938 TMVirtualResume(pVM);
3939 TMCpuTickResume(pVCpu);
3940 Log2(("EMR3ExecuteVM: enmr3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
3941 break;
3942
3943 /*
3944 * Debugging in the hypervisor.
3945 */
3946 case EMSTATE_DEBUG_HYPER:
3947 {
3948 TMVirtualPause(pVM);
3949 TMCpuTickPause(pVCpu);
3950 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
3951
3952 rc = emR3Debug(pVM, pVCpu, rc);
3953 Log2(("EMR3ExecuteVM: enmr3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
3954 if (rc != VINF_SUCCESS)
3955 {
3956 /* switch to guru meditation mode */
3957 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
3958 VMMR3FatalDump(pVM, pVCpu, rc);
3959 return rc;
3960 }
3961
3962 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
3963 TMVirtualResume(pVM);
3964 TMCpuTickResume(pVCpu);
3965 break;
3966 }
3967
3968 /*
3969 * Guru meditation takes place in the debugger.
3970 */
3971 case EMSTATE_GURU_MEDITATION:
3972 {
3973 TMVirtualPause(pVM);
3974 TMCpuTickPause(pVCpu);
3975 VMMR3FatalDump(pVM, pVCpu, rc);
3976 emR3Debug(pVM, pVCpu, rc);
3977 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
3978 return rc;
3979 }
3980
3981 /*
3982 * The states we don't expect here.
3983 */
3984 case EMSTATE_NONE:
3985 case EMSTATE_TERMINATING:
3986 default:
3987 AssertMsgFailed(("EMR3ExecuteVM: Invalid state %d!\n", pVCpu->em.s.enmState));
3988 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
3989 TMVirtualPause(pVM);
3990 TMCpuTickPause(pVCpu);
3991 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
3992 return VERR_EM_INTERNAL_ERROR;
3993 }
3994 } /* The Outer Main Loop */
3995 }
3996 else
3997 {
3998 /*
3999 * Fatal error.
4000 */
4001 LogFlow(("EMR3ExecuteVM: returns %Rrc (longjmp / fatal error)\n", rc));
4002 TMVirtualPause(pVM);
4003 TMCpuTickPause(pVCpu);
4004 VMMR3FatalDump(pVM, pVCpu, rc);
4005 emR3Debug(pVM, pVCpu, rc);
4006 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
4007 /** @todo change the VM state! */
4008 return rc;
4009 }
4010
4011 /* (won't ever get here). */
4012 AssertFailed();
4013}
4014