VirtualBox

source: vbox/trunk/src/VBox/VMM/EM.cpp @ 20833

Last change on this file since 20833 was 20671, checked in by vboxsync, 16 years ago

Bigger lock for the pagefault handler.
Avoid deadlocks when syncing notification handlers with our recompiler.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 164.9 KB
1/* $Id: EM.cpp 20671 2009-06-17 15:23:14Z vboxsync $ */
2/** @file
3 * EM - Execution Monitor / Manager.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22/** @page pg_em EM - The Execution Monitor / Manager
23 *
24 * The Execution Monitor/Manager is responsible for running the VM, scheduling
25 * the right kind of execution (Raw-mode, Hardware Assisted, Recompiled or
26 * Interpreted), and keeping the CPU states in sync. The function
27 * EMR3ExecuteVM() is the 'main-loop' of the VM, while each of the execution
28 * modes has different inner loops (emR3RawExecute, emR3HwAccExecute, and
29 * emR3RemExecute).
30 *
31 * The interpreted execution is only used to avoid switching between
32 * raw-mode/hwaccm and the recompiler when fielding virtualization traps/faults.
33 * The interpretation is thus implemented as part of EM.
34 *
35 * @see grp_em
36 */
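/* Reader's note (editor's addition, not part of the original sources): the @page
 * above names the main loop and its inner loops. As an illustrative sketch only,
 * the dispatch in EMR3ExecuteVM() conceptually amounts to picking an execution
 * state via emR3Reschedule() and calling the matching inner loop:
 *
 *     for (;;)
 *     {
 *         EMSTATE enmState = emR3Reschedule(pVM, pVCpu, pCtx);
 *         switch (enmState)
 *         {
 *             case EMSTATE_RAW:   rc = emR3RawExecute(pVM, pVCpu, &fFFDone);   break;
 *             case EMSTATE_HWACC: rc = emR3HwAccExecute(pVM, pVCpu, &fFFDone); break;
 *             case EMSTATE_REM:   rc = emR3RemExecute(pVM, pVCpu, &fFFDone);   break;
 *             default:            break; // halted, suspended, debug, guru, ...
 *         }
 *     }
 *
 * The real loop (further down in this file) additionally processes forced
 * actions and the halted/suspended/debug states; see EMR3ExecuteVM() for the
 * authoritative control flow.
 */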
37
38/*******************************************************************************
39* Header Files *
40*******************************************************************************/
41#define LOG_GROUP LOG_GROUP_EM
42#include <VBox/em.h>
43#include <VBox/vmm.h>
44#ifdef VBOX_WITH_VMI
45# include <VBox/parav.h>
46#endif
47#include <VBox/patm.h>
48#include <VBox/csam.h>
49#include <VBox/selm.h>
50#include <VBox/trpm.h>
51#include <VBox/iom.h>
52#include <VBox/dbgf.h>
53#include <VBox/pgm.h>
54#include <VBox/rem.h>
55#include <VBox/tm.h>
56#include <VBox/mm.h>
57#include <VBox/ssm.h>
58#include <VBox/pdmapi.h>
59#include <VBox/pdmcritsect.h>
60#include <VBox/pdmqueue.h>
61#include <VBox/hwaccm.h>
62#include <VBox/patm.h>
63#include "EMInternal.h"
64#include <VBox/vm.h>
65#include <VBox/cpumdis.h>
66#include <VBox/dis.h>
67#include <VBox/disopcode.h>
68#include <VBox/dbgf.h>
69
70#include <VBox/log.h>
71#include <iprt/thread.h>
72#include <iprt/assert.h>
73#include <iprt/asm.h>
74#include <iprt/semaphore.h>
75#include <iprt/string.h>
76#include <iprt/avl.h>
77#include <iprt/stream.h>
78#include <VBox/param.h>
79#include <VBox/err.h>
80
81
82/*******************************************************************************
83* Defined Constants And Macros *
84*******************************************************************************/
85#if 0 /* Disabled till after 2.1.0 when we've time to test it. */
86#define EM_NOTIFY_HWACCM
87#endif
88
89
90/*******************************************************************************
91* Internal Functions *
92*******************************************************************************/
93static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM);
94static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version);
95static int emR3Debug(PVM pVM, PVMCPU pVCpu, int rc);
96static int emR3RemStep(PVM pVM, PVMCPU pVCpu);
97static int emR3RemExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone);
98static int emR3RawResumeHyper(PVM pVM, PVMCPU pVCpu);
99static int emR3RawStep(PVM pVM, PVMCPU pVCpu);
100DECLINLINE(int) emR3RawHandleRC(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, int rc);
101DECLINLINE(int) emR3RawUpdateForceFlag(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, int rc);
102static int emR3RawForcedActions(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
103static int emR3RawExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone);
104DECLINLINE(int) emR3RawExecuteInstruction(PVM pVM, PVMCPU pVCpu, const char *pszPrefix, int rcGC = VINF_SUCCESS);
105static int emR3HighPriorityPostForcedActions(PVM pVM, PVMCPU pVCpu, int rc);
106static int emR3ForcedActions(PVM pVM, PVMCPU pVCpu, int rc);
107static int emR3RawGuestTrap(PVM pVM, PVMCPU pVCpu);
108static int emR3PatchTrap(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, int gcret);
109static int emR3SingleStepExecRem(PVM pVM, uint32_t cIterations);
110static EMSTATE emR3Reschedule(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
111
112/**
113 * Initializes the EM.
114 *
115 * @returns VBox status code.
116 * @param pVM The VM to operate on.
117 */
118VMMR3DECL(int) EMR3Init(PVM pVM)
119{
120 LogFlow(("EMR3Init\n"));
121 /*
122 * Assert alignment and sizes.
123 */
124 AssertCompileMemberAlignment(VM, em.s, 32);
125 AssertCompile(sizeof(pVM->em.s) <= sizeof(pVM->em.padding));
126 AssertCompile(sizeof(pVM->aCpus[0].em.s.u.FatalLongJump) <= sizeof(pVM->aCpus[0].em.s.u.achPaddingFatalLongJump));
127 AssertCompileMemberAlignment(EM, CritSectREM, sizeof(uintptr_t));
128
129 /*
130 * Init the structure.
131 */
132 pVM->em.s.offVM = RT_OFFSETOF(VM, em.s);
133 int rc = CFGMR3QueryBool(CFGMR3GetRoot(pVM), "RawR3Enabled", &pVM->fRawR3Enabled);
134 if (RT_FAILURE(rc))
135 pVM->fRawR3Enabled = true;
136 rc = CFGMR3QueryBool(CFGMR3GetRoot(pVM), "RawR0Enabled", &pVM->fRawR0Enabled);
137 if (RT_FAILURE(rc))
138 pVM->fRawR0Enabled = true;
139 Log(("EMR3Init: fRawR3Enabled=%d fRawR0Enabled=%d\n", pVM->fRawR3Enabled, pVM->fRawR0Enabled));
140
141 /*
142 * Initialize the REM critical section.
143 */
144 rc = PDMR3CritSectInit(pVM, &pVM->em.s.CritSectREM, "EM-REM");
145 AssertRCReturn(rc, rc);
146
147 /*
148 * Saved state.
149 */
150 rc = SSMR3RegisterInternal(pVM, "em", 0, EM_SAVED_STATE_VERSION, 16,
151 NULL, emR3Save, NULL,
152 NULL, emR3Load, NULL);
153 if (RT_FAILURE(rc))
154 return rc;
155
156 for (unsigned i=0;i<pVM->cCPUs;i++)
157 {
158 PVMCPU pVCpu = &pVM->aCpus[i];
159
160 pVCpu->em.s.offVMCPU = RT_OFFSETOF(VMCPU, em.s);
161
162 pVCpu->em.s.enmState = (i == 0) ? EMSTATE_NONE : EMSTATE_WAIT_SIPI;
163 pVCpu->em.s.enmPrevState = EMSTATE_NONE;
164 pVCpu->em.s.fForceRAW = false;
165
166 pVCpu->em.s.pCtx = CPUMQueryGuestCtxPtr(pVCpu);
167 pVCpu->em.s.pPatmGCState = PATMR3QueryGCStateHC(pVM);
168 AssertMsg(pVCpu->em.s.pPatmGCState, ("PATMR3QueryGCStateHC failed!\n"));
169
170# define EM_REG_COUNTER(a, b, c) \
171 rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, c, b, i); \
172 AssertRC(rc);
173
174# define EM_REG_COUNTER_USED(a, b, c) \
175 rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, c, b, i); \
176 AssertRC(rc);
177
178# define EM_REG_PROFILE(a, b, c) \
179 rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, i); \
180 AssertRC(rc);
181
182# define EM_REG_PROFILE_ADV(a, b, c) \
183 rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE_ADV, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, i); \
184 AssertRC(rc);
185
186 /*
187 * Statistics.
188 */
189#ifdef VBOX_WITH_STATISTICS
190 PEMSTATS pStats;
191 rc = MMHyperAlloc(pVM, sizeof(*pStats), 0, MM_TAG_EM, (void **)&pStats);
192 if (RT_FAILURE(rc))
193 return rc;
194
195 pVCpu->em.s.pStatsR3 = pStats;
196 pVCpu->em.s.pStatsR0 = MMHyperR3ToR0(pVM, pStats);
197 pVCpu->em.s.pStatsRC = MMHyperR3ToRC(pVM, pStats);
198
199 EM_REG_PROFILE(&pStats->StatRZEmulate, "/EM/CPU%d/RZ/Interpret", "Profiling of EMInterpretInstruction.");
200 EM_REG_PROFILE(&pStats->StatR3Emulate, "/EM/CPU%d/R3/Interpret", "Profiling of EMInterpretInstruction.");
201
202 EM_REG_PROFILE(&pStats->StatRZInterpretSucceeded, "/EM/CPU%d/RZ/Interpret/Success", "The number of times an instruction was successfully interpreted.");
203 EM_REG_PROFILE(&pStats->StatR3InterpretSucceeded, "/EM/CPU%d/R3/Interpret/Success", "The number of times an instruction was successfully interpreted.");
204
205 EM_REG_COUNTER_USED(&pStats->StatRZAnd, "/EM/CPU%d/RZ/Interpret/Success/And", "The number of times AND was successfully interpreted.");
206 EM_REG_COUNTER_USED(&pStats->StatR3And, "/EM/CPU%d/R3/Interpret/Success/And", "The number of times AND was successfully interpreted.");
207 EM_REG_COUNTER_USED(&pStats->StatRZAdd, "/EM/CPU%d/RZ/Interpret/Success/Add", "The number of times ADD was successfully interpreted.");
208 EM_REG_COUNTER_USED(&pStats->StatR3Add, "/EM/CPU%d/R3/Interpret/Success/Add", "The number of times ADD was successfully interpreted.");
209 EM_REG_COUNTER_USED(&pStats->StatRZAdc, "/EM/CPU%d/RZ/Interpret/Success/Adc", "The number of times ADC was successfully interpreted.");
210 EM_REG_COUNTER_USED(&pStats->StatR3Adc, "/EM/CPU%d/R3/Interpret/Success/Adc", "The number of times ADC was successfully interpreted.");
211 EM_REG_COUNTER_USED(&pStats->StatRZSub, "/EM/CPU%d/RZ/Interpret/Success/Sub", "The number of times SUB was successfully interpreted.");
212 EM_REG_COUNTER_USED(&pStats->StatR3Sub, "/EM/CPU%d/R3/Interpret/Success/Sub", "The number of times SUB was successfully interpreted.");
213 EM_REG_COUNTER_USED(&pStats->StatRZCpuId, "/EM/CPU%d/RZ/Interpret/Success/CpuId", "The number of times CPUID was successfully interpreted.");
214 EM_REG_COUNTER_USED(&pStats->StatR3CpuId, "/EM/CPU%d/R3/Interpret/Success/CpuId", "The number of times CPUID was successfully interpreted.");
215 EM_REG_COUNTER_USED(&pStats->StatRZDec, "/EM/CPU%d/RZ/Interpret/Success/Dec", "The number of times DEC was successfully interpreted.");
216 EM_REG_COUNTER_USED(&pStats->StatR3Dec, "/EM/CPU%d/R3/Interpret/Success/Dec", "The number of times DEC was successfully interpreted.");
217 EM_REG_COUNTER_USED(&pStats->StatRZHlt, "/EM/CPU%d/RZ/Interpret/Success/Hlt", "The number of times HLT was successfully interpreted.");
218 EM_REG_COUNTER_USED(&pStats->StatR3Hlt, "/EM/CPU%d/R3/Interpret/Success/Hlt", "The number of times HLT was successfully interpreted.");
219 EM_REG_COUNTER_USED(&pStats->StatRZInc, "/EM/CPU%d/RZ/Interpret/Success/Inc", "The number of times INC was successfully interpreted.");
220 EM_REG_COUNTER_USED(&pStats->StatR3Inc, "/EM/CPU%d/R3/Interpret/Success/Inc", "The number of times INC was successfully interpreted.");
221 EM_REG_COUNTER_USED(&pStats->StatRZInvlPg, "/EM/CPU%d/RZ/Interpret/Success/Invlpg", "The number of times INVLPG was successfully interpreted.");
222 EM_REG_COUNTER_USED(&pStats->StatR3InvlPg, "/EM/CPU%d/R3/Interpret/Success/Invlpg", "The number of times INVLPG was successfully interpreted.");
223 EM_REG_COUNTER_USED(&pStats->StatRZIret, "/EM/CPU%d/RZ/Interpret/Success/Iret", "The number of times IRET was successfully interpreted.");
224 EM_REG_COUNTER_USED(&pStats->StatR3Iret, "/EM/CPU%d/R3/Interpret/Success/Iret", "The number of times IRET was successfully interpreted.");
225 EM_REG_COUNTER_USED(&pStats->StatRZLLdt, "/EM/CPU%d/RZ/Interpret/Success/LLdt", "The number of times LLDT was successfully interpreted.");
226 EM_REG_COUNTER_USED(&pStats->StatR3LLdt, "/EM/CPU%d/R3/Interpret/Success/LLdt", "The number of times LLDT was successfully interpreted.");
227 EM_REG_COUNTER_USED(&pStats->StatRZLIdt, "/EM/CPU%d/RZ/Interpret/Success/LIdt", "The number of times LIDT was successfully interpreted.");
228 EM_REG_COUNTER_USED(&pStats->StatR3LIdt, "/EM/CPU%d/R3/Interpret/Success/LIdt", "The number of times LIDT was successfully interpreted.");
229 EM_REG_COUNTER_USED(&pStats->StatRZLGdt, "/EM/CPU%d/RZ/Interpret/Success/LGdt", "The number of times LGDT was successfully interpreted.");
230 EM_REG_COUNTER_USED(&pStats->StatR3LGdt, "/EM/CPU%d/R3/Interpret/Success/LGdt", "The number of times LGDT was successfully interpreted.");
231 EM_REG_COUNTER_USED(&pStats->StatRZMov, "/EM/CPU%d/RZ/Interpret/Success/Mov", "The number of times MOV was successfully interpreted.");
232 EM_REG_COUNTER_USED(&pStats->StatR3Mov, "/EM/CPU%d/R3/Interpret/Success/Mov", "The number of times MOV was successfully interpreted.");
233 EM_REG_COUNTER_USED(&pStats->StatRZMovCRx, "/EM/CPU%d/RZ/Interpret/Success/MovCRx", "The number of times MOV CRx was successfully interpreted.");
234 EM_REG_COUNTER_USED(&pStats->StatR3MovCRx, "/EM/CPU%d/R3/Interpret/Success/MovCRx", "The number of times MOV CRx was successfully interpreted.");
235 EM_REG_COUNTER_USED(&pStats->StatRZMovDRx, "/EM/CPU%d/RZ/Interpret/Success/MovDRx", "The number of times MOV DRx was successfully interpreted.");
236 EM_REG_COUNTER_USED(&pStats->StatR3MovDRx, "/EM/CPU%d/R3/Interpret/Success/MovDRx", "The number of times MOV DRx was successfully interpreted.");
237 EM_REG_COUNTER_USED(&pStats->StatRZOr, "/EM/CPU%d/RZ/Interpret/Success/Or", "The number of times OR was successfully interpreted.");
238 EM_REG_COUNTER_USED(&pStats->StatR3Or, "/EM/CPU%d/R3/Interpret/Success/Or", "The number of times OR was successfully interpreted.");
239 EM_REG_COUNTER_USED(&pStats->StatRZPop, "/EM/CPU%d/RZ/Interpret/Success/Pop", "The number of times POP was successfully interpreted.");
240 EM_REG_COUNTER_USED(&pStats->StatR3Pop, "/EM/CPU%d/R3/Interpret/Success/Pop", "The number of times POP was successfully interpreted.");
241 EM_REG_COUNTER_USED(&pStats->StatRZRdtsc, "/EM/CPU%d/RZ/Interpret/Success/Rdtsc", "The number of times RDTSC was successfully interpreted.");
242 EM_REG_COUNTER_USED(&pStats->StatR3Rdtsc, "/EM/CPU%d/R3/Interpret/Success/Rdtsc", "The number of times RDTSC was successfully interpreted.");
243 EM_REG_COUNTER_USED(&pStats->StatRZRdpmc, "/EM/CPU%d/RZ/Interpret/Success/Rdpmc", "The number of times RDPMC was successfully interpreted.");
244 EM_REG_COUNTER_USED(&pStats->StatR3Rdpmc, "/EM/CPU%d/R3/Interpret/Success/Rdpmc", "The number of times RDPMC was successfully interpreted.");
245 EM_REG_COUNTER_USED(&pStats->StatRZSti, "/EM/CPU%d/RZ/Interpret/Success/Sti", "The number of times STI was successfully interpreted.");
246 EM_REG_COUNTER_USED(&pStats->StatR3Sti, "/EM/CPU%d/R3/Interpret/Success/Sti", "The number of times STI was successfully interpreted.");
247 EM_REG_COUNTER_USED(&pStats->StatRZXchg, "/EM/CPU%d/RZ/Interpret/Success/Xchg", "The number of times XCHG was successfully interpreted.");
248 EM_REG_COUNTER_USED(&pStats->StatR3Xchg, "/EM/CPU%d/R3/Interpret/Success/Xchg", "The number of times XCHG was successfully interpreted.");
249 EM_REG_COUNTER_USED(&pStats->StatRZXor, "/EM/CPU%d/RZ/Interpret/Success/Xor", "The number of times XOR was successfully interpreted.");
250 EM_REG_COUNTER_USED(&pStats->StatR3Xor, "/EM/CPU%d/R3/Interpret/Success/Xor", "The number of times XOR was successfully interpreted.");
251 EM_REG_COUNTER_USED(&pStats->StatRZMonitor, "/EM/CPU%d/RZ/Interpret/Success/Monitor", "The number of times MONITOR was successfully interpreted.");
252 EM_REG_COUNTER_USED(&pStats->StatR3Monitor, "/EM/CPU%d/R3/Interpret/Success/Monitor", "The number of times MONITOR was successfully interpreted.");
253 EM_REG_COUNTER_USED(&pStats->StatRZMWait, "/EM/CPU%d/RZ/Interpret/Success/MWait", "The number of times MWAIT was successfully interpreted.");
254 EM_REG_COUNTER_USED(&pStats->StatR3MWait, "/EM/CPU%d/R3/Interpret/Success/MWait", "The number of times MWAIT was successfully interpreted.");
255 EM_REG_COUNTER_USED(&pStats->StatRZBtr, "/EM/CPU%d/RZ/Interpret/Success/Btr", "The number of times BTR was successfully interpreted.");
256 EM_REG_COUNTER_USED(&pStats->StatR3Btr, "/EM/CPU%d/R3/Interpret/Success/Btr", "The number of times BTR was successfully interpreted.");
257 EM_REG_COUNTER_USED(&pStats->StatRZBts, "/EM/CPU%d/RZ/Interpret/Success/Bts", "The number of times BTS was successfully interpreted.");
258 EM_REG_COUNTER_USED(&pStats->StatR3Bts, "/EM/CPU%d/R3/Interpret/Success/Bts", "The number of times BTS was successfully interpreted.");
259 EM_REG_COUNTER_USED(&pStats->StatRZBtc, "/EM/CPU%d/RZ/Interpret/Success/Btc", "The number of times BTC was successfully interpreted.");
260 EM_REG_COUNTER_USED(&pStats->StatR3Btc, "/EM/CPU%d/R3/Interpret/Success/Btc", "The number of times BTC was successfully interpreted.");
261 EM_REG_COUNTER_USED(&pStats->StatRZCmpXchg, "/EM/CPU%d/RZ/Interpret/Success/CmpXchg", "The number of times CMPXCHG was successfully interpreted.");
262 EM_REG_COUNTER_USED(&pStats->StatR3CmpXchg, "/EM/CPU%d/R3/Interpret/Success/CmpXchg", "The number of times CMPXCHG was successfully interpreted.");
263 EM_REG_COUNTER_USED(&pStats->StatRZCmpXchg8b, "/EM/CPU%d/RZ/Interpret/Success/CmpXchg8b", "The number of times CMPXCHG8B was successfully interpreted.");
264 EM_REG_COUNTER_USED(&pStats->StatR3CmpXchg8b, "/EM/CPU%d/R3/Interpret/Success/CmpXchg8b", "The number of times CMPXCHG8B was successfully interpreted.");
265 EM_REG_COUNTER_USED(&pStats->StatRZXAdd, "/EM/CPU%d/RZ/Interpret/Success/XAdd", "The number of times XADD was successfully interpreted.");
266 EM_REG_COUNTER_USED(&pStats->StatR3XAdd, "/EM/CPU%d/R3/Interpret/Success/XAdd", "The number of times XADD was successfully interpreted.");
267 EM_REG_COUNTER_USED(&pStats->StatR3Rdmsr, "/EM/CPU%d/R3/Interpret/Success/Rdmsr", "The number of times RDMSR was successfully interpreted.");
268 EM_REG_COUNTER_USED(&pStats->StatRZRdmsr, "/EM/CPU%d/RZ/Interpret/Success/Rdmsr", "The number of times RDMSR was successfully interpreted.");
269 EM_REG_COUNTER_USED(&pStats->StatR3Wrmsr, "/EM/CPU%d/R3/Interpret/Success/Wrmsr", "The number of times WRMSR was successfully interpreted.");
270 EM_REG_COUNTER_USED(&pStats->StatRZWrmsr, "/EM/CPU%d/RZ/Interpret/Success/Wrmsr", "The number of times WRMSR was successfully interpreted.");
271 EM_REG_COUNTER_USED(&pStats->StatR3StosWD, "/EM/CPU%d/R3/Interpret/Success/Stoswd", "The number of times STOSWD was successfully interpreted.");
272 EM_REG_COUNTER_USED(&pStats->StatRZStosWD, "/EM/CPU%d/RZ/Interpret/Success/Stoswd", "The number of times STOSWD was successfully interpreted.");
273 EM_REG_COUNTER_USED(&pStats->StatRZWbInvd, "/EM/CPU%d/RZ/Interpret/Success/WbInvd", "The number of times WBINVD was successfully interpreted.");
274 EM_REG_COUNTER_USED(&pStats->StatR3WbInvd, "/EM/CPU%d/R3/Interpret/Success/WbInvd", "The number of times WBINVD was successfully interpreted.");
275 EM_REG_COUNTER_USED(&pStats->StatRZLmsw, "/EM/CPU%d/RZ/Interpret/Success/Lmsw", "The number of times LMSW was successfully interpreted.");
276 EM_REG_COUNTER_USED(&pStats->StatR3Lmsw, "/EM/CPU%d/R3/Interpret/Success/Lmsw", "The number of times LMSW was successfully interpreted.");
277
278 EM_REG_COUNTER(&pStats->StatRZInterpretFailed, "/EM/CPU%d/RZ/Interpret/Failed", "The number of times an instruction was not interpreted.");
279 EM_REG_COUNTER(&pStats->StatR3InterpretFailed, "/EM/CPU%d/R3/Interpret/Failed", "The number of times an instruction was not interpreted.");
280
281 EM_REG_COUNTER_USED(&pStats->StatRZFailedAnd, "/EM/CPU%d/RZ/Interpret/Failed/And", "The number of times AND was not interpreted.");
282 EM_REG_COUNTER_USED(&pStats->StatR3FailedAnd, "/EM/CPU%d/R3/Interpret/Failed/And", "The number of times AND was not interpreted.");
283 EM_REG_COUNTER_USED(&pStats->StatRZFailedCpuId, "/EM/CPU%d/RZ/Interpret/Failed/CpuId", "The number of times CPUID was not interpreted.");
284 EM_REG_COUNTER_USED(&pStats->StatR3FailedCpuId, "/EM/CPU%d/R3/Interpret/Failed/CpuId", "The number of times CPUID was not interpreted.");
285 EM_REG_COUNTER_USED(&pStats->StatRZFailedDec, "/EM/CPU%d/RZ/Interpret/Failed/Dec", "The number of times DEC was not interpreted.");
286 EM_REG_COUNTER_USED(&pStats->StatR3FailedDec, "/EM/CPU%d/R3/Interpret/Failed/Dec", "The number of times DEC was not interpreted.");
287 EM_REG_COUNTER_USED(&pStats->StatRZFailedHlt, "/EM/CPU%d/RZ/Interpret/Failed/Hlt", "The number of times HLT was not interpreted.");
288 EM_REG_COUNTER_USED(&pStats->StatR3FailedHlt, "/EM/CPU%d/R3/Interpret/Failed/Hlt", "The number of times HLT was not interpreted.");
289 EM_REG_COUNTER_USED(&pStats->StatRZFailedInc, "/EM/CPU%d/RZ/Interpret/Failed/Inc", "The number of times INC was not interpreted.");
290 EM_REG_COUNTER_USED(&pStats->StatR3FailedInc, "/EM/CPU%d/R3/Interpret/Failed/Inc", "The number of times INC was not interpreted.");
291 EM_REG_COUNTER_USED(&pStats->StatRZFailedInvlPg, "/EM/CPU%d/RZ/Interpret/Failed/InvlPg", "The number of times INVLPG was not interpreted.");
292 EM_REG_COUNTER_USED(&pStats->StatR3FailedInvlPg, "/EM/CPU%d/R3/Interpret/Failed/InvlPg", "The number of times INVLPG was not interpreted.");
293 EM_REG_COUNTER_USED(&pStats->StatRZFailedIret, "/EM/CPU%d/RZ/Interpret/Failed/Iret", "The number of times IRET was not interpreted.");
294 EM_REG_COUNTER_USED(&pStats->StatR3FailedIret, "/EM/CPU%d/R3/Interpret/Failed/Iret", "The number of times IRET was not interpreted.");
295 EM_REG_COUNTER_USED(&pStats->StatRZFailedLLdt, "/EM/CPU%d/RZ/Interpret/Failed/LLdt", "The number of times LLDT was not interpreted.");
296 EM_REG_COUNTER_USED(&pStats->StatR3FailedLLdt, "/EM/CPU%d/R3/Interpret/Failed/LLdt", "The number of times LLDT was not interpreted.");
297 EM_REG_COUNTER_USED(&pStats->StatRZFailedLIdt, "/EM/CPU%d/RZ/Interpret/Failed/LIdt", "The number of times LIDT was not interpreted.");
298 EM_REG_COUNTER_USED(&pStats->StatR3FailedLIdt, "/EM/CPU%d/R3/Interpret/Failed/LIdt", "The number of times LIDT was not interpreted.");
299 EM_REG_COUNTER_USED(&pStats->StatRZFailedLGdt, "/EM/CPU%d/RZ/Interpret/Failed/LGdt", "The number of times LGDT was not interpreted.");
300 EM_REG_COUNTER_USED(&pStats->StatR3FailedLGdt, "/EM/CPU%d/R3/Interpret/Failed/LGdt", "The number of times LGDT was not interpreted.");
301 EM_REG_COUNTER_USED(&pStats->StatRZFailedMov, "/EM/CPU%d/RZ/Interpret/Failed/Mov", "The number of times MOV was not interpreted.");
302 EM_REG_COUNTER_USED(&pStats->StatR3FailedMov, "/EM/CPU%d/R3/Interpret/Failed/Mov", "The number of times MOV was not interpreted.");
303 EM_REG_COUNTER_USED(&pStats->StatRZFailedMovCRx, "/EM/CPU%d/RZ/Interpret/Failed/MovCRx", "The number of times MOV CRx was not interpreted.");
304 EM_REG_COUNTER_USED(&pStats->StatR3FailedMovCRx, "/EM/CPU%d/R3/Interpret/Failed/MovCRx", "The number of times MOV CRx was not interpreted.");
305 EM_REG_COUNTER_USED(&pStats->StatRZFailedMovDRx, "/EM/CPU%d/RZ/Interpret/Failed/MovDRx", "The number of times MOV DRx was not interpreted.");
306 EM_REG_COUNTER_USED(&pStats->StatR3FailedMovDRx, "/EM/CPU%d/R3/Interpret/Failed/MovDRx", "The number of times MOV DRx was not interpreted.");
307 EM_REG_COUNTER_USED(&pStats->StatRZFailedOr, "/EM/CPU%d/RZ/Interpret/Failed/Or", "The number of times OR was not interpreted.");
308 EM_REG_COUNTER_USED(&pStats->StatR3FailedOr, "/EM/CPU%d/R3/Interpret/Failed/Or", "The number of times OR was not interpreted.");
309 EM_REG_COUNTER_USED(&pStats->StatRZFailedPop, "/EM/CPU%d/RZ/Interpret/Failed/Pop", "The number of times POP was not interpreted.");
310 EM_REG_COUNTER_USED(&pStats->StatR3FailedPop, "/EM/CPU%d/R3/Interpret/Failed/Pop", "The number of times POP was not interpreted.");
311 EM_REG_COUNTER_USED(&pStats->StatRZFailedSti, "/EM/CPU%d/RZ/Interpret/Failed/Sti", "The number of times STI was not interpreted.");
312 EM_REG_COUNTER_USED(&pStats->StatR3FailedSti, "/EM/CPU%d/R3/Interpret/Failed/Sti", "The number of times STI was not interpreted.");
313 EM_REG_COUNTER_USED(&pStats->StatRZFailedXchg, "/EM/CPU%d/RZ/Interpret/Failed/Xchg", "The number of times XCHG was not interpreted.");
314 EM_REG_COUNTER_USED(&pStats->StatR3FailedXchg, "/EM/CPU%d/R3/Interpret/Failed/Xchg", "The number of times XCHG was not interpreted.");
315 EM_REG_COUNTER_USED(&pStats->StatRZFailedXor, "/EM/CPU%d/RZ/Interpret/Failed/Xor", "The number of times XOR was not interpreted.");
316 EM_REG_COUNTER_USED(&pStats->StatR3FailedXor, "/EM/CPU%d/R3/Interpret/Failed/Xor", "The number of times XOR was not interpreted.");
317 EM_REG_COUNTER_USED(&pStats->StatRZFailedMonitor, "/EM/CPU%d/RZ/Interpret/Failed/Monitor", "The number of times MONITOR was not interpreted.");
318 EM_REG_COUNTER_USED(&pStats->StatR3FailedMonitor, "/EM/CPU%d/R3/Interpret/Failed/Monitor", "The number of times MONITOR was not interpreted.");
319 EM_REG_COUNTER_USED(&pStats->StatRZFailedMWait, "/EM/CPU%d/RZ/Interpret/Failed/MWait", "The number of times MWAIT was not interpreted.");
320 EM_REG_COUNTER_USED(&pStats->StatR3FailedMWait, "/EM/CPU%d/R3/Interpret/Failed/MWait", "The number of times MWAIT was not interpreted.");
321 EM_REG_COUNTER_USED(&pStats->StatRZFailedRdtsc, "/EM/CPU%d/RZ/Interpret/Failed/Rdtsc", "The number of times RDTSC was not interpreted.");
322 EM_REG_COUNTER_USED(&pStats->StatR3FailedRdtsc, "/EM/CPU%d/R3/Interpret/Failed/Rdtsc", "The number of times RDTSC was not interpreted.");
323 EM_REG_COUNTER_USED(&pStats->StatRZFailedRdpmc, "/EM/CPU%d/RZ/Interpret/Failed/Rdpmc", "The number of times RDPMC was not interpreted.");
324 EM_REG_COUNTER_USED(&pStats->StatR3FailedRdpmc, "/EM/CPU%d/R3/Interpret/Failed/Rdpmc", "The number of times RDPMC was not interpreted.");
325 EM_REG_COUNTER_USED(&pStats->StatRZFailedRdmsr, "/EM/CPU%d/RZ/Interpret/Failed/Rdmsr", "The number of times RDMSR was not interpreted.");
326 EM_REG_COUNTER_USED(&pStats->StatR3FailedRdmsr, "/EM/CPU%d/R3/Interpret/Failed/Rdmsr", "The number of times RDMSR was not interpreted.");
327 EM_REG_COUNTER_USED(&pStats->StatRZFailedWrmsr, "/EM/CPU%d/RZ/Interpret/Failed/Wrmsr", "The number of times WRMSR was not interpreted.");
328 EM_REG_COUNTER_USED(&pStats->StatR3FailedWrmsr, "/EM/CPU%d/R3/Interpret/Failed/Wrmsr", "The number of times WRMSR was not interpreted.");
329 EM_REG_COUNTER_USED(&pStats->StatRZFailedLmsw, "/EM/CPU%d/RZ/Interpret/Failed/Lmsw", "The number of times LMSW was not interpreted.");
330 EM_REG_COUNTER_USED(&pStats->StatR3FailedLmsw, "/EM/CPU%d/R3/Interpret/Failed/Lmsw", "The number of times LMSW was not interpreted.");
331
332 EM_REG_COUNTER_USED(&pStats->StatRZFailedMisc, "/EM/CPU%d/RZ/Interpret/Failed/Misc", "The number of times some misc instruction was encountered.");
333 EM_REG_COUNTER_USED(&pStats->StatR3FailedMisc, "/EM/CPU%d/R3/Interpret/Failed/Misc", "The number of times some misc instruction was encountered.");
334 EM_REG_COUNTER_USED(&pStats->StatRZFailedAdd, "/EM/CPU%d/RZ/Interpret/Failed/Add", "The number of times ADD was not interpreted.");
335 EM_REG_COUNTER_USED(&pStats->StatR3FailedAdd, "/EM/CPU%d/R3/Interpret/Failed/Add", "The number of times ADD was not interpreted.");
336 EM_REG_COUNTER_USED(&pStats->StatRZFailedAdc, "/EM/CPU%d/RZ/Interpret/Failed/Adc", "The number of times ADC was not interpreted.");
337 EM_REG_COUNTER_USED(&pStats->StatR3FailedAdc, "/EM/CPU%d/R3/Interpret/Failed/Adc", "The number of times ADC was not interpreted.");
338 EM_REG_COUNTER_USED(&pStats->StatRZFailedBtr, "/EM/CPU%d/RZ/Interpret/Failed/Btr", "The number of times BTR was not interpreted.");
339 EM_REG_COUNTER_USED(&pStats->StatR3FailedBtr, "/EM/CPU%d/R3/Interpret/Failed/Btr", "The number of times BTR was not interpreted.");
340 EM_REG_COUNTER_USED(&pStats->StatRZFailedBts, "/EM/CPU%d/RZ/Interpret/Failed/Bts", "The number of times BTS was not interpreted.");
341 EM_REG_COUNTER_USED(&pStats->StatR3FailedBts, "/EM/CPU%d/R3/Interpret/Failed/Bts", "The number of times BTS was not interpreted.");
342 EM_REG_COUNTER_USED(&pStats->StatRZFailedBtc, "/EM/CPU%d/RZ/Interpret/Failed/Btc", "The number of times BTC was not interpreted.");
343 EM_REG_COUNTER_USED(&pStats->StatR3FailedBtc, "/EM/CPU%d/R3/Interpret/Failed/Btc", "The number of times BTC was not interpreted.");
344 EM_REG_COUNTER_USED(&pStats->StatRZFailedCli, "/EM/CPU%d/RZ/Interpret/Failed/Cli", "The number of times CLI was not interpreted.");
345 EM_REG_COUNTER_USED(&pStats->StatR3FailedCli, "/EM/CPU%d/R3/Interpret/Failed/Cli", "The number of times CLI was not interpreted.");
346 EM_REG_COUNTER_USED(&pStats->StatRZFailedCmpXchg, "/EM/CPU%d/RZ/Interpret/Failed/CmpXchg", "The number of times CMPXCHG was not interpreted.");
347 EM_REG_COUNTER_USED(&pStats->StatR3FailedCmpXchg, "/EM/CPU%d/R3/Interpret/Failed/CmpXchg", "The number of times CMPXCHG was not interpreted.");
348 EM_REG_COUNTER_USED(&pStats->StatRZFailedCmpXchg8b, "/EM/CPU%d/RZ/Interpret/Failed/CmpXchg8b", "The number of times CMPXCHG8B was not interpreted.");
349 EM_REG_COUNTER_USED(&pStats->StatR3FailedCmpXchg8b, "/EM/CPU%d/R3/Interpret/Failed/CmpXchg8b", "The number of times CMPXCHG8B was not interpreted.");
350 EM_REG_COUNTER_USED(&pStats->StatRZFailedXAdd, "/EM/CPU%d/RZ/Interpret/Failed/XAdd", "The number of times XADD was not interpreted.");
351 EM_REG_COUNTER_USED(&pStats->StatR3FailedXAdd, "/EM/CPU%d/R3/Interpret/Failed/XAdd", "The number of times XADD was not interpreted.");
352 EM_REG_COUNTER_USED(&pStats->StatRZFailedMovNTPS, "/EM/CPU%d/RZ/Interpret/Failed/MovNTPS", "The number of times MOVNTPS was not interpreted.");
353 EM_REG_COUNTER_USED(&pStats->StatR3FailedMovNTPS, "/EM/CPU%d/R3/Interpret/Failed/MovNTPS", "The number of times MOVNTPS was not interpreted.");
354 EM_REG_COUNTER_USED(&pStats->StatRZFailedStosWD, "/EM/CPU%d/RZ/Interpret/Failed/StosWD", "The number of times STOSWD was not interpreted.");
355 EM_REG_COUNTER_USED(&pStats->StatR3FailedStosWD, "/EM/CPU%d/R3/Interpret/Failed/StosWD", "The number of times STOSWD was not interpreted.");
356 EM_REG_COUNTER_USED(&pStats->StatRZFailedSub, "/EM/CPU%d/RZ/Interpret/Failed/Sub", "The number of times SUB was not interpreted.");
357 EM_REG_COUNTER_USED(&pStats->StatR3FailedSub, "/EM/CPU%d/R3/Interpret/Failed/Sub", "The number of times SUB was not interpreted.");
358 EM_REG_COUNTER_USED(&pStats->StatRZFailedWbInvd, "/EM/CPU%d/RZ/Interpret/Failed/WbInvd", "The number of times WBINVD was not interpreted.");
359 EM_REG_COUNTER_USED(&pStats->StatR3FailedWbInvd, "/EM/CPU%d/R3/Interpret/Failed/WbInvd", "The number of times WBINVD was not interpreted.");
360
361 EM_REG_COUNTER_USED(&pStats->StatRZFailedUserMode, "/EM/CPU%d/RZ/Interpret/Failed/UserMode", "The number of rejections because of CPL.");
362 EM_REG_COUNTER_USED(&pStats->StatR3FailedUserMode, "/EM/CPU%d/R3/Interpret/Failed/UserMode", "The number of rejections because of CPL.");
363 EM_REG_COUNTER_USED(&pStats->StatRZFailedPrefix, "/EM/CPU%d/RZ/Interpret/Failed/Prefix", "The number of rejections because of an instruction prefix.");
364 EM_REG_COUNTER_USED(&pStats->StatR3FailedPrefix, "/EM/CPU%d/R3/Interpret/Failed/Prefix", "The number of rejections because of an instruction prefix.");
365
366 EM_REG_COUNTER_USED(&pStats->StatCli, "/EM/CPU%d/R3/PrivInst/Cli", "Number of cli instructions.");
367 EM_REG_COUNTER_USED(&pStats->StatSti, "/EM/CPU%d/R3/PrivInst/Sti", "Number of sti instructions.");
368 EM_REG_COUNTER_USED(&pStats->StatIn, "/EM/CPU%d/R3/PrivInst/In", "Number of in instructions.");
369 EM_REG_COUNTER_USED(&pStats->StatOut, "/EM/CPU%d/R3/PrivInst/Out", "Number of out instructions.");
370 EM_REG_COUNTER_USED(&pStats->StatHlt, "/EM/CPU%d/R3/PrivInst/Hlt", "Number of hlt instructions not handled in GC because of PATM.");
371 EM_REG_COUNTER_USED(&pStats->StatInvlpg, "/EM/CPU%d/R3/PrivInst/Invlpg", "Number of invlpg instructions.");
372 EM_REG_COUNTER_USED(&pStats->StatMisc, "/EM/CPU%d/R3/PrivInst/Misc", "Number of misc. instructions.");
373 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[0], "/EM/CPU%d/R3/PrivInst/Mov CR0, X", "Number of mov CR0 write instructions.");
374 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[1], "/EM/CPU%d/R3/PrivInst/Mov CR1, X", "Number of mov CR1 write instructions.");
375 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[2], "/EM/CPU%d/R3/PrivInst/Mov CR2, X", "Number of mov CR2 write instructions.");
376 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[3], "/EM/CPU%d/R3/PrivInst/Mov CR3, X", "Number of mov CR3 write instructions.");
377 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[4], "/EM/CPU%d/R3/PrivInst/Mov CR4, X", "Number of mov CR4 write instructions.");
378 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[0], "/EM/CPU%d/R3/PrivInst/Mov X, CR0", "Number of mov CR0 read instructions.");
379 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[1], "/EM/CPU%d/R3/PrivInst/Mov X, CR1", "Number of mov CR1 read instructions.");
380 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[2], "/EM/CPU%d/R3/PrivInst/Mov X, CR2", "Number of mov CR2 read instructions.");
381 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[3], "/EM/CPU%d/R3/PrivInst/Mov X, CR3", "Number of mov CR3 read instructions.");
382 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[4], "/EM/CPU%d/R3/PrivInst/Mov X, CR4", "Number of mov CR4 read instructions.");
383 EM_REG_COUNTER_USED(&pStats->StatMovDRx, "/EM/CPU%d/R3/PrivInst/MovDRx", "Number of mov DRx instructions.");
384 EM_REG_COUNTER_USED(&pStats->StatIret, "/EM/CPU%d/R3/PrivInst/Iret", "Number of iret instructions.");
385 EM_REG_COUNTER_USED(&pStats->StatMovLgdt, "/EM/CPU%d/R3/PrivInst/Lgdt", "Number of lgdt instructions.");
386 EM_REG_COUNTER_USED(&pStats->StatMovLidt, "/EM/CPU%d/R3/PrivInst/Lidt", "Number of lidt instructions.");
387 EM_REG_COUNTER_USED(&pStats->StatMovLldt, "/EM/CPU%d/R3/PrivInst/Lldt", "Number of lldt instructions.");
388 EM_REG_COUNTER_USED(&pStats->StatSysEnter, "/EM/CPU%d/R3/PrivInst/Sysenter", "Number of sysenter instructions.");
389 EM_REG_COUNTER_USED(&pStats->StatSysExit, "/EM/CPU%d/R3/PrivInst/Sysexit", "Number of sysexit instructions.");
390 EM_REG_COUNTER_USED(&pStats->StatSysCall, "/EM/CPU%d/R3/PrivInst/Syscall", "Number of syscall instructions.");
391 EM_REG_COUNTER_USED(&pStats->StatSysRet, "/EM/CPU%d/R3/PrivInst/Sysret", "Number of sysret instructions.");
392
393 EM_REG_COUNTER(&pVCpu->em.s.StatTotalClis, "/EM/CPU%d/Cli/Total", "Total number of cli instructions executed.");
394 pVCpu->em.s.pCliStatTree = 0;
395
396 /* these should be considered for release statistics. */
397 EM_REG_COUNTER(&pVCpu->em.s.StatIOEmu, "/PROF/CPU%d/EM/Emulation/IO", "Profiling of emR3RawExecuteIOInstruction.");
398 EM_REG_COUNTER(&pVCpu->em.s.StatPrivEmu, "/PROF/CPU%d/EM/Emulation/Priv", "Profiling of emR3RawPrivileged.");
399 EM_REG_COUNTER(&pVCpu->em.s.StatMiscEmu, "/PROF/CPU%d/EM/Emulation/Misc", "Profiling of emR3RawExecuteInstruction.");
400 EM_REG_PROFILE(&pVCpu->em.s.StatHwAccEntry, "/PROF/CPU%d/EM/HwAccEnter", "Profiling Hardware Accelerated Mode entry overhead.");
401 EM_REG_PROFILE(&pVCpu->em.s.StatHwAccExec, "/PROF/CPU%d/EM/HwAccExec", "Profiling Hardware Accelerated Mode execution.");
402 EM_REG_PROFILE(&pVCpu->em.s.StatREMEmu, "/PROF/CPU%d/EM/REMEmuSingle", "Profiling single instruction REM execution.");
403 EM_REG_PROFILE(&pVCpu->em.s.StatREMExec, "/PROF/CPU%d/EM/REMExec", "Profiling REM execution.");
404 EM_REG_PROFILE(&pVCpu->em.s.StatREMSync, "/PROF/CPU%d/EM/REMSync", "Profiling REM context syncing.");
405 EM_REG_PROFILE(&pVCpu->em.s.StatRAWEntry, "/PROF/CPU%d/EM/RAWEnter", "Profiling Raw Mode entry overhead.");
406 EM_REG_PROFILE(&pVCpu->em.s.StatRAWExec, "/PROF/CPU%d/EM/RAWExec", "Profiling Raw Mode execution.");
407 EM_REG_PROFILE(&pVCpu->em.s.StatRAWTail, "/PROF/CPU%d/EM/RAWTail", "Profiling Raw Mode tail overhead.");
408
409#endif /* VBOX_WITH_STATISTICS */
410
411 EM_REG_COUNTER(&pVCpu->em.s.StatForcedActions, "/PROF/CPU%d/EM/ForcedActions", "Profiling forced action execution.");
412 EM_REG_COUNTER(&pVCpu->em.s.StatHalted, "/PROF/CPU%d/EM/Halted", "Profiling halted state (VMR3WaitHalted).");
413 EM_REG_COUNTER(&pVCpu->em.s.StatREMTotal, "/PROF/CPU%d/EM/REMTotal", "Profiling emR3RemExecute (excluding FFs).");
414 EM_REG_COUNTER(&pVCpu->em.s.StatRAWTotal, "/PROF/CPU%d/EM/RAWTotal", "Profiling emR3RawExecute (excluding FFs).");
415
416 EM_REG_PROFILE_ADV(&pVCpu->em.s.StatTotal, "/PROF/CPU%d/EM/Total", "Profiling EMR3ExecuteVM.");
417 }
418
419 return VINF_SUCCESS;
420}
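/* Note (editor's addition, illustrative only): the "RawR3Enabled" and
 * "RawR0Enabled" CFGM keys queried in EMR3Init() above default to true when
 * absent. A configuration that wants to force recompiler-only execution would
 * therefore set both to 0 before the VM is powered up, along these lines
 * (hypothetical call site, not part of this file):
 *
 *     CFGMR3InsertInteger(CFGMR3GetRoot(pVM), "RawR3Enabled", 0);
 *     CFGMR3InsertInteger(CFGMR3GetRoot(pVM), "RawR0Enabled", 0);
 */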
421
422
423/**
424 * Initializes the per-VCPU EM.
425 *
426 * @returns VBox status code.
427 * @param pVM The VM to operate on.
428 */
429VMMR3DECL(int) EMR3InitCPU(PVM pVM)
430{
431 LogFlow(("EMR3InitCPU\n"));
432 return VINF_SUCCESS;
433}
434
435
436/**
437 * Applies relocations to data and code managed by this
438 * component. This function will be called at init and
439 * whenever the VMM needs to relocate itself inside the GC.
440 *
441 * @param pVM The VM.
442 */
443VMMR3DECL(void) EMR3Relocate(PVM pVM)
444{
445 LogFlow(("EMR3Relocate\n"));
446 for (unsigned i=0;i<pVM->cCPUs;i++)
447 {
448 PVMCPU pVCpu = &pVM->aCpus[i];
449
450 if (pVCpu->em.s.pStatsR3)
451 pVCpu->em.s.pStatsRC = MMHyperR3ToRC(pVM, pVCpu->em.s.pStatsR3);
452 }
453}
454
455
456/**
457 * Reset notification.
458 *
459 * @param pVM The VM to operate on.
460 */
461VMMR3DECL(void) EMR3Reset(PVM pVM)
462{
463 LogFlow(("EMR3Reset: \n"));
464 for (unsigned i=0;i<pVM->cCPUs;i++)
465 {
466 PVMCPU pVCpu = &pVM->aCpus[i];
467
468 pVCpu->em.s.fForceRAW = false;
469 }
470}
471
472
473/**
474 * Terminates the EM.
475 *
476 * Termination means cleaning up and freeing all resources;
477 * the VM itself is at this point powered off or suspended.
478 *
479 * @returns VBox status code.
480 * @param pVM The VM to operate on.
481 */
482VMMR3DECL(int) EMR3Term(PVM pVM)
483{
484 AssertMsg(pVM->em.s.offVM, ("bad init order!\n"));
485
486 PDMR3CritSectDelete(&pVM->em.s.CritSectREM);
487 return VINF_SUCCESS;
488}
489
490/**
491 * Terminates the per-VCPU EM.
492 *
493 * Termination means cleaning up and freeing all resources;
494 * the VM itself is at this point powered off or suspended.
495 *
496 * @returns VBox status code.
497 * @param pVM The VM to operate on.
498 */
499VMMR3DECL(int) EMR3TermCPU(PVM pVM)
500{
501 return 0;
502}
503
504/**
505 * Execute state save operation.
506 *
507 * @returns VBox status code.
508 * @param pVM VM Handle.
509 * @param pSSM SSM operation handle.
510 */
511static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM)
512{
513 for (VMCPUID i = 0; i < pVM->cCPUs; i++)
514 {
515 PVMCPU pVCpu = &pVM->aCpus[i];
516
517 int rc = SSMR3PutBool(pSSM, pVCpu->em.s.fForceRAW);
518 AssertRCReturn(rc, rc);
519
520 Assert(pVCpu->em.s.enmState == EMSTATE_SUSPENDED);
521 Assert(pVCpu->em.s.enmPrevState != EMSTATE_SUSPENDED);
522 rc = SSMR3PutU32(pSSM, pVCpu->em.s.enmPrevState);
523 AssertRCReturn(rc, rc);
524 }
525 return VINF_SUCCESS;
526}
527
528
529/**
530 * Execute state load operation.
531 *
532 * @returns VBox status code.
533 * @param pVM VM Handle.
534 * @param pSSM SSM operation handle.
535 * @param u32Version Data layout version.
536 */
537static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version)
538{
539 int rc = VINF_SUCCESS;
540
541 /*
542 * Validate version.
543 */
544 if ( u32Version != EM_SAVED_STATE_VERSION
545 && u32Version != EM_SAVED_STATE_VERSION_PRE_SMP)
546 {
547 AssertMsgFailed(("emR3Load: Invalid version u32Version=%d (current %d)!\n", u32Version, EM_SAVED_STATE_VERSION));
548 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
549 }
550
551 /*
552 * Load the saved state.
553 */
554 for (VMCPUID i = 0; i < pVM->cCPUs; i++)
555 {
556 PVMCPU pVCpu = &pVM->aCpus[i];
557
558 rc = SSMR3GetBool(pSSM, &pVCpu->em.s.fForceRAW);
559 if (RT_FAILURE(rc))
560 pVCpu->em.s.fForceRAW = false;
561
562 if (u32Version > EM_SAVED_STATE_VERSION_PRE_SMP)
563 {
564 AssertCompile(sizeof(pVCpu->em.s.enmPrevState) == sizeof(uint32_t));
565 rc = SSMR3GetU32(pSSM, (uint32_t *)&pVCpu->em.s.enmPrevState);
566 AssertRCReturn(rc, rc);
567 Assert(pVCpu->em.s.enmPrevState != EMSTATE_SUSPENDED);
568
569 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
570 }
571 Assert(!pVCpu->em.s.pCliStatTree);
572 }
573 return rc;
574}
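/* Saved-state layout note (editor's addition): per virtual CPU the unit written
 * by emR3Save() and restored here is simply
 *
 *     bool     fForceRAW;     // SSMR3PutBool / SSMR3GetBool
 *     uint32_t enmPrevState;  // only present after EM_SAVED_STATE_VERSION_PRE_SMP
 *
 * and on load each CPU is parked in EMSTATE_SUSPENDED until execution resumes.
 */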
575
576
577/**
578 * Enables or disables a set of raw-mode execution modes.
579 *
580 * @returns VINF_SUCCESS on success.
581 * @returns VINF_EM_RESCHEDULE if a rescheduling might be required.
582 * @returns VERR_INVALID_PARAMETER on an invalid enmMode value.
583 *
584 * @param pVM The VM to operate on.
585 * @param enmMode The execution mode change.
586 * @thread The emulation thread.
587 */
588VMMR3DECL(int) EMR3RawSetMode(PVM pVM, EMRAWMODE enmMode)
589{
590 switch (enmMode)
591 {
592 case EMRAW_NONE:
593 pVM->fRawR3Enabled = false;
594 pVM->fRawR0Enabled = false;
595 break;
596 case EMRAW_RING3_ENABLE:
597 pVM->fRawR3Enabled = true;
598 break;
599 case EMRAW_RING3_DISABLE:
600 pVM->fRawR3Enabled = false;
601 break;
602 case EMRAW_RING0_ENABLE:
603 pVM->fRawR0Enabled = true;
604 break;
605 case EMRAW_RING0_DISABLE:
606 pVM->fRawR0Enabled = false;
607 break;
608 default:
609 AssertMsgFailed(("Invalid enmMode=%d\n", enmMode));
610 return VERR_INVALID_PARAMETER;
611 }
612 Log(("EMR3SetRawMode: fRawR3Enabled=%RTbool fRawR0Enabled=%RTbool\n",
613 pVM->fRawR3Enabled, pVM->fRawR0Enabled));
614 return pVM->aCpus[0].em.s.enmState == EMSTATE_RAW ? VINF_EM_RESCHEDULE : VINF_SUCCESS;
615}
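/* Usage sketch (editor's addition, illustrative only): a caller disabling
 * ring-3 raw-mode execution would do something along these lines:
 *
 *     int rc = EMR3RawSetMode(pVM, EMRAW_RING3_DISABLE);
 *     if (rc == VINF_EM_RESCHEDULE)
 *         ; // poke the EM loop so it re-evaluates the scheduling decision
 *
 * The actual call sites live outside this file.
 */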
616
617
618/**
619 * Raise a fatal error.
620 *
621 * Safely terminate the VM with full state report and stuff. This function
622 * will naturally never return.
623 *
624 * @param pVCpu VMCPU handle.
625 * @param rc VBox status code.
626 */
627VMMR3DECL(void) EMR3FatalError(PVMCPU pVCpu, int rc)
628{
629 longjmp(pVCpu->em.s.u.FatalLongJump, rc);
630 AssertReleaseMsgFailed(("longjmp returned!\n"));
631}
632
633
634/**
635 * Gets the EM state name.
636 *
637 * @returns Pointer to a read-only state name.
638 * @param enmState The state.
639 */
640VMMR3DECL(const char *) EMR3GetStateName(EMSTATE enmState)
641{
642 switch (enmState)
643 {
644 case EMSTATE_NONE: return "EMSTATE_NONE";
645 case EMSTATE_RAW: return "EMSTATE_RAW";
646 case EMSTATE_HWACC: return "EMSTATE_HWACC";
647 case EMSTATE_REM: return "EMSTATE_REM";
648 case EMSTATE_PARAV: return "EMSTATE_PARAV";
649 case EMSTATE_HALTED: return "EMSTATE_HALTED";
650 case EMSTATE_WAIT_SIPI: return "EMSTATE_WAIT_SIPI";
651 case EMSTATE_SUSPENDED: return "EMSTATE_SUSPENDED";
652 case EMSTATE_TERMINATING: return "EMSTATE_TERMINATING";
653 case EMSTATE_DEBUG_GUEST_RAW: return "EMSTATE_DEBUG_GUEST_RAW";
654 case EMSTATE_DEBUG_GUEST_REM: return "EMSTATE_DEBUG_GUEST_REM";
655 case EMSTATE_DEBUG_HYPER: return "EMSTATE_DEBUG_HYPER";
656 case EMSTATE_GURU_MEDITATION: return "EMSTATE_GURU_MEDITATION";
657 default: return "Unknown!";
658 }
659}
660
661
662#ifdef VBOX_WITH_STATISTICS
663/**
664 * Just a braindead function to keep track of cli addresses.
665 * @param pVM VM handle.
666 * @param pVCpu VMCPU handle.
667 * @param GCPtrInstr The EIP of the cli instruction.
668 */
669static void emR3RecordCli(PVM pVM, PVMCPU pVCpu, RTGCPTR GCPtrInstr)
670{
671 PCLISTAT pRec;
672
673 pRec = (PCLISTAT)RTAvlPVGet(&pVCpu->em.s.pCliStatTree, (AVLPVKEY)GCPtrInstr);
674 if (!pRec)
675 {
676 /* New cli instruction; insert into the tree. */
677 pRec = (PCLISTAT)MMR3HeapAllocZ(pVM, MM_TAG_EM, sizeof(*pRec));
678 Assert(pRec);
679 if (!pRec)
680 return;
681 pRec->Core.Key = (AVLPVKEY)GCPtrInstr;
682
683 char szCliStatName[32];
684 RTStrPrintf(szCliStatName, sizeof(szCliStatName), "/EM/Cli/0x%RGv", GCPtrInstr);
685 STAM_REG(pVM, &pRec->Counter, STAMTYPE_COUNTER, szCliStatName, STAMUNIT_OCCURENCES, "Number of times cli was executed.");
686
687 bool fRc = RTAvlPVInsert(&pVCpu->em.s.pCliStatTree, &pRec->Core);
688 Assert(fRc); NOREF(fRc);
689 }
690 STAM_COUNTER_INC(&pRec->Counter);
691 STAM_COUNTER_INC(&pVCpu->em.s.StatTotalClis);
692}
693#endif /* VBOX_WITH_STATISTICS */
694
695
696/**
697 * Debug loop.
698 *
699 * @returns VBox status code for EM.
700 * @param pVM VM handle.
701 * @param pVCpu VMCPU handle.
702 * @param rc Current EM VBox status code.
703 */
704static int emR3Debug(PVM pVM, PVMCPU pVCpu, int rc)
705{
706 for (;;)
707 {
708 Log(("emR3Debug: rc=%Rrc\n", rc));
709 const int rcLast = rc;
710
711 /*
712 * Debug related RC.
713 */
714 switch (rc)
715 {
716 /*
717 * Single step an instruction.
718 */
719 case VINF_EM_DBG_STEP:
720 if ( pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_RAW
721 || pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER
722 || pVCpu->em.s.fForceRAW /* paranoia */)
723 rc = emR3RawStep(pVM, pVCpu);
724 else
725 {
726 Assert(pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_REM);
727 rc = emR3RemStep(pVM, pVCpu);
728 }
729 break;
730
731 /*
732 * Simple events: stepped, breakpoint, stop/assertion.
733 */
734 case VINF_EM_DBG_STEPPED:
735 rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED);
736 break;
737
738 case VINF_EM_DBG_BREAKPOINT:
739 rc = DBGFR3EventBreakpoint(pVM, DBGFEVENT_BREAKPOINT);
740 break;
741
742 case VINF_EM_DBG_STOP:
743 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, NULL, 0, NULL, NULL);
744 break;
745
746 case VINF_EM_DBG_HYPER_STEPPED:
747 rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED_HYPER);
748 break;
749
750 case VINF_EM_DBG_HYPER_BREAKPOINT:
751 rc = DBGFR3EventBreakpoint(pVM, DBGFEVENT_BREAKPOINT_HYPER);
752 break;
753
754 case VINF_EM_DBG_HYPER_ASSERTION:
755 RTPrintf("\nVINF_EM_DBG_HYPER_ASSERTION:\n%s%s\n", VMMR3GetRZAssertMsg1(pVM), VMMR3GetRZAssertMsg2(pVM));
756 rc = DBGFR3EventAssertion(pVM, DBGFEVENT_ASSERTION_HYPER, VMMR3GetRZAssertMsg1(pVM), VMMR3GetRZAssertMsg2(pVM));
757 break;
758
759 /*
760 * Guru meditation.
761 */
762 case VERR_VMM_RING0_ASSERTION: /** @todo Make a guru meditation event! */
763 rc = DBGFR3EventSrc(pVM, DBGFEVENT_FATAL_ERROR, "VERR_VMM_RING0_ASSERTION", 0, NULL, NULL);
764 break;
765 case VERR_REM_TOO_MANY_TRAPS: /** @todo Make a guru meditation event! */
766 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, "VERR_REM_TOO_MANY_TRAPS", 0, NULL, NULL);
767 break;
768
769 default: /** @todo don't use default for guru, but make special errors code! */
770 rc = DBGFR3Event(pVM, DBGFEVENT_FATAL_ERROR);
771 break;
772 }
773
774 /*
775 * Process the result.
776 */
777 do
778 {
779 switch (rc)
780 {
781 /*
782 * Continue the debugging loop.
783 */
784 case VINF_EM_DBG_STEP:
785 case VINF_EM_DBG_STOP:
786 case VINF_EM_DBG_STEPPED:
787 case VINF_EM_DBG_BREAKPOINT:
788 case VINF_EM_DBG_HYPER_STEPPED:
789 case VINF_EM_DBG_HYPER_BREAKPOINT:
790 case VINF_EM_DBG_HYPER_ASSERTION:
791 break;
792
793 /*
794 * Resuming execution (in some form) has to be done here if we got
795 * a hypervisor debug event.
796 */
797 case VINF_SUCCESS:
798 case VINF_EM_RESUME:
799 case VINF_EM_SUSPEND:
800 case VINF_EM_RESCHEDULE:
801 case VINF_EM_RESCHEDULE_RAW:
802 case VINF_EM_RESCHEDULE_REM:
803 case VINF_EM_HALT:
804 if (pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER)
805 {
806 rc = emR3RawResumeHyper(pVM, pVCpu);
807 if (rc != VINF_SUCCESS && RT_SUCCESS(rc))
808 continue;
809 }
810 if (rc == VINF_SUCCESS)
811 rc = VINF_EM_RESCHEDULE;
812 return rc;
813
814 /*
815 * The debugger isn't attached.
816 * We'll simply turn the thing off since that's the easiest thing to do.
817 */
818 case VERR_DBGF_NOT_ATTACHED:
819 switch (rcLast)
820 {
821 case VINF_EM_DBG_HYPER_STEPPED:
822 case VINF_EM_DBG_HYPER_BREAKPOINT:
823 case VINF_EM_DBG_HYPER_ASSERTION:
824 case VERR_TRPM_PANIC:
825 case VERR_TRPM_DONT_PANIC:
826 case VERR_VMM_RING0_ASSERTION:
827 return rcLast;
828 }
829 return VINF_EM_OFF;
830
831 /*
832 * Status codes terminating the VM in one or another sense.
833 */
834 case VINF_EM_TERMINATE:
835 case VINF_EM_OFF:
836 case VINF_EM_RESET:
837 case VINF_EM_NO_MEMORY:
838 case VINF_EM_RAW_STALE_SELECTOR:
839 case VINF_EM_RAW_IRET_TRAP:
840 case VERR_TRPM_PANIC:
841 case VERR_TRPM_DONT_PANIC:
842 case VERR_VMM_RING0_ASSERTION:
843 case VERR_INTERNAL_ERROR:
844 case VERR_INTERNAL_ERROR_2:
845 case VERR_INTERNAL_ERROR_3:
846 case VERR_INTERNAL_ERROR_4:
847 case VERR_INTERNAL_ERROR_5:
848 case VERR_IPE_UNEXPECTED_STATUS:
849 case VERR_IPE_UNEXPECTED_INFO_STATUS:
850 case VERR_IPE_UNEXPECTED_ERROR_STATUS:
851 return rc;
852
853 /*
854 * The rest is unexpected, and will keep us here.
855 */
856 default:
857 AssertMsgFailed(("Unxpected rc %Rrc!\n", rc));
858 break;
859 }
860 } while (false);
861 } /* debug for ever */
862}
863
864/**
865 * Steps recompiled code.
866 *
867 * @returns VBox status code. The most important ones are: VINF_EM_STEP_EVENT,
868 * VINF_EM_RESCHEDULE, VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
869 *
870 * @param pVM VM handle.
871 * @param pVCpu VMCPU handle.
872 */
873static int emR3RemStep(PVM pVM, PVMCPU pVCpu)
874{
875 LogFlow(("emR3RemStep: cs:eip=%04x:%08x\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
876
877 EMRemLock(pVM);
878
879 /*
880 * Switch to REM, step instruction, switch back.
881 */
882 int rc = REMR3State(pVM, pVCpu);
883 if (RT_SUCCESS(rc))
884 {
885 rc = REMR3Step(pVM, pVCpu);
886 REMR3StateBack(pVM, pVCpu);
887 }
888 EMRemUnlock(pVM);
889
890 LogFlow(("emR3RemStep: returns %Rrc cs:eip=%04x:%08x\n", rc, CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
891 return rc;
892}
893
894
895/**
896 * Executes recompiled code.
897 *
898 * This function contains the recompiler version of the inner
899 * execution loop (the outer loop being in EMR3ExecuteVM()).
900 *
901 * @returns VBox status code. The most important ones are: VINF_EM_RESCHEDULE,
902 * VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
903 *
904 * @param pVM VM handle.
905 * @param pVCpu VMCPU handle.
906 * @param pfFFDone Where to store an indicator telling whether or not
907 * FFs were done before returning.
908 *
909 */
910static int emR3RemExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
911{
912#ifdef LOG_ENABLED
913 PCPUMCTX pCtx = pVCpu->em.s.pCtx;
914 uint32_t cpl = CPUMGetGuestCPL(pVCpu, CPUMCTX2CORE(pCtx));
915
916 if (pCtx->eflags.Bits.u1VM)
917 Log(("EMV86: %04X:%08X IF=%d\n", pCtx->cs, pCtx->eip, pCtx->eflags.Bits.u1IF));
918 else
919 Log(("EMR%d: %04X:%08X ESP=%08X IF=%d CR0=%x\n", cpl, pCtx->cs, pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF, (uint32_t)pCtx->cr0));
920#endif
921 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatREMTotal, a);
922
923#if defined(VBOX_STRICT) && defined(DEBUG_bird)
924 AssertMsg( VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)
925 || !MMHyperIsInsideArea(pVM, CPUMGetGuestEIP(pVCpu)), /** @todo #1419 - get flat address. */
926 ("cs:eip=%RX16:%RX32\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
927#endif
928
929 /* Big lock, but you are not supposed to own any lock when coming in here. */
930 EMRemLock(pVM);
931
932 /*
933 * Spin till we get a forced action which returns anything but VINF_SUCCESS
934 * or the REM suggests raw-mode execution.
935 */
936 *pfFFDone = false;
937 bool fInREMState = false;
938 int rc = VINF_SUCCESS;
939
940 /* Flush the recompiler TLB if the VCPU has changed. */
941 if (pVM->em.s.idLastRemCpu != pVCpu->idCpu)
942 {
943 REMFlushTBs(pVM);
944 /* Also sync the entire state. */
945 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
946 }
947 pVM->em.s.idLastRemCpu = pVCpu->idCpu;
948
949 for (;;)
950 {
951 /*
952 * Update REM state if not already in sync.
953 */
954 if (!fInREMState)
955 {
956 STAM_PROFILE_START(&pVCpu->em.s.StatREMSync, b);
957 rc = REMR3State(pVM, pVCpu);
958 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMSync, b);
959 if (RT_FAILURE(rc))
960 break;
961 fInREMState = true;
962
963 /*
964 * We might have missed the raising of VMREQ, TIMER and some other
965 * important FFs while we were busy switching the state. So, check again.
966 */
967 if ( VM_FF_ISPENDING(pVM, VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_DBGF | VM_FF_TERMINATE | VM_FF_RESET)
968 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_TIMER | VMCPU_FF_REQUEST))
969 {
970 LogFlow(("emR3RemExecute: Skipping run, because FF is set. %#x\n", pVM->fGlobalForcedActions));
971 goto l_REMDoForcedActions;
972 }
973 }
974
975
976 /*
977 * Execute REM.
978 */
979 STAM_PROFILE_START(&pVCpu->em.s.StatREMExec, c);
980 rc = REMR3Run(pVM, pVCpu);
981 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMExec, c);
982
983
984 /*
985 * Deal with high priority post execution FFs before doing anything else.
986 */
987 if ( VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_POST_MASK)
988 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_MASK))
989 rc = emR3HighPriorityPostForcedActions(pVM, pVCpu, rc);
990
991 /*
992 * Process the returned status code.
993 * (Try keep this short! Call functions!)
994 */
995 if (rc != VINF_SUCCESS)
996 {
997 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
998 break;
999 if (rc != VINF_REM_INTERRUPED_FF)
1000 {
1001 /*
1002 * Anything which is not known to us means an internal error
1003 * and the termination of the VM!
1004 */
1005 AssertMsg(rc == VERR_REM_TOO_MANY_TRAPS, ("Unknown GC return code: %Rra\n", rc));
1006 break;
1007 }
1008 }
1009
1010
1011 /*
1012 * Check and execute forced actions.
1013 * Sync back the VM state before calling any of these.
1014 */
1015#ifdef VBOX_HIGH_RES_TIMERS_HACK
1016 TMTimerPollVoid(pVM, pVCpu);
1017#endif
1018 AssertCompile((VMCPU_FF_ALL_BUT_RAW_MASK & ~(VMCPU_FF_CSAM_PENDING_ACTION | VMCPU_FF_CSAM_SCAN_PAGE)) & VMCPU_FF_TIMER);
1019 if ( VM_FF_ISPENDING(pVM, VM_FF_ALL_BUT_RAW_MASK)
1020 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_ALL_BUT_RAW_MASK & ~(VMCPU_FF_CSAM_PENDING_ACTION | VMCPU_FF_CSAM_SCAN_PAGE)))
1021 {
1022l_REMDoForcedActions:
1023 if (fInREMState)
1024 {
1025 STAM_PROFILE_START(&pVCpu->em.s.StatREMSync, d);
1026 REMR3StateBack(pVM, pVCpu);
1027 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMSync, d);
1028 fInREMState = false;
1029 }
1030 STAM_REL_PROFILE_ADV_SUSPEND(&pVCpu->em.s.StatREMTotal, a);
1031 rc = emR3ForcedActions(pVM, pVCpu, rc);
1032 STAM_REL_PROFILE_ADV_RESUME(&pVCpu->em.s.StatREMTotal, a);
1033 if ( rc != VINF_SUCCESS
1034 && rc != VINF_EM_RESCHEDULE_REM)
1035 {
1036 *pfFFDone = true;
1037 break;
1038 }
1039 }
1040
1041 } /* The Inner Loop, recompiled execution mode version. */
1042
1043
1044 /*
1045 * Returning. Sync back the VM state if required.
1046 */
1047 if (fInREMState)
1048 {
1049 STAM_PROFILE_START(&pVCpu->em.s.StatREMSync, e);
1050 REMR3StateBack(pVM, pVCpu);
1051 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMSync, e);
1052 }
1053 EMRemUnlock(pVM);
1054
1055 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatREMTotal, a);
1056 return rc;
1057}
1058
1059
1060/**
1061 * Resumes executing hypervisor after a debug event.
1062 *
1063 * This is kind of special since our current guest state is
1064 * potentially out of sync.
1065 *
1066 * @returns VBox status code.
1067 * @param pVM The VM handle.
1068 * @param pVCpu The VMCPU handle.
1069 */
1070static int emR3RawResumeHyper(PVM pVM, PVMCPU pVCpu)
1071{
1072 int rc;
1073 PCPUMCTX pCtx = pVCpu->em.s.pCtx;
1074 Assert(pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER);
1075 Log(("emR3RawResumeHyper: cs:eip=%RTsel:%RGr efl=%RGr\n", pCtx->cs, pCtx->eip, pCtx->eflags));
1076
1077 /*
1078 * Resume execution.
1079 */
1080 CPUMRawEnter(pVCpu, NULL);
1081 CPUMSetHyperEFlags(pVCpu, CPUMGetHyperEFlags(pVCpu) | X86_EFL_RF);
1082 rc = VMMR3ResumeHyper(pVM, pVCpu);
1083 Log(("emR3RawStep: cs:eip=%RTsel:%RGr efl=%RGr - returned from GC with rc=%Rrc\n", pCtx->cs, pCtx->eip, pCtx->eflags, rc));
1084 rc = CPUMRawLeave(pVCpu, NULL, rc);
1085 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_RESUME_GUEST_MASK);
1086
1087 /*
1088 * Deal with the return code.
1089 */
1090 rc = emR3HighPriorityPostForcedActions(pVM, pVCpu, rc);
1091 rc = emR3RawHandleRC(pVM, pVCpu, pCtx, rc);
1092 rc = emR3RawUpdateForceFlag(pVM, pVCpu, pCtx, rc);
1093 return rc;
1094}
1095
1096
1097/**
1098 * Steps rawmode.
1099 *
1100 * @returns VBox status code.
1101 * @param pVM The VM handle.
1102 * @param pVCpu The VMCPU handle.
1103 */
1104static int emR3RawStep(PVM pVM, PVMCPU pVCpu)
1105{
1106 Assert( pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER
1107 || pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_RAW
1108 || pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_REM);
1109 int rc;
1110 PCPUMCTX pCtx = pVCpu->em.s.pCtx;
1111 bool fGuest = pVCpu->em.s.enmState != EMSTATE_DEBUG_HYPER;
1112#ifndef DEBUG_sandervl
1113 Log(("emR3RawStep: cs:eip=%RTsel:%RGr efl=%RGr\n", fGuest ? CPUMGetGuestCS(pVCpu) : CPUMGetHyperCS(pVCpu),
1114 fGuest ? CPUMGetGuestEIP(pVCpu) : CPUMGetHyperEIP(pVCpu), fGuest ? CPUMGetGuestEFlags(pVCpu) : CPUMGetHyperEFlags(pVCpu)));
1115#endif
1116 if (fGuest)
1117 {
1118 /*
1119 * Check vital forced actions, but ignore pending interrupts and timers.
1120 */
1121 if ( VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_RAW_MASK)
1122 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK))
1123 {
1124 rc = emR3RawForcedActions(pVM, pVCpu, pCtx);
1125 if (rc != VINF_SUCCESS)
1126 return rc;
1127 }
1128
1129 /*
1130 * Set flags for single stepping.
1131 */
1132 CPUMSetGuestEFlags(pVCpu, CPUMGetGuestEFlags(pVCpu) | X86_EFL_TF | X86_EFL_RF);
1133 }
1134 else
1135 CPUMSetHyperEFlags(pVCpu, CPUMGetHyperEFlags(pVCpu) | X86_EFL_TF | X86_EFL_RF);
1136
1137 /*
1138 * Single step.
1139 * We do not start time or anything; if anything we should just do a few nanoseconds.
1140 */
1141 CPUMRawEnter(pVCpu, NULL);
1142 do
1143 {
1144 if (pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER)
1145 rc = VMMR3ResumeHyper(pVM, pVCpu);
1146 else
1147 rc = VMMR3RawRunGC(pVM, pVCpu);
1148#ifndef DEBUG_sandervl
1149 Log(("emR3RawStep: cs:eip=%RTsel:%RGr efl=%RGr - GC rc %Rrc\n", fGuest ? CPUMGetGuestCS(pVCpu) : CPUMGetHyperCS(pVCpu),
1150 fGuest ? CPUMGetGuestEIP(pVCpu) : CPUMGetHyperEIP(pVCpu), fGuest ? CPUMGetGuestEFlags(pVCpu) : CPUMGetHyperEFlags(pVCpu), rc));
1151#endif
1152 } while ( rc == VINF_SUCCESS
1153 || rc == VINF_EM_RAW_INTERRUPT);
1154 rc = CPUMRawLeave(pVCpu, NULL, rc);
1155 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_RESUME_GUEST_MASK);
1156
1157 /*
1158 * Make sure the trap flag is cleared.
1159 * (Too bad if the guest is trying to single step too.)
1160 */
1161 if (fGuest)
1162 CPUMSetGuestEFlags(pVCpu, CPUMGetGuestEFlags(pVCpu) & ~X86_EFL_TF);
1163 else
1164 CPUMSetHyperEFlags(pVCpu, CPUMGetHyperEFlags(pVCpu) & ~X86_EFL_TF);
1165
1166 /*
1167 * Deal with the return codes.
1168 */
1169 rc = emR3HighPriorityPostForcedActions(pVM, pVCpu, rc);
1170 rc = emR3RawHandleRC(pVM, pVCpu, pCtx, rc);
1171 rc = emR3RawUpdateForceFlag(pVM, pVCpu, pCtx, rc);
1172 return rc;
1173}
1174
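/* Illustrative sketch (not part of the original source): the stepping helpers above and
 * below (emR3RawStep, emR3HwAccStep) follow the same EFLAGS discipline for single stepping -
 * set TF so the CPU traps after one instruction and RF so an instruction breakpoint at the
 * current EIP does not refire, then clear TF again once the step is done.  The helper below
 * shows that discipline on a plain 32-bit EFLAGS value; the function and its parameters are
 * hypothetical, the X86_EFL_* masks are the ones already used above. */
#if 0 /* illustrative only */
static uint32_t emR3SketchStepEFlags(uint32_t uEFlags, bool fBeforeStep)
{
    if (fBeforeStep)
        uEFlags |= X86_EFL_TF | X86_EFL_RF; /* trap after one instruction, suppress instruction breakpoints once */
    else
        uEFlags &= ~X86_EFL_TF;             /* stop single stepping (a guest that was single stepping loses its TF) */
    return uEFlags;
}
#endif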
1175
1176#ifdef DEBUG
1177
1178/**
1179 * Steps hardware accelerated mode.
1180 *
1181 * @returns VBox status code.
1182 * @param pVM The VM handle.
1183 * @param pVCpu The VMCPU handle.
1184 */
1185static int emR3HwAccStep(PVM pVM, PVMCPU pVCpu)
1186{
1187 Assert(pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_HWACC);
1188
1189 int rc;
1190 PCPUMCTX pCtx = pVCpu->em.s.pCtx;
1191 VMCPU_FF_CLEAR(pVCpu, (VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT | VMCPU_FF_TRPM_SYNC_IDT | VMCPU_FF_SELM_SYNC_TSS));
1192
1193 /*
1194 * Check vital forced actions, but ignore pending interrupts and timers.
1195 */
1196 if ( VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_RAW_MASK)
1197 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK))
1198 {
1199 rc = emR3RawForcedActions(pVM, pVCpu, pCtx);
1200 if (rc != VINF_SUCCESS)
1201 return rc;
1202 }
1203 /*
1204 * Set flags for single stepping.
1205 */
1206 CPUMSetGuestEFlags(pVCpu, CPUMGetGuestEFlags(pVCpu) | X86_EFL_TF | X86_EFL_RF);
1207
1208 /*
1209 * Single step.
1210 * We do not start time or anything, if anything we should just do a few nanoseconds.
1211 */
1212 do
1213 {
1214 rc = VMMR3HwAccRunGC(pVM, pVCpu);
1215 } while ( rc == VINF_SUCCESS
1216 || rc == VINF_EM_RAW_INTERRUPT);
1217 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_RESUME_GUEST_MASK);
1218
1219 /*
1220 * Make sure the trap flag is cleared.
1221 * (Too bad if the guest is trying to single step too.)
1222 */
1223 CPUMSetGuestEFlags(pVCpu, CPUMGetGuestEFlags(pVCpu) & ~X86_EFL_TF);
1224
1225 /*
1226 * Deal with the return codes.
1227 */
1228 rc = emR3HighPriorityPostForcedActions(pVM, pVCpu, rc);
1229 rc = emR3RawHandleRC(pVM, pVCpu, pCtx, rc);
1230 rc = emR3RawUpdateForceFlag(pVM, pVCpu, pCtx, rc);
1231 return rc;
1232}
1233
1234
1235int emR3SingleStepExecRaw(PVM pVM, PVMCPU pVCpu, uint32_t cIterations)
1236{
1237 int rc = VINF_SUCCESS;
1238 EMSTATE enmOldState = pVCpu->em.s.enmState;
1239 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_RAW;
1240
1241 Log(("Single step BEGIN:\n"));
1242 for (uint32_t i = 0; i < cIterations; i++)
1243 {
1244 DBGFR3PrgStep(pVCpu);
1245 DBGFR3DisasInstrCurrentLog(pVCpu, "RSS: ");
1246 rc = emR3RawStep(pVM, pVCpu);
1247 if (rc != VINF_SUCCESS)
1248 break;
1249 }
1250 Log(("Single step END: rc=%Rrc\n", rc));
1251 CPUMSetGuestEFlags(pVCpu, CPUMGetGuestEFlags(pVCpu) & ~X86_EFL_TF);
1252 pVCpu->em.s.enmState = enmOldState;
1253 return rc;
1254}
1255
1256
1257static int emR3SingleStepExecHwAcc(PVM pVM, PVMCPU pVCpu, uint32_t cIterations)
1258{
1259 int rc = VINF_SUCCESS;
1260 EMSTATE enmOldState = pVCpu->em.s.enmState;
1261 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_HWACC;
1262
1263 Log(("Single step BEGIN:\n"));
1264 for (uint32_t i = 0; i < cIterations; i++)
1265 {
1266 DBGFR3PrgStep(pVCpu);
1267 DBGFR3DisasInstrCurrentLog(pVCpu, "RSS: ");
1268 rc = emR3HwAccStep(pVM, pVCpu);
1269 if ( rc != VINF_SUCCESS
1270 || !HWACCMR3CanExecuteGuest(pVM, pVCpu->em.s.pCtx))
1271 break;
1272 }
1273 Log(("Single step END: rc=%Rrc\n", rc));
1274 CPUMSetGuestEFlags(pVCpu, CPUMGetGuestEFlags(pVCpu) & ~X86_EFL_TF);
1275 pVCpu->em.s.enmState = enmOldState;
1276 return rc == VINF_SUCCESS ? VINF_EM_RESCHEDULE_REM : rc;
1277}
1278
1279
1280static int emR3SingleStepExecRem(PVM pVM, PVMCPU pVCpu, uint32_t cIterations)
1281{
1282 EMSTATE enmOldState = pVCpu->em.s.enmState;
1283
1284 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_REM;
1285
1286 Log(("Single step BEGIN:\n"));
1287 for (uint32_t i = 0; i < cIterations; i++)
1288 {
1289 DBGFR3PrgStep(pVCpu);
1290 DBGFR3DisasInstrCurrentLog(pVCpu, "RSS: ");
1291 emR3RemStep(pVM, pVCpu);
1292 if (emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx) != EMSTATE_REM)
1293 break;
1294 }
1295 Log(("Single step END:\n"));
1296 CPUMSetGuestEFlags(pVCpu, CPUMGetGuestEFlags(pVCpu) & ~X86_EFL_TF);
1297 pVCpu->em.s.enmState = enmOldState;
1298 return VINF_EM_RESCHEDULE;
1299}
1300
1301#endif /* DEBUG */
1302
1303
1304/**
1305 * Executes one (or perhaps a few more) instruction(s).
1306 *
1307 * @returns VBox status code suitable for EM.
1308 *
1309 * @param pVM VM handle.
1310 * @param pVCpu VMCPU handle
1311 * @param rcGC GC return code
1312 * @param pszPrefix Disassembly prefix. If not NULL we'll disassemble the
1313 * instruction and prefix the log output with this text.
1314 */
1315#ifdef LOG_ENABLED
1316static int emR3RawExecuteInstructionWorker(PVM pVM, PVMCPU pVCpu, int rcGC, const char *pszPrefix)
1317#else
1318static int emR3RawExecuteInstructionWorker(PVM pVM, PVMCPU pVCpu, int rcGC)
1319#endif
1320{
1321 PCPUMCTX pCtx = pVCpu->em.s.pCtx;
1322 int rc;
1323
1324 /*
1325 *
1326 * The simple solution is to use the recompiler.
1327 * The better solution is to disassemble the current instruction and
1328 * try to handle as many cases as possible without using REM.
1329 *
1330 */
1331
1332#ifdef LOG_ENABLED
1333 /*
1334 * Disassemble the instruction if requested.
1335 */
1336 if (pszPrefix)
1337 {
1338 DBGFR3InfoLog(pVM, "cpumguest", pszPrefix);
1339 DBGFR3DisasInstrCurrentLog(pVCpu, pszPrefix);
1340 }
1341#endif /* LOG_ENABLED */
1342
1343 /*
1344 * PATM is making life more interesting.
1345 * We cannot hand anything to REM which has an EIP inside patch code. So, we'll
1346 * tell PATM there is a trap in this code and have it take the appropriate actions
1347 * to allow us to execute the code in REM.
1348 */
1349 if (PATMIsPatchGCAddr(pVM, pCtx->eip))
1350 {
1351 Log(("emR3RawExecuteInstruction: In patch block. eip=%RRv\n", (RTRCPTR)pCtx->eip));
1352
1353 RTGCPTR pNewEip;
1354 rc = PATMR3HandleTrap(pVM, pCtx, pCtx->eip, &pNewEip);
1355 switch (rc)
1356 {
1357 /*
1358 * It's not very useful to emulate a single instruction and then go back to raw
1359 * mode; just execute the whole block until IF is set again.
1360 */
1361 case VINF_SUCCESS:
1362 Log(("emR3RawExecuteInstruction: Executing instruction starting at new address %RGv IF=%d VMIF=%x\n",
1363 pNewEip, pCtx->eflags.Bits.u1IF, pVCpu->em.s.pPatmGCState->uVMFlags));
1364 pCtx->eip = pNewEip;
1365 Assert(pCtx->eip);
1366
1367 if (pCtx->eflags.Bits.u1IF)
1368 {
1369 /*
1370 * The last instruction in the patch block needs to be executed!! (sti/sysexit for example)
1371 */
1372 Log(("PATCH: IF=1 -> emulate last instruction as it can't be interrupted!!\n"));
1373 return emR3RawExecuteInstruction(pVM, pVCpu, "PATCHIR");
1374 }
1375 else if (rcGC == VINF_PATM_PENDING_IRQ_AFTER_IRET)
1376 {
1377 /* special case: iret, that sets IF, detected a pending irq/event */
1378 return emR3RawExecuteInstruction(pVM, pVCpu, "PATCHIRET");
1379 }
1380 return VINF_EM_RESCHEDULE_REM;
1381
1382 /*
1383 * One instruction.
1384 */
1385 case VINF_PATCH_EMULATE_INSTR:
1386 Log(("emR3RawExecuteInstruction: Emulate patched instruction at %RGv IF=%d VMIF=%x\n",
1387 pNewEip, pCtx->eflags.Bits.u1IF, pVCpu->em.s.pPatmGCState->uVMFlags));
1388 pCtx->eip = pNewEip;
1389 return emR3RawExecuteInstruction(pVM, pVCpu, "PATCHIR");
1390
1391 /*
1392 * The patch was disabled, hand it to the REM.
1393 */
1394 case VERR_PATCH_DISABLED:
1395 Log(("emR3RawExecuteInstruction: Disabled patch -> new eip %RGv IF=%d VMIF=%x\n",
1396 pNewEip, pCtx->eflags.Bits.u1IF, pVCpu->em.s.pPatmGCState->uVMFlags));
1397 pCtx->eip = pNewEip;
1398 if (pCtx->eflags.Bits.u1IF)
1399 {
1400 /*
1401 * The last instruction in the patch block needs to be executed!! (sti/sysexit for example)
1402 */
1403 Log(("PATCH: IF=1 -> emulate last instruction as it can't be interrupted!!\n"));
1404 return emR3RawExecuteInstruction(pVM, pVCpu, "PATCHIR");
1405 }
1406 return VINF_EM_RESCHEDULE_REM;
1407
1408 /* Force continued patch execution; usually due to a write-monitored stack. */
1409 case VINF_PATCH_CONTINUE:
1410 return VINF_SUCCESS;
1411
1412 default:
1413 AssertReleaseMsgFailed(("Unknown return code %Rrc from PATMR3HandleTrap\n", rc));
1414 return VERR_IPE_UNEXPECTED_STATUS;
1415 }
1416 }
1417
1418#if 0
1419 /* Try our own instruction emulator before falling back to the recompiler. */
1420 DISCPUSTATE Cpu;
1421 rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCtx->rip, &Cpu, "GEN EMU");
1422 if (RT_SUCCESS(rc))
1423 {
1424 uint32_t size;
1425
1426 switch (Cpu.pCurInstr->opcode)
1427 {
1428 /* @todo we can do more now */
1429 case OP_MOV:
1430 case OP_AND:
1431 case OP_OR:
1432 case OP_XOR:
1433 case OP_POP:
1434 case OP_INC:
1435 case OP_DEC:
1436 case OP_XCHG:
1437 STAM_PROFILE_START(&pVCpu->em.s.StatMiscEmu, a);
1438 rc = EMInterpretInstructionCPU(pVM, &Cpu, CPUMCTX2CORE(pCtx), 0, &size);
1439 if (RT_SUCCESS(rc))
1440 {
1441 pCtx->rip += Cpu.opsize;
1442#ifdef EM_NOTIFY_HWACCM
1443 if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_HWACC)
1444 HWACCMR3NotifyEmulated(pVCpu);
1445#endif
1446 STAM_PROFILE_STOP(&pVCpu->em.s.StatMiscEmu, a);
1447 return rc;
1448 }
1449 if (rc != VERR_EM_INTERPRETER)
1450 AssertMsgFailedReturn(("rc=%Rrc\n", rc), rc);
1451 STAM_PROFILE_STOP(&pVCpu->em.s.StatMiscEmu, a);
1452 break;
1453 }
1454 }
1455#endif /* 0 */
1456 STAM_PROFILE_START(&pVCpu->em.s.StatREMEmu, a);
1457 Log(("EMINS: %04x:%RGv RSP=%RGv\n", pCtx->cs, (RTGCPTR)pCtx->rip, (RTGCPTR)pCtx->rsp));
1458 EMRemLock(pVM);
1459 /* Flush the recompiler TLB if the VCPU has changed. */
1460 if (pVM->em.s.idLastRemCpu != pVCpu->idCpu)
1461 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
1462 pVM->em.s.idLastRemCpu = pVCpu->idCpu;
1463
1464 rc = REMR3EmulateInstruction(pVM, pVCpu);
1465 EMRemUnlock(pVM);
1466 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMEmu, a);
1467
1468#ifdef EM_NOTIFY_HWACCM
1469 if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_HWACC)
1470 HWACCMR3NotifyEmulated(pVCpu);
1471#endif
1472 return rc;
1473}
1474
1475
1476/**
1477 * Executes one (or perhaps a few more) instruction(s).
1478 * This is just a wrapper for discarding pszPrefix in non-logging builds.
1479 *
1480 * @returns VBox status code suitable for EM.
1481 * @param pVM VM handle.
1482 * @param pVCpu VMCPU handle.
1483 * @param pszPrefix Disassembly prefix. If not NULL we'll disassemble the
1484 * instruction and prefix the log output with this text.
1485 * @param rcGC GC return code
1486 */
1487DECLINLINE(int) emR3RawExecuteInstruction(PVM pVM, PVMCPU pVCpu, const char *pszPrefix, int rcGC)
1488{
1489#ifdef LOG_ENABLED
1490 return emR3RawExecuteInstructionWorker(pVM, pVCpu, rcGC, pszPrefix);
1491#else
1492 return emR3RawExecuteInstructionWorker(pVM, pVCpu, rcGC);
1493#endif
1494}
1495
1496/**
1497 * Executes one (or perhaps a few more) IO instruction(s).
1498 *
1499 * @returns VBox status code suitable for EM.
1500 * @param pVM VM handle.
1501 * @param pVCpu VMCPU handle.
1502 */
1503int emR3RawExecuteIOInstruction(PVM pVM, PVMCPU pVCpu)
1504{
1505 int rc;
1506 PCPUMCTX pCtx = pVCpu->em.s.pCtx;
1507
1508 STAM_PROFILE_START(&pVCpu->em.s.StatIOEmu, a);
1509
1510 /** @todo probably we should fall back to the recompiler; otherwise we'll go back and forth between HC & GC
1511 * as I/O instructions tend to come in batches of more than one
1512 */
1513 DISCPUSTATE Cpu;
1514 rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCtx->rip, &Cpu, "IO EMU");
1515 if (RT_SUCCESS(rc))
1516 {
1517 rc = VINF_EM_RAW_EMULATE_INSTR;
1518
1519 if (!(Cpu.prefix & (PREFIX_REP | PREFIX_REPNE)))
1520 {
1521 switch (Cpu.pCurInstr->opcode)
1522 {
1523 case OP_IN:
1524 {
1525 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->StatIn);
1526 rc = IOMInterpretIN(pVM, CPUMCTX2CORE(pCtx), &Cpu);
1527 break;
1528 }
1529
1530 case OP_OUT:
1531 {
1532 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->StatOut);
1533 rc = IOMInterpretOUT(pVM, CPUMCTX2CORE(pCtx), &Cpu);
1534 break;
1535 }
1536 }
1537 }
1538 else if (Cpu.prefix & PREFIX_REP)
1539 {
1540 switch (Cpu.pCurInstr->opcode)
1541 {
1542 case OP_INSB:
1543 case OP_INSWD:
1544 {
1545 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->StatIn);
1546 rc = IOMInterpretINS(pVM, CPUMCTX2CORE(pCtx), &Cpu);
1547 break;
1548 }
1549
1550 case OP_OUTSB:
1551 case OP_OUTSWD:
1552 {
1553 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->StatOut);
1554 rc = IOMInterpretOUTS(pVM, CPUMCTX2CORE(pCtx), &Cpu);
1555 break;
1556 }
1557 }
1558 }
1559
1560 /*
1561 * Handle the I/O return codes.
1562 * (The unhandled cases end up with rc == VINF_EM_RAW_EMULATE_INSTR.)
1563 */
1564 if (IOM_SUCCESS(rc))
1565 {
1566 pCtx->rip += Cpu.opsize;
1567 STAM_PROFILE_STOP(&pVCpu->em.s.StatIOEmu, a);
1568 return rc;
1569 }
1570
1571 if (rc == VINF_EM_RAW_GUEST_TRAP)
1572 {
1573 STAM_PROFILE_STOP(&pVCpu->em.s.StatIOEmu, a);
1574 rc = emR3RawGuestTrap(pVM, pVCpu);
1575 return rc;
1576 }
1577 AssertMsg(rc != VINF_TRPM_XCPT_DISPATCHED, ("Handle VINF_TRPM_XCPT_DISPATCHED\n"));
1578
1579 if (RT_FAILURE(rc))
1580 {
1581 STAM_PROFILE_STOP(&pVCpu->em.s.StatIOEmu, a);
1582 return rc;
1583 }
1584 AssertMsg(rc == VINF_EM_RAW_EMULATE_INSTR || rc == VINF_EM_RESCHEDULE_REM, ("rc=%Rrc\n", rc));
1585 }
1586 STAM_PROFILE_STOP(&pVCpu->em.s.StatIOEmu, a);
1587 return emR3RawExecuteInstruction(pVM, pVCpu, "IO: ");
1588}
1589
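/* Illustrative sketch (not part of the original source): emR3RawExecuteIOInstruction above
 * only advances the guest instruction pointer when IOM reports that the port access was
 * actually handled; a guest trap is routed to emR3RawGuestTrap and hard failures are
 * propagated with RIP left untouched so the access can be retried or re-dispatched.  The
 * helper below condenses that contract; the name, the fHandled flag and the cbInstr
 * parameter are hypothetical stand-ins for the IOM_SUCCESS(rc) check and Cpu.opsize used
 * above. */
#if 0 /* illustrative only */
static void emR3SketchAdvanceRipIfHandled(PCPUMCTX pCtx, bool fHandled, uint32_t cbInstr)
{
    if (fHandled)
        pCtx->rip += cbInstr;   /* skip the IN/OUT/INS/OUTS instruction that was just emulated */
    /* otherwise leave rip alone so the trap/failure path sees the original instruction */
}
#endif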
1590
1591/**
1592 * Handle a guest context trap.
1593 *
1594 * @returns VBox status code suitable for EM.
1595 * @param pVM VM handle.
1596 * @param pVCpu VMCPU handle.
1597 */
1598static int emR3RawGuestTrap(PVM pVM, PVMCPU pVCpu)
1599{
1600 PCPUMCTX pCtx = pVCpu->em.s.pCtx;
1601
1602 /*
1603 * Get the trap info.
1604 */
1605 uint8_t u8TrapNo;
1606 TRPMEVENT enmType;
1607 RTGCUINT uErrorCode;
1608 RTGCUINTPTR uCR2;
1609 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrorCode, &uCR2);
1610 if (RT_FAILURE(rc))
1611 {
1612 AssertReleaseMsgFailed(("No trap! (rc=%Rrc)\n", rc));
1613 return rc;
1614 }
1615
1616 /*
1617 * Traps can be directly forwarded in hardware accelerated mode.
1618 */
1619 if (HWACCMIsEnabled(pVM))
1620 {
1621#ifdef LOG_ENABLED
1622 DBGFR3InfoLog(pVM, "cpumguest", "Guest trap");
1623 DBGFR3DisasInstrCurrentLog(pVCpu, "Guest trap");
1624#endif
1625 return VINF_EM_RESCHEDULE_HWACC;
1626 }
1627
1628#if 1 /* Experimental: Review, disable if it causes trouble. */
1629 /*
1630 * Handle traps in patch code first.
1631 *
1632 * We catch a few of these cases in RC before returning to R3 (#PF, #GP, #BP)
1633 * but several traps aren't handled specially by TRPM in RC, so we end up here
1634 * instead. One example is #DE.
1635 */
1636 uint32_t uCpl = CPUMGetGuestCPL(pVCpu, CPUMCTX2CORE(pCtx));
1637 if ( uCpl == 0
1638 && PATMIsPatchGCAddr(pVM, (RTGCPTR)pCtx->eip))
1639 {
1640 LogFlow(("emR3RawGuestTrap: trap %#x in patch code; eip=%08x\n", u8TrapNo, pCtx->eip));
1641 return emR3PatchTrap(pVM, pVCpu, pCtx, rc);
1642 }
1643#endif
1644
1645 /*
1646 * If the guest gate is marked unpatched, then we will check again if we can patch it.
1647 * (This assumes that we've already tried and failed to dispatch the trap in
1648 * RC for the gates that have already been patched. This is true for most high-volume
1649 * traps, because these are handled specially, but not for odd ones like #DE.)
1650 */
1651 if (TRPMR3GetGuestTrapHandler(pVM, u8TrapNo) == TRPM_INVALID_HANDLER)
1652 {
1653 CSAMR3CheckGates(pVM, u8TrapNo, 1);
1654 Log(("emR3RawHandleRC: recheck gate %x -> valid=%d\n", u8TrapNo, TRPMR3GetGuestTrapHandler(pVM, u8TrapNo) != TRPM_INVALID_HANDLER));
1655
1656 /* If it was successful, then we could go back to raw mode. */
1657 if (TRPMR3GetGuestTrapHandler(pVM, u8TrapNo) != TRPM_INVALID_HANDLER)
1658 {
1659 /* Must check pending forced actions as our IDT or GDT might be out of sync. */
1660 rc = EMR3CheckRawForcedActions(pVM, pVCpu);
1661 AssertRCReturn(rc, rc);
1662
1663 TRPMERRORCODE enmError = uErrorCode != ~0U
1664 ? TRPM_TRAP_HAS_ERRORCODE
1665 : TRPM_TRAP_NO_ERRORCODE;
1666 rc = TRPMForwardTrap(pVCpu, CPUMCTX2CORE(pCtx), u8TrapNo, uErrorCode, enmError, TRPM_TRAP, -1);
1667 if (rc == VINF_SUCCESS /* Don't use RT_SUCCESS */)
1668 {
1669 TRPMResetTrap(pVCpu);
1670 return VINF_EM_RESCHEDULE_RAW;
1671 }
1672 AssertMsg(rc == VINF_EM_RAW_GUEST_TRAP, ("%Rrc\n", rc));
1673 }
1674 }
1675
1676 /*
1677 * Scan kernel code that traps; we might not get another chance.
1678 */
1679 /** @todo move this up before the dispatching? */
1680 if ( (pCtx->ss & X86_SEL_RPL) <= 1
1681 && !pCtx->eflags.Bits.u1VM)
1682 {
1683 Assert(!PATMIsPatchGCAddr(pVM, pCtx->eip));
1684 CSAMR3CheckCodeEx(pVM, CPUMCTX2CORE(pCtx), pCtx->eip);
1685 }
1686
1687 /*
1688 * Trap specific handling.
1689 */
1690 if (u8TrapNo == 6) /* (#UD) Invalid opcode. */
1691 {
1692 /*
1693 * If MONITOR & MWAIT are supported, then interpret them here.
1694 */
1695 DISCPUSTATE cpu;
1696 rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCtx->rip, &cpu, "Guest Trap (#UD): ");
1697 if ( RT_SUCCESS(rc)
1698 && (cpu.pCurInstr->opcode == OP_MONITOR || cpu.pCurInstr->opcode == OP_MWAIT))
1699 {
1700 uint32_t u32Dummy, u32Features, u32ExtFeatures;
1701 CPUMGetGuestCpuId(pVCpu, 1, &u32Dummy, &u32Dummy, &u32ExtFeatures, &u32Features);
1702 if (u32ExtFeatures & X86_CPUID_FEATURE_ECX_MONITOR)
1703 {
1704 rc = TRPMResetTrap(pVCpu);
1705 AssertRC(rc);
1706
1707 uint32_t opsize;
1708 rc = EMInterpretInstructionCPU(pVM, pVCpu, &cpu, CPUMCTX2CORE(pCtx), 0, &opsize);
1709 if (RT_SUCCESS(rc))
1710 {
1711 pCtx->rip += cpu.opsize;
1712#ifdef EM_NOTIFY_HWACCM
1713 if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_HWACC)
1714 HWACCMR3NotifyEmulated(pVCpu);
1715#endif
1716 return rc;
1717 }
1718 return emR3RawExecuteInstruction(pVM, pVCpu, "Monitor: ");
1719 }
1720 }
1721 }
1722 else if (u8TrapNo == 13) /* (#GP) Privileged exception */
1723 {
1724 /*
1725 * Handle I/O bitmap?
1726 */
1727 /** @todo We're not supposed to be here with a false guest trap concerning
1728 * I/O access. We can easily handle those in RC. */
1729 DISCPUSTATE cpu;
1730 rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCtx->rip, &cpu, "Guest Trap: ");
1731 if ( RT_SUCCESS(rc)
1732 && (cpu.pCurInstr->optype & OPTYPE_PORTIO))
1733 {
1734 /*
1735 * We should really check the TSS for the I/O bitmap, but it's not like this
1736 * lazy approach really makes things worse.
1737 */
1738 rc = TRPMResetTrap(pVCpu);
1739 AssertRC(rc);
1740 return emR3RawExecuteInstruction(pVM, pVCpu, "IO Guest Trap: ");
1741 }
1742 }
1743
1744#ifdef LOG_ENABLED
1745 DBGFR3InfoLog(pVM, "cpumguest", "Guest trap");
1746 DBGFR3DisasInstrCurrentLog(pVCpu, "Guest trap");
1747
1748 /* Get guest page information. */
1749 uint64_t fFlags = 0;
1750 RTGCPHYS GCPhys = 0;
1751 int rc2 = PGMGstGetPage(pVCpu, uCR2, &fFlags, &GCPhys);
1752 Log(("emR3RawGuestTrap: cs:eip=%04x:%08x: trap=%02x err=%08x cr2=%08x cr0=%08x%s: Phys=%RGp fFlags=%08llx %s %s %s%s rc2=%d\n",
1753 pCtx->cs, pCtx->eip, u8TrapNo, uErrorCode, uCR2, (uint32_t)pCtx->cr0, (enmType == TRPM_SOFTWARE_INT) ? " software" : "", GCPhys, fFlags,
1754 fFlags & X86_PTE_P ? "P " : "NP", fFlags & X86_PTE_US ? "U" : "S",
1755 fFlags & X86_PTE_RW ? "RW" : "R0", fFlags & X86_PTE_G ? " G" : "", rc2));
1756#endif
1757
1758 /*
1759 * #PG has CR2.
1760 * (Because of stuff like above we must set CR2 in a delayed fashion.)
1761 */
1762 if (u8TrapNo == 14 /* #PG */)
1763 pCtx->cr2 = uCR2;
1764
1765 return VINF_EM_RESCHEDULE_REM;
1766}
1767
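/* Illustrative sketch (not part of the original source): the #UD handling in
 * emR3RawGuestTrap above only interprets MONITOR/MWAIT when the guest's CPUID leaf 1
 * reports the MONITOR feature (X86_CPUID_FEATURE_ECX_MONITOR, i.e. ECX bit 3 of that
 * leaf).  The helper below is the same test on a raw ECX value; its name and parameter
 * are hypothetical. */
#if 0 /* illustrative only */
static bool emR3SketchGuestHasMonitor(uint32_t uEcxLeaf1)
{
    /* CPUID.01H:ECX[3] - MONITOR/MWAIT supported. */
    return (uEcxLeaf1 & (1U << 3)) != 0;
}
#endif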
1768
1769/**
1770 * Handle a ring switch trap.
1771 * We need to gather statistics and install patches; the result goes to REM.
1772 *
1773 * @returns VBox status code suitable for EM.
1774 * @param pVM VM handle.
1775 * @param pVCpu VMCPU handle.
1776 */
1777int emR3RawRingSwitch(PVM pVM, PVMCPU pVCpu)
1778{
1779 int rc;
1780 DISCPUSTATE Cpu;
1781 PCPUMCTX pCtx = pVCpu->em.s.pCtx;
1782
1783 /*
1784 * sysenter, syscall & callgate
1785 */
1786 rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCtx->rip, &Cpu, "RSWITCH: ");
1787 if (RT_SUCCESS(rc))
1788 {
1789 if (Cpu.pCurInstr->opcode == OP_SYSENTER)
1790 {
1791 if (pCtx->SysEnter.cs != 0)
1792 {
1793 rc = PATMR3InstallPatch(pVM, SELMToFlat(pVM, DIS_SELREG_CS, CPUMCTX2CORE(pCtx), pCtx->eip),
1794 (SELMGetCpuModeFromSelector(pVM, pCtx->eflags, pCtx->cs, &pCtx->csHid) == CPUMODE_32BIT) ? PATMFL_CODE32 : 0);
1795 if (RT_SUCCESS(rc))
1796 {
1797 DBGFR3DisasInstrCurrentLog(pVCpu, "Patched sysenter instruction");
1798 return VINF_EM_RESCHEDULE_RAW;
1799 }
1800 }
1801 }
1802
1803#ifdef VBOX_WITH_STATISTICS
1804 switch (Cpu.pCurInstr->opcode)
1805 {
1806 case OP_SYSENTER:
1807 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->StatSysEnter);
1808 break;
1809 case OP_SYSEXIT:
1810 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->StatSysExit);
1811 break;
1812 case OP_SYSCALL:
1813 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->StatSysCall);
1814 break;
1815 case OP_SYSRET:
1816 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->StatSysRet);
1817 break;
1818 }
1819#endif
1820 }
1821 else
1822 AssertRC(rc);
1823
1824 /* go to the REM to emulate a single instruction */
1825 return emR3RawExecuteInstruction(pVM, pVCpu, "RSWITCH: ");
1826}
1827
1828
1829/**
1830 * Handle a trap (\#PF or \#GP) in patch code
1831 *
1832 * @returns VBox status code suitable for EM.
1833 * @param pVM VM handle.
1834 * @param pVCpu VMCPU handle.
1835 * @param pCtx CPU context
1836 * @param gcret GC return code
1837 */
1838static int emR3PatchTrap(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, int gcret)
1839{
1840 uint8_t u8TrapNo;
1841 int rc;
1842 TRPMEVENT enmType;
1843 RTGCUINT uErrorCode;
1844 RTGCUINTPTR uCR2;
1845
1846 Assert(PATMIsPatchGCAddr(pVM, pCtx->eip));
1847
1848 if (gcret == VINF_PATM_PATCH_INT3)
1849 {
1850 u8TrapNo = 3;
1851 uCR2 = 0;
1852 uErrorCode = 0;
1853 }
1854 else if (gcret == VINF_PATM_PATCH_TRAP_GP)
1855 {
1856 /* No active trap in this case. Kind of ugly. */
1857 u8TrapNo = X86_XCPT_GP;
1858 uCR2 = 0;
1859 uErrorCode = 0;
1860 }
1861 else
1862 {
1863 rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrorCode, &uCR2);
1864 if (RT_FAILURE(rc))
1865 {
1866 AssertReleaseMsgFailed(("emR3PatchTrap: no trap! (rc=%Rrc) gcret=%Rrc\n", rc, gcret));
1867 return rc;
1868 }
1869 /* Reset the trap as we'll execute the original instruction again. */
1870 TRPMResetTrap(pVCpu);
1871 }
1872
1873 /*
1874 * Deal with traps inside patch code.
1875 * (This code won't run outside GC.)
1876 */
1877 if (u8TrapNo != 1)
1878 {
1879#ifdef LOG_ENABLED
1880 DBGFR3InfoLog(pVM, "cpumguest", "Trap in patch code");
1881 DBGFR3DisasInstrCurrentLog(pVCpu, "Patch code");
1882
1883 DISCPUSTATE Cpu;
1884 int rc;
1885
1886 rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCtx->eip, &Cpu, "Patch code: ");
1887 if ( RT_SUCCESS(rc)
1888 && Cpu.pCurInstr->opcode == OP_IRET)
1889 {
1890 uint32_t eip, selCS, uEFlags;
1891
1892 /* Iret crashes are bad as we have already changed the flags on the stack */
1893 rc = PGMPhysSimpleReadGCPtr(pVCpu, &eip, pCtx->esp, 4);
1894 rc |= PGMPhysSimpleReadGCPtr(pVCpu, &selCS, pCtx->esp+4, 4);
1895 rc |= PGMPhysSimpleReadGCPtr(pVCpu, &uEFlags, pCtx->esp+8, 4);
1896 if (rc == VINF_SUCCESS)
1897 {
1898 if ( (uEFlags & X86_EFL_VM)
1899 || (selCS & X86_SEL_RPL) == 3)
1900 {
1901 uint32_t selSS, esp;
1902
1903 rc |= PGMPhysSimpleReadGCPtr(pVCpu, &esp, pCtx->esp + 12, 4);
1904 rc |= PGMPhysSimpleReadGCPtr(pVCpu, &selSS, pCtx->esp + 16, 4);
1905
1906 if (uEFlags & X86_EFL_VM)
1907 {
1908 uint32_t selDS, selES, selFS, selGS;
1909 rc = PGMPhysSimpleReadGCPtr(pVCpu, &selES, pCtx->esp + 20, 4);
1910 rc |= PGMPhysSimpleReadGCPtr(pVCpu, &selDS, pCtx->esp + 24, 4);
1911 rc |= PGMPhysSimpleReadGCPtr(pVCpu, &selFS, pCtx->esp + 28, 4);
1912 rc |= PGMPhysSimpleReadGCPtr(pVCpu, &selGS, pCtx->esp + 32, 4);
1913 if (rc == VINF_SUCCESS)
1914 {
1915 Log(("Patch code: IRET->VM stack frame: return address %04X:%08RX32 eflags=%08x ss:esp=%04X:%08RX32\n", selCS, eip, uEFlags, selSS, esp));
1916 Log(("Patch code: IRET->VM stack frame: DS=%04X ES=%04X FS=%04X GS=%04X\n", selDS, selES, selFS, selGS));
1917 }
1918 }
1919 else
1920 Log(("Patch code: IRET stack frame: return address %04X:%08RX32 eflags=%08x ss:esp=%04X:%08RX32\n", selCS, eip, uEFlags, selSS, esp));
1921 }
1922 else
1923 Log(("Patch code: IRET stack frame: return address %04X:%08RX32 eflags=%08x\n", selCS, eip, uEFlags));
1924 }
1925 }
1926#endif /* LOG_ENABLED */
1927 Log(("emR3PatchTrap: in patch: eip=%08x: trap=%02x err=%08x cr2=%08x cr0=%08x\n",
1928 pCtx->eip, u8TrapNo, uErrorCode, uCR2, (uint32_t)pCtx->cr0));
1929
1930 RTGCPTR pNewEip;
1931 rc = PATMR3HandleTrap(pVM, pCtx, pCtx->eip, &pNewEip);
1932 switch (rc)
1933 {
1934 /*
1935 * Execute the faulting instruction.
1936 */
1937 case VINF_SUCCESS:
1938 {
1939 /** @todo execute a whole block */
1940 Log(("emR3PatchTrap: Executing faulting instruction at new address %RGv\n", pNewEip));
1941 if (!(pVCpu->em.s.pPatmGCState->uVMFlags & X86_EFL_IF))
1942 Log(("emR3PatchTrap: Virtual IF flag disabled!!\n"));
1943
1944 pCtx->eip = pNewEip;
1945 AssertRelease(pCtx->eip);
1946
1947 if (pCtx->eflags.Bits.u1IF)
1948 {
1949 /* Windows XP lets irets fault intentionally and then takes action based on the opcode; an
1950 * int3 patch overwrites it and leads to blue screens. Remove the patch in this case.
1951 */
1952 if ( u8TrapNo == X86_XCPT_GP
1953 && PATMIsInt3Patch(pVM, pCtx->eip, NULL, NULL))
1954 {
1955 /** @todo move to PATMR3HandleTrap */
1956 Log(("Possible Windows XP iret fault at %08RX32\n", pCtx->eip));
1957 PATMR3RemovePatch(pVM, pCtx->eip);
1958 }
1959
1960 /** @todo Knoppix 5 regression when returning VINF_SUCCESS here and going back to raw mode. */
1961 /* Note: possibly because a reschedule is required (e.g. iret to V86 code) */
1962
1963 return emR3RawExecuteInstruction(pVM, pVCpu, "PATCHIR");
1964 /* Interrupts are enabled; just go back to the original instruction.
1965 return VINF_SUCCESS; */
1966 }
1967 return VINF_EM_RESCHEDULE_REM;
1968 }
1969
1970 /*
1971 * One instruction.
1972 */
1973 case VINF_PATCH_EMULATE_INSTR:
1974 Log(("emR3PatchTrap: Emulate patched instruction at %RGv IF=%d VMIF=%x\n",
1975 pNewEip, pCtx->eflags.Bits.u1IF, pVCpu->em.s.pPatmGCState->uVMFlags));
1976 pCtx->eip = pNewEip;
1977 AssertRelease(pCtx->eip);
1978 return emR3RawExecuteInstruction(pVM, pVCpu, "PATCHEMUL: ");
1979
1980 /*
1981 * The patch was disabled, hand it to the REM.
1982 */
1983 case VERR_PATCH_DISABLED:
1984 if (!(pVCpu->em.s.pPatmGCState->uVMFlags & X86_EFL_IF))
1985 Log(("emR3PatchTrap: Virtual IF flag disabled!!\n"));
1986 pCtx->eip = pNewEip;
1987 AssertRelease(pCtx->eip);
1988
1989 if (pCtx->eflags.Bits.u1IF)
1990 {
1991 /*
1992 * The last instruction in the patch block needs to be executed!! (sti/sysexit for example)
1993 */
1994 Log(("PATCH: IF=1 -> emulate last instruction as it can't be interrupted!!\n"));
1995 return emR3RawExecuteInstruction(pVM, pVCpu, "PATCHIR");
1996 }
1997 return VINF_EM_RESCHEDULE_REM;
1998
1999 /* Force continued patch execution; usually due to a write-monitored stack. */
2000 case VINF_PATCH_CONTINUE:
2001 return VINF_SUCCESS;
2002
2003 /*
2004 * Anything else is *fatal*.
2005 */
2006 default:
2007 AssertReleaseMsgFailed(("Unknown return code %Rrc from PATMR3HandleTrap!\n", rc));
2008 return VERR_IPE_UNEXPECTED_STATUS;
2009 }
2010 }
2011 return VINF_SUCCESS;
2012}
2013
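/* Illustrative sketch (not part of the original source): the LOG_ENABLED block in
 * emR3PatchTrap above reads the IRET return frame word by word from the guest stack.
 * The layout it assumes is the architectural 32-bit frame: EIP, CS and EFLAGS are always
 * there, SS:ESP follow when returning to a less privileged level, and the four data
 * segment registers follow when returning to V86 mode.  The struct below merely names
 * those slots; it is not used by the code above. */
#if 0 /* illustrative only */
typedef struct EMSKETCHIRETFRAME32
{
    uint32_t eip;       /* +0x00 */
    uint32_t cs;        /* +0x04 - selector in the low 16 bits */
    uint32_t eflags;    /* +0x08 */
    uint32_t esp;       /* +0x0c - only on privilege change or return to V86 mode */
    uint32_t ss;        /* +0x10 */
    uint32_t es;        /* +0x14 - the rest only when returning to V86 mode */
    uint32_t ds;        /* +0x18 */
    uint32_t fs;        /* +0x1c */
    uint32_t gs;        /* +0x20 */
} EMSKETCHIRETFRAME32;
#endif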
2014
2015/**
2016 * Handle a privileged instruction.
2017 *
2018 * @returns VBox status code suitable for EM.
2019 * @param pVM VM handle.
2020 * @param pVCpu VMCPU handle.
2021 */
2022int emR3RawPrivileged(PVM pVM, PVMCPU pVCpu)
2023{
2024 STAM_PROFILE_START(&pVCpu->em.s.StatPrivEmu, a);
2025 PCPUMCTX pCtx = pVCpu->em.s.pCtx;
2026
2027 Assert(!pCtx->eflags.Bits.u1VM);
2028
2029 if (PATMIsEnabled(pVM))
2030 {
2031 /*
2032 * Check if in patch code.
2033 */
2034 if (PATMR3IsInsidePatchJump(pVM, pCtx->eip, NULL))
2035 {
2036#ifdef LOG_ENABLED
2037 DBGFR3InfoLog(pVM, "cpumguest", "PRIV");
2038#endif
2039 AssertMsgFailed(("FATAL ERROR: executing random instruction inside generated patch jump %08X\n", pCtx->eip));
2040 return VERR_EM_RAW_PATCH_CONFLICT;
2041 }
2042 if ( (pCtx->ss & X86_SEL_RPL) == 0
2043 && !pCtx->eflags.Bits.u1VM
2044 && !PATMIsPatchGCAddr(pVM, pCtx->eip))
2045 {
2046 int rc = PATMR3InstallPatch(pVM, SELMToFlat(pVM, DIS_SELREG_CS, CPUMCTX2CORE(pCtx), pCtx->eip),
2047 (SELMGetCpuModeFromSelector(pVM, pCtx->eflags, pCtx->cs, &pCtx->csHid) == CPUMODE_32BIT) ? PATMFL_CODE32 : 0);
2048 if (RT_SUCCESS(rc))
2049 {
2050#ifdef LOG_ENABLED
2051 DBGFR3InfoLog(pVM, "cpumguest", "PRIV");
2052#endif
2053 DBGFR3DisasInstrCurrentLog(pVCpu, "Patched privileged instruction");
2054 return VINF_SUCCESS;
2055 }
2056 }
2057 }
2058
2059#ifdef LOG_ENABLED
2060 if (!PATMIsPatchGCAddr(pVM, pCtx->eip))
2061 {
2062 DBGFR3InfoLog(pVM, "cpumguest", "PRIV");
2063 DBGFR3DisasInstrCurrentLog(pVCpu, "Privileged instr: ");
2064 }
2065#endif
2066
2067 /*
2068 * Instruction statistics and logging.
2069 */
2070 DISCPUSTATE Cpu;
2071 int rc;
2072
2073 rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCtx->rip, &Cpu, "PRIV: ");
2074 if (RT_SUCCESS(rc))
2075 {
2076#ifdef VBOX_WITH_STATISTICS
2077 PEMSTATS pStats = pVCpu->em.s.CTX_SUFF(pStats);
2078 switch (Cpu.pCurInstr->opcode)
2079 {
2080 case OP_INVLPG:
2081 STAM_COUNTER_INC(&pStats->StatInvlpg);
2082 break;
2083 case OP_IRET:
2084 STAM_COUNTER_INC(&pStats->StatIret);
2085 break;
2086 case OP_CLI:
2087 STAM_COUNTER_INC(&pStats->StatCli);
2088 emR3RecordCli(pVM, pVCpu, pCtx->rip);
2089 break;
2090 case OP_STI:
2091 STAM_COUNTER_INC(&pStats->StatSti);
2092 break;
2093 case OP_INSB:
2094 case OP_INSWD:
2095 case OP_IN:
2096 case OP_OUTSB:
2097 case OP_OUTSWD:
2098 case OP_OUT:
2099 AssertMsgFailed(("Unexpected privileged exception due to port IO\n"));
2100 break;
2101
2102 case OP_MOV_CR:
2103 if (Cpu.param1.flags & USE_REG_GEN32)
2104 {
2105 //read
2106 Assert(Cpu.param2.flags & USE_REG_CR);
2107 Assert(Cpu.param2.base.reg_ctrl <= USE_REG_CR4);
2108 STAM_COUNTER_INC(&pStats->StatMovReadCR[Cpu.param2.base.reg_ctrl]);
2109 }
2110 else
2111 {
2112 //write
2113 Assert(Cpu.param1.flags & USE_REG_CR);
2114 Assert(Cpu.param1.base.reg_ctrl <= USE_REG_CR4);
2115 STAM_COUNTER_INC(&pStats->StatMovWriteCR[Cpu.param1.base.reg_ctrl]);
2116 }
2117 break;
2118
2119 case OP_MOV_DR:
2120 STAM_COUNTER_INC(&pStats->StatMovDRx);
2121 break;
2122 case OP_LLDT:
2123 STAM_COUNTER_INC(&pStats->StatMovLldt);
2124 break;
2125 case OP_LIDT:
2126 STAM_COUNTER_INC(&pStats->StatMovLidt);
2127 break;
2128 case OP_LGDT:
2129 STAM_COUNTER_INC(&pStats->StatMovLgdt);
2130 break;
2131 case OP_SYSENTER:
2132 STAM_COUNTER_INC(&pStats->StatSysEnter);
2133 break;
2134 case OP_SYSEXIT:
2135 STAM_COUNTER_INC(&pStats->StatSysExit);
2136 break;
2137 case OP_SYSCALL:
2138 STAM_COUNTER_INC(&pStats->StatSysCall);
2139 break;
2140 case OP_SYSRET:
2141 STAM_COUNTER_INC(&pStats->StatSysRet);
2142 break;
2143 case OP_HLT:
2144 STAM_COUNTER_INC(&pStats->StatHlt);
2145 break;
2146 default:
2147 STAM_COUNTER_INC(&pStats->StatMisc);
2148 Log4(("emR3RawPrivileged: opcode=%d\n", Cpu.pCurInstr->opcode));
2149 break;
2150 }
2151#endif /* VBOX_WITH_STATISTICS */
2152 if ( (pCtx->ss & X86_SEL_RPL) == 0
2153 && !pCtx->eflags.Bits.u1VM
2154 && SELMGetCpuModeFromSelector(pVM, pCtx->eflags, pCtx->cs, &pCtx->csHid) == CPUMODE_32BIT)
2155 {
2156 uint32_t size;
2157
2158 STAM_PROFILE_START(&pVCpu->em.s.StatPrivEmu, a);
2159 switch (Cpu.pCurInstr->opcode)
2160 {
2161 case OP_CLI:
2162 pCtx->eflags.u32 &= ~X86_EFL_IF;
2163 Assert(Cpu.opsize == 1);
2164 pCtx->rip += Cpu.opsize;
2165 STAM_PROFILE_STOP(&pVCpu->em.s.StatPrivEmu, a);
2166 return VINF_EM_RESCHEDULE_REM; /* must go to the recompiler now! */
2167
2168 case OP_STI:
2169 pCtx->eflags.u32 |= X86_EFL_IF;
2170 EMSetInhibitInterruptsPC(pVCpu, pCtx->rip + Cpu.opsize);
2171 Assert(Cpu.opsize == 1);
2172 pCtx->rip += Cpu.opsize;
2173 STAM_PROFILE_STOP(&pVCpu->em.s.StatPrivEmu, a);
2174 return VINF_SUCCESS;
2175
2176 case OP_HLT:
2177 if (PATMIsPatchGCAddr(pVM, (RTGCPTR)pCtx->eip))
2178 {
2179 PATMTRANSSTATE enmState;
2180 RTGCPTR pOrgInstrGC = PATMR3PatchToGCPtr(pVM, pCtx->eip, &enmState);
2181
2182 if (enmState == PATMTRANS_OVERWRITTEN)
2183 {
2184 rc = PATMR3DetectConflict(pVM, pOrgInstrGC, pOrgInstrGC);
2185 Assert(rc == VERR_PATCH_DISABLED);
2186 /* Conflict detected, patch disabled */
2187 Log(("emR3RawPrivileged: detected conflict -> disabled patch at %08RX32\n", pCtx->eip));
2188
2189 enmState = PATMTRANS_SAFE;
2190 }
2191
2192 /* The translation had better be successful. Otherwise we can't recover. */
2193 AssertReleaseMsg(pOrgInstrGC && enmState != PATMTRANS_OVERWRITTEN, ("Unable to translate instruction address at %08RX32\n", pCtx->eip));
2194 if (enmState != PATMTRANS_OVERWRITTEN)
2195 pCtx->eip = pOrgInstrGC;
2196 }
2197 /* no break; we could just return VINF_EM_HALT here */
2198
2199 case OP_MOV_CR:
2200 case OP_MOV_DR:
2201#ifdef LOG_ENABLED
2202 if (PATMIsPatchGCAddr(pVM, pCtx->eip))
2203 {
2204 DBGFR3InfoLog(pVM, "cpumguest", "PRIV");
2205 DBGFR3DisasInstrCurrentLog(pVCpu, "Privileged instr: ");
2206 }
2207#endif
2208
2209 rc = EMInterpretInstructionCPU(pVM, pVCpu, &Cpu, CPUMCTX2CORE(pCtx), 0, &size);
2210 if (RT_SUCCESS(rc))
2211 {
2212 pCtx->rip += Cpu.opsize;
2213#ifdef EM_NOTIFY_HWACCM
2214 if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_HWACC)
2215 HWACCMR3NotifyEmulated(pVCpu);
2216#endif
2217 STAM_PROFILE_STOP(&pVCpu->em.s.StatPrivEmu, a);
2218
2219 if ( Cpu.pCurInstr->opcode == OP_MOV_CR
2220 && Cpu.param1.flags == USE_REG_CR /* write */
2221 )
2222 {
2223 /* Deal with CR0 updates inside patch code that force
2224 * us to go to the recompiler.
2225 */
2226 if ( PATMIsPatchGCAddr(pVM, pCtx->rip)
2227 && (pCtx->cr0 & (X86_CR0_WP|X86_CR0_PG|X86_CR0_PE)) != (X86_CR0_WP|X86_CR0_PG|X86_CR0_PE))
2228 {
2229 PATMTRANSSTATE enmState;
2230 RTGCPTR pOrgInstrGC = PATMR3PatchToGCPtr(pVM, pCtx->rip, &enmState);
2231
2232 Log(("Force recompiler switch due to cr0 (%RGp) update rip=%RGv -> %RGv (enmState=%d)\n", pCtx->cr0, pCtx->rip, pOrgInstrGC, enmState));
2233 if (enmState == PATMTRANS_OVERWRITTEN)
2234 {
2235 rc = PATMR3DetectConflict(pVM, pOrgInstrGC, pOrgInstrGC);
2236 Assert(rc == VERR_PATCH_DISABLED);
2237 /* Conflict detected, patch disabled */
2238 Log(("emR3RawPrivileged: detected conflict -> disabled patch at %RGv\n", (RTGCPTR)pCtx->rip));
2239 enmState = PATMTRANS_SAFE;
2240 }
2241 /* The translation had better be successful. Otherwise we can't recover. */
2242 AssertReleaseMsg(pOrgInstrGC && enmState != PATMTRANS_OVERWRITTEN, ("Unable to translate instruction address at %RGv\n", (RTGCPTR)pCtx->rip));
2243 if (enmState != PATMTRANS_OVERWRITTEN)
2244 pCtx->rip = pOrgInstrGC;
2245 }
2246
2247 /* Reschedule is necessary as the execution/paging mode might have changed. */
2248 return VINF_EM_RESCHEDULE;
2249 }
2250 return rc; /* can return VINF_EM_HALT as well. */
2251 }
2252 AssertMsgReturn(rc == VERR_EM_INTERPRETER, ("%Rrc\n", rc), rc);
2253 break; /* fall back to the recompiler */
2254 }
2255 STAM_PROFILE_STOP(&pVCpu->em.s.StatPrivEmu, a);
2256 }
2257 }
2258
2259 if (PATMIsPatchGCAddr(pVM, pCtx->eip))
2260 return emR3PatchTrap(pVM, pVCpu, pCtx, VINF_PATM_PATCH_TRAP_GP);
2261
2262 return emR3RawExecuteInstruction(pVM, pVCpu, "PRIV");
2263}
2264
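/* Illustrative sketch (not part of the original source): when emR3RawPrivileged above
 * interprets STI it does not just set IF, it also records the address of the following
 * instruction via EMSetInhibitInterruptsPC.  That models the x86 interrupt shadow:
 * external interrupts stay blocked for exactly one instruction after STI.  The two
 * fields and the helper below are hypothetical condensations of that state; EM itself
 * keeps it in the VMCPU. */
#if 0 /* illustrative only */
typedef struct EMSKETCHINTSHADOW
{
    bool     fInhibit;      /* an interrupt shadow is currently active */
    uint64_t uInhibitedRip; /* the instruction it covers, i.e. RIP right after the STI */
} EMSKETCHINTSHADOW;

static bool emR3SketchCanInjectIrq(EMSKETCHINTSHADOW const *pShadow, uint64_t uCurrentRip)
{
    /* Once RIP has moved past the shadowed instruction, interrupts may be delivered again. */
    return !pShadow->fInhibit || pShadow->uInhibitedRip != uCurrentRip;
}
#endif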
2265
2266/**
2267 * Update the forced rawmode execution modifier.
2268 *
2269 * This function is called when we're returning from the raw-mode loop(s). If we're
2270 * in patch code, it will set a flag forcing execution to be resumed in raw-mode;
2271 * if not in patch code, the flag will be cleared.
2272 *
2273 * We should never interrupt patch code while it's being executed. Cli patches can
2274 * contain big code blocks, but they are always executed with IF=0. Other patches
2275 * replace single instructions and should be atomic.
2276 *
2277 * @returns Updated rc.
2278 *
2279 * @param pVM The VM handle.
2280 * @param pVCpu The VMCPU handle.
2281 * @param pCtx The guest CPU context.
2282 * @param rc The result code.
2283 */
2284DECLINLINE(int) emR3RawUpdateForceFlag(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, int rc)
2285{
2286 if (PATMIsPatchGCAddr(pVM, pCtx->eip)) /** @todo check cs selector base/type */
2287 {
2288 /* ignore reschedule attempts. */
2289 switch (rc)
2290 {
2291 case VINF_EM_RESCHEDULE:
2292 case VINF_EM_RESCHEDULE_REM:
2293 LogFlow(("emR3RawUpdateForceFlag: patch address -> force raw reschedule\n"));
2294 rc = VINF_SUCCESS;
2295 break;
2296 }
2297 pVCpu->em.s.fForceRAW = true;
2298 }
2299 else
2300 pVCpu->em.s.fForceRAW = false;
2301 return rc;
2302}
2303
2304
2305/**
2306 * Process a subset of the raw-mode return code.
2307 *
2308 * Since we have to share this with raw-mode single stepping, this inline
2309 * function has been created to avoid code duplication.
2310 *
2311 * @returns VINF_SUCCESS if it's ok to continue raw mode.
2312 * @returns VBox status code to return to the EM main loop.
2313 *
2314 * @param pVM The VM handle
2315 * @param pVCpu The VMCPU handle
2316 * @param rc The return code.
2317 * @param pCtx The guest cpu context.
2318 */
2319DECLINLINE(int) emR3RawHandleRC(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, int rc)
2320{
2321 switch (rc)
2322 {
2323 /*
2324 * Common & simple ones.
2325 */
2326 case VINF_SUCCESS:
2327 break;
2328 case VINF_EM_RESCHEDULE_RAW:
2329 case VINF_EM_RESCHEDULE_HWACC:
2330 case VINF_EM_RAW_INTERRUPT:
2331 case VINF_EM_RAW_TO_R3:
2332 case VINF_EM_RAW_TIMER_PENDING:
2333 case VINF_EM_PENDING_REQUEST:
2334 rc = VINF_SUCCESS;
2335 break;
2336
2337 /*
2338 * Privileged instruction.
2339 */
2340 case VINF_EM_RAW_EXCEPTION_PRIVILEGED:
2341 case VINF_PATM_PATCH_TRAP_GP:
2342 rc = emR3RawPrivileged(pVM, pVCpu);
2343 break;
2344
2345 /*
2346 * Got a trap which needs dispatching.
2347 */
2348 case VINF_EM_RAW_GUEST_TRAP:
2349 if (PATMR3IsInsidePatchJump(pVM, pCtx->eip, NULL))
2350 {
2351 AssertReleaseMsgFailed(("FATAL ERROR: executing random instruction inside generated patch jump %08X\n", CPUMGetGuestEIP(pVCpu)));
2352 rc = VERR_EM_RAW_PATCH_CONFLICT;
2353 break;
2354 }
2355 rc = emR3RawGuestTrap(pVM, pVCpu);
2356 break;
2357
2358 /*
2359 * Trap in patch code.
2360 */
2361 case VINF_PATM_PATCH_TRAP_PF:
2362 case VINF_PATM_PATCH_INT3:
2363 rc = emR3PatchTrap(pVM, pVCpu, pCtx, rc);
2364 break;
2365
2366 case VINF_PATM_DUPLICATE_FUNCTION:
2367 Assert(PATMIsPatchGCAddr(pVM, (RTGCPTR)pCtx->eip));
2368 rc = PATMR3DuplicateFunctionRequest(pVM, pCtx);
2369 AssertRC(rc);
2370 rc = VINF_SUCCESS;
2371 break;
2372
2373 case VINF_PATM_CHECK_PATCH_PAGE:
2374 rc = PATMR3HandleMonitoredPage(pVM);
2375 AssertRC(rc);
2376 rc = VINF_SUCCESS;
2377 break;
2378
2379 /*
2380 * Patch manager.
2381 */
2382 case VERR_EM_RAW_PATCH_CONFLICT:
2383 AssertReleaseMsgFailed(("%Rrc handling is not yet implemented\n", rc));
2384 break;
2385
2386#ifdef VBOX_WITH_VMI
2387 /*
2388 * PARAV function.
2389 */
2390 case VINF_EM_RESCHEDULE_PARAV:
2391 rc = PARAVCallFunction(pVM);
2392 break;
2393#endif
2394
2395 /*
2396 * Memory mapped I/O access - attempt to patch the instruction
2397 */
2398 case VINF_PATM_HC_MMIO_PATCH_READ:
2399 rc = PATMR3InstallPatch(pVM, SELMToFlat(pVM, DIS_SELREG_CS, CPUMCTX2CORE(pCtx), pCtx->eip),
2400 PATMFL_MMIO_ACCESS | ((SELMGetCpuModeFromSelector(pVM, pCtx->eflags, pCtx->cs, &pCtx->csHid) == CPUMODE_32BIT) ? PATMFL_CODE32 : 0));
2401 if (RT_FAILURE(rc))
2402 rc = emR3RawExecuteInstruction(pVM, pVCpu, "MMIO");
2403 break;
2404
2405 case VINF_PATM_HC_MMIO_PATCH_WRITE:
2406 AssertFailed(); /* not yet implemented. */
2407 rc = emR3RawExecuteInstruction(pVM, pVCpu, "MMIO");
2408 break;
2409
2410 /*
2411 * Conflict or out of page tables.
2412 *
2413 * VM_FF_PGM_SYNC_CR3 is set by the hypervisor and all we need to
2414 * do here is to execute the pending forced actions.
2415 */
2416 case VINF_PGM_SYNC_CR3:
2417 AssertMsg(VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL),
2418 ("VINF_PGM_SYNC_CR3 and no VMCPU_FF_PGM_SYNC_CR3*!\n"));
2419 rc = VINF_SUCCESS;
2420 break;
2421
2422 /*
2423 * Paging mode change.
2424 */
2425 case VINF_PGM_CHANGE_MODE:
2426 rc = PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
2427 if (rc == VINF_SUCCESS)
2428 rc = VINF_EM_RESCHEDULE;
2429 AssertMsg(RT_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST), ("%Rrc\n", rc));
2430 break;
2431
2432 /*
2433 * CSAM wants to perform a task in ring-3. It has set an FF action flag.
2434 */
2435 case VINF_CSAM_PENDING_ACTION:
2436 rc = VINF_SUCCESS;
2437 break;
2438
2439 /*
2440 * Invoked Interrupt gate - must directly (!) go to the recompiler.
2441 */
2442 case VINF_EM_RAW_INTERRUPT_PENDING:
2443 case VINF_EM_RAW_RING_SWITCH_INT:
2444 Assert(TRPMHasTrap(pVCpu));
2445 Assert(!PATMIsPatchGCAddr(pVM, (RTGCPTR)pCtx->eip));
2446
2447 if (TRPMHasTrap(pVCpu))
2448 {
2449 /* If the guest gate is marked unpatched, then we will check again if we can patch it. */
2450 uint8_t u8Interrupt = TRPMGetTrapNo(pVCpu);
2451 if (TRPMR3GetGuestTrapHandler(pVM, u8Interrupt) == TRPM_INVALID_HANDLER)
2452 {
2453 CSAMR3CheckGates(pVM, u8Interrupt, 1);
2454 Log(("emR3RawHandleRC: recheck gate %x -> valid=%d\n", u8Interrupt, TRPMR3GetGuestTrapHandler(pVM, u8Interrupt) != TRPM_INVALID_HANDLER));
2455 /* Note: If it was successful, then we could go back to raw mode, but let's keep things simple for now. */
2456 }
2457 }
2458 rc = VINF_EM_RESCHEDULE_REM;
2459 break;
2460
2461 /*
2462 * Other ring switch types.
2463 */
2464 case VINF_EM_RAW_RING_SWITCH:
2465 rc = emR3RawRingSwitch(pVM, pVCpu);
2466 break;
2467
2468 /*
2469 * I/O Port access - emulate the instruction.
2470 */
2471 case VINF_IOM_HC_IOPORT_READ:
2472 case VINF_IOM_HC_IOPORT_WRITE:
2473 rc = emR3RawExecuteIOInstruction(pVM, pVCpu);
2474 break;
2475
2476 /*
2477 * Memory mapped I/O access - emulate the instruction.
2478 */
2479 case VINF_IOM_HC_MMIO_READ:
2480 case VINF_IOM_HC_MMIO_WRITE:
2481 case VINF_IOM_HC_MMIO_READ_WRITE:
2482 rc = emR3RawExecuteInstruction(pVM, pVCpu, "MMIO");
2483 break;
2484
2485 /*
2486 * (MM)IO intensive code block detected; fall back to the recompiler for better performance
2487 */
2488 case VINF_EM_RAW_EMULATE_IO_BLOCK:
2489 rc = HWACCMR3EmulateIoBlock(pVM, pCtx);
2490 break;
2491
2492 /*
2493 * Execute instruction.
2494 */
2495 case VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT:
2496 rc = emR3RawExecuteInstruction(pVM, pVCpu, "LDT FAULT: ");
2497 break;
2498 case VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT:
2499 rc = emR3RawExecuteInstruction(pVM, pVCpu, "GDT FAULT: ");
2500 break;
2501 case VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT:
2502 rc = emR3RawExecuteInstruction(pVM, pVCpu, "IDT FAULT: ");
2503 break;
2504 case VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT:
2505 rc = emR3RawExecuteInstruction(pVM, pVCpu, "TSS FAULT: ");
2506 break;
2507 case VINF_EM_RAW_EMULATE_INSTR_PD_FAULT:
2508 rc = emR3RawExecuteInstruction(pVM, pVCpu, "PD FAULT: ");
2509 break;
2510
2511 case VINF_EM_RAW_EMULATE_INSTR_HLT:
2512 /** @todo skip instruction and go directly to the halt state. (see REM for implementation details) */
2513 rc = emR3RawPrivileged(pVM, pVCpu);
2514 break;
2515
2516 case VINF_PATM_PENDING_IRQ_AFTER_IRET:
2517 rc = emR3RawExecuteInstruction(pVM, pVCpu, "EMUL: ", VINF_PATM_PENDING_IRQ_AFTER_IRET);
2518 break;
2519
2520 case VINF_EM_RAW_EMULATE_INSTR:
2521 case VINF_PATCH_EMULATE_INSTR:
2522 rc = emR3RawExecuteInstruction(pVM, pVCpu, "EMUL: ");
2523 break;
2524
2525 /*
2526 * Stale selector and iret traps => REM.
2527 */
2528 case VINF_EM_RAW_STALE_SELECTOR:
2529 case VINF_EM_RAW_IRET_TRAP:
2530 /* We will not go to the recompiler if EIP points to patch code. */
2531 if (PATMIsPatchGCAddr(pVM, pCtx->eip))
2532 {
2533 pCtx->eip = PATMR3PatchToGCPtr(pVM, (RTGCPTR)pCtx->eip, 0);
2534 }
2535 LogFlow(("emR3RawHandleRC: %Rrc -> %Rrc\n", rc, VINF_EM_RESCHEDULE_REM));
2536 rc = VINF_EM_RESCHEDULE_REM;
2537 break;
2538
2539 /*
2540 * Up a level.
2541 */
2542 case VINF_EM_TERMINATE:
2543 case VINF_EM_OFF:
2544 case VINF_EM_RESET:
2545 case VINF_EM_SUSPEND:
2546 case VINF_EM_HALT:
2547 case VINF_EM_RESUME:
2548 case VINF_EM_NO_MEMORY:
2549 case VINF_EM_RESCHEDULE:
2550 case VINF_EM_RESCHEDULE_REM:
2551 case VINF_EM_WAIT_SIPI:
2552 break;
2553
2554 /*
2555 * Up a level and invoke the debugger.
2556 */
2557 case VINF_EM_DBG_STEPPED:
2558 case VINF_EM_DBG_BREAKPOINT:
2559 case VINF_EM_DBG_STEP:
2560 case VINF_EM_DBG_HYPER_BREAKPOINT:
2561 case VINF_EM_DBG_HYPER_STEPPED:
2562 case VINF_EM_DBG_HYPER_ASSERTION:
2563 case VINF_EM_DBG_STOP:
2564 break;
2565
2566 /*
2567 * Up a level, dump and debug.
2568 */
2569 case VERR_TRPM_DONT_PANIC:
2570 case VERR_TRPM_PANIC:
2571 case VERR_VMM_RING0_ASSERTION:
2572 break;
2573
2574 /*
2575 * Up a level, after HwAccM has done some release logging.
2576 */
2577 case VERR_VMX_INVALID_VMCS_FIELD:
2578 case VERR_VMX_INVALID_VMCS_PTR:
2579 case VERR_VMX_INVALID_VMXON_PTR:
2580 case VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_CODE:
2581 case VERR_VMX_UNEXPECTED_EXCEPTION:
2582 case VERR_VMX_UNEXPECTED_EXIT_CODE:
2583 case VERR_VMX_INVALID_GUEST_STATE:
2584 case VERR_VMX_UNABLE_TO_START_VM:
2585 case VERR_VMX_UNABLE_TO_RESUME_VM:
2586 HWACCMR3CheckError(pVM, rc);
2587 break;
2588 /*
2589 * Anything which is not known to us means an internal error
2590 * and the termination of the VM!
2591 */
2592 default:
2593 AssertMsgFailed(("Unknown GC return code: %Rra\n", rc));
2594 break;
2595 }
2596 return rc;
2597}
2598
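/* Illustrative sketch (not part of the original source): emR3RawHandleRC above collapses
 * every status that means "fine, keep running raw mode" into VINF_SUCCESS and passes the
 * scheduling/debugger statuses through; the caller (see emR3RawExecute below) then treats
 * anything in the VINF_EM_FIRST..VINF_EM_LAST range as "return to the outer EM loop".
 * The hypothetical helper below condenses that caller-side test. */
#if 0 /* illustrative only */
static bool emR3SketchShouldLeaveRawLoop(int rc)
{
    /* The EM scheduling/debug statuses are positive informational codes, so a plain
       VINF_SUCCESS (0) falls outside this range and means "keep going". */
    return rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST;
}
#endif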
2599
2600/**
2601 * Check for pending raw actions.
2602 *
2603 * @returns VBox status code. May return VINF_EM_NO_MEMORY but none of the other
2604 * EM statuses.
2605 * @param pVM The VM to operate on.
2606 * @param pVCpu The VMCPU handle.
2607 */
2608VMMR3DECL(int) EMR3CheckRawForcedActions(PVM pVM, PVMCPU pVCpu)
2609{
2610 return emR3RawForcedActions(pVM, pVCpu, pVCpu->em.s.pCtx);
2611}
2612
2613
2614/**
2615 * Process raw-mode specific forced actions.
2616 *
2617 * This function is called when any FFs in the VM_FF_HIGH_PRIORITY_PRE_RAW_MASK is pending.
2618 *
2619 * @returns VBox status code. May return VINF_EM_NO_MEMORY but none of the other
2620 * EM statuses.
2621 * @param pVM The VM handle.
2622 * @param pVCpu The VMCPU handle.
2623 * @param pCtx The guest CPUM register context.
2624 */
2625static int emR3RawForcedActions(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
2626{
2627 /*
2628 * Note that the order is *vitally* important!
2629 * Also note that SELMR3UpdateFromCPUM may trigger VM_FF_SELM_SYNC_TSS.
2630 */
2631
2632
2633 /*
2634 * Sync selector tables.
2635 */
2636 if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT))
2637 {
2638 int rc = SELMR3UpdateFromCPUM(pVM, pVCpu);
2639 if (RT_FAILURE(rc))
2640 return rc;
2641 }
2642
2643 /*
2644 * Sync IDT.
2645 *
2646 * The CSAMR3CheckGates call in TRPMR3SyncIDT may call PGMPrefetchPage
2647 * and PGMShwModifyPage, so we're in for trouble if for instance a
2648 * PGMSyncCR3+pgmPoolClearAll is pending.
2649 */
2650 if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_TRPM_SYNC_IDT))
2651 {
2652 if ( VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3)
2653 && EMIsRawRing0Enabled(pVM)
2654 && CSAMIsEnabled(pVM))
2655 {
2656 int rc = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4, VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
2657 if (RT_FAILURE(rc))
2658 return rc;
2659 }
2660
2661 int rc = TRPMR3SyncIDT(pVM, pVCpu);
2662 if (RT_FAILURE(rc))
2663 return rc;
2664 }
2665
2666 /*
2667 * Sync TSS.
2668 */
2669 if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_SELM_SYNC_TSS))
2670 {
2671 int rc = SELMR3SyncTSS(pVM, pVCpu);
2672 if (RT_FAILURE(rc))
2673 return rc;
2674 }
2675
2676 /*
2677 * Sync page directory.
2678 */
2679 if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
2680 {
2681 Assert(pVCpu->em.s.enmState != EMSTATE_WAIT_SIPI);
2682 int rc = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4, VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
2683 if (RT_FAILURE(rc))
2684 return rc;
2685
2686 Assert(!VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT));
2687
2688 /* Prefetch pages for EIP and ESP. */
2689 /** @todo This is rather expensive. Should investigate if it really helps at all. */
2690 rc = PGMPrefetchPage(pVCpu, SELMToFlat(pVM, DIS_SELREG_CS, CPUMCTX2CORE(pCtx), pCtx->rip));
2691 if (rc == VINF_SUCCESS)
2692 rc = PGMPrefetchPage(pVCpu, SELMToFlat(pVM, DIS_SELREG_SS, CPUMCTX2CORE(pCtx), pCtx->rsp));
2693 if (rc != VINF_SUCCESS)
2694 {
2695 if (rc != VINF_PGM_SYNC_CR3)
2696 {
2697 AssertLogRelMsgReturn(RT_FAILURE(rc), ("%Rrc\n", rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
2698 return rc;
2699 }
2700 rc = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4, VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
2701 if (RT_FAILURE(rc))
2702 return rc;
2703 }
2704 /** @todo maybe prefetch the supervisor stack page as well */
2705 Assert(!VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT));
2706 }
2707
2708 /*
2709 * Allocate handy pages (just in case the above actions have consumed some pages).
2710 */
2711 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PGM_NEED_HANDY_PAGES, VM_FF_PGM_NO_MEMORY))
2712 {
2713 int rc = PGMR3PhysAllocateHandyPages(pVM);
2714 if (RT_FAILURE(rc))
2715 return rc;
2716 }
2717
2718 /*
2719 * Check whether we're out of memory now.
2720 *
2721 * This may stem from some of the above actions or operations that have been executed
2722 * since we ran FFs. The handy page allocation, for instance, must always be followed by
2723 * this check.
2724 */
2725 if (VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY))
2726 return VINF_EM_NO_MEMORY;
2727
2728 return VINF_SUCCESS;
2729}
2730
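/* Illustrative sketch (not part of the original source): the tail of emR3RawForcedActions
 * above shows a pattern EM relies on elsewhere too - refill the handy page pool when PGM
 * asks for it (unless the hard no-memory condition is already set), and then always
 * re-check VM_FF_PGM_NO_MEMORY, because the refill or anything else run since the FFs were
 * raised may have tripped it.  The hypothetical helper below condenses those two steps. */
#if 0 /* illustrative only */
static int emR3SketchCheckMemoryFFs(PVM pVM)
{
    if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PGM_NEED_HANDY_PAGES, VM_FF_PGM_NO_MEMORY))
    {
        int rc = PGMR3PhysAllocateHandyPages(pVM);
        if (RT_FAILURE(rc))
            return rc;
    }
    if (VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY))
        return VINF_EM_NO_MEMORY;
    return VINF_SUCCESS;
}
#endif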
2731
2732/**
2733 * Executes raw code.
2734 *
2735 * This function contains the raw-mode version of the inner
2736 * execution loop (the outer loop being in EMR3ExecuteVM()).
2737 *
2738 * @returns VBox status code. The most important ones are: VINF_EM_RESCHEDULE,
2739 * VINF_EM_RESCHEDULE_REM, VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
2740 *
2741 * @param pVM VM handle.
2742 * @param pVCpu VMCPU handle.
2743 * @param pfFFDone Where to store an indicator telling whether or not
2744 * FFs were done before returning.
2745 */
2746static int emR3RawExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
2747{
2748 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatRAWTotal, a);
2749
2750 int rc = VERR_INTERNAL_ERROR;
2751 PCPUMCTX pCtx = pVCpu->em.s.pCtx;
2752 LogFlow(("emR3RawExecute: (cs:eip=%04x:%08x)\n", pCtx->cs, pCtx->eip));
2753 pVCpu->em.s.fForceRAW = false;
2754 *pfFFDone = false;
2755
2756
2757 /*
2758 *
2759 * Spin till we get a forced action or raw mode status code resulting in
2760 * anything but VINF_SUCCESS or VINF_EM_RESCHEDULE_RAW.
2761 *
2762 */
2763 for (;;)
2764 {
2765 STAM_PROFILE_ADV_START(&pVCpu->em.s.StatRAWEntry, b);
2766
2767 /*
2768 * Check various preconditions.
2769 */
2770#ifdef VBOX_STRICT
2771 Assert(REMR3QueryPendingInterrupt(pVM, pVCpu) == REM_NO_PENDING_IRQ);
2772 Assert(pCtx->eflags.Bits.u1VM || (pCtx->ss & X86_SEL_RPL) == 3 || (pCtx->ss & X86_SEL_RPL) == 0);
2773 AssertMsg( (pCtx->eflags.u32 & X86_EFL_IF)
2774 || PATMShouldUseRawMode(pVM, (RTGCPTR)pCtx->eip),
2775 ("Tried to execute code with IF at EIP=%08x!\n", pCtx->eip));
2776 if ( !VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)
2777 && PGMMapHasConflicts(pVM))
2778 {
2779 PGMMapCheck(pVM);
2780 AssertMsgFailed(("We should not get conflicts any longer!!!\n"));
2781 return VERR_INTERNAL_ERROR;
2782 }
2783#endif /* VBOX_STRICT */
2784
2785 /*
2786 * Process high priority pre-execution raw-mode FFs.
2787 */
2788 if ( VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_RAW_MASK)
2789 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK))
2790 {
2791 rc = emR3RawForcedActions(pVM, pVCpu, pCtx);
2792 if (rc != VINF_SUCCESS)
2793 break;
2794 }
2795
2796 /*
2797 * If we're going to execute ring-0 code, the guest state needs to
2798 * be modified a bit and some of the state components (IF, SS/CS RPL,
2799 * and perhaps EIP) need to be stored with PATM.
2800 */
2801 rc = CPUMRawEnter(pVCpu, NULL);
2802 if (rc != VINF_SUCCESS)
2803 {
2804 STAM_PROFILE_ADV_STOP(&pVCpu->em.s.StatRAWEntry, b);
2805 break;
2806 }
2807
2808 /*
2809 * Scan code before executing it. Don't bother with user mode or V86 code.
2810 */
2811 if ( (pCtx->ss & X86_SEL_RPL) <= 1
2812 && !pCtx->eflags.Bits.u1VM
2813 && !PATMIsPatchGCAddr(pVM, pCtx->eip))
2814 {
2815 STAM_PROFILE_ADV_SUSPEND(&pVCpu->em.s.StatRAWEntry, b);
2816 CSAMR3CheckCodeEx(pVM, CPUMCTX2CORE(pCtx), pCtx->eip);
2817 STAM_PROFILE_ADV_RESUME(&pVCpu->em.s.StatRAWEntry, b);
2818 if ( VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_RAW_MASK)
2819 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK))
2820 {
2821 rc = emR3RawForcedActions(pVM, pVCpu, pCtx);
2822 if (rc != VINF_SUCCESS)
2823 {
2824 rc = CPUMRawLeave(pVCpu, NULL, rc);
2825 break;
2826 }
2827 }
2828 }
2829
2830#ifdef LOG_ENABLED
2831 /*
2832 * Log important stuff before entering GC.
2833 */
2834 PPATMGCSTATE pGCState = PATMR3QueryGCStateHC(pVM);
2835 if (pCtx->eflags.Bits.u1VM)
2836 Log(("RV86: %04X:%08X IF=%d VMFlags=%x\n", pCtx->cs, pCtx->eip, pCtx->eflags.Bits.u1IF, pGCState->uVMFlags));
2837 else if ((pCtx->ss & X86_SEL_RPL) == 1)
2838 {
2839 bool fCSAMScanned = CSAMIsPageScanned(pVM, (RTGCPTR)pCtx->eip);
2840 Log(("RR0: %08X ESP=%08X IF=%d VMFlags=%x PIF=%d CPL=%d (Scanned=%d)\n", pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF, pGCState->uVMFlags, pGCState->fPIF, (pCtx->ss & X86_SEL_RPL), fCSAMScanned));
2841 }
2842 else if ((pCtx->ss & X86_SEL_RPL) == 3)
2843 Log(("RR3: %08X ESP=%08X IF=%d VMFlags=%x\n", pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF, pGCState->uVMFlags));
2844#endif /* LOG_ENABLED */
2845
2846
2847
2848 /*
2849 * Execute the code.
2850 */
2851 STAM_PROFILE_ADV_STOP(&pVCpu->em.s.StatRAWEntry, b);
2852 STAM_PROFILE_START(&pVCpu->em.s.StatRAWExec, c);
2853 rc = VMMR3RawRunGC(pVM, pVCpu);
2854 STAM_PROFILE_STOP(&pVCpu->em.s.StatRAWExec, c);
2855 STAM_PROFILE_ADV_START(&pVCpu->em.s.StatRAWTail, d);
2856
2857 LogFlow(("RR0-E: %08X ESP=%08X IF=%d VMFlags=%x PIF=%d CPL=%d\n", pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF, pGCState->uVMFlags, pGCState->fPIF, (pCtx->ss & X86_SEL_RPL)));
2858 LogFlow(("VMMR3RawRunGC returned %Rrc\n", rc));
2859
2860
2861
2862 /*
2863 * Restore the real CPU state and deal with high priority post
2864 * execution FFs before doing anything else.
2865 */
2866 rc = CPUMRawLeave(pVCpu, NULL, rc);
2867 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_RESUME_GUEST_MASK);
2868 if ( VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_POST_MASK)
2869 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_MASK))
2870 rc = emR3HighPriorityPostForcedActions(pVM, pVCpu, rc);
2871
2872#ifdef VBOX_STRICT
2873 /*
2874 * Assert TSS consistency & rc vs patch code.
2875 */
2876 if ( !VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_SELM_SYNC_TSS | VMCPU_FF_SELM_SYNC_GDT) /* GDT implies TSS at the moment. */
2877 && EMIsRawRing0Enabled(pVM))
2878 SELMR3CheckTSS(pVM);
2879 switch (rc)
2880 {
2881 case VINF_SUCCESS:
2882 case VINF_EM_RAW_INTERRUPT:
2883 case VINF_PATM_PATCH_TRAP_PF:
2884 case VINF_PATM_PATCH_TRAP_GP:
2885 case VINF_PATM_PATCH_INT3:
2886 case VINF_PATM_CHECK_PATCH_PAGE:
2887 case VINF_EM_RAW_EXCEPTION_PRIVILEGED:
2888 case VINF_EM_RAW_GUEST_TRAP:
2889 case VINF_EM_RESCHEDULE_RAW:
2890 break;
2891
2892 default:
2893 if (PATMIsPatchGCAddr(pVM, pCtx->eip) && !(pCtx->eflags.u32 & X86_EFL_TF))
2894 LogIt(NULL, 0, LOG_GROUP_PATM, ("Patch code interrupted at %RRv for reason %Rrc\n", (RTRCPTR)CPUMGetGuestEIP(pVCpu), rc));
2895 break;
2896 }
2897 /*
2898 * Let's go paranoid!
2899 */
2900 if ( !VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)
2901 && PGMMapHasConflicts(pVM))
2902 {
2903 PGMMapCheck(pVM);
2904 AssertMsgFailed(("We should not get conflicts any longer!!! rc=%Rrc\n", rc));
2905 return VERR_INTERNAL_ERROR;
2906 }
2907#endif /* VBOX_STRICT */
2908
2909 /*
2910 * Process the returned status code.
2911 */
2912 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
2913 {
2914 STAM_PROFILE_ADV_STOP(&pVCpu->em.s.StatRAWTail, d);
2915 break;
2916 }
2917 rc = emR3RawHandleRC(pVM, pVCpu, pCtx, rc);
2918 if (rc != VINF_SUCCESS)
2919 {
2920 rc = emR3RawUpdateForceFlag(pVM, pVCpu, pCtx, rc);
2921 if (rc != VINF_SUCCESS)
2922 {
2923 STAM_PROFILE_ADV_STOP(&pVCpu->em.s.StatRAWTail, d);
2924 break;
2925 }
2926 }
2927
2928 /*
2929 * Check and execute forced actions.
2930 */
2931#ifdef VBOX_HIGH_RES_TIMERS_HACK
2932 TMTimerPollVoid(pVM, pVCpu);
2933#endif
2934 STAM_PROFILE_ADV_STOP(&pVCpu->em.s.StatRAWTail, d);
2935 if ( VM_FF_ISPENDING(pVM, ~VM_FF_HIGH_PRIORITY_PRE_RAW_MASK | VM_FF_PGM_NO_MEMORY)
2936 || VMCPU_FF_ISPENDING(pVCpu, ~VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK))
2937 {
2938 Assert(pCtx->eflags.Bits.u1VM || (pCtx->ss & X86_SEL_RPL) != 1);
2939
2940 STAM_REL_PROFILE_ADV_SUSPEND(&pVCpu->em.s.StatRAWTotal, a);
2941 rc = emR3ForcedActions(pVM, pVCpu, rc);
2942 STAM_REL_PROFILE_ADV_RESUME(&pVCpu->em.s.StatRAWTotal, a);
2943 if ( rc != VINF_SUCCESS
2944 && rc != VINF_EM_RESCHEDULE_RAW)
2945 {
2946 rc = emR3RawUpdateForceFlag(pVM, pVCpu, pCtx, rc);
2947 if (rc != VINF_SUCCESS)
2948 {
2949 *pfFFDone = true;
2950 break;
2951 }
2952 }
2953 }
2954 }
2955
2956 /*
2957 * Return to outer loop.
2958 */
2959#if defined(LOG_ENABLED) && defined(DEBUG)
2960 RTLogFlush(NULL);
2961#endif
2962 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatRAWTotal, a);
2963 return rc;
2964}
2965
2966
2967/**
2968 * Executes hardware accelerated raw code. (Intel VMX & AMD SVM)
2969 *
2970 * This function contains the hardware accelerated (VT-x / AMD-V) version of
2971 * the inner execution loop (the outer loop being in EMR3ExecuteVM()).
2972 *
2973 * @returns VBox status code. The most important ones are: VINF_EM_RESCHEDULE, VINF_EM_RESCHEDULE_RAW,
2974 * VINF_EM_RESCHEDULE_REM, VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
2975 *
2976 * @param pVM VM handle.
2977 * @param pVCpu VMCPU handle.
2978 * @param pfFFDone Where to store an indicator telling whether or not
2979 * FFs were done before returning.
2980 */
2981static int emR3HwAccExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
2982{
2983 int rc = VERR_INTERNAL_ERROR;
2984 PCPUMCTX pCtx = pVCpu->em.s.pCtx;
2985
2986 LogFlow(("emR3HwAccExecute%d: (cs:eip=%04x:%RGv)\n", pVCpu->idCpu, pCtx->cs, (RTGCPTR)pCtx->rip));
2987 *pfFFDone = false;
2988
2989 STAM_COUNTER_INC(&pVCpu->em.s.StatHwAccExecuteEntry);
2990
2991#ifdef EM_NOTIFY_HWACCM
2992 HWACCMR3NotifyScheduled(pVCpu);
2993#endif
2994
2995 /*
2996 * Spin till we get a forced action which returns anything but VINF_SUCCESS.
2997 */
2998 for (;;)
2999 {
3000 STAM_PROFILE_ADV_START(&pVCpu->em.s.StatHwAccEntry, a);
3001
3002 /*
3003 * Process high priority pre-execution raw-mode FFs.
3004 */
3005 VMCPU_FF_CLEAR(pVCpu, (VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT | VMCPU_FF_TRPM_SYNC_IDT | VMCPU_FF_SELM_SYNC_TSS)); /* not relevant in HWACCM mode; shouldn't be set really. */
3006 if ( VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_RAW_MASK)
3007 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK))
3008 {
3009 rc = emR3RawForcedActions(pVM, pVCpu, pCtx);
3010 if (rc != VINF_SUCCESS)
3011 break;
3012 }
3013
3014#ifdef LOG_ENABLED
3015 /*
3016 * Log important stuff before entering GC.
3017 */
3018 if (TRPMHasTrap(pVCpu))
3019 Log(("CPU%d: Pending hardware interrupt=0x%x cs:rip=%04X:%RGv\n", pVCpu->idCpu, TRPMGetTrapNo(pVCpu), pCtx->cs, (RTGCPTR)pCtx->rip));
3020
3021 uint32_t cpl = CPUMGetGuestCPL(pVCpu, CPUMCTX2CORE(pCtx));
3022
3023 if (pVM->cCPUs == 1)
3024 {
3025 if (pCtx->eflags.Bits.u1VM)
3026 Log(("HWV86: %08X IF=%d\n", pCtx->eip, pCtx->eflags.Bits.u1IF));
3027 else if (CPUMIsGuestIn64BitCodeEx(pCtx))
3028 Log(("HWR%d: %04X:%RGv ESP=%RGv IF=%d IOPL=%d CR0=%x CR4=%x EFER=%x\n", cpl, pCtx->cs, (RTGCPTR)pCtx->rip, pCtx->rsp, pCtx->eflags.Bits.u1IF, pCtx->eflags.Bits.u2IOPL, (uint32_t)pCtx->cr0, (uint32_t)pCtx->cr4, (uint32_t)pCtx->msrEFER));
3029 else
3030 Log(("HWR%d: %04X:%08X ESP=%08X IF=%d IOPL=%d CR0=%x CR4=%x EFER=%x\n", cpl, pCtx->cs, pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF, pCtx->eflags.Bits.u2IOPL, (uint32_t)pCtx->cr0, (uint32_t)pCtx->cr4, (uint32_t)pCtx->msrEFER));
3031 }
3032 else
3033 {
3034 if (pCtx->eflags.Bits.u1VM)
3035 Log(("HWV86-CPU%d: %08X IF=%d\n", pVCpu->idCpu, pCtx->eip, pCtx->eflags.Bits.u1IF));
3036 else if (CPUMIsGuestIn64BitCodeEx(pCtx))
3037 Log(("HWR%d-CPU%d: %04X:%RGv ESP=%RGv IF=%d IOPL=%d CR0=%x CR4=%x EFER=%x\n", cpl, pVCpu->idCpu, pCtx->cs, (RTGCPTR)pCtx->rip, pCtx->rsp, pCtx->eflags.Bits.u1IF, pCtx->eflags.Bits.u2IOPL, (uint32_t)pCtx->cr0, (uint32_t)pCtx->cr4, (uint32_t)pCtx->msrEFER));
3038 else
3039 Log(("HWR%d-CPU%d: %04X:%08X ESP=%08X IF=%d IOPL=%d CR0=%x CR4=%x EFER=%x\n", cpl, pVCpu->idCpu, pCtx->cs, pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF, pCtx->eflags.Bits.u2IOPL, (uint32_t)pCtx->cr0, (uint32_t)pCtx->cr4, (uint32_t)pCtx->msrEFER));
3040 }
3041#endif /* LOG_ENABLED */
3042
3043 /*
3044 * Execute the code.
3045 */
3046 STAM_PROFILE_ADV_STOP(&pVCpu->em.s.StatHwAccEntry, a);
3047 STAM_PROFILE_START(&pVCpu->em.s.StatHwAccExec, x);
3048 rc = VMMR3HwAccRunGC(pVM, pVCpu);
3049 STAM_PROFILE_STOP(&pVCpu->em.s.StatHwAccExec, x);
3050
3051 /*
3052 * Deal with high priority post execution FFs before doing anything else.
3053 */
3054 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_RESUME_GUEST_MASK);
3055 if ( VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_POST_MASK)
3056 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_MASK))
3057 rc = emR3HighPriorityPostForcedActions(pVM, pVCpu, rc);
3058
3059 /*
3060 * Process the returned status code.
3061 */
3062 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
3063 break;
3064
3065 rc = emR3RawHandleRC(pVM, pVCpu, pCtx, rc);
3066 if (rc != VINF_SUCCESS)
3067 break;
3068
3069 /*
3070 * Check and execute forced actions.
3071 */
3072#ifdef VBOX_HIGH_RES_TIMERS_HACK
3073 TMTimerPollVoid(pVM, pVCpu);
3074#endif
3075 if ( VM_FF_ISPENDING(pVM, VM_FF_ALL_MASK)
3076 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_ALL_MASK))
3077 {
3078 rc = emR3ForcedActions(pVM, pVCpu, rc);
3079 if ( rc != VINF_SUCCESS
3080 && rc != VINF_EM_RESCHEDULE_HWACC)
3081 {
3082 *pfFFDone = true;
3083 break;
3084 }
3085 }
3086 }
3087
3088 /*
3089 * Return to outer loop.
3090 */
3091#if defined(LOG_ENABLED) && defined(DEBUG)
3092 RTLogFlush(NULL);
3093#endif
3094 return rc;
3095}
3096
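/* Editor's note (illustrative addition, not part of the original EM.cpp): the
 * raw-mode loop (emR3RawExecute) and the hardware accelerated loop above share
 * the same basic shape, roughly:
 *
 *     for (;;)
 *     {
 *         // 1. service high priority pre-execution force flags (emR3RawForcedActions)
 *         // 2. run guest code (VMMR3RawRunGC / VMMR3HwAccRunGC)
 *         // 3. service high priority post-execution force flags
 *         // 4. break out on any VINF_EM_* scheduling code, else let emR3RawHandleRC sort it out
 *         // 5. service remaining force flags (emR3ForcedActions) and loop
 *     }
 *
 * Breaking out of the loop returns control to the outer loop in EMR3ExecuteVM(),
 * which reschedules based on the returned status code.
 */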
3097
3098/**
3099 * Decides whether to execute RAW, HWACC or REM.
3100 *
3101 * @returns new EM state
3102 * @param pVM The VM.
3103 * @param pVCpu The VMCPU handle.
3104 * @param pCtx The CPU context.
3105 */
3106static EMSTATE emR3Reschedule(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
3107{
3108 /*
3109 * When forcing raw-mode execution, things are simple.
3110 */
3111 if (pVCpu->em.s.fForceRAW)
3112 return EMSTATE_RAW;
3113
3114 /*
3115 * We stay in the wait for SIPI state unless explicitly told otherwise.
3116 */
3117 if (pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI)
3118 return EMSTATE_WAIT_SIPI;
3119
3120 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
3121 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
3122 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
3123
3124 X86EFLAGS EFlags = pCtx->eflags;
3125 if (HWACCMIsEnabled(pVM))
3126 {
3127 /* Hardware accelerated raw-mode:
3128 *
3129 * Typically only 32-bit protected mode code, with paging enabled, is allowed here.
3130 */
3131 if (HWACCMR3CanExecuteGuest(pVM, pCtx) == true)
3132 return EMSTATE_HWACC;
3133
3134 /* Note: Raw mode and hw accelerated mode are incompatible. The latter turns
3135 * off monitoring features essential for raw mode! */
3136 return EMSTATE_REM;
3137 }
3138
3139 /*
3140 * Standard raw-mode:
3141 *
3142 * Here we only support 16 & 32 bits protected mode ring 3 code that has no IO privileges
3143 * or 32 bits protected mode ring 0 code
3144 *
3145 * The tests are ordered by the likelyhood of being true during normal execution.
3146 */
3147 if (EFlags.u32 & (X86_EFL_TF /* | HF_INHIBIT_IRQ_MASK*/))
3148 {
3149 Log2(("raw mode refused: EFlags=%#x\n", EFlags.u32));
3150 return EMSTATE_REM;
3151 }
3152
3153#ifndef VBOX_RAW_V86
3154 if (EFlags.u32 & X86_EFL_VM) {
3155 Log2(("raw mode refused: VM_MASK\n"));
3156 return EMSTATE_REM;
3157 }
3158#endif
3159
3160 /** @todo check the X86_CR0_AM flag with respect to raw mode!!! We're probably not emulating it right! */
3161 uint32_t u32CR0 = pCtx->cr0;
3162 if ((u32CR0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
3163 {
3164 //Log2(("raw mode refused: %s%s%s\n", (u32CR0 & X86_CR0_PG) ? "" : " !PG", (u32CR0 & X86_CR0_PE) ? "" : " !PE", (u32CR0 & X86_CR0_AM) ? "" : " !AM"));
3165 return EMSTATE_REM;
3166 }
3167
3168 if (pCtx->cr4 & X86_CR4_PAE)
3169 {
3170 uint32_t u32Dummy, u32Features;
3171
3172 CPUMGetGuestCpuId(pVCpu, 1, &u32Dummy, &u32Dummy, &u32Dummy, &u32Features);
3173 if (!(u32Features & X86_CPUID_FEATURE_EDX_PAE))
3174 return EMSTATE_REM;
3175 }
3176
3177 unsigned uSS = pCtx->ss;
3178 if ( pCtx->eflags.Bits.u1VM
3179 || (uSS & X86_SEL_RPL) == 3)
3180 {
3181 if (!EMIsRawRing3Enabled(pVM))
3182 return EMSTATE_REM;
3183
3184 if (!(EFlags.u32 & X86_EFL_IF))
3185 {
3186 Log2(("raw mode refused: IF (RawR3)\n"));
3187 return EMSTATE_REM;
3188 }
3189
3190 if (!(u32CR0 & X86_CR0_WP) && EMIsRawRing0Enabled(pVM))
3191 {
3192 Log2(("raw mode refused: CR0.WP + RawR0\n"));
3193 return EMSTATE_REM;
3194 }
3195 }
3196 else
3197 {
3198 if (!EMIsRawRing0Enabled(pVM))
3199 return EMSTATE_REM;
3200
3201 /* Only ring 0 supervisor code. */
3202 if ((uSS & X86_SEL_RPL) != 0)
3203 {
3204 Log2(("raw r0 mode refused: CPL %d\n", uSS & X86_SEL_RPL));
3205 return EMSTATE_REM;
3206 }
3207
3208 // Let's start with pure 32-bit ring-0 code first
3209 /** @todo What's pure 32-bit mode? flat? */
3210 if ( !(pCtx->ssHid.Attr.n.u1DefBig)
3211 || !(pCtx->csHid.Attr.n.u1DefBig))
3212 {
3213 Log2(("raw r0 mode refused: SS/CS not 32bit\n"));
3214 return EMSTATE_REM;
3215 }
3216
3217 /* Write protection must be turned on, or else the guest can overwrite our hypervisor code and data. */
3218 if (!(u32CR0 & X86_CR0_WP))
3219 {
3220 Log2(("raw r0 mode refused: CR0.WP=0!\n"));
3221 return EMSTATE_REM;
3222 }
3223
3224 if (PATMShouldUseRawMode(pVM, (RTGCPTR)pCtx->eip))
3225 {
3226 Log2(("raw r0 mode forced: patch code\n"));
3227 return EMSTATE_RAW;
3228 }
3229
3230#if !defined(VBOX_ALLOW_IF0) && !defined(VBOX_RUN_INTERRUPT_GATE_HANDLERS)
3231 if (!(EFlags.u32 & X86_EFL_IF))
3232 {
3233 ////Log2(("R0: IF=0 VIF=%d %08X\n", eip, pVMeflags));
3234 //Log2(("RR0: Interrupts turned off; fall back to emulation\n"));
3235 return EMSTATE_REM;
3236 }
3237#endif
3238
3239 /** @todo still necessary??? */
3240 if (EFlags.Bits.u2IOPL != 0)
3241 {
3242 Log2(("raw r0 mode refused: IOPL %d\n", EFlags.Bits.u2IOPL));
3243 return EMSTATE_REM;
3244 }
3245 }
3246
3247 Assert(PGMPhysIsA20Enabled(pVCpu));
3248 return EMSTATE_RAW;
3249}
3250
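/* Editor's note (illustrative addition, not part of the original EM.cpp): a few
 * example outcomes of emR3Reschedule() under the rules above, assuming the
 * default configuration (raw ring-0 and ring-3 enabled, HWACCM disabled):
 *
 *     - ring-3, 32-bit protected mode, paging on, CR0.WP=1, IF=1   -> EMSTATE_RAW
 *     - EIP inside PATM patch code                                 -> EMSTATE_RAW (forced)
 *     - EFLAGS.TF set (single stepping)                            -> EMSTATE_REM
 *     - V86 mode (without VBOX_RAW_V86)                            -> EMSTATE_REM
 *     - real mode (CR0.PE=0) or paging off (CR0.PG=0)              -> EMSTATE_REM
 *     - ring-0 with CR0.WP=0                                       -> EMSTATE_REM
 *
 * With HWACCM enabled, HWACCMR3CanExecuteGuest() decides between EMSTATE_HWACC
 * and EMSTATE_REM instead.
 */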
3251
3252/**
3253 * Executes all high priority post execution force actions.
3254 *
3255 * @returns rc or a fatal status code.
3256 *
3257 * @param pVM VM handle.
3258 * @param pVCpu VMCPU handle.
3259 * @param rc The current rc.
3260 */
3261static int emR3HighPriorityPostForcedActions(PVM pVM, PVMCPU pVCpu, int rc)
3262{
3263 if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PDM_CRITSECT))
3264 PDMCritSectFF(pVCpu);
3265
3266 if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_CSAM_PENDING_ACTION))
3267 CSAMR3DoPendingAction(pVM, pVCpu);
3268
3269 if (VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY))
3270 {
3271 if ( rc > VINF_EM_NO_MEMORY
3272 && rc <= VINF_EM_LAST)
3273 rc = VINF_EM_NO_MEMORY;
3274 }
3275
3276 return rc;
3277}
3278
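/* Editor's note (illustrative addition, not part of the original EM.cpp): the
 * out-of-memory clause above downgrades pending scheduling requests. For
 * example, if VM_FF_PGM_NO_MEMORY is set and rc came back as
 * VINF_EM_RESCHEDULE_RAW (which, in the usual VBox ordering, lies between
 * VINF_EM_NO_MEMORY and VINF_EM_LAST), rc becomes VINF_EM_NO_MEMORY so the
 * outer loop deals with the memory shortage first; a VINF_SUCCESS or error
 * status is left untouched.
 */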
3279
3280/**
3281 * Executes all pending forced actions.
3282 *
3283 * Forced actions can cause execution delays and execution
3284 * rescheduling. We deal with the delays using action priority, so
3285 * that for instance pending timers aren't scheduled and run until
3286 * right before execution. We deal with the rescheduling using
3287 * return codes. The same goes for VM termination, only in that case
3288 * we exit everything.
3289 *
3290 * @returns VBox status code of equal or greater importance/severity than rc.
3291 * The most important ones are: VINF_EM_RESCHEDULE,
3292 * VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
3293 *
3294 * @param pVM VM handle.
3295 * @param pVCpu VMCPU handle.
3296 * @param rc The current rc.
3297 *
3298 */
3299static int emR3ForcedActions(PVM pVM, PVMCPU pVCpu, int rc)
3300{
3301 STAM_REL_PROFILE_START(&pVCpu->em.s.StatForcedActions, a);
3302#ifdef VBOX_STRICT
3303 int rcIrq = VINF_SUCCESS;
3304#endif
3305 int rc2;
3306#define UPDATE_RC() \
3307 do { \
3308 AssertMsg(rc2 <= 0 || (rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST), ("Invalid FF return code: %Rra\n", rc2)); \
3309 if (rc2 == VINF_SUCCESS || rc < VINF_SUCCESS) \
3310 break; \
3311 if (!rc || rc2 < rc) \
3312 rc = rc2; \
3313 } while (0)
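    /* Editor's note (illustrative addition, not part of the original EM.cpp):
     * UPDATE_RC() merges the status of each serviced action into rc, keeping the
     * numerically smallest (i.e. most urgent) VINF_EM_* code and never
     * overwriting an error status. For example, if rc is currently
     * VINF_EM_RESCHEDULE_REM and a handler returns rc2 = VINF_EM_SUSPEND, rc
     * becomes VINF_EM_SUSPEND (assuming the standard err.h ordering, in which
     * suspend/reset/terminate rank below the reschedule codes numerically). */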
3314
3315 /*
3316 * Post execution chunk first.
3317 */
3318 if ( VM_FF_ISPENDING(pVM, VM_FF_NORMAL_PRIORITY_POST_MASK)
3319 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_NORMAL_PRIORITY_POST_MASK))
3320 {
3321 /*
3322 * EMT Rendezvous (must be serviced before termination).
3323 */
3324 if (VM_FF_ISPENDING(pVM, VM_FF_EMT_RENDEZVOUS))
3325 VMMR3EmtRendezvousFF(pVM, pVCpu);
3326
3327 /*
3328 * Termination request.
3329 */
3330 if (VM_FF_ISPENDING(pVM, VM_FF_TERMINATE))
3331 {
3332 Log2(("emR3ForcedActions: returns VINF_EM_TERMINATE\n"));
3333 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
3334 return VINF_EM_TERMINATE;
3335 }
3336
3337 /*
3338 * Debugger Facility polling.
3339 */
3340 if (VM_FF_ISPENDING(pVM, VM_FF_DBGF))
3341 {
3342 rc2 = DBGFR3VMMForcedAction(pVM);
3343 UPDATE_RC();
3344 }
3345
3346 /*
3347 * Postponed reset request.
3348 */
3349 if (VM_FF_TESTANDCLEAR(pVM, VM_FF_RESET_BIT))
3350 {
3351 rc2 = VMR3Reset(pVM);
3352 UPDATE_RC();
3353 }
3354
3355 /*
3356 * CSAM page scanning.
3357 */
3358 if ( !VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY)
3359 && VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_CSAM_SCAN_PAGE))
3360 {
3361 PCPUMCTX pCtx = pVCpu->em.s.pCtx;
3362
3363 /** @todo check for 16- or 32-bit code! (D bit in the code selector) */
3364 Log(("Forced action VMCPU_FF_CSAM_SCAN_PAGE\n"));
3365
3366 CSAMR3CheckCodeEx(pVM, CPUMCTX2CORE(pCtx), pCtx->eip);
3367 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_CSAM_SCAN_PAGE);
3368 }
3369
3370 /*
3371 * Out of memory? Putting this after CSAM as it may in theory cause us to run out of memory.
3372 */
3373 if (VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY))
3374 {
3375 rc2 = PGMR3PhysAllocateHandyPages(pVM);
3376 UPDATE_RC();
3377 if (rc == VINF_EM_NO_MEMORY)
3378 return rc;
3379 }
3380
3381 /* check that we got them all */
3382 AssertCompile(VM_FF_NORMAL_PRIORITY_POST_MASK == (VM_FF_TERMINATE | VM_FF_DBGF | VM_FF_RESET | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
3383 AssertCompile(VMCPU_FF_NORMAL_PRIORITY_POST_MASK == VMCPU_FF_CSAM_SCAN_PAGE);
3384 }
3385
3386 /*
3387 * Normal priority then.
3388 * (Executed in no particular order.)
3389 */
3390 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_NORMAL_PRIORITY_MASK, VM_FF_PGM_NO_MEMORY))
3391 {
3392 /*
3393 * PDM Queues are pending.
3394 */
3395 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PDM_QUEUES, VM_FF_PGM_NO_MEMORY))
3396 PDMR3QueueFlushAll(pVM);
3397
3398 /*
3399 * PDM DMA transfers are pending.
3400 */
3401 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PDM_DMA, VM_FF_PGM_NO_MEMORY))
3402 PDMR3DmaRun(pVM);
3403
3404 /*
3405 * EMT Rendezvous (make sure they are handled before the requests).
3406 */
3407 if (VM_FF_ISPENDING(pVM, VM_FF_EMT_RENDEZVOUS))
3408 VMMR3EmtRendezvousFF(pVM, pVCpu);
3409
3410 /*
3411 * Requests from other threads.
3412 */
3413 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_REQUEST, VM_FF_PGM_NO_MEMORY))
3414 {
3415 rc2 = VMR3ReqProcessU(pVM->pUVM, VMCPUID_ANY);
3416 Assert(rc2 != VINF_EM_RESET); /* should be per-VCPU */
3417 if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE)
3418 {
3419 Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
3420 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
3421 return rc2;
3422 }
3423 UPDATE_RC();
3424 }
3425
3426 /* Replay the handler notification changes. */
3427 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_REM_HANDLER_NOTIFY, VM_FF_PGM_NO_MEMORY))
3428 {
3429 /* Try not to cause deadlocks. */
3430 if ( pVM->cCPUs == 1
3431 || ( !PGMIsLockOwner(pVM)
3432 && !IOMIsLockOwner(pVM))
3433 )
3434 {
3435 EMRemLock(pVM);
3436 REMR3ReplayHandlerNotifications(pVM);
3437 EMRemUnlock(pVM);
3438 }
3439 }
3440
3441 /* check that we got them all */
3442 AssertCompile(VM_FF_NORMAL_PRIORITY_MASK == (VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA | VM_FF_REM_HANDLER_NOTIFY | VM_FF_EMT_RENDEZVOUS));
3443 }
3444
3445 /*
3446 * Normal priority then. (per-VCPU)
3447 * (Executed in no particular order.)
3448 */
3449 if ( !VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY)
3450 && VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_NORMAL_PRIORITY_MASK))
3451 {
3452 /*
3453 * Requests from other threads.
3454 */
3455 if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_REQUEST))
3456 {
3457 rc2 = VMR3ReqProcessU(pVM->pUVM, pVCpu->idCpu);
3458 if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE || rc2 == VINF_EM_RESET)
3459 {
3460 Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
3461 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
3462 return rc2;
3463 }
3464 UPDATE_RC();
3465 }
3466
3467 /* check that we got them all */
3468 Assert(!(VMCPU_FF_NORMAL_PRIORITY_MASK & ~(VMCPU_FF_REQUEST)));
3469 }
3470
3471 /*
3472 * High priority pre execution chunk last.
3473 * (Executed in ascending priority order.)
3474 */
3475 if ( VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_MASK)
3476 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_MASK))
3477 {
3478 /*
3479 * Timers before interrupts.
3480 */
3481 if ( VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_TIMER)
3482 && !VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY))
3483 TMR3TimerQueuesDo(pVM);
3484
3485 /*
3486 * The instruction following an emulated STI should *always* be executed!
3487 */
3488 if ( VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
3489 && !VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY))
3490 {
3491 Log(("VM_FF_EMULATED_STI at %RGv successor %RGv\n", (RTGCPTR)CPUMGetGuestRIP(pVCpu), EMGetInhibitInterruptsPC(pVCpu)));
3492 if (CPUMGetGuestEIP(pVCpu) != EMGetInhibitInterruptsPC(pVCpu))
3493 {
3494 /* Note: we intentionally don't clear VMCPU_FF_INHIBIT_INTERRUPTS here if the eip is the same as the inhibited instr address.
3495 * Before we are able to execute this instruction in raw mode (iret to guest code) an external interrupt might
3496 * force a world switch again, possibly allowing a guest interrupt to be dispatched in the process. This could
3497 * break the guest. Sounds very unlikely, but such timing-sensitive problems are not as rare as you might think.
3498 */
3499 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
3500 }
3501 if (HWACCMR3IsActive(pVCpu))
3502 rc2 = VINF_EM_RESCHEDULE_HWACC;
3503 else
3504 rc2 = PATMAreInterruptsEnabled(pVM) ? VINF_EM_RESCHEDULE_RAW : VINF_EM_RESCHEDULE_REM;
3505
3506 UPDATE_RC();
3507 }
3508
3509 /*
3510 * Interrupts.
3511 */
3512 if ( !VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY)
3513 && !VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
3514 && (!rc || rc >= VINF_EM_RESCHEDULE_HWACC)
3515 && !TRPMHasTrap(pVCpu) /* an interrupt could already be scheduled for dispatching in the recompiler. */
3516 && PATMAreInterruptsEnabled(pVM)
3517 && !HWACCMR3IsEventPending(pVM))
3518 {
3519 Assert(pVCpu->em.s.enmState != EMSTATE_WAIT_SIPI);
3520 if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
3521 {
3522 /* Note: it's important to make sure the return code from TRPMR3InjectEvent isn't ignored! */
3523 /** @todo this really isn't nice, should properly handle this */
3524 rc2 = TRPMR3InjectEvent(pVM, pVCpu, TRPM_HARDWARE_INT);
3525#ifdef VBOX_STRICT
3526 rcIrq = rc2;
3527#endif
3528 UPDATE_RC();
3529 }
3530 /** @todo really ugly; if we entered the hlt state when exiting the recompiler and an interrupt was pending, we previously got stuck in the halted state. */
3531 else if (REMR3QueryPendingInterrupt(pVM, pVCpu) != REM_NO_PENDING_IRQ)
3532 {
3533 rc2 = VINF_EM_RESCHEDULE_REM;
3534 UPDATE_RC();
3535 }
3536 }
3537
3538 /*
3539 * Allocate handy pages.
3540 */
3541 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PGM_NEED_HANDY_PAGES, VM_FF_PGM_NO_MEMORY))
3542 {
3543 rc2 = PGMR3PhysAllocateHandyPages(pVM);
3544 UPDATE_RC();
3545 }
3546
3547 /*
3548 * Debugger Facility request.
3549 */
3550 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_DBGF, VM_FF_PGM_NO_MEMORY))
3551 {
3552 rc2 = DBGFR3VMMForcedAction(pVM);
3553 UPDATE_RC();
3554 }
3555
3556 /*
3557 * EMT Rendezvous (must be serviced before termination).
3558 */
3559 if (VM_FF_ISPENDING(pVM, VM_FF_EMT_RENDEZVOUS))
3560 VMMR3EmtRendezvousFF(pVM, pVCpu);
3561
3562 /*
3563 * Termination request.
3564 */
3565 if (VM_FF_ISPENDING(pVM, VM_FF_TERMINATE))
3566 {
3567 Log2(("emR3ForcedActions: returns VINF_EM_TERMINATE\n"));
3568 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
3569 return VINF_EM_TERMINATE;
3570 }
3571
3572 /*
3573 * Out of memory? Since most of our fellow high priority actions may cause us
3574 * to run out of memory, we're employing VM_FF_IS_PENDING_EXCEPT and putting this
3575 * at the end rather than the start. Also, VM_FF_TERMINATE has higher priority
3576 * than us since we can terminate without allocating more memory.
3577 */
3578 if (VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY))
3579 {
3580 rc2 = PGMR3PhysAllocateHandyPages(pVM);
3581 UPDATE_RC();
3582 if (rc == VINF_EM_NO_MEMORY)
3583 return rc;
3584 }
3585
3586 /*
3587 * If the virtual sync clock is still stopped, make TM restart it.
3588 */
3589 if (VM_FF_ISPENDING(pVM, VM_FF_TM_VIRTUAL_SYNC))
3590 TMR3VirtualSyncFF(pVM, pVCpu);
3591
3592#ifdef DEBUG
3593 /*
3594 * Debug, pause the VM.
3595 */
3596 if (VM_FF_ISPENDING(pVM, VM_FF_DEBUG_SUSPEND))
3597 {
3598 VM_FF_CLEAR(pVM, VM_FF_DEBUG_SUSPEND);
3599 Log(("emR3ForcedActions: returns VINF_EM_SUSPEND\n"));
3600 return VINF_EM_SUSPEND;
3601 }
3602#endif
3603
3604 /* check that we got them all */
3605 AssertCompile(VM_FF_HIGH_PRIORITY_PRE_MASK == (VM_FF_TM_VIRTUAL_SYNC | VM_FF_DBGF | VM_FF_TERMINATE | VM_FF_DEBUG_SUSPEND | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
3606 AssertCompile(VMCPU_FF_HIGH_PRIORITY_PRE_MASK == (VMCPU_FF_TIMER | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_SELM_SYNC_TSS | VMCPU_FF_TRPM_SYNC_IDT | VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT | VMCPU_FF_INHIBIT_INTERRUPTS));
3607 }
3608
3609#undef UPDATE_RC
3610 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
3611 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
3612 Assert(rcIrq == VINF_SUCCESS || rcIrq == rc);
3613 return rc;
3614}
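/* Editor's note (illustrative addition, not part of the original EM.cpp):
 * emR3ForcedActions() above walks the pending force flags in four passes:
 *   1. normal priority post-execution actions (EMT rendezvous, termination,
 *      DBGF, postponed reset, CSAM page scan, out-of-memory recovery),
 *   2. normal priority VM-wide actions (PDM queues, PDM DMA, rendezvous,
 *      cross-thread requests, REM handler-notification replay),
 *   3. normal priority per-VCPU actions (per-VCPU requests),
 *   4. high priority pre-execution actions (timers, STI inhibition, interrupt
 *      injection, handy page allocation, DBGF, rendezvous, termination,
 *      virtual sync clock, debug suspend).
 * UPDATE_RC() ensures the most urgent scheduling request ends up in rc.
 */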
3615
3616/**
3617 * Releases the REM lock (CritSectREM) if owned by the current VCPU.
3618 *
3619 * @param pVM The VM to operate on.
3620 */
3621VMMR3DECL(void) EMR3ReleaseOwnedLocks(PVM pVM)
3622{
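    /* Editor's note: PDM critical sections can be entered recursively by the
     * same thread, hence the loop below -- keep leaving CritSectREM until this
     * VCPU no longer owns it. */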
3623 while (PDMCritSectIsOwner(&pVM->em.s.CritSectREM))
3624 PDMCritSectLeave(&pVM->em.s.CritSectREM);
3625}
3626
3627
3628/**
3629 * Execute VM.
3630 *
3631 * This function is the main loop of the VM. The emulation thread
3632 * calls this function when the VM has been successfully constructed
3633 * and we're ready to execute the VM.
3634 *
3635 * Returning from this function means that the VM is turned off or
3636 * suspended (state already saved) and deconstruction is next in line.
3637 *
3638 * All interaction from other threads is done using forced actions
3639 * and signaling of the wait object.
3640 *
3641 * @returns VBox status code; informational status codes may indicate failure.
3642 * @param pVM The VM to operate on.
3643 * @param pVCpu The VMCPU to operate on.
3644 */
3645VMMR3DECL(int) EMR3ExecuteVM(PVM pVM, PVMCPU pVCpu)
3646{
3647 LogFlow(("EMR3ExecuteVM: pVM=%p enmVMState=%d enmState=%d (%s) fForceRAW=%d\n", pVM, pVM->enmVMState,
3648 pVCpu->em.s.enmState, EMR3GetStateName(pVCpu->em.s.enmState), pVCpu->em.s.fForceRAW));
3649 VM_ASSERT_EMT(pVM);
3650 AssertMsg( pVCpu->em.s.enmState == EMSTATE_NONE
3651 || pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI
3652 || pVCpu->em.s.enmState == EMSTATE_SUSPENDED,
3653 ("%s\n", EMR3GetStateName(pVCpu->em.s.enmState)));
3654
3655 int rc = setjmp(pVCpu->em.s.u.FatalLongJump);
3656 if (rc == 0)
3657 {
3658 /*
3659 * Start the virtual time.
3660 */
3661 TMR3NotifyResume(pVM, pVCpu);
3662
3663 /*
3664 * The Outer Main Loop.
3665 */
3666 bool fFFDone = false;
3667
3668 /* Reschedule right away to start in the right state. */
3669 rc = VINF_SUCCESS;
3670
3671 /* If resuming after a pause or a state load, restore the previous
3672 state; otherwise we'd start executing code. Else, just reschedule. */
3673 if ( pVCpu->em.s.enmState == EMSTATE_SUSPENDED
3674 && ( pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
3675 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED))
3676 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
3677 else
3678 pVCpu->em.s.enmState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
3679
3680 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
3681 for (;;)
3682 {
3683 /*
3684 * Before we can schedule anything (we're here because
3685 * scheduling is required) we must service any pending
3686 * forced actions to avoid any pending action causing
3687 * immediate rescheduling upon entering an inner loop.
3688 *
3689 * Do forced actions.
3690 */
3691 if ( !fFFDone
3692 && rc != VINF_EM_TERMINATE
3693 && rc != VINF_EM_OFF
3694 && ( VM_FF_ISPENDING(pVM, VM_FF_ALL_BUT_RAW_MASK)
3695 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_ALL_BUT_RAW_MASK)))
3696 {
3697 rc = emR3ForcedActions(pVM, pVCpu, rc);
3698 if ( ( rc == VINF_EM_RESCHEDULE_REM
3699 || rc == VINF_EM_RESCHEDULE_HWACC)
3700 && pVCpu->em.s.fForceRAW)
3701 rc = VINF_EM_RESCHEDULE_RAW;
3702 }
3703 else if (fFFDone)
3704 fFFDone = false;
3705
3706 /*
3707 * Now what to do?
3708 */
3709 Log2(("EMR3ExecuteVM: rc=%Rrc\n", rc));
3710 switch (rc)
3711 {
3712 /*
3713 * Keep doing what we're currently doing.
3714 */
3715 case VINF_SUCCESS:
3716 break;
3717
3718 /*
3719 * Reschedule - to raw-mode execution.
3720 */
3721 case VINF_EM_RESCHEDULE_RAW:
3722 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_RAW: %d -> %d (EMSTATE_RAW)\n", pVCpu->em.s.enmState, EMSTATE_RAW));
3723 pVCpu->em.s.enmState = EMSTATE_RAW;
3724 break;
3725
3726 /*
3727 * Reschedule - to hardware accelerated raw-mode execution.
3728 */
3729 case VINF_EM_RESCHEDULE_HWACC:
3730 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_HWACC: %d -> %d (EMSTATE_HWACC)\n", pVCpu->em.s.enmState, EMSTATE_HWACC));
3731 Assert(!pVCpu->em.s.fForceRAW);
3732 pVCpu->em.s.enmState = EMSTATE_HWACC;
3733 break;
3734
3735 /*
3736 * Reschedule - to recompiled execution.
3737 */
3738 case VINF_EM_RESCHEDULE_REM:
3739 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_REM: %d -> %d (EMSTATE_REM)\n", pVCpu->em.s.enmState, EMSTATE_REM));
3740 pVCpu->em.s.enmState = EMSTATE_REM;
3741 break;
3742
3743#ifdef VBOX_WITH_VMI
3744 /*
3745 * Reschedule - parav call.
3746 */
3747 case VINF_EM_RESCHEDULE_PARAV:
3748 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_PARAV: %d -> %d (EMSTATE_PARAV)\n", pVCpu->em.s.enmState, EMSTATE_PARAV));
3749 pVCpu->em.s.enmState = EMSTATE_PARAV;
3750 break;
3751#endif
3752
3753 /*
3754 * Resume.
3755 */
3756 case VINF_EM_RESUME:
3757 Log2(("EMR3ExecuteVM: VINF_EM_RESUME: %d -> VINF_EM_RESCHEDULE\n", pVCpu->em.s.enmState));
3758 /* Don't reschedule in the halted or wait for SIPI case. */
3759 if ( pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
3760 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED)
3761 break;
3762 /* fall through and get scheduled. */
3763
3764 /*
3765 * Reschedule.
3766 */
3767 case VINF_EM_RESCHEDULE:
3768 {
3769 EMSTATE enmState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
3770 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE: %d -> %d (%s)\n", pVCpu->em.s.enmState, enmState, EMR3GetStateName(enmState)));
3771 pVCpu->em.s.enmState = enmState;
3772 break;
3773 }
3774
3775 /*
3776 * Halted.
3777 */
3778 case VINF_EM_HALT:
3779 Log2(("EMR3ExecuteVM: VINF_EM_HALT: %d -> %d\n", pVCpu->em.s.enmState, EMSTATE_HALTED));
3780 pVCpu->em.s.enmState = EMSTATE_HALTED;
3781 break;
3782
3783 /*
3784 * Switch to the wait for SIPI state (application processor only)
3785 */
3786 case VINF_EM_WAIT_SIPI:
3787 Assert(pVCpu->idCpu != 0);
3788 Log2(("EMR3ExecuteVM: VINF_EM_WAIT_SIPI: %d -> %d\n", pVCpu->em.s.enmState, EMSTATE_WAIT_SIPI));
3789 pVCpu->em.s.enmState = EMSTATE_WAIT_SIPI;
3790 break;
3791
3792
3793 /*
3794 * Suspend.
3795 */
3796 case VINF_EM_SUSPEND:
3797 Log2(("EMR3ExecuteVM: VINF_EM_SUSPEND: %d -> %d\n", pVCpu->em.s.enmState, EMSTATE_SUSPENDED));
3798 pVCpu->em.s.enmPrevState = pVCpu->em.s.enmState;
3799 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
3800 break;
3801
3802 /*
3803 * Reset.
3804 * We might end up doing a double reset for now; we'll have to clean up the mess later.
3805 */
3806 case VINF_EM_RESET:
3807 {
3808 if (pVCpu->idCpu == 0)
3809 {
3810 EMSTATE enmState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
3811 Log2(("EMR3ExecuteVM: VINF_EM_RESET: %d -> %d (%s)\n", pVCpu->em.s.enmState, enmState, EMR3GetStateName(enmState)));
3812 pVCpu->em.s.enmState = enmState;
3813 }
3814 else
3815 {
3816 /* All other VCPUs go into the wait for SIPI state. */
3817 pVCpu->em.s.enmState = EMSTATE_WAIT_SIPI;
3818 }
3819 break;
3820 }
3821
3822 /*
3823 * Power Off.
3824 */
3825 case VINF_EM_OFF:
3826 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
3827 Log2(("EMR3ExecuteVM: returns VINF_EM_OFF (%d -> %d)\n", pVCpu->em.s.enmState, EMSTATE_TERMINATING));
3828 TMR3NotifySuspend(pVM, pVCpu);
3829 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
3830 return rc;
3831
3832 /*
3833 * Terminate the VM.
3834 */
3835 case VINF_EM_TERMINATE:
3836 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
3837 Log(("EMR3ExecuteVM returns VINF_EM_TERMINATE (%d -> %d)\n", pVCpu->em.s.enmState, EMSTATE_TERMINATING));
3838 TMR3NotifySuspend(pVM, pVCpu);
3839 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
3840 return rc;
3841
3842
3843 /*
3844 * Out of memory, suspend the VM and stuff.
3845 */
3846 case VINF_EM_NO_MEMORY:
3847 Log2(("EMR3ExecuteVM: VINF_EM_NO_MEMORY: %d -> %d\n", pVCpu->em.s.enmState, EMSTATE_SUSPENDED));
3848 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
3849 TMR3NotifySuspend(pVM, pVCpu);
3850 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
3851
3852 rc = VMSetRuntimeError(pVM, VMSETRTERR_FLAGS_SUSPEND, "HostMemoryLow",
3853 N_("Unable to allocate and lock memory. The virtual machine will be paused. Please close applications to free up memory or close the VM"));
3854 if (rc != VINF_EM_SUSPEND)
3855 {
3856 if (RT_SUCCESS_NP(rc))
3857 {
3858 AssertLogRelMsgFailed(("%Rrc\n", rc));
3859 rc = VERR_EM_INTERNAL_ERROR;
3860 }
3861 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
3862 }
3863 return rc;
3864
3865 /*
3866 * Guest debug events.
3867 */
3868 case VINF_EM_DBG_STEPPED:
3869 AssertMsgFailed(("VINF_EM_DBG_STEPPED cannot be here!"));
3870 case VINF_EM_DBG_STOP:
3871 case VINF_EM_DBG_BREAKPOINT:
3872 case VINF_EM_DBG_STEP:
3873 if (pVCpu->em.s.enmState == EMSTATE_RAW)
3874 {
3875 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, pVCpu->em.s.enmState, EMSTATE_DEBUG_GUEST_RAW));
3876 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_RAW;
3877 }
3878 else
3879 {
3880 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, pVCpu->em.s.enmState, EMSTATE_DEBUG_GUEST_REM));
3881 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_REM;
3882 }
3883 break;
3884
3885 /*
3886 * Hypervisor debug events.
3887 */
3888 case VINF_EM_DBG_HYPER_STEPPED:
3889 case VINF_EM_DBG_HYPER_BREAKPOINT:
3890 case VINF_EM_DBG_HYPER_ASSERTION:
3891 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, pVCpu->em.s.enmState, EMSTATE_DEBUG_HYPER));
3892 pVCpu->em.s.enmState = EMSTATE_DEBUG_HYPER;
3893 break;
3894
3895 /*
3896 * Guru meditations.
3897 */
3898 case VERR_VMM_RING0_ASSERTION:
3899 Log(("EMR3ExecuteVM: %Rrc: %d -> %d (EMSTATE_GURU_MEDITATION)\n", rc, pVCpu->em.s.enmState, EMSTATE_GURU_MEDITATION));
3900 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
3901 break;
3902
3903 /*
3904 * Any error code showing up here other than the ones we
3905 * know and process above is considered to be FATAL.
3906 *
3907 * Unknown warnings and informational status codes are also
3908 * included in this.
3909 */
3910 default:
3911 if (RT_SUCCESS_NP(rc))
3912 {
3913 AssertMsgFailed(("Unexpected warning or informational status code %Rra!\n", rc));
3914 rc = VERR_EM_INTERNAL_ERROR;
3915 }
3916 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
3917 Log(("EMR3ExecuteVM returns %d\n", rc));
3918 break;
3919 }
3920
3921 STAM_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x); /* (skip this in release) */
3922 STAM_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
3923
3924 /*
3925 * Act on the state.
3926 */
3927 switch (pVCpu->em.s.enmState)
3928 {
3929 /*
3930 * Execute raw.
3931 */
3932 case EMSTATE_RAW:
3933 rc = emR3RawExecute(pVM, pVCpu, &fFFDone);
3934 break;
3935
3936 /*
3937 * Execute hardware accelerated raw.
3938 */
3939 case EMSTATE_HWACC:
3940 rc = emR3HwAccExecute(pVM, pVCpu, &fFFDone);
3941 break;
3942
3943 /*
3944 * Execute recompiled.
3945 */
3946 case EMSTATE_REM:
3947 rc = emR3RemExecute(pVM, pVCpu, &fFFDone);
3948 Log2(("EMR3ExecuteVM: emR3RemExecute -> %Rrc\n", rc));
3949 break;
3950
3951#ifdef VBOX_WITH_VMI
3952 /*
3953 * Execute PARAV function.
3954 */
3955 case EMSTATE_PARAV:
3956 rc = PARAVCallFunction(pVM);
3957 pVCpu->em.s.enmState = EMSTATE_REM;
3958 break;
3959#endif
3960
3961 /*
3962 * Application processor execution halted until SIPI.
3963 */
3964 case EMSTATE_WAIT_SIPI:
3965 /* no break */
3966 /*
3967 * hlt - execution halted until interrupt.
3968 */
3969 case EMSTATE_HALTED:
3970 {
3971 STAM_REL_PROFILE_START(&pVCpu->em.s.StatHalted, y);
3972 rc = VMR3WaitHalted(pVM, pVCpu, !(CPUMGetGuestEFlags(pVCpu) & X86_EFL_IF));
3973 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatHalted, y);
3974 break;
3975 }
3976
3977 /*
3978 * Suspended - return to VM.cpp.
3979 */
3980 case EMSTATE_SUSPENDED:
3981 TMR3NotifySuspend(pVM, pVCpu);
3982 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
3983 return VINF_EM_SUSPEND;
3984
3985 /*
3986 * Debugging in the guest.
3987 */
3988 case EMSTATE_DEBUG_GUEST_REM:
3989 case EMSTATE_DEBUG_GUEST_RAW:
3990 TMR3NotifySuspend(pVM, pVCpu);
3991 rc = emR3Debug(pVM, pVCpu, rc);
3992 TMR3NotifyResume(pVM, pVCpu);
3993 Log2(("EMR3ExecuteVM: enmr3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
3994 break;
3995
3996 /*
3997 * Debugging in the hypervisor.
3998 */
3999 case EMSTATE_DEBUG_HYPER:
4000 {
4001 TMR3NotifySuspend(pVM, pVCpu);
4002 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
4003
4004 rc = emR3Debug(pVM, pVCpu, rc);
4005 Log2(("EMR3ExecuteVM: enmr3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
4006 if (rc != VINF_SUCCESS)
4007 {
4008 /* switch to guru meditation mode */
4009 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
4010 VMMR3FatalDump(pVM, pVCpu, rc);
4011 return rc;
4012 }
4013
4014 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
4015 TMR3NotifyResume(pVM, pVCpu);
4016 break;
4017 }
4018
4019 /*
4020 * Guru meditation takes place in the debugger.
4021 */
4022 case EMSTATE_GURU_MEDITATION:
4023 {
4024 TMR3NotifySuspend(pVM, pVCpu);
4025 VMMR3FatalDump(pVM, pVCpu, rc);
4026 emR3Debug(pVM, pVCpu, rc);
4027 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
4028 return rc;
4029 }
4030
4031 /*
4032 * The states we don't expect here.
4033 */
4034 case EMSTATE_NONE:
4035 case EMSTATE_TERMINATING:
4036 default:
4037 AssertMsgFailed(("EMR3ExecuteVM: Invalid state %d!\n", pVCpu->em.s.enmState));
4038 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
4039 TMR3NotifySuspend(pVM, pVCpu);
4040 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
4041 return VERR_EM_INTERNAL_ERROR;
4042 }
4043 } /* The Outer Main Loop */
4044 }
4045 else
4046 {
4047 /*
4048 * Fatal error.
4049 */
4050 LogFlow(("EMR3ExecuteVM: returns %Rrc (longjmp / fatal error)\n", rc));
4051 TMR3NotifySuspend(pVM, pVCpu);
4052 VMMR3FatalDump(pVM, pVCpu, rc);
4053 emR3Debug(pVM, pVCpu, rc);
4054 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
4055 /** @todo change the VM state! */
4056 return rc;
4057 }
4058
4059 /* (won't ever get here). */
4060 AssertFailed();
4061}
4062
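/* Editor's sketch (illustrative addition, not part of the original EM.cpp):
 * EMR3ExecuteVM() is driven by the per-VCPU emulation thread (EMT, see
 * VMEmt.cpp), conceptually along these lines:
 *
 *     int rc = EMR3ExecuteVM(pVM, pVCpu);
 *     // rc is typically VINF_EM_SUSPEND, VINF_EM_OFF or VINF_EM_TERMINATE,
 *     // or a fatal status after a guru meditation; the EMT then waits for
 *     // further requests (resume, power off, ...) or shuts down.
 */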