VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/EM.cpp@62644

Last change on this file since 62644 was 62640, checked in by vboxsync, 8 years ago

VMM: warnings.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 124.9 KB
/* $Id: EM.cpp 62640 2016-07-28 21:10:54Z vboxsync $ */
/** @file
 * EM - Execution Monitor / Manager.
 */

/*
 * Copyright (C) 2006-2016 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */

/** @page pg_em EM - The Execution Monitor / Manager
 *
 * The Execution Monitor/Manager is responsible for running the VM, scheduling
 * the right kind of execution (Raw-mode, Hardware Assisted, Recompiled or
 * Interpreted), and keeping the CPU states in sync. The function
 * EMR3ExecuteVM() is the 'main-loop' of the VM, while each of the execution
 * modes has different inner loops (emR3RawExecute, emR3HmExecute, and
 * emR3RemExecute).
 *
 * The interpreted execution is only used to avoid switching between
 * raw-mode/hm and the recompiler when fielding virtualization traps/faults.
 * The interpretation is thus implemented as part of EM.
 *
 * @see grp_em
 */
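
/* A rough sketch (not the exact code) of how EMR3ExecuteVM() drives the inner
 * loops named above; emR3Reschedule() and emR3ForcedActions() live further
 * down in this file:
 *
 *     for (;;)
 *     {
 *         enmState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx); // pick RAW / HM / IEM / REM
 *         switch (enmState)
 *         {
 *             case EMSTATE_RAW: rc = emR3RawExecute(pVM, pVCpu, &fFFDone); break;
 *             case EMSTATE_HM:  rc = emR3HmExecute(pVM, pVCpu, &fFFDone);  break;
 *             case EMSTATE_REM: rc = emR3RemExecute(pVM, pVCpu, &fFFDone); break;
 *             ...
 *         }
 *         if (!fFFDone)
 *             rc = emR3ForcedActions(pVM, pVCpu, rc); // service pending force flags
 *     }
 */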


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_EM
#include <VBox/vmm/em.h>
#include <VBox/vmm/vmm.h>
#include <VBox/vmm/patm.h>
#include <VBox/vmm/csam.h>
#include <VBox/vmm/selm.h>
#include <VBox/vmm/trpm.h>
#include <VBox/vmm/iem.h>
#include <VBox/vmm/iom.h>
#include <VBox/vmm/dbgf.h>
#include <VBox/vmm/pgm.h>
#ifdef VBOX_WITH_REM
# include <VBox/vmm/rem.h>
#endif
#ifdef VBOX_WITH_NEW_APIC
# include <VBox/vmm/apic.h>
#endif
#include <VBox/vmm/tm.h>
#include <VBox/vmm/mm.h>
#include <VBox/vmm/ssm.h>
#include <VBox/vmm/pdmapi.h>
#include <VBox/vmm/pdmcritsect.h>
#include <VBox/vmm/pdmqueue.h>
#include <VBox/vmm/hm.h>
#include <VBox/vmm/patm.h>
#include "EMInternal.h"
#include <VBox/vmm/vm.h>
#include <VBox/vmm/uvm.h>
#include <VBox/vmm/cpumdis.h>
#include <VBox/dis.h>
#include <VBox/disopcode.h>
#include "VMMTracing.h"

#include <iprt/asm.h>
#include <iprt/string.h>
#include <iprt/stream.h>
#include <iprt/thread.h>


/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/
#if 0 /* Disabled till after 2.1.0 when we've time to test it. */
#define EM_NOTIFY_HM
#endif


/*********************************************************************************************************************************
*   Internal Functions                                                                                                           *
*********************************************************************************************************************************/
static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM);
static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
#if defined(LOG_ENABLED) || defined(VBOX_STRICT)
static const char *emR3GetStateName(EMSTATE enmState);
#endif
static VBOXSTRICTRC emR3Debug(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc);
static int emR3RemStep(PVM pVM, PVMCPU pVCpu);
static int emR3RemExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone);
int emR3HighPriorityPostForcedActions(PVM pVM, PVMCPU pVCpu, int rc);


/**
 * Initializes the EM.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 */
VMMR3_INT_DECL(int) EMR3Init(PVM pVM)
{
    LogFlow(("EMR3Init\n"));
    /*
     * Assert alignment and sizes.
     */
    AssertCompileMemberAlignment(VM, em.s, 32);
    AssertCompile(sizeof(pVM->em.s) <= sizeof(pVM->em.padding));
    AssertCompile(sizeof(pVM->aCpus[0].em.s.u.FatalLongJump) <= sizeof(pVM->aCpus[0].em.s.u.achPaddingFatalLongJump));

    /*
     * Init the structure.
     */
    pVM->em.s.offVM = RT_OFFSETOF(VM, em.s);
    PCFGMNODE pCfgRoot = CFGMR3GetRoot(pVM);
    PCFGMNODE pCfgEM = CFGMR3GetChild(pCfgRoot, "EM");

    bool fEnabled;
    int rc = CFGMR3QueryBoolDef(pCfgRoot, "RawR3Enabled", &fEnabled, true);
    AssertLogRelRCReturn(rc, rc);
    pVM->fRecompileUser = !fEnabled;

    rc = CFGMR3QueryBoolDef(pCfgRoot, "RawR0Enabled", &fEnabled, true);
    AssertLogRelRCReturn(rc, rc);
    pVM->fRecompileSupervisor = !fEnabled;

#ifdef VBOX_WITH_RAW_RING1
    rc = CFGMR3QueryBoolDef(pCfgRoot, "RawR1Enabled", &pVM->fRawRing1Enabled, false);
    AssertLogRelRCReturn(rc, rc);
#else
    pVM->fRawRing1Enabled = false; /* Disabled by default. */
#endif

    rc = CFGMR3QueryBoolDef(pCfgEM, "IemExecutesAll", &pVM->em.s.fIemExecutesAll, false);
    AssertLogRelRCReturn(rc, rc);

    rc = CFGMR3QueryBoolDef(pCfgEM, "TripleFaultReset", &fEnabled, false);
    AssertLogRelRCReturn(rc, rc);
    pVM->em.s.fGuruOnTripleFault = !fEnabled;
    if (!pVM->em.s.fGuruOnTripleFault && pVM->cCpus > 1)
    {
        LogRel(("EM: Overriding /EM/TripleFaultReset, must be false on SMP.\n"));
        pVM->em.s.fGuruOnTripleFault = true;
    }

    Log(("EMR3Init: fRecompileUser=%RTbool fRecompileSupervisor=%RTbool fRawRing1Enabled=%RTbool fIemExecutesAll=%RTbool fGuruOnTripleFault=%RTbool\n",
         pVM->fRecompileUser, pVM->fRecompileSupervisor, pVM->fRawRing1Enabled, pVM->em.s.fIemExecutesAll, pVM->em.s.fGuruOnTripleFault));

#ifdef VBOX_WITH_REM
    /*
     * Initialize the REM critical section.
     */
    AssertCompileMemberAlignment(EM, CritSectREM, sizeof(uintptr_t));
    rc = PDMR3CritSectInit(pVM, &pVM->em.s.CritSectREM, RT_SRC_POS, "EM-REM");
    AssertRCReturn(rc, rc);
#endif

    /*
     * Saved state.
     */
    rc = SSMR3RegisterInternal(pVM, "em", 0, EM_SAVED_STATE_VERSION, 16,
                               NULL, NULL, NULL,
                               NULL, emR3Save, NULL,
                               NULL, emR3Load, NULL);
    if (RT_FAILURE(rc))
        return rc;

    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];

        pVCpu->em.s.enmState = (i == 0) ? EMSTATE_NONE : EMSTATE_WAIT_SIPI;
        pVCpu->em.s.enmPrevState = EMSTATE_NONE;
        pVCpu->em.s.fForceRAW = false;

        pVCpu->em.s.pCtx = CPUMQueryGuestCtxPtr(pVCpu);
#ifdef VBOX_WITH_RAW_MODE
        if (!HMIsEnabled(pVM))
        {
            pVCpu->em.s.pPatmGCState = PATMR3QueryGCStateHC(pVM);
            AssertMsg(pVCpu->em.s.pPatmGCState, ("PATMR3QueryGCStateHC failed!\n"));
        }
#endif

        /* Force reset of the time slice. */
        pVCpu->em.s.u64TimeSliceStart = 0;

# define EM_REG_COUNTER(a, b, c) \
        rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, c, b, i); \
        AssertRC(rc);

# define EM_REG_COUNTER_USED(a, b, c) \
        rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, c, b, i); \
        AssertRC(rc);

# define EM_REG_PROFILE(a, b, c) \
        rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, i); \
        AssertRC(rc);

# define EM_REG_PROFILE_ADV(a, b, c) \
        rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE_ADV, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, i); \
        AssertRC(rc);

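        /* For example, the first use below, EM_REG_PROFILE(&pStats->StatRZEmulate, "/EM/CPU%d/RZ/Interpret", ...),
           expands to a STAMR3RegisterF() call whose trailing 'i' argument substitutes the current CPU index
           for the %d in the sample name, yielding one statistics entry per virtual CPU. */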
        /*
         * Statistics.
         */
#ifdef VBOX_WITH_STATISTICS
        PEMSTATS pStats;
        rc = MMHyperAlloc(pVM, sizeof(*pStats), 0, MM_TAG_EM, (void **)&pStats);
        if (RT_FAILURE(rc))
            return rc;

        pVCpu->em.s.pStatsR3 = pStats;
        pVCpu->em.s.pStatsR0 = MMHyperR3ToR0(pVM, pStats);
        pVCpu->em.s.pStatsRC = MMHyperR3ToRC(pVM, pStats);

        EM_REG_PROFILE(&pStats->StatRZEmulate, "/EM/CPU%d/RZ/Interpret", "Profiling of EMInterpretInstruction.");
        EM_REG_PROFILE(&pStats->StatR3Emulate, "/EM/CPU%d/R3/Interpret", "Profiling of EMInterpretInstruction.");

        EM_REG_PROFILE(&pStats->StatRZInterpretSucceeded, "/EM/CPU%d/RZ/Interpret/Success", "The number of times an instruction was successfully interpreted.");
        EM_REG_PROFILE(&pStats->StatR3InterpretSucceeded, "/EM/CPU%d/R3/Interpret/Success", "The number of times an instruction was successfully interpreted.");

        EM_REG_COUNTER_USED(&pStats->StatRZAnd, "/EM/CPU%d/RZ/Interpret/Success/And", "The number of times AND was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3And, "/EM/CPU%d/R3/Interpret/Success/And", "The number of times AND was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZAdd, "/EM/CPU%d/RZ/Interpret/Success/Add", "The number of times ADD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Add, "/EM/CPU%d/R3/Interpret/Success/Add", "The number of times ADD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZAdc, "/EM/CPU%d/RZ/Interpret/Success/Adc", "The number of times ADC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Adc, "/EM/CPU%d/R3/Interpret/Success/Adc", "The number of times ADC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZSub, "/EM/CPU%d/RZ/Interpret/Success/Sub", "The number of times SUB was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Sub, "/EM/CPU%d/R3/Interpret/Success/Sub", "The number of times SUB was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZCpuId, "/EM/CPU%d/RZ/Interpret/Success/CpuId", "The number of times CPUID was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3CpuId, "/EM/CPU%d/R3/Interpret/Success/CpuId", "The number of times CPUID was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZDec, "/EM/CPU%d/RZ/Interpret/Success/Dec", "The number of times DEC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Dec, "/EM/CPU%d/R3/Interpret/Success/Dec", "The number of times DEC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZHlt, "/EM/CPU%d/RZ/Interpret/Success/Hlt", "The number of times HLT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Hlt, "/EM/CPU%d/R3/Interpret/Success/Hlt", "The number of times HLT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZInc, "/EM/CPU%d/RZ/Interpret/Success/Inc", "The number of times INC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Inc, "/EM/CPU%d/R3/Interpret/Success/Inc", "The number of times INC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZInvlPg, "/EM/CPU%d/RZ/Interpret/Success/Invlpg", "The number of times INVLPG was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3InvlPg, "/EM/CPU%d/R3/Interpret/Success/Invlpg", "The number of times INVLPG was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZIret, "/EM/CPU%d/RZ/Interpret/Success/Iret", "The number of times IRET was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Iret, "/EM/CPU%d/R3/Interpret/Success/Iret", "The number of times IRET was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZLLdt, "/EM/CPU%d/RZ/Interpret/Success/LLdt", "The number of times LLDT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3LLdt, "/EM/CPU%d/R3/Interpret/Success/LLdt", "The number of times LLDT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZLIdt, "/EM/CPU%d/RZ/Interpret/Success/LIdt", "The number of times LIDT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3LIdt, "/EM/CPU%d/R3/Interpret/Success/LIdt", "The number of times LIDT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZLGdt, "/EM/CPU%d/RZ/Interpret/Success/LGdt", "The number of times LGDT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3LGdt, "/EM/CPU%d/R3/Interpret/Success/LGdt", "The number of times LGDT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZMov, "/EM/CPU%d/RZ/Interpret/Success/Mov", "The number of times MOV was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Mov, "/EM/CPU%d/R3/Interpret/Success/Mov", "The number of times MOV was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZMovCRx, "/EM/CPU%d/RZ/Interpret/Success/MovCRx", "The number of times MOV CRx was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3MovCRx, "/EM/CPU%d/R3/Interpret/Success/MovCRx", "The number of times MOV CRx was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZMovDRx, "/EM/CPU%d/RZ/Interpret/Success/MovDRx", "The number of times MOV DRx was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3MovDRx, "/EM/CPU%d/R3/Interpret/Success/MovDRx", "The number of times MOV DRx was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZOr, "/EM/CPU%d/RZ/Interpret/Success/Or", "The number of times OR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Or, "/EM/CPU%d/R3/Interpret/Success/Or", "The number of times OR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZPop, "/EM/CPU%d/RZ/Interpret/Success/Pop", "The number of times POP was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Pop, "/EM/CPU%d/R3/Interpret/Success/Pop", "The number of times POP was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZRdtsc, "/EM/CPU%d/RZ/Interpret/Success/Rdtsc", "The number of times RDTSC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Rdtsc, "/EM/CPU%d/R3/Interpret/Success/Rdtsc", "The number of times RDTSC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZRdpmc, "/EM/CPU%d/RZ/Interpret/Success/Rdpmc", "The number of times RDPMC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Rdpmc, "/EM/CPU%d/R3/Interpret/Success/Rdpmc", "The number of times RDPMC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZSti, "/EM/CPU%d/RZ/Interpret/Success/Sti", "The number of times STI was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Sti, "/EM/CPU%d/R3/Interpret/Success/Sti", "The number of times STI was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZXchg, "/EM/CPU%d/RZ/Interpret/Success/Xchg", "The number of times XCHG was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Xchg, "/EM/CPU%d/R3/Interpret/Success/Xchg", "The number of times XCHG was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZXor, "/EM/CPU%d/RZ/Interpret/Success/Xor", "The number of times XOR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Xor, "/EM/CPU%d/R3/Interpret/Success/Xor", "The number of times XOR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZMonitor, "/EM/CPU%d/RZ/Interpret/Success/Monitor", "The number of times MONITOR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Monitor, "/EM/CPU%d/R3/Interpret/Success/Monitor", "The number of times MONITOR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZMWait, "/EM/CPU%d/RZ/Interpret/Success/MWait", "The number of times MWAIT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3MWait, "/EM/CPU%d/R3/Interpret/Success/MWait", "The number of times MWAIT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZBtr, "/EM/CPU%d/RZ/Interpret/Success/Btr", "The number of times BTR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Btr, "/EM/CPU%d/R3/Interpret/Success/Btr", "The number of times BTR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZBts, "/EM/CPU%d/RZ/Interpret/Success/Bts", "The number of times BTS was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Bts, "/EM/CPU%d/R3/Interpret/Success/Bts", "The number of times BTS was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZBtc, "/EM/CPU%d/RZ/Interpret/Success/Btc", "The number of times BTC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Btc, "/EM/CPU%d/R3/Interpret/Success/Btc", "The number of times BTC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZCmpXchg, "/EM/CPU%d/RZ/Interpret/Success/CmpXchg", "The number of times CMPXCHG was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3CmpXchg, "/EM/CPU%d/R3/Interpret/Success/CmpXchg", "The number of times CMPXCHG was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZCmpXchg8b, "/EM/CPU%d/RZ/Interpret/Success/CmpXchg8b", "The number of times CMPXCHG8B was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3CmpXchg8b, "/EM/CPU%d/R3/Interpret/Success/CmpXchg8b", "The number of times CMPXCHG8B was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZXAdd, "/EM/CPU%d/RZ/Interpret/Success/XAdd", "The number of times XADD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3XAdd, "/EM/CPU%d/R3/Interpret/Success/XAdd", "The number of times XADD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Rdmsr, "/EM/CPU%d/R3/Interpret/Success/Rdmsr", "The number of times RDMSR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZRdmsr, "/EM/CPU%d/RZ/Interpret/Success/Rdmsr", "The number of times RDMSR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Wrmsr, "/EM/CPU%d/R3/Interpret/Success/Wrmsr", "The number of times WRMSR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZWrmsr, "/EM/CPU%d/RZ/Interpret/Success/Wrmsr", "The number of times WRMSR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3StosWD, "/EM/CPU%d/R3/Interpret/Success/Stoswd", "The number of times STOSWD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZStosWD, "/EM/CPU%d/RZ/Interpret/Success/Stoswd", "The number of times STOSWD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZWbInvd, "/EM/CPU%d/RZ/Interpret/Success/WbInvd", "The number of times WBINVD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3WbInvd, "/EM/CPU%d/R3/Interpret/Success/WbInvd", "The number of times WBINVD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZLmsw, "/EM/CPU%d/RZ/Interpret/Success/Lmsw", "The number of times LMSW was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Lmsw, "/EM/CPU%d/R3/Interpret/Success/Lmsw", "The number of times LMSW was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZSmsw, "/EM/CPU%d/RZ/Interpret/Success/Smsw", "The number of times SMSW was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Smsw, "/EM/CPU%d/R3/Interpret/Success/Smsw", "The number of times SMSW was successfully interpreted.");

        EM_REG_COUNTER(&pStats->StatRZInterpretFailed, "/EM/CPU%d/RZ/Interpret/Failed", "The number of times an instruction was not interpreted.");
        EM_REG_COUNTER(&pStats->StatR3InterpretFailed, "/EM/CPU%d/R3/Interpret/Failed", "The number of times an instruction was not interpreted.");

        EM_REG_COUNTER_USED(&pStats->StatRZFailedAnd, "/EM/CPU%d/RZ/Interpret/Failed/And", "The number of times AND was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedAnd, "/EM/CPU%d/R3/Interpret/Failed/And", "The number of times AND was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedCpuId, "/EM/CPU%d/RZ/Interpret/Failed/CpuId", "The number of times CPUID was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedCpuId, "/EM/CPU%d/R3/Interpret/Failed/CpuId", "The number of times CPUID was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedDec, "/EM/CPU%d/RZ/Interpret/Failed/Dec", "The number of times DEC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedDec, "/EM/CPU%d/R3/Interpret/Failed/Dec", "The number of times DEC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedHlt, "/EM/CPU%d/RZ/Interpret/Failed/Hlt", "The number of times HLT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedHlt, "/EM/CPU%d/R3/Interpret/Failed/Hlt", "The number of times HLT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedInc, "/EM/CPU%d/RZ/Interpret/Failed/Inc", "The number of times INC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedInc, "/EM/CPU%d/R3/Interpret/Failed/Inc", "The number of times INC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedInvlPg, "/EM/CPU%d/RZ/Interpret/Failed/InvlPg", "The number of times INVLPG was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedInvlPg, "/EM/CPU%d/R3/Interpret/Failed/InvlPg", "The number of times INVLPG was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedIret, "/EM/CPU%d/RZ/Interpret/Failed/Iret", "The number of times IRET was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedIret, "/EM/CPU%d/R3/Interpret/Failed/Iret", "The number of times IRET was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedLLdt, "/EM/CPU%d/RZ/Interpret/Failed/LLdt", "The number of times LLDT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedLLdt, "/EM/CPU%d/R3/Interpret/Failed/LLdt", "The number of times LLDT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedLIdt, "/EM/CPU%d/RZ/Interpret/Failed/LIdt", "The number of times LIDT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedLIdt, "/EM/CPU%d/R3/Interpret/Failed/LIdt", "The number of times LIDT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedLGdt, "/EM/CPU%d/RZ/Interpret/Failed/LGdt", "The number of times LGDT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedLGdt, "/EM/CPU%d/R3/Interpret/Failed/LGdt", "The number of times LGDT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedMov, "/EM/CPU%d/RZ/Interpret/Failed/Mov", "The number of times MOV was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMov, "/EM/CPU%d/R3/Interpret/Failed/Mov", "The number of times MOV was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedMovCRx, "/EM/CPU%d/RZ/Interpret/Failed/MovCRx", "The number of times MOV CRx was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMovCRx, "/EM/CPU%d/R3/Interpret/Failed/MovCRx", "The number of times MOV CRx was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedMovDRx, "/EM/CPU%d/RZ/Interpret/Failed/MovDRx", "The number of times MOV DRx was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMovDRx, "/EM/CPU%d/R3/Interpret/Failed/MovDRx", "The number of times MOV DRx was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedOr, "/EM/CPU%d/RZ/Interpret/Failed/Or", "The number of times OR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedOr, "/EM/CPU%d/R3/Interpret/Failed/Or", "The number of times OR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedPop, "/EM/CPU%d/RZ/Interpret/Failed/Pop", "The number of times POP was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedPop, "/EM/CPU%d/R3/Interpret/Failed/Pop", "The number of times POP was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedSti, "/EM/CPU%d/RZ/Interpret/Failed/Sti", "The number of times STI was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedSti, "/EM/CPU%d/R3/Interpret/Failed/Sti", "The number of times STI was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedXchg, "/EM/CPU%d/RZ/Interpret/Failed/Xchg", "The number of times XCHG was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedXchg, "/EM/CPU%d/R3/Interpret/Failed/Xchg", "The number of times XCHG was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedXor, "/EM/CPU%d/RZ/Interpret/Failed/Xor", "The number of times XOR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedXor, "/EM/CPU%d/R3/Interpret/Failed/Xor", "The number of times XOR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedMonitor, "/EM/CPU%d/RZ/Interpret/Failed/Monitor", "The number of times MONITOR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMonitor, "/EM/CPU%d/R3/Interpret/Failed/Monitor", "The number of times MONITOR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedMWait, "/EM/CPU%d/RZ/Interpret/Failed/MWait", "The number of times MWAIT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMWait, "/EM/CPU%d/R3/Interpret/Failed/MWait", "The number of times MWAIT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedRdtsc, "/EM/CPU%d/RZ/Interpret/Failed/Rdtsc", "The number of times RDTSC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedRdtsc, "/EM/CPU%d/R3/Interpret/Failed/Rdtsc", "The number of times RDTSC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedRdpmc, "/EM/CPU%d/RZ/Interpret/Failed/Rdpmc", "The number of times RDPMC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedRdpmc, "/EM/CPU%d/R3/Interpret/Failed/Rdpmc", "The number of times RDPMC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedRdmsr, "/EM/CPU%d/RZ/Interpret/Failed/Rdmsr", "The number of times RDMSR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedRdmsr, "/EM/CPU%d/R3/Interpret/Failed/Rdmsr", "The number of times RDMSR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedWrmsr, "/EM/CPU%d/RZ/Interpret/Failed/Wrmsr", "The number of times WRMSR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedWrmsr, "/EM/CPU%d/R3/Interpret/Failed/Wrmsr", "The number of times WRMSR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedLmsw, "/EM/CPU%d/RZ/Interpret/Failed/Lmsw", "The number of times LMSW was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedLmsw, "/EM/CPU%d/R3/Interpret/Failed/Lmsw", "The number of times LMSW was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedSmsw, "/EM/CPU%d/RZ/Interpret/Failed/Smsw", "The number of times SMSW was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedSmsw, "/EM/CPU%d/R3/Interpret/Failed/Smsw", "The number of times SMSW was not interpreted.");

        EM_REG_COUNTER_USED(&pStats->StatRZFailedMisc, "/EM/CPU%d/RZ/Interpret/Failed/Misc", "The number of times some misc instruction was encountered.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMisc, "/EM/CPU%d/R3/Interpret/Failed/Misc", "The number of times some misc instruction was encountered.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedAdd, "/EM/CPU%d/RZ/Interpret/Failed/Add", "The number of times ADD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedAdd, "/EM/CPU%d/R3/Interpret/Failed/Add", "The number of times ADD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedAdc, "/EM/CPU%d/RZ/Interpret/Failed/Adc", "The number of times ADC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedAdc, "/EM/CPU%d/R3/Interpret/Failed/Adc", "The number of times ADC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedBtr, "/EM/CPU%d/RZ/Interpret/Failed/Btr", "The number of times BTR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedBtr, "/EM/CPU%d/R3/Interpret/Failed/Btr", "The number of times BTR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedBts, "/EM/CPU%d/RZ/Interpret/Failed/Bts", "The number of times BTS was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedBts, "/EM/CPU%d/R3/Interpret/Failed/Bts", "The number of times BTS was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedBtc, "/EM/CPU%d/RZ/Interpret/Failed/Btc", "The number of times BTC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedBtc, "/EM/CPU%d/R3/Interpret/Failed/Btc", "The number of times BTC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedCli, "/EM/CPU%d/RZ/Interpret/Failed/Cli", "The number of times CLI was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedCli, "/EM/CPU%d/R3/Interpret/Failed/Cli", "The number of times CLI was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedCmpXchg, "/EM/CPU%d/RZ/Interpret/Failed/CmpXchg", "The number of times CMPXCHG was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedCmpXchg, "/EM/CPU%d/R3/Interpret/Failed/CmpXchg", "The number of times CMPXCHG was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedCmpXchg8b, "/EM/CPU%d/RZ/Interpret/Failed/CmpXchg8b", "The number of times CMPXCHG8B was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedCmpXchg8b, "/EM/CPU%d/R3/Interpret/Failed/CmpXchg8b", "The number of times CMPXCHG8B was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedXAdd, "/EM/CPU%d/RZ/Interpret/Failed/XAdd", "The number of times XADD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedXAdd, "/EM/CPU%d/R3/Interpret/Failed/XAdd", "The number of times XADD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedMovNTPS, "/EM/CPU%d/RZ/Interpret/Failed/MovNTPS", "The number of times MOVNTPS was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMovNTPS, "/EM/CPU%d/R3/Interpret/Failed/MovNTPS", "The number of times MOVNTPS was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedStosWD, "/EM/CPU%d/RZ/Interpret/Failed/StosWD", "The number of times STOSWD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedStosWD, "/EM/CPU%d/R3/Interpret/Failed/StosWD", "The number of times STOSWD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedSub, "/EM/CPU%d/RZ/Interpret/Failed/Sub", "The number of times SUB was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedSub, "/EM/CPU%d/R3/Interpret/Failed/Sub", "The number of times SUB was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedWbInvd, "/EM/CPU%d/RZ/Interpret/Failed/WbInvd", "The number of times WBINVD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedWbInvd, "/EM/CPU%d/R3/Interpret/Failed/WbInvd", "The number of times WBINVD was not interpreted.");

        EM_REG_COUNTER_USED(&pStats->StatRZFailedUserMode, "/EM/CPU%d/RZ/Interpret/Failed/UserMode", "The number of rejections because of CPL.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedUserMode, "/EM/CPU%d/R3/Interpret/Failed/UserMode", "The number of rejections because of CPL.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedPrefix, "/EM/CPU%d/RZ/Interpret/Failed/Prefix", "The number of rejections because of an instruction prefix.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedPrefix, "/EM/CPU%d/R3/Interpret/Failed/Prefix", "The number of rejections because of an instruction prefix.");

        EM_REG_COUNTER_USED(&pStats->StatIoRestarted, "/EM/CPU%d/R3/PrivInst/IoRestarted", "I/O instructions restarted in ring-3.");
        EM_REG_COUNTER_USED(&pStats->StatIoIem, "/EM/CPU%d/R3/PrivInst/IoIem", "I/O instructions handed to IEM in ring-3.");
        EM_REG_COUNTER_USED(&pStats->StatCli, "/EM/CPU%d/R3/PrivInst/Cli", "Number of cli instructions.");
        EM_REG_COUNTER_USED(&pStats->StatSti, "/EM/CPU%d/R3/PrivInst/Sti", "Number of sti instructions.");
        EM_REG_COUNTER_USED(&pStats->StatHlt, "/EM/CPU%d/R3/PrivInst/Hlt", "Number of hlt instructions not handled in GC because of PATM.");
        EM_REG_COUNTER_USED(&pStats->StatInvlpg, "/EM/CPU%d/R3/PrivInst/Invlpg", "Number of invlpg instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMisc, "/EM/CPU%d/R3/PrivInst/Misc", "Number of misc. instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[0], "/EM/CPU%d/R3/PrivInst/Mov CR0, X", "Number of mov CR0 write instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[1], "/EM/CPU%d/R3/PrivInst/Mov CR1, X", "Number of mov CR1 write instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[2], "/EM/CPU%d/R3/PrivInst/Mov CR2, X", "Number of mov CR2 write instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[3], "/EM/CPU%d/R3/PrivInst/Mov CR3, X", "Number of mov CR3 write instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[4], "/EM/CPU%d/R3/PrivInst/Mov CR4, X", "Number of mov CR4 write instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovReadCR[0], "/EM/CPU%d/R3/PrivInst/Mov X, CR0", "Number of mov CR0 read instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovReadCR[1], "/EM/CPU%d/R3/PrivInst/Mov X, CR1", "Number of mov CR1 read instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovReadCR[2], "/EM/CPU%d/R3/PrivInst/Mov X, CR2", "Number of mov CR2 read instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovReadCR[3], "/EM/CPU%d/R3/PrivInst/Mov X, CR3", "Number of mov CR3 read instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovReadCR[4], "/EM/CPU%d/R3/PrivInst/Mov X, CR4", "Number of mov CR4 read instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovDRx, "/EM/CPU%d/R3/PrivInst/MovDRx", "Number of mov DRx instructions.");
        EM_REG_COUNTER_USED(&pStats->StatIret, "/EM/CPU%d/R3/PrivInst/Iret", "Number of iret instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovLgdt, "/EM/CPU%d/R3/PrivInst/Lgdt", "Number of lgdt instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovLidt, "/EM/CPU%d/R3/PrivInst/Lidt", "Number of lidt instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovLldt, "/EM/CPU%d/R3/PrivInst/Lldt", "Number of lldt instructions.");
        EM_REG_COUNTER_USED(&pStats->StatSysEnter, "/EM/CPU%d/R3/PrivInst/Sysenter", "Number of sysenter instructions.");
        EM_REG_COUNTER_USED(&pStats->StatSysExit, "/EM/CPU%d/R3/PrivInst/Sysexit", "Number of sysexit instructions.");
        EM_REG_COUNTER_USED(&pStats->StatSysCall, "/EM/CPU%d/R3/PrivInst/Syscall", "Number of syscall instructions.");
        EM_REG_COUNTER_USED(&pStats->StatSysRet, "/EM/CPU%d/R3/PrivInst/Sysret", "Number of sysret instructions.");

        EM_REG_COUNTER(&pVCpu->em.s.StatTotalClis, "/EM/CPU%d/Cli/Total", "Total number of cli instructions executed.");
        pVCpu->em.s.pCliStatTree = 0;

        /* these should be considered for release statistics. */
        EM_REG_COUNTER(&pVCpu->em.s.StatIOEmu, "/PROF/CPU%d/EM/Emulation/IO", "Profiling of emR3RawExecuteIOInstruction.");
        EM_REG_COUNTER(&pVCpu->em.s.StatPrivEmu, "/PROF/CPU%d/EM/Emulation/Priv", "Profiling of emR3RawPrivileged.");
        EM_REG_PROFILE(&pVCpu->em.s.StatHmEntry, "/PROF/CPU%d/EM/HmEnter", "Profiling Hardware Accelerated Mode entry overhead.");
        EM_REG_PROFILE(&pVCpu->em.s.StatHmExec, "/PROF/CPU%d/EM/HmExec", "Profiling Hardware Accelerated Mode execution.");
        EM_REG_PROFILE(&pVCpu->em.s.StatIEMEmu, "/PROF/CPU%d/EM/IEMEmuSingle", "Profiling single instruction IEM execution.");
        EM_REG_PROFILE(&pVCpu->em.s.StatIEMThenREM, "/PROF/CPU%d/EM/IEMThenRem", "Profiling IEM-then-REM instruction execution (by IEM).");
        EM_REG_PROFILE(&pVCpu->em.s.StatREMEmu, "/PROF/CPU%d/EM/REMEmuSingle", "Profiling single instruction REM execution.");
        EM_REG_PROFILE(&pVCpu->em.s.StatREMExec, "/PROF/CPU%d/EM/REMExec", "Profiling REM execution.");
        EM_REG_PROFILE(&pVCpu->em.s.StatREMSync, "/PROF/CPU%d/EM/REMSync", "Profiling REM context syncing.");
        EM_REG_PROFILE(&pVCpu->em.s.StatRAWEntry, "/PROF/CPU%d/EM/RAWEnter", "Profiling Raw Mode entry overhead.");
        EM_REG_PROFILE(&pVCpu->em.s.StatRAWExec, "/PROF/CPU%d/EM/RAWExec", "Profiling Raw Mode execution.");
        EM_REG_PROFILE(&pVCpu->em.s.StatRAWTail, "/PROF/CPU%d/EM/RAWTail", "Profiling Raw Mode tail overhead.");

#endif /* VBOX_WITH_STATISTICS */

        EM_REG_COUNTER(&pVCpu->em.s.StatForcedActions, "/PROF/CPU%d/EM/ForcedActions", "Profiling forced action execution.");
        EM_REG_COUNTER(&pVCpu->em.s.StatHalted, "/PROF/CPU%d/EM/Halted", "Profiling halted state (VMR3WaitHalted).");
        EM_REG_PROFILE_ADV(&pVCpu->em.s.StatCapped, "/PROF/CPU%d/EM/Capped", "Profiling capped state (sleep).");
        EM_REG_COUNTER(&pVCpu->em.s.StatREMTotal, "/PROF/CPU%d/EM/REMTotal", "Profiling emR3RemExecute (excluding FFs).");
        EM_REG_COUNTER(&pVCpu->em.s.StatRAWTotal, "/PROF/CPU%d/EM/RAWTotal", "Profiling emR3RawExecute (excluding FFs).");

        EM_REG_PROFILE_ADV(&pVCpu->em.s.StatTotal, "/PROF/CPU%d/EM/Total", "Profiling EMR3ExecuteVM.");
    }

    emR3InitDbg(pVM);
    return VINF_SUCCESS;
}
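
/* A note on the CFGM keys read above: "RawR3Enabled" and "RawR0Enabled" sit at
 * the VM configuration root, while "IemExecutesAll" and "TripleFaultReset" live
 * under the /EM node. For instance (illustrative only, relying on VirtualBox's
 * extradata-to-CFGM mapping), all guest code can be forced through IEM with:
 *
 *     VBoxManage setextradata "<vmname>" "VBoxInternal/EM/IemExecutesAll" 1
 */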


/**
 * Applies relocations to data and code managed by this
 * component. This function will be called at init and
 * whenever the VMM needs to relocate itself inside the GC.
 *
 * @param   pVM     The cross context VM structure.
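 * @note    Only the raw-mode context (RC) statistics pointer needs refreshing
 *          here; the ring-3 address of the statistics block is unaffected by
 *          the relocation.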
 */
VMMR3_INT_DECL(void) EMR3Relocate(PVM pVM)
{
    LogFlow(("EMR3Relocate\n"));
    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];
        if (pVCpu->em.s.pStatsR3)
            pVCpu->em.s.pStatsRC = MMHyperR3ToRC(pVM, pVCpu->em.s.pStatsR3);
    }
}


/**
 * Reset the EM state for a CPU.
 *
 * Called by EMR3Reset and hot plugging.
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMMR3_INT_DECL(void) EMR3ResetCpu(PVMCPU pVCpu)
{
    pVCpu->em.s.fForceRAW = false;

    /* VMR3ResetFF may return VINF_EM_RESET or VINF_EM_SUSPEND, so transition
       out of the HALTED state here so that enmPrevState doesn't end up as
       HALTED when EMR3Execute returns. */
    if (pVCpu->em.s.enmState == EMSTATE_HALTED)
    {
        Log(("EMR3ResetCpu: Cpu#%u %s -> %s\n", pVCpu->idCpu, emR3GetStateName(pVCpu->em.s.enmState), pVCpu->idCpu == 0 ? "EMSTATE_NONE" : "EMSTATE_WAIT_SIPI"));
        pVCpu->em.s.enmState = pVCpu->idCpu == 0 ? EMSTATE_NONE : EMSTATE_WAIT_SIPI;
    }
}


/**
 * Reset notification.
 *
 * @param   pVM     The cross context VM structure.
 */
VMMR3_INT_DECL(void) EMR3Reset(PVM pVM)
{
    Log(("EMR3Reset: \n"));
    for (VMCPUID i = 0; i < pVM->cCpus; i++)
        EMR3ResetCpu(&pVM->aCpus[i]);
}


/**
 * Terminates the EM.
 *
 * Termination means cleaning up and freeing all resources;
 * the VM itself is at this point powered off or suspended.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 */
VMMR3_INT_DECL(int) EMR3Term(PVM pVM)
{
    AssertMsg(pVM->em.s.offVM, ("bad init order!\n"));

#ifdef VBOX_WITH_REM
    PDMR3CritSectDelete(&pVM->em.s.CritSectREM);
#endif
    return VINF_SUCCESS;
}


/**
 * Execute state save operation.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   pSSM    SSM operation handle.
 */
static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM)
{
    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];

        int rc = SSMR3PutBool(pSSM, pVCpu->em.s.fForceRAW);
        AssertRCReturn(rc, rc);

        Assert(pVCpu->em.s.enmState == EMSTATE_SUSPENDED);
        Assert(pVCpu->em.s.enmPrevState != EMSTATE_SUSPENDED);
        rc = SSMR3PutU32(pSSM, pVCpu->em.s.enmPrevState);
        AssertRCReturn(rc, rc);

        /* Save mwait state. */
        rc = SSMR3PutU32(pSSM, pVCpu->em.s.MWait.fWait);
        AssertRCReturn(rc, rc);
        rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMWaitRAX);
        AssertRCReturn(rc, rc);
        rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMWaitRCX);
        AssertRCReturn(rc, rc);
        rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRAX);
        AssertRCReturn(rc, rc);
        rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRCX);
        AssertRCReturn(rc, rc);
        rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRDX);
        AssertRCReturn(rc, rc);
    }
    return VINF_SUCCESS;
}
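
/* Note: emR3Load below must read these fields back in exactly the order written
 * here; the mwait block is additionally guarded by the saved-state version
 * (EM_SAVED_STATE_VERSION_PRE_MWAIT). */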


/**
 * Execute state load operation.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pSSM        SSM operation handle.
 * @param   uVersion    Data layout version.
 * @param   uPass       The data pass.
 */
static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
{
    /*
     * Validate version.
     */
    if (   uVersion > EM_SAVED_STATE_VERSION
        || uVersion < EM_SAVED_STATE_VERSION_PRE_SMP)
    {
        AssertMsgFailed(("emR3Load: Invalid version uVersion=%d (current %d)!\n", uVersion, EM_SAVED_STATE_VERSION));
        return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
    }
    Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);

    /*
     * Load the saved state.
     */
    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];

        int rc = SSMR3GetBool(pSSM, &pVCpu->em.s.fForceRAW);
        if (RT_FAILURE(rc))
            pVCpu->em.s.fForceRAW = false;
        AssertRCReturn(rc, rc);

        if (uVersion > EM_SAVED_STATE_VERSION_PRE_SMP)
        {
            AssertCompile(sizeof(pVCpu->em.s.enmPrevState) == sizeof(uint32_t));
            rc = SSMR3GetU32(pSSM, (uint32_t *)&pVCpu->em.s.enmPrevState);
            AssertRCReturn(rc, rc);
            Assert(pVCpu->em.s.enmPrevState != EMSTATE_SUSPENDED);

            pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
        }
        if (uVersion > EM_SAVED_STATE_VERSION_PRE_MWAIT)
        {
            /* Load mwait state. */
            rc = SSMR3GetU32(pSSM, &pVCpu->em.s.MWait.fWait);
            AssertRCReturn(rc, rc);
            rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMWaitRAX);
            AssertRCReturn(rc, rc);
            rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMWaitRCX);
            AssertRCReturn(rc, rc);
            rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRAX);
            AssertRCReturn(rc, rc);
            rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRCX);
            AssertRCReturn(rc, rc);
            rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRDX);
            AssertRCReturn(rc, rc);
        }

        Assert(!pVCpu->em.s.pCliStatTree);
    }
    return VINF_SUCCESS;
}


/**
 * Argument packet for emR3SetExecutionPolicy.
 */
struct EMR3SETEXECPOLICYARGS
{
    EMEXECPOLICY    enmPolicy;
    bool            fEnforce;
};


/**
 * @callback_method_impl{FNVMMEMTRENDEZVOUS, Rendezvous callback for EMR3SetExecutionPolicy.}
 */
static DECLCALLBACK(VBOXSTRICTRC) emR3SetExecutionPolicy(PVM pVM, PVMCPU pVCpu, void *pvUser)
{
    /*
     * Only the first CPU changes the variables.
     */
    if (pVCpu->idCpu == 0)
    {
        struct EMR3SETEXECPOLICYARGS *pArgs = (struct EMR3SETEXECPOLICYARGS *)pvUser;
        switch (pArgs->enmPolicy)
        {
            case EMEXECPOLICY_RECOMPILE_RING0:
                pVM->fRecompileSupervisor = pArgs->fEnforce;
                break;
            case EMEXECPOLICY_RECOMPILE_RING3:
                pVM->fRecompileUser = pArgs->fEnforce;
                break;
            case EMEXECPOLICY_IEM_ALL:
                pVM->em.s.fIemExecutesAll = pArgs->fEnforce;
                break;
            default:
                AssertFailedReturn(VERR_INVALID_PARAMETER);
        }
        Log(("emR3SetExecutionPolicy: fRecompileUser=%RTbool fRecompileSupervisor=%RTbool fIemExecutesAll=%RTbool\n",
             pVM->fRecompileUser, pVM->fRecompileSupervisor, pVM->em.s.fIemExecutesAll));
    }

    /*
     * Force rescheduling if in RAW, HM, IEM, or REM.
     */
    return    pVCpu->em.s.enmState == EMSTATE_RAW
           || pVCpu->em.s.enmState == EMSTATE_HM
           || pVCpu->em.s.enmState == EMSTATE_IEM
           || pVCpu->em.s.enmState == EMSTATE_REM
           || pVCpu->em.s.enmState == EMSTATE_IEM_THEN_REM
         ? VINF_EM_RESCHEDULE
         : VINF_SUCCESS;
}


/**
 * Changes an execution scheduling policy parameter.
 *
 * This is used to enable or disable raw-mode / hardware-virtualization
 * execution of user and supervisor code.
 *
 * @returns VINF_SUCCESS on success.
 * @returns VINF_EM_RESCHEDULE if a rescheduling might be required.
 * @returns VERR_INVALID_PARAMETER on an invalid enmPolicy value.
 *
 * @param   pUVM            The user mode VM handle.
 * @param   enmPolicy       The scheduling policy to change.
 * @param   fEnforce        Whether to enforce the policy or not.
 */
VMMR3DECL(int) EMR3SetExecutionPolicy(PUVM pUVM, EMEXECPOLICY enmPolicy, bool fEnforce)
{
    UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
    VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE);
    AssertReturn(enmPolicy > EMEXECPOLICY_INVALID && enmPolicy < EMEXECPOLICY_END, VERR_INVALID_PARAMETER);

    struct EMR3SETEXECPOLICYARGS Args = { enmPolicy, fEnforce };
    return VMMR3EmtRendezvous(pUVM->pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING, emR3SetExecutionPolicy, &Args);
}
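
/* An illustrative (hypothetical) caller forcing all guest code through IEM:
 *
 *     int rc = EMR3SetExecutionPolicy(pUVM, EMEXECPOLICY_IEM_ALL, true /-*fEnforce*-/);
 *
 * A VINF_EM_RESCHEDULE result from the rendezvous just means the EMTs will pick
 * a new execution mode on their next pass through the main loop. */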


/**
 * Queries an execution scheduling policy parameter.
 *
 * @returns VBox status code
 * @param   pUVM            The user mode VM handle.
 * @param   enmPolicy       The scheduling policy to query.
 * @param   pfEnforced      Where to return the current value.
 */
VMMR3DECL(int) EMR3QueryExecutionPolicy(PUVM pUVM, EMEXECPOLICY enmPolicy, bool *pfEnforced)
{
    AssertReturn(enmPolicy > EMEXECPOLICY_INVALID && enmPolicy < EMEXECPOLICY_END, VERR_INVALID_PARAMETER);
    AssertPtrReturn(pfEnforced, VERR_INVALID_POINTER);
    UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
    PVM pVM = pUVM->pVM;
    VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);

    /* No need to bother EMTs with a query. */
    switch (enmPolicy)
    {
        case EMEXECPOLICY_RECOMPILE_RING0:
            *pfEnforced = pVM->fRecompileSupervisor;
            break;
        case EMEXECPOLICY_RECOMPILE_RING3:
            *pfEnforced = pVM->fRecompileUser;
            break;
        case EMEXECPOLICY_IEM_ALL:
            *pfEnforced = pVM->em.s.fIemExecutesAll;
            break;
        default:
            AssertFailedReturn(VERR_INTERNAL_ERROR_2);
    }

    return VINF_SUCCESS;
}


/**
 * Raise a fatal error.
 *
 * Safely terminate the VM with full state report and stuff. This function
 * will naturally never return.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   rc          VBox status code.
 */
VMMR3DECL(void) EMR3FatalError(PVMCPU pVCpu, int rc)
{
    pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
    longjmp(pVCpu->em.s.u.FatalLongJump, rc);
}
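
/* Note: the matching setjmp on FatalLongJump is presumably done on entry to
 * EMR3ExecuteVM() (the member is size-asserted against its padding in EMR3Init
 * above), which is why this function never returns to its caller. */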


#if defined(LOG_ENABLED) || defined(VBOX_STRICT)
/**
 * Gets the EM state name.
 *
 * @returns Pointer to a read-only state name.
 * @param   enmState    The state.
 */
static const char *emR3GetStateName(EMSTATE enmState)
{
    switch (enmState)
    {
        case EMSTATE_NONE:              return "EMSTATE_NONE";
        case EMSTATE_RAW:               return "EMSTATE_RAW";
        case EMSTATE_HM:                return "EMSTATE_HM";
        case EMSTATE_IEM:               return "EMSTATE_IEM";
        case EMSTATE_REM:               return "EMSTATE_REM";
        case EMSTATE_HALTED:            return "EMSTATE_HALTED";
        case EMSTATE_WAIT_SIPI:         return "EMSTATE_WAIT_SIPI";
        case EMSTATE_SUSPENDED:         return "EMSTATE_SUSPENDED";
        case EMSTATE_TERMINATING:       return "EMSTATE_TERMINATING";
        case EMSTATE_DEBUG_GUEST_RAW:   return "EMSTATE_DEBUG_GUEST_RAW";
        case EMSTATE_DEBUG_GUEST_HM:    return "EMSTATE_DEBUG_GUEST_HM";
        case EMSTATE_DEBUG_GUEST_IEM:   return "EMSTATE_DEBUG_GUEST_IEM";
        case EMSTATE_DEBUG_GUEST_REM:   return "EMSTATE_DEBUG_GUEST_REM";
        case EMSTATE_DEBUG_HYPER:       return "EMSTATE_DEBUG_HYPER";
        case EMSTATE_GURU_MEDITATION:   return "EMSTATE_GURU_MEDITATION";
        case EMSTATE_IEM_THEN_REM:      return "EMSTATE_IEM_THEN_REM";
        default:                        return "Unknown!";
    }
}
#endif /* LOG_ENABLED || VBOX_STRICT */


/**
 * Debug loop.
 *
 * @returns VBox status code for EM.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   rc      Current EM VBox status code.
 */
static VBOXSTRICTRC emR3Debug(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc)
{
    for (;;)
    {
        Log(("emR3Debug: rc=%Rrc\n", VBOXSTRICTRC_VAL(rc)));
        const VBOXSTRICTRC rcLast = rc;

        /*
         * Debug related RC.
         */
        switch (VBOXSTRICTRC_VAL(rc))
        {
            /*
             * Single step an instruction.
             */
            case VINF_EM_DBG_STEP:
                if (   pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_RAW
                    || pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER
                    || pVCpu->em.s.fForceRAW /* paranoia */)
#ifdef VBOX_WITH_RAW_MODE
                    rc = emR3RawStep(pVM, pVCpu);
#else
                    AssertLogRelMsgFailedStmt(("Bad EM state."), VERR_EM_INTERNAL_ERROR);
#endif
                else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_HM)
                    rc = EMR3HmSingleInstruction(pVM, pVCpu, 0 /*fFlags*/);
#ifdef VBOX_WITH_REM
                else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_REM)
                    rc = emR3RemStep(pVM, pVCpu);
#endif
                else
                {
                    rc = IEMExecOne(pVCpu); /** @todo add dedicated interface... */
                    if (rc == VINF_SUCCESS || rc == VINF_EM_RESCHEDULE)
                        rc = VINF_EM_DBG_STEPPED;
                }
                break;

            /*
             * Simple events: stepped, breakpoint, stop/assertion.
             */
            case VINF_EM_DBG_STEPPED:
                rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED);
                break;

            case VINF_EM_DBG_BREAKPOINT:
                rc = DBGFR3EventBreakpoint(pVM, DBGFEVENT_BREAKPOINT);
                break;

            case VINF_EM_DBG_STOP:
                rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, NULL, 0, NULL, NULL);
                break;

            case VINF_EM_DBG_EVENT:
                rc = DBGFR3EventHandlePending(pVM, pVCpu);
                break;

            case VINF_EM_DBG_HYPER_STEPPED:
                rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED_HYPER);
                break;

            case VINF_EM_DBG_HYPER_BREAKPOINT:
                rc = DBGFR3EventBreakpoint(pVM, DBGFEVENT_BREAKPOINT_HYPER);
                break;

            case VINF_EM_DBG_HYPER_ASSERTION:
                RTPrintf("\nVINF_EM_DBG_HYPER_ASSERTION:\n%s%s\n", VMMR3GetRZAssertMsg1(pVM), VMMR3GetRZAssertMsg2(pVM));
                RTLogFlush(NULL);
                rc = DBGFR3EventAssertion(pVM, DBGFEVENT_ASSERTION_HYPER, VMMR3GetRZAssertMsg1(pVM), VMMR3GetRZAssertMsg2(pVM));
                break;

            /*
             * Guru meditation.
             */
            case VERR_VMM_RING0_ASSERTION: /** @todo Make a guru meditation event! */
                rc = DBGFR3EventSrc(pVM, DBGFEVENT_FATAL_ERROR, "VERR_VMM_RING0_ASSERTION", 0, NULL, NULL);
                break;
            case VERR_REM_TOO_MANY_TRAPS: /** @todo Make a guru meditation event! */
                rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, "VERR_REM_TOO_MANY_TRAPS", 0, NULL, NULL);
                break;
            case VINF_EM_TRIPLE_FAULT: /** @todo Make a guru meditation event! */
                rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, "VINF_EM_TRIPLE_FAULT", 0, NULL, NULL);
                break;

            default: /** @todo don't use default for guru, but make special error codes! */
            {
                LogRel(("emR3Debug: rc=%Rrc\n", VBOXSTRICTRC_VAL(rc)));
                rc = DBGFR3Event(pVM, DBGFEVENT_FATAL_ERROR);
                break;
            }
        }

        /*
         * Process the result.
         */
        switch (VBOXSTRICTRC_VAL(rc))
        {
            /*
             * Continue the debugging loop.
             */
            case VINF_EM_DBG_STEP:
            case VINF_EM_DBG_STOP:
            case VINF_EM_DBG_EVENT:
            case VINF_EM_DBG_STEPPED:
            case VINF_EM_DBG_BREAKPOINT:
            case VINF_EM_DBG_HYPER_STEPPED:
            case VINF_EM_DBG_HYPER_BREAKPOINT:
            case VINF_EM_DBG_HYPER_ASSERTION:
                break;

            /*
             * Resuming execution (in some form) has to be done here if we got
             * a hypervisor debug event.
             */
            case VINF_SUCCESS:
            case VINF_EM_RESUME:
            case VINF_EM_SUSPEND:
            case VINF_EM_RESCHEDULE:
            case VINF_EM_RESCHEDULE_RAW:
            case VINF_EM_RESCHEDULE_REM:
            case VINF_EM_HALT:
                if (pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER)
                {
#ifdef VBOX_WITH_RAW_MODE
                    rc = emR3RawResumeHyper(pVM, pVCpu);
                    if (rc != VINF_SUCCESS && RT_SUCCESS(rc))
                        continue;
#else
                    AssertLogRelMsgFailedReturn(("Not implemented\n"), VERR_EM_INTERNAL_ERROR);
#endif
                }
                if (rc == VINF_SUCCESS)
                    rc = VINF_EM_RESCHEDULE;
                return rc;

            /*
             * The debugger isn't attached.
             * We'll simply turn the thing off since that's the easiest thing to do.
             */
            case VERR_DBGF_NOT_ATTACHED:
                switch (VBOXSTRICTRC_VAL(rcLast))
                {
                    case VINF_EM_DBG_HYPER_STEPPED:
                    case VINF_EM_DBG_HYPER_BREAKPOINT:
                    case VINF_EM_DBG_HYPER_ASSERTION:
                    case VERR_TRPM_PANIC:
                    case VERR_TRPM_DONT_PANIC:
                    case VERR_VMM_RING0_ASSERTION:
                    case VERR_VMM_HYPER_CR3_MISMATCH:
                    case VERR_VMM_RING3_CALL_DISABLED:
                        return rcLast;
                }
                return VINF_EM_OFF;

            /*
             * Status codes terminating the VM in one or another sense.
             */
            case VINF_EM_TERMINATE:
            case VINF_EM_OFF:
            case VINF_EM_RESET:
            case VINF_EM_NO_MEMORY:
            case VINF_EM_RAW_STALE_SELECTOR:
            case VINF_EM_RAW_IRET_TRAP:
            case VERR_TRPM_PANIC:
            case VERR_TRPM_DONT_PANIC:
            case VERR_IEM_INSTR_NOT_IMPLEMENTED:
            case VERR_IEM_ASPECT_NOT_IMPLEMENTED:
            case VERR_VMM_RING0_ASSERTION:
            case VERR_VMM_HYPER_CR3_MISMATCH:
            case VERR_VMM_RING3_CALL_DISABLED:
            case VERR_INTERNAL_ERROR:
            case VERR_INTERNAL_ERROR_2:
            case VERR_INTERNAL_ERROR_3:
            case VERR_INTERNAL_ERROR_4:
            case VERR_INTERNAL_ERROR_5:
            case VERR_IPE_UNEXPECTED_STATUS:
            case VERR_IPE_UNEXPECTED_INFO_STATUS:
            case VERR_IPE_UNEXPECTED_ERROR_STATUS:
                return rc;

            /*
             * The rest is unexpected, and will keep us here.
             */
            default:
                AssertMsgFailed(("Unexpected rc %Rrc!\n", VBOXSTRICTRC_VAL(rc)));
                break;
        }
    } /* debug forever */
}


/**
 * Steps recompiled code.
 *
 * @returns VBox status code. The most important ones are: VINF_EM_STEP_EVENT,
 *          VINF_EM_RESCHEDULE, VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
 *
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
static int emR3RemStep(PVM pVM, PVMCPU pVCpu)
{
    Log3(("emR3RemStep: cs:eip=%04x:%08x\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));

#ifdef VBOX_WITH_REM
    EMRemLock(pVM);

    /*
     * Switch to REM, step instruction, switch back.
     */
    int rc = REMR3State(pVM, pVCpu);
    if (RT_SUCCESS(rc))
    {
        rc = REMR3Step(pVM, pVCpu);
        REMR3StateBack(pVM, pVCpu);
    }
    EMRemUnlock(pVM);

#else
    int rc = VBOXSTRICTRC_TODO(IEMExecOne(pVCpu)); NOREF(pVM);
#endif

    Log3(("emR3RemStep: returns %Rrc cs:eip=%04x:%08x\n", rc, CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
    return rc;
}


/**
 * emR3RemExecute helper that syncs the state back from REM and leaves the REM
 * critical section.
 *
 * @returns false - new fInREMState value.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
DECLINLINE(bool) emR3RemExecuteSyncBack(PVM pVM, PVMCPU pVCpu)
{
#ifdef VBOX_WITH_REM
    STAM_PROFILE_START(&pVCpu->em.s.StatREMSync, a);
    REMR3StateBack(pVM, pVCpu);
    STAM_PROFILE_STOP(&pVCpu->em.s.StatREMSync, a);

    EMRemUnlock(pVM);
#endif
    return false;
}
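
/* Note: the constant 'false' return is deliberate: it is the new fInREMState
 * value, so the caller in emR3RemExecute below can write
 *     fInREMState = emR3RemExecuteSyncBack(pVM, pVCpu);
 * in a single statement. */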
1047
1048
1049/**
1050 * Executes recompiled code.
1051 *
1052 * This function contains the recompiler version of the inner
1053 * execution loop (the outer loop being in EMR3ExecuteVM()).
1054 *
1055 * @returns VBox status code. The most important ones are: VINF_EM_RESCHEDULE,
1056 * VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1057 *
1058 * @param pVM The cross context VM structure.
1059 * @param pVCpu The cross context virtual CPU structure.
1060 * @param pfFFDone Where to store an indicator telling whether or not
1061 * FFs were done before returning.
1062 *
1063 */
1064static int emR3RemExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
1065{
1066#ifdef LOG_ENABLED
1067 PCPUMCTX pCtx = pVCpu->em.s.pCtx;
1068 uint32_t cpl = CPUMGetGuestCPL(pVCpu);
1069
1070 if (pCtx->eflags.Bits.u1VM)
1071 Log(("EMV86: %04X:%08X IF=%d\n", pCtx->cs.Sel, pCtx->eip, pCtx->eflags.Bits.u1IF));
1072 else
1073 Log(("EMR%d: %04X:%08X ESP=%08X IF=%d CR0=%x eflags=%x\n", cpl, pCtx->cs.Sel, pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF, (uint32_t)pCtx->cr0, pCtx->eflags.u));
1074#endif
1075 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatREMTotal, a);
1076
1077#if defined(VBOX_STRICT) && defined(DEBUG_bird)
1078 AssertMsg( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)
1079 || !MMHyperIsInsideArea(pVM, CPUMGetGuestEIP(pVCpu)), /** @todo @bugref{1419} - get flat address. */
1080 ("cs:eip=%RX16:%RX32\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
1081#endif
1082
1083 /*
1084 * Spin till we get a forced action which returns anything but VINF_SUCCESS
1085 * or the REM suggests raw-mode execution.
1086 */
1087 *pfFFDone = false;
1088#ifdef VBOX_WITH_REM
1089 bool fInREMState = false;
1090#else
1091 uint32_t cLoops = 0;
1092#endif
1093 int rc = VINF_SUCCESS;
1094 for (;;)
1095 {
1096#ifdef VBOX_WITH_REM
1097 /*
1098 * Lock REM and update the state if not already in sync.
1099 *
1100 * Note! Big lock, but you are not supposed to own any lock when
1101 * coming in here.
1102 */
1103 if (!fInREMState)
1104 {
1105 EMRemLock(pVM);
1106 STAM_PROFILE_START(&pVCpu->em.s.StatREMSync, b);
1107
1108            /* If the VCPU has changed, flush the recompiler translation blocks
1109               and force a full CPU state resync. */
1110 if (pVM->em.s.idLastRemCpu != pVCpu->idCpu)
1111 {
1112 REMFlushTBs(pVM);
1113 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
1114 }
1115 pVM->em.s.idLastRemCpu = pVCpu->idCpu;
1116
1117 rc = REMR3State(pVM, pVCpu);
1118
1119 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMSync, b);
1120 if (RT_FAILURE(rc))
1121 break;
1122 fInREMState = true;
1123
1124 /*
1125 * We might have missed the raising of VMREQ, TIMER and some other
1126 * important FFs while we were busy switching the state. So, check again.
1127 */
1128 if ( VM_FF_IS_PENDING(pVM, VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_DBGF | VM_FF_CHECK_VM_STATE | VM_FF_RESET)
1129 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TIMER | VMCPU_FF_REQUEST))
1130 {
1131 LogFlow(("emR3RemExecute: Skipping run, because FF is set. %#x\n", pVM->fGlobalForcedActions));
1132 goto l_REMDoForcedActions;
1133 }
1134 }
1135#endif
1136
1137 /*
1138 * Execute REM.
1139 */
1140 if (RT_LIKELY(emR3IsExecutionAllowed(pVM, pVCpu)))
1141 {
1142 STAM_PROFILE_START(&pVCpu->em.s.StatREMExec, c);
1143#ifdef VBOX_WITH_REM
1144 rc = REMR3Run(pVM, pVCpu);
1145#else
1146 rc = VBOXSTRICTRC_TODO(IEMExecLots(pVCpu, NULL /*pcInstructions*/));
1147#endif
1148 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMExec, c);
1149 }
1150 else
1151 {
1152 /* Give up this time slice; virtual time continues */
1153 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatCapped, u);
1154 RTThreadSleep(5);
1155 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatCapped, u);
1156 rc = VINF_SUCCESS;
1157 }
1158
1159 /*
1160 * Deal with high priority post execution FFs before doing anything
1161 * else. Sync back the state and leave the lock to be on the safe side.
1162 */
1163 if ( VM_FF_IS_PENDING(pVM, VM_FF_HIGH_PRIORITY_POST_MASK)
1164 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_MASK))
1165 {
1166#ifdef VBOX_WITH_REM
1167 fInREMState = emR3RemExecuteSyncBack(pVM, pVCpu);
1168#endif
1169 rc = emR3HighPriorityPostForcedActions(pVM, pVCpu, rc);
1170 }
1171
1172 /*
1173 * Process the returned status code.
1174 */
1175 if (rc != VINF_SUCCESS)
1176 {
1177 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
1178 break;
1179 if (rc != VINF_REM_INTERRUPED_FF)
1180 {
1181#ifndef VBOX_WITH_REM
1182                /* Try to dodge unimplemented IEM trouble by rescheduling. */
1183 if ( rc == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1184 || rc == VERR_IEM_INSTR_NOT_IMPLEMENTED)
1185 {
1186 EMSTATE enmNewState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
1187 if (enmNewState != EMSTATE_REM && enmNewState != EMSTATE_IEM_THEN_REM)
1188 {
1189 rc = VINF_EM_RESCHEDULE;
1190 break;
1191 }
1192 }
1193#endif
1194
1195 /*
1196 * Anything which is not known to us means an internal error
1197 * and the termination of the VM!
1198 */
1199 AssertMsg(rc == VERR_REM_TOO_MANY_TRAPS, ("Unknown GC return code: %Rra\n", rc));
1200 break;
1201 }
1202 }
1203
1204
1205 /*
1206 * Check and execute forced actions.
1207 *
1208 * Sync back the VM state and leave the lock before calling any of
1209 * these, you never know what's going to happen here.
1210 */
1211#ifdef VBOX_HIGH_RES_TIMERS_HACK
1212 TMTimerPollVoid(pVM, pVCpu);
1213#endif
1214 AssertCompile(VMCPU_FF_ALL_REM_MASK & VMCPU_FF_TIMER);
1215 if ( VM_FF_IS_PENDING(pVM, VM_FF_ALL_REM_MASK)
1216 || VMCPU_FF_IS_PENDING(pVCpu,
1217 VMCPU_FF_ALL_REM_MASK
1218 & VM_WHEN_RAW_MODE(~(VMCPU_FF_CSAM_PENDING_ACTION | VMCPU_FF_CSAM_SCAN_PAGE), UINT32_MAX)) )
1219 {
1220#ifdef VBOX_WITH_REM
1221l_REMDoForcedActions:
1222 if (fInREMState)
1223 fInREMState = emR3RemExecuteSyncBack(pVM, pVCpu);
1224#endif
1225 STAM_REL_PROFILE_ADV_SUSPEND(&pVCpu->em.s.StatREMTotal, a);
1226 rc = emR3ForcedActions(pVM, pVCpu, rc);
1227 VBOXVMM_EM_FF_ALL_RET(pVCpu, rc);
1228 STAM_REL_PROFILE_ADV_RESUME(&pVCpu->em.s.StatREMTotal, a);
1229 if ( rc != VINF_SUCCESS
1230 && rc != VINF_EM_RESCHEDULE_REM)
1231 {
1232 *pfFFDone = true;
1233 break;
1234 }
1235 }
1236
1237#ifndef VBOX_WITH_REM
1238 /*
1239     * Check every so often whether we can get back to fast execution mode.
1240 */
1241 if (!(++cLoops & 7))
1242 {
1243 EMSTATE enmCheck = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
1244 if ( enmCheck != EMSTATE_REM
1245 && enmCheck != EMSTATE_IEM_THEN_REM)
1246 return VINF_EM_RESCHEDULE;
1247 }
1248#endif
1249
1250 } /* The Inner Loop, recompiled execution mode version. */
1251
1252
1253#ifdef VBOX_WITH_REM
1254 /*
1255 * Returning. Sync back the VM state if required.
1256 */
1257 if (fInREMState)
1258 fInREMState = emR3RemExecuteSyncBack(pVM, pVCpu);
1259#endif
1260
1261 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatREMTotal, a);
1262 return rc;
1263}
1264
1265
1266#ifdef DEBUG
1267
1268int emR3SingleStepExecRem(PVM pVM, PVMCPU pVCpu, uint32_t cIterations)
1269{
1270 EMSTATE enmOldState = pVCpu->em.s.enmState;
1271
1272 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_REM;
1273
1274 Log(("Single step BEGIN:\n"));
1275 for (uint32_t i = 0; i < cIterations; i++)
1276 {
1277 DBGFR3PrgStep(pVCpu);
1278 DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, "RSS");
1279 emR3RemStep(pVM, pVCpu);
1280 if (emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx) != EMSTATE_REM)
1281 break;
1282 }
1283 Log(("Single step END:\n"));
1284 CPUMSetGuestEFlags(pVCpu, CPUMGetGuestEFlags(pVCpu) & ~X86_EFL_TF);
1285 pVCpu->em.s.enmState = enmOldState;
1286 return VINF_EM_RESCHEDULE;
1287}
1288
1289#endif /* DEBUG */
1290
1291
1292/**
1293 * Try to execute the problematic code in IEM first, then fall back on REM if there
1294 * is too much of it or if IEM doesn't implement something.
1295 *
1296 * @returns Strict VBox status code from IEMExecLots.
1297 * @param pVM The cross context VM structure.
1298 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1299 * @param pfFFDone Force flags done indicator.
1300 *
1301 * @thread EMT(pVCpu)
1302 */
1303static VBOXSTRICTRC emR3ExecuteIemThenRem(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
1304{
1305 LogFlow(("emR3ExecuteIemThenRem: %04x:%RGv\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestRIP(pVCpu)));
1306 *pfFFDone = false;
1307
1308 /*
1309 * Execute in IEM for a while.
1310 */
1311 while (pVCpu->em.s.cIemThenRemInstructions < 1024)
1312 {
1313 uint32_t cInstructions;
1314 VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu, &cInstructions);
1315 pVCpu->em.s.cIemThenRemInstructions += cInstructions;
1316 if (rcStrict != VINF_SUCCESS)
1317 {
1318 if ( rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1319 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
1320 break;
1321
1322 Log(("emR3ExecuteIemThenRem: returns %Rrc after %u instructions\n",
1323 VBOXSTRICTRC_VAL(rcStrict), pVCpu->em.s.cIemThenRemInstructions));
1324 return rcStrict;
1325 }
1326
1327 EMSTATE enmNewState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
1328 if (enmNewState != EMSTATE_REM && enmNewState != EMSTATE_IEM_THEN_REM)
1329 {
1330 LogFlow(("emR3ExecuteIemThenRem: -> %d (%s) after %u instructions\n",
1331 enmNewState, emR3GetStateName(enmNewState), pVCpu->em.s.cIemThenRemInstructions));
1332 pVCpu->em.s.enmPrevState = pVCpu->em.s.enmState;
1333 pVCpu->em.s.enmState = enmNewState;
1334 return VINF_SUCCESS;
1335 }
1336
1337 /*
1338 * Check for pending actions.
1339 */
1340 if ( VM_FF_IS_PENDING(pVM, VM_FF_ALL_REM_MASK)
1341 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_ALL_REM_MASK))
1342 return VINF_SUCCESS;
1343 }
1344
1345 /*
1346 * Switch to REM.
1347 */
1348 Log(("emR3ExecuteIemThenRem: -> EMSTATE_REM (after %u instructions)\n", pVCpu->em.s.cIemThenRemInstructions));
1349 pVCpu->em.s.enmState = EMSTATE_REM;
1350 return VINF_SUCCESS;
1351}
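/* Note: the 1024-instruction budget above is cumulative across calls; it is
 * only reset when the scheduler (re-)enters EMSTATE_IEM_THEN_REM. A guest
 * that keeps landing in this path is therefore handed over to the recompiler
 * fairly quickly instead of being interpreted indefinitely. */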
1352
1353
1354/**
1355 * Decides whether to execute RAW, HWACC or REM.
1356 *
1357 * @returns new EM state
1358 * @param pVM The cross context VM structure.
1359 * @param pVCpu The cross context virtual CPU structure.
1360 * @param pCtx Pointer to the guest CPU context.
1361 */
1362EMSTATE emR3Reschedule(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
1363{
1364 /*
1365 * When forcing raw-mode execution, things are simple.
1366 */
1367 if (pVCpu->em.s.fForceRAW)
1368 return EMSTATE_RAW;
1369
1370 /*
1371 * We stay in the wait for SIPI state unless explicitly told otherwise.
1372 */
1373 if (pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI)
1374 return EMSTATE_WAIT_SIPI;
1375
1376 /*
1377 * Execute everything in IEM?
1378 */
1379 if (pVM->em.s.fIemExecutesAll)
1380 return EMSTATE_IEM;
1381
1382 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
1383 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
1384 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
1385
1386 X86EFLAGS EFlags = pCtx->eflags;
1387 if (HMIsEnabled(pVM))
1388 {
1389 /*
1390 * Hardware accelerated raw-mode:
1391 */
1392 if ( EMIsHwVirtExecutionEnabled(pVM)
1393 && HMR3CanExecuteGuest(pVM, pCtx))
1394 return EMSTATE_HM;
1395
1396 /*
1397 * Note! Raw mode and hw accelerated mode are incompatible. The latter
1398 * turns off monitoring features essential for raw mode!
1399 */
1400 return EMSTATE_IEM_THEN_REM;
1401 }
1402
1403 /*
1404 * Standard raw-mode:
1405 *
1406     * Here we only support 16 and 32-bit protected mode ring-3 code that has no I/O
1407     * privileges, or 32-bit protected mode ring-0 code.
1408 *
1409 * The tests are ordered by the likelihood of being true during normal execution.
1410 */
1411 if (EFlags.u32 & (X86_EFL_TF /* | HF_INHIBIT_IRQ_MASK*/))
1412 {
1413 Log2(("raw mode refused: EFlags=%#x\n", EFlags.u32));
1414 return EMSTATE_REM;
1415 }
1416
1417# ifndef VBOX_RAW_V86
1418 if (EFlags.u32 & X86_EFL_VM) {
1419 Log2(("raw mode refused: VM_MASK\n"));
1420 return EMSTATE_REM;
1421 }
1422# endif
1423
1424     /** @todo check the X86_CR0_AM flag with respect to raw mode!!! We're probably not emulating it right! */
1425 uint32_t u32CR0 = pCtx->cr0;
1426 if ((u32CR0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
1427 {
1428 //Log2(("raw mode refused: %s%s%s\n", (u32CR0 & X86_CR0_PG) ? "" : " !PG", (u32CR0 & X86_CR0_PE) ? "" : " !PE", (u32CR0 & X86_CR0_AM) ? "" : " !AM"));
1429 return EMSTATE_REM;
1430 }
1431
1432 if (pCtx->cr4 & X86_CR4_PAE)
1433 {
1434 uint32_t u32Dummy, u32Features;
1435
1436 CPUMGetGuestCpuId(pVCpu, 1, 0, &u32Dummy, &u32Dummy, &u32Dummy, &u32Features);
1437 if (!(u32Features & X86_CPUID_FEATURE_EDX_PAE))
1438 return EMSTATE_REM;
1439 }
1440
1441 unsigned uSS = pCtx->ss.Sel;
1442 if ( pCtx->eflags.Bits.u1VM
1443 || (uSS & X86_SEL_RPL) == 3)
1444 {
1445 if (!EMIsRawRing3Enabled(pVM))
1446 return EMSTATE_REM;
1447
1448 if (!(EFlags.u32 & X86_EFL_IF))
1449 {
1450 Log2(("raw mode refused: IF (RawR3)\n"));
1451 return EMSTATE_REM;
1452 }
1453
1454 if (!(u32CR0 & X86_CR0_WP) && EMIsRawRing0Enabled(pVM))
1455 {
1456 Log2(("raw mode refused: CR0.WP + RawR0\n"));
1457 return EMSTATE_REM;
1458 }
1459 }
1460 else
1461 {
1462 if (!EMIsRawRing0Enabled(pVM))
1463 return EMSTATE_REM;
1464
1465 if (EMIsRawRing1Enabled(pVM))
1466 {
1467 /* Only ring 0 and 1 supervisor code. */
1468 if ((uSS & X86_SEL_RPL) == 2) /* ring 1 code is moved into ring 2, so we can't support ring-2 in that case. */
1469 {
1470 Log2(("raw r0 mode refused: CPL %d\n", uSS & X86_SEL_RPL));
1471 return EMSTATE_REM;
1472 }
1473 }
1474 /* Only ring 0 supervisor code. */
1475 else if ((uSS & X86_SEL_RPL) != 0)
1476 {
1477 Log2(("raw r0 mode refused: CPL %d\n", uSS & X86_SEL_RPL));
1478 return EMSTATE_REM;
1479 }
1480
1481         // Let's start with pure 32-bit ring-0 code first
1482 /** @todo What's pure 32-bit mode? flat? */
1483 if ( !(pCtx->ss.Attr.n.u1DefBig)
1484 || !(pCtx->cs.Attr.n.u1DefBig))
1485 {
1486 Log2(("raw r0 mode refused: SS/CS not 32bit\n"));
1487 return EMSTATE_REM;
1488 }
1489
1490 /* Write protection must be turned on, or else the guest can overwrite our hypervisor code and data. */
1491 if (!(u32CR0 & X86_CR0_WP))
1492 {
1493 Log2(("raw r0 mode refused: CR0.WP=0!\n"));
1494 return EMSTATE_REM;
1495 }
1496
1497# ifdef VBOX_WITH_RAW_MODE
1498 if (PATMShouldUseRawMode(pVM, (RTGCPTR)pCtx->eip))
1499 {
1500 Log2(("raw r0 mode forced: patch code\n"));
1501# ifdef VBOX_WITH_SAFE_STR
1502 Assert(pCtx->tr.Sel);
1503# endif
1504 return EMSTATE_RAW;
1505 }
1506# endif /* VBOX_WITH_RAW_MODE */
1507
1508# if !defined(VBOX_ALLOW_IF0) && !defined(VBOX_RUN_INTERRUPT_GATE_HANDLERS)
1509 if (!(EFlags.u32 & X86_EFL_IF))
1510 {
1511 ////Log2(("R0: IF=0 VIF=%d %08X\n", eip, pVMeflags));
1512 //Log2(("RR0: Interrupts turned off; fall back to emulation\n"));
1513 return EMSTATE_REM;
1514 }
1515# endif
1516
1517# ifndef VBOX_WITH_RAW_RING1
1518 /** @todo still necessary??? */
1519 if (EFlags.Bits.u2IOPL != 0)
1520 {
1521 Log2(("raw r0 mode refused: IOPL %d\n", EFlags.Bits.u2IOPL));
1522 return EMSTATE_REM;
1523 }
1524# endif
1525 }
1526
1527 /*
1528     * Stale hidden selectors mean raw-mode is unsafe (being very careful).
1529 */
1530 if (pCtx->cs.fFlags & CPUMSELREG_FLAGS_STALE)
1531 {
1532 Log2(("raw mode refused: stale CS\n"));
1533 return EMSTATE_REM;
1534 }
1535 if (pCtx->ss.fFlags & CPUMSELREG_FLAGS_STALE)
1536 {
1537 Log2(("raw mode refused: stale SS\n"));
1538 return EMSTATE_REM;
1539 }
1540 if (pCtx->ds.fFlags & CPUMSELREG_FLAGS_STALE)
1541 {
1542 Log2(("raw mode refused: stale DS\n"));
1543 return EMSTATE_REM;
1544 }
1545 if (pCtx->es.fFlags & CPUMSELREG_FLAGS_STALE)
1546 {
1547 Log2(("raw mode refused: stale ES\n"));
1548 return EMSTATE_REM;
1549 }
1550 if (pCtx->fs.fFlags & CPUMSELREG_FLAGS_STALE)
1551 {
1552 Log2(("raw mode refused: stale FS\n"));
1553 return EMSTATE_REM;
1554 }
1555 if (pCtx->gs.fFlags & CPUMSELREG_FLAGS_STALE)
1556 {
1557 Log2(("raw mode refused: stale GS\n"));
1558 return EMSTATE_REM;
1559 }
1560
1561# ifdef VBOX_WITH_SAFE_STR
1562 if (pCtx->tr.Sel == 0)
1563 {
1564 Log(("Raw mode refused -> TR=0\n"));
1565 return EMSTATE_REM;
1566 }
1567# endif
1568
1569 /*Assert(PGMPhysIsA20Enabled(pVCpu));*/
1570 return EMSTATE_RAW;
1571}
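/* Informal summary of the checks above: raw-mode execution is only attempted
 * for paged protected mode guests (CR0.PG and CR0.PE set), outside V86 mode
 * (unless VBOX_RAW_V86), with either ring-3 code that has interrupts enabled
 * or 32-bit ring-0 code with CR0.WP set and IOPL 0, and only while none of
 * the hidden selector registers are stale. Everything else is sent to the
 * recompiler. */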
1572
1573
1574/**
1575 * Executes all high priority post execution force actions.
1576 *
1577 * @returns rc or a fatal status code.
1578 *
1579 * @param pVM The cross context VM structure.
1580 * @param pVCpu The cross context virtual CPU structure.
1581 * @param rc The current rc.
1582 */
1583int emR3HighPriorityPostForcedActions(PVM pVM, PVMCPU pVCpu, int rc)
1584{
1585 VBOXVMM_EM_FF_HIGH(pVCpu, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions, rc);
1586
1587 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PDM_CRITSECT))
1588 PDMCritSectBothFF(pVCpu);
1589
1590 /* Update CR3 (Nested Paging case for HM). */
1591 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
1592 {
1593 int rc2 = PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
1594 if (RT_FAILURE(rc2))
1595 return rc2;
1596 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
1597 }
1598
1599 /* Update PAE PDPEs. This must be done *after* PGMUpdateCR3() and used only by the Nested Paging case for HM. */
1600 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES))
1601 {
1602 if (CPUMIsGuestInPAEMode(pVCpu))
1603 {
1604 PX86PDPE pPdpes = HMGetPaePdpes(pVCpu);
1605 AssertPtr(pPdpes);
1606
1607 PGMGstUpdatePaePdpes(pVCpu, pPdpes);
1608 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
1609 }
1610 else
1611 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES);
1612 }
1613
1614 /* IEM has pending work (typically memory write after INS instruction). */
1615 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_IEM))
1616 rc = VBOXSTRICTRC_TODO(IEMR3ProcessForceFlag(pVM, pVCpu, rc));
1617
1618     /* IOM has pending work (committing an I/O or MMIO write). */
1619 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_IOM))
1620 rc = VBOXSTRICTRC_TODO(IOMR3ProcessForceFlag(pVM, pVCpu, rc));
1621
1622#ifdef VBOX_WITH_RAW_MODE
1623 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_CSAM_PENDING_ACTION))
1624 CSAMR3DoPendingAction(pVM, pVCpu);
1625#endif
1626
1627 if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
1628 {
1629 if ( rc > VINF_EM_NO_MEMORY
1630 && rc <= VINF_EM_LAST)
1631 rc = VINF_EM_NO_MEMORY;
1632 }
1633
1634 return rc;
1635}
1636
1637
1638/**
1639 * Executes all pending forced actions.
1640 *
1641 * Forced actions can cause execution delays and execution
1642 * rescheduling. The first we deal with using action priority, so
1643 * that for instance pending timers aren't scheduled and run until
1644 * right before execution. The rescheduling we deal with using
1645 * return codes. The same goes for VM termination, only in that case
1646 * we exit everything.
1647 *
1648 * @returns VBox status code of equal or greater importance/severity than rc.
1649 * The most important ones are: VINF_EM_RESCHEDULE,
1650 * VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1651 *
1652 * @param pVM The cross context VM structure.
1653 * @param pVCpu The cross context virtual CPU structure.
1654 * @param rc The current rc.
1655 *
1656 */
1657int emR3ForcedActions(PVM pVM, PVMCPU pVCpu, int rc)
1658{
1659 STAM_REL_PROFILE_START(&pVCpu->em.s.StatForcedActions, a);
1660#ifdef VBOX_STRICT
1661 int rcIrq = VINF_SUCCESS;
1662#endif
1663 int rc2;
1664#define UPDATE_RC() \
1665 do { \
1666 AssertMsg(rc2 <= 0 || (rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST), ("Invalid FF return code: %Rra\n", rc2)); \
1667 if (rc2 == VINF_SUCCESS || rc < VINF_SUCCESS) \
1668 break; \
1669 if (!rc || rc2 < rc) \
1670 rc = rc2; \
1671 } while (0)
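/* Illustrative example of the UPDATE_RC() merging rule (this assumes the
 * usual VINF_EM_* ordering, where numerically lower codes are more important):
 * starting from rc = VINF_SUCCESS, an rc2 of VINF_EM_RESCHEDULE is adopted;
 * a later rc2 of VINF_EM_SUSPEND, being lower, replaces it, while any higher
 * rc2 after that is ignored. Once rc holds an error status (rc < VINF_SUCCESS)
 * it is never overwritten. */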
1672 VBOXVMM_EM_FF_ALL(pVCpu, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions, rc);
1673
1674 /*
1675 * Post execution chunk first.
1676 */
1677 if ( VM_FF_IS_PENDING(pVM, VM_FF_NORMAL_PRIORITY_POST_MASK)
1678 || (VMCPU_FF_NORMAL_PRIORITY_POST_MASK && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_NORMAL_PRIORITY_POST_MASK)) )
1679 {
1680 /*
1681 * EMT Rendezvous (must be serviced before termination).
1682 */
1683 if (VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS))
1684 {
1685 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
1686 UPDATE_RC();
1687 /** @todo HACK ALERT! The following test is to make sure EM+TM
1688 * thinks the VM is stopped/reset before the next VM state change
1689 * is made. We need a better solution for this, or at least make it
1690 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1691 * VINF_EM_SUSPEND). */
1692 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1693 {
1694 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1695 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1696 return rc;
1697 }
1698 }
1699
1700 /*
1701 * State change request (cleared by vmR3SetStateLocked).
1702 */
1703 if (VM_FF_IS_PENDING(pVM, VM_FF_CHECK_VM_STATE))
1704 {
1705 VMSTATE enmState = VMR3GetState(pVM);
1706 switch (enmState)
1707 {
1708 case VMSTATE_FATAL_ERROR:
1709 case VMSTATE_FATAL_ERROR_LS:
1710 case VMSTATE_GURU_MEDITATION:
1711 case VMSTATE_GURU_MEDITATION_LS:
1712 Log2(("emR3ForcedActions: %s -> VINF_EM_SUSPEND\n", VMGetStateName(enmState) ));
1713 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1714 return VINF_EM_SUSPEND;
1715
1716 case VMSTATE_DESTROYING:
1717 Log2(("emR3ForcedActions: %s -> VINF_EM_TERMINATE\n", VMGetStateName(enmState) ));
1718 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1719 return VINF_EM_TERMINATE;
1720
1721 default:
1722 AssertMsgFailed(("%s\n", VMGetStateName(enmState)));
1723 }
1724 }
1725
1726 /*
1727 * Debugger Facility polling.
1728 */
1729 if ( VM_FF_IS_PENDING(pVM, VM_FF_DBGF)
1730 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_DBGF) )
1731 {
1732 rc2 = DBGFR3VMMForcedAction(pVM, pVCpu);
1733 UPDATE_RC();
1734 }
1735
1736 /*
1737 * Postponed reset request.
1738 */
1739 if (VM_FF_TEST_AND_CLEAR(pVM, VM_FF_RESET))
1740 {
1741 rc2 = VBOXSTRICTRC_TODO(VMR3ResetFF(pVM));
1742 UPDATE_RC();
1743 }
1744
1745#ifdef VBOX_WITH_RAW_MODE
1746 /*
1747 * CSAM page scanning.
1748 */
1749 if ( !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)
1750 && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_CSAM_SCAN_PAGE))
1751 {
1752 PCPUMCTX pCtx = pVCpu->em.s.pCtx;
1753
1754         /** @todo check for 16 or 32-bit code! (D bit in the code selector) */
1755 Log(("Forced action VMCPU_FF_CSAM_SCAN_PAGE\n"));
1756
1757 CSAMR3CheckCodeEx(pVM, pCtx, pCtx->eip);
1758 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_CSAM_SCAN_PAGE);
1759 }
1760#endif
1761
1762 /*
1763 * Out of memory? Putting this after CSAM as it may in theory cause us to run out of memory.
1764 */
1765 if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
1766 {
1767 rc2 = PGMR3PhysAllocateHandyPages(pVM);
1768 UPDATE_RC();
1769 if (rc == VINF_EM_NO_MEMORY)
1770 return rc;
1771 }
1772
1773 /* check that we got them all */
1774 AssertCompile(VM_FF_NORMAL_PRIORITY_POST_MASK == (VM_FF_CHECK_VM_STATE | VM_FF_DBGF | VM_FF_RESET | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
1775 AssertCompile(VMCPU_FF_NORMAL_PRIORITY_POST_MASK == (VM_WHEN_RAW_MODE(VMCPU_FF_CSAM_SCAN_PAGE, 0) | VMCPU_FF_DBGF));
1776 }
1777
1778 /*
1779 * Normal priority then.
1780 * (Executed in no particular order.)
1781 */
1782 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_NORMAL_PRIORITY_MASK, VM_FF_PGM_NO_MEMORY))
1783 {
1784 /*
1785 * PDM Queues are pending.
1786 */
1787 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PDM_QUEUES, VM_FF_PGM_NO_MEMORY))
1788 PDMR3QueueFlushAll(pVM);
1789
1790 /*
1791 * PDM DMA transfers are pending.
1792 */
1793 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PDM_DMA, VM_FF_PGM_NO_MEMORY))
1794 PDMR3DmaRun(pVM);
1795
1796 /*
1797 * EMT Rendezvous (make sure they are handled before the requests).
1798 */
1799 if (VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS))
1800 {
1801 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
1802 UPDATE_RC();
1803 /** @todo HACK ALERT! The following test is to make sure EM+TM
1804 * thinks the VM is stopped/reset before the next VM state change
1805 * is made. We need a better solution for this, or at least make it
1806 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1807 * VINF_EM_SUSPEND). */
1808 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1809 {
1810 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1811 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1812 return rc;
1813 }
1814 }
1815
1816 /*
1817 * Requests from other threads.
1818 */
1819 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_REQUEST, VM_FF_PGM_NO_MEMORY))
1820 {
1821 rc2 = VMR3ReqProcessU(pVM->pUVM, VMCPUID_ANY, false /*fPriorityOnly*/);
1822 if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE) /** @todo this shouldn't be necessary */
1823 {
1824 Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
1825 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1826 return rc2;
1827 }
1828 UPDATE_RC();
1829 /** @todo HACK ALERT! The following test is to make sure EM+TM
1830 * thinks the VM is stopped/reset before the next VM state change
1831 * is made. We need a better solution for this, or at least make it
1832 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1833 * VINF_EM_SUSPEND). */
1834 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1835 {
1836 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1837 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1838 return rc;
1839 }
1840 }
1841
1842#ifdef VBOX_WITH_REM
1843 /* Replay the handler notification changes. */
1844 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_REM_HANDLER_NOTIFY, VM_FF_PGM_NO_MEMORY))
1845 {
1846 /* Try not to cause deadlocks. */
1847 if ( pVM->cCpus == 1
1848 || ( !PGMIsLockOwner(pVM)
1849 && !IOMIsLockWriteOwner(pVM))
1850 )
1851 {
1852 EMRemLock(pVM);
1853 REMR3ReplayHandlerNotifications(pVM);
1854 EMRemUnlock(pVM);
1855 }
1856 }
1857#endif
1858
1859 /* check that we got them all */
1860 AssertCompile(VM_FF_NORMAL_PRIORITY_MASK == (VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA | VM_FF_REM_HANDLER_NOTIFY | VM_FF_EMT_RENDEZVOUS));
1861 }
1862
1863 /*
1864 * Normal priority then. (per-VCPU)
1865 * (Executed in no particular order.)
1866 */
1867 if ( !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)
1868 && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_NORMAL_PRIORITY_MASK))
1869 {
1870 /*
1871 * Requests from other threads.
1872 */
1873 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_REQUEST))
1874 {
1875 rc2 = VMR3ReqProcessU(pVM->pUVM, pVCpu->idCpu, false /*fPriorityOnly*/);
1876 if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE || rc2 == VINF_EM_RESET)
1877 {
1878 Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
1879 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1880 return rc2;
1881 }
1882 UPDATE_RC();
1883 /** @todo HACK ALERT! The following test is to make sure EM+TM
1884 * thinks the VM is stopped/reset before the next VM state change
1885 * is made. We need a better solution for this, or at least make it
1886 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1887 * VINF_EM_SUSPEND). */
1888 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1889 {
1890 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1891 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1892 return rc;
1893 }
1894 }
1895
1896 /*
1897 * Forced unhalting of EMT.
1898 */
1899 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_UNHALT))
1900 {
1901 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_UNHALT);
1902 if (rc == VINF_EM_HALT)
1903 rc = VINF_EM_RESCHEDULE;
1904 else
1905 {
1906 rc2 = VINF_EM_RESCHEDULE;
1907 UPDATE_RC();
1908 }
1909 }
1910
1911 /* check that we got them all */
1912 Assert(!(VMCPU_FF_NORMAL_PRIORITY_MASK & ~(VMCPU_FF_REQUEST | VMCPU_FF_UNHALT)));
1913 }
1914
1915 /*
1916 * High priority pre execution chunk last.
1917 * (Executed in ascending priority order.)
1918 */
1919 if ( VM_FF_IS_PENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_MASK)
1920 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_MASK))
1921 {
1922 /*
1923 * Timers before interrupts.
1924 */
1925 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TIMER)
1926 && !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
1927 TMR3TimerQueuesDo(pVM);
1928
1929#ifdef VBOX_WITH_NEW_APIC
1930 /*
1931 * Pick up asynchronously posted interrupts into the APIC.
1932 */
1933 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
1934 APICUpdatePendingInterrupts(pVCpu);
1935#endif
1936
1937 /*
1938 * The instruction following an emulated STI should *always* be executed!
1939 *
1940 * Note! We intentionally don't clear VM_FF_INHIBIT_INTERRUPTS here if
1941 * the eip is the same as the inhibited instr address. Before we
1942 * are able to execute this instruction in raw mode (iret to
1943 * guest code) an external interrupt might force a world switch
1944 * again. Possibly allowing a guest interrupt to be dispatched
1945 * in the process. This could break the guest. Sounds very
1946 * unlikely, but such timing sensitive problem are not as rare as
1947 * you might think.
1948 */
1949 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
1950 && !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
1951 {
1952 if (CPUMGetGuestRIP(pVCpu) != EMGetInhibitInterruptsPC(pVCpu))
1953 {
1954 Log(("Clearing VMCPU_FF_INHIBIT_INTERRUPTS at %RGv - successor %RGv\n", (RTGCPTR)CPUMGetGuestRIP(pVCpu), EMGetInhibitInterruptsPC(pVCpu)));
1955 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
1956 }
1957 else
1958 Log(("Leaving VMCPU_FF_INHIBIT_INTERRUPTS set at %RGv\n", (RTGCPTR)CPUMGetGuestRIP(pVCpu)));
1959 }
1960
1961 /*
1962 * Interrupts.
1963 */
1964 bool fWakeupPending = false;
1965 if ( !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)
1966 && !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
1967 && (!rc || rc >= VINF_EM_RESCHEDULE_HM)
1968 && !TRPMHasTrap(pVCpu) /* an interrupt could already be scheduled for dispatching in the recompiler. */
1969#ifdef VBOX_WITH_RAW_MODE
1970 && PATMAreInterruptsEnabled(pVM)
1971#else
1972 && (pVCpu->em.s.pCtx->eflags.u32 & X86_EFL_IF)
1973#endif
1974 && !HMR3IsEventPending(pVCpu))
1975 {
1976 Assert(pVCpu->em.s.enmState != EMSTATE_WAIT_SIPI);
1977 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
1978 {
1979 /* Note: it's important to make sure the return code from TRPMR3InjectEvent isn't ignored! */
1980 /** @todo this really isn't nice, should properly handle this */
1981 rc2 = TRPMR3InjectEvent(pVM, pVCpu, TRPM_HARDWARE_INT);
1982 if (pVM->em.s.fIemExecutesAll && (rc2 == VINF_EM_RESCHEDULE_REM || rc2 == VINF_EM_RESCHEDULE_HM || rc2 == VINF_EM_RESCHEDULE_RAW))
1983 rc2 = VINF_EM_RESCHEDULE;
1984#ifdef VBOX_STRICT
1985 rcIrq = rc2;
1986#endif
1987 UPDATE_RC();
1988 /* Reschedule required: We must not miss the wakeup below! */
1989 fWakeupPending = true;
1990 }
1991 }
1992
1993 /*
1994 * Allocate handy pages.
1995 */
1996 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PGM_NEED_HANDY_PAGES, VM_FF_PGM_NO_MEMORY))
1997 {
1998 rc2 = PGMR3PhysAllocateHandyPages(pVM);
1999 UPDATE_RC();
2000 }
2001
2002 /*
2003 * Debugger Facility request.
2004 */
2005 if ( ( VM_FF_IS_PENDING(pVM, VM_FF_DBGF)
2006 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_DBGF) )
2007 && !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY) )
2008 {
2009 rc2 = DBGFR3VMMForcedAction(pVM, pVCpu);
2010 UPDATE_RC();
2011 }
2012
2013 /*
2014 * EMT Rendezvous (must be serviced before termination).
2015 */
2016 if ( !fWakeupPending /* don't miss the wakeup from EMSTATE_HALTED! */
2017 && VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS))
2018 {
2019 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
2020 UPDATE_RC();
2021 /** @todo HACK ALERT! The following test is to make sure EM+TM thinks the VM is
2022 * stopped/reset before the next VM state change is made. We need a better
2023 * solution for this, or at least make it possible to do: (rc >= VINF_EM_FIRST
2024              * && rc <= VINF_EM_SUSPEND). */
2025 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
2026 {
2027 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2028 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2029 return rc;
2030 }
2031 }
2032
2033 /*
2034 * State change request (cleared by vmR3SetStateLocked).
2035 */
2036 if ( !fWakeupPending /* don't miss the wakeup from EMSTATE_HALTED! */
2037 && VM_FF_IS_PENDING(pVM, VM_FF_CHECK_VM_STATE))
2038 {
2039 VMSTATE enmState = VMR3GetState(pVM);
2040 switch (enmState)
2041 {
2042 case VMSTATE_FATAL_ERROR:
2043 case VMSTATE_FATAL_ERROR_LS:
2044 case VMSTATE_GURU_MEDITATION:
2045 case VMSTATE_GURU_MEDITATION_LS:
2046 Log2(("emR3ForcedActions: %s -> VINF_EM_SUSPEND\n", VMGetStateName(enmState) ));
2047 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2048 return VINF_EM_SUSPEND;
2049
2050 case VMSTATE_DESTROYING:
2051 Log2(("emR3ForcedActions: %s -> VINF_EM_TERMINATE\n", VMGetStateName(enmState) ));
2052 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2053 return VINF_EM_TERMINATE;
2054
2055 default:
2056 AssertMsgFailed(("%s\n", VMGetStateName(enmState)));
2057 }
2058 }
2059
2060 /*
2061 * Out of memory? Since most of our fellow high priority actions may cause us
2062 * to run out of memory, we're employing VM_FF_IS_PENDING_EXCEPT and putting this
2063 * at the end rather than the start. Also, VM_FF_TERMINATE has higher priority
2064 * than us since we can terminate without allocating more memory.
2065 */
2066 if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
2067 {
2068 rc2 = PGMR3PhysAllocateHandyPages(pVM);
2069 UPDATE_RC();
2070 if (rc == VINF_EM_NO_MEMORY)
2071 return rc;
2072 }
2073
2074 /*
2075 * If the virtual sync clock is still stopped, make TM restart it.
2076 */
2077 if (VM_FF_IS_PENDING(pVM, VM_FF_TM_VIRTUAL_SYNC))
2078 TMR3VirtualSyncFF(pVM, pVCpu);
2079
2080#ifdef DEBUG
2081 /*
2082 * Debug, pause the VM.
2083 */
2084 if (VM_FF_IS_PENDING(pVM, VM_FF_DEBUG_SUSPEND))
2085 {
2086 VM_FF_CLEAR(pVM, VM_FF_DEBUG_SUSPEND);
2087 Log(("emR3ForcedActions: returns VINF_EM_SUSPEND\n"));
2088 return VINF_EM_SUSPEND;
2089 }
2090#endif
2091
2092 /* check that we got them all */
2093 AssertCompile(VM_FF_HIGH_PRIORITY_PRE_MASK == (VM_FF_TM_VIRTUAL_SYNC | VM_FF_DBGF | VM_FF_CHECK_VM_STATE | VM_FF_DEBUG_SUSPEND | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
2094 AssertCompile(VMCPU_FF_HIGH_PRIORITY_PRE_MASK == (VMCPU_FF_TIMER | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_INHIBIT_INTERRUPTS | VMCPU_FF_DBGF | VM_WHEN_RAW_MODE(VMCPU_FF_SELM_SYNC_TSS | VMCPU_FF_TRPM_SYNC_IDT | VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT, 0)));
2095 }
2096
2097#undef UPDATE_RC
2098 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2099 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2100 Assert(rcIrq == VINF_SUCCESS || rcIrq == rc);
2101 return rc;
2102}
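/* Note on ordering: emR3ForcedActions() services the force-flag groups in four
 * tiers - the normal-priority post-execution chunk, normal-priority VM-wide
 * actions, normal-priority per-VCPU actions, and finally the high-priority
 * pre-execution chunk (timers, interrupt injection, handy-page allocation) -
 * so that timers and interrupts are handled as close as possible to the next
 * round of guest execution. */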
2103
2104
2105/**
2106 * Check if the preset execution time cap restricts guest execution scheduling.
2107 *
2108 * @returns true if allowed, false otherwise
2109 * @param pVM The cross context VM structure.
2110 * @param pVCpu The cross context virtual CPU structure.
2111 */
2112bool emR3IsExecutionAllowed(PVM pVM, PVMCPU pVCpu)
2113{
2114 uint64_t u64UserTime, u64KernelTime;
2115
2116 if ( pVM->uCpuExecutionCap != 100
2117 && RT_SUCCESS(RTThreadGetExecutionTimeMilli(&u64KernelTime, &u64UserTime)))
2118 {
2119 uint64_t u64TimeNow = RTTimeMilliTS();
2120 if (pVCpu->em.s.u64TimeSliceStart + EM_TIME_SLICE < u64TimeNow)
2121 {
2122 /* New time slice. */
2123 pVCpu->em.s.u64TimeSliceStart = u64TimeNow;
2124 pVCpu->em.s.u64TimeSliceStartExec = u64KernelTime + u64UserTime;
2125 pVCpu->em.s.u64TimeSliceExec = 0;
2126 }
2127 pVCpu->em.s.u64TimeSliceExec = u64KernelTime + u64UserTime - pVCpu->em.s.u64TimeSliceStartExec;
2128
2129 Log2(("emR3IsExecutionAllowed: start=%RX64 startexec=%RX64 exec=%RX64 (cap=%x)\n", pVCpu->em.s.u64TimeSliceStart, pVCpu->em.s.u64TimeSliceStartExec, pVCpu->em.s.u64TimeSliceExec, (EM_TIME_SLICE * pVM->uCpuExecutionCap) / 100));
2130 if (pVCpu->em.s.u64TimeSliceExec >= (EM_TIME_SLICE * pVM->uCpuExecutionCap) / 100)
2131 return false;
2132 }
2133 return true;
2134}
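/* Worked example (illustrative; assumes EM_TIME_SLICE is 100 ms): with
 * uCpuExecutionCap = 50, an EMT may accumulate at most 100 * 50 / 100 = 50 ms
 * of kernel+user execution time per slice. Once that budget is spent,
 * emR3IsExecutionAllowed() returns false and the execution loops back off
 * (e.g. the RTThreadSleep(5) in emR3RemExecute()) until a new slice starts. */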
2135
2136
2137/**
2138 * Execute VM.
2139 *
2140 * This function is the main loop of the VM. The emulation thread
2141 * calls this function when the VM has been successfully constructed
2142 * and we're ready to execute the VM.
2143 *
2144 * Returning from this function means that the VM is turned off or
2145 * suspended (state already saved) and deconstruction is next in line.
2146 *
2147 * All interaction from other threads is done using forced actions
2148 * and signaling of the wait object.
2149 *
2150 * @returns VBox status code; informational status codes may indicate failure.
2151 * @param pVM The cross context VM structure.
2152 * @param pVCpu The cross context virtual CPU structure.
2153 */
2154VMMR3_INT_DECL(int) EMR3ExecuteVM(PVM pVM, PVMCPU pVCpu)
2155{
2156 Log(("EMR3ExecuteVM: pVM=%p enmVMState=%d (%s) enmState=%d (%s) enmPrevState=%d (%s) fForceRAW=%RTbool\n",
2157 pVM,
2158 pVM->enmVMState, VMR3GetStateName(pVM->enmVMState),
2159 pVCpu->em.s.enmState, emR3GetStateName(pVCpu->em.s.enmState),
2160 pVCpu->em.s.enmPrevState, emR3GetStateName(pVCpu->em.s.enmPrevState),
2161 pVCpu->em.s.fForceRAW));
2162 VM_ASSERT_EMT(pVM);
2163 AssertMsg( pVCpu->em.s.enmState == EMSTATE_NONE
2164 || pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI
2165 || pVCpu->em.s.enmState == EMSTATE_SUSPENDED,
2166 ("%s\n", emR3GetStateName(pVCpu->em.s.enmState)));
2167
2168 int rc = setjmp(pVCpu->em.s.u.FatalLongJump);
2169 if (rc == 0)
2170 {
2171 /*
2172 * Start the virtual time.
2173 */
2174 TMR3NotifyResume(pVM, pVCpu);
2175
2176 /*
2177 * The Outer Main Loop.
2178 */
2179 bool fFFDone = false;
2180
2181 /* Reschedule right away to start in the right state. */
2182 rc = VINF_SUCCESS;
2183
2184 /* If resuming after a pause or a state load, restore the previous
2185                state, or else we'll start executing code. Otherwise, just reschedule. */
2186 if ( pVCpu->em.s.enmState == EMSTATE_SUSPENDED
2187 && ( pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
2188 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED))
2189 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
2190 else
2191 pVCpu->em.s.enmState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
2192 pVCpu->em.s.cIemThenRemInstructions = 0;
2193 Log(("EMR3ExecuteVM: enmState=%s\n", emR3GetStateName(pVCpu->em.s.enmState)));
2194
2195 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2196 for (;;)
2197 {
2198 /*
2199 * Before we can schedule anything (we're here because
2200 * scheduling is required) we must service any pending
2201 * forced actions to avoid any pending action causing
2202              * immediate rescheduling upon entering an inner loop.
2203 *
2204 * Do forced actions.
2205 */
2206 if ( !fFFDone
2207 && RT_SUCCESS(rc)
2208 && rc != VINF_EM_TERMINATE
2209 && rc != VINF_EM_OFF
2210 && ( VM_FF_IS_PENDING(pVM, VM_FF_ALL_REM_MASK)
2211 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_ALL_REM_MASK)))
2212 {
2213 rc = emR3ForcedActions(pVM, pVCpu, rc);
2214 VBOXVMM_EM_FF_ALL_RET(pVCpu, rc);
2215 if ( ( rc == VINF_EM_RESCHEDULE_REM
2216 || rc == VINF_EM_RESCHEDULE_HM)
2217 && pVCpu->em.s.fForceRAW)
2218 rc = VINF_EM_RESCHEDULE_RAW;
2219 }
2220 else if (fFFDone)
2221 fFFDone = false;
2222
2223 /*
2224 * Now what to do?
2225 */
2226 Log2(("EMR3ExecuteVM: rc=%Rrc\n", rc));
2227 EMSTATE const enmOldState = pVCpu->em.s.enmState;
2228 switch (rc)
2229 {
2230 /*
2231 * Keep doing what we're currently doing.
2232 */
2233 case VINF_SUCCESS:
2234 break;
2235
2236 /*
2237 * Reschedule - to raw-mode execution.
2238 */
2239 case VINF_EM_RESCHEDULE_RAW:
2240 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_RAW: %d -> %d (EMSTATE_RAW)\n", enmOldState, EMSTATE_RAW));
2241 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2242 pVCpu->em.s.enmState = EMSTATE_RAW;
2243 break;
2244
2245 /*
2246 * Reschedule - to hardware accelerated raw-mode execution.
2247 */
2248 case VINF_EM_RESCHEDULE_HM:
2249 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_HM: %d -> %d (EMSTATE_HM)\n", enmOldState, EMSTATE_HM));
2250 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2251 Assert(!pVCpu->em.s.fForceRAW);
2252 pVCpu->em.s.enmState = EMSTATE_HM;
2253 break;
2254
2255 /*
2256 * Reschedule - to recompiled execution.
2257 */
2258 case VINF_EM_RESCHEDULE_REM:
2259 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2260 if (HMIsEnabled(pVM))
2261 {
2262 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_REM: %d -> %d (EMSTATE_IEM_THEN_REM)\n",
2263 enmOldState, EMSTATE_IEM_THEN_REM));
2264 if (pVCpu->em.s.enmState != EMSTATE_IEM_THEN_REM)
2265 {
2266 pVCpu->em.s.enmState = EMSTATE_IEM_THEN_REM;
2267 pVCpu->em.s.cIemThenRemInstructions = 0;
2268 }
2269 }
2270 else
2271 {
2272 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_REM: %d -> %d (EMSTATE_REM)\n", enmOldState, EMSTATE_REM));
2273 pVCpu->em.s.enmState = EMSTATE_REM;
2274 }
2275 break;
2276
2277 /*
2278 * Resume.
2279 */
2280 case VINF_EM_RESUME:
2281 Log2(("EMR3ExecuteVM: VINF_EM_RESUME: %d -> VINF_EM_RESCHEDULE\n", enmOldState));
2282 /* Don't reschedule in the halted or wait for SIPI case. */
2283 if ( pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
2284 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED)
2285 {
2286 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
2287 break;
2288 }
2289 /* fall through and get scheduled. */
2290
2291 /*
2292 * Reschedule.
2293 */
2294 case VINF_EM_RESCHEDULE:
2295 {
2296 EMSTATE enmState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
2297 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE: %d -> %d (%s)\n", enmOldState, enmState, emR3GetStateName(enmState)));
2298 if (pVCpu->em.s.enmState != enmState && enmState == EMSTATE_IEM_THEN_REM)
2299 pVCpu->em.s.cIemThenRemInstructions = 0;
2300 pVCpu->em.s.enmState = enmState;
2301 break;
2302 }
2303
2304 /*
2305 * Halted.
2306 */
2307 case VINF_EM_HALT:
2308 Log2(("EMR3ExecuteVM: VINF_EM_HALT: %d -> %d\n", enmOldState, EMSTATE_HALTED));
2309 pVCpu->em.s.enmState = EMSTATE_HALTED;
2310 break;
2311
2312 /*
2313 * Switch to the wait for SIPI state (application processor only)
2314 */
2315 case VINF_EM_WAIT_SIPI:
2316 Assert(pVCpu->idCpu != 0);
2317 Log2(("EMR3ExecuteVM: VINF_EM_WAIT_SIPI: %d -> %d\n", enmOldState, EMSTATE_WAIT_SIPI));
2318 pVCpu->em.s.enmState = EMSTATE_WAIT_SIPI;
2319 break;
2320
2321
2322 /*
2323 * Suspend.
2324 */
2325 case VINF_EM_SUSPEND:
2326 Log2(("EMR3ExecuteVM: VINF_EM_SUSPEND: %d -> %d\n", enmOldState, EMSTATE_SUSPENDED));
2327 Assert(enmOldState != EMSTATE_SUSPENDED);
2328 pVCpu->em.s.enmPrevState = enmOldState;
2329 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
2330 break;
2331
2332 /*
2333 * Reset.
2334              * We might end up doing a double reset for now; we'll have to clean up the mess later.
2335 */
2336 case VINF_EM_RESET:
2337 {
2338 if (pVCpu->idCpu == 0)
2339 {
2340 EMSTATE enmState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
2341 Log2(("EMR3ExecuteVM: VINF_EM_RESET: %d -> %d (%s)\n", enmOldState, enmState, emR3GetStateName(enmState)));
2342 if (pVCpu->em.s.enmState != enmState && enmState == EMSTATE_IEM_THEN_REM)
2343 pVCpu->em.s.cIemThenRemInstructions = 0;
2344 pVCpu->em.s.enmState = enmState;
2345 }
2346 else
2347 {
2348 /* All other VCPUs go into the wait for SIPI state. */
2349 pVCpu->em.s.enmState = EMSTATE_WAIT_SIPI;
2350 }
2351 break;
2352 }
2353
2354 /*
2355 * Power Off.
2356 */
2357 case VINF_EM_OFF:
2358 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2359 Log2(("EMR3ExecuteVM: returns VINF_EM_OFF (%d -> %d)\n", enmOldState, EMSTATE_TERMINATING));
2360 TMR3NotifySuspend(pVM, pVCpu);
2361 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2362 return rc;
2363
2364 /*
2365 * Terminate the VM.
2366 */
2367 case VINF_EM_TERMINATE:
2368 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2369 Log(("EMR3ExecuteVM returns VINF_EM_TERMINATE (%d -> %d)\n", enmOldState, EMSTATE_TERMINATING));
2370 if (pVM->enmVMState < VMSTATE_DESTROYING) /* ugly */
2371 TMR3NotifySuspend(pVM, pVCpu);
2372 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2373 return rc;
2374
2375
2376 /*
2377 * Out of memory, suspend the VM and stuff.
2378 */
2379 case VINF_EM_NO_MEMORY:
2380 Log2(("EMR3ExecuteVM: VINF_EM_NO_MEMORY: %d -> %d\n", enmOldState, EMSTATE_SUSPENDED));
2381 Assert(enmOldState != EMSTATE_SUSPENDED);
2382 pVCpu->em.s.enmPrevState = enmOldState;
2383 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
2384 TMR3NotifySuspend(pVM, pVCpu);
2385 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2386
2387 rc = VMSetRuntimeError(pVM, VMSETRTERR_FLAGS_SUSPEND, "HostMemoryLow",
2388 N_("Unable to allocate and lock memory. The virtual machine will be paused. Please close applications to free up memory or close the VM"));
2389 if (rc != VINF_EM_SUSPEND)
2390 {
2391 if (RT_SUCCESS_NP(rc))
2392 {
2393 AssertLogRelMsgFailed(("%Rrc\n", rc));
2394 rc = VERR_EM_INTERNAL_ERROR;
2395 }
2396 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2397 }
2398 return rc;
2399
2400 /*
2401 * Guest debug events.
2402 */
2403 case VINF_EM_DBG_STEPPED:
2404 case VINF_EM_DBG_STOP:
2405 case VINF_EM_DBG_EVENT:
2406 case VINF_EM_DBG_BREAKPOINT:
2407 case VINF_EM_DBG_STEP:
2408 if (enmOldState == EMSTATE_RAW)
2409 {
2410 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_RAW));
2411 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_RAW;
2412 }
2413 else if (enmOldState == EMSTATE_HM)
2414 {
2415 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_HM));
2416 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_HM;
2417 }
2418 else if (enmOldState == EMSTATE_REM)
2419 {
2420 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_REM));
2421 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_REM;
2422 }
2423 else
2424 {
2425 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_IEM));
2426 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_IEM;
2427 }
2428 break;
2429
2430 /*
2431 * Hypervisor debug events.
2432 */
2433 case VINF_EM_DBG_HYPER_STEPPED:
2434 case VINF_EM_DBG_HYPER_BREAKPOINT:
2435 case VINF_EM_DBG_HYPER_ASSERTION:
2436 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_HYPER));
2437 pVCpu->em.s.enmState = EMSTATE_DEBUG_HYPER;
2438 break;
2439
2440 /*
2441 * Triple fault.
2442 */
2443 case VINF_EM_TRIPLE_FAULT:
2444 if (!pVM->em.s.fGuruOnTripleFault)
2445 {
2446 Log(("EMR3ExecuteVM: VINF_EM_TRIPLE_FAULT: CPU reset...\n"));
2447 rc = VBOXSTRICTRC_TODO(VMR3ResetTripleFault(pVM));
2448 Log2(("EMR3ExecuteVM: VINF_EM_TRIPLE_FAULT: %d -> %d (rc=%Rrc)\n", enmOldState, pVCpu->em.s.enmState, rc));
2449 continue;
2450 }
2451 /* Else fall through and trigger a guru. */
2452 case VERR_VMM_RING0_ASSERTION:
2453 Log(("EMR3ExecuteVM: %Rrc: %d -> %d (EMSTATE_GURU_MEDITATION)\n", rc, enmOldState, EMSTATE_GURU_MEDITATION));
2454 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2455 break;
2456
2457 /*
2458 * Any error code showing up here other than the ones we
2459 * know and process above are considered to be FATAL.
2460 *
2461 * Unknown warnings and informational status codes are also
2462 * included in this.
2463 */
2464 default:
2465 if (RT_SUCCESS_NP(rc))
2466 {
2467 AssertMsgFailed(("Unexpected warning or informational status code %Rra!\n", rc));
2468 rc = VERR_EM_INTERNAL_ERROR;
2469 }
2470 Log(("EMR3ExecuteVM: %Rrc: %d -> %d (EMSTATE_GURU_MEDITATION)\n", rc, enmOldState, EMSTATE_GURU_MEDITATION));
2471 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2472 break;
2473 }
2474
2475 /*
2476 * Act on state transition.
2477 */
2478 EMSTATE const enmNewState = pVCpu->em.s.enmState;
2479 if (enmOldState != enmNewState)
2480 {
2481 VBOXVMM_EM_STATE_CHANGED(pVCpu, enmOldState, enmNewState, rc);
2482
2483 /* Clear MWait flags. */
2484 if ( enmOldState == EMSTATE_HALTED
2485 && (pVCpu->em.s.MWait.fWait & EMMWAIT_FLAG_ACTIVE)
2486 && ( enmNewState == EMSTATE_RAW
2487 || enmNewState == EMSTATE_HM
2488 || enmNewState == EMSTATE_REM
2489 || enmNewState == EMSTATE_IEM_THEN_REM
2490 || enmNewState == EMSTATE_DEBUG_GUEST_RAW
2491 || enmNewState == EMSTATE_DEBUG_GUEST_HM
2492 || enmNewState == EMSTATE_DEBUG_GUEST_IEM
2493 || enmNewState == EMSTATE_DEBUG_GUEST_REM) )
2494 {
2495 LogFlow(("EMR3ExecuteVM: Clearing MWAIT\n"));
2496 pVCpu->em.s.MWait.fWait &= ~(EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0);
2497 }
2498 }
2499 else
2500 VBOXVMM_EM_STATE_UNCHANGED(pVCpu, enmNewState, rc);
2501
2502 STAM_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x); /* (skip this in release) */
2503 STAM_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2504
2505 /*
2506 * Act on the new state.
2507 */
2508 switch (enmNewState)
2509 {
2510 /*
2511 * Execute raw.
2512 */
2513 case EMSTATE_RAW:
2514#ifdef VBOX_WITH_RAW_MODE
2515 rc = emR3RawExecute(pVM, pVCpu, &fFFDone);
2516#else
2517 AssertLogRelMsgFailed(("%Rrc\n", rc));
2518 rc = VERR_EM_INTERNAL_ERROR;
2519#endif
2520 break;
2521
2522 /*
2523 * Execute hardware accelerated raw.
2524 */
2525 case EMSTATE_HM:
2526 rc = emR3HmExecute(pVM, pVCpu, &fFFDone);
2527 break;
2528
2529 /*
2530 * Execute recompiled.
2531 */
2532 case EMSTATE_REM:
2533 rc = emR3RemExecute(pVM, pVCpu, &fFFDone);
2534 Log2(("EMR3ExecuteVM: emR3RemExecute -> %Rrc\n", rc));
2535 break;
2536
2537 /*
2538 * Execute in the interpreter.
2539 */
2540 case EMSTATE_IEM:
2541 {
2542#if 0 /* For testing purposes. */
2543 STAM_PROFILE_START(&pVCpu->em.s.StatHmExec, x1);
2544 rc = VBOXSTRICTRC_TODO(EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE));
2545 STAM_PROFILE_STOP(&pVCpu->em.s.StatHmExec, x1);
2546 if (rc == VINF_EM_DBG_STEPPED || rc == VINF_EM_RESCHEDULE_HM || rc == VINF_EM_RESCHEDULE_REM || rc == VINF_EM_RESCHEDULE_RAW)
2547 rc = VINF_SUCCESS;
2548 else if (rc == VERR_EM_CANNOT_EXEC_GUEST)
2549#endif
2550 rc = VBOXSTRICTRC_TODO(IEMExecLots(pVCpu, NULL /*pcInstructions*/));
2551 if (pVM->em.s.fIemExecutesAll)
2552 {
2553 Assert(rc != VINF_EM_RESCHEDULE_REM);
2554 Assert(rc != VINF_EM_RESCHEDULE_RAW);
2555 Assert(rc != VINF_EM_RESCHEDULE_HM);
2556 }
2557 fFFDone = false;
2558 break;
2559 }
2560
2561 /*
2562              * Execute in IEM, hoping we can quickly switch back to HM
2563 * or RAW execution. If our hopes fail, we go to REM.
2564 */
2565 case EMSTATE_IEM_THEN_REM:
2566 {
2567 STAM_PROFILE_START(&pVCpu->em.s.StatIEMThenREM, pIemThenRem);
2568 rc = VBOXSTRICTRC_TODO(emR3ExecuteIemThenRem(pVM, pVCpu, &fFFDone));
2569 STAM_PROFILE_STOP(&pVCpu->em.s.StatIEMThenREM, pIemThenRem);
2570 break;
2571 }
2572
2573 /*
2574 * Application processor execution halted until SIPI.
2575 */
2576 case EMSTATE_WAIT_SIPI:
2577 /* no break */
2578 /*
2579 * hlt - execution halted until interrupt.
2580 */
2581 case EMSTATE_HALTED:
2582 {
2583 STAM_REL_PROFILE_START(&pVCpu->em.s.StatHalted, y);
2584                    /* If HM (or someone else) stores a pending interrupt in
2585                       TRPM, it must be dispatched ASAP without any halting.
2586                       Anything pending in TRPM has been accepted and the CPU
2587                       should already be in the right state to receive it. */
2588 if (TRPMHasTrap(pVCpu))
2589 rc = VINF_EM_RESCHEDULE;
2590 /* MWAIT has a special extension where it's woken up when
2591 an interrupt is pending even when IF=0. */
2592 else if ( (pVCpu->em.s.MWait.fWait & (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
2593 == (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
2594 {
2595 rc = VMR3WaitHalted(pVM, pVCpu, false /*fIgnoreInterrupts*/);
2596 if (rc == VINF_SUCCESS)
2597 {
2598#ifdef VBOX_WITH_NEW_APIC
2599 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
2600 APICUpdatePendingInterrupts(pVCpu);
2601#endif
2602 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC
2603 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT))
2604 {
2605 Log(("EMR3ExecuteVM: Triggering reschedule on pending IRQ after MWAIT\n"));
2606 rc = VINF_EM_RESCHEDULE;
2607 }
2608 }
2609 }
2610 else
2611 {
2612 rc = VMR3WaitHalted(pVM, pVCpu, !(CPUMGetGuestEFlags(pVCpu) & X86_EFL_IF));
2613 /* We're only interested in NMI/SMIs here which have their own FFs, so we don't need to
2614 check VMCPU_FF_UPDATE_APIC here. */
2615 if ( rc == VINF_SUCCESS
2616 && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT))
2617 {
2618 Log(("EMR3ExecuteVM: Triggering reschedule on pending NMI/SMI/UNHALT after HLT\n"));
2619 rc = VINF_EM_RESCHEDULE;
2620 }
2621 }
2622
2623 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatHalted, y);
2624 break;
2625 }
2626
2627 /*
2628 * Suspended - return to VM.cpp.
2629 */
2630 case EMSTATE_SUSPENDED:
2631 TMR3NotifySuspend(pVM, pVCpu);
2632 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2633 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2634 return VINF_EM_SUSPEND;
2635
2636 /*
2637 * Debugging in the guest.
2638 */
2639 case EMSTATE_DEBUG_GUEST_RAW:
2640 case EMSTATE_DEBUG_GUEST_HM:
2641 case EMSTATE_DEBUG_GUEST_IEM:
2642 case EMSTATE_DEBUG_GUEST_REM:
2643 TMR3NotifySuspend(pVM, pVCpu);
2644 rc = VBOXSTRICTRC_TODO(emR3Debug(pVM, pVCpu, rc));
2645 TMR3NotifyResume(pVM, pVCpu);
2646 Log2(("EMR3ExecuteVM: emR3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
2647 break;
2648
2649 /*
2650 * Debugging in the hypervisor.
2651 */
2652 case EMSTATE_DEBUG_HYPER:
2653 {
2654 TMR3NotifySuspend(pVM, pVCpu);
2655 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2656
2657 rc = VBOXSTRICTRC_TODO(emR3Debug(pVM, pVCpu, rc));
2658 Log2(("EMR3ExecuteVM: emR3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
2659 if (rc != VINF_SUCCESS)
2660 {
2661 if (rc == VINF_EM_OFF || rc == VINF_EM_TERMINATE)
2662 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2663 else
2664 {
2665 /* switch to guru meditation mode */
2666 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2667 VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
2668 VMMR3FatalDump(pVM, pVCpu, rc);
2669 }
2670 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2671 return rc;
2672 }
2673
2674 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2675 TMR3NotifyResume(pVM, pVCpu);
2676 break;
2677 }
2678
2679 /*
2680 * Guru meditation takes place in the debugger.
2681 */
2682 case EMSTATE_GURU_MEDITATION:
2683 {
2684 TMR3NotifySuspend(pVM, pVCpu);
2685 VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
2686 VMMR3FatalDump(pVM, pVCpu, rc);
2687 emR3Debug(pVM, pVCpu, rc);
2688 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2689 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2690 return rc;
2691 }
2692
2693 /*
2694 * The states we don't expect here.
2695 */
2696 case EMSTATE_NONE:
2697 case EMSTATE_TERMINATING:
2698 default:
2699 AssertMsgFailed(("EMR3ExecuteVM: Invalid state %d!\n", pVCpu->em.s.enmState));
2700 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2701 TMR3NotifySuspend(pVM, pVCpu);
2702 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2703 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2704 return VERR_EM_INTERNAL_ERROR;
2705 }
2706 } /* The Outer Main Loop */
2707 }
2708 else
2709 {
2710 /*
2711 * Fatal error.
2712 */
2713 Log(("EMR3ExecuteVM: returns %Rrc because of longjmp / fatal error; (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(pVCpu->em.s.enmPrevState)));
2714 TMR3NotifySuspend(pVM, pVCpu);
2715 VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
2716 VMMR3FatalDump(pVM, pVCpu, rc);
2717 emR3Debug(pVM, pVCpu, rc);
2718 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2719 /** @todo change the VM state! */
2720 return rc;
2721 }
2722
2723 /* (won't ever get here). */
2724 AssertFailed();
2725}
2726
2727/**
2728 * Notify EM of a state change (used by FTM)
2729 *
2730 * @param pVM The cross context VM structure.
2731 */
2732VMMR3_INT_DECL(int) EMR3NotifySuspend(PVM pVM)
2733{
2734 PVMCPU pVCpu = VMMGetCpu(pVM);
2735
2736 TMR3NotifySuspend(pVM, pVCpu); /* Stop the virtual time. */
2737 pVCpu->em.s.enmPrevState = pVCpu->em.s.enmState;
2738 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
2739 return VINF_SUCCESS;
2740}
2741
2742/**
2743 * Notify EM of a state change (used by FTM)
2744 *
2745 * @param pVM The cross context VM structure.
2746 */
2747VMMR3_INT_DECL(int) EMR3NotifyResume(PVM pVM)
2748{
2749 PVMCPU pVCpu = VMMGetCpu(pVM);
2750 EMSTATE enmCurState = pVCpu->em.s.enmState;
2751
2752 TMR3NotifyResume(pVM, pVCpu); /* Resume the virtual time. */
2753 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
2754 pVCpu->em.s.enmPrevState = enmCurState;
2755 return VINF_SUCCESS;
2756}
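/* Note: the two notification APIs above are meant to be used as a pair.
 * EMR3NotifySuspend() stashes the current scheduling state in enmPrevState and
 * parks the EMT in EMSTATE_SUSPENDED; EMR3NotifyResume() swaps the two back so
 * that the VCPU resumes in whatever state it was in before the FTM-initiated
 * suspend. */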