VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/EM.cpp@65381

Last change on this file since 65381 was 64626, checked in by vboxsync, 8 years ago

Recompiler, VMM, Devices: Purge the old APIC and the VBOX_WITH_NEW_APIC define.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 124.9 KB
1/* $Id: EM.cpp 64626 2016-11-10 10:31:39Z vboxsync $ */
2/** @file
3 * EM - Execution Monitor / Manager.
4 */
5
6/*
7 * Copyright (C) 2006-2016 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/** @page pg_em EM - The Execution Monitor / Manager
19 *
20 * The Execution Monitor/Manager is responsible for running the VM, scheduling
21 * the right kind of execution (Raw-mode, Hardware Assisted, Recompiled or
22 * Interpreted), and keeping the CPU states in sync. The function
23 * EMR3ExecuteVM() is the 'main-loop' of the VM, while each of the execution
24 * modes has different inner loops (emR3RawExecute, emR3HmExecute, and
25 * emR3RemExecute).
26 *
27 * The interpreted execution is only used to avoid switching between
28 * raw-mode/hm and the recompiler when fielding virtualization traps/faults.
29 * The interpretation is thus implemented as part of EM.
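 *
 * As a rough sketch (illustrative only; the real EMR3ExecuteVM handles many
 * more states, forced actions and status codes), the outer loop dispatches
 * along these lines:
 * @code
 *     bool fFFDone = false;
 *     for (;;)
 *     {
 *         int rc;
 *         switch (pVCpu->em.s.enmState)
 *         {
 *             case EMSTATE_RAW: rc = emR3RawExecute(pVM, pVCpu, &fFFDone); break;
 *             case EMSTATE_HM:  rc = emR3HmExecute(pVM, pVCpu, &fFFDone);  break;
 *             case EMSTATE_REM: rc = emR3RemExecute(pVM, pVCpu, &fFFDone); break;
 *             default:          rc = VINF_EM_SUSPEND;                      break;
 *         }
 *         // ...process rc and pending forced actions, then pick the next state...
 *     }
 * @endcode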
30 *
31 * @see grp_em
32 */
33
34
35/*********************************************************************************************************************************
36* Header Files *
37*********************************************************************************************************************************/
38#define LOG_GROUP LOG_GROUP_EM
39#include <VBox/vmm/em.h>
40#include <VBox/vmm/vmm.h>
41#include <VBox/vmm/patm.h>
42#include <VBox/vmm/csam.h>
43#include <VBox/vmm/selm.h>
44#include <VBox/vmm/trpm.h>
45#include <VBox/vmm/iem.h>
46#include <VBox/vmm/iom.h>
47#include <VBox/vmm/dbgf.h>
48#include <VBox/vmm/pgm.h>
49#ifdef VBOX_WITH_REM
50# include <VBox/vmm/rem.h>
51#endif
52#include <VBox/vmm/apic.h>
53#include <VBox/vmm/tm.h>
54#include <VBox/vmm/mm.h>
55#include <VBox/vmm/ssm.h>
56#include <VBox/vmm/pdmapi.h>
57#include <VBox/vmm/pdmcritsect.h>
58#include <VBox/vmm/pdmqueue.h>
59#include <VBox/vmm/hm.h>
60#include <VBox/vmm/patm.h>
61#include "EMInternal.h"
62#include <VBox/vmm/vm.h>
63#include <VBox/vmm/uvm.h>
64#include <VBox/vmm/cpumdis.h>
65#include <VBox/dis.h>
66#include <VBox/disopcode.h>
67#include "VMMTracing.h"
68
69#include <iprt/asm.h>
70#include <iprt/string.h>
71#include <iprt/stream.h>
72#include <iprt/thread.h>
73
74
75/*********************************************************************************************************************************
76* Defined Constants And Macros *
77*********************************************************************************************************************************/
78#if 0 /* Disabled till after 2.1.0 when we've time to test it. */
79#define EM_NOTIFY_HM
80#endif
81
82
83/*********************************************************************************************************************************
84* Internal Functions *
85*********************************************************************************************************************************/
86static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM);
87static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
88#if defined(LOG_ENABLED) || defined(VBOX_STRICT)
89static const char *emR3GetStateName(EMSTATE enmState);
90#endif
91static VBOXSTRICTRC emR3Debug(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc);
92#if defined(VBOX_WITH_REM) || defined(DEBUG)
93static int emR3RemStep(PVM pVM, PVMCPU pVCpu);
94#endif
95static int emR3RemExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone);
96int emR3HighPriorityPostForcedActions(PVM pVM, PVMCPU pVCpu, int rc);
97
98
99/**
100 * Initializes the EM.
101 *
102 * @returns VBox status code.
103 * @param pVM The cross context VM structure.
104 */
105VMMR3_INT_DECL(int) EMR3Init(PVM pVM)
106{
107 LogFlow(("EMR3Init\n"));
108 /*
109 * Assert alignment and sizes.
110 */
111 AssertCompileMemberAlignment(VM, em.s, 32);
112 AssertCompile(sizeof(pVM->em.s) <= sizeof(pVM->em.padding));
113 AssertCompile(sizeof(pVM->aCpus[0].em.s.u.FatalLongJump) <= sizeof(pVM->aCpus[0].em.s.u.achPaddingFatalLongJump));
114
115 /*
116 * Init the structure.
117 */
118 pVM->em.s.offVM = RT_OFFSETOF(VM, em.s);
119 PCFGMNODE pCfgRoot = CFGMR3GetRoot(pVM);
120 PCFGMNODE pCfgEM = CFGMR3GetChild(pCfgRoot, "EM");
121
122 bool fEnabled;
123 int rc = CFGMR3QueryBoolDef(pCfgRoot, "RawR3Enabled", &fEnabled, true);
124 AssertLogRelRCReturn(rc, rc);
125 pVM->fRecompileUser = !fEnabled;
126
127 rc = CFGMR3QueryBoolDef(pCfgRoot, "RawR0Enabled", &fEnabled, true);
128 AssertLogRelRCReturn(rc, rc);
129 pVM->fRecompileSupervisor = !fEnabled;
130
131#ifdef VBOX_WITH_RAW_RING1
132 rc = CFGMR3QueryBoolDef(pCfgRoot, "RawR1Enabled", &pVM->fRawRing1Enabled, false);
133 AssertLogRelRCReturn(rc, rc);
134#else
135 pVM->fRawRing1Enabled = false; /* Disabled by default. */
136#endif
137
138 rc = CFGMR3QueryBoolDef(pCfgEM, "IemExecutesAll", &pVM->em.s.fIemExecutesAll, false);
139 AssertLogRelRCReturn(rc, rc);
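    /* Note: like other CFGM keys, the EM settings can typically be supplied
       from the host side via extradata, e.g. (illustrative, exact key path
       assumed):
           VBoxManage setextradata "MyVM" "VBoxInternal/EM/IemExecutesAll" 1 */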
140
141 rc = CFGMR3QueryBoolDef(pCfgEM, "TripleFaultReset", &fEnabled, false);
142 AssertLogRelRCReturn(rc, rc);
143 pVM->em.s.fGuruOnTripleFault = !fEnabled;
144 if (!pVM->em.s.fGuruOnTripleFault && pVM->cCpus > 1)
145 {
146 LogRel(("EM: Overriding /EM/TripleFaultReset, must be false on SMP.\n"));
147 pVM->em.s.fGuruOnTripleFault = true;
148 }
149
150 Log(("EMR3Init: fRecompileUser=%RTbool fRecompileSupervisor=%RTbool fRawRing1Enabled=%RTbool fIemExecutesAll=%RTbool fGuruOnTripleFault=%RTbool\n",
151 pVM->fRecompileUser, pVM->fRecompileSupervisor, pVM->fRawRing1Enabled, pVM->em.s.fIemExecutesAll, pVM->em.s.fGuruOnTripleFault));
152
153#ifdef VBOX_WITH_REM
154 /*
155 * Initialize the REM critical section.
156 */
157 AssertCompileMemberAlignment(EM, CritSectREM, sizeof(uintptr_t));
158 rc = PDMR3CritSectInit(pVM, &pVM->em.s.CritSectREM, RT_SRC_POS, "EM-REM");
159 AssertRCReturn(rc, rc);
160#endif
161
162 /*
163 * Saved state.
164 */
165 rc = SSMR3RegisterInternal(pVM, "em", 0, EM_SAVED_STATE_VERSION, 16,
166 NULL, NULL, NULL,
167 NULL, emR3Save, NULL,
168 NULL, emR3Load, NULL);
169 if (RT_FAILURE(rc))
170 return rc;
171
172 for (VMCPUID i = 0; i < pVM->cCpus; i++)
173 {
174 PVMCPU pVCpu = &pVM->aCpus[i];
175
176 pVCpu->em.s.enmState = (i == 0) ? EMSTATE_NONE : EMSTATE_WAIT_SIPI;
177 pVCpu->em.s.enmPrevState = EMSTATE_NONE;
178 pVCpu->em.s.fForceRAW = false;
179
180 pVCpu->em.s.pCtx = CPUMQueryGuestCtxPtr(pVCpu);
181#ifdef VBOX_WITH_RAW_MODE
182 if (!HMIsEnabled(pVM))
183 {
184 pVCpu->em.s.pPatmGCState = PATMR3QueryGCStateHC(pVM);
185 AssertMsg(pVCpu->em.s.pPatmGCState, ("PATMR3QueryGCStateHC failed!\n"));
186 }
187#endif
188
189 /* Force reset of the time slice. */
190 pVCpu->em.s.u64TimeSliceStart = 0;
191
192# define EM_REG_COUNTER(a, b, c) \
193 rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, c, b, i); \
194 AssertRC(rc);
195
196# define EM_REG_COUNTER_USED(a, b, c) \
197 rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, c, b, i); \
198 AssertRC(rc);
199
200# define EM_REG_PROFILE(a, b, c) \
201 rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, i); \
202 AssertRC(rc);
203
204# define EM_REG_PROFILE_ADV(a, b, c) \
205 rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE_ADV, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, i); \
206 AssertRC(rc);
207
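    /* For example (illustrative), EM_REG_PROFILE(&pStats->StatR3Emulate,
       "/EM/CPU%d/R3/Interpret", "...") expands to a STAMR3RegisterF call that
       formats the per-VCPU index 'i' into the sample name, so each VCPU gets
       its own statistics node. */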
208 /*
209 * Statistics.
210 */
211#ifdef VBOX_WITH_STATISTICS
212 PEMSTATS pStats;
213 rc = MMHyperAlloc(pVM, sizeof(*pStats), 0, MM_TAG_EM, (void **)&pStats);
214 if (RT_FAILURE(rc))
215 return rc;
216
217 pVCpu->em.s.pStatsR3 = pStats;
218 pVCpu->em.s.pStatsR0 = MMHyperR3ToR0(pVM, pStats);
219 pVCpu->em.s.pStatsRC = MMHyperR3ToRC(pVM, pStats);
220
221 EM_REG_PROFILE(&pStats->StatRZEmulate, "/EM/CPU%d/RZ/Interpret", "Profiling of EMInterpretInstruction.");
222 EM_REG_PROFILE(&pStats->StatR3Emulate, "/EM/CPU%d/R3/Interpret", "Profiling of EMInterpretInstruction.");
223
224 EM_REG_PROFILE(&pStats->StatRZInterpretSucceeded, "/EM/CPU%d/RZ/Interpret/Success", "The number of times an instruction was successfully interpreted.");
225 EM_REG_PROFILE(&pStats->StatR3InterpretSucceeded, "/EM/CPU%d/R3/Interpret/Success", "The number of times an instruction was successfully interpreted.");
226
227 EM_REG_COUNTER_USED(&pStats->StatRZAnd, "/EM/CPU%d/RZ/Interpret/Success/And", "The number of times AND was successfully interpreted.");
228 EM_REG_COUNTER_USED(&pStats->StatR3And, "/EM/CPU%d/R3/Interpret/Success/And", "The number of times AND was successfully interpreted.");
229 EM_REG_COUNTER_USED(&pStats->StatRZAdd, "/EM/CPU%d/RZ/Interpret/Success/Add", "The number of times ADD was successfully interpreted.");
230 EM_REG_COUNTER_USED(&pStats->StatR3Add, "/EM/CPU%d/R3/Interpret/Success/Add", "The number of times ADD was successfully interpreted.");
231 EM_REG_COUNTER_USED(&pStats->StatRZAdc, "/EM/CPU%d/RZ/Interpret/Success/Adc", "The number of times ADC was successfully interpreted.");
232 EM_REG_COUNTER_USED(&pStats->StatR3Adc, "/EM/CPU%d/R3/Interpret/Success/Adc", "The number of times ADC was successfully interpreted.");
233 EM_REG_COUNTER_USED(&pStats->StatRZSub, "/EM/CPU%d/RZ/Interpret/Success/Sub", "The number of times SUB was successfully interpreted.");
234 EM_REG_COUNTER_USED(&pStats->StatR3Sub, "/EM/CPU%d/R3/Interpret/Success/Sub", "The number of times SUB was successfully interpreted.");
235 EM_REG_COUNTER_USED(&pStats->StatRZCpuId, "/EM/CPU%d/RZ/Interpret/Success/CpuId", "The number of times CPUID was successfully interpreted.");
236 EM_REG_COUNTER_USED(&pStats->StatR3CpuId, "/EM/CPU%d/R3/Interpret/Success/CpuId", "The number of times CPUID was successfully interpreted.");
237 EM_REG_COUNTER_USED(&pStats->StatRZDec, "/EM/CPU%d/RZ/Interpret/Success/Dec", "The number of times DEC was successfully interpreted.");
238 EM_REG_COUNTER_USED(&pStats->StatR3Dec, "/EM/CPU%d/R3/Interpret/Success/Dec", "The number of times DEC was successfully interpreted.");
239 EM_REG_COUNTER_USED(&pStats->StatRZHlt, "/EM/CPU%d/RZ/Interpret/Success/Hlt", "The number of times HLT was successfully interpreted.");
240 EM_REG_COUNTER_USED(&pStats->StatR3Hlt, "/EM/CPU%d/R3/Interpret/Success/Hlt", "The number of times HLT was successfully interpreted.");
241 EM_REG_COUNTER_USED(&pStats->StatRZInc, "/EM/CPU%d/RZ/Interpret/Success/Inc", "The number of times INC was successfully interpreted.");
242 EM_REG_COUNTER_USED(&pStats->StatR3Inc, "/EM/CPU%d/R3/Interpret/Success/Inc", "The number of times INC was successfully interpreted.");
243 EM_REG_COUNTER_USED(&pStats->StatRZInvlPg, "/EM/CPU%d/RZ/Interpret/Success/Invlpg", "The number of times INVLPG was successfully interpreted.");
244 EM_REG_COUNTER_USED(&pStats->StatR3InvlPg, "/EM/CPU%d/R3/Interpret/Success/Invlpg", "The number of times INVLPG was successfully interpreted.");
245 EM_REG_COUNTER_USED(&pStats->StatRZIret, "/EM/CPU%d/RZ/Interpret/Success/Iret", "The number of times IRET was successfully interpreted.");
246 EM_REG_COUNTER_USED(&pStats->StatR3Iret, "/EM/CPU%d/R3/Interpret/Success/Iret", "The number of times IRET was successfully interpreted.");
247 EM_REG_COUNTER_USED(&pStats->StatRZLLdt, "/EM/CPU%d/RZ/Interpret/Success/LLdt", "The number of times LLDT was successfully interpreted.");
248 EM_REG_COUNTER_USED(&pStats->StatR3LLdt, "/EM/CPU%d/R3/Interpret/Success/LLdt", "The number of times LLDT was successfully interpreted.");
249 EM_REG_COUNTER_USED(&pStats->StatRZLIdt, "/EM/CPU%d/RZ/Interpret/Success/LIdt", "The number of times LIDT was successfully interpreted.");
250 EM_REG_COUNTER_USED(&pStats->StatR3LIdt, "/EM/CPU%d/R3/Interpret/Success/LIdt", "The number of times LIDT was successfully interpreted.");
251 EM_REG_COUNTER_USED(&pStats->StatRZLGdt, "/EM/CPU%d/RZ/Interpret/Success/LGdt", "The number of times LGDT was successfully interpreted.");
252 EM_REG_COUNTER_USED(&pStats->StatR3LGdt, "/EM/CPU%d/R3/Interpret/Success/LGdt", "The number of times LGDT was successfully interpreted.");
253 EM_REG_COUNTER_USED(&pStats->StatRZMov, "/EM/CPU%d/RZ/Interpret/Success/Mov", "The number of times MOV was successfully interpreted.");
254 EM_REG_COUNTER_USED(&pStats->StatR3Mov, "/EM/CPU%d/R3/Interpret/Success/Mov", "The number of times MOV was successfully interpreted.");
255 EM_REG_COUNTER_USED(&pStats->StatRZMovCRx, "/EM/CPU%d/RZ/Interpret/Success/MovCRx", "The number of times MOV CRx was successfully interpreted.");
256 EM_REG_COUNTER_USED(&pStats->StatR3MovCRx, "/EM/CPU%d/R3/Interpret/Success/MovCRx", "The number of times MOV CRx was successfully interpreted.");
257 EM_REG_COUNTER_USED(&pStats->StatRZMovDRx, "/EM/CPU%d/RZ/Interpret/Success/MovDRx", "The number of times MOV DRx was successfully interpreted.");
258 EM_REG_COUNTER_USED(&pStats->StatR3MovDRx, "/EM/CPU%d/R3/Interpret/Success/MovDRx", "The number of times MOV DRx was successfully interpreted.");
259 EM_REG_COUNTER_USED(&pStats->StatRZOr, "/EM/CPU%d/RZ/Interpret/Success/Or", "The number of times OR was successfully interpreted.");
260 EM_REG_COUNTER_USED(&pStats->StatR3Or, "/EM/CPU%d/R3/Interpret/Success/Or", "The number of times OR was successfully interpreted.");
261 EM_REG_COUNTER_USED(&pStats->StatRZPop, "/EM/CPU%d/RZ/Interpret/Success/Pop", "The number of times POP was successfully interpreted.");
262 EM_REG_COUNTER_USED(&pStats->StatR3Pop, "/EM/CPU%d/R3/Interpret/Success/Pop", "The number of times POP was successfully interpreted.");
263 EM_REG_COUNTER_USED(&pStats->StatRZRdtsc, "/EM/CPU%d/RZ/Interpret/Success/Rdtsc", "The number of times RDTSC was successfully interpreted.");
264 EM_REG_COUNTER_USED(&pStats->StatR3Rdtsc, "/EM/CPU%d/R3/Interpret/Success/Rdtsc", "The number of times RDTSC was successfully interpreted.");
265 EM_REG_COUNTER_USED(&pStats->StatRZRdpmc, "/EM/CPU%d/RZ/Interpret/Success/Rdpmc", "The number of times RDPMC was successfully interpreted.");
266 EM_REG_COUNTER_USED(&pStats->StatR3Rdpmc, "/EM/CPU%d/R3/Interpret/Success/Rdpmc", "The number of times RDPMC was successfully interpreted.");
267 EM_REG_COUNTER_USED(&pStats->StatRZSti, "/EM/CPU%d/RZ/Interpret/Success/Sti", "The number of times STI was successfully interpreted.");
268 EM_REG_COUNTER_USED(&pStats->StatR3Sti, "/EM/CPU%d/R3/Interpret/Success/Sti", "The number of times STI was successfully interpreted.");
269 EM_REG_COUNTER_USED(&pStats->StatRZXchg, "/EM/CPU%d/RZ/Interpret/Success/Xchg", "The number of times XCHG was successfully interpreted.");
270 EM_REG_COUNTER_USED(&pStats->StatR3Xchg, "/EM/CPU%d/R3/Interpret/Success/Xchg", "The number of times XCHG was successfully interpreted.");
271 EM_REG_COUNTER_USED(&pStats->StatRZXor, "/EM/CPU%d/RZ/Interpret/Success/Xor", "The number of times XOR was successfully interpreted.");
272 EM_REG_COUNTER_USED(&pStats->StatR3Xor, "/EM/CPU%d/R3/Interpret/Success/Xor", "The number of times XOR was successfully interpreted.");
273 EM_REG_COUNTER_USED(&pStats->StatRZMonitor, "/EM/CPU%d/RZ/Interpret/Success/Monitor", "The number of times MONITOR was successfully interpreted.");
274 EM_REG_COUNTER_USED(&pStats->StatR3Monitor, "/EM/CPU%d/R3/Interpret/Success/Monitor", "The number of times MONITOR was successfully interpreted.");
275 EM_REG_COUNTER_USED(&pStats->StatRZMWait, "/EM/CPU%d/RZ/Interpret/Success/MWait", "The number of times MWAIT was successfully interpreted.");
276 EM_REG_COUNTER_USED(&pStats->StatR3MWait, "/EM/CPU%d/R3/Interpret/Success/MWait", "The number of times MWAIT was successfully interpreted.");
277 EM_REG_COUNTER_USED(&pStats->StatRZBtr, "/EM/CPU%d/RZ/Interpret/Success/Btr", "The number of times BTR was successfully interpreted.");
278 EM_REG_COUNTER_USED(&pStats->StatR3Btr, "/EM/CPU%d/R3/Interpret/Success/Btr", "The number of times BTR was successfully interpreted.");
279 EM_REG_COUNTER_USED(&pStats->StatRZBts, "/EM/CPU%d/RZ/Interpret/Success/Bts", "The number of times BTS was successfully interpreted.");
280 EM_REG_COUNTER_USED(&pStats->StatR3Bts, "/EM/CPU%d/R3/Interpret/Success/Bts", "The number of times BTS was successfully interpreted.");
281 EM_REG_COUNTER_USED(&pStats->StatRZBtc, "/EM/CPU%d/RZ/Interpret/Success/Btc", "The number of times BTC was successfully interpreted.");
282 EM_REG_COUNTER_USED(&pStats->StatR3Btc, "/EM/CPU%d/R3/Interpret/Success/Btc", "The number of times BTC was successfully interpreted.");
283 EM_REG_COUNTER_USED(&pStats->StatRZCmpXchg, "/EM/CPU%d/RZ/Interpret/Success/CmpXchg", "The number of times CMPXCHG was successfully interpreted.");
284 EM_REG_COUNTER_USED(&pStats->StatR3CmpXchg, "/EM/CPU%d/R3/Interpret/Success/CmpXchg", "The number of times CMPXCHG was successfully interpreted.");
285 EM_REG_COUNTER_USED(&pStats->StatRZCmpXchg8b, "/EM/CPU%d/RZ/Interpret/Success/CmpXchg8b", "The number of times CMPXCHG8B was successfully interpreted.");
286 EM_REG_COUNTER_USED(&pStats->StatR3CmpXchg8b, "/EM/CPU%d/R3/Interpret/Success/CmpXchg8b", "The number of times CMPXCHG8B was successfully interpreted.");
287 EM_REG_COUNTER_USED(&pStats->StatRZXAdd, "/EM/CPU%d/RZ/Interpret/Success/XAdd", "The number of times XADD was successfully interpreted.");
288 EM_REG_COUNTER_USED(&pStats->StatR3XAdd, "/EM/CPU%d/R3/Interpret/Success/XAdd", "The number of times XADD was successfully interpreted.");
289 EM_REG_COUNTER_USED(&pStats->StatR3Rdmsr, "/EM/CPU%d/R3/Interpret/Success/Rdmsr", "The number of times RDMSR was successfully interpreted.");
290 EM_REG_COUNTER_USED(&pStats->StatRZRdmsr, "/EM/CPU%d/RZ/Interpret/Success/Rdmsr", "The number of times RDMSR was successfully interpreted.");
291 EM_REG_COUNTER_USED(&pStats->StatR3Wrmsr, "/EM/CPU%d/R3/Interpret/Success/Wrmsr", "The number of times WRMSR was successfully interpreted.");
292 EM_REG_COUNTER_USED(&pStats->StatRZWrmsr, "/EM/CPU%d/RZ/Interpret/Success/Wrmsr", "The number of times WRMSR was successfully interpreted.");
293 EM_REG_COUNTER_USED(&pStats->StatR3StosWD, "/EM/CPU%d/R3/Interpret/Success/Stoswd", "The number of times STOSWD was successfully interpreted.");
294 EM_REG_COUNTER_USED(&pStats->StatRZStosWD, "/EM/CPU%d/RZ/Interpret/Success/Stoswd", "The number of times STOSWD was successfully interpreted.");
295 EM_REG_COUNTER_USED(&pStats->StatRZWbInvd, "/EM/CPU%d/RZ/Interpret/Success/WbInvd", "The number of times WBINVD was successfully interpreted.");
296 EM_REG_COUNTER_USED(&pStats->StatR3WbInvd, "/EM/CPU%d/R3/Interpret/Success/WbInvd", "The number of times WBINVD was successfully interpreted.");
297 EM_REG_COUNTER_USED(&pStats->StatRZLmsw, "/EM/CPU%d/RZ/Interpret/Success/Lmsw", "The number of times LMSW was successfully interpreted.");
298 EM_REG_COUNTER_USED(&pStats->StatR3Lmsw, "/EM/CPU%d/R3/Interpret/Success/Lmsw", "The number of times LMSW was successfully interpreted.");
299 EM_REG_COUNTER_USED(&pStats->StatRZSmsw, "/EM/CPU%d/RZ/Interpret/Success/Smsw", "The number of times SMSW was successfully interpreted.");
300 EM_REG_COUNTER_USED(&pStats->StatR3Smsw, "/EM/CPU%d/R3/Interpret/Success/Smsw", "The number of times SMSW was successfully interpreted.");
301
302 EM_REG_COUNTER(&pStats->StatRZInterpretFailed, "/EM/CPU%d/RZ/Interpret/Failed", "The number of times an instruction was not interpreted.");
303 EM_REG_COUNTER(&pStats->StatR3InterpretFailed, "/EM/CPU%d/R3/Interpret/Failed", "The number of times an instruction was not interpreted.");
304
305 EM_REG_COUNTER_USED(&pStats->StatRZFailedAnd, "/EM/CPU%d/RZ/Interpret/Failed/And", "The number of times AND was not interpreted.");
306 EM_REG_COUNTER_USED(&pStats->StatR3FailedAnd, "/EM/CPU%d/R3/Interpret/Failed/And", "The number of times AND was not interpreted.");
307 EM_REG_COUNTER_USED(&pStats->StatRZFailedCpuId, "/EM/CPU%d/RZ/Interpret/Failed/CpuId", "The number of times CPUID was not interpreted.");
308 EM_REG_COUNTER_USED(&pStats->StatR3FailedCpuId, "/EM/CPU%d/R3/Interpret/Failed/CpuId", "The number of times CPUID was not interpreted.");
309 EM_REG_COUNTER_USED(&pStats->StatRZFailedDec, "/EM/CPU%d/RZ/Interpret/Failed/Dec", "The number of times DEC was not interpreted.");
310 EM_REG_COUNTER_USED(&pStats->StatR3FailedDec, "/EM/CPU%d/R3/Interpret/Failed/Dec", "The number of times DEC was not interpreted.");
311 EM_REG_COUNTER_USED(&pStats->StatRZFailedHlt, "/EM/CPU%d/RZ/Interpret/Failed/Hlt", "The number of times HLT was not interpreted.");
312 EM_REG_COUNTER_USED(&pStats->StatR3FailedHlt, "/EM/CPU%d/R3/Interpret/Failed/Hlt", "The number of times HLT was not interpreted.");
313 EM_REG_COUNTER_USED(&pStats->StatRZFailedInc, "/EM/CPU%d/RZ/Interpret/Failed/Inc", "The number of times INC was not interpreted.");
314 EM_REG_COUNTER_USED(&pStats->StatR3FailedInc, "/EM/CPU%d/R3/Interpret/Failed/Inc", "The number of times INC was not interpreted.");
315 EM_REG_COUNTER_USED(&pStats->StatRZFailedInvlPg, "/EM/CPU%d/RZ/Interpret/Failed/InvlPg", "The number of times INVLPG was not interpreted.");
316 EM_REG_COUNTER_USED(&pStats->StatR3FailedInvlPg, "/EM/CPU%d/R3/Interpret/Failed/InvlPg", "The number of times INVLPG was not interpreted.");
317 EM_REG_COUNTER_USED(&pStats->StatRZFailedIret, "/EM/CPU%d/RZ/Interpret/Failed/Iret", "The number of times IRET was not interpreted.");
318 EM_REG_COUNTER_USED(&pStats->StatR3FailedIret, "/EM/CPU%d/R3/Interpret/Failed/Iret", "The number of times IRET was not interpreted.");
319 EM_REG_COUNTER_USED(&pStats->StatRZFailedLLdt, "/EM/CPU%d/RZ/Interpret/Failed/LLdt", "The number of times LLDT was not interpreted.");
320 EM_REG_COUNTER_USED(&pStats->StatR3FailedLLdt, "/EM/CPU%d/R3/Interpret/Failed/LLdt", "The number of times LLDT was not interpreted.");
321 EM_REG_COUNTER_USED(&pStats->StatRZFailedLIdt, "/EM/CPU%d/RZ/Interpret/Failed/LIdt", "The number of times LIDT was not interpreted.");
322 EM_REG_COUNTER_USED(&pStats->StatR3FailedLIdt, "/EM/CPU%d/R3/Interpret/Failed/LIdt", "The number of times LIDT was not interpreted.");
323 EM_REG_COUNTER_USED(&pStats->StatRZFailedLGdt, "/EM/CPU%d/RZ/Interpret/Failed/LGdt", "The number of times LGDT was not interpreted.");
324 EM_REG_COUNTER_USED(&pStats->StatR3FailedLGdt, "/EM/CPU%d/R3/Interpret/Failed/LGdt", "The number of times LGDT was not interpreted.");
325 EM_REG_COUNTER_USED(&pStats->StatRZFailedMov, "/EM/CPU%d/RZ/Interpret/Failed/Mov", "The number of times MOV was not interpreted.");
326 EM_REG_COUNTER_USED(&pStats->StatR3FailedMov, "/EM/CPU%d/R3/Interpret/Failed/Mov", "The number of times MOV was not interpreted.");
327 EM_REG_COUNTER_USED(&pStats->StatRZFailedMovCRx, "/EM/CPU%d/RZ/Interpret/Failed/MovCRx", "The number of times MOV CRx was not interpreted.");
328 EM_REG_COUNTER_USED(&pStats->StatR3FailedMovCRx, "/EM/CPU%d/R3/Interpret/Failed/MovCRx", "The number of times MOV CRx was not interpreted.");
329 EM_REG_COUNTER_USED(&pStats->StatRZFailedMovDRx, "/EM/CPU%d/RZ/Interpret/Failed/MovDRx", "The number of times MOV DRx was not interpreted.");
330 EM_REG_COUNTER_USED(&pStats->StatR3FailedMovDRx, "/EM/CPU%d/R3/Interpret/Failed/MovDRx", "The number of times MOV DRx was not interpreted.");
331 EM_REG_COUNTER_USED(&pStats->StatRZFailedOr, "/EM/CPU%d/RZ/Interpret/Failed/Or", "The number of times OR was not interpreted.");
332 EM_REG_COUNTER_USED(&pStats->StatR3FailedOr, "/EM/CPU%d/R3/Interpret/Failed/Or", "The number of times OR was not interpreted.");
333 EM_REG_COUNTER_USED(&pStats->StatRZFailedPop, "/EM/CPU%d/RZ/Interpret/Failed/Pop", "The number of times POP was not interpreted.");
334 EM_REG_COUNTER_USED(&pStats->StatR3FailedPop, "/EM/CPU%d/R3/Interpret/Failed/Pop", "The number of times POP was not interpreted.");
335 EM_REG_COUNTER_USED(&pStats->StatRZFailedSti, "/EM/CPU%d/RZ/Interpret/Failed/Sti", "The number of times STI was not interpreted.");
336 EM_REG_COUNTER_USED(&pStats->StatR3FailedSti, "/EM/CPU%d/R3/Interpret/Failed/Sti", "The number of times STI was not interpreted.");
337 EM_REG_COUNTER_USED(&pStats->StatRZFailedXchg, "/EM/CPU%d/RZ/Interpret/Failed/Xchg", "The number of times XCHG was not interpreted.");
338 EM_REG_COUNTER_USED(&pStats->StatR3FailedXchg, "/EM/CPU%d/R3/Interpret/Failed/Xchg", "The number of times XCHG was not interpreted.");
339 EM_REG_COUNTER_USED(&pStats->StatRZFailedXor, "/EM/CPU%d/RZ/Interpret/Failed/Xor", "The number of times XOR was not interpreted.");
340 EM_REG_COUNTER_USED(&pStats->StatR3FailedXor, "/EM/CPU%d/R3/Interpret/Failed/Xor", "The number of times XOR was not interpreted.");
341 EM_REG_COUNTER_USED(&pStats->StatRZFailedMonitor, "/EM/CPU%d/RZ/Interpret/Failed/Monitor", "The number of times MONITOR was not interpreted.");
342 EM_REG_COUNTER_USED(&pStats->StatR3FailedMonitor, "/EM/CPU%d/R3/Interpret/Failed/Monitor", "The number of times MONITOR was not interpreted.");
343 EM_REG_COUNTER_USED(&pStats->StatRZFailedMWait, "/EM/CPU%d/RZ/Interpret/Failed/MWait", "The number of times MWAIT was not interpreted.");
344 EM_REG_COUNTER_USED(&pStats->StatR3FailedMWait, "/EM/CPU%d/R3/Interpret/Failed/MWait", "The number of times MWAIT was not interpreted.");
345 EM_REG_COUNTER_USED(&pStats->StatRZFailedRdtsc, "/EM/CPU%d/RZ/Interpret/Failed/Rdtsc", "The number of times RDTSC was not interpreted.");
346 EM_REG_COUNTER_USED(&pStats->StatR3FailedRdtsc, "/EM/CPU%d/R3/Interpret/Failed/Rdtsc", "The number of times RDTSC was not interpreted.");
347 EM_REG_COUNTER_USED(&pStats->StatRZFailedRdpmc, "/EM/CPU%d/RZ/Interpret/Failed/Rdpmc", "The number of times RDPMC was not interpreted.");
348 EM_REG_COUNTER_USED(&pStats->StatR3FailedRdpmc, "/EM/CPU%d/R3/Interpret/Failed/Rdpmc", "The number of times RDPMC was not interpreted.");
349 EM_REG_COUNTER_USED(&pStats->StatRZFailedRdmsr, "/EM/CPU%d/RZ/Interpret/Failed/Rdmsr", "The number of times RDMSR was not interpreted.");
350 EM_REG_COUNTER_USED(&pStats->StatR3FailedRdmsr, "/EM/CPU%d/R3/Interpret/Failed/Rdmsr", "The number of times RDMSR was not interpreted.");
351 EM_REG_COUNTER_USED(&pStats->StatRZFailedWrmsr, "/EM/CPU%d/RZ/Interpret/Failed/Wrmsr", "The number of times WRMSR was not interpreted.");
352 EM_REG_COUNTER_USED(&pStats->StatR3FailedWrmsr, "/EM/CPU%d/R3/Interpret/Failed/Wrmsr", "The number of times WRMSR was not interpreted.");
353 EM_REG_COUNTER_USED(&pStats->StatRZFailedLmsw, "/EM/CPU%d/RZ/Interpret/Failed/Lmsw", "The number of times LMSW was not interpreted.");
354 EM_REG_COUNTER_USED(&pStats->StatR3FailedLmsw, "/EM/CPU%d/R3/Interpret/Failed/Lmsw", "The number of times LMSW was not interpreted.");
355 EM_REG_COUNTER_USED(&pStats->StatRZFailedSmsw, "/EM/CPU%d/RZ/Interpret/Failed/Smsw", "The number of times SMSW was not interpreted.");
356 EM_REG_COUNTER_USED(&pStats->StatR3FailedSmsw, "/EM/CPU%d/R3/Interpret/Failed/Smsw", "The number of times SMSW was not interpreted.");
357
358 EM_REG_COUNTER_USED(&pStats->StatRZFailedMisc, "/EM/CPU%d/RZ/Interpret/Failed/Misc", "The number of times some misc instruction was encountered.");
359 EM_REG_COUNTER_USED(&pStats->StatR3FailedMisc, "/EM/CPU%d/R3/Interpret/Failed/Misc", "The number of times some misc instruction was encountered.");
360 EM_REG_COUNTER_USED(&pStats->StatRZFailedAdd, "/EM/CPU%d/RZ/Interpret/Failed/Add", "The number of times ADD was not interpreted.");
361 EM_REG_COUNTER_USED(&pStats->StatR3FailedAdd, "/EM/CPU%d/R3/Interpret/Failed/Add", "The number of times ADD was not interpreted.");
362 EM_REG_COUNTER_USED(&pStats->StatRZFailedAdc, "/EM/CPU%d/RZ/Interpret/Failed/Adc", "The number of times ADC was not interpreted.");
363 EM_REG_COUNTER_USED(&pStats->StatR3FailedAdc, "/EM/CPU%d/R3/Interpret/Failed/Adc", "The number of times ADC was not interpreted.");
364 EM_REG_COUNTER_USED(&pStats->StatRZFailedBtr, "/EM/CPU%d/RZ/Interpret/Failed/Btr", "The number of times BTR was not interpreted.");
365 EM_REG_COUNTER_USED(&pStats->StatR3FailedBtr, "/EM/CPU%d/R3/Interpret/Failed/Btr", "The number of times BTR was not interpreted.");
366 EM_REG_COUNTER_USED(&pStats->StatRZFailedBts, "/EM/CPU%d/RZ/Interpret/Failed/Bts", "The number of times BTS was not interpreted.");
367 EM_REG_COUNTER_USED(&pStats->StatR3FailedBts, "/EM/CPU%d/R3/Interpret/Failed/Bts", "The number of times BTS was not interpreted.");
368 EM_REG_COUNTER_USED(&pStats->StatRZFailedBtc, "/EM/CPU%d/RZ/Interpret/Failed/Btc", "The number of times BTC was not interpreted.");
369 EM_REG_COUNTER_USED(&pStats->StatR3FailedBtc, "/EM/CPU%d/R3/Interpret/Failed/Btc", "The number of times BTC was not interpreted.");
370 EM_REG_COUNTER_USED(&pStats->StatRZFailedCli, "/EM/CPU%d/RZ/Interpret/Failed/Cli", "The number of times CLI was not interpreted.");
371 EM_REG_COUNTER_USED(&pStats->StatR3FailedCli, "/EM/CPU%d/R3/Interpret/Failed/Cli", "The number of times CLI was not interpreted.");
372 EM_REG_COUNTER_USED(&pStats->StatRZFailedCmpXchg, "/EM/CPU%d/RZ/Interpret/Failed/CmpXchg", "The number of times CMPXCHG was not interpreted.");
373 EM_REG_COUNTER_USED(&pStats->StatR3FailedCmpXchg, "/EM/CPU%d/R3/Interpret/Failed/CmpXchg", "The number of times CMPXCHG was not interpreted.");
374 EM_REG_COUNTER_USED(&pStats->StatRZFailedCmpXchg8b, "/EM/CPU%d/RZ/Interpret/Failed/CmpXchg8b", "The number of times CMPXCHG8B was not interpreted.");
375 EM_REG_COUNTER_USED(&pStats->StatR3FailedCmpXchg8b, "/EM/CPU%d/R3/Interpret/Failed/CmpXchg8b", "The number of times CMPXCHG8B was not interpreted.");
376 EM_REG_COUNTER_USED(&pStats->StatRZFailedXAdd, "/EM/CPU%d/RZ/Interpret/Failed/XAdd", "The number of times XADD was not interpreted.");
377 EM_REG_COUNTER_USED(&pStats->StatR3FailedXAdd, "/EM/CPU%d/R3/Interpret/Failed/XAdd", "The number of times XADD was not interpreted.");
378 EM_REG_COUNTER_USED(&pStats->StatRZFailedMovNTPS, "/EM/CPU%d/RZ/Interpret/Failed/MovNTPS", "The number of times MOVNTPS was not interpreted.");
379 EM_REG_COUNTER_USED(&pStats->StatR3FailedMovNTPS, "/EM/CPU%d/R3/Interpret/Failed/MovNTPS", "The number of times MOVNTPS was not interpreted.");
380 EM_REG_COUNTER_USED(&pStats->StatRZFailedStosWD, "/EM/CPU%d/RZ/Interpret/Failed/StosWD", "The number of times STOSWD was not interpreted.");
381 EM_REG_COUNTER_USED(&pStats->StatR3FailedStosWD, "/EM/CPU%d/R3/Interpret/Failed/StosWD", "The number of times STOSWD was not interpreted.");
382 EM_REG_COUNTER_USED(&pStats->StatRZFailedSub, "/EM/CPU%d/RZ/Interpret/Failed/Sub", "The number of times SUB was not interpreted.");
383 EM_REG_COUNTER_USED(&pStats->StatR3FailedSub, "/EM/CPU%d/R3/Interpret/Failed/Sub", "The number of times SUB was not interpreted.");
384 EM_REG_COUNTER_USED(&pStats->StatRZFailedWbInvd, "/EM/CPU%d/RZ/Interpret/Failed/WbInvd", "The number of times WBINVD was not interpreted.");
385 EM_REG_COUNTER_USED(&pStats->StatR3FailedWbInvd, "/EM/CPU%d/R3/Interpret/Failed/WbInvd", "The number of times WBINVD was not interpreted.");
386
387 EM_REG_COUNTER_USED(&pStats->StatRZFailedUserMode, "/EM/CPU%d/RZ/Interpret/Failed/UserMode", "The number of rejections because of CPL.");
388 EM_REG_COUNTER_USED(&pStats->StatR3FailedUserMode, "/EM/CPU%d/R3/Interpret/Failed/UserMode", "The number of rejections because of CPL.");
389 EM_REG_COUNTER_USED(&pStats->StatRZFailedPrefix, "/EM/CPU%d/RZ/Interpret/Failed/Prefix", "The number of rejections because of instruction prefixes.");
390 EM_REG_COUNTER_USED(&pStats->StatR3FailedPrefix, "/EM/CPU%d/R3/Interpret/Failed/Prefix", "The number of rejections because of instruction prefixes.");
391
392 EM_REG_COUNTER_USED(&pStats->StatIoRestarted, "/EM/CPU%d/R3/PrivInst/IoRestarted", "I/O instructions restarted in ring-3.");
393 EM_REG_COUNTER_USED(&pStats->StatIoIem, "/EM/CPU%d/R3/PrivInst/IoIem", "I/O instructions handed to IEM in ring-3.");
394 EM_REG_COUNTER_USED(&pStats->StatCli, "/EM/CPU%d/R3/PrivInst/Cli", "Number of cli instructions.");
395 EM_REG_COUNTER_USED(&pStats->StatSti, "/EM/CPU%d/R3/PrivInst/Sti", "Number of sti instructions.");
396 EM_REG_COUNTER_USED(&pStats->StatHlt, "/EM/CPU%d/R3/PrivInst/Hlt", "Number of hlt instructions not handled in GC because of PATM.");
397 EM_REG_COUNTER_USED(&pStats->StatInvlpg, "/EM/CPU%d/R3/PrivInst/Invlpg", "Number of invlpg instructions.");
398 EM_REG_COUNTER_USED(&pStats->StatMisc, "/EM/CPU%d/R3/PrivInst/Misc", "Number of misc. instructions.");
399 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[0], "/EM/CPU%d/R3/PrivInst/Mov CR0, X", "Number of mov CR0 write instructions.");
400 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[1], "/EM/CPU%d/R3/PrivInst/Mov CR1, X", "Number of mov CR1 write instructions.");
401 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[2], "/EM/CPU%d/R3/PrivInst/Mov CR2, X", "Number of mov CR2 write instructions.");
402 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[3], "/EM/CPU%d/R3/PrivInst/Mov CR3, X", "Number of mov CR3 write instructions.");
403 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[4], "/EM/CPU%d/R3/PrivInst/Mov CR4, X", "Number of mov CR4 write instructions.");
404 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[0], "/EM/CPU%d/R3/PrivInst/Mov X, CR0", "Number of mov CR0 read instructions.");
405 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[1], "/EM/CPU%d/R3/PrivInst/Mov X, CR1", "Number of mov CR1 read instructions.");
406 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[2], "/EM/CPU%d/R3/PrivInst/Mov X, CR2", "Number of mov CR2 read instructions.");
407 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[3], "/EM/CPU%d/R3/PrivInst/Mov X, CR3", "Number of mov CR3 read instructions.");
408 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[4], "/EM/CPU%d/R3/PrivInst/Mov X, CR4", "Number of mov CR4 read instructions.");
409 EM_REG_COUNTER_USED(&pStats->StatMovDRx, "/EM/CPU%d/R3/PrivInst/MovDRx", "Number of mov DRx instructions.");
410 EM_REG_COUNTER_USED(&pStats->StatIret, "/EM/CPU%d/R3/PrivInst/Iret", "Number of iret instructions.");
411 EM_REG_COUNTER_USED(&pStats->StatMovLgdt, "/EM/CPU%d/R3/PrivInst/Lgdt", "Number of lgdt instructions.");
412 EM_REG_COUNTER_USED(&pStats->StatMovLidt, "/EM/CPU%d/R3/PrivInst/Lidt", "Number of lidt instructions.");
413 EM_REG_COUNTER_USED(&pStats->StatMovLldt, "/EM/CPU%d/R3/PrivInst/Lldt", "Number of lldt instructions.");
414 EM_REG_COUNTER_USED(&pStats->StatSysEnter, "/EM/CPU%d/R3/PrivInst/Sysenter", "Number of sysenter instructions.");
415 EM_REG_COUNTER_USED(&pStats->StatSysExit, "/EM/CPU%d/R3/PrivInst/Sysexit", "Number of sysexit instructions.");
416 EM_REG_COUNTER_USED(&pStats->StatSysCall, "/EM/CPU%d/R3/PrivInst/Syscall", "Number of syscall instructions.");
417 EM_REG_COUNTER_USED(&pStats->StatSysRet, "/EM/CPU%d/R3/PrivInst/Sysret", "Number of sysret instructions.");
418
419 EM_REG_COUNTER(&pVCpu->em.s.StatTotalClis, "/EM/CPU%d/Cli/Total", "Total number of cli instructions executed.");
420 pVCpu->em.s.pCliStatTree = 0;
421
422 /* these should be considered for release statistics. */
423 EM_REG_COUNTER(&pVCpu->em.s.StatIOEmu, "/PROF/CPU%d/EM/Emulation/IO", "Profiling of emR3RawExecuteIOInstruction.");
424 EM_REG_COUNTER(&pVCpu->em.s.StatPrivEmu, "/PROF/CPU%d/EM/Emulation/Priv", "Profiling of emR3RawPrivileged.");
425 EM_REG_PROFILE(&pVCpu->em.s.StatHmEntry, "/PROF/CPU%d/EM/HmEnter", "Profiling Hardware Accelerated Mode entry overhead.");
426 EM_REG_PROFILE(&pVCpu->em.s.StatHmExec, "/PROF/CPU%d/EM/HmExec", "Profiling Hardware Accelerated Mode execution.");
427 EM_REG_PROFILE(&pVCpu->em.s.StatIEMEmu, "/PROF/CPU%d/EM/IEMEmuSingle", "Profiling single instruction IEM execution.");
428 EM_REG_PROFILE(&pVCpu->em.s.StatIEMThenREM, "/PROF/CPU%d/EM/IEMThenRem", "Profiling IEM-then-REM instruction execution (by IEM).");
429 EM_REG_PROFILE(&pVCpu->em.s.StatREMEmu, "/PROF/CPU%d/EM/REMEmuSingle", "Profiling single instruction REM execution.");
430 EM_REG_PROFILE(&pVCpu->em.s.StatREMExec, "/PROF/CPU%d/EM/REMExec", "Profiling REM execution.");
431 EM_REG_PROFILE(&pVCpu->em.s.StatREMSync, "/PROF/CPU%d/EM/REMSync", "Profiling REM context syncing.");
432 EM_REG_PROFILE(&pVCpu->em.s.StatRAWEntry, "/PROF/CPU%d/EM/RAWEnter", "Profiling Raw Mode entry overhead.");
433 EM_REG_PROFILE(&pVCpu->em.s.StatRAWExec, "/PROF/CPU%d/EM/RAWExec", "Profiling Raw Mode execution.");
434 EM_REG_PROFILE(&pVCpu->em.s.StatRAWTail, "/PROF/CPU%d/EM/RAWTail", "Profiling Raw Mode tail overhead.");
435
436#endif /* VBOX_WITH_STATISTICS */
437
438 EM_REG_COUNTER(&pVCpu->em.s.StatForcedActions, "/PROF/CPU%d/EM/ForcedActions", "Profiling forced action execution.");
439 EM_REG_COUNTER(&pVCpu->em.s.StatHalted, "/PROF/CPU%d/EM/Halted", "Profiling halted state (VMR3WaitHalted).");
440 EM_REG_PROFILE_ADV(&pVCpu->em.s.StatCapped, "/PROF/CPU%d/EM/Capped", "Profiling capped state (sleep).");
441 EM_REG_COUNTER(&pVCpu->em.s.StatREMTotal, "/PROF/CPU%d/EM/REMTotal", "Profiling emR3RemExecute (excluding FFs).");
442 EM_REG_COUNTER(&pVCpu->em.s.StatRAWTotal, "/PROF/CPU%d/EM/RAWTotal", "Profiling emR3RawExecute (excluding FFs).");
443
444 EM_REG_PROFILE_ADV(&pVCpu->em.s.StatTotal, "/PROF/CPU%d/EM/Total", "Profiling EMR3ExecuteVM.");
445 }
446
447 emR3InitDbg(pVM);
448 return VINF_SUCCESS;
449}
450
451
452/**
453 * Applies relocations to data and code managed by this
454 * component. This function will be called at init and
455 * whenever the VMM needs to relocate itself inside the GC.
456 *
457 * @param pVM The cross context VM structure.
458 */
459VMMR3_INT_DECL(void) EMR3Relocate(PVM pVM)
460{
461 LogFlow(("EMR3Relocate\n"));
462 for (VMCPUID i = 0; i < pVM->cCpus; i++)
463 {
464 PVMCPU pVCpu = &pVM->aCpus[i];
465 if (pVCpu->em.s.pStatsR3)
466 pVCpu->em.s.pStatsRC = MMHyperR3ToRC(pVM, pVCpu->em.s.pStatsR3);
467 }
468}
469
470
471/**
472 * Reset the EM state for a CPU.
473 *
474 * Called by EMR3Reset and hot plugging.
475 *
476 * @param pVCpu The cross context virtual CPU structure.
477 */
478VMMR3_INT_DECL(void) EMR3ResetCpu(PVMCPU pVCpu)
479{
480 pVCpu->em.s.fForceRAW = false;
481
482 /* VMR3ResetFF may return VINF_EM_RESET or VINF_EM_SUSPEND, so transition
483 out of the HALTED state here so that enmPrevState doesn't end up as
484 HALTED when EMR3Execute returns. */
485 if (pVCpu->em.s.enmState == EMSTATE_HALTED)
486 {
487 Log(("EMR3ResetCpu: Cpu#%u %s -> %s\n", pVCpu->idCpu, emR3GetStateName(pVCpu->em.s.enmState), pVCpu->idCpu == 0 ? "EMSTATE_NONE" : "EMSTATE_WAIT_SIPI"));
488 pVCpu->em.s.enmState = pVCpu->idCpu == 0 ? EMSTATE_NONE : EMSTATE_WAIT_SIPI;
489 }
490}
491
492
493/**
494 * Reset notification.
495 *
496 * @param pVM The cross context VM structure.
497 */
498VMMR3_INT_DECL(void) EMR3Reset(PVM pVM)
499{
500 Log(("EMR3Reset: \n"));
501 for (VMCPUID i = 0; i < pVM->cCpus; i++)
502 EMR3ResetCpu(&pVM->aCpus[i]);
503}
504
505
506/**
507 * Terminates the EM.
508 *
509 * Termination means cleaning up and freeing all resources;
510 * the VM itself is at this point powered off or suspended.
511 *
512 * @returns VBox status code.
513 * @param pVM The cross context VM structure.
514 */
515VMMR3_INT_DECL(int) EMR3Term(PVM pVM)
516{
517 AssertMsg(pVM->em.s.offVM, ("bad init order!\n"));
518
519#ifdef VBOX_WITH_REM
520 PDMR3CritSectDelete(&pVM->em.s.CritSectREM);
521#else
522 RT_NOREF(pVM);
523#endif
524 return VINF_SUCCESS;
525}
526
527
528/**
529 * Execute state save operation.
530 *
531 * @returns VBox status code.
532 * @param pVM The cross context VM structure.
533 * @param pSSM SSM operation handle.
534 */
535static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM)
536{
537 for (VMCPUID i = 0; i < pVM->cCpus; i++)
538 {
539 PVMCPU pVCpu = &pVM->aCpus[i];
540
541 int rc = SSMR3PutBool(pSSM, pVCpu->em.s.fForceRAW);
542 AssertRCReturn(rc, rc);
543
544 Assert(pVCpu->em.s.enmState == EMSTATE_SUSPENDED);
545 Assert(pVCpu->em.s.enmPrevState != EMSTATE_SUSPENDED);
546 rc = SSMR3PutU32(pSSM, pVCpu->em.s.enmPrevState);
547 AssertRCReturn(rc, rc);
548
549 /* Save mwait state. */
550 rc = SSMR3PutU32(pSSM, pVCpu->em.s.MWait.fWait);
551 AssertRCReturn(rc, rc);
552 rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMWaitRAX);
553 AssertRCReturn(rc, rc);
554 rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMWaitRCX);
555 AssertRCReturn(rc, rc);
556 rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRAX);
557 AssertRCReturn(rc, rc);
558 rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRCX);
559 AssertRCReturn(rc, rc);
560 rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRDX);
561 AssertRCReturn(rc, rc);
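        /* Note: emR3Load reads the mwait fields back in exactly this order;
           keep the two functions in sync when changing the layout. */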
562 }
563 return VINF_SUCCESS;
564}
565
566
567/**
568 * Execute state load operation.
569 *
570 * @returns VBox status code.
571 * @param pVM The cross context VM structure.
572 * @param pSSM SSM operation handle.
573 * @param uVersion Data layout version.
574 * @param uPass The data pass.
575 */
576static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
577{
578 /*
579 * Validate version.
580 */
581 if ( uVersion > EM_SAVED_STATE_VERSION
582 || uVersion < EM_SAVED_STATE_VERSION_PRE_SMP)
583 {
584 AssertMsgFailed(("emR3Load: Invalid version uVersion=%d (current %d)!\n", uVersion, EM_SAVED_STATE_VERSION));
585 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
586 }
587 Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);
588
589 /*
590 * Load the saved state.
591 */
592 for (VMCPUID i = 0; i < pVM->cCpus; i++)
593 {
594 PVMCPU pVCpu = &pVM->aCpus[i];
595
596 int rc = SSMR3GetBool(pSSM, &pVCpu->em.s.fForceRAW);
597 if (RT_FAILURE(rc))
598 pVCpu->em.s.fForceRAW = false;
599 AssertRCReturn(rc, rc);
600
601 if (uVersion > EM_SAVED_STATE_VERSION_PRE_SMP)
602 {
603 AssertCompile(sizeof(pVCpu->em.s.enmPrevState) == sizeof(uint32_t));
604 rc = SSMR3GetU32(pSSM, (uint32_t *)&pVCpu->em.s.enmPrevState);
605 AssertRCReturn(rc, rc);
606 Assert(pVCpu->em.s.enmPrevState != EMSTATE_SUSPENDED);
607
608 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
609 }
610 if (uVersion > EM_SAVED_STATE_VERSION_PRE_MWAIT)
611 {
612 /* Load mwait state. */
613 rc = SSMR3GetU32(pSSM, &pVCpu->em.s.MWait.fWait);
614 AssertRCReturn(rc, rc);
615 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMWaitRAX);
616 AssertRCReturn(rc, rc);
617 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMWaitRCX);
618 AssertRCReturn(rc, rc);
619 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRAX);
620 AssertRCReturn(rc, rc);
621 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRCX);
622 AssertRCReturn(rc, rc);
623 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRDX);
624 AssertRCReturn(rc, rc);
625 }
626
627 Assert(!pVCpu->em.s.pCliStatTree);
628 }
629 return VINF_SUCCESS;
630}
631
632
633/**
634 * Argument packet for emR3SetExecutionPolicy.
635 */
636struct EMR3SETEXECPOLICYARGS
637{
638 EMEXECPOLICY enmPolicy;
639 bool fEnforce;
640};
641
642
643/**
644 * @callback_method_impl{FNVMMEMTRENDEZVOUS, Rendezvous callback for EMR3SetExecutionPolicy.}
645 */
646static DECLCALLBACK(VBOXSTRICTRC) emR3SetExecutionPolicy(PVM pVM, PVMCPU pVCpu, void *pvUser)
647{
648 /*
649 * Only the first CPU changes the variables.
650 */
651 if (pVCpu->idCpu == 0)
652 {
653 struct EMR3SETEXECPOLICYARGS *pArgs = (struct EMR3SETEXECPOLICYARGS *)pvUser;
654 switch (pArgs->enmPolicy)
655 {
656 case EMEXECPOLICY_RECOMPILE_RING0:
657 pVM->fRecompileSupervisor = pArgs->fEnforce;
658 break;
659 case EMEXECPOLICY_RECOMPILE_RING3:
660 pVM->fRecompileUser = pArgs->fEnforce;
661 break;
662 case EMEXECPOLICY_IEM_ALL:
663 pVM->em.s.fIemExecutesAll = pArgs->fEnforce;
664 break;
665 default:
666 AssertFailedReturn(VERR_INVALID_PARAMETER);
667 }
668 Log(("emR3SetExecutionPolicy: fRecompileUser=%RTbool fRecompileSupervisor=%RTbool fIemExecutesAll=%RTbool\n",
669 pVM->fRecompileUser, pVM->fRecompileSupervisor, pVM->em.s.fIemExecutesAll));
670 }
671
672 /*
673 * Force rescheduling if in RAW, HM, IEM, IEM-then-REM, or REM.
674 */
675 return pVCpu->em.s.enmState == EMSTATE_RAW
676 || pVCpu->em.s.enmState == EMSTATE_HM
677 || pVCpu->em.s.enmState == EMSTATE_IEM
678 || pVCpu->em.s.enmState == EMSTATE_REM
679 || pVCpu->em.s.enmState == EMSTATE_IEM_THEN_REM
680 ? VINF_EM_RESCHEDULE
681 : VINF_SUCCESS;
682}
683
684
685/**
686 * Changes an execution scheduling policy parameter.
687 *
688 * This is used to enable or disable raw-mode / hardware-virtualization
689 * execution of user and supervisor code.
690 *
691 * @returns VINF_SUCCESS on success.
692 * @returns VINF_EM_RESCHEDULE if a rescheduling might be required.
693 * @returns VERR_INVALID_PARAMETER on an invalid enmMode value.
694 *
695 * @param pUVM The user mode VM handle.
696 * @param enmPolicy The scheduling policy to change.
697 * @param fEnforce Whether to enforce the policy or not.
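 *
 * Example (illustrative): force all guest code through the interpreter.
 * @code
 *     int rc = EMR3SetExecutionPolicy(pUVM, EMEXECPOLICY_IEM_ALL, true /*fEnforce*/);
 *     AssertRC(rc);
 * @endcode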
698 */
699VMMR3DECL(int) EMR3SetExecutionPolicy(PUVM pUVM, EMEXECPOLICY enmPolicy, bool fEnforce)
700{
701 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
702 VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE);
703 AssertReturn(enmPolicy > EMEXECPOLICY_INVALID && enmPolicy < EMEXECPOLICY_END, VERR_INVALID_PARAMETER);
704
705 struct EMR3SETEXECPOLICYARGS Args = { enmPolicy, fEnforce };
706 return VMMR3EmtRendezvous(pUVM->pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING, emR3SetExecutionPolicy, &Args);
707}
708
709
710/**
711 * Queries an execution scheduling policy parameter.
712 *
713 * @returns VBox status code.
714 * @param pUVM The user mode VM handle.
715 * @param enmPolicy The scheduling policy to query.
716 * @param pfEnforced Where to return the current value.
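 *
 * Example (illustrative):
 * @code
 *     bool fIemAll = false;
 *     int rc = EMR3QueryExecutionPolicy(pUVM, EMEXECPOLICY_IEM_ALL, &fIemAll);
 *     AssertRC(rc);
 * @endcode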
717 */
718VMMR3DECL(int) EMR3QueryExecutionPolicy(PUVM pUVM, EMEXECPOLICY enmPolicy, bool *pfEnforced)
719{
720 AssertReturn(enmPolicy > EMEXECPOLICY_INVALID && enmPolicy < EMEXECPOLICY_END, VERR_INVALID_PARAMETER);
721 AssertPtrReturn(pfEnforced, VERR_INVALID_POINTER);
722 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
723 PVM pVM = pUVM->pVM;
724 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
725
726 /* No need to bother EMTs with a query. */
727 switch (enmPolicy)
728 {
729 case EMEXECPOLICY_RECOMPILE_RING0:
730 *pfEnforced = pVM->fRecompileSupervisor;
731 break;
732 case EMEXECPOLICY_RECOMPILE_RING3:
733 *pfEnforced = pVM->fRecompileUser;
734 break;
735 case EMEXECPOLICY_IEM_ALL:
736 *pfEnforced = pVM->em.s.fIemExecutesAll;
737 break;
738 default:
739 AssertFailedReturn(VERR_INTERNAL_ERROR_2);
740 }
741
742 return VINF_SUCCESS;
743}
744
745
746/**
747 * Raise a fatal error.
748 *
749 * Safely terminate the VM with a full state report. This function
750 * never returns.
751 *
752 * @param pVCpu The cross context virtual CPU structure.
753 * @param rc VBox status code.
754 */
755VMMR3DECL(void) EMR3FatalError(PVMCPU pVCpu, int rc)
756{
757 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
758 longjmp(pVCpu->em.s.u.FatalLongJump, rc);
759}
760
761
762#if defined(LOG_ENABLED) || defined(VBOX_STRICT)
763/**
764 * Gets the EM state name.
765 *
766 * @returns Pointer to a read-only state name.
767 * @param enmState The state.
768 */
769static const char *emR3GetStateName(EMSTATE enmState)
770{
771 switch (enmState)
772 {
773 case EMSTATE_NONE: return "EMSTATE_NONE";
774 case EMSTATE_RAW: return "EMSTATE_RAW";
775 case EMSTATE_HM: return "EMSTATE_HM";
776 case EMSTATE_IEM: return "EMSTATE_IEM";
777 case EMSTATE_REM: return "EMSTATE_REM";
778 case EMSTATE_HALTED: return "EMSTATE_HALTED";
779 case EMSTATE_WAIT_SIPI: return "EMSTATE_WAIT_SIPI";
780 case EMSTATE_SUSPENDED: return "EMSTATE_SUSPENDED";
781 case EMSTATE_TERMINATING: return "EMSTATE_TERMINATING";
782 case EMSTATE_DEBUG_GUEST_RAW: return "EMSTATE_DEBUG_GUEST_RAW";
783 case EMSTATE_DEBUG_GUEST_HM: return "EMSTATE_DEBUG_GUEST_HM";
784 case EMSTATE_DEBUG_GUEST_IEM: return "EMSTATE_DEBUG_GUEST_IEM";
785 case EMSTATE_DEBUG_GUEST_REM: return "EMSTATE_DEBUG_GUEST_REM";
786 case EMSTATE_DEBUG_HYPER: return "EMSTATE_DEBUG_HYPER";
787 case EMSTATE_GURU_MEDITATION: return "EMSTATE_GURU_MEDITATION";
788 case EMSTATE_IEM_THEN_REM: return "EMSTATE_IEM_THEN_REM";
789 default: return "Unknown!";
790 }
791}
792#endif /* LOG_ENABLED || VBOX_STRICT */
793
794
795/**
796 * Debug loop.
797 *
798 * @returns VBox status code for EM.
799 * @param pVM The cross context VM structure.
800 * @param pVCpu The cross context virtual CPU structure.
801 * @param rc Current EM VBox status code.
802 */
803static VBOXSTRICTRC emR3Debug(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc)
804{
805 for (;;)
806 {
807 Log(("emR3Debug: rc=%Rrc\n", VBOXSTRICTRC_VAL(rc)));
808 const VBOXSTRICTRC rcLast = rc;
809
810 /*
811 * Debug related RC.
812 */
813 switch (VBOXSTRICTRC_VAL(rc))
814 {
815 /*
816 * Single step an instruction.
817 */
818 case VINF_EM_DBG_STEP:
819 if ( pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_RAW
820 || pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER
821 || pVCpu->em.s.fForceRAW /* paranoia */)
822#ifdef VBOX_WITH_RAW_MODE
823 rc = emR3RawStep(pVM, pVCpu);
824#else
825 AssertLogRelMsgFailedStmt(("Bad EM state."), VERR_EM_INTERNAL_ERROR);
826#endif
827 else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_HM)
828 rc = EMR3HmSingleInstruction(pVM, pVCpu, 0 /*fFlags*/);
829#ifdef VBOX_WITH_REM
830 else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_REM)
831 rc = emR3RemStep(pVM, pVCpu);
832#endif
833 else
834 {
835 rc = IEMExecOne(pVCpu); /** @todo add dedicated interface... */
836 if (rc == VINF_SUCCESS || rc == VINF_EM_RESCHEDULE)
837 rc = VINF_EM_DBG_STEPPED;
838 }
839 break;
840
841 /*
842 * Simple events: stepped, breakpoint, stop/assertion.
843 */
844 case VINF_EM_DBG_STEPPED:
845 rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED);
846 break;
847
848 case VINF_EM_DBG_BREAKPOINT:
849 rc = DBGFR3EventBreakpoint(pVM, DBGFEVENT_BREAKPOINT);
850 break;
851
852 case VINF_EM_DBG_STOP:
853 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, NULL, 0, NULL, NULL);
854 break;
855
856 case VINF_EM_DBG_EVENT:
857 rc = DBGFR3EventHandlePending(pVM, pVCpu);
858 break;
859
860 case VINF_EM_DBG_HYPER_STEPPED:
861 rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED_HYPER);
862 break;
863
864 case VINF_EM_DBG_HYPER_BREAKPOINT:
865 rc = DBGFR3EventBreakpoint(pVM, DBGFEVENT_BREAKPOINT_HYPER);
866 break;
867
868 case VINF_EM_DBG_HYPER_ASSERTION:
869 RTPrintf("\nVINF_EM_DBG_HYPER_ASSERTION:\n%s%s\n", VMMR3GetRZAssertMsg1(pVM), VMMR3GetRZAssertMsg2(pVM));
870 RTLogFlush(NULL);
871 rc = DBGFR3EventAssertion(pVM, DBGFEVENT_ASSERTION_HYPER, VMMR3GetRZAssertMsg1(pVM), VMMR3GetRZAssertMsg2(pVM));
872 break;
873
874 /*
875 * Guru meditation.
876 */
877 case VERR_VMM_RING0_ASSERTION: /** @todo Make a guru meditation event! */
878 rc = DBGFR3EventSrc(pVM, DBGFEVENT_FATAL_ERROR, "VERR_VMM_RING0_ASSERTION", 0, NULL, NULL);
879 break;
880 case VERR_REM_TOO_MANY_TRAPS: /** @todo Make a guru meditation event! */
881 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, "VERR_REM_TOO_MANY_TRAPS", 0, NULL, NULL);
882 break;
883 case VINF_EM_TRIPLE_FAULT: /** @todo Make a guru meditation event! */
884 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, "VINF_EM_TRIPLE_FAULT", 0, NULL, NULL);
885 break;
886
887 default: /** @todo don't use default for guru, but use dedicated error codes! */
888 {
889 LogRel(("emR3Debug: rc=%Rrc\n", VBOXSTRICTRC_VAL(rc)));
890 rc = DBGFR3Event(pVM, DBGFEVENT_FATAL_ERROR);
891 break;
892 }
893 }
894
895 /*
896 * Process the result.
897 */
898 switch (VBOXSTRICTRC_VAL(rc))
899 {
900 /*
901 * Continue the debugging loop.
902 */
903 case VINF_EM_DBG_STEP:
904 case VINF_EM_DBG_STOP:
905 case VINF_EM_DBG_EVENT:
906 case VINF_EM_DBG_STEPPED:
907 case VINF_EM_DBG_BREAKPOINT:
908 case VINF_EM_DBG_HYPER_STEPPED:
909 case VINF_EM_DBG_HYPER_BREAKPOINT:
910 case VINF_EM_DBG_HYPER_ASSERTION:
911 break;
912
913 /*
914 * Resuming execution (in some form) has to be done here if we got
915 * a hypervisor debug event.
916 */
917 case VINF_SUCCESS:
918 case VINF_EM_RESUME:
919 case VINF_EM_SUSPEND:
920 case VINF_EM_RESCHEDULE:
921 case VINF_EM_RESCHEDULE_RAW:
922 case VINF_EM_RESCHEDULE_REM:
923 case VINF_EM_HALT:
924 if (pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER)
925 {
926#ifdef VBOX_WITH_RAW_MODE
927 rc = emR3RawResumeHyper(pVM, pVCpu);
928 if (rc != VINF_SUCCESS && RT_SUCCESS(rc))
929 continue;
930#else
931 AssertLogRelMsgFailedReturn(("Not implemented\n"), VERR_EM_INTERNAL_ERROR);
932#endif
933 }
934 if (rc == VINF_SUCCESS)
935 rc = VINF_EM_RESCHEDULE;
936 return rc;
937
938 /*
939 * The debugger isn't attached.
940 * We'll simply turn the thing off since that's the easiest thing to do.
941 */
942 case VERR_DBGF_NOT_ATTACHED:
943 switch (VBOXSTRICTRC_VAL(rcLast))
944 {
945 case VINF_EM_DBG_HYPER_STEPPED:
946 case VINF_EM_DBG_HYPER_BREAKPOINT:
947 case VINF_EM_DBG_HYPER_ASSERTION:
948 case VERR_TRPM_PANIC:
949 case VERR_TRPM_DONT_PANIC:
950 case VERR_VMM_RING0_ASSERTION:
951 case VERR_VMM_HYPER_CR3_MISMATCH:
952 case VERR_VMM_RING3_CALL_DISABLED:
953 return rcLast;
954 }
955 return VINF_EM_OFF;
956
957 /*
958 * Status codes terminating the VM in one or another sense.
959 */
960 case VINF_EM_TERMINATE:
961 case VINF_EM_OFF:
962 case VINF_EM_RESET:
963 case VINF_EM_NO_MEMORY:
964 case VINF_EM_RAW_STALE_SELECTOR:
965 case VINF_EM_RAW_IRET_TRAP:
966 case VERR_TRPM_PANIC:
967 case VERR_TRPM_DONT_PANIC:
968 case VERR_IEM_INSTR_NOT_IMPLEMENTED:
969 case VERR_IEM_ASPECT_NOT_IMPLEMENTED:
970 case VERR_VMM_RING0_ASSERTION:
971 case VERR_VMM_HYPER_CR3_MISMATCH:
972 case VERR_VMM_RING3_CALL_DISABLED:
973 case VERR_INTERNAL_ERROR:
974 case VERR_INTERNAL_ERROR_2:
975 case VERR_INTERNAL_ERROR_3:
976 case VERR_INTERNAL_ERROR_4:
977 case VERR_INTERNAL_ERROR_5:
978 case VERR_IPE_UNEXPECTED_STATUS:
979 case VERR_IPE_UNEXPECTED_INFO_STATUS:
980 case VERR_IPE_UNEXPECTED_ERROR_STATUS:
981 return rc;
982
983 /*
984 * The rest is unexpected, and will keep us here.
985 */
986 default:
987 AssertMsgFailed(("Unexpected rc %Rrc!\n", VBOXSTRICTRC_VAL(rc)));
988 break;
989 }
990 } /* debug for ever */
991}
992
993
994#if defined(VBOX_WITH_REM) || defined(DEBUG)
995/**
996 * Steps recompiled code.
997 *
998 * @returns VBox status code. The most important ones are: VINF_EM_STEP_EVENT,
999 * VINF_EM_RESCHEDULE, VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1000 *
1001 * @param pVM The cross context VM structure.
1002 * @param pVCpu The cross context virtual CPU structure.
1003 */
1004static int emR3RemStep(PVM pVM, PVMCPU pVCpu)
1005{
1006 Log3(("emR3RemStep: cs:eip=%04x:%08x\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
1007
1008# ifdef VBOX_WITH_REM
1009 EMRemLock(pVM);
1010
1011 /*
1012 * Switch to REM, step instruction, switch back.
1013 */
1014 int rc = REMR3State(pVM, pVCpu);
1015 if (RT_SUCCESS(rc))
1016 {
1017 rc = REMR3Step(pVM, pVCpu);
1018 REMR3StateBack(pVM, pVCpu);
1019 }
1020 EMRemUnlock(pVM);
1021
1022# else
1023 int rc = VBOXSTRICTRC_TODO(IEMExecOne(pVCpu)); NOREF(pVM);
1024# endif
1025
1026 Log3(("emR3RemStep: returns %Rrc cs:eip=%04x:%08x\n", rc, CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
1027 return rc;
1028}
1029#endif /* VBOX_WITH_REM || DEBUG */
1030
1031
1032#ifdef VBOX_WITH_REM
1033/**
1034 * emR3RemExecute helper that syncs the state back from REM and leaves the
1035 * REM critical section.
1036 *
1037 * @returns false - new fInREMState value.
1038 * @param pVM The cross context VM structure.
1039 * @param pVCpu The cross context virtual CPU structure.
1040 */
1041DECLINLINE(bool) emR3RemExecuteSyncBack(PVM pVM, PVMCPU pVCpu)
1042{
1043 STAM_PROFILE_START(&pVCpu->em.s.StatREMSync, a);
1044 REMR3StateBack(pVM, pVCpu);
1045 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMSync, a);
1046
1047 EMRemUnlock(pVM);
1048 return false;
1049}
1050#endif
1051
1052
1053/**
1054 * Executes recompiled code.
1055 *
1056 * This function contains the recompiler version of the inner
1057 * execution loop (the outer loop being in EMR3ExecuteVM()).
1058 *
1059 * @returns VBox status code. The most important ones are: VINF_EM_RESCHEDULE,
1060 * VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1061 *
1062 * @param pVM The cross context VM structure.
1063 * @param pVCpu The cross context virtual CPU structure.
1064 * @param pfFFDone Where to store an indicator telling whether or not
1065 * FFs were done before returning.
1066 *
1067 */
1068static int emR3RemExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
1069{
1070#ifdef LOG_ENABLED
1071 PCPUMCTX pCtx = pVCpu->em.s.pCtx;
1072 uint32_t cpl = CPUMGetGuestCPL(pVCpu);
1073
1074 if (pCtx->eflags.Bits.u1VM)
1075 Log(("EMV86: %04X:%08X IF=%d\n", pCtx->cs.Sel, pCtx->eip, pCtx->eflags.Bits.u1IF));
1076 else
1077 Log(("EMR%d: %04X:%08X ESP=%08X IF=%d CR0=%x eflags=%x\n", cpl, pCtx->cs.Sel, pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF, (uint32_t)pCtx->cr0, pCtx->eflags.u));
1078#endif
1079 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatREMTotal, a);
1080
1081#if defined(VBOX_STRICT) && defined(DEBUG_bird)
1082 AssertMsg( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)
1083 || !MMHyperIsInsideArea(pVM, CPUMGetGuestEIP(pVCpu)), /** @todo @bugref{1419} - get flat address. */
1084 ("cs:eip=%RX16:%RX32\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
1085#endif
1086
1087 /*
1088 * Spin until we get a forced action which returns anything but VINF_SUCCESS,
1089 * or until REM suggests raw-mode execution.
1090 */
1091 *pfFFDone = false;
1092#ifdef VBOX_WITH_REM
1093 bool fInREMState = false;
1094#else
1095 uint32_t cLoops = 0;
1096#endif
1097 int rc = VINF_SUCCESS;
1098 for (;;)
1099 {
1100#ifdef VBOX_WITH_REM
1101 /*
1102 * Lock REM and update the state if not already in sync.
1103 *
1104 * Note! Big lock, but you are not supposed to own any lock when
1105 * coming in here.
1106 */
1107 if (!fInREMState)
1108 {
1109 EMRemLock(pVM);
1110 STAM_PROFILE_START(&pVCpu->em.s.StatREMSync, b);
1111
1112 /* Flush the recompiler translation blocks if the VCPU has changed,
1113 and force a full CPU state resync. */
1114 if (pVM->em.s.idLastRemCpu != pVCpu->idCpu)
1115 {
1116 REMFlushTBs(pVM);
1117 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
1118 }
1119 pVM->em.s.idLastRemCpu = pVCpu->idCpu;
1120
1121 rc = REMR3State(pVM, pVCpu);
1122
1123 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMSync, b);
1124 if (RT_FAILURE(rc))
1125 break;
1126 fInREMState = true;
1127
1128 /*
1129 * We might have missed the raising of VMREQ, TIMER and some other
1130 * important FFs while we were busy switching the state. So, check again.
1131 */
1132 if ( VM_FF_IS_PENDING(pVM, VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_DBGF | VM_FF_CHECK_VM_STATE | VM_FF_RESET)
1133 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TIMER | VMCPU_FF_REQUEST))
1134 {
1135 LogFlow(("emR3RemExecute: Skipping run, because FF is set. %#x\n", pVM->fGlobalForcedActions));
1136 goto l_REMDoForcedActions;
1137 }
1138 }
1139#endif
1140
1141 /*
1142 * Execute REM.
1143 */
1144 if (RT_LIKELY(emR3IsExecutionAllowed(pVM, pVCpu)))
1145 {
1146 STAM_PROFILE_START(&pVCpu->em.s.StatREMExec, c);
1147#ifdef VBOX_WITH_REM
1148 rc = REMR3Run(pVM, pVCpu);
1149#else
1150 rc = VBOXSTRICTRC_TODO(IEMExecLots(pVCpu, NULL /*pcInstructions*/));
1151#endif
1152 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMExec, c);
1153 }
1154 else
1155 {
1156 /* Give up this time slice; virtual time continues */
1157 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatCapped, u);
1158 RTThreadSleep(5);
1159 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatCapped, u);
1160 rc = VINF_SUCCESS;
1161 }
1162
1163 /*
1164 * Deal with high priority post execution FFs before doing anything
1165 * else. Sync back the state and leave the lock to be on the safe side.
1166 */
1167 if ( VM_FF_IS_PENDING(pVM, VM_FF_HIGH_PRIORITY_POST_MASK)
1168 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_MASK))
1169 {
1170#ifdef VBOX_WITH_REM
1171 fInREMState = emR3RemExecuteSyncBack(pVM, pVCpu);
1172#endif
1173 rc = emR3HighPriorityPostForcedActions(pVM, pVCpu, rc);
1174 }
1175
1176 /*
1177 * Process the returned status code.
1178 */
1179 if (rc != VINF_SUCCESS)
1180 {
1181 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
1182 break;
1183 if (rc != VINF_REM_INTERRUPED_FF)
1184 {
1185#ifndef VBOX_WITH_REM
1186 /* Try to dodge unimplemented IEM trouble by rescheduling. */
1187 if ( rc == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1188 || rc == VERR_IEM_INSTR_NOT_IMPLEMENTED)
1189 {
1190 EMSTATE enmNewState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
1191 if (enmNewState != EMSTATE_REM && enmNewState != EMSTATE_IEM_THEN_REM)
1192 {
1193 rc = VINF_EM_RESCHEDULE;
1194 break;
1195 }
1196 }
1197#endif
1198
1199 /*
1200 * Anything which is not known to us means an internal error
1201 * and the termination of the VM!
1202 */
1203 AssertMsg(rc == VERR_REM_TOO_MANY_TRAPS, ("Unknown GC return code: %Rra\n", rc));
1204 break;
1205 }
1206 }
1207
1208
1209 /*
1210 * Check and execute forced actions.
1211 *
1212 * Sync back the VM state and leave the lock before calling any of
1213 * these; you never know what's going to happen here.
1214 */
1215#ifdef VBOX_HIGH_RES_TIMERS_HACK
1216 TMTimerPollVoid(pVM, pVCpu);
1217#endif
1218 AssertCompile(VMCPU_FF_ALL_REM_MASK & VMCPU_FF_TIMER);
1219 if ( VM_FF_IS_PENDING(pVM, VM_FF_ALL_REM_MASK)
1220 || VMCPU_FF_IS_PENDING(pVCpu,
1221 VMCPU_FF_ALL_REM_MASK
1222 & VM_WHEN_RAW_MODE(~(VMCPU_FF_CSAM_PENDING_ACTION | VMCPU_FF_CSAM_SCAN_PAGE), UINT32_MAX)) )
1223 {
1224#ifdef VBOX_WITH_REM
1225l_REMDoForcedActions:
1226 if (fInREMState)
1227 fInREMState = emR3RemExecuteSyncBack(pVM, pVCpu);
1228#endif
1229 STAM_REL_PROFILE_ADV_SUSPEND(&pVCpu->em.s.StatREMTotal, a);
1230 rc = emR3ForcedActions(pVM, pVCpu, rc);
1231 VBOXVMM_EM_FF_ALL_RET(pVCpu, rc);
1232 STAM_REL_PROFILE_ADV_RESUME(&pVCpu->em.s.StatREMTotal, a);
1233 if ( rc != VINF_SUCCESS
1234 && rc != VINF_EM_RESCHEDULE_REM)
1235 {
1236 *pfFFDone = true;
1237 break;
1238 }
1239 }
1240
1241#ifndef VBOX_WITH_REM
1242 /*
1243 * Have to check if we can get back to fast execution mode every so often.
1244 */
1245 if (!(++cLoops & 7))
1246 {
1247 EMSTATE enmCheck = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
1248 if ( enmCheck != EMSTATE_REM
1249 && enmCheck != EMSTATE_IEM_THEN_REM)
1250 return VINF_EM_RESCHEDULE;
1251 }
1252#endif
1253
1254 } /* The Inner Loop, recompiled execution mode version. */
1255
1256
1257#ifdef VBOX_WITH_REM
1258 /*
1259 * Returning. Sync back the VM state if required.
1260 */
1261 if (fInREMState)
1262 fInREMState = emR3RemExecuteSyncBack(pVM, pVCpu);
1263#endif
1264
1265 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatREMTotal, a);
1266 return rc;
1267}
1268
1269
1270#ifdef DEBUG
1271
1272int emR3SingleStepExecRem(PVM pVM, PVMCPU pVCpu, uint32_t cIterations)
1273{
1274 EMSTATE enmOldState = pVCpu->em.s.enmState;
1275
1276 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_REM;
1277
1278 Log(("Single step BEGIN:\n"));
1279 for (uint32_t i = 0; i < cIterations; i++)
1280 {
1281 DBGFR3PrgStep(pVCpu);
1282 DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, "RSS");
1283 emR3RemStep(pVM, pVCpu);
1284 if (emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx) != EMSTATE_REM)
1285 break;
1286 }
1287 Log(("Single step END:\n"));
1288 CPUMSetGuestEFlags(pVCpu, CPUMGetGuestEFlags(pVCpu) & ~X86_EFL_TF);
1289 pVCpu->em.s.enmState = enmOldState;
1290 return VINF_EM_RESCHEDULE;
1291}
1292
1293#endif /* DEBUG */
1294
1295
1296/**
1297 * Try execute the problematic code in IEM first, then fall back on REM if there
1298 * is too much of it or if IEM doesn't implement something.
1299 *
1300 * @returns Strict VBox status code from IEMExecLots.
1301 * @param pVM The cross context VM structure.
1302 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1303 * @param pfFFDone Force flags done indicator.
1304 *
1305 * @thread EMT(pVCpu)
1306 */
1307static VBOXSTRICTRC emR3ExecuteIemThenRem(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
1308{
1309 LogFlow(("emR3ExecuteIemThenRem: %04x:%RGv\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestRIP(pVCpu)));
1310 *pfFFDone = false;
1311
1312 /*
1313 * Execute in IEM for a while.
1314 */
1315 while (pVCpu->em.s.cIemThenRemInstructions < 1024)
1316 {
1317 uint32_t cInstructions;
1318 VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu, &cInstructions);
1319 pVCpu->em.s.cIemThenRemInstructions += cInstructions;
1320 if (rcStrict != VINF_SUCCESS)
1321 {
1322 if ( rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1323 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
1324 break;
1325
1326 Log(("emR3ExecuteIemThenRem: returns %Rrc after %u instructions\n",
1327 VBOXSTRICTRC_VAL(rcStrict), pVCpu->em.s.cIemThenRemInstructions));
1328 return rcStrict;
1329 }
1330
1331 EMSTATE enmNewState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
1332 if (enmNewState != EMSTATE_REM && enmNewState != EMSTATE_IEM_THEN_REM)
1333 {
1334 LogFlow(("emR3ExecuteIemThenRem: -> %d (%s) after %u instructions\n",
1335 enmNewState, emR3GetStateName(enmNewState), pVCpu->em.s.cIemThenRemInstructions));
1336 pVCpu->em.s.enmPrevState = pVCpu->em.s.enmState;
1337 pVCpu->em.s.enmState = enmNewState;
1338 return VINF_SUCCESS;
1339 }
1340
1341 /*
1342 * Check for pending actions.
1343 */
1344 if ( VM_FF_IS_PENDING(pVM, VM_FF_ALL_REM_MASK)
1345 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_ALL_REM_MASK))
1346 return VINF_SUCCESS;
1347 }
1348
1349 /*
1350 * Switch to REM.
1351 */
1352 Log(("emR3ExecuteIemThenRem: -> EMSTATE_REM (after %u instructions)\n", pVCpu->em.s.cIemThenRemInstructions));
1353 pVCpu->em.s.enmState = EMSTATE_REM;
1354 return VINF_SUCCESS;
1355}
1356
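/* Caller sketch (illustrative; this mirrors the EMSTATE_IEM_THEN_REM case in
 * EMR3ExecuteVM() below):
 *
 *     bool fFFDone = false;
 *     int rc = VBOXSTRICTRC_TODO(emR3ExecuteIemThenRem(pVM, pVCpu, &fFFDone));
 *     // VINF_SUCCESS means a new state was stored in pVCpu->em.s.enmState or
 *     // forced actions are pending; other codes go through the rc dispatcher.
 */
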
1357
1358/**
1359 * Decides whether to execute RAW, HWACC or REM.
1360 *
1361 * @returns new EM state
1362 * @param pVM The cross context VM structure.
1363 * @param pVCpu The cross context virtual CPU structure.
1364 * @param pCtx Pointer to the guest CPU context.
1365 */
1366EMSTATE emR3Reschedule(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
1367{
1368 /*
1369 * When forcing raw-mode execution, things are simple.
1370 */
1371 if (pVCpu->em.s.fForceRAW)
1372 return EMSTATE_RAW;
1373
1374 /*
1375 * We stay in the wait for SIPI state unless explicitly told otherwise.
1376 */
1377 if (pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI)
1378 return EMSTATE_WAIT_SIPI;
1379
1380 /*
1381 * Execute everything in IEM?
1382 */
1383 if (pVM->em.s.fIemExecutesAll)
1384 return EMSTATE_IEM;
1385
1386 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
1387 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
1388 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
1389
1390 X86EFLAGS EFlags = pCtx->eflags;
1391 if (HMIsEnabled(pVM))
1392 {
1393 /*
1394 * Hardware accelerated raw-mode:
1395 */
1396 if ( EMIsHwVirtExecutionEnabled(pVM)
1397 && HMR3CanExecuteGuest(pVM, pCtx))
1398 return EMSTATE_HM;
1399
1400 /*
1401 * Note! Raw mode and hw accelerated mode are incompatible. The latter
1402 * turns off monitoring features essential for raw mode!
1403 */
1404 return EMSTATE_IEM_THEN_REM;
1405 }
1406
1407 /*
1408 * Standard raw-mode:
1409 *
1410 * Here we only support 16 and 32-bit protected mode ring-3 code with no I/O
1411 * privileges, or 32-bit protected mode ring-0 code.
1412 *
1413 * The tests are ordered by the likelihood of being true during normal execution.
1414 */
1415 if (EFlags.u32 & (X86_EFL_TF /* | HF_INHIBIT_IRQ_MASK*/))
1416 {
1417 Log2(("raw mode refused: EFlags=%#x\n", EFlags.u32));
1418 return EMSTATE_REM;
1419 }
1420
1421# ifndef VBOX_RAW_V86
1422 if (EFlags.u32 & X86_EFL_VM) {
1423 Log2(("raw mode refused: VM_MASK\n"));
1424 return EMSTATE_REM;
1425 }
1426# endif
1427
1428 /** @todo check up the X86_CR0_AM flag in respect to raw mode!!! We're probably not emulating it right! */
1429 uint32_t u32CR0 = pCtx->cr0;
1430 if ((u32CR0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
1431 {
1432 //Log2(("raw mode refused: %s%s%s\n", (u32CR0 & X86_CR0_PG) ? "" : " !PG", (u32CR0 & X86_CR0_PE) ? "" : " !PE", (u32CR0 & X86_CR0_AM) ? "" : " !AM"));
1433 return EMSTATE_REM;
1434 }
1435
1436 if (pCtx->cr4 & X86_CR4_PAE)
1437 {
1438 uint32_t u32Dummy, u32Features;
1439
1440 CPUMGetGuestCpuId(pVCpu, 1, 0, &u32Dummy, &u32Dummy, &u32Dummy, &u32Features);
1441 if (!(u32Features & X86_CPUID_FEATURE_EDX_PAE))
1442 return EMSTATE_REM;
1443 }
1444
1445 unsigned uSS = pCtx->ss.Sel;
1446 if ( pCtx->eflags.Bits.u1VM
1447 || (uSS & X86_SEL_RPL) == 3)
1448 {
1449 if (!EMIsRawRing3Enabled(pVM))
1450 return EMSTATE_REM;
1451
1452 if (!(EFlags.u32 & X86_EFL_IF))
1453 {
1454 Log2(("raw mode refused: IF (RawR3)\n"));
1455 return EMSTATE_REM;
1456 }
1457
1458 if (!(u32CR0 & X86_CR0_WP) && EMIsRawRing0Enabled(pVM))
1459 {
1460 Log2(("raw mode refused: CR0.WP + RawR0\n"));
1461 return EMSTATE_REM;
1462 }
1463 }
1464 else
1465 {
1466 if (!EMIsRawRing0Enabled(pVM))
1467 return EMSTATE_REM;
1468
1469 if (EMIsRawRing1Enabled(pVM))
1470 {
1471 /* Only ring 0 and 1 supervisor code. */
1472 if ((uSS & X86_SEL_RPL) == 2) /* ring 1 code is moved into ring 2, so we can't support ring-2 in that case. */
1473 {
1474 Log2(("raw r0 mode refused: CPL %d\n", uSS & X86_SEL_RPL));
1475 return EMSTATE_REM;
1476 }
1477 }
1478 /* Only ring 0 supervisor code. */
1479 else if ((uSS & X86_SEL_RPL) != 0)
1480 {
1481 Log2(("raw r0 mode refused: CPL %d\n", uSS & X86_SEL_RPL));
1482 return EMSTATE_REM;
1483 }
1484
1485 // Let's start with pure 32 bits ring 0 code first
1486 /** @todo What's pure 32-bit mode? flat? */
1487 if ( !(pCtx->ss.Attr.n.u1DefBig)
1488 || !(pCtx->cs.Attr.n.u1DefBig))
1489 {
1490 Log2(("raw r0 mode refused: SS/CS not 32bit\n"));
1491 return EMSTATE_REM;
1492 }
1493
1494 /* Write protection must be turned on, or else the guest can overwrite our hypervisor code and data. */
1495 if (!(u32CR0 & X86_CR0_WP))
1496 {
1497 Log2(("raw r0 mode refused: CR0.WP=0!\n"));
1498 return EMSTATE_REM;
1499 }
1500
1501# ifdef VBOX_WITH_RAW_MODE
1502 if (PATMShouldUseRawMode(pVM, (RTGCPTR)pCtx->eip))
1503 {
1504 Log2(("raw r0 mode forced: patch code\n"));
1505# ifdef VBOX_WITH_SAFE_STR
1506 Assert(pCtx->tr.Sel);
1507# endif
1508 return EMSTATE_RAW;
1509 }
1510# endif /* VBOX_WITH_RAW_MODE */
1511
1512# if !defined(VBOX_ALLOW_IF0) && !defined(VBOX_RUN_INTERRUPT_GATE_HANDLERS)
1513 if (!(EFlags.u32 & X86_EFL_IF))
1514 {
1515 ////Log2(("R0: IF=0 VIF=%d %08X\n", eip, pVMeflags));
1516 //Log2(("RR0: Interrupts turned off; fall back to emulation\n"));
1517 return EMSTATE_REM;
1518 }
1519# endif
1520
1521# ifndef VBOX_WITH_RAW_RING1
1522 /** @todo still necessary??? */
1523 if (EFlags.Bits.u2IOPL != 0)
1524 {
1525 Log2(("raw r0 mode refused: IOPL %d\n", EFlags.Bits.u2IOPL));
1526 return EMSTATE_REM;
1527 }
1528# endif
1529 }
1530
1531 /*
1532 * Stale hidden selectors mean raw-mode is unsafe (we are being very careful here).
1533 */
1534 if (pCtx->cs.fFlags & CPUMSELREG_FLAGS_STALE)
1535 {
1536 Log2(("raw mode refused: stale CS\n"));
1537 return EMSTATE_REM;
1538 }
1539 if (pCtx->ss.fFlags & CPUMSELREG_FLAGS_STALE)
1540 {
1541 Log2(("raw mode refused: stale SS\n"));
1542 return EMSTATE_REM;
1543 }
1544 if (pCtx->ds.fFlags & CPUMSELREG_FLAGS_STALE)
1545 {
1546 Log2(("raw mode refused: stale DS\n"));
1547 return EMSTATE_REM;
1548 }
1549 if (pCtx->es.fFlags & CPUMSELREG_FLAGS_STALE)
1550 {
1551 Log2(("raw mode refused: stale ES\n"));
1552 return EMSTATE_REM;
1553 }
1554 if (pCtx->fs.fFlags & CPUMSELREG_FLAGS_STALE)
1555 {
1556 Log2(("raw mode refused: stale FS\n"));
1557 return EMSTATE_REM;
1558 }
1559 if (pCtx->gs.fFlags & CPUMSELREG_FLAGS_STALE)
1560 {
1561 Log2(("raw mode refused: stale GS\n"));
1562 return EMSTATE_REM;
1563 }
1564
1565# ifdef VBOX_WITH_SAFE_STR
1566 if (pCtx->tr.Sel == 0)
1567 {
1568 Log(("Raw mode refused -> TR=0\n"));
1569 return EMSTATE_REM;
1570 }
1571# endif
1572
1573 /*Assert(PGMPhysIsA20Enabled(pVCpu));*/
1574 return EMSTATE_RAW;
1575}
1576
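/* Decision examples (informal, derived from the checks above): with HM
 * enabled and HMR3CanExecuteGuest() content we get EMSTATE_HM, otherwise
 * EMSTATE_IEM_THEN_REM. Without HM, flat 32-bit ring-0 code with CR0.WP set
 * and fresh hidden selectors may get EMSTATE_RAW, while e.g. EFLAGS.TF being
 * set always yields EMSTATE_REM. */
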
1577
1578/**
1579 * Executes all high priority post execution force actions.
1580 *
1581 * @returns rc or a fatal status code.
1582 *
1583 * @param pVM The cross context VM structure.
1584 * @param pVCpu The cross context virtual CPU structure.
1585 * @param rc The current rc.
1586 */
1587int emR3HighPriorityPostForcedActions(PVM pVM, PVMCPU pVCpu, int rc)
1588{
1589 VBOXVMM_EM_FF_HIGH(pVCpu, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions, rc);
1590
1591 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PDM_CRITSECT))
1592 PDMCritSectBothFF(pVCpu);
1593
1594 /* Update CR3 (Nested Paging case for HM). */
1595 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
1596 {
1597 int rc2 = PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
1598 if (RT_FAILURE(rc2))
1599 return rc2;
1600 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
1601 }
1602
1603 /* Update PAE PDPEs. This must be done *after* PGMUpdateCR3() and used only by the Nested Paging case for HM. */
1604 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES))
1605 {
1606 if (CPUMIsGuestInPAEMode(pVCpu))
1607 {
1608 PX86PDPE pPdpes = HMGetPaePdpes(pVCpu);
1609 AssertPtr(pPdpes);
1610
1611 PGMGstUpdatePaePdpes(pVCpu, pPdpes);
1612 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
1613 }
1614 else
1615 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES);
1616 }
1617
1618 /* IEM has pending work (typically memory write after INS instruction). */
1619 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_IEM))
1620 rc = VBOXSTRICTRC_TODO(IEMR3ProcessForceFlag(pVM, pVCpu, rc));
1621
1622 /* IOM has pending work (committing an I/O or MMIO write). */
1623 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_IOM))
1624 rc = VBOXSTRICTRC_TODO(IOMR3ProcessForceFlag(pVM, pVCpu, rc));
1625
1626#ifdef VBOX_WITH_RAW_MODE
1627 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_CSAM_PENDING_ACTION))
1628 CSAMR3DoPendingAction(pVM, pVCpu);
1629#endif
1630
1631 if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
1632 {
1633 if ( rc > VINF_EM_NO_MEMORY
1634 && rc <= VINF_EM_LAST)
1635 rc = VINF_EM_NO_MEMORY;
1636 }
1637
1638 return rc;
1639}
1640
1641
1642/**
1643 * Executes all pending forced actions.
1644 *
1645 * Forced actions can cause execution delays and execution
1646 * rescheduling. The first we deal with using action priority, so
1647 * that for instance pending timers aren't scheduled and run until
1648 * right before execution. The rescheduling we deal with using
1649 * return codes. The same goes for VM termination, only in that case
1650 * we exit everything.
1651 *
1652 * @returns VBox status code of equal or greater importance/severity than rc.
1653 * The most important ones are: VINF_EM_RESCHEDULE,
1654 * VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1655 *
1656 * @param pVM The cross context VM structure.
1657 * @param pVCpu The cross context virtual CPU structure.
1658 * @param rc The current rc.
1659 *
1660 */
1661int emR3ForcedActions(PVM pVM, PVMCPU pVCpu, int rc)
1662{
1663 STAM_REL_PROFILE_START(&pVCpu->em.s.StatForcedActions, a);
1664#ifdef VBOX_STRICT
1665 int rcIrq = VINF_SUCCESS;
1666#endif
1667 int rc2;
1668#define UPDATE_RC() \
1669 do { \
1670 AssertMsg(rc2 <= 0 || (rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST), ("Invalid FF return code: %Rra\n", rc2)); \
1671 if (rc2 == VINF_SUCCESS || rc < VINF_SUCCESS) \
1672 break; \
1673 if (!rc || rc2 < rc) \
1674 rc = rc2; \
1675 } while (0)
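    /* Illustrative note (a sketch, not code from this file): UPDATE_RC() merges
       rc2 into rc so that the lower, i.e. more important, informational status
       wins. Assuming the usual VINF_EM_* ordering where VINF_EM_SUSPEND has a
       lower value than VINF_EM_RESCHEDULE:

           rc  = VINF_EM_RESCHEDULE;
           rc2 = VINF_EM_SUSPEND;
           UPDATE_RC();                // rc2 < rc => rc = VINF_EM_SUSPEND

       Errors (rc < VINF_SUCCESS) and rc2 == VINF_SUCCESS leave rc untouched. */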
1676 VBOXVMM_EM_FF_ALL(pVCpu, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions, rc);
1677
1678 /*
1679 * Post execution chunk first.
1680 */
1681 if ( VM_FF_IS_PENDING(pVM, VM_FF_NORMAL_PRIORITY_POST_MASK)
1682 || (VMCPU_FF_NORMAL_PRIORITY_POST_MASK && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_NORMAL_PRIORITY_POST_MASK)) )
1683 {
1684 /*
1685 * EMT Rendezvous (must be serviced before termination).
1686 */
1687 if (VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS))
1688 {
1689 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
1690 UPDATE_RC();
1691 /** @todo HACK ALERT! The following test is to make sure EM+TM
1692 * thinks the VM is stopped/reset before the next VM state change
1693 * is made. We need a better solution for this, or at least make it
1694 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1695 * VINF_EM_SUSPEND). */
1696 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1697 {
1698 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1699 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1700 return rc;
1701 }
1702 }
1703
1704 /*
1705 * State change request (cleared by vmR3SetStateLocked).
1706 */
1707 if (VM_FF_IS_PENDING(pVM, VM_FF_CHECK_VM_STATE))
1708 {
1709 VMSTATE enmState = VMR3GetState(pVM);
1710 switch (enmState)
1711 {
1712 case VMSTATE_FATAL_ERROR:
1713 case VMSTATE_FATAL_ERROR_LS:
1714 case VMSTATE_GURU_MEDITATION:
1715 case VMSTATE_GURU_MEDITATION_LS:
1716 Log2(("emR3ForcedActions: %s -> VINF_EM_SUSPEND\n", VMGetStateName(enmState) ));
1717 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1718 return VINF_EM_SUSPEND;
1719
1720 case VMSTATE_DESTROYING:
1721 Log2(("emR3ForcedActions: %s -> VINF_EM_TERMINATE\n", VMGetStateName(enmState) ));
1722 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1723 return VINF_EM_TERMINATE;
1724
1725 default:
1726 AssertMsgFailed(("%s\n", VMGetStateName(enmState)));
1727 }
1728 }
1729
1730 /*
1731 * Debugger Facility polling.
1732 */
1733 if ( VM_FF_IS_PENDING(pVM, VM_FF_DBGF)
1734 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_DBGF) )
1735 {
1736 rc2 = DBGFR3VMMForcedAction(pVM, pVCpu);
1737 UPDATE_RC();
1738 }
1739
1740 /*
1741 * Postponed reset request.
1742 */
1743 if (VM_FF_TEST_AND_CLEAR(pVM, VM_FF_RESET))
1744 {
1745 rc2 = VBOXSTRICTRC_TODO(VMR3ResetFF(pVM));
1746 UPDATE_RC();
1747 }
1748
1749#ifdef VBOX_WITH_RAW_MODE
1750 /*
1751 * CSAM page scanning.
1752 */
1753 if ( !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)
1754 && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_CSAM_SCAN_PAGE))
1755 {
1756 PCPUMCTX pCtx = pVCpu->em.s.pCtx;
1757
1758 /** @todo check for 16 or 32 bits code! (D bit in the code selector) */
1759 Log(("Forced action VMCPU_FF_CSAM_SCAN_PAGE\n"));
1760
1761 CSAMR3CheckCodeEx(pVM, pCtx, pCtx->eip);
1762 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_CSAM_SCAN_PAGE);
1763 }
1764#endif
1765
1766 /*
1767 * Out of memory? Putting this after CSAM, as the scanning may in theory cause us to run out of memory.
1768 */
1769 if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
1770 {
1771 rc2 = PGMR3PhysAllocateHandyPages(pVM);
1772 UPDATE_RC();
1773 if (rc == VINF_EM_NO_MEMORY)
1774 return rc;
1775 }
1776
1777 /* check that we got them all */
1778 AssertCompile(VM_FF_NORMAL_PRIORITY_POST_MASK == (VM_FF_CHECK_VM_STATE | VM_FF_DBGF | VM_FF_RESET | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
1779 AssertCompile(VMCPU_FF_NORMAL_PRIORITY_POST_MASK == (VM_WHEN_RAW_MODE(VMCPU_FF_CSAM_SCAN_PAGE, 0) | VMCPU_FF_DBGF));
1780 }
1781
1782 /*
1783 * Normal priority then.
1784 * (Executed in no particular order.)
1785 */
1786 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_NORMAL_PRIORITY_MASK, VM_FF_PGM_NO_MEMORY))
1787 {
1788 /*
1789 * PDM Queues are pending.
1790 */
1791 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PDM_QUEUES, VM_FF_PGM_NO_MEMORY))
1792 PDMR3QueueFlushAll(pVM);
1793
1794 /*
1795 * PDM DMA transfers are pending.
1796 */
1797 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PDM_DMA, VM_FF_PGM_NO_MEMORY))
1798 PDMR3DmaRun(pVM);
1799
1800 /*
1801 * EMT Rendezvous (make sure they are handled before the requests).
1802 */
1803 if (VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS))
1804 {
1805 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
1806 UPDATE_RC();
1807 /** @todo HACK ALERT! The following test is to make sure EM+TM
1808 * thinks the VM is stopped/reset before the next VM state change
1809 * is made. We need a better solution for this, or at least make it
1810 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1811 * VINF_EM_SUSPEND). */
1812 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1813 {
1814 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1815 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1816 return rc;
1817 }
1818 }
1819
1820 /*
1821 * Requests from other threads.
1822 */
1823 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_REQUEST, VM_FF_PGM_NO_MEMORY))
1824 {
1825 rc2 = VMR3ReqProcessU(pVM->pUVM, VMCPUID_ANY, false /*fPriorityOnly*/);
1826 if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE) /** @todo this shouldn't be necessary */
1827 {
1828 Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
1829 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1830 return rc2;
1831 }
1832 UPDATE_RC();
1833 /** @todo HACK ALERT! The following test is to make sure EM+TM
1834 * thinks the VM is stopped/reset before the next VM state change
1835 * is made. We need a better solution for this, or at least make it
1836 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1837 * VINF_EM_SUSPEND). */
1838 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1839 {
1840 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1841 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1842 return rc;
1843 }
1844 }
1845
1846#ifdef VBOX_WITH_REM
1847 /* Replay the handler notification changes. */
1848 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_REM_HANDLER_NOTIFY, VM_FF_PGM_NO_MEMORY))
1849 {
1850 /* Try not to cause deadlocks. */
1851 if ( pVM->cCpus == 1
1852 || ( !PGMIsLockOwner(pVM)
1853 && !IOMIsLockWriteOwner(pVM))
1854 )
1855 {
1856 EMRemLock(pVM);
1857 REMR3ReplayHandlerNotifications(pVM);
1858 EMRemUnlock(pVM);
1859 }
1860 }
1861#endif
1862
1863 /* check that we got them all */
1864 AssertCompile(VM_FF_NORMAL_PRIORITY_MASK == (VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA | VM_FF_REM_HANDLER_NOTIFY | VM_FF_EMT_RENDEZVOUS));
1865 }
1866
1867 /*
1868 * Normal priority then. (per-VCPU)
1869 * (Executed in no particular order.)
1870 */
1871 if ( !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)
1872 && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_NORMAL_PRIORITY_MASK))
1873 {
1874 /*
1875 * Requests from other threads.
1876 */
1877 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_REQUEST))
1878 {
1879 rc2 = VMR3ReqProcessU(pVM->pUVM, pVCpu->idCpu, false /*fPriorityOnly*/);
1880 if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE || rc2 == VINF_EM_RESET)
1881 {
1882 Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
1883 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1884 return rc2;
1885 }
1886 UPDATE_RC();
1887 /** @todo HACK ALERT! The following test is to make sure EM+TM
1888 * thinks the VM is stopped/reset before the next VM state change
1889 * is made. We need a better solution for this, or at least make it
1890 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1891 * VINF_EM_SUSPEND). */
1892 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1893 {
1894 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1895 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1896 return rc;
1897 }
1898 }
1899
1900 /*
1901 * Forced unhalting of EMT.
1902 */
1903 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_UNHALT))
1904 {
1905 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_UNHALT);
1906 if (rc == VINF_EM_HALT)
1907 rc = VINF_EM_RESCHEDULE;
1908 else
1909 {
1910 rc2 = VINF_EM_RESCHEDULE;
1911 UPDATE_RC();
1912 }
1913 }
1914
1915 /* check that we got them all */
1916 Assert(!(VMCPU_FF_NORMAL_PRIORITY_MASK & ~(VMCPU_FF_REQUEST | VMCPU_FF_UNHALT)));
1917 }
1918
1919 /*
1920 * High priority pre execution chunk last.
1921 * (Executed in ascending priority order.)
1922 */
1923 if ( VM_FF_IS_PENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_MASK)
1924 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_MASK))
1925 {
1926 /*
1927 * Timers before interrupts.
1928 */
1929 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TIMER)
1930 && !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
1931 TMR3TimerQueuesDo(pVM);
1932
1933 /*
1934 * Pick up asynchronously posted interrupts into the APIC.
1935 */
1936 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
1937 APICUpdatePendingInterrupts(pVCpu);
1938
1939 /*
1940 * The instruction following an emulated STI should *always* be executed!
1941 *
1942 * Note! We intentionally don't clear VM_FF_INHIBIT_INTERRUPTS here if
1943 * the eip is the same as the inhibited instr address. Before we
1944 * are able to execute this instruction in raw mode (iret to
1945 * guest code) an external interrupt might force a world switch
1946 * again. Possibly allowing a guest interrupt to be dispatched
1947 * in the process. This could break the guest. Sounds very
1948 * unlikely, but such timing-sensitive problems are not as rare as
1949 * you might think.
1950 */
1951 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
1952 && !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
1953 {
1954 if (CPUMGetGuestRIP(pVCpu) != EMGetInhibitInterruptsPC(pVCpu))
1955 {
1956 Log(("Clearing VMCPU_FF_INHIBIT_INTERRUPTS at %RGv - successor %RGv\n", (RTGCPTR)CPUMGetGuestRIP(pVCpu), EMGetInhibitInterruptsPC(pVCpu)));
1957 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
1958 }
1959 else
1960 Log(("Leaving VMCPU_FF_INHIBIT_INTERRUPTS set at %RGv\n", (RTGCPTR)CPUMGetGuestRIP(pVCpu)));
1961 }
1962
1963 /*
1964 * Interrupts.
1965 */
1966 bool fWakeupPending = false;
1967 if ( !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)
1968 && !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
1969 && (!rc || rc >= VINF_EM_RESCHEDULE_HM)
1970 && !TRPMHasTrap(pVCpu) /* an interrupt could already be scheduled for dispatching in the recompiler. */
1971#ifdef VBOX_WITH_RAW_MODE
1972 && PATMAreInterruptsEnabled(pVM)
1973#else
1974 && (pVCpu->em.s.pCtx->eflags.u32 & X86_EFL_IF)
1975#endif
1976 && !HMR3IsEventPending(pVCpu))
1977 {
1978 Assert(pVCpu->em.s.enmState != EMSTATE_WAIT_SIPI);
1979 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
1980 {
1981 /* Note: it's important to make sure the return code from TRPMR3InjectEvent isn't ignored! */
1982 /** @todo this really isn't nice, should properly handle this */
1983 rc2 = TRPMR3InjectEvent(pVM, pVCpu, TRPM_HARDWARE_INT);
1984 if (pVM->em.s.fIemExecutesAll && (rc2 == VINF_EM_RESCHEDULE_REM || rc2 == VINF_EM_RESCHEDULE_HM || rc2 == VINF_EM_RESCHEDULE_RAW))
1985 rc2 = VINF_EM_RESCHEDULE;
1986#ifdef VBOX_STRICT
1987 rcIrq = rc2;
1988#endif
1989 UPDATE_RC();
1990 /* Reschedule required: We must not miss the wakeup below! */
1991 fWakeupPending = true;
1992 }
1993 }
1994
1995 /*
1996 * Allocate handy pages.
1997 */
1998 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PGM_NEED_HANDY_PAGES, VM_FF_PGM_NO_MEMORY))
1999 {
2000 rc2 = PGMR3PhysAllocateHandyPages(pVM);
2001 UPDATE_RC();
2002 }
2003
2004 /*
2005 * Debugger Facility request.
2006 */
2007 if ( ( VM_FF_IS_PENDING(pVM, VM_FF_DBGF)
2008 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_DBGF) )
2009 && !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY) )
2010 {
2011 rc2 = DBGFR3VMMForcedAction(pVM, pVCpu);
2012 UPDATE_RC();
2013 }
2014
2015 /*
2016 * EMT Rendezvous (must be serviced before termination).
2017 */
2018 if ( !fWakeupPending /* don't miss the wakeup from EMSTATE_HALTED! */
2019 && VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS))
2020 {
2021 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
2022 UPDATE_RC();
2023 /** @todo HACK ALERT! The following test is to make sure EM+TM thinks the VM is
2024 * stopped/reset before the next VM state change is made. We need a better
2025 * solution for this, or at least make it possible to do: (rc >= VINF_EM_FIRST
2026 * && rc <= VINF_EM_SUSPEND). */
2027 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
2028 {
2029 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2030 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2031 return rc;
2032 }
2033 }
2034
2035 /*
2036 * State change request (cleared by vmR3SetStateLocked).
2037 */
2038 if ( !fWakeupPending /* don't miss the wakeup from EMSTATE_HALTED! */
2039 && VM_FF_IS_PENDING(pVM, VM_FF_CHECK_VM_STATE))
2040 {
2041 VMSTATE enmState = VMR3GetState(pVM);
2042 switch (enmState)
2043 {
2044 case VMSTATE_FATAL_ERROR:
2045 case VMSTATE_FATAL_ERROR_LS:
2046 case VMSTATE_GURU_MEDITATION:
2047 case VMSTATE_GURU_MEDITATION_LS:
2048 Log2(("emR3ForcedActions: %s -> VINF_EM_SUSPEND\n", VMGetStateName(enmState) ));
2049 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2050 return VINF_EM_SUSPEND;
2051
2052 case VMSTATE_DESTROYING:
2053 Log2(("emR3ForcedActions: %s -> VINF_EM_TERMINATE\n", VMGetStateName(enmState) ));
2054 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2055 return VINF_EM_TERMINATE;
2056
2057 default:
2058 AssertMsgFailed(("%s\n", VMGetStateName(enmState)));
2059 }
2060 }
2061
2062 /*
2063 * Out of memory? Since most of our fellow high priority actions may cause us
2064 * to run out of memory, we're employing VM_FF_IS_PENDING_EXCEPT and putting this
2065 * at the end rather than the start. Also, VM_FF_TERMINATE has higher priority
2066 * than us since we can terminate without allocating more memory.
2067 */
2068 if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
2069 {
2070 rc2 = PGMR3PhysAllocateHandyPages(pVM);
2071 UPDATE_RC();
2072 if (rc == VINF_EM_NO_MEMORY)
2073 return rc;
2074 }
2075
2076 /*
2077 * If the virtual sync clock is still stopped, make TM restart it.
2078 */
2079 if (VM_FF_IS_PENDING(pVM, VM_FF_TM_VIRTUAL_SYNC))
2080 TMR3VirtualSyncFF(pVM, pVCpu);
2081
2082#ifdef DEBUG
2083 /*
2084 * Debug, pause the VM.
2085 */
2086 if (VM_FF_IS_PENDING(pVM, VM_FF_DEBUG_SUSPEND))
2087 {
2088 VM_FF_CLEAR(pVM, VM_FF_DEBUG_SUSPEND);
2089 Log(("emR3ForcedActions: returns VINF_EM_SUSPEND\n"));
2090 return VINF_EM_SUSPEND;
2091 }
2092#endif
2093
2094 /* check that we got them all */
2095 AssertCompile(VM_FF_HIGH_PRIORITY_PRE_MASK == (VM_FF_TM_VIRTUAL_SYNC | VM_FF_DBGF | VM_FF_CHECK_VM_STATE | VM_FF_DEBUG_SUSPEND | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
2096 AssertCompile(VMCPU_FF_HIGH_PRIORITY_PRE_MASK == (VMCPU_FF_TIMER | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_INHIBIT_INTERRUPTS | VMCPU_FF_DBGF | VM_WHEN_RAW_MODE(VMCPU_FF_SELM_SYNC_TSS | VMCPU_FF_TRPM_SYNC_IDT | VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT, 0)));
2097 }
2098
2099#undef UPDATE_RC
2100 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2101 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2102 Assert(rcIrq == VINF_SUCCESS || rcIrq == rc);
2103 return rc;
2104}
2105
2106
2107/**
2108 * Check whether the configured CPU execution-time cap still allows guest execution.
2109 *
2110 * @returns true if execution is allowed, false otherwise.
2111 * @param pVM The cross context VM structure.
2112 * @param pVCpu The cross context virtual CPU structure.
2113 */
2114bool emR3IsExecutionAllowed(PVM pVM, PVMCPU pVCpu)
2115{
2116 uint64_t u64UserTime, u64KernelTime;
2117
2118 if ( pVM->uCpuExecutionCap != 100
2119 && RT_SUCCESS(RTThreadGetExecutionTimeMilli(&u64KernelTime, &u64UserTime)))
2120 {
2121 uint64_t u64TimeNow = RTTimeMilliTS();
2122 if (pVCpu->em.s.u64TimeSliceStart + EM_TIME_SLICE < u64TimeNow)
2123 {
2124 /* New time slice. */
2125 pVCpu->em.s.u64TimeSliceStart = u64TimeNow;
2126 pVCpu->em.s.u64TimeSliceStartExec = u64KernelTime + u64UserTime;
2127 pVCpu->em.s.u64TimeSliceExec = 0;
2128 }
2129 pVCpu->em.s.u64TimeSliceExec = u64KernelTime + u64UserTime - pVCpu->em.s.u64TimeSliceStartExec;
2130
2131 Log2(("emR3IsExecutionAllowed: start=%RX64 startexec=%RX64 exec=%RX64 (cap=%x)\n", pVCpu->em.s.u64TimeSliceStart, pVCpu->em.s.u64TimeSliceStartExec, pVCpu->em.s.u64TimeSliceExec, (EM_TIME_SLICE * pVM->uCpuExecutionCap) / 100));
2132 if (pVCpu->em.s.u64TimeSliceExec >= (EM_TIME_SLICE * pVM->uCpuExecutionCap) / 100)
2133 return false;
2134 }
2135 return true;
2136}
2137
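/* Worked example (values assumed for illustration only): with an execution
 * cap of 50 and a hypothetical EM_TIME_SLICE of 100 ms, the first call in a
 * fresh slice records the kernel+user time consumed so far; once the EMT has
 * burned 100 * 50 / 100 = 50 ms within that slice the function returns false,
 * and e.g. emR3RemExecute() responds by sleeping in 5 ms chunks until a new
 * slice starts. */
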
2138
2139/**
2140 * Execute VM.
2141 *
2142 * This function is the main loop of the VM. The emulation thread
2143 * calls this function when the VM has been successfully constructed
2144 * and we're ready to execute the VM.
2145 *
2146 * Returning from this function means that the VM is turned off or
2147 * suspended (state already saved) and deconstruction is next in line.
2148 *
2149 * All interaction from other threads is done using forced actions
2150 * and signaling of the wait object.
2151 *
2152 * @returns VBox status code; informational status codes may indicate failure.
2153 * @param pVM The cross context VM structure.
2154 * @param pVCpu The cross context virtual CPU structure.
2155 */
2156VMMR3_INT_DECL(int) EMR3ExecuteVM(PVM pVM, PVMCPU pVCpu)
2157{
2158 Log(("EMR3ExecuteVM: pVM=%p enmVMState=%d (%s) enmState=%d (%s) enmPrevState=%d (%s) fForceRAW=%RTbool\n",
2159 pVM,
2160 pVM->enmVMState, VMR3GetStateName(pVM->enmVMState),
2161 pVCpu->em.s.enmState, emR3GetStateName(pVCpu->em.s.enmState),
2162 pVCpu->em.s.enmPrevState, emR3GetStateName(pVCpu->em.s.enmPrevState),
2163 pVCpu->em.s.fForceRAW));
2164 VM_ASSERT_EMT(pVM);
2165 AssertMsg( pVCpu->em.s.enmState == EMSTATE_NONE
2166 || pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI
2167 || pVCpu->em.s.enmState == EMSTATE_SUSPENDED,
2168 ("%s\n", emR3GetStateName(pVCpu->em.s.enmState)));
2169
2170 int rc = setjmp(pVCpu->em.s.u.FatalLongJump);
2171 if (rc == 0)
2172 {
2173 /*
2174 * Start the virtual time.
2175 */
2176 TMR3NotifyResume(pVM, pVCpu);
2177
2178 /*
2179 * The Outer Main Loop.
2180 */
2181 bool fFFDone = false;
2182
2183 /* Reschedule right away to start in the right state. */
2184 rc = VINF_SUCCESS;
2185
2186 /* If resuming after a pause or a state load, restore the previous
2187 state or else we'll start executing code. Else, just reschedule. */
2188 if ( pVCpu->em.s.enmState == EMSTATE_SUSPENDED
2189 && ( pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
2190 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED))
2191 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
2192 else
2193 pVCpu->em.s.enmState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
2194 pVCpu->em.s.cIemThenRemInstructions = 0;
2195 Log(("EMR3ExecuteVM: enmState=%s\n", emR3GetStateName(pVCpu->em.s.enmState)));
2196
2197 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2198 for (;;)
2199 {
2200 /*
2201 * Before we can schedule anything (we're here because
2202 * scheduling is required) we must service any pending
2203 * forced actions to avoid any pending action causing
2204 * immediate rescheduling upon entering an inner loop
2205 *
2206 * Do forced actions.
2207 */
2208 if ( !fFFDone
2209 && RT_SUCCESS(rc)
2210 && rc != VINF_EM_TERMINATE
2211 && rc != VINF_EM_OFF
2212 && ( VM_FF_IS_PENDING(pVM, VM_FF_ALL_REM_MASK)
2213 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_ALL_REM_MASK)))
2214 {
2215 rc = emR3ForcedActions(pVM, pVCpu, rc);
2216 VBOXVMM_EM_FF_ALL_RET(pVCpu, rc);
2217 if ( ( rc == VINF_EM_RESCHEDULE_REM
2218 || rc == VINF_EM_RESCHEDULE_HM)
2219 && pVCpu->em.s.fForceRAW)
2220 rc = VINF_EM_RESCHEDULE_RAW;
2221 }
2222 else if (fFFDone)
2223 fFFDone = false;
2224
2225 /*
2226 * Now what to do?
2227 */
2228 Log2(("EMR3ExecuteVM: rc=%Rrc\n", rc));
2229 EMSTATE const enmOldState = pVCpu->em.s.enmState;
2230 switch (rc)
2231 {
2232 /*
2233 * Keep doing what we're currently doing.
2234 */
2235 case VINF_SUCCESS:
2236 break;
2237
2238 /*
2239 * Reschedule - to raw-mode execution.
2240 */
2241 case VINF_EM_RESCHEDULE_RAW:
2242 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_RAW: %d -> %d (EMSTATE_RAW)\n", enmOldState, EMSTATE_RAW));
2243 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2244 pVCpu->em.s.enmState = EMSTATE_RAW;
2245 break;
2246
2247 /*
2248 * Reschedule - to hardware accelerated raw-mode execution.
2249 */
2250 case VINF_EM_RESCHEDULE_HM:
2251 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_HM: %d -> %d (EMSTATE_HM)\n", enmOldState, EMSTATE_HM));
2252 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2253 Assert(!pVCpu->em.s.fForceRAW);
2254 pVCpu->em.s.enmState = EMSTATE_HM;
2255 break;
2256
2257 /*
2258 * Reschedule - to recompiled execution.
2259 */
2260 case VINF_EM_RESCHEDULE_REM:
2261 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2262 if (HMIsEnabled(pVM))
2263 {
2264 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_REM: %d -> %d (EMSTATE_IEM_THEN_REM)\n",
2265 enmOldState, EMSTATE_IEM_THEN_REM));
2266 if (pVCpu->em.s.enmState != EMSTATE_IEM_THEN_REM)
2267 {
2268 pVCpu->em.s.enmState = EMSTATE_IEM_THEN_REM;
2269 pVCpu->em.s.cIemThenRemInstructions = 0;
2270 }
2271 }
2272 else
2273 {
2274 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_REM: %d -> %d (EMSTATE_REM)\n", enmOldState, EMSTATE_REM));
2275 pVCpu->em.s.enmState = EMSTATE_REM;
2276 }
2277 break;
2278
2279 /*
2280 * Resume.
2281 */
2282 case VINF_EM_RESUME:
2283 Log2(("EMR3ExecuteVM: VINF_EM_RESUME: %d -> VINF_EM_RESCHEDULE\n", enmOldState));
2284 /* Don't reschedule in the halted or wait for SIPI case. */
2285 if ( pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
2286 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED)
2287 {
2288 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
2289 break;
2290 }
2291 /* fall through and get scheduled. */
2292
2293 /*
2294 * Reschedule.
2295 */
2296 case VINF_EM_RESCHEDULE:
2297 {
2298 EMSTATE enmState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
2299 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE: %d -> %d (%s)\n", enmOldState, enmState, emR3GetStateName(enmState)));
2300 if (pVCpu->em.s.enmState != enmState && enmState == EMSTATE_IEM_THEN_REM)
2301 pVCpu->em.s.cIemThenRemInstructions = 0;
2302 pVCpu->em.s.enmState = enmState;
2303 break;
2304 }
2305
2306 /*
2307 * Halted.
2308 */
2309 case VINF_EM_HALT:
2310 Log2(("EMR3ExecuteVM: VINF_EM_HALT: %d -> %d\n", enmOldState, EMSTATE_HALTED));
2311 pVCpu->em.s.enmState = EMSTATE_HALTED;
2312 break;
2313
2314 /*
2315 * Switch to the wait for SIPI state (application processor only)
2316 */
2317 case VINF_EM_WAIT_SIPI:
2318 Assert(pVCpu->idCpu != 0);
2319 Log2(("EMR3ExecuteVM: VINF_EM_WAIT_SIPI: %d -> %d\n", enmOldState, EMSTATE_WAIT_SIPI));
2320 pVCpu->em.s.enmState = EMSTATE_WAIT_SIPI;
2321 break;
2322
2323
2324 /*
2325 * Suspend.
2326 */
2327 case VINF_EM_SUSPEND:
2328 Log2(("EMR3ExecuteVM: VINF_EM_SUSPEND: %d -> %d\n", enmOldState, EMSTATE_SUSPENDED));
2329 Assert(enmOldState != EMSTATE_SUSPENDED);
2330 pVCpu->em.s.enmPrevState = enmOldState;
2331 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
2332 break;
2333
2334 /*
2335 * Reset.
2336 * We might end up doing a double reset for now; we'll have to clean up the mess later.
2337 */
2338 case VINF_EM_RESET:
2339 {
2340 if (pVCpu->idCpu == 0)
2341 {
2342 EMSTATE enmState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
2343 Log2(("EMR3ExecuteVM: VINF_EM_RESET: %d -> %d (%s)\n", enmOldState, enmState, emR3GetStateName(enmState)));
2344 if (pVCpu->em.s.enmState != enmState && enmState == EMSTATE_IEM_THEN_REM)
2345 pVCpu->em.s.cIemThenRemInstructions = 0;
2346 pVCpu->em.s.enmState = enmState;
2347 }
2348 else
2349 {
2350 /* All other VCPUs go into the wait for SIPI state. */
2351 pVCpu->em.s.enmState = EMSTATE_WAIT_SIPI;
2352 }
2353 break;
2354 }
2355
2356 /*
2357 * Power Off.
2358 */
2359 case VINF_EM_OFF:
2360 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2361 Log2(("EMR3ExecuteVM: returns VINF_EM_OFF (%d -> %d)\n", enmOldState, EMSTATE_TERMINATING));
2362 TMR3NotifySuspend(pVM, pVCpu);
2363 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2364 return rc;
2365
2366 /*
2367 * Terminate the VM.
2368 */
2369 case VINF_EM_TERMINATE:
2370 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2371 Log(("EMR3ExecuteVM returns VINF_EM_TERMINATE (%d -> %d)\n", enmOldState, EMSTATE_TERMINATING));
2372 if (pVM->enmVMState < VMSTATE_DESTROYING) /* ugly */
2373 TMR3NotifySuspend(pVM, pVCpu);
2374 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2375 return rc;
2376
2377
2378 /*
2379 * Out of memory, suspend the VM and stuff.
2380 */
2381 case VINF_EM_NO_MEMORY:
2382 Log2(("EMR3ExecuteVM: VINF_EM_NO_MEMORY: %d -> %d\n", enmOldState, EMSTATE_SUSPENDED));
2383 Assert(enmOldState != EMSTATE_SUSPENDED);
2384 pVCpu->em.s.enmPrevState = enmOldState;
2385 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
2386 TMR3NotifySuspend(pVM, pVCpu);
2387 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2388
2389 rc = VMSetRuntimeError(pVM, VMSETRTERR_FLAGS_SUSPEND, "HostMemoryLow",
2390 N_("Unable to allocate and lock memory. The virtual machine will be paused. Please close applications to free up memory or close the VM"));
2391 if (rc != VINF_EM_SUSPEND)
2392 {
2393 if (RT_SUCCESS_NP(rc))
2394 {
2395 AssertLogRelMsgFailed(("%Rrc\n", rc));
2396 rc = VERR_EM_INTERNAL_ERROR;
2397 }
2398 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2399 }
2400 return rc;
2401
2402 /*
2403 * Guest debug events.
2404 */
2405 case VINF_EM_DBG_STEPPED:
2406 case VINF_EM_DBG_STOP:
2407 case VINF_EM_DBG_EVENT:
2408 case VINF_EM_DBG_BREAKPOINT:
2409 case VINF_EM_DBG_STEP:
2410 if (enmOldState == EMSTATE_RAW)
2411 {
2412 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_RAW));
2413 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_RAW;
2414 }
2415 else if (enmOldState == EMSTATE_HM)
2416 {
2417 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_HM));
2418 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_HM;
2419 }
2420 else if (enmOldState == EMSTATE_REM)
2421 {
2422 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_REM));
2423 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_REM;
2424 }
2425 else
2426 {
2427 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_IEM));
2428 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_IEM;
2429 }
2430 break;
2431
2432 /*
2433 * Hypervisor debug events.
2434 */
2435 case VINF_EM_DBG_HYPER_STEPPED:
2436 case VINF_EM_DBG_HYPER_BREAKPOINT:
2437 case VINF_EM_DBG_HYPER_ASSERTION:
2438 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_HYPER));
2439 pVCpu->em.s.enmState = EMSTATE_DEBUG_HYPER;
2440 break;
2441
2442 /*
2443 * Triple fault.
2444 */
2445 case VINF_EM_TRIPLE_FAULT:
2446 if (!pVM->em.s.fGuruOnTripleFault)
2447 {
2448 Log(("EMR3ExecuteVM: VINF_EM_TRIPLE_FAULT: CPU reset...\n"));
2449 rc = VBOXSTRICTRC_TODO(VMR3ResetTripleFault(pVM));
2450 Log2(("EMR3ExecuteVM: VINF_EM_TRIPLE_FAULT: %d -> %d (rc=%Rrc)\n", enmOldState, pVCpu->em.s.enmState, rc));
2451 continue;
2452 }
2453 /* Else fall through and trigger a guru. */
2454 case VERR_VMM_RING0_ASSERTION:
2455 Log(("EMR3ExecuteVM: %Rrc: %d -> %d (EMSTATE_GURU_MEDITATION)\n", rc, enmOldState, EMSTATE_GURU_MEDITATION));
2456 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2457 break;
2458
2459 /*
2460 * Any error code showing up here other than the ones we
2461 * know and process above are considered to be FATAL.
2462 *
2463 * Unknown warnings and informational status codes are also
2464 * included in this.
2465 */
2466 default:
2467 if (RT_SUCCESS_NP(rc))
2468 {
2469 AssertMsgFailed(("Unexpected warning or informational status code %Rra!\n", rc));
2470 rc = VERR_EM_INTERNAL_ERROR;
2471 }
2472 Log(("EMR3ExecuteVM: %Rrc: %d -> %d (EMSTATE_GURU_MEDITATION)\n", rc, enmOldState, EMSTATE_GURU_MEDITATION));
2473 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2474 break;
2475 }
2476
2477 /*
2478 * Act on state transition.
2479 */
2480 EMSTATE const enmNewState = pVCpu->em.s.enmState;
2481 if (enmOldState != enmNewState)
2482 {
2483 VBOXVMM_EM_STATE_CHANGED(pVCpu, enmOldState, enmNewState, rc);
2484
2485 /* Clear MWait flags. */
2486 if ( enmOldState == EMSTATE_HALTED
2487 && (pVCpu->em.s.MWait.fWait & EMMWAIT_FLAG_ACTIVE)
2488 && ( enmNewState == EMSTATE_RAW
2489 || enmNewState == EMSTATE_HM
2490 || enmNewState == EMSTATE_REM
2491 || enmNewState == EMSTATE_IEM_THEN_REM
2492 || enmNewState == EMSTATE_DEBUG_GUEST_RAW
2493 || enmNewState == EMSTATE_DEBUG_GUEST_HM
2494 || enmNewState == EMSTATE_DEBUG_GUEST_IEM
2495 || enmNewState == EMSTATE_DEBUG_GUEST_REM) )
2496 {
2497 LogFlow(("EMR3ExecuteVM: Clearing MWAIT\n"));
2498 pVCpu->em.s.MWait.fWait &= ~(EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0);
2499 }
2500 }
2501 else
2502 VBOXVMM_EM_STATE_UNCHANGED(pVCpu, enmNewState, rc);
2503
2504 STAM_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x); /* (skip this in release) */
2505 STAM_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2506
2507 /*
2508 * Act on the new state.
2509 */
2510 switch (enmNewState)
2511 {
2512 /*
2513 * Execute raw.
2514 */
2515 case EMSTATE_RAW:
2516#ifdef VBOX_WITH_RAW_MODE
2517 rc = emR3RawExecute(pVM, pVCpu, &fFFDone);
2518#else
2519 AssertLogRelMsgFailed(("%Rrc\n", rc));
2520 rc = VERR_EM_INTERNAL_ERROR;
2521#endif
2522 break;
2523
2524 /*
2525 * Execute hardware accelerated raw.
2526 */
2527 case EMSTATE_HM:
2528 rc = emR3HmExecute(pVM, pVCpu, &fFFDone);
2529 break;
2530
2531 /*
2532 * Execute recompiled.
2533 */
2534 case EMSTATE_REM:
2535 rc = emR3RemExecute(pVM, pVCpu, &fFFDone);
2536 Log2(("EMR3ExecuteVM: emR3RemExecute -> %Rrc\n", rc));
2537 break;
2538
2539 /*
2540 * Execute in the interpreter.
2541 */
2542 case EMSTATE_IEM:
2543 {
2544#if 0 /* For testing purposes. */
2545 STAM_PROFILE_START(&pVCpu->em.s.StatHmExec, x1);
2546 rc = VBOXSTRICTRC_TODO(EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE));
2547 STAM_PROFILE_STOP(&pVCpu->em.s.StatHmExec, x1);
2548 if (rc == VINF_EM_DBG_STEPPED || rc == VINF_EM_RESCHEDULE_HM || rc == VINF_EM_RESCHEDULE_REM || rc == VINF_EM_RESCHEDULE_RAW)
2549 rc = VINF_SUCCESS;
2550 else if (rc == VERR_EM_CANNOT_EXEC_GUEST)
2551#endif
2552 rc = VBOXSTRICTRC_TODO(IEMExecLots(pVCpu, NULL /*pcInstructions*/));
2553 if (pVM->em.s.fIemExecutesAll)
2554 {
2555 Assert(rc != VINF_EM_RESCHEDULE_REM);
2556 Assert(rc != VINF_EM_RESCHEDULE_RAW);
2557 Assert(rc != VINF_EM_RESCHEDULE_HM);
2558 }
2559 fFFDone = false;
2560 break;
2561 }
2562
2563 /*
2564 * Execute in IEM, hoping we can quickly switch back to HM
2565 * or RAW execution. If our hopes fail, we go to REM.
2566 */
2567 case EMSTATE_IEM_THEN_REM:
2568 {
2569 STAM_PROFILE_START(&pVCpu->em.s.StatIEMThenREM, pIemThenRem);
2570 rc = VBOXSTRICTRC_TODO(emR3ExecuteIemThenRem(pVM, pVCpu, &fFFDone));
2571 STAM_PROFILE_STOP(&pVCpu->em.s.StatIEMThenREM, pIemThenRem);
2572 break;
2573 }
2574
2575 /*
2576 * Application processor execution halted until SIPI.
2577 */
2578 case EMSTATE_WAIT_SIPI:
2579 /* no break */
2580 /*
2581 * hlt - execution halted until interrupt.
2582 */
2583 case EMSTATE_HALTED:
2584 {
2585 STAM_REL_PROFILE_START(&pVCpu->em.s.StatHalted, y);
2586 /* If HM (or someone else) stores a pending interrupt in
2587 TRPM, it must be dispatched ASAP without any halting.
2588 Anything pending in TRPM has been accepted and the CPU
2589 should already be in the right state to receive it. */
2590 if (TRPMHasTrap(pVCpu))
2591 rc = VINF_EM_RESCHEDULE;
2592 /* MWAIT has a special extension where it's woken up when
2593 an interrupt is pending even when IF=0. */
2594 else if ( (pVCpu->em.s.MWait.fWait & (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
2595 == (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
2596 {
2597 rc = VMR3WaitHalted(pVM, pVCpu, false /*fIgnoreInterrupts*/);
2598 if (rc == VINF_SUCCESS)
2599 {
2600 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
2601 APICUpdatePendingInterrupts(pVCpu);
2602
2603 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC
2604 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT))
2605 {
2606 Log(("EMR3ExecuteVM: Triggering reschedule on pending IRQ after MWAIT\n"));
2607 rc = VINF_EM_RESCHEDULE;
2608 }
2609 }
2610 }
2611 else
2612 {
2613 rc = VMR3WaitHalted(pVM, pVCpu, !(CPUMGetGuestEFlags(pVCpu) & X86_EFL_IF));
2614 /* We're only interested in NMIs/SMIs, which have their own FFs, so we don't
2615 need to check VMCPU_FF_UPDATE_APIC here. */
2616 if ( rc == VINF_SUCCESS
2617 && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT))
2618 {
2619 Log(("EMR3ExecuteVM: Triggering reschedule on pending NMI/SMI/UNHALT after HLT\n"));
2620 rc = VINF_EM_RESCHEDULE;
2621 }
2622 }
2623
2624 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatHalted, y);
2625 break;
2626 }
2627
2628 /*
2629 * Suspended - return to VM.cpp.
2630 */
2631 case EMSTATE_SUSPENDED:
2632 TMR3NotifySuspend(pVM, pVCpu);
2633 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2634 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2635 return VINF_EM_SUSPEND;
2636
2637 /*
2638 * Debugging in the guest.
2639 */
2640 case EMSTATE_DEBUG_GUEST_RAW:
2641 case EMSTATE_DEBUG_GUEST_HM:
2642 case EMSTATE_DEBUG_GUEST_IEM:
2643 case EMSTATE_DEBUG_GUEST_REM:
2644 TMR3NotifySuspend(pVM, pVCpu);
2645 rc = VBOXSTRICTRC_TODO(emR3Debug(pVM, pVCpu, rc));
2646 TMR3NotifyResume(pVM, pVCpu);
2647 Log2(("EMR3ExecuteVM: emR3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
2648 break;
2649
2650 /*
2651 * Debugging in the hypervisor.
2652 */
2653 case EMSTATE_DEBUG_HYPER:
2654 {
2655 TMR3NotifySuspend(pVM, pVCpu);
2656 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2657
2658 rc = VBOXSTRICTRC_TODO(emR3Debug(pVM, pVCpu, rc));
2659 Log2(("EMR3ExecuteVM: emR3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
2660 if (rc != VINF_SUCCESS)
2661 {
2662 if (rc == VINF_EM_OFF || rc == VINF_EM_TERMINATE)
2663 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2664 else
2665 {
2666 /* switch to guru meditation mode */
2667 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2668 VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
2669 VMMR3FatalDump(pVM, pVCpu, rc);
2670 }
2671 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2672 return rc;
2673 }
2674
2675 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2676 TMR3NotifyResume(pVM, pVCpu);
2677 break;
2678 }
2679
2680 /*
2681 * Guru meditation takes place in the debugger.
2682 */
2683 case EMSTATE_GURU_MEDITATION:
2684 {
2685 TMR3NotifySuspend(pVM, pVCpu);
2686 VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
2687 VMMR3FatalDump(pVM, pVCpu, rc);
2688 emR3Debug(pVM, pVCpu, rc);
2689 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2690 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2691 return rc;
2692 }
2693
2694 /*
2695 * The states we don't expect here.
2696 */
2697 case EMSTATE_NONE:
2698 case EMSTATE_TERMINATING:
2699 default:
2700 AssertMsgFailed(("EMR3ExecuteVM: Invalid state %d!\n", pVCpu->em.s.enmState));
2701 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2702 TMR3NotifySuspend(pVM, pVCpu);
2703 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2704 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2705 return VERR_EM_INTERNAL_ERROR;
2706 }
2707 } /* The Outer Main Loop */
2708 }
2709 else
2710 {
2711 /*
2712 * Fatal error.
2713 */
2714 Log(("EMR3ExecuteVM: returns %Rrc because of longjmp / fatal error; (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(pVCpu->em.s.enmPrevState)));
2715 TMR3NotifySuspend(pVM, pVCpu);
2716 VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
2717 VMMR3FatalDump(pVM, pVCpu, rc);
2718 emR3Debug(pVM, pVCpu, rc);
2719 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2720 /** @todo change the VM state! */
2721 return rc;
2722 }
2723
2724 /* not reached */
2725}
2726
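/* Minimal caller sketch (hypothetical; the real EMT loop lives in VMEmt.cpp
 * and is considerably more involved):
 *
 *     for (;;)
 *     {
 *         int rc = EMR3ExecuteVM(pVM, pVCpu);  // runs until off/suspend/fatal
 *         if (rc != VINF_EM_SUSPEND)           // e.g. power off or guru
 *             return rc;
 *         waitForResumeRequest(pVM);           // hypothetical helper
 *     }
 */
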
2727/**
2728 * Notify EM of a suspend state change (used by FTM).
2729 *
2730 * @param pVM The cross context VM structure.
2731 */
2732VMMR3_INT_DECL(int) EMR3NotifySuspend(PVM pVM)
2733{
2734 PVMCPU pVCpu = VMMGetCpu(pVM);
2735
2736 TMR3NotifySuspend(pVM, pVCpu); /* Stop the virtual time. */
2737 pVCpu->em.s.enmPrevState = pVCpu->em.s.enmState;
2738 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
2739 return VINF_SUCCESS;
2740}
2741
2742/**
2743 * Notify EM of a resume state change (used by FTM).
2744 *
2745 * @param pVM The cross context VM structure.
2746 */
2747VMMR3_INT_DECL(int) EMR3NotifyResume(PVM pVM)
2748{
2749 PVMCPU pVCpu = VMMGetCpu(pVM);
2750 EMSTATE enmCurState = pVCpu->em.s.enmState;
2751
2752 TMR3NotifyResume(pVM, pVCpu); /* Resume the virtual time. */
2753 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
2754 pVCpu->em.s.enmPrevState = enmCurState;
2755 return VINF_SUCCESS;
2756}
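
/* Usage sketch (hypothetical FTM-style sync point; not code from this file):
 * the two notifications are intended to bracket a section during which the
 * EMT must not advance virtual time:
 *
 *     EMR3NotifySuspend(pVM);     // stop virtual time, remember EM state
 *     doFaultTolerantSync(pVM);   // hypothetical work done while paused
 *     EMR3NotifyResume(pVM);      // restart virtual time, restore EM state
 */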