VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/EM.cpp@72440

Last change on this file since 72440 was 72440, checked in by vboxsync, 7 years ago

VMM: Nested hw.virt: Fixes when nested-paging isn't enabled in the outer guest.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 134.9 KB
/* $Id: EM.cpp 72440 2018-06-05 05:45:11Z vboxsync $ */
/** @file
 * EM - Execution Monitor / Manager.
 */

/*
 * Copyright (C) 2006-2017 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */

/** @page pg_em EM - The Execution Monitor / Manager
 *
 * The Execution Monitor/Manager is responsible for running the VM, scheduling
 * the right kind of execution (Raw-mode, Hardware Assisted, Recompiled or
 * Interpreted), and keeping the CPU states in sync. The function
 * EMR3ExecuteVM() is the 'main-loop' of the VM, while each of the execution
 * modes has different inner loops (emR3RawExecute, emR3HmExecute, and
 * emR3RemExecute).
 *
 * The interpreted execution is only used to avoid switching between
 * raw-mode/hm and the recompiler when fielding virtualization traps/faults.
 * The interpretation is thus implemented as part of EM.
 *
 * @see grp_em
 */
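
/* For orientation, a minimal sketch of the dispatch idea described above.  The
 * EMSTATE_* values and the emR3*Execute helpers are the real ones, but the
 * loop body is heavily simplified compared to the actual EMR3ExecuteVM():
 *
 *     for (;;)
 *     {
 *         switch (pVCpu->em.s.enmState)
 *         {
 *             case EMSTATE_RAW:   rc = emR3RawExecute(pVM, pVCpu, &fFFDone); break;
 *             case EMSTATE_HM:    rc = emR3HmExecute(pVM, pVCpu, &fFFDone);  break;
 *             case EMSTATE_REM:   rc = emR3RemExecute(pVM, pVCpu, &fFFDone); break;
 *             // ... halted, debug and guru meditation states ...
 *         }
 *         // Process the status code and any pending forced actions (FFs),
 *         // then let the scheduler pick the next state.
 *     }
 */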


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_EM
#include <VBox/vmm/em.h>
#include <VBox/vmm/vmm.h>
#include <VBox/vmm/patm.h>
#include <VBox/vmm/csam.h>
#include <VBox/vmm/selm.h>
#include <VBox/vmm/trpm.h>
#include <VBox/vmm/iem.h>
#include <VBox/vmm/nem.h>
#include <VBox/vmm/iom.h>
#include <VBox/vmm/dbgf.h>
#include <VBox/vmm/pgm.h>
#ifdef VBOX_WITH_REM
# include <VBox/vmm/rem.h>
#endif
#include <VBox/vmm/apic.h>
#include <VBox/vmm/tm.h>
#include <VBox/vmm/mm.h>
#include <VBox/vmm/ssm.h>
#include <VBox/vmm/pdmapi.h>
#include <VBox/vmm/pdmcritsect.h>
#include <VBox/vmm/pdmqueue.h>
#include <VBox/vmm/hm.h>
#include <VBox/vmm/patm.h>
#include "EMInternal.h"
#include <VBox/vmm/vm.h>
#include <VBox/vmm/uvm.h>
#include <VBox/vmm/cpumdis.h>
#include <VBox/dis.h>
#include <VBox/disopcode.h>
#include "VMMTracing.h"

#include <iprt/asm.h>
#include <iprt/string.h>
#include <iprt/stream.h>
#include <iprt/thread.h>


/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/
#if 0 /* Disabled till after 2.1.0 when we've time to test it. */
#define EM_NOTIFY_HM
#endif


/*********************************************************************************************************************************
*   Internal Functions                                                                                                           *
*********************************************************************************************************************************/
static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM);
static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
#if defined(LOG_ENABLED) || defined(VBOX_STRICT)
static const char *emR3GetStateName(EMSTATE enmState);
#endif
static VBOXSTRICTRC emR3Debug(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc);
#if defined(VBOX_WITH_REM) || defined(DEBUG)
static int emR3RemStep(PVM pVM, PVMCPU pVCpu);
#endif
static int emR3RemExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone);
int emR3HighPriorityPostForcedActions(PVM pVM, PVMCPU pVCpu, int rc);


/**
 * Initializes the EM.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 */
VMMR3_INT_DECL(int) EMR3Init(PVM pVM)
{
    LogFlow(("EMR3Init\n"));
    /*
     * Assert alignment and sizes.
     */
    AssertCompileMemberAlignment(VM, em.s, 32);
    AssertCompile(sizeof(pVM->em.s) <= sizeof(pVM->em.padding));
    AssertCompile(sizeof(pVM->aCpus[0].em.s.u.FatalLongJump) <= sizeof(pVM->aCpus[0].em.s.u.achPaddingFatalLongJump));

    /*
     * Init the structure.
     */
    pVM->em.s.offVM = RT_OFFSETOF(VM, em.s);
    PCFGMNODE pCfgRoot = CFGMR3GetRoot(pVM);
    PCFGMNODE pCfgEM = CFGMR3GetChild(pCfgRoot, "EM");

    bool fEnabled;
    int rc = CFGMR3QueryBoolDef(pCfgRoot, "RawR3Enabled", &fEnabled, true);
    AssertLogRelRCReturn(rc, rc);
    pVM->fRecompileUser = !fEnabled;

    rc = CFGMR3QueryBoolDef(pCfgRoot, "RawR0Enabled", &fEnabled, true);
    AssertLogRelRCReturn(rc, rc);
    pVM->fRecompileSupervisor = !fEnabled;

#ifdef VBOX_WITH_RAW_RING1
    rc = CFGMR3QueryBoolDef(pCfgRoot, "RawR1Enabled", &pVM->fRawRing1Enabled, false);
    AssertLogRelRCReturn(rc, rc);
#else
    pVM->fRawRing1Enabled = false; /* Disabled by default. */
#endif

    rc = CFGMR3QueryBoolDef(pCfgEM, "IemExecutesAll", &pVM->em.s.fIemExecutesAll, false);
    AssertLogRelRCReturn(rc, rc);

    rc = CFGMR3QueryBoolDef(pCfgEM, "TripleFaultReset", &fEnabled, false);
    AssertLogRelRCReturn(rc, rc);
    pVM->em.s.fGuruOnTripleFault = !fEnabled;
    if (!pVM->em.s.fGuruOnTripleFault && pVM->cCpus > 1)
    {
        LogRel(("EM: Overriding /EM/TripleFaultReset, must be false on SMP.\n"));
        pVM->em.s.fGuruOnTripleFault = true;
    }

    LogRel(("EMR3Init: fRecompileUser=%RTbool fRecompileSupervisor=%RTbool fRawRing1Enabled=%RTbool fIemExecutesAll=%RTbool fGuruOnTripleFault=%RTbool\n",
            pVM->fRecompileUser, pVM->fRecompileSupervisor, pVM->fRawRing1Enabled, pVM->em.s.fIemExecutesAll, pVM->em.s.fGuruOnTripleFault));
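
    /* Note: these settings live in the CFGM tree, so they can be tweaked from
     * the host side without rebuilding.  Assuming the standard extradata to
     * CFGM mapping under the VBoxInternal/ prefix ("MyVM" is a placeholder VM
     * name), something along these lines should work:
     *
     *     VBoxManage setextradata "MyVM" "VBoxInternal/EM/TripleFaultReset" 1
     *     VBoxManage setextradata "MyVM" "VBoxInternal/EM/IemExecutesAll"   1
     */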

#ifdef VBOX_WITH_REM
    /*
     * Initialize the REM critical section.
     */
    AssertCompileMemberAlignment(EM, CritSectREM, sizeof(uintptr_t));
    rc = PDMR3CritSectInit(pVM, &pVM->em.s.CritSectREM, RT_SRC_POS, "EM-REM");
    AssertRCReturn(rc, rc);
#endif

    /*
     * Saved state.
     */
    rc = SSMR3RegisterInternal(pVM, "em", 0, EM_SAVED_STATE_VERSION, 16,
                               NULL, NULL, NULL,
                               NULL, emR3Save, NULL,
                               NULL, emR3Load, NULL);
    if (RT_FAILURE(rc))
        return rc;

    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];

        pVCpu->em.s.enmState = (i == 0) ? EMSTATE_NONE : EMSTATE_WAIT_SIPI;
        pVCpu->em.s.enmPrevState = EMSTATE_NONE;
        pVCpu->em.s.fForceRAW = false;

        pVCpu->em.s.pCtx = CPUMQueryGuestCtxPtr(pVCpu);
#ifdef VBOX_WITH_RAW_MODE
        if (VM_IS_RAW_MODE_ENABLED(pVM))
        {
            pVCpu->em.s.pPatmGCState = PATMR3QueryGCStateHC(pVM);
            AssertMsg(pVCpu->em.s.pPatmGCState, ("PATMR3QueryGCStateHC failed!\n"));
        }
#endif

        /* Force reset of the time slice. */
        pVCpu->em.s.u64TimeSliceStart = 0;

# define EM_REG_COUNTER(a, b, c) \
        rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, c, b, i); \
        AssertRC(rc);

# define EM_REG_COUNTER_USED(a, b, c) \
        rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, c, b, i); \
        AssertRC(rc);

# define EM_REG_PROFILE(a, b, c) \
        rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, i); \
        AssertRC(rc);

# define EM_REG_PROFILE_ADV(a, b, c) \
        rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE_ADV, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, i); \
        AssertRC(rc);

        /*
         * Statistics.
         */
#ifdef VBOX_WITH_STATISTICS
        PEMSTATS pStats;
        rc = MMHyperAlloc(pVM, sizeof(*pStats), 0, MM_TAG_EM, (void **)&pStats);
        if (RT_FAILURE(rc))
            return rc;

        pVCpu->em.s.pStatsR3 = pStats;
        pVCpu->em.s.pStatsR0 = MMHyperR3ToR0(pVM, pStats);
        pVCpu->em.s.pStatsRC = MMHyperR3ToRC(pVM, pStats);

        EM_REG_PROFILE(&pStats->StatRZEmulate, "/EM/CPU%d/RZ/Interpret", "Profiling of EMInterpretInstruction.");
        EM_REG_PROFILE(&pStats->StatR3Emulate, "/EM/CPU%d/R3/Interpret", "Profiling of EMInterpretInstruction.");

        EM_REG_PROFILE(&pStats->StatRZInterpretSucceeded, "/EM/CPU%d/RZ/Interpret/Success", "The number of times an instruction was successfully interpreted.");
        EM_REG_PROFILE(&pStats->StatR3InterpretSucceeded, "/EM/CPU%d/R3/Interpret/Success", "The number of times an instruction was successfully interpreted.");

        EM_REG_COUNTER_USED(&pStats->StatRZAnd, "/EM/CPU%d/RZ/Interpret/Success/And", "The number of times AND was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3And, "/EM/CPU%d/R3/Interpret/Success/And", "The number of times AND was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZAdd, "/EM/CPU%d/RZ/Interpret/Success/Add", "The number of times ADD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Add, "/EM/CPU%d/R3/Interpret/Success/Add", "The number of times ADD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZAdc, "/EM/CPU%d/RZ/Interpret/Success/Adc", "The number of times ADC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Adc, "/EM/CPU%d/R3/Interpret/Success/Adc", "The number of times ADC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZSub, "/EM/CPU%d/RZ/Interpret/Success/Sub", "The number of times SUB was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Sub, "/EM/CPU%d/R3/Interpret/Success/Sub", "The number of times SUB was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZCpuId, "/EM/CPU%d/RZ/Interpret/Success/CpuId", "The number of times CPUID was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3CpuId, "/EM/CPU%d/R3/Interpret/Success/CpuId", "The number of times CPUID was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZDec, "/EM/CPU%d/RZ/Interpret/Success/Dec", "The number of times DEC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Dec, "/EM/CPU%d/R3/Interpret/Success/Dec", "The number of times DEC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZHlt, "/EM/CPU%d/RZ/Interpret/Success/Hlt", "The number of times HLT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Hlt, "/EM/CPU%d/R3/Interpret/Success/Hlt", "The number of times HLT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZInc, "/EM/CPU%d/RZ/Interpret/Success/Inc", "The number of times INC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Inc, "/EM/CPU%d/R3/Interpret/Success/Inc", "The number of times INC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZInvlPg, "/EM/CPU%d/RZ/Interpret/Success/Invlpg", "The number of times INVLPG was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3InvlPg, "/EM/CPU%d/R3/Interpret/Success/Invlpg", "The number of times INVLPG was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZIret, "/EM/CPU%d/RZ/Interpret/Success/Iret", "The number of times IRET was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Iret, "/EM/CPU%d/R3/Interpret/Success/Iret", "The number of times IRET was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZLLdt, "/EM/CPU%d/RZ/Interpret/Success/LLdt", "The number of times LLDT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3LLdt, "/EM/CPU%d/R3/Interpret/Success/LLdt", "The number of times LLDT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZLIdt, "/EM/CPU%d/RZ/Interpret/Success/LIdt", "The number of times LIDT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3LIdt, "/EM/CPU%d/R3/Interpret/Success/LIdt", "The number of times LIDT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZLGdt, "/EM/CPU%d/RZ/Interpret/Success/LGdt", "The number of times LGDT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3LGdt, "/EM/CPU%d/R3/Interpret/Success/LGdt", "The number of times LGDT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZMov, "/EM/CPU%d/RZ/Interpret/Success/Mov", "The number of times MOV was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Mov, "/EM/CPU%d/R3/Interpret/Success/Mov", "The number of times MOV was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZMovCRx, "/EM/CPU%d/RZ/Interpret/Success/MovCRx", "The number of times MOV CRx was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3MovCRx, "/EM/CPU%d/R3/Interpret/Success/MovCRx", "The number of times MOV CRx was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZMovDRx, "/EM/CPU%d/RZ/Interpret/Success/MovDRx", "The number of times MOV DRx was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3MovDRx, "/EM/CPU%d/R3/Interpret/Success/MovDRx", "The number of times MOV DRx was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZOr, "/EM/CPU%d/RZ/Interpret/Success/Or", "The number of times OR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Or, "/EM/CPU%d/R3/Interpret/Success/Or", "The number of times OR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZPop, "/EM/CPU%d/RZ/Interpret/Success/Pop", "The number of times POP was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Pop, "/EM/CPU%d/R3/Interpret/Success/Pop", "The number of times POP was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZRdtsc, "/EM/CPU%d/RZ/Interpret/Success/Rdtsc", "The number of times RDTSC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Rdtsc, "/EM/CPU%d/R3/Interpret/Success/Rdtsc", "The number of times RDTSC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZRdpmc, "/EM/CPU%d/RZ/Interpret/Success/Rdpmc", "The number of times RDPMC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Rdpmc, "/EM/CPU%d/R3/Interpret/Success/Rdpmc", "The number of times RDPMC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZSti, "/EM/CPU%d/RZ/Interpret/Success/Sti", "The number of times STI was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Sti, "/EM/CPU%d/R3/Interpret/Success/Sti", "The number of times STI was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZXchg, "/EM/CPU%d/RZ/Interpret/Success/Xchg", "The number of times XCHG was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Xchg, "/EM/CPU%d/R3/Interpret/Success/Xchg", "The number of times XCHG was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZXor, "/EM/CPU%d/RZ/Interpret/Success/Xor", "The number of times XOR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Xor, "/EM/CPU%d/R3/Interpret/Success/Xor", "The number of times XOR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZMonitor, "/EM/CPU%d/RZ/Interpret/Success/Monitor", "The number of times MONITOR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Monitor, "/EM/CPU%d/R3/Interpret/Success/Monitor", "The number of times MONITOR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZMWait, "/EM/CPU%d/RZ/Interpret/Success/MWait", "The number of times MWAIT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3MWait, "/EM/CPU%d/R3/Interpret/Success/MWait", "The number of times MWAIT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZBtr, "/EM/CPU%d/RZ/Interpret/Success/Btr", "The number of times BTR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Btr, "/EM/CPU%d/R3/Interpret/Success/Btr", "The number of times BTR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZBts, "/EM/CPU%d/RZ/Interpret/Success/Bts", "The number of times BTS was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Bts, "/EM/CPU%d/R3/Interpret/Success/Bts", "The number of times BTS was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZBtc, "/EM/CPU%d/RZ/Interpret/Success/Btc", "The number of times BTC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Btc, "/EM/CPU%d/R3/Interpret/Success/Btc", "The number of times BTC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZCmpXchg, "/EM/CPU%d/RZ/Interpret/Success/CmpXchg", "The number of times CMPXCHG was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3CmpXchg, "/EM/CPU%d/R3/Interpret/Success/CmpXchg", "The number of times CMPXCHG was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZCmpXchg8b, "/EM/CPU%d/RZ/Interpret/Success/CmpXchg8b", "The number of times CMPXCHG8B was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3CmpXchg8b, "/EM/CPU%d/R3/Interpret/Success/CmpXchg8b", "The number of times CMPXCHG8B was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZXAdd, "/EM/CPU%d/RZ/Interpret/Success/XAdd", "The number of times XADD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3XAdd, "/EM/CPU%d/R3/Interpret/Success/XAdd", "The number of times XADD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Rdmsr, "/EM/CPU%d/R3/Interpret/Success/Rdmsr", "The number of times RDMSR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZRdmsr, "/EM/CPU%d/RZ/Interpret/Success/Rdmsr", "The number of times RDMSR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Wrmsr, "/EM/CPU%d/R3/Interpret/Success/Wrmsr", "The number of times WRMSR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZWrmsr, "/EM/CPU%d/RZ/Interpret/Success/Wrmsr", "The number of times WRMSR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3StosWD, "/EM/CPU%d/R3/Interpret/Success/Stoswd", "The number of times STOSWD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZStosWD, "/EM/CPU%d/RZ/Interpret/Success/Stoswd", "The number of times STOSWD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZWbInvd, "/EM/CPU%d/RZ/Interpret/Success/WbInvd", "The number of times WBINVD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3WbInvd, "/EM/CPU%d/R3/Interpret/Success/WbInvd", "The number of times WBINVD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZLmsw, "/EM/CPU%d/RZ/Interpret/Success/Lmsw", "The number of times LMSW was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Lmsw, "/EM/CPU%d/R3/Interpret/Success/Lmsw", "The number of times LMSW was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZSmsw, "/EM/CPU%d/RZ/Interpret/Success/Smsw", "The number of times SMSW was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Smsw, "/EM/CPU%d/R3/Interpret/Success/Smsw", "The number of times SMSW was successfully interpreted.");

        EM_REG_COUNTER(&pStats->StatRZInterpretFailed, "/EM/CPU%d/RZ/Interpret/Failed", "The number of times an instruction was not interpreted.");
        EM_REG_COUNTER(&pStats->StatR3InterpretFailed, "/EM/CPU%d/R3/Interpret/Failed", "The number of times an instruction was not interpreted.");

        EM_REG_COUNTER_USED(&pStats->StatRZFailedAnd, "/EM/CPU%d/RZ/Interpret/Failed/And", "The number of times AND was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedAnd, "/EM/CPU%d/R3/Interpret/Failed/And", "The number of times AND was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedCpuId, "/EM/CPU%d/RZ/Interpret/Failed/CpuId", "The number of times CPUID was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedCpuId, "/EM/CPU%d/R3/Interpret/Failed/CpuId", "The number of times CPUID was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedDec, "/EM/CPU%d/RZ/Interpret/Failed/Dec", "The number of times DEC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedDec, "/EM/CPU%d/R3/Interpret/Failed/Dec", "The number of times DEC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedHlt, "/EM/CPU%d/RZ/Interpret/Failed/Hlt", "The number of times HLT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedHlt, "/EM/CPU%d/R3/Interpret/Failed/Hlt", "The number of times HLT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedInc, "/EM/CPU%d/RZ/Interpret/Failed/Inc", "The number of times INC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedInc, "/EM/CPU%d/R3/Interpret/Failed/Inc", "The number of times INC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedInvlPg, "/EM/CPU%d/RZ/Interpret/Failed/InvlPg", "The number of times INVLPG was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedInvlPg, "/EM/CPU%d/R3/Interpret/Failed/InvlPg", "The number of times INVLPG was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedIret, "/EM/CPU%d/RZ/Interpret/Failed/Iret", "The number of times IRET was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedIret, "/EM/CPU%d/R3/Interpret/Failed/Iret", "The number of times IRET was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedLLdt, "/EM/CPU%d/RZ/Interpret/Failed/LLdt", "The number of times LLDT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedLLdt, "/EM/CPU%d/R3/Interpret/Failed/LLdt", "The number of times LLDT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedLIdt, "/EM/CPU%d/RZ/Interpret/Failed/LIdt", "The number of times LIDT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedLIdt, "/EM/CPU%d/R3/Interpret/Failed/LIdt", "The number of times LIDT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedLGdt, "/EM/CPU%d/RZ/Interpret/Failed/LGdt", "The number of times LGDT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedLGdt, "/EM/CPU%d/R3/Interpret/Failed/LGdt", "The number of times LGDT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedMov, "/EM/CPU%d/RZ/Interpret/Failed/Mov", "The number of times MOV was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMov, "/EM/CPU%d/R3/Interpret/Failed/Mov", "The number of times MOV was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedMovCRx, "/EM/CPU%d/RZ/Interpret/Failed/MovCRx", "The number of times MOV CRx was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMovCRx, "/EM/CPU%d/R3/Interpret/Failed/MovCRx", "The number of times MOV CRx was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedMovDRx, "/EM/CPU%d/RZ/Interpret/Failed/MovDRx", "The number of times MOV DRx was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMovDRx, "/EM/CPU%d/R3/Interpret/Failed/MovDRx", "The number of times MOV DRx was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedOr, "/EM/CPU%d/RZ/Interpret/Failed/Or", "The number of times OR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedOr, "/EM/CPU%d/R3/Interpret/Failed/Or", "The number of times OR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedPop, "/EM/CPU%d/RZ/Interpret/Failed/Pop", "The number of times POP was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedPop, "/EM/CPU%d/R3/Interpret/Failed/Pop", "The number of times POP was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedSti, "/EM/CPU%d/RZ/Interpret/Failed/Sti", "The number of times STI was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedSti, "/EM/CPU%d/R3/Interpret/Failed/Sti", "The number of times STI was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedXchg, "/EM/CPU%d/RZ/Interpret/Failed/Xchg", "The number of times XCHG was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedXchg, "/EM/CPU%d/R3/Interpret/Failed/Xchg", "The number of times XCHG was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedXor, "/EM/CPU%d/RZ/Interpret/Failed/Xor", "The number of times XOR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedXor, "/EM/CPU%d/R3/Interpret/Failed/Xor", "The number of times XOR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedMonitor, "/EM/CPU%d/RZ/Interpret/Failed/Monitor", "The number of times MONITOR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMonitor, "/EM/CPU%d/R3/Interpret/Failed/Monitor", "The number of times MONITOR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedMWait, "/EM/CPU%d/RZ/Interpret/Failed/MWait", "The number of times MWAIT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMWait, "/EM/CPU%d/R3/Interpret/Failed/MWait", "The number of times MWAIT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedRdtsc, "/EM/CPU%d/RZ/Interpret/Failed/Rdtsc", "The number of times RDTSC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedRdtsc, "/EM/CPU%d/R3/Interpret/Failed/Rdtsc", "The number of times RDTSC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedRdpmc, "/EM/CPU%d/RZ/Interpret/Failed/Rdpmc", "The number of times RDPMC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedRdpmc, "/EM/CPU%d/R3/Interpret/Failed/Rdpmc", "The number of times RDPMC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedRdmsr, "/EM/CPU%d/RZ/Interpret/Failed/Rdmsr", "The number of times RDMSR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedRdmsr, "/EM/CPU%d/R3/Interpret/Failed/Rdmsr", "The number of times RDMSR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedWrmsr, "/EM/CPU%d/RZ/Interpret/Failed/Wrmsr", "The number of times WRMSR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedWrmsr, "/EM/CPU%d/R3/Interpret/Failed/Wrmsr", "The number of times WRMSR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedLmsw, "/EM/CPU%d/RZ/Interpret/Failed/Lmsw", "The number of times LMSW was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedLmsw, "/EM/CPU%d/R3/Interpret/Failed/Lmsw", "The number of times LMSW was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedSmsw, "/EM/CPU%d/RZ/Interpret/Failed/Smsw", "The number of times SMSW was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedSmsw, "/EM/CPU%d/R3/Interpret/Failed/Smsw", "The number of times SMSW was not interpreted.");

        EM_REG_COUNTER_USED(&pStats->StatRZFailedMisc, "/EM/CPU%d/RZ/Interpret/Failed/Misc", "The number of times some misc instruction was encountered.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMisc, "/EM/CPU%d/R3/Interpret/Failed/Misc", "The number of times some misc instruction was encountered.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedAdd, "/EM/CPU%d/RZ/Interpret/Failed/Add", "The number of times ADD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedAdd, "/EM/CPU%d/R3/Interpret/Failed/Add", "The number of times ADD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedAdc, "/EM/CPU%d/RZ/Interpret/Failed/Adc", "The number of times ADC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedAdc, "/EM/CPU%d/R3/Interpret/Failed/Adc", "The number of times ADC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedBtr, "/EM/CPU%d/RZ/Interpret/Failed/Btr", "The number of times BTR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedBtr, "/EM/CPU%d/R3/Interpret/Failed/Btr", "The number of times BTR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedBts, "/EM/CPU%d/RZ/Interpret/Failed/Bts", "The number of times BTS was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedBts, "/EM/CPU%d/R3/Interpret/Failed/Bts", "The number of times BTS was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedBtc, "/EM/CPU%d/RZ/Interpret/Failed/Btc", "The number of times BTC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedBtc, "/EM/CPU%d/R3/Interpret/Failed/Btc", "The number of times BTC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedCli, "/EM/CPU%d/RZ/Interpret/Failed/Cli", "The number of times CLI was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedCli, "/EM/CPU%d/R3/Interpret/Failed/Cli", "The number of times CLI was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedCmpXchg, "/EM/CPU%d/RZ/Interpret/Failed/CmpXchg", "The number of times CMPXCHG was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedCmpXchg, "/EM/CPU%d/R3/Interpret/Failed/CmpXchg", "The number of times CMPXCHG was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedCmpXchg8b, "/EM/CPU%d/RZ/Interpret/Failed/CmpXchg8b", "The number of times CMPXCHG8B was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedCmpXchg8b, "/EM/CPU%d/R3/Interpret/Failed/CmpXchg8b", "The number of times CMPXCHG8B was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedXAdd, "/EM/CPU%d/RZ/Interpret/Failed/XAdd", "The number of times XADD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedXAdd, "/EM/CPU%d/R3/Interpret/Failed/XAdd", "The number of times XADD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedMovNTPS, "/EM/CPU%d/RZ/Interpret/Failed/MovNTPS", "The number of times MOVNTPS was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMovNTPS, "/EM/CPU%d/R3/Interpret/Failed/MovNTPS", "The number of times MOVNTPS was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedStosWD, "/EM/CPU%d/RZ/Interpret/Failed/StosWD", "The number of times STOSWD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedStosWD, "/EM/CPU%d/R3/Interpret/Failed/StosWD", "The number of times STOSWD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedSub, "/EM/CPU%d/RZ/Interpret/Failed/Sub", "The number of times SUB was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedSub, "/EM/CPU%d/R3/Interpret/Failed/Sub", "The number of times SUB was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedWbInvd, "/EM/CPU%d/RZ/Interpret/Failed/WbInvd", "The number of times WBINVD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedWbInvd, "/EM/CPU%d/R3/Interpret/Failed/WbInvd", "The number of times WBINVD was not interpreted.");

        EM_REG_COUNTER_USED(&pStats->StatRZFailedUserMode, "/EM/CPU%d/RZ/Interpret/Failed/UserMode", "The number of rejections because of CPL.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedUserMode, "/EM/CPU%d/R3/Interpret/Failed/UserMode", "The number of rejections because of CPL.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedPrefix, "/EM/CPU%d/RZ/Interpret/Failed/Prefix", "The number of rejections because of a prefix.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedPrefix, "/EM/CPU%d/R3/Interpret/Failed/Prefix", "The number of rejections because of a prefix.");

        EM_REG_COUNTER_USED(&pStats->StatIoRestarted, "/EM/CPU%d/R3/PrivInst/IoRestarted", "I/O instructions restarted in ring-3.");
        EM_REG_COUNTER_USED(&pStats->StatIoIem, "/EM/CPU%d/R3/PrivInst/IoIem", "I/O instructions handed to IEM in ring-3.");
        EM_REG_COUNTER_USED(&pStats->StatCli, "/EM/CPU%d/R3/PrivInst/Cli", "Number of cli instructions.");
        EM_REG_COUNTER_USED(&pStats->StatSti, "/EM/CPU%d/R3/PrivInst/Sti", "Number of sti instructions.");
        EM_REG_COUNTER_USED(&pStats->StatHlt, "/EM/CPU%d/R3/PrivInst/Hlt", "Number of hlt instructions not handled in GC because of PATM.");
        EM_REG_COUNTER_USED(&pStats->StatInvlpg, "/EM/CPU%d/R3/PrivInst/Invlpg", "Number of invlpg instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMisc, "/EM/CPU%d/R3/PrivInst/Misc", "Number of misc. instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[0], "/EM/CPU%d/R3/PrivInst/Mov CR0, X", "Number of mov CR0 write instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[1], "/EM/CPU%d/R3/PrivInst/Mov CR1, X", "Number of mov CR1 write instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[2], "/EM/CPU%d/R3/PrivInst/Mov CR2, X", "Number of mov CR2 write instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[3], "/EM/CPU%d/R3/PrivInst/Mov CR3, X", "Number of mov CR3 write instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[4], "/EM/CPU%d/R3/PrivInst/Mov CR4, X", "Number of mov CR4 write instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovReadCR[0], "/EM/CPU%d/R3/PrivInst/Mov X, CR0", "Number of mov CR0 read instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovReadCR[1], "/EM/CPU%d/R3/PrivInst/Mov X, CR1", "Number of mov CR1 read instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovReadCR[2], "/EM/CPU%d/R3/PrivInst/Mov X, CR2", "Number of mov CR2 read instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovReadCR[3], "/EM/CPU%d/R3/PrivInst/Mov X, CR3", "Number of mov CR3 read instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovReadCR[4], "/EM/CPU%d/R3/PrivInst/Mov X, CR4", "Number of mov CR4 read instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovDRx, "/EM/CPU%d/R3/PrivInst/MovDRx", "Number of mov DRx instructions.");
        EM_REG_COUNTER_USED(&pStats->StatIret, "/EM/CPU%d/R3/PrivInst/Iret", "Number of iret instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovLgdt, "/EM/CPU%d/R3/PrivInst/Lgdt", "Number of lgdt instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovLidt, "/EM/CPU%d/R3/PrivInst/Lidt", "Number of lidt instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovLldt, "/EM/CPU%d/R3/PrivInst/Lldt", "Number of lldt instructions.");
        EM_REG_COUNTER_USED(&pStats->StatSysEnter, "/EM/CPU%d/R3/PrivInst/Sysenter", "Number of sysenter instructions.");
        EM_REG_COUNTER_USED(&pStats->StatSysExit, "/EM/CPU%d/R3/PrivInst/Sysexit", "Number of sysexit instructions.");
        EM_REG_COUNTER_USED(&pStats->StatSysCall, "/EM/CPU%d/R3/PrivInst/Syscall", "Number of syscall instructions.");
        EM_REG_COUNTER_USED(&pStats->StatSysRet, "/EM/CPU%d/R3/PrivInst/Sysret", "Number of sysret instructions.");

        EM_REG_COUNTER(&pVCpu->em.s.StatTotalClis, "/EM/CPU%d/Cli/Total", "Total number of cli instructions executed.");
        pVCpu->em.s.pCliStatTree = 0;

        /* These should be considered for release statistics. */
        EM_REG_COUNTER(&pVCpu->em.s.StatIOEmu, "/PROF/CPU%d/EM/Emulation/IO", "Profiling of emR3RawExecuteIOInstruction.");
        EM_REG_COUNTER(&pVCpu->em.s.StatPrivEmu, "/PROF/CPU%d/EM/Emulation/Priv", "Profiling of emR3RawPrivileged.");
        EM_REG_PROFILE(&pVCpu->em.s.StatHMEntry, "/PROF/CPU%d/EM/HMEnter", "Profiling Hardware Accelerated Mode entry overhead.");
        EM_REG_PROFILE(&pVCpu->em.s.StatHMExec, "/PROF/CPU%d/EM/HMExec", "Profiling Hardware Accelerated Mode execution.");
        EM_REG_COUNTER(&pVCpu->em.s.StatHMExecuteCalled, "/PROF/CPU%d/EM/HMExecuteCalled", "Number of times emR3HmExecute is called.");
        EM_REG_PROFILE(&pVCpu->em.s.StatIEMEmu, "/PROF/CPU%d/EM/IEMEmuSingle", "Profiling single instruction IEM execution.");
        EM_REG_PROFILE(&pVCpu->em.s.StatIEMThenREM, "/PROF/CPU%d/EM/IEMThenRem", "Profiling IEM-then-REM instruction execution (by IEM).");
        EM_REG_PROFILE(&pVCpu->em.s.StatNEMEntry, "/PROF/CPU%d/EM/NEMEnter", "Profiling NEM entry overhead.");
#endif /* VBOX_WITH_STATISTICS */
        EM_REG_PROFILE(&pVCpu->em.s.StatNEMExec, "/PROF/CPU%d/EM/NEMExec", "Profiling NEM execution.");
        EM_REG_COUNTER(&pVCpu->em.s.StatNEMExecuteCalled, "/PROF/CPU%d/EM/NEMExecuteCalled", "Number of times emR3NemExecute is called.");
#ifdef VBOX_WITH_STATISTICS
        EM_REG_PROFILE(&pVCpu->em.s.StatREMEmu, "/PROF/CPU%d/EM/REMEmuSingle", "Profiling single instruction REM execution.");
        EM_REG_PROFILE(&pVCpu->em.s.StatREMExec, "/PROF/CPU%d/EM/REMExec", "Profiling REM execution.");
        EM_REG_PROFILE(&pVCpu->em.s.StatREMSync, "/PROF/CPU%d/EM/REMSync", "Profiling REM context syncing.");
        EM_REG_PROFILE(&pVCpu->em.s.StatRAWEntry, "/PROF/CPU%d/EM/RAWEnter", "Profiling Raw Mode entry overhead.");
        EM_REG_PROFILE(&pVCpu->em.s.StatRAWExec, "/PROF/CPU%d/EM/RAWExec", "Profiling Raw Mode execution.");
        EM_REG_PROFILE(&pVCpu->em.s.StatRAWTail, "/PROF/CPU%d/EM/RAWTail", "Profiling Raw Mode tail overhead.");
#endif /* VBOX_WITH_STATISTICS */

        EM_REG_COUNTER(&pVCpu->em.s.StatForcedActions, "/PROF/CPU%d/EM/ForcedActions", "Profiling forced action execution.");
        EM_REG_COUNTER(&pVCpu->em.s.StatHalted, "/PROF/CPU%d/EM/Halted", "Profiling halted state (VMR3WaitHalted).");
        EM_REG_PROFILE_ADV(&pVCpu->em.s.StatCapped, "/PROF/CPU%d/EM/Capped", "Profiling capped state (sleep).");
        EM_REG_COUNTER(&pVCpu->em.s.StatREMTotal, "/PROF/CPU%d/EM/REMTotal", "Profiling emR3RemExecute (excluding FFs).");
        EM_REG_COUNTER(&pVCpu->em.s.StatRAWTotal, "/PROF/CPU%d/EM/RAWTotal", "Profiling emR3RawExecute (excluding FFs).");

        EM_REG_PROFILE_ADV(&pVCpu->em.s.StatTotal, "/PROF/CPU%d/EM/Total", "Profiling EMR3ExecuteVM.");
    }

    emR3InitDbg(pVM);
    return VINF_SUCCESS;
}


/**
 * Applies relocations to data and code managed by this
 * component. This function will be called at init and
 * whenever the VMM needs to relocate itself inside the GC.
 *
 * @param   pVM     The cross context VM structure.
 */
VMMR3_INT_DECL(void) EMR3Relocate(PVM pVM)
{
    LogFlow(("EMR3Relocate\n"));
    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];
        if (pVCpu->em.s.pStatsR3)
            pVCpu->em.s.pStatsRC = MMHyperR3ToRC(pVM, pVCpu->em.s.pStatsR3);
    }
}


/**
 * Reset the EM state for a CPU.
 *
 * Called by EMR3Reset and hot plugging.
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMMR3_INT_DECL(void) EMR3ResetCpu(PVMCPU pVCpu)
{
    /* Reset scheduling state. */
    pVCpu->em.s.fForceRAW = false;
    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_UNHALT);

    /* VMR3ResetFF may return VINF_EM_RESET or VINF_EM_SUSPEND, so transition
       out of the HALTED state here so that enmPrevState doesn't end up as
       HALTED when EMR3ExecuteVM returns. */
    if (pVCpu->em.s.enmState == EMSTATE_HALTED)
    {
        Log(("EMR3ResetCpu: Cpu#%u %s -> %s\n", pVCpu->idCpu, emR3GetStateName(pVCpu->em.s.enmState), pVCpu->idCpu == 0 ? "EMSTATE_NONE" : "EMSTATE_WAIT_SIPI"));
        pVCpu->em.s.enmState = pVCpu->idCpu == 0 ? EMSTATE_NONE : EMSTATE_WAIT_SIPI;
    }
}


/**
 * Reset notification.
 *
 * @param   pVM     The cross context VM structure.
 */
VMMR3_INT_DECL(void) EMR3Reset(PVM pVM)
{
    Log(("EMR3Reset: \n"));
    for (VMCPUID i = 0; i < pVM->cCpus; i++)
        EMR3ResetCpu(&pVM->aCpus[i]);
}


/**
 * Terminates the EM.
 *
 * Termination means cleaning up and freeing all resources; the VM itself
 * is at this point powered off or suspended.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 */
VMMR3_INT_DECL(int) EMR3Term(PVM pVM)
{
    AssertMsg(pVM->em.s.offVM, ("bad init order!\n"));

#ifdef VBOX_WITH_REM
    PDMR3CritSectDelete(&pVM->em.s.CritSectREM);
#else
    RT_NOREF(pVM);
#endif
    return VINF_SUCCESS;
}


/**
 * Execute state save operation.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   pSSM    SSM operation handle.
 */
static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM)
{
    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];

        int rc = SSMR3PutBool(pSSM, pVCpu->em.s.fForceRAW);
        AssertRCReturn(rc, rc);

        Assert(pVCpu->em.s.enmState == EMSTATE_SUSPENDED);
        Assert(pVCpu->em.s.enmPrevState != EMSTATE_SUSPENDED);
        rc = SSMR3PutU32(pSSM, pVCpu->em.s.enmPrevState);
        AssertRCReturn(rc, rc);

        /* Save mwait state. */
        rc = SSMR3PutU32(pSSM, pVCpu->em.s.MWait.fWait);
        AssertRCReturn(rc, rc);
        rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMWaitRAX);
        AssertRCReturn(rc, rc);
        rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMWaitRCX);
        AssertRCReturn(rc, rc);
        rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRAX);
        AssertRCReturn(rc, rc);
        rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRCX);
        AssertRCReturn(rc, rc);
        rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRDX);
        AssertRCReturn(rc, rc);
    }
    return VINF_SUCCESS;
}


/**
 * Execute state load operation.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pSSM        SSM operation handle.
 * @param   uVersion    Data layout version.
 * @param   uPass       The data pass.
 */
static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
{
    /*
     * Validate version.
     */
    if (   uVersion > EM_SAVED_STATE_VERSION
        || uVersion < EM_SAVED_STATE_VERSION_PRE_SMP)
    {
        AssertMsgFailed(("emR3Load: Invalid version uVersion=%d (current %d)!\n", uVersion, EM_SAVED_STATE_VERSION));
        return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
    }
    Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);

    /*
     * Load the saved state.
     */
    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];

        int rc = SSMR3GetBool(pSSM, &pVCpu->em.s.fForceRAW);
        if (RT_FAILURE(rc))
            pVCpu->em.s.fForceRAW = false;
        AssertRCReturn(rc, rc);

        if (uVersion > EM_SAVED_STATE_VERSION_PRE_SMP)
        {
            AssertCompile(sizeof(pVCpu->em.s.enmPrevState) == sizeof(uint32_t));
            rc = SSMR3GetU32(pSSM, (uint32_t *)&pVCpu->em.s.enmPrevState);
            AssertRCReturn(rc, rc);
            Assert(pVCpu->em.s.enmPrevState != EMSTATE_SUSPENDED);

            pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
        }
        if (uVersion > EM_SAVED_STATE_VERSION_PRE_MWAIT)
        {
            /* Load mwait state. */
            rc = SSMR3GetU32(pSSM, &pVCpu->em.s.MWait.fWait);
            AssertRCReturn(rc, rc);
            rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMWaitRAX);
            AssertRCReturn(rc, rc);
            rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMWaitRCX);
            AssertRCReturn(rc, rc);
            rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRAX);
            AssertRCReturn(rc, rc);
            rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRCX);
            AssertRCReturn(rc, rc);
            rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRDX);
            AssertRCReturn(rc, rc);
        }

        Assert(!pVCpu->em.s.pCliStatTree);
    }
    return VINF_SUCCESS;
}


/**
 * Argument packet for emR3SetExecutionPolicy.
 */
struct EMR3SETEXECPOLICYARGS
{
    EMEXECPOLICY enmPolicy;
    bool         fEnforce;
};


/**
 * @callback_method_impl{FNVMMEMTRENDEZVOUS, Rendezvous callback for EMR3SetExecutionPolicy.}
 */
static DECLCALLBACK(VBOXSTRICTRC) emR3SetExecutionPolicy(PVM pVM, PVMCPU pVCpu, void *pvUser)
{
    /*
     * Only the first CPU changes the variables.
     */
    if (pVCpu->idCpu == 0)
    {
        struct EMR3SETEXECPOLICYARGS *pArgs = (struct EMR3SETEXECPOLICYARGS *)pvUser;
        switch (pArgs->enmPolicy)
        {
            case EMEXECPOLICY_RECOMPILE_RING0:
                pVM->fRecompileSupervisor = pArgs->fEnforce;
                break;
            case EMEXECPOLICY_RECOMPILE_RING3:
                pVM->fRecompileUser = pArgs->fEnforce;
                break;
            case EMEXECPOLICY_IEM_ALL:
                pVM->em.s.fIemExecutesAll = pArgs->fEnforce;
                break;
            default:
                AssertFailedReturn(VERR_INVALID_PARAMETER);
        }
        LogRel(("emR3SetExecutionPolicy: fRecompileUser=%RTbool fRecompileSupervisor=%RTbool fIemExecutesAll=%RTbool\n",
                pVM->fRecompileUser, pVM->fRecompileSupervisor, pVM->em.s.fIemExecutesAll));
    }

    /*
     * Force rescheduling if in RAW, HM, NEM, IEM, or REM.
     */
    return pVCpu->em.s.enmState == EMSTATE_RAW
        || pVCpu->em.s.enmState == EMSTATE_HM
        || pVCpu->em.s.enmState == EMSTATE_NEM
        || pVCpu->em.s.enmState == EMSTATE_IEM
        || pVCpu->em.s.enmState == EMSTATE_REM
        || pVCpu->em.s.enmState == EMSTATE_IEM_THEN_REM
         ? VINF_EM_RESCHEDULE
         : VINF_SUCCESS;
}


/**
 * Changes an execution scheduling policy parameter.
 *
 * This is used to enable or disable raw-mode / hardware-virtualization
 * execution of user and supervisor code.
 *
 * @returns VINF_SUCCESS on success.
 * @returns VINF_EM_RESCHEDULE if a rescheduling might be required.
 * @returns VERR_INVALID_PARAMETER on an invalid enmPolicy value.
 *
 * @param   pUVM        The user mode VM handle.
 * @param   enmPolicy   The scheduling policy to change.
 * @param   fEnforce    Whether to enforce the policy or not.
 */
VMMR3DECL(int) EMR3SetExecutionPolicy(PUVM pUVM, EMEXECPOLICY enmPolicy, bool fEnforce)
{
    UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
    VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE);
    AssertReturn(enmPolicy > EMEXECPOLICY_INVALID && enmPolicy < EMEXECPOLICY_END, VERR_INVALID_PARAMETER);

    struct EMR3SETEXECPOLICYARGS Args = { enmPolicy, fEnforce };
    return VMMR3EmtRendezvous(pUVM->pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING, emR3SetExecutionPolicy, &Args);
}


/**
 * Queries an execution scheduling policy parameter.
 *
 * @returns VBox status code
 * @param   pUVM        The user mode VM handle.
 * @param   enmPolicy   The scheduling policy to query.
 * @param   pfEnforced  Where to return the current value.
 */
VMMR3DECL(int) EMR3QueryExecutionPolicy(PUVM pUVM, EMEXECPOLICY enmPolicy, bool *pfEnforced)
{
    AssertReturn(enmPolicy > EMEXECPOLICY_INVALID && enmPolicy < EMEXECPOLICY_END, VERR_INVALID_PARAMETER);
    AssertPtrReturn(pfEnforced, VERR_INVALID_POINTER);
    UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
    PVM pVM = pUVM->pVM;
    VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);

    /* No need to bother EMTs with a query. */
    switch (enmPolicy)
    {
        case EMEXECPOLICY_RECOMPILE_RING0:
            *pfEnforced = pVM->fRecompileSupervisor;
            break;
        case EMEXECPOLICY_RECOMPILE_RING3:
            *pfEnforced = pVM->fRecompileUser;
            break;
        case EMEXECPOLICY_IEM_ALL:
            *pfEnforced = pVM->em.s.fIemExecutesAll;
            break;
        default:
            AssertFailedReturn(VERR_INTERNAL_ERROR_2);
    }

    return VINF_SUCCESS;
}
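
/* A minimal usage sketch for the two policy APIs above; error handling is
 * elided and pUVM is assumed to be a valid user mode VM handle obtained by
 * the caller:
 *
 *     bool fIemAll;
 *     int rc = EMR3QueryExecutionPolicy(pUVM, EMEXECPOLICY_IEM_ALL, &fIemAll);
 *     if (RT_SUCCESS(rc) && !fIemAll)
 *         rc = EMR3SetExecutionPolicy(pUVM, EMEXECPOLICY_IEM_ALL, true /*fEnforce*/);
 *
 * EMR3SetExecutionPolicy does the cross-CPU coordination itself through the
 * rendezvous callback above, so the caller need not hold any locks; a
 * rescheduling result simply means the EMTs will re-evaluate their execution
 * mode.
 */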


/**
 * Queries the main execution engine of the VM.
 *
 * @returns VBox status code
 * @param   pUVM                    The user mode VM handle.
 * @param   pbMainExecutionEngine   Where to return the result, VM_EXEC_ENGINE_XXX.
 */
VMMR3DECL(int) EMR3QueryMainExecutionEngine(PUVM pUVM, uint8_t *pbMainExecutionEngine)
{
    AssertPtrReturn(pbMainExecutionEngine, VERR_INVALID_POINTER);
    *pbMainExecutionEngine = VM_EXEC_ENGINE_NOT_SET;

    UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
    PVM pVM = pUVM->pVM;
    VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);

    *pbMainExecutionEngine = pVM->bMainExecutionEngine;
    return VINF_SUCCESS;
}


/**
 * Raise a fatal error.
 *
 * Safely terminate the VM with full state report and stuff. This function
 * will naturally never return.
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   rc      VBox status code.
 */
VMMR3DECL(void) EMR3FatalError(PVMCPU pVCpu, int rc)
{
    pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
    longjmp(pVCpu->em.s.u.FatalLongJump, rc);
}
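
/* EMR3FatalError never returns because it longjmps to the per-VCPU jump buffer
 * armed by the EMT's outer loop (the matching setjmp on
 * pVCpu->em.s.u.FatalLongJump is presumably set up in EMR3ExecuteVM further
 * down in this file).  Conceptually the pairing looks like this simplified
 * sketch, where emR3DoTheWork is a made-up stand-in for the real inner loops:
 *
 *     int rc = setjmp(pVCpu->em.s.u.FatalLongJump);
 *     if (!rc)
 *         rc = emR3DoTheWork(pVM, pVCpu); // may call EMR3FatalError(pVCpu, rcFatal)
 *     else
 *     {
 *         // rc is now the rcFatal passed to EMR3FatalError; report the
 *         // guru meditation and bail out.
 *     }
 */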


#if defined(LOG_ENABLED) || defined(VBOX_STRICT)
/**
 * Gets the EM state name.
 *
 * @returns pointer to read-only state name.
 * @param   enmState    The state.
 */
static const char *emR3GetStateName(EMSTATE enmState)
{
    switch (enmState)
    {
        case EMSTATE_NONE:              return "EMSTATE_NONE";
        case EMSTATE_RAW:               return "EMSTATE_RAW";
        case EMSTATE_HM:                return "EMSTATE_HM";
        case EMSTATE_IEM:               return "EMSTATE_IEM";
        case EMSTATE_REM:               return "EMSTATE_REM";
        case EMSTATE_HALTED:            return "EMSTATE_HALTED";
        case EMSTATE_WAIT_SIPI:         return "EMSTATE_WAIT_SIPI";
        case EMSTATE_SUSPENDED:         return "EMSTATE_SUSPENDED";
        case EMSTATE_TERMINATING:       return "EMSTATE_TERMINATING";
        case EMSTATE_DEBUG_GUEST_RAW:   return "EMSTATE_DEBUG_GUEST_RAW";
        case EMSTATE_DEBUG_GUEST_HM:    return "EMSTATE_DEBUG_GUEST_HM";
        case EMSTATE_DEBUG_GUEST_IEM:   return "EMSTATE_DEBUG_GUEST_IEM";
        case EMSTATE_DEBUG_GUEST_REM:   return "EMSTATE_DEBUG_GUEST_REM";
        case EMSTATE_DEBUG_HYPER:       return "EMSTATE_DEBUG_HYPER";
        case EMSTATE_GURU_MEDITATION:   return "EMSTATE_GURU_MEDITATION";
        case EMSTATE_IEM_THEN_REM:      return "EMSTATE_IEM_THEN_REM";
        case EMSTATE_NEM:               return "EMSTATE_NEM";
        case EMSTATE_DEBUG_GUEST_NEM:   return "EMSTATE_DEBUG_GUEST_NEM";
        default:                        return "Unknown!";
    }
}
#endif /* LOG_ENABLED || VBOX_STRICT */


/**
 * Debug loop.
 *
 * @returns VBox status code for EM.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   rc      Current EM VBox status code.
 */
static VBOXSTRICTRC emR3Debug(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc)
{
    for (;;)
    {
        Log(("emR3Debug: rc=%Rrc\n", VBOXSTRICTRC_VAL(rc)));
        const VBOXSTRICTRC rcLast = rc;

        /*
         * Debug related RC.
         */
        switch (VBOXSTRICTRC_VAL(rc))
        {
            /*
             * Single step an instruction.
             */
            case VINF_EM_DBG_STEP:
                if (   pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_RAW
                    || pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER
                    || pVCpu->em.s.fForceRAW /* paranoia */)
#ifdef VBOX_WITH_RAW_MODE
                    rc = emR3RawStep(pVM, pVCpu);
#else
                    AssertLogRelMsgFailedStmt(("Bad EM state."), VERR_EM_INTERNAL_ERROR);
#endif
                else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_HM)
                    rc = EMR3HmSingleInstruction(pVM, pVCpu, 0 /*fFlags*/);
                else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_NEM)
                    rc = VBOXSTRICTRC_TODO(emR3NemSingleInstruction(pVM, pVCpu, 0 /*fFlags*/));
#ifdef VBOX_WITH_REM
                else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_REM)
                    rc = emR3RemStep(pVM, pVCpu);
#endif
                else
                {
                    rc = IEMExecOne(pVCpu); /** @todo add dedicated interface... */
                    if (rc == VINF_SUCCESS || rc == VINF_EM_RESCHEDULE)
                        rc = VINF_EM_DBG_STEPPED;
                }
                break;

            /*
             * Simple events: stepped, breakpoint, stop/assertion.
             */
            case VINF_EM_DBG_STEPPED:
                rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED);
                break;

            case VINF_EM_DBG_BREAKPOINT:
                rc = DBGFR3EventBreakpoint(pVM, DBGFEVENT_BREAKPOINT);
                break;

            case VINF_EM_DBG_STOP:
                rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, NULL, 0, NULL, NULL);
                break;

            case VINF_EM_DBG_EVENT:
                rc = DBGFR3EventHandlePending(pVM, pVCpu);
                break;

            case VINF_EM_DBG_HYPER_STEPPED:
                rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED_HYPER);
                break;

            case VINF_EM_DBG_HYPER_BREAKPOINT:
                rc = DBGFR3EventBreakpoint(pVM, DBGFEVENT_BREAKPOINT_HYPER);
                break;

            case VINF_EM_DBG_HYPER_ASSERTION:
                RTPrintf("\nVINF_EM_DBG_HYPER_ASSERTION:\n%s%s\n", VMMR3GetRZAssertMsg1(pVM), VMMR3GetRZAssertMsg2(pVM));
                RTLogFlush(NULL);
                rc = DBGFR3EventAssertion(pVM, DBGFEVENT_ASSERTION_HYPER, VMMR3GetRZAssertMsg1(pVM), VMMR3GetRZAssertMsg2(pVM));
                break;

            /*
             * Guru meditation.
             */
            case VERR_VMM_RING0_ASSERTION: /** @todo Make a guru meditation event! */
                rc = DBGFR3EventSrc(pVM, DBGFEVENT_FATAL_ERROR, "VERR_VMM_RING0_ASSERTION", 0, NULL, NULL);
                break;
            case VERR_REM_TOO_MANY_TRAPS: /** @todo Make a guru meditation event! */
                rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, "VERR_REM_TOO_MANY_TRAPS", 0, NULL, NULL);
                break;
            case VINF_EM_TRIPLE_FAULT: /** @todo Make a guru meditation event! */
                rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, "VINF_EM_TRIPLE_FAULT", 0, NULL, NULL);
                break;

            default: /** @todo don't use default for guru, but make special errors code! */
            {
                LogRel(("emR3Debug: rc=%Rrc\n", VBOXSTRICTRC_VAL(rc)));
                rc = DBGFR3Event(pVM, DBGFEVENT_FATAL_ERROR);
                break;
            }
        }

        /*
         * Process the result.
         */
        switch (VBOXSTRICTRC_VAL(rc))
        {
            /*
             * Continue the debugging loop.
             */
            case VINF_EM_DBG_STEP:
            case VINF_EM_DBG_STOP:
            case VINF_EM_DBG_EVENT:
            case VINF_EM_DBG_STEPPED:
            case VINF_EM_DBG_BREAKPOINT:
            case VINF_EM_DBG_HYPER_STEPPED:
            case VINF_EM_DBG_HYPER_BREAKPOINT:
            case VINF_EM_DBG_HYPER_ASSERTION:
                break;

            /*
             * Resuming execution (in some form) has to be done here if we got
             * a hypervisor debug event.
             */
            case VINF_SUCCESS:
            case VINF_EM_RESUME:
            case VINF_EM_SUSPEND:
            case VINF_EM_RESCHEDULE:
            case VINF_EM_RESCHEDULE_RAW:
            case VINF_EM_RESCHEDULE_REM:
            case VINF_EM_HALT:
                if (pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER)
                {
#ifdef VBOX_WITH_RAW_MODE
                    rc = emR3RawResumeHyper(pVM, pVCpu);
                    if (rc != VINF_SUCCESS && RT_SUCCESS(rc))
                        continue;
#else
                    AssertLogRelMsgFailedReturn(("Not implemented\n"), VERR_EM_INTERNAL_ERROR);
#endif
                }
                if (rc == VINF_SUCCESS)
                    rc = VINF_EM_RESCHEDULE;
                return rc;

            /*
             * The debugger isn't attached.
             * We'll simply turn the thing off since that's the easiest thing to do.
             */
            case VERR_DBGF_NOT_ATTACHED:
                switch (VBOXSTRICTRC_VAL(rcLast))
                {
                    case VINF_EM_DBG_HYPER_STEPPED:
                    case VINF_EM_DBG_HYPER_BREAKPOINT:
                    case VINF_EM_DBG_HYPER_ASSERTION:
                    case VERR_TRPM_PANIC:
                    case VERR_TRPM_DONT_PANIC:
                    case VERR_VMM_RING0_ASSERTION:
                    case VERR_VMM_HYPER_CR3_MISMATCH:
                    case VERR_VMM_RING3_CALL_DISABLED:
                        return rcLast;
                }
                return VINF_EM_OFF;

            /*
             * Status codes terminating the VM in one or another sense.
             */
            case VINF_EM_TERMINATE:
            case VINF_EM_OFF:
            case VINF_EM_RESET:
            case VINF_EM_NO_MEMORY:
            case VINF_EM_RAW_STALE_SELECTOR:
            case VINF_EM_RAW_IRET_TRAP:
            case VERR_TRPM_PANIC:
            case VERR_TRPM_DONT_PANIC:
            case VERR_IEM_INSTR_NOT_IMPLEMENTED:
            case VERR_IEM_ASPECT_NOT_IMPLEMENTED:
            case VERR_VMM_RING0_ASSERTION:
            case VERR_VMM_HYPER_CR3_MISMATCH:
            case VERR_VMM_RING3_CALL_DISABLED:
            case VERR_INTERNAL_ERROR:
            case VERR_INTERNAL_ERROR_2:
            case VERR_INTERNAL_ERROR_3:
            case VERR_INTERNAL_ERROR_4:
            case VERR_INTERNAL_ERROR_5:
            case VERR_IPE_UNEXPECTED_STATUS:
            case VERR_IPE_UNEXPECTED_INFO_STATUS:
            case VERR_IPE_UNEXPECTED_ERROR_STATUS:
                return rc;

            /*
             * The rest is unexpected, and will keep us here.
             */
            default:
                AssertMsgFailed(("Unexpected rc %Rrc!\n", VBOXSTRICTRC_VAL(rc)));
                break;
        }
    } /* debug for ever */
}


#if defined(VBOX_WITH_REM) || defined(DEBUG)
/**
 * Steps recompiled code.
 *
 * @returns VBox status code. The most important ones are: VINF_EM_STEP_EVENT,
 *          VINF_EM_RESCHEDULE, VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
 *
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
static int emR3RemStep(PVM pVM, PVMCPU pVCpu)
{
    Log3(("emR3RemStep: cs:eip=%04x:%08x\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));

# ifdef VBOX_WITH_REM
    EMRemLock(pVM);

    /*
     * Switch to REM, step instruction, switch back.
     */
    int rc = REMR3State(pVM, pVCpu);
    if (RT_SUCCESS(rc))
    {
        rc = REMR3Step(pVM, pVCpu);
        REMR3StateBack(pVM, pVCpu);
    }
    EMRemUnlock(pVM);

# else
    int rc = VBOXSTRICTRC_TODO(IEMExecOne(pVCpu)); NOREF(pVM);
# endif

    Log3(("emR3RemStep: returns %Rrc cs:eip=%04x:%08x\n", rc, CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
    return rc;
}
#endif /* VBOX_WITH_REM || DEBUG */


#ifdef VBOX_WITH_REM
/**
 * emR3RemExecute helper that syncs the state back from REM and leaves the REM
 * critical section.
1070 *
1071 * @returns false - new fInREMState value.
1072 * @param pVM The cross context VM structure.
1073 * @param pVCpu The cross context virtual CPU structure.
1074 */
1075DECLINLINE(bool) emR3RemExecuteSyncBack(PVM pVM, PVMCPU pVCpu)
1076{
1077 STAM_PROFILE_START(&pVCpu->em.s.StatREMSync, a);
1078 REMR3StateBack(pVM, pVCpu);
1079 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMSync, a);
1080
1081 EMRemUnlock(pVM);
1082 return false;
1083}
1084#endif
1085
1086
1087/**
1088 * Executes recompiled code.
1089 *
1090 * This function contains the recompiler version of the inner
1091 * execution loop (the outer loop being in EMR3ExecuteVM()).
1092 *
1093 * @returns VBox status code. The most important ones are: VINF_EM_RESCHEDULE,
1094 * VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1095 *
1096 * @param pVM The cross context VM structure.
1097 * @param pVCpu The cross context virtual CPU structure.
1098 * @param pfFFDone Where to store an indicator telling whether or not
1099 * FFs were done before returning.
1100 *
1101 */
1102static int emR3RemExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
1103{
1104#ifdef LOG_ENABLED
1105 PCPUMCTX pCtx = pVCpu->em.s.pCtx;
1106 uint32_t cpl = CPUMGetGuestCPL(pVCpu);
1107
1108 if (pCtx->eflags.Bits.u1VM)
1109 Log(("EMV86: %04X:%08X IF=%d\n", pCtx->cs.Sel, pCtx->eip, pCtx->eflags.Bits.u1IF));
1110 else
1111 Log(("EMR%d: %04X:%08X ESP=%08X IF=%d CR0=%x eflags=%x\n", cpl, pCtx->cs.Sel, pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF, (uint32_t)pCtx->cr0, pCtx->eflags.u));
1112#endif
1113 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatREMTotal, a);
1114
1115#if defined(VBOX_STRICT) && defined(DEBUG_bird)
1116 AssertMsg( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)
1117 || !MMHyperIsInsideArea(pVM, CPUMGetGuestEIP(pVCpu)), /** @todo @bugref{1419} - get flat address. */
1118 ("cs:eip=%RX16:%RX32\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
1119#endif
1120
1121 /*
1122 * Spin till we get a forced action which returns anything but VINF_SUCCESS,
1123 * or till REM suggests raw-mode execution.
1124 */
1125 *pfFFDone = false;
1126#ifdef VBOX_WITH_REM
1127 bool fInREMState = false;
1128#else
1129 uint32_t cLoops = 0;
1130#endif
1131 int rc = VINF_SUCCESS;
1132 for (;;)
1133 {
1134#ifdef VBOX_WITH_REM
1135 /*
1136 * Lock REM and update the state if not already in sync.
1137 *
1138 * Note! Big lock, but you are not supposed to own any lock when
1139 * coming in here.
1140 */
1141 if (!fInREMState)
1142 {
1143 EMRemLock(pVM);
1144 STAM_PROFILE_START(&pVCpu->em.s.StatREMSync, b);
1145
1146 /* Flush the recompiler translation blocks if the VCPU has changed;
1147 also force a full CPU state resync. */
1148 if (pVM->em.s.idLastRemCpu != pVCpu->idCpu)
1149 {
1150 REMFlushTBs(pVM);
1151 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
1152 }
1153 pVM->em.s.idLastRemCpu = pVCpu->idCpu;
1154
1155 rc = REMR3State(pVM, pVCpu);
1156
1157 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMSync, b);
1158 if (RT_FAILURE(rc))
1159 break;
1160 fInREMState = true;
1161
1162 /*
1163 * We might have missed the raising of VMREQ, TIMER and some other
1164 * important FFs while we were busy switching the state. So, check again.
1165 */
1166 if ( VM_FF_IS_PENDING(pVM, VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_DBGF | VM_FF_CHECK_VM_STATE | VM_FF_RESET)
1167 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TIMER | VMCPU_FF_REQUEST))
1168 {
1169 LogFlow(("emR3RemExecute: Skipping run, because FF is set. %#x\n", pVM->fGlobalForcedActions));
1170 goto l_REMDoForcedActions;
1171 }
1172 }
1173#endif
1174
1175 /*
1176 * Execute REM.
1177 */
1178 if (RT_LIKELY(emR3IsExecutionAllowed(pVM, pVCpu)))
1179 {
1180 STAM_PROFILE_START(&pVCpu->em.s.StatREMExec, c);
1181#ifdef VBOX_WITH_REM
1182 rc = REMR3Run(pVM, pVCpu);
1183#else
1184 rc = VBOXSTRICTRC_TODO(IEMExecLots(pVCpu, NULL /*pcInstructions*/));
1185#endif
1186 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMExec, c);
1187 }
1188 else
1189 {
1190 /* Give up this time slice; virtual time continues */
1191 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatCapped, u);
1192 RTThreadSleep(5);
1193 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatCapped, u);
1194 rc = VINF_SUCCESS;
1195 }
1196
1197 /*
1198 * Deal with high priority post execution FFs before doing anything
1199 * else. Sync back the state and leave the lock to be on the safe side.
1200 */
1201 if ( VM_FF_IS_PENDING(pVM, VM_FF_HIGH_PRIORITY_POST_MASK)
1202 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_MASK))
1203 {
1204#ifdef VBOX_WITH_REM
1205 fInREMState = emR3RemExecuteSyncBack(pVM, pVCpu);
1206#endif
1207 rc = emR3HighPriorityPostForcedActions(pVM, pVCpu, rc);
1208 }
1209
1210 /*
1211 * Process the returned status code.
1212 */
1213 if (rc != VINF_SUCCESS)
1214 {
1215 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
1216 break;
1217 if (rc != VINF_REM_INTERRUPED_FF)
1218 {
1219#ifndef VBOX_WITH_REM
1220 /* Try to dodge unimplemented IEM trouble by rescheduling. */
1221 if ( rc == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1222 || rc == VERR_IEM_INSTR_NOT_IMPLEMENTED)
1223 {
1224 EMSTATE enmNewState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
1225 if (enmNewState != EMSTATE_REM && enmNewState != EMSTATE_IEM_THEN_REM)
1226 {
1227 rc = VINF_EM_RESCHEDULE;
1228 break;
1229 }
1230 }
1231#endif
1232
1233 /*
1234 * Anything which is not known to us means an internal error
1235 * and the termination of the VM!
1236 */
1237 AssertMsg(rc == VERR_REM_TOO_MANY_TRAPS, ("Unknown GC return code: %Rra\n", rc));
1238 break;
1239 }
1240 }
1241
1242
1243 /*
1244 * Check and execute forced actions.
1245 *
1246 * Sync back the VM state and leave the lock before calling any of
1247 * these; you never know what's going to happen here.
1248 */
1249#ifdef VBOX_HIGH_RES_TIMERS_HACK
1250 TMTimerPollVoid(pVM, pVCpu);
1251#endif
1252 AssertCompile(VMCPU_FF_ALL_REM_MASK & VMCPU_FF_TIMER);
1253 if ( VM_FF_IS_PENDING(pVM, VM_FF_ALL_REM_MASK)
1254 || VMCPU_FF_IS_PENDING(pVCpu,
1255 VMCPU_FF_ALL_REM_MASK
1256 & VM_WHEN_RAW_MODE(~(VMCPU_FF_CSAM_PENDING_ACTION | VMCPU_FF_CSAM_SCAN_PAGE), UINT32_MAX)) )
1257 {
1258#ifdef VBOX_WITH_REM
1259l_REMDoForcedActions:
1260 if (fInREMState)
1261 fInREMState = emR3RemExecuteSyncBack(pVM, pVCpu);
1262#endif
1263 STAM_REL_PROFILE_ADV_SUSPEND(&pVCpu->em.s.StatREMTotal, a);
1264 rc = emR3ForcedActions(pVM, pVCpu, rc);
1265 VBOXVMM_EM_FF_ALL_RET(pVCpu, rc);
1266 STAM_REL_PROFILE_ADV_RESUME(&pVCpu->em.s.StatREMTotal, a);
1267 if ( rc != VINF_SUCCESS
1268 && rc != VINF_EM_RESCHEDULE_REM)
1269 {
1270 *pfFFDone = true;
1271 break;
1272 }
1273 }
1274
1275#ifndef VBOX_WITH_REM
1276 /*
1277 * Every so often, check whether we can get back to fast execution mode.
1278 */
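    /* Note: the "& 7" below makes the check fire on every 8th iteration,
       i.e. whenever the low three bits of ++cLoops are all zero. */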
1279 if (!(++cLoops & 7))
1280 {
1281 EMSTATE enmCheck = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
1282 if ( enmCheck != EMSTATE_REM
1283 && enmCheck != EMSTATE_IEM_THEN_REM)
1284 return VINF_EM_RESCHEDULE;
1285 }
1286#endif
1287
1288 } /* The Inner Loop, recompiled execution mode version. */
1289
1290
1291#ifdef VBOX_WITH_REM
1292 /*
1293 * Returning. Sync back the VM state if required.
1294 */
1295 if (fInREMState)
1296 fInREMState = emR3RemExecuteSyncBack(pVM, pVCpu);
1297#endif
1298
1299 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatREMTotal, a);
1300 return rc;
1301}
1302
1303
1304#ifdef DEBUG
1305
1306int emR3SingleStepExecRem(PVM pVM, PVMCPU pVCpu, uint32_t cIterations)
1307{
1308 EMSTATE enmOldState = pVCpu->em.s.enmState;
1309
1310 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_REM;
1311
1312 Log(("Single step BEGIN:\n"));
1313 for (uint32_t i = 0; i < cIterations; i++)
1314 {
1315 DBGFR3PrgStep(pVCpu);
1316 DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, "RSS");
1317 emR3RemStep(pVM, pVCpu);
1318 if (emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx) != EMSTATE_REM)
1319 break;
1320 }
1321 Log(("Single step END:\n"));
1322 CPUMSetGuestEFlags(pVCpu, CPUMGetGuestEFlags(pVCpu) & ~X86_EFL_TF);
1323 pVCpu->em.s.enmState = enmOldState;
1324 return VINF_EM_RESCHEDULE;
1325}
1326
1327#endif /* DEBUG */
1328
1329
1330/**
1331 * Try to execute the problematic code in IEM first, then fall back on REM if there
1332 * is too much of it or if IEM doesn't implement something.
1333 *
1334 * @returns Strict VBox status code from IEMExecLots.
1335 * @param pVM The cross context VM structure.
1336 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1337 * @param pfFFDone Force flags done indicator.
1338 *
1339 * @thread EMT(pVCpu)
1340 */
1341static VBOXSTRICTRC emR3ExecuteIemThenRem(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
1342{
1343 LogFlow(("emR3ExecuteIemThenRem: %04x:%RGv\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestRIP(pVCpu)));
1344 *pfFFDone = false;
1345
1346 /*
1347 * Execute in IEM for a while.
1348 */
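    /* The 1024 instruction budget below is the cut-off after which we conclude
       that there is too much problematic code and switch to plain REM execution
       (see the bottom of the function). */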
1349 while (pVCpu->em.s.cIemThenRemInstructions < 1024)
1350 {
1351 uint32_t cInstructions;
1352 VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu, &cInstructions);
1353 pVCpu->em.s.cIemThenRemInstructions += cInstructions;
1354 if (rcStrict != VINF_SUCCESS)
1355 {
1356 if ( rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1357 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
1358 break;
1359
1360 Log(("emR3ExecuteIemThenRem: returns %Rrc after %u instructions\n",
1361 VBOXSTRICTRC_VAL(rcStrict), pVCpu->em.s.cIemThenRemInstructions));
1362 return rcStrict;
1363 }
1364
1365 EMSTATE enmNewState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
1366 if (enmNewState != EMSTATE_REM && enmNewState != EMSTATE_IEM_THEN_REM)
1367 {
1368 LogFlow(("emR3ExecuteIemThenRem: -> %d (%s) after %u instructions\n",
1369 enmNewState, emR3GetStateName(enmNewState), pVCpu->em.s.cIemThenRemInstructions));
1370 pVCpu->em.s.enmPrevState = pVCpu->em.s.enmState;
1371 pVCpu->em.s.enmState = enmNewState;
1372 return VINF_SUCCESS;
1373 }
1374
1375 /*
1376 * Check for pending actions.
1377 */
1378 if ( VM_FF_IS_PENDING(pVM, VM_FF_ALL_REM_MASK)
1379 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_ALL_REM_MASK & ~VMCPU_FF_UNHALT))
1380 return VINF_SUCCESS;
1381 }
1382
1383 /*
1384 * Switch to REM.
1385 */
1386 Log(("emR3ExecuteIemThenRem: -> EMSTATE_REM (after %u instructions)\n", pVCpu->em.s.cIemThenRemInstructions));
1387 pVCpu->em.s.enmState = EMSTATE_REM;
1388 return VINF_SUCCESS;
1389}
1390
1391
1392/**
1393 * Decides whether to execute RAW, HM, NEM, IEM or REM.
1394 *
1395 * @returns new EM state
1396 * @param pVM The cross context VM structure.
1397 * @param pVCpu The cross context virtual CPU structure.
1398 * @param pCtx Pointer to the guest CPU context.
1399 */
1400EMSTATE emR3Reschedule(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
1401{
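    /*
     * Overview of the decision order below: forced raw-mode, wait-for-SIPI,
     * execute-everything-in-IEM, then HM/NEM when raw-mode is disabled
     * (falling back to IEM-then-REM), and finally the long list of raw-mode
     * suitability checks, each of which falls back to REM when the current
     * guest state is something raw-mode cannot handle.
     */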
1402 /*
1403 * When forcing raw-mode execution, things are simple.
1404 */
1405 if (pVCpu->em.s.fForceRAW)
1406 return EMSTATE_RAW;
1407
1408 /*
1409 * We stay in the wait for SIPI state unless explicitly told otherwise.
1410 */
1411 if (pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI)
1412 return EMSTATE_WAIT_SIPI;
1413
1414 /*
1415 * Execute everything in IEM?
1416 */
1417 if (pVM->em.s.fIemExecutesAll)
1418 return EMSTATE_IEM;
1419
1420 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
1421 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
1422 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
1423
1424 X86EFLAGS EFlags = pCtx->eflags;
1425 if (!VM_IS_RAW_MODE_ENABLED(pVM))
1426 {
1427 if (EMIsHwVirtExecutionEnabled(pVM))
1428 {
1429 if (VM_IS_HM_ENABLED(pVM))
1430 {
1431 if (HMR3CanExecuteGuest(pVM, pCtx))
1432 return EMSTATE_HM;
1433 }
1434 else if (NEMR3CanExecuteGuest(pVM, pVCpu, pCtx))
1435 return EMSTATE_NEM;
1436
1437 /*
1438 * Note! Raw mode and hw accelerated mode are incompatible. The latter
1439 * turns off monitoring features essential for raw mode!
1440 */
1441 return EMSTATE_IEM_THEN_REM;
1442 }
1443 }
1444
1445 /*
1446 * Standard raw-mode:
1447 *
1448 * Here we only support 16-bit and 32-bit protected mode ring-3 code with no I/O privileges,
1449 * or 32-bit protected mode ring-0 code.
1450 *
1451 * The tests are ordered by the likelihood of being true during normal execution.
1452 */
1453 if (EFlags.u32 & (X86_EFL_TF /* | HF_INHIBIT_IRQ_MASK*/))
1454 {
1455 Log2(("raw mode refused: EFlags=%#x\n", EFlags.u32));
1456 return EMSTATE_REM;
1457 }
1458
1459# ifndef VBOX_RAW_V86
1460 if (EFlags.u32 & X86_EFL_VM) {
1461 Log2(("raw mode refused: VM_MASK\n"));
1462 return EMSTATE_REM;
1463 }
1464# endif
1465
1466 /** @todo check up the X86_CR0_AM flag in respect to raw mode!!! We're probably not emulating it right! */
1467 uint32_t u32CR0 = pCtx->cr0;
1468 if ((u32CR0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
1469 {
1470 //Log2(("raw mode refused: %s%s%s\n", (u32CR0 & X86_CR0_PG) ? "" : " !PG", (u32CR0 & X86_CR0_PE) ? "" : " !PE", (u32CR0 & X86_CR0_AM) ? "" : " !AM"));
1471 return EMSTATE_REM;
1472 }
1473
1474 if (pCtx->cr4 & X86_CR4_PAE)
1475 {
1476 uint32_t u32Dummy, u32Features;
1477
1478 CPUMGetGuestCpuId(pVCpu, 1, 0, &u32Dummy, &u32Dummy, &u32Dummy, &u32Features);
1479 if (!(u32Features & X86_CPUID_FEATURE_EDX_PAE))
1480 return EMSTATE_REM;
1481 }
1482
1483 unsigned uSS = pCtx->ss.Sel;
1484 if ( pCtx->eflags.Bits.u1VM
1485 || (uSS & X86_SEL_RPL) == 3)
1486 {
1487 if (!EMIsRawRing3Enabled(pVM))
1488 return EMSTATE_REM;
1489
1490 if (!(EFlags.u32 & X86_EFL_IF))
1491 {
1492 Log2(("raw mode refused: IF (RawR3)\n"));
1493 return EMSTATE_REM;
1494 }
1495
1496 if (!(u32CR0 & X86_CR0_WP) && EMIsRawRing0Enabled(pVM))
1497 {
1498 Log2(("raw mode refused: CR0.WP + RawR0\n"));
1499 return EMSTATE_REM;
1500 }
1501 }
1502 else
1503 {
1504 if (!EMIsRawRing0Enabled(pVM))
1505 return EMSTATE_REM;
1506
1507 if (EMIsRawRing1Enabled(pVM))
1508 {
1509 /* Only ring 0 and 1 supervisor code. */
1510 if ((uSS & X86_SEL_RPL) == 2) /* ring 1 code is moved into ring 2, so we can't support ring-2 in that case. */
1511 {
1512 Log2(("raw r0 mode refused: CPL %d\n", uSS & X86_SEL_RPL));
1513 return EMSTATE_REM;
1514 }
1515 }
1516 /* Only ring 0 supervisor code. */
1517 else if ((uSS & X86_SEL_RPL) != 0)
1518 {
1519 Log2(("raw r0 mode refused: CPL %d\n", uSS & X86_SEL_RPL));
1520 return EMSTATE_REM;
1521 }
1522
1523 // Let's start with pure 32-bit ring-0 code first.
1524 /** @todo What's pure 32-bit mode? flat? */
1525 if ( !(pCtx->ss.Attr.n.u1DefBig)
1526 || !(pCtx->cs.Attr.n.u1DefBig))
1527 {
1528 Log2(("raw r0 mode refused: SS/CS not 32bit\n"));
1529 return EMSTATE_REM;
1530 }
1531
1532 /* Write protection must be turned on, or else the guest can overwrite our hypervisor code and data. */
1533 if (!(u32CR0 & X86_CR0_WP))
1534 {
1535 Log2(("raw r0 mode refused: CR0.WP=0!\n"));
1536 return EMSTATE_REM;
1537 }
1538
1539# ifdef VBOX_WITH_RAW_MODE
1540 if (PATMShouldUseRawMode(pVM, (RTGCPTR)pCtx->eip))
1541 {
1542 Log2(("raw r0 mode forced: patch code\n"));
1543# ifdef VBOX_WITH_SAFE_STR
1544 Assert(pCtx->tr.Sel);
1545# endif
1546 return EMSTATE_RAW;
1547 }
1548# endif /* VBOX_WITH_RAW_MODE */
1549
1550# if !defined(VBOX_ALLOW_IF0) && !defined(VBOX_RUN_INTERRUPT_GATE_HANDLERS)
1551 if (!(EFlags.u32 & X86_EFL_IF))
1552 {
1553 ////Log2(("R0: IF=0 VIF=%d %08X\n", eip, pVMeflags));
1554 //Log2(("RR0: Interrupts turned off; fall back to emulation\n"));
1555 return EMSTATE_REM;
1556 }
1557# endif
1558
1559# ifndef VBOX_WITH_RAW_RING1
1560 /** @todo still necessary??? */
1561 if (EFlags.Bits.u2IOPL != 0)
1562 {
1563 Log2(("raw r0 mode refused: IOPL %d\n", EFlags.Bits.u2IOPL));
1564 return EMSTATE_REM;
1565 }
1566# endif
1567 }
1568
1569 /*
1570 * Stale hidden selectors mean raw-mode is unsafe (we are being very careful here).
1571 */
1572 if (pCtx->cs.fFlags & CPUMSELREG_FLAGS_STALE)
1573 {
1574 Log2(("raw mode refused: stale CS\n"));
1575 return EMSTATE_REM;
1576 }
1577 if (pCtx->ss.fFlags & CPUMSELREG_FLAGS_STALE)
1578 {
1579 Log2(("raw mode refused: stale SS\n"));
1580 return EMSTATE_REM;
1581 }
1582 if (pCtx->ds.fFlags & CPUMSELREG_FLAGS_STALE)
1583 {
1584 Log2(("raw mode refused: stale DS\n"));
1585 return EMSTATE_REM;
1586 }
1587 if (pCtx->es.fFlags & CPUMSELREG_FLAGS_STALE)
1588 {
1589 Log2(("raw mode refused: stale ES\n"));
1590 return EMSTATE_REM;
1591 }
1592 if (pCtx->fs.fFlags & CPUMSELREG_FLAGS_STALE)
1593 {
1594 Log2(("raw mode refused: stale FS\n"));
1595 return EMSTATE_REM;
1596 }
1597 if (pCtx->gs.fFlags & CPUMSELREG_FLAGS_STALE)
1598 {
1599 Log2(("raw mode refused: stale GS\n"));
1600 return EMSTATE_REM;
1601 }
1602
1603# ifdef VBOX_WITH_SAFE_STR
1604 if (pCtx->tr.Sel == 0)
1605 {
1606 Log(("Raw mode refused -> TR=0\n"));
1607 return EMSTATE_REM;
1608 }
1609# endif
1610
1611 /*Assert(PGMPhysIsA20Enabled(pVCpu));*/
1612 return EMSTATE_RAW;
1613}
1614
1615
1616/**
1617 * Executes all high priority post execution forced actions.
1618 *
1619 * @returns rc or a fatal status code.
1620 *
1621 * @param pVM The cross context VM structure.
1622 * @param pVCpu The cross context virtual CPU structure.
1623 * @param rc The current rc.
1624 */
1625int emR3HighPriorityPostForcedActions(PVM pVM, PVMCPU pVCpu, int rc)
1626{
1627 VBOXVMM_EM_FF_HIGH(pVCpu, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions, rc);
1628
1629 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PDM_CRITSECT))
1630 PDMCritSectBothFF(pVCpu);
1631
1632 /* Update CR3 (Nested Paging case for HM). */
1633 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
1634 {
1635 int rc2 = PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
1636 if (RT_FAILURE(rc2))
1637 return rc2;
1638 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
1639 }
1640
1641 /* Update PAE PDPEs. This must be done *after* PGMUpdateCR3() and is used only in the Nested Paging case for HM. */
1642 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES))
1643 {
1644 if (CPUMIsGuestInPAEMode(pVCpu))
1645 {
1646 PX86PDPE pPdpes = HMGetPaePdpes(pVCpu);
1647 AssertPtr(pPdpes);
1648
1649 PGMGstUpdatePaePdpes(pVCpu, pPdpes);
1650 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
1651 }
1652 else
1653 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES);
1654 }
1655
1656 /* IEM has pending work (typically memory write after INS instruction). */
1657 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_IEM))
1658 rc = VBOXSTRICTRC_TODO(IEMR3ProcessForceFlag(pVM, pVCpu, rc));
1659
1660 /* IOM has pending work (committing an I/O or MMIO write). */
1661 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_IOM))
1662 rc = VBOXSTRICTRC_TODO(IOMR3ProcessForceFlag(pVM, pVCpu, rc));
1663
1664#ifdef VBOX_WITH_RAW_MODE
1665 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_CSAM_PENDING_ACTION))
1666 CSAMR3DoPendingAction(pVM, pVCpu);
1667#endif
1668
1669 if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
1670 {
1671 if ( rc > VINF_EM_NO_MEMORY
1672 && rc <= VINF_EM_LAST)
1673 rc = VINF_EM_NO_MEMORY;
1674 }
1675
1676 return rc;
1677}
1678
1679#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1680/**
1681 * Helper for emR3ForcedActions() for injecting interrupts into the
1682 * nested-guest.
1683 *
1684 * @returns VBox status code.
1685 * @param pVCpu The cross context virtual CPU structure.
1686 * @param pCtx Pointer to the nested-guest CPU context.
1687 * @param pfResched Where to store whether a reschedule is required.
1688 * @param pfInject Where to store whether an interrupt was injected (and if
1689 * a wake up is pending).
1690 */
1691static int emR3NstGstInjectIntr(PVMCPU pVCpu, PCPUMCTX pCtx, bool *pfResched, bool *pfInject)
1692{
1693 *pfResched = false;
1694 *pfInject = false;
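    /*
     * Flow of the SVM case below: with the (virtual) GIF set we first try the
     * physical interrupt path, which either causes an INTR #VMEXIT when
     * intercepted or injects the interrupt via TRPM; then the virtual
     * interrupt path, which either causes a VINTR #VMEXIT when intercepted or
     * asserts the virtual vector via TRPM. Both injection paths also request
     * a reschedule.
     */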
1695 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
1696 {
1697 PVM pVM = pVCpu->CTX_SUFF(pVM);
1698 Assert(pCtx->hwvirt.fGif);
1699 bool fVirtualGif = CPUMGetSvmNstGstVGif(pCtx);
1700#ifdef VBOX_WITH_RAW_MODE
1701 fVirtualGif &= !PATMIsPatchGCAddr(pVM, pCtx->eip);
1702#endif
1703 if (fVirtualGif)
1704 {
1705 if (CPUMCanSvmNstGstTakePhysIntr(pVCpu, pCtx))
1706 {
1707 Assert(pVCpu->em.s.enmState != EMSTATE_WAIT_SIPI);
1708 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
1709 {
1710 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_INTR))
1711 {
1712 VBOXSTRICTRC rcStrict = IEMExecSvmVmexit(pVCpu, SVM_EXIT_INTR, 0, 0);
1713 if (RT_SUCCESS(rcStrict))
1714 {
1715 /** @todo r=ramshankar: Do we need to signal a wakeup here? If a nested-guest
1716 * doesn't intercept HLT but intercepts INTR? */
1717 *pfResched = true;
1718 if (rcStrict == VINF_SVM_VMEXIT)
1719 return VINF_SUCCESS;
1720 if (rcStrict == VINF_PGM_CHANGE_MODE)
1721 return PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
1722 return VBOXSTRICTRC_VAL(rcStrict);
1723 }
1724
1725 AssertMsgFailed(("INTR #VMEXIT failed! rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1726 return VINF_EM_TRIPLE_FAULT;
1727 }
1728
1729 /* Note: it's important to make sure the return code from TRPMR3InjectEvent isn't ignored! */
1730 /** @todo this really isn't nice, should properly handle this */
1731 int rc = TRPMR3InjectEvent(pVM, pVCpu, TRPM_HARDWARE_INT);
1732 if (rc == VINF_SVM_VMEXIT)
1733 rc = VINF_SUCCESS;
1734 else if (rc == VINF_PGM_CHANGE_MODE)
1735 rc = PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
1736 if (pVM->em.s.fIemExecutesAll && ( rc == VINF_EM_RESCHEDULE_REM
1737 || rc == VINF_EM_RESCHEDULE_HM
1738 || rc == VINF_EM_RESCHEDULE_RAW))
1739 {
1740 rc = VINF_EM_RESCHEDULE;
1741 }
1742
1743 *pfResched = true;
1744 *pfInject = true;
1745 return rc;
1746 }
1747 }
1748
1749 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST)
1750 && CPUMCanSvmNstGstTakeVirtIntr(pVCpu, pCtx))
1751 {
1752 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_VINTR))
1753 {
1754 VBOXSTRICTRC rcStrict = IEMExecSvmVmexit(pVCpu, SVM_EXIT_VINTR, 0, 0);
1755 if (RT_SUCCESS(rcStrict))
1756 {
1757 /** @todo r=ramshankar: Do we need to signal a wakeup here? If a nested-guest
1758 * doesn't intercept HLT but intercepts VINTR? */
1759 *pfResched = true;
1760 if (rcStrict == VINF_SVM_VMEXIT)
1761 return VINF_SUCCESS;
1762 if (rcStrict == VINF_PGM_CHANGE_MODE)
1763 return PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
1764 return VBOXSTRICTRC_VAL(rcStrict);
1765 }
1766
1767 AssertMsgFailed(("VINTR #VMEXIT failed! rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1768 return VINF_EM_TRIPLE_FAULT;
1769 }
1770
1771 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST);
1772 uint8_t const uNstGstVector = CPUMGetSvmNstGstInterrupt(pCtx);
1773 AssertMsg(uNstGstVector > 0 && uNstGstVector <= X86_XCPT_LAST, ("Invalid VINTR vector %#x\n", uNstGstVector));
1774 TRPMAssertTrap(pVCpu, uNstGstVector, TRPM_HARDWARE_INT);
1775 Log(("EM: Asserting nested-guest virt. hardware intr: %#x\n", uNstGstVector));
1776
1777 *pfResched = true;
1778 *pfInject = true;
1779 return VINF_EM_RESCHEDULE;
1780 }
1781 }
1782 return VINF_SUCCESS;
1783 }
1784
1785 if (CPUMIsGuestInVmxNestedHwVirtMode(pCtx))
1786 { /** @todo Nested VMX. */ }
1787
1788 /* Shouldn't really get here. */
1789 AssertMsgFailed(("Unrecognized nested hwvirt. arch!\n"));
1790 return VERR_EM_INTERNAL_ERROR;
1791}
1792#endif
1793
1794/**
1795 * Executes all pending forced actions.
1796 *
1797 * Forced actions can cause execution delays and execution
1798 * rescheduling. The first we deal with using action priority, so
1799 * that for instance pending timers aren't scheduled and run until
1800 * right before execution. The rescheduling we deal with using
1801 * return codes. The same goes for VM termination, only in that case
1802 * we exit everything.
1803 *
1804 * @returns VBox status code of equal or greater importance/severity than rc.
1805 * The most important ones are: VINF_EM_RESCHEDULE,
1806 * VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1807 *
1808 * @param pVM The cross context VM structure.
1809 * @param pVCpu The cross context virtual CPU structure.
1810 * @param rc The current rc.
1811 *
1812 */
1813int emR3ForcedActions(PVM pVM, PVMCPU pVCpu, int rc)
1814{
1815 STAM_REL_PROFILE_START(&pVCpu->em.s.StatForcedActions, a);
1816#ifdef VBOX_STRICT
1817 int rcIrq = VINF_SUCCESS;
1818#endif
1819 int rc2;
1820#define UPDATE_RC() \
1821 do { \
1822 AssertMsg(rc2 <= 0 || (rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST), ("Invalid FF return code: %Rra\n", rc2)); \
1823 if (rc2 == VINF_SUCCESS || rc < VINF_SUCCESS) \
1824 break; \
1825 if (!rc || rc2 < rc) \
1826 rc = rc2; \
1827 } while (0)
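/* UPDATE_RC() merges the status rc2 of the action just executed into the
   overall rc, keeping whichever is the more important one; per the VBox
   convention, the numerically smaller of two VINF_EM_* codes wins, and an
   error already in rc is never overwritten. Illustrative example (assuming
   the usual VINF_EM_* ordering): if rc2 is VINF_EM_SUSPEND while rc holds
   VINF_EM_RESCHEDULE, rc becomes VINF_EM_SUSPEND. */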
1828 VBOXVMM_EM_FF_ALL(pVCpu, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions, rc);
1829
1830 /*
1831 * Post execution chunk first.
1832 */
1833 if ( VM_FF_IS_PENDING(pVM, VM_FF_NORMAL_PRIORITY_POST_MASK)
1834 || (VMCPU_FF_NORMAL_PRIORITY_POST_MASK && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_NORMAL_PRIORITY_POST_MASK)) )
1835 {
1836 /*
1837 * EMT Rendezvous (must be serviced before termination).
1838 */
1839 if (VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS))
1840 {
1841 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
1842 UPDATE_RC();
1843 /** @todo HACK ALERT! The following test is to make sure EM+TM
1844 * thinks the VM is stopped/reset before the next VM state change
1845 * is made. We need a better solution for this, or at least make it
1846 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1847 * VINF_EM_SUSPEND). */
1848 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1849 {
1850 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1851 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1852 return rc;
1853 }
1854 }
1855
1856 /*
1857 * State change request (cleared by vmR3SetStateLocked).
1858 */
1859 if (VM_FF_IS_PENDING(pVM, VM_FF_CHECK_VM_STATE))
1860 {
1861 VMSTATE enmState = VMR3GetState(pVM);
1862 switch (enmState)
1863 {
1864 case VMSTATE_FATAL_ERROR:
1865 case VMSTATE_FATAL_ERROR_LS:
1866 case VMSTATE_GURU_MEDITATION:
1867 case VMSTATE_GURU_MEDITATION_LS:
1868 Log2(("emR3ForcedActions: %s -> VINF_EM_SUSPEND\n", VMGetStateName(enmState) ));
1869 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1870 return VINF_EM_SUSPEND;
1871
1872 case VMSTATE_DESTROYING:
1873 Log2(("emR3ForcedActions: %s -> VINF_EM_TERMINATE\n", VMGetStateName(enmState) ));
1874 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1875 return VINF_EM_TERMINATE;
1876
1877 default:
1878 AssertMsgFailed(("%s\n", VMGetStateName(enmState)));
1879 }
1880 }
1881
1882 /*
1883 * Debugger Facility polling.
1884 */
1885 if ( VM_FF_IS_PENDING(pVM, VM_FF_DBGF)
1886 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_DBGF) )
1887 {
1888 rc2 = DBGFR3VMMForcedAction(pVM, pVCpu);
1889 UPDATE_RC();
1890 }
1891
1892 /*
1893 * Postponed reset request.
1894 */
1895 if (VM_FF_TEST_AND_CLEAR(pVM, VM_FF_RESET))
1896 {
1897 rc2 = VBOXSTRICTRC_TODO(VMR3ResetFF(pVM));
1898 UPDATE_RC();
1899 }
1900
1901#ifdef VBOX_WITH_RAW_MODE
1902 /*
1903 * CSAM page scanning.
1904 */
1905 if ( !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)
1906 && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_CSAM_SCAN_PAGE))
1907 {
1908 PCPUMCTX pCtx = pVCpu->em.s.pCtx;
1909
1910 /** @todo check for 16 or 32 bits code! (D bit in the code selector) */
1911 Log(("Forced action VMCPU_FF_CSAM_SCAN_PAGE\n"));
1912
1913 CSAMR3CheckCodeEx(pVM, pCtx, pCtx->eip);
1914 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_CSAM_SCAN_PAGE);
1915 }
1916#endif
1917
1918 /*
1919 * Out of memory? Putting this after CSAM as it may in theory cause us to run out of memory.
1920 */
1921 if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
1922 {
1923 rc2 = PGMR3PhysAllocateHandyPages(pVM);
1924 UPDATE_RC();
1925 if (rc == VINF_EM_NO_MEMORY)
1926 return rc;
1927 }
1928
1929 /* check that we got them all */
1930 AssertCompile(VM_FF_NORMAL_PRIORITY_POST_MASK == (VM_FF_CHECK_VM_STATE | VM_FF_DBGF | VM_FF_RESET | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
1931 AssertCompile(VMCPU_FF_NORMAL_PRIORITY_POST_MASK == (VM_WHEN_RAW_MODE(VMCPU_FF_CSAM_SCAN_PAGE, 0) | VMCPU_FF_DBGF));
1932 }
1933
1934 /*
1935 * Normal priority then.
1936 * (Executed in no particular order.)
1937 */
1938 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_NORMAL_PRIORITY_MASK, VM_FF_PGM_NO_MEMORY))
1939 {
1940 /*
1941 * PDM Queues are pending.
1942 */
1943 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PDM_QUEUES, VM_FF_PGM_NO_MEMORY))
1944 PDMR3QueueFlushAll(pVM);
1945
1946 /*
1947 * PDM DMA transfers are pending.
1948 */
1949 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PDM_DMA, VM_FF_PGM_NO_MEMORY))
1950 PDMR3DmaRun(pVM);
1951
1952 /*
1953 * EMT Rendezvous (make sure they are handled before the requests).
1954 */
1955 if (VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS))
1956 {
1957 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
1958 UPDATE_RC();
1959 /** @todo HACK ALERT! The following test is to make sure EM+TM
1960 * thinks the VM is stopped/reset before the next VM state change
1961 * is made. We need a better solution for this, or at least make it
1962 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1963 * VINF_EM_SUSPEND). */
1964 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1965 {
1966 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1967 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1968 return rc;
1969 }
1970 }
1971
1972 /*
1973 * Requests from other threads.
1974 */
1975 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_REQUEST, VM_FF_PGM_NO_MEMORY))
1976 {
1977 rc2 = VMR3ReqProcessU(pVM->pUVM, VMCPUID_ANY, false /*fPriorityOnly*/);
1978 if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE) /** @todo this shouldn't be necessary */
1979 {
1980 Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
1981 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1982 return rc2;
1983 }
1984 UPDATE_RC();
1985 /** @todo HACK ALERT! The following test is to make sure EM+TM
1986 * thinks the VM is stopped/reset before the next VM state change
1987 * is made. We need a better solution for this, or at least make it
1988 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1989 * VINF_EM_SUSPEND). */
1990 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1991 {
1992 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1993 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1994 return rc;
1995 }
1996 }
1997
1998#ifdef VBOX_WITH_REM
1999 /* Replay the handler notification changes. */
2000 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_REM_HANDLER_NOTIFY, VM_FF_PGM_NO_MEMORY))
2001 {
2002 /* Try not to cause deadlocks. */
2003 if ( pVM->cCpus == 1
2004 || ( !PGMIsLockOwner(pVM)
2005 && !IOMIsLockWriteOwner(pVM))
2006 )
2007 {
2008 EMRemLock(pVM);
2009 REMR3ReplayHandlerNotifications(pVM);
2010 EMRemUnlock(pVM);
2011 }
2012 }
2013#endif
2014
2015 /* check that we got them all */
2016 AssertCompile(VM_FF_NORMAL_PRIORITY_MASK == (VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA | VM_FF_REM_HANDLER_NOTIFY | VM_FF_EMT_RENDEZVOUS));
2017 }
2018
2019 /*
2020 * Normal priority then. (per-VCPU)
2021 * (Executed in no particular order.)
2022 */
2023 if ( !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)
2024 && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_NORMAL_PRIORITY_MASK))
2025 {
2026 /*
2027 * Requests from other threads.
2028 */
2029 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_REQUEST))
2030 {
2031 rc2 = VMR3ReqProcessU(pVM->pUVM, pVCpu->idCpu, false /*fPriorityOnly*/);
2032 if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE || rc2 == VINF_EM_RESET)
2033 {
2034 Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
2035 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2036 return rc2;
2037 }
2038 UPDATE_RC();
2039 /** @todo HACK ALERT! The following test is to make sure EM+TM
2040 * thinks the VM is stopped/reset before the next VM state change
2041 * is made. We need a better solution for this, or at least make it
2042 * possible to do: (rc >= VINF_EM_FIRST && rc <=
2043 * VINF_EM_SUSPEND). */
2044 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
2045 {
2046 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2047 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2048 return rc;
2049 }
2050 }
2051
2052 /* check that we got them all */
2053 Assert(!(VMCPU_FF_NORMAL_PRIORITY_MASK & ~VMCPU_FF_REQUEST));
2054 }
2055
2056 /*
2057 * High priority pre execution chunk last.
2058 * (Executed in ascending priority order.)
2059 */
2060 if ( VM_FF_IS_PENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_MASK)
2061 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_MASK))
2062 {
2063 /*
2064 * Timers before interrupts.
2065 */
2066 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TIMER)
2067 && !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
2068 TMR3TimerQueuesDo(pVM);
2069
2070 /*
2071 * Pick up asynchronously posted interrupts into the APIC.
2072 */
2073 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
2074 APICUpdatePendingInterrupts(pVCpu);
2075
2076 /*
2077 * The instruction following an emulated STI should *always* be executed!
2078 *
2079 * Note! We intentionally don't clear VMCPU_FF_INHIBIT_INTERRUPTS here if
2080 * the eip is the same as the inhibited instr address. Before we
2081 * are able to execute this instruction in raw mode (iret to
2082 * guest code) an external interrupt might force a world switch
2083 * again. Possibly allowing a guest interrupt to be dispatched
2084 * in the process. This could break the guest. Sounds very
2085 * unlikely, but such timing-sensitive problems are not as rare as
2086 * you might think.
2087 */
2088 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
2089 && !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
2090 {
2091 if (CPUMGetGuestRIP(pVCpu) != EMGetInhibitInterruptsPC(pVCpu))
2092 {
2093 Log(("Clearing VMCPU_FF_INHIBIT_INTERRUPTS at %RGv - successor %RGv\n", (RTGCPTR)CPUMGetGuestRIP(pVCpu), EMGetInhibitInterruptsPC(pVCpu)));
2094 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
2095 }
2096 else
2097 Log(("Leaving VMCPU_FF_INHIBIT_INTERRUPTS set at %RGv\n", (RTGCPTR)CPUMGetGuestRIP(pVCpu)));
2098 }
2099
2100 /*
2101 * Interrupts.
2102 */
2103 bool fWakeupPending = false;
2104 if ( !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)
2105 && (!rc || rc >= VINF_EM_RESCHEDULE_HM))
2106 {
2107 if ( !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
2108 && !TRPMHasTrap(pVCpu)) /* an interrupt could already be scheduled for dispatching in the recompiler. */
2109 {
2110 Assert(!HMR3IsEventPending(pVCpu));
2111 PCPUMCTX pCtx = pVCpu->em.s.pCtx;
2112#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
2113 if (CPUMIsGuestInNestedHwVirtMode(pCtx))
2114 {
2115 bool fResched, fInject;
2116 rc2 = emR3NstGstInjectIntr(pVCpu, pCtx, &fResched, &fInject);
2117 if (fInject)
2118 {
2119 fWakeupPending = true;
2120#ifdef VBOX_STRICT
2121 rcIrq = rc2;
2122#endif
2123 }
2124 if (fResched)
2125 UPDATE_RC();
2126 }
2127 else
2128#endif
2129 {
2130 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
2131#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
2132 && pCtx->hwvirt.fGif
2133#endif
2134#ifdef VBOX_WITH_RAW_MODE
2135 && !PATMIsPatchGCAddr(pVM, pCtx->eip)
2136#endif
2137 && pCtx->eflags.Bits.u1IF)
2138 {
2139 Assert(pVCpu->em.s.enmState != EMSTATE_WAIT_SIPI);
2140 /* Note: it's important to make sure the return code from TRPMR3InjectEvent isn't ignored! */
2141 /** @todo this really isn't nice, should properly handle this */
2142 rc2 = TRPMR3InjectEvent(pVM, pVCpu, TRPM_HARDWARE_INT);
2143 Log(("EM: TRPMR3InjectEvent -> %d\n", rc2));
2144 if (pVM->em.s.fIemExecutesAll && ( rc2 == VINF_EM_RESCHEDULE_REM
2145 || rc2 == VINF_EM_RESCHEDULE_HM
2146 || rc2 == VINF_EM_RESCHEDULE_RAW))
2147 {
2148 rc2 = VINF_EM_RESCHEDULE;
2149 }
2150#ifdef VBOX_STRICT
2151 rcIrq = rc2;
2152#endif
2153 UPDATE_RC();
2154 /* Reschedule required: We must not miss the wakeup below! */
2155 fWakeupPending = true;
2156 }
2157 }
2158 }
2159 }
2160
2161 /*
2162 * Allocate handy pages.
2163 */
2164 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PGM_NEED_HANDY_PAGES, VM_FF_PGM_NO_MEMORY))
2165 {
2166 rc2 = PGMR3PhysAllocateHandyPages(pVM);
2167 UPDATE_RC();
2168 }
2169
2170 /*
2171 * Debugger Facility request.
2172 */
2173 if ( ( VM_FF_IS_PENDING(pVM, VM_FF_DBGF)
2174 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_DBGF) )
2175 && !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY) )
2176 {
2177 rc2 = DBGFR3VMMForcedAction(pVM, pVCpu);
2178 UPDATE_RC();
2179 }
2180
2181 /*
2182 * EMT Rendezvous (must be serviced before termination).
2183 */
2184 if ( !fWakeupPending /* don't miss the wakeup from EMSTATE_HALTED! */
2185 && VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS))
2186 {
2187 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
2188 UPDATE_RC();
2189 /** @todo HACK ALERT! The following test is to make sure EM+TM thinks the VM is
2190 * stopped/reset before the next VM state change is made. We need a better
2191 * solution for this, or at least make it possible to do: (rc >= VINF_EM_FIRST
2192 * && rc <= VINF_EM_SUSPEND). */
2193 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
2194 {
2195 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2196 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2197 return rc;
2198 }
2199 }
2200
2201 /*
2202 * State change request (cleared by vmR3SetStateLocked).
2203 */
2204 if ( !fWakeupPending /* don't miss the wakeup from EMSTATE_HALTED! */
2205 && VM_FF_IS_PENDING(pVM, VM_FF_CHECK_VM_STATE))
2206 {
2207 VMSTATE enmState = VMR3GetState(pVM);
2208 switch (enmState)
2209 {
2210 case VMSTATE_FATAL_ERROR:
2211 case VMSTATE_FATAL_ERROR_LS:
2212 case VMSTATE_GURU_MEDITATION:
2213 case VMSTATE_GURU_MEDITATION_LS:
2214 Log2(("emR3ForcedActions: %s -> VINF_EM_SUSPEND\n", VMGetStateName(enmState) ));
2215 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2216 return VINF_EM_SUSPEND;
2217
2218 case VMSTATE_DESTROYING:
2219 Log2(("emR3ForcedActions: %s -> VINF_EM_TERMINATE\n", VMGetStateName(enmState) ));
2220 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2221 return VINF_EM_TERMINATE;
2222
2223 default:
2224 AssertMsgFailed(("%s\n", VMGetStateName(enmState)));
2225 }
2226 }
2227
2228 /*
2229 * Out of memory? Since most of our fellow high priority actions may cause us
2230 * to run out of memory, we're employing VM_FF_IS_PENDING_EXCEPT and putting this
2231 * at the end rather than the start. Also, VM_FF_TERMINATE has higher priority
2232 * than us since we can terminate without allocating more memory.
2233 */
2234 if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
2235 {
2236 rc2 = PGMR3PhysAllocateHandyPages(pVM);
2237 UPDATE_RC();
2238 if (rc == VINF_EM_NO_MEMORY)
2239 return rc;
2240 }
2241
2242 /*
2243 * If the virtual sync clock is still stopped, make TM restart it.
2244 */
2245 if (VM_FF_IS_PENDING(pVM, VM_FF_TM_VIRTUAL_SYNC))
2246 TMR3VirtualSyncFF(pVM, pVCpu);
2247
2248#ifdef DEBUG
2249 /*
2250 * Debug, pause the VM.
2251 */
2252 if (VM_FF_IS_PENDING(pVM, VM_FF_DEBUG_SUSPEND))
2253 {
2254 VM_FF_CLEAR(pVM, VM_FF_DEBUG_SUSPEND);
2255 Log(("emR3ForcedActions: returns VINF_EM_SUSPEND\n"));
2256 return VINF_EM_SUSPEND;
2257 }
2258#endif
2259
2260 /* check that we got them all */
2261 AssertCompile(VM_FF_HIGH_PRIORITY_PRE_MASK == (VM_FF_TM_VIRTUAL_SYNC | VM_FF_DBGF | VM_FF_CHECK_VM_STATE | VM_FF_DEBUG_SUSPEND | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
2262 AssertCompile(VMCPU_FF_HIGH_PRIORITY_PRE_MASK == (VMCPU_FF_TIMER | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_INHIBIT_INTERRUPTS | VMCPU_FF_DBGF | VM_WHEN_RAW_MODE(VMCPU_FF_SELM_SYNC_TSS | VMCPU_FF_TRPM_SYNC_IDT | VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT, 0)));
2263 }
2264
2265#undef UPDATE_RC
2266 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2267 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2268 Assert(rcIrq == VINF_SUCCESS || rcIrq == rc);
2269 return rc;
2270}
2271
2272
2273/**
2274 * Check if the preset execution time cap restricts guest execution scheduling.
2275 *
2276 * @returns true if allowed, false otherwise
2277 * @param pVM The cross context VM structure.
2278 * @param pVCpu The cross context virtual CPU structure.
2279 */
2280bool emR3IsExecutionAllowed(PVM pVM, PVMCPU pVCpu)
2281{
2282 uint64_t u64UserTime, u64KernelTime;
2283
2284 if ( pVM->uCpuExecutionCap != 100
2285 && RT_SUCCESS(RTThreadGetExecutionTimeMilli(&u64KernelTime, &u64UserTime)))
2286 {
2287 uint64_t u64TimeNow = RTTimeMilliTS();
2288 if (pVCpu->em.s.u64TimeSliceStart + EM_TIME_SLICE < u64TimeNow)
2289 {
2290 /* New time slice. */
2291 pVCpu->em.s.u64TimeSliceStart = u64TimeNow;
2292 pVCpu->em.s.u64TimeSliceStartExec = u64KernelTime + u64UserTime;
2293 pVCpu->em.s.u64TimeSliceExec = 0;
2294 }
2295 pVCpu->em.s.u64TimeSliceExec = u64KernelTime + u64UserTime - pVCpu->em.s.u64TimeSliceStartExec;
2296
2297 Log2(("emR3IsExecutionAllowed: start=%RX64 startexec=%RX64 exec=%RX64 (cap=%x)\n", pVCpu->em.s.u64TimeSliceStart, pVCpu->em.s.u64TimeSliceStartExec, pVCpu->em.s.u64TimeSliceExec, (EM_TIME_SLICE * pVM->uCpuExecutionCap) / 100));
2298 if (pVCpu->em.s.u64TimeSliceExec >= (EM_TIME_SLICE * pVM->uCpuExecutionCap) / 100)
2299 return false;
2300 }
2301 return true;
2302}
2303
2304
2305/**
2306 * Execute VM.
2307 *
2308 * This function is the main loop of the VM. The emulation thread
2309 * calls this function when the VM has been successfully constructed
2310 * and we're ready to execute the VM.
2311 *
2312 * Returning from this function means that the VM is turned off or
2313 * suspended (state already saved) and deconstruction is next in line.
2314 *
2315 * All interaction from other threads is done using forced actions
2316 * and signaling of the wait object.
2317 *
2318 * @returns VBox status code; informational status codes may indicate failure.
2319 * @param pVM The cross context VM structure.
2320 * @param pVCpu The cross context virtual CPU structure.
2321 */
2322VMMR3_INT_DECL(int) EMR3ExecuteVM(PVM pVM, PVMCPU pVCpu)
2323{
2324 Log(("EMR3ExecuteVM: pVM=%p enmVMState=%d (%s) enmState=%d (%s) enmPrevState=%d (%s) fForceRAW=%RTbool\n",
2325 pVM,
2326 pVM->enmVMState, VMR3GetStateName(pVM->enmVMState),
2327 pVCpu->em.s.enmState, emR3GetStateName(pVCpu->em.s.enmState),
2328 pVCpu->em.s.enmPrevState, emR3GetStateName(pVCpu->em.s.enmPrevState),
2329 pVCpu->em.s.fForceRAW));
2330 VM_ASSERT_EMT(pVM);
2331 AssertMsg( pVCpu->em.s.enmState == EMSTATE_NONE
2332 || pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI
2333 || pVCpu->em.s.enmState == EMSTATE_SUSPENDED,
2334 ("%s\n", emR3GetStateName(pVCpu->em.s.enmState)));
2335
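    /* Set up the fatal-error long jump target: setjmp() returns 0 on this
       initial call, upon which we enter the main loop below; a longjmp()
       from a fatal error path lands here again with a non-zero status. */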
2336 int rc = setjmp(pVCpu->em.s.u.FatalLongJump);
2337 if (rc == 0)
2338 {
2339 /*
2340 * Start the virtual time.
2341 */
2342 TMR3NotifyResume(pVM, pVCpu);
2343
2344 /*
2345 * The Outer Main Loop.
2346 */
2347 bool fFFDone = false;
2348
2349 /* Reschedule right away to start in the right state. */
2350 rc = VINF_SUCCESS;
2351
2352 /* If resuming after a pause or a state load, restore the previous
2353 state, or else we'd start executing code right away. Otherwise, just reschedule. */
2354 if ( pVCpu->em.s.enmState == EMSTATE_SUSPENDED
2355 && ( pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
2356 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED))
2357 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
2358 else
2359 pVCpu->em.s.enmState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
2360 pVCpu->em.s.cIemThenRemInstructions = 0;
2361 Log(("EMR3ExecuteVM: enmState=%s\n", emR3GetStateName(pVCpu->em.s.enmState)));
2362
2363 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2364 for (;;)
2365 {
2366 /*
2367 * Before we can schedule anything (we're here because
2368 * scheduling is required) we must service any pending
2369 * forced actions to avoid any pending action causing
2370 * immediate rescheduling upon entering an inner loop.
2371 *
2372 * Do forced actions.
2373 */
2374 if ( !fFFDone
2375 && RT_SUCCESS(rc)
2376 && rc != VINF_EM_TERMINATE
2377 && rc != VINF_EM_OFF
2378 && ( VM_FF_IS_PENDING(pVM, VM_FF_ALL_REM_MASK)
2379 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_ALL_REM_MASK & ~VMCPU_FF_UNHALT)))
2380 {
2381 rc = emR3ForcedActions(pVM, pVCpu, rc);
2382 VBOXVMM_EM_FF_ALL_RET(pVCpu, rc);
2383 if ( ( rc == VINF_EM_RESCHEDULE_REM
2384 || rc == VINF_EM_RESCHEDULE_HM)
2385 && pVCpu->em.s.fForceRAW)
2386 rc = VINF_EM_RESCHEDULE_RAW;
2387 }
2388 else if (fFFDone)
2389 fFFDone = false;
2390
2391 /*
2392 * Now what to do?
2393 */
2394 Log2(("EMR3ExecuteVM: rc=%Rrc\n", rc));
2395 EMSTATE const enmOldState = pVCpu->em.s.enmState;
2396 switch (rc)
2397 {
2398 /*
2399 * Keep doing what we're currently doing.
2400 */
2401 case VINF_SUCCESS:
2402 break;
2403
2404 /*
2405 * Reschedule - to raw-mode execution.
2406 */
2407/** @todo r=bird: consider merging VINF_EM_RESCHEDULE_RAW with VINF_EM_RESCHEDULE_HM, they serve the same purpose here at least. */
2408 case VINF_EM_RESCHEDULE_RAW:
2409 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2410 if (VM_IS_RAW_MODE_ENABLED(pVM))
2411 {
2412 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_RAW: %d -> %d (EMSTATE_RAW)\n", enmOldState, EMSTATE_RAW));
2413 pVCpu->em.s.enmState = EMSTATE_RAW;
2414 }
2415 else
2416 {
2417 AssertLogRelFailed();
2418 pVCpu->em.s.enmState = EMSTATE_NONE;
2419 }
2420 break;
2421
2422 /*
2423 * Reschedule - to HM or NEM.
2424 */
2425 case VINF_EM_RESCHEDULE_HM:
2426 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2427 Assert(!pVCpu->em.s.fForceRAW);
2428 if (VM_IS_HM_ENABLED(pVM))
2429 {
2430 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_HM: %d -> %d (EMSTATE_HM)\n", enmOldState, EMSTATE_HM));
2431 pVCpu->em.s.enmState = EMSTATE_HM;
2432 }
2433 else if (VM_IS_NEM_ENABLED(pVM))
2434 {
2435 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_HM: %d -> %d (EMSTATE_NEM)\n", enmOldState, EMSTATE_NEM));
2436 pVCpu->em.s.enmState = EMSTATE_NEM;
2437 }
2438 else
2439 {
2440 AssertLogRelFailed();
2441 pVCpu->em.s.enmState = EMSTATE_NONE;
2442 }
2443 break;
2444
2445 /*
2446 * Reschedule - to recompiled execution.
2447 */
2448 case VINF_EM_RESCHEDULE_REM:
2449 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2450 if (!VM_IS_RAW_MODE_ENABLED(pVM))
2451 {
2452 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_REM: %d -> %d (EMSTATE_IEM_THEN_REM)\n",
2453 enmOldState, EMSTATE_IEM_THEN_REM));
2454 if (pVCpu->em.s.enmState != EMSTATE_IEM_THEN_REM)
2455 {
2456 pVCpu->em.s.enmState = EMSTATE_IEM_THEN_REM;
2457 pVCpu->em.s.cIemThenRemInstructions = 0;
2458 }
2459 }
2460 else
2461 {
2462 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_REM: %d -> %d (EMSTATE_REM)\n", enmOldState, EMSTATE_REM));
2463 pVCpu->em.s.enmState = EMSTATE_REM;
2464 }
2465 break;
2466
2467 /*
2468 * Resume.
2469 */
2470 case VINF_EM_RESUME:
2471 Log2(("EMR3ExecuteVM: VINF_EM_RESUME: %d -> VINF_EM_RESCHEDULE\n", enmOldState));
2472 /* Don't reschedule in the halted or wait for SIPI case. */
2473 if ( pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
2474 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED)
2475 {
2476 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
2477 break;
2478 }
2479 /* fall through and get scheduled. */
2480 RT_FALL_THRU();
2481
2482 /*
2483 * Reschedule.
2484 */
2485 case VINF_EM_RESCHEDULE:
2486 {
2487 EMSTATE enmState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
2488 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE: %d -> %d (%s)\n", enmOldState, enmState, emR3GetStateName(enmState)));
2489 if (pVCpu->em.s.enmState != enmState && enmState == EMSTATE_IEM_THEN_REM)
2490 pVCpu->em.s.cIemThenRemInstructions = 0;
2491 pVCpu->em.s.enmState = enmState;
2492 break;
2493 }
2494
2495 /*
2496 * Halted.
2497 */
2498 case VINF_EM_HALT:
2499 Log2(("EMR3ExecuteVM: VINF_EM_HALT: %d -> %d\n", enmOldState, EMSTATE_HALTED));
2500 pVCpu->em.s.enmState = EMSTATE_HALTED;
2501 break;
2502
2503 /*
2504 * Switch to the wait for SIPI state (application processor only)
2505 */
2506 case VINF_EM_WAIT_SIPI:
2507 Assert(pVCpu->idCpu != 0);
2508 Log2(("EMR3ExecuteVM: VINF_EM_WAIT_SIPI: %d -> %d\n", enmOldState, EMSTATE_WAIT_SIPI));
2509 pVCpu->em.s.enmState = EMSTATE_WAIT_SIPI;
2510 break;
2511
2512
2513 /*
2514 * Suspend.
2515 */
2516 case VINF_EM_SUSPEND:
2517 Log2(("EMR3ExecuteVM: VINF_EM_SUSPEND: %d -> %d\n", enmOldState, EMSTATE_SUSPENDED));
2518 Assert(enmOldState != EMSTATE_SUSPENDED);
2519 pVCpu->em.s.enmPrevState = enmOldState;
2520 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
2521 break;
2522
2523 /*
2524 * Reset.
2525 * We might end up doing a double reset for now; we'll have to clean up the mess later.
2526 */
2527 case VINF_EM_RESET:
2528 {
2529 if (pVCpu->idCpu == 0)
2530 {
2531 EMSTATE enmState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
2532 Log2(("EMR3ExecuteVM: VINF_EM_RESET: %d -> %d (%s)\n", enmOldState, enmState, emR3GetStateName(enmState)));
2533 if (pVCpu->em.s.enmState != enmState && enmState == EMSTATE_IEM_THEN_REM)
2534 pVCpu->em.s.cIemThenRemInstructions = 0;
2535 pVCpu->em.s.enmState = enmState;
2536 }
2537 else
2538 {
2539 /* All other VCPUs go into the wait for SIPI state. */
2540 pVCpu->em.s.enmState = EMSTATE_WAIT_SIPI;
2541 }
2542 break;
2543 }
2544
2545 /*
2546 * Power Off.
2547 */
2548 case VINF_EM_OFF:
2549 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2550 Log2(("EMR3ExecuteVM: returns VINF_EM_OFF (%d -> %d)\n", enmOldState, EMSTATE_TERMINATING));
2551 TMR3NotifySuspend(pVM, pVCpu);
2552 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2553 return rc;
2554
2555 /*
2556 * Terminate the VM.
2557 */
2558 case VINF_EM_TERMINATE:
2559 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2560 Log(("EMR3ExecuteVM returns VINF_EM_TERMINATE (%d -> %d)\n", enmOldState, EMSTATE_TERMINATING));
2561 if (pVM->enmVMState < VMSTATE_DESTROYING) /* ugly */
2562 TMR3NotifySuspend(pVM, pVCpu);
2563 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2564 return rc;
2565
2566
2567 /*
2568 * Out of memory, suspend the VM and stuff.
2569 */
2570 case VINF_EM_NO_MEMORY:
2571 Log2(("EMR3ExecuteVM: VINF_EM_NO_MEMORY: %d -> %d\n", enmOldState, EMSTATE_SUSPENDED));
2572 Assert(enmOldState != EMSTATE_SUSPENDED);
2573 pVCpu->em.s.enmPrevState = enmOldState;
2574 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
2575 TMR3NotifySuspend(pVM, pVCpu);
2576 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2577
2578 rc = VMSetRuntimeError(pVM, VMSETRTERR_FLAGS_SUSPEND, "HostMemoryLow",
2579 N_("Unable to allocate and lock memory. The virtual machine will be paused. Please close applications to free up memory or close the VM"));
2580 if (rc != VINF_EM_SUSPEND)
2581 {
2582 if (RT_SUCCESS_NP(rc))
2583 {
2584 AssertLogRelMsgFailed(("%Rrc\n", rc));
2585 rc = VERR_EM_INTERNAL_ERROR;
2586 }
2587 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2588 }
2589 return rc;
2590
2591 /*
2592 * Guest debug events.
2593 */
2594 case VINF_EM_DBG_STEPPED:
2595 case VINF_EM_DBG_STOP:
2596 case VINF_EM_DBG_EVENT:
2597 case VINF_EM_DBG_BREAKPOINT:
2598 case VINF_EM_DBG_STEP:
2599 if (enmOldState == EMSTATE_RAW)
2600 {
2601 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_RAW));
2602 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_RAW;
2603 }
2604 else if (enmOldState == EMSTATE_HM)
2605 {
2606 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_HM));
2607 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_HM;
2608 }
2609 else if (enmOldState == EMSTATE_NEM)
2610 {
2611 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_NEM));
2612 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_NEM;
2613 }
2614 else if (enmOldState == EMSTATE_REM)
2615 {
2616 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_REM));
2617 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_REM;
2618 }
2619 else
2620 {
2621 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_IEM));
2622 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_IEM;
2623 }
2624 break;
2625
2626 /*
2627 * Hypervisor debug events.
2628 */
2629 case VINF_EM_DBG_HYPER_STEPPED:
2630 case VINF_EM_DBG_HYPER_BREAKPOINT:
2631 case VINF_EM_DBG_HYPER_ASSERTION:
2632 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_HYPER));
2633 pVCpu->em.s.enmState = EMSTATE_DEBUG_HYPER;
2634 break;
2635
2636 /*
2637 * Triple fault.
2638 */
2639 case VINF_EM_TRIPLE_FAULT:
2640 if (!pVM->em.s.fGuruOnTripleFault)
2641 {
2642 Log(("EMR3ExecuteVM: VINF_EM_TRIPLE_FAULT: CPU reset...\n"));
2643 rc = VBOXSTRICTRC_TODO(VMR3ResetTripleFault(pVM));
2644 Log2(("EMR3ExecuteVM: VINF_EM_TRIPLE_FAULT: %d -> %d (rc=%Rrc)\n", enmOldState, pVCpu->em.s.enmState, rc));
2645 continue;
2646 }
2647 /* Else fall through and trigger a guru. */
2648 RT_FALL_THRU();
2649
2650 case VERR_VMM_RING0_ASSERTION:
2651 Log(("EMR3ExecuteVM: %Rrc: %d -> %d (EMSTATE_GURU_MEDITATION)\n", rc, enmOldState, EMSTATE_GURU_MEDITATION));
2652 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2653 break;
2654
2655 /*
2656 * Any error code showing up here other than the ones we
2657 * know and process above is considered to be FATAL.
2658 *
2659 * Unknown warnings and informational status codes are also
2660 * included in this.
2661 */
2662 default:
2663 if (RT_SUCCESS_NP(rc))
2664 {
2665 AssertMsgFailed(("Unexpected warning or informational status code %Rra!\n", rc));
2666 rc = VERR_EM_INTERNAL_ERROR;
2667 }
2668 Log(("EMR3ExecuteVM: %Rrc: %d -> %d (EMSTATE_GURU_MEDITATION)\n", rc, enmOldState, EMSTATE_GURU_MEDITATION));
2669 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2670 break;
2671 }
2672
2673 /*
2674 * Act on state transition.
2675 */
2676 EMSTATE const enmNewState = pVCpu->em.s.enmState;
2677 if (enmOldState != enmNewState)
2678 {
2679 VBOXVMM_EM_STATE_CHANGED(pVCpu, enmOldState, enmNewState, rc);
2680
2681 /* Clear MWait flags and the unhalt FF. */
2682 if ( enmOldState == EMSTATE_HALTED
2683 && ( (pVCpu->em.s.MWait.fWait & EMMWAIT_FLAG_ACTIVE)
2684 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_UNHALT))
2685 && ( enmNewState == EMSTATE_RAW
2686 || enmNewState == EMSTATE_HM
2687 || enmNewState == EMSTATE_NEM
2688 || enmNewState == EMSTATE_REM
2689 || enmNewState == EMSTATE_IEM_THEN_REM
2690 || enmNewState == EMSTATE_DEBUG_GUEST_RAW
2691 || enmNewState == EMSTATE_DEBUG_GUEST_HM
2692 || enmNewState == EMSTATE_DEBUG_GUEST_NEM
2693 || enmNewState == EMSTATE_DEBUG_GUEST_IEM
2694 || enmNewState == EMSTATE_DEBUG_GUEST_REM) )
2695 {
2696 if (pVCpu->em.s.MWait.fWait & EMMWAIT_FLAG_ACTIVE)
2697 {
2698 LogFlow(("EMR3ExecuteVM: Clearing MWAIT\n"));
2699 pVCpu->em.s.MWait.fWait &= ~(EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0);
2700 }
2701 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_UNHALT))
2702 {
2703 LogFlow(("EMR3ExecuteVM: Clearing UNHALT\n"));
2704 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_UNHALT);
2705 }
2706 }
2707 }
2708 else
2709 VBOXVMM_EM_STATE_UNCHANGED(pVCpu, enmNewState, rc);
2710
2711 STAM_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x); /* (skip this in release) */
2712 STAM_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2713
2714 /*
2715 * Act on the new state.
2716 */
2717 switch (enmNewState)
2718 {
2719 /*
2720 * Execute raw.
2721 */
2722 case EMSTATE_RAW:
2723#ifdef VBOX_WITH_RAW_MODE
2724 rc = emR3RawExecute(pVM, pVCpu, &fFFDone);
2725#else
2726 AssertLogRelMsgFailed(("%Rrc\n", rc));
2727 rc = VERR_EM_INTERNAL_ERROR;
2728#endif
2729 break;
2730
2731 /*
2732 * Execute hardware accelerated raw.
2733 */
2734 case EMSTATE_HM:
2735 rc = emR3HmExecute(pVM, pVCpu, &fFFDone);
2736 break;
2737
2738 /*
2739 * Execute using the native execution manager (NEM).
2740 */
2741 case EMSTATE_NEM:
2742 rc = VBOXSTRICTRC_TODO(emR3NemExecute(pVM, pVCpu, &fFFDone));
2743 break;
2744
2745 /*
2746 * Execute recompiled.
2747 */
2748 case EMSTATE_REM:
2749 rc = emR3RemExecute(pVM, pVCpu, &fFFDone);
2750 Log2(("EMR3ExecuteVM: emR3RemExecute -> %Rrc\n", rc));
2751 break;
2752
2753 /*
2754 * Execute in the interpreter.
2755 */
2756 case EMSTATE_IEM:
2757 {
2758#if 0 /* For testing purposes. */
2759 STAM_PROFILE_START(&pVCpu->em.s.StatHmExec, x1);
2760 rc = VBOXSTRICTRC_TODO(EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE));
2761 STAM_PROFILE_STOP(&pVCpu->em.s.StatHmExec, x1);
2762 if (rc == VINF_EM_DBG_STEPPED || rc == VINF_EM_RESCHEDULE_HM || rc == VINF_EM_RESCHEDULE_REM || rc == VINF_EM_RESCHEDULE_RAW)
2763 rc = VINF_SUCCESS;
2764 else if (rc == VERR_EM_CANNOT_EXEC_GUEST)
2765#endif
2766 rc = VBOXSTRICTRC_TODO(IEMExecLots(pVCpu, NULL /*pcInstructions*/));
2767 if (pVM->em.s.fIemExecutesAll)
2768 {
2769 Assert(rc != VINF_EM_RESCHEDULE_REM);
2770 Assert(rc != VINF_EM_RESCHEDULE_RAW);
2771 Assert(rc != VINF_EM_RESCHEDULE_HM);
2772 }
2773 fFFDone = false;
2774 break;
2775 }
2776
2777 /*
2778 * Execute in IEM, hoping we can quickly switch back to HM
2779 * or RAW execution. If our hopes fail, we go to REM.
2780 */
2781 case EMSTATE_IEM_THEN_REM:
2782 {
2783 STAM_PROFILE_START(&pVCpu->em.s.StatIEMThenREM, pIemThenRem);
2784 rc = VBOXSTRICTRC_TODO(emR3ExecuteIemThenRem(pVM, pVCpu, &fFFDone));
2785 STAM_PROFILE_STOP(&pVCpu->em.s.StatIEMThenREM, pIemThenRem);
2786 break;
2787 }
2788
2789 /*
2790 * Application processor execution halted until SIPI.
2791 */
2792 case EMSTATE_WAIT_SIPI:
2793 /* no break */
2794 /*
2795 * hlt - execution halted until interrupt.
2796 */
2797 case EMSTATE_HALTED:
2798 {
2799 STAM_REL_PROFILE_START(&pVCpu->em.s.StatHalted, y);
2800 /* If HM (or someone else) stores a pending interrupt in
2801 TRPM, it must be dispatched ASAP without any halting.
2802 Anything pending in TRPM has been accepted and the CPU
2803 should already be in the right state to receive it. */
2804 if (TRPMHasTrap(pVCpu))
2805 rc = VINF_EM_RESCHEDULE;
2806 /* MWAIT has a special extension where it's woken up when
2807 an interrupt is pending even when IF=0. */
2808 else if ( (pVCpu->em.s.MWait.fWait & (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
2809 == (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
2810 {
2811 rc = VMR3WaitHalted(pVM, pVCpu, false /*fIgnoreInterrupts*/);
2812 if (rc == VINF_SUCCESS)
2813 {
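                            /* Fold pending APIC interrupt state into the
                               interrupt force-flags before checking whether
                               the MWAIT should be broken. */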
                            if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
                                APICUpdatePendingInterrupts(pVCpu);

                            if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC
                                                         | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT))
                            {
                                Log(("EMR3ExecuteVM: Triggering reschedule on pending IRQ after MWAIT\n"));
                                rc = VINF_EM_RESCHEDULE;
                            }
                        }
                    }
                    else
                    {
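                        /* Plain HLT: external interrupts wake us only while
                           IF is set, whereas NMIs, SMIs and unhalt requests
                           always do. */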
                        rc = VMR3WaitHalted(pVM, pVCpu, !(CPUMGetGuestEFlags(pVCpu) & X86_EFL_IF));
                        /* We're only interested in NMI/SMIs here which have their own FFs, so we don't need to
                           check VMCPU_FF_UPDATE_APIC here. */
                        if (   rc == VINF_SUCCESS
                            && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT))
                        {
                            Log(("EMR3ExecuteVM: Triggering reschedule on pending NMI/SMI/UNHALT after HLT\n"));
                            rc = VINF_EM_RESCHEDULE;
                        }
                    }

                    STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatHalted, y);
                    break;
                }

                /*
                 * Suspended - return to VM.cpp.
                 */
                case EMSTATE_SUSPENDED:
                    TMR3NotifySuspend(pVM, pVCpu);
                    STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
                    Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
                    return VINF_EM_SUSPEND;

                /*
                 * Debugging in the guest.
                 */
                case EMSTATE_DEBUG_GUEST_RAW:
                case EMSTATE_DEBUG_GUEST_HM:
                case EMSTATE_DEBUG_GUEST_NEM:
                case EMSTATE_DEBUG_GUEST_IEM:
                case EMSTATE_DEBUG_GUEST_REM:
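                    /* Park the virtual clock while the debugger owns the VCPU
                       and restart it once execution resumes. */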
                    TMR3NotifySuspend(pVM, pVCpu);
                    rc = VBOXSTRICTRC_TODO(emR3Debug(pVM, pVCpu, rc));
                    TMR3NotifyResume(pVM, pVCpu);
                    Log2(("EMR3ExecuteVM: emR3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
                    break;

                /*
                 * Debugging in the hypervisor.
                 */
                case EMSTATE_DEBUG_HYPER:
                {
                    TMR3NotifySuspend(pVM, pVCpu);
                    STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);

                    rc = VBOXSTRICTRC_TODO(emR3Debug(pVM, pVCpu, rc));
                    Log2(("EMR3ExecuteVM: emR3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
                    if (rc != VINF_SUCCESS)
                    {
                        if (rc == VINF_EM_OFF || rc == VINF_EM_TERMINATE)
                            pVCpu->em.s.enmState = EMSTATE_TERMINATING;
                        else
                        {
                            /* switch to guru meditation mode */
                            pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
                            VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
                            VMMR3FatalDump(pVM, pVCpu, rc);
                        }
                        Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
                        return rc;
                    }

                    STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
                    TMR3NotifyResume(pVM, pVCpu);
                    break;
                }

                /*
                 * Guru meditation takes place in the debugger.
                 */
                case EMSTATE_GURU_MEDITATION:
                {
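                    /* Dump as much state as possible, notify the other EMTs,
                       and hand the VCPU over to the debugger. */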
                    TMR3NotifySuspend(pVM, pVCpu);
                    VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
                    VMMR3FatalDump(pVM, pVCpu, rc);
                    emR3Debug(pVM, pVCpu, rc);
                    STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
                    Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
                    return rc;
                }

                /*
                 * The states we don't expect here.
                 */
                case EMSTATE_NONE:
                case EMSTATE_TERMINATING:
                default:
                    AssertMsgFailed(("EMR3ExecuteVM: Invalid state %d!\n", pVCpu->em.s.enmState));
                    pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
                    TMR3NotifySuspend(pVM, pVCpu);
                    STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
                    Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
                    return VERR_EM_INTERNAL_ERROR;
            }
        } /* The Outer Main Loop */
    }
    else
    {
        /*
         * Fatal error.
         */
        Log(("EMR3ExecuteVM: returns %Rrc because of longjmp / fatal error; (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(pVCpu->em.s.enmPrevState)));
        TMR3NotifySuspend(pVM, pVCpu);
        VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
        VMMR3FatalDump(pVM, pVCpu, rc);
        emR3Debug(pVM, pVCpu, rc);
        STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
        /** @todo change the VM state! */
        return rc;
    }

    /* not reached */
}

/**
 * Notify EM of a state change (used by FTM).
 *
 * @returns VINF_SUCCESS.
 * @param   pVM             The cross context VM structure.
 */
VMMR3_INT_DECL(int) EMR3NotifySuspend(PVM pVM)
{
    PVMCPU pVCpu = VMMGetCpu(pVM);

    TMR3NotifySuspend(pVM, pVCpu); /* Stop the virtual time. */
    pVCpu->em.s.enmPrevState = pVCpu->em.s.enmState;
    pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
    return VINF_SUCCESS;
}

/**
 * Notify EM of a state change (used by FTM).
 *
 * @returns VINF_SUCCESS.
 * @param   pVM             The cross context VM structure.
 */
VMMR3_INT_DECL(int) EMR3NotifyResume(PVM pVM)
{
    PVMCPU pVCpu = VMMGetCpu(pVM);
    EMSTATE enmCurState = pVCpu->em.s.enmState;

    TMR3NotifyResume(pVM, pVCpu); /* Resume the virtual time. */
    pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
    pVCpu->em.s.enmPrevState = enmCurState;
    return VINF_SUCCESS;
}
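
/*
 * Illustrative sketch only: a fault tolerance sync is expected to bracket its
 * work with the two notifications above, along these lines (ftmR3DoSync is a
 * hypothetical placeholder, not an actual API):
 *
 *     EMR3NotifySuspend(pVM);        // park EM and stop the virtual time
 *     int rc = ftmR3DoSync(pVM);     // copy / checkpoint the VM state
 *     EMR3NotifyResume(pVM);         // restore the previous EM state
 */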