VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/EM.cpp@ 42480

Last change on this file since 42480 was 42480, checked in by vboxsync, 12 years ago

Report CRx reads as reads and writes as writes in statistics, not vice versa.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 107.8 KB
Line 
1/* $Id: EM.cpp 42480 2012-07-31 13:36:49Z vboxsync $ */
2/** @file
3 * EM - Execution Monitor / Manager.
4 */
5
6/*
7 * Copyright (C) 2006-2011 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/** @page pg_em EM - The Execution Monitor / Manager
19 *
20 * The Execution Monitor/Manager is responsible for running the VM, scheduling
21 * the right kind of execution (Raw-mode, Hardware Assisted, Recompiled or
22 * Interpreted), and keeping the CPU states in sync. The function
23 * EMR3ExecuteVM() is the 'main-loop' of the VM, while each of the execution
24 * modes has different inner loops (emR3RawExecute, emR3HwAccExecute, and
25 * emR3RemExecute).
26 *
27 * The interpreted execution is only used to avoid switching between
28 * raw-mode/hwaccm and the recompiler when fielding virtualization traps/faults.
29 * The interpretation is thus implemented as part of EM.
30 *
31 * @see grp_em
32 */
33
34/*******************************************************************************
35* Header Files *
36*******************************************************************************/
37#define LOG_GROUP LOG_GROUP_EM
38#include <VBox/vmm/em.h>
39#include <VBox/vmm/vmm.h>
40#include <VBox/vmm/patm.h>
41#include <VBox/vmm/csam.h>
42#include <VBox/vmm/selm.h>
43#include <VBox/vmm/trpm.h>
44#include <VBox/vmm/iom.h>
45#include <VBox/vmm/dbgf.h>
46#include <VBox/vmm/pgm.h>
47#ifdef VBOX_WITH_REM
48# include <VBox/vmm/rem.h>
49#else
50# include <VBox/vmm/iem.h>
51#endif
52#include <VBox/vmm/tm.h>
53#include <VBox/vmm/mm.h>
54#include <VBox/vmm/ssm.h>
55#include <VBox/vmm/pdmapi.h>
56#include <VBox/vmm/pdmcritsect.h>
57#include <VBox/vmm/pdmqueue.h>
58#include <VBox/vmm/hwaccm.h>
59#include <VBox/vmm/patm.h>
60#ifdef IEM_VERIFICATION_MODE
61# include <VBox/vmm/iem.h>
62#endif
63#include "EMInternal.h"
64#include "internal/em.h"
65#include <VBox/vmm/vm.h>
66#include <VBox/vmm/cpumdis.h>
67#include <VBox/dis.h>
68#include <VBox/disopcode.h>
69#include <VBox/vmm/dbgf.h>
70#include "VMMTracing.h"
71
72#include <iprt/asm.h>
73#include <iprt/string.h>
74#include <iprt/stream.h>
75#include <iprt/thread.h>
76
77
78/*******************************************************************************
79* Defined Constants And Macros *
80*******************************************************************************/
81#if 0 /* Disabled till after 2.1.0 when we've time to test it. */
82#define EM_NOTIFY_HWACCM
83#endif
84
85
86/*******************************************************************************
87* Internal Functions *
88*******************************************************************************/
89static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM);
90static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
91#if defined(LOG_ENABLED) || defined(VBOX_STRICT)
92static const char *emR3GetStateName(EMSTATE enmState);
93#endif
94static int emR3Debug(PVM pVM, PVMCPU pVCpu, int rc);
95static int emR3RemStep(PVM pVM, PVMCPU pVCpu);
96static int emR3RemExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone);
97int emR3HighPriorityPostForcedActions(PVM pVM, PVMCPU pVCpu, int rc);
98
99
100/**
101 * Initializes the EM.
102 *
103 * @returns VBox status code.
104 * @param pVM Pointer to the VM.
105 */
106VMMR3DECL(int) EMR3Init(PVM pVM)
107{
108 LogFlow(("EMR3Init\n"));
109 /*
110 * Assert alignment and sizes.
111 */
112 AssertCompileMemberAlignment(VM, em.s, 32);
113 AssertCompile(sizeof(pVM->em.s) <= sizeof(pVM->em.padding));
114 AssertCompile(sizeof(pVM->aCpus[0].em.s.u.FatalLongJump) <= sizeof(pVM->aCpus[0].em.s.u.achPaddingFatalLongJump));
115
116 /*
117 * Init the structure.
118 */
119 pVM->em.s.offVM = RT_OFFSETOF(VM, em.s);
120 bool fEnabled;
121 int rc = CFGMR3QueryBool(CFGMR3GetRoot(pVM), "RawR3Enabled", &fEnabled);
122 pVM->fRecompileUser = RT_SUCCESS(rc) ? !fEnabled : false;
123 rc = CFGMR3QueryBool(CFGMR3GetRoot(pVM), "RawR0Enabled", &fEnabled);
124 pVM->fRecompileSupervisor = RT_SUCCESS(rc) ? !fEnabled : false;
125 Log(("EMR3Init: fRecompileUser=%RTbool fRecompileSupervisor=%RTbool\n", pVM->fRecompileUser, pVM->fRecompileSupervisor));
126
127#ifdef VBOX_WITH_REM
128 /*
129 * Initialize the REM critical section.
130 */
131 AssertCompileMemberAlignment(EM, CritSectREM, sizeof(uintptr_t));
132 rc = PDMR3CritSectInit(pVM, &pVM->em.s.CritSectREM, RT_SRC_POS, "EM-REM");
133 AssertRCReturn(rc, rc);
134#endif
135
136 /*
137 * Saved state.
138 */
139 rc = SSMR3RegisterInternal(pVM, "em", 0, EM_SAVED_STATE_VERSION, 16,
140 NULL, NULL, NULL,
141 NULL, emR3Save, NULL,
142 NULL, emR3Load, NULL);
143 if (RT_FAILURE(rc))
144 return rc;
145
146 for (VMCPUID i = 0; i < pVM->cCpus; i++)
147 {
148 PVMCPU pVCpu = &pVM->aCpus[i];
149
150 pVCpu->em.s.offVMCPU = RT_OFFSETOF(VMCPU, em.s);
151
152 pVCpu->em.s.enmState = (i == 0) ? EMSTATE_NONE : EMSTATE_WAIT_SIPI;
153 pVCpu->em.s.enmPrevState = EMSTATE_NONE;
154 pVCpu->em.s.fForceRAW = false;
155
156 pVCpu->em.s.pCtx = CPUMQueryGuestCtxPtr(pVCpu);
157 pVCpu->em.s.pPatmGCState = PATMR3QueryGCStateHC(pVM);
158 AssertMsg(pVCpu->em.s.pPatmGCState, ("PATMR3QueryGCStateHC failed!\n"));
159
160 /* Force reset of the time slice. */
161 pVCpu->em.s.u64TimeSliceStart = 0;
162
163# define EM_REG_COUNTER(a, b, c) \
164 rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, c, b, i); \
165 AssertRC(rc);
166
167# define EM_REG_COUNTER_USED(a, b, c) \
168 rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, c, b, i); \
169 AssertRC(rc);
170
171# define EM_REG_PROFILE(a, b, c) \
172 rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, i); \
173 AssertRC(rc);
174
175# define EM_REG_PROFILE_ADV(a, b, c) \
176 rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE_ADV, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, i); \
177 AssertRC(rc);
178
179 /*
180 * Statistics.
181 */
182#ifdef VBOX_WITH_STATISTICS
183 PEMSTATS pStats;
184 rc = MMHyperAlloc(pVM, sizeof(*pStats), 0, MM_TAG_EM, (void **)&pStats);
185 if (RT_FAILURE(rc))
186 return rc;
187
188 pVCpu->em.s.pStatsR3 = pStats;
189 pVCpu->em.s.pStatsR0 = MMHyperR3ToR0(pVM, pStats);
190 pVCpu->em.s.pStatsRC = MMHyperR3ToRC(pVM, pStats);
191
192 EM_REG_PROFILE(&pStats->StatRZEmulate, "/EM/CPU%d/RZ/Interpret", "Profiling of EMInterpretInstruction.");
193 EM_REG_PROFILE(&pStats->StatR3Emulate, "/EM/CPU%d/R3/Interpret", "Profiling of EMInterpretInstruction.");
194
195 EM_REG_PROFILE(&pStats->StatRZInterpretSucceeded, "/EM/CPU%d/RZ/Interpret/Success", "The number of times an instruction was successfully interpreted.");
196 EM_REG_PROFILE(&pStats->StatR3InterpretSucceeded, "/EM/CPU%d/R3/Interpret/Success", "The number of times an instruction was successfully interpreted.");
197
198 EM_REG_COUNTER_USED(&pStats->StatRZAnd, "/EM/CPU%d/RZ/Interpret/Success/And", "The number of times AND was successfully interpreted.");
199 EM_REG_COUNTER_USED(&pStats->StatR3And, "/EM/CPU%d/R3/Interpret/Success/And", "The number of times AND was successfully interpreted.");
200 EM_REG_COUNTER_USED(&pStats->StatRZAdd, "/EM/CPU%d/RZ/Interpret/Success/Add", "The number of times ADD was successfully interpreted.");
201 EM_REG_COUNTER_USED(&pStats->StatR3Add, "/EM/CPU%d/R3/Interpret/Success/Add", "The number of times ADD was successfully interpreted.");
202 EM_REG_COUNTER_USED(&pStats->StatRZAdc, "/EM/CPU%d/RZ/Interpret/Success/Adc", "The number of times ADC was successfully interpreted.");
203 EM_REG_COUNTER_USED(&pStats->StatR3Adc, "/EM/CPU%d/R3/Interpret/Success/Adc", "The number of times ADC was successfully interpreted.");
204 EM_REG_COUNTER_USED(&pStats->StatRZSub, "/EM/CPU%d/RZ/Interpret/Success/Sub", "The number of times SUB was successfully interpreted.");
205 EM_REG_COUNTER_USED(&pStats->StatR3Sub, "/EM/CPU%d/R3/Interpret/Success/Sub", "The number of times SUB was successfully interpreted.");
206 EM_REG_COUNTER_USED(&pStats->StatRZCpuId, "/EM/CPU%d/RZ/Interpret/Success/CpuId", "The number of times CPUID was successfully interpreted.");
207 EM_REG_COUNTER_USED(&pStats->StatR3CpuId, "/EM/CPU%d/R3/Interpret/Success/CpuId", "The number of times CPUID was successfully interpreted.");
208 EM_REG_COUNTER_USED(&pStats->StatRZDec, "/EM/CPU%d/RZ/Interpret/Success/Dec", "The number of times DEC was successfully interpreted.");
209 EM_REG_COUNTER_USED(&pStats->StatR3Dec, "/EM/CPU%d/R3/Interpret/Success/Dec", "The number of times DEC was successfully interpreted.");
210 EM_REG_COUNTER_USED(&pStats->StatRZHlt, "/EM/CPU%d/RZ/Interpret/Success/Hlt", "The number of times HLT was successfully interpreted.");
211 EM_REG_COUNTER_USED(&pStats->StatR3Hlt, "/EM/CPU%d/R3/Interpret/Success/Hlt", "The number of times HLT was successfully interpreted.");
212 EM_REG_COUNTER_USED(&pStats->StatRZInc, "/EM/CPU%d/RZ/Interpret/Success/Inc", "The number of times INC was successfully interpreted.");
213 EM_REG_COUNTER_USED(&pStats->StatR3Inc, "/EM/CPU%d/R3/Interpret/Success/Inc", "The number of times INC was successfully interpreted.");
214 EM_REG_COUNTER_USED(&pStats->StatRZInvlPg, "/EM/CPU%d/RZ/Interpret/Success/Invlpg", "The number of times INVLPG was successfully interpreted.");
215 EM_REG_COUNTER_USED(&pStats->StatR3InvlPg, "/EM/CPU%d/R3/Interpret/Success/Invlpg", "The number of times INVLPG was successfully interpreted.");
216 EM_REG_COUNTER_USED(&pStats->StatRZIret, "/EM/CPU%d/RZ/Interpret/Success/Iret", "The number of times IRET was successfully interpreted.");
217 EM_REG_COUNTER_USED(&pStats->StatR3Iret, "/EM/CPU%d/R3/Interpret/Success/Iret", "The number of times IRET was successfully interpreted.");
218 EM_REG_COUNTER_USED(&pStats->StatRZLLdt, "/EM/CPU%d/RZ/Interpret/Success/LLdt", "The number of times LLDT was successfully interpreted.");
219 EM_REG_COUNTER_USED(&pStats->StatR3LLdt, "/EM/CPU%d/R3/Interpret/Success/LLdt", "The number of times LLDT was successfully interpreted.");
220 EM_REG_COUNTER_USED(&pStats->StatRZLIdt, "/EM/CPU%d/RZ/Interpret/Success/LIdt", "The number of times LIDT was successfully interpreted.");
221 EM_REG_COUNTER_USED(&pStats->StatR3LIdt, "/EM/CPU%d/R3/Interpret/Success/LIdt", "The number of times LIDT was successfully interpreted.");
222 EM_REG_COUNTER_USED(&pStats->StatRZLGdt, "/EM/CPU%d/RZ/Interpret/Success/LGdt", "The number of times LGDT was successfully interpreted.");
223 EM_REG_COUNTER_USED(&pStats->StatR3LGdt, "/EM/CPU%d/R3/Interpret/Success/LGdt", "The number of times LGDT was successfully interpreted.");
224 EM_REG_COUNTER_USED(&pStats->StatRZMov, "/EM/CPU%d/RZ/Interpret/Success/Mov", "The number of times MOV was successfully interpreted.");
225 EM_REG_COUNTER_USED(&pStats->StatR3Mov, "/EM/CPU%d/R3/Interpret/Success/Mov", "The number of times MOV was successfully interpreted.");
226 EM_REG_COUNTER_USED(&pStats->StatRZMovCRx, "/EM/CPU%d/RZ/Interpret/Success/MovCRx", "The number of times MOV CRx was successfully interpreted.");
227 EM_REG_COUNTER_USED(&pStats->StatR3MovCRx, "/EM/CPU%d/R3/Interpret/Success/MovCRx", "The number of times MOV CRx was successfully interpreted.");
228 EM_REG_COUNTER_USED(&pStats->StatRZMovDRx, "/EM/CPU%d/RZ/Interpret/Success/MovDRx", "The number of times MOV DRx was successfully interpreted.");
229 EM_REG_COUNTER_USED(&pStats->StatR3MovDRx, "/EM/CPU%d/R3/Interpret/Success/MovDRx", "The number of times MOV DRx was successfully interpreted.");
230 EM_REG_COUNTER_USED(&pStats->StatRZOr, "/EM/CPU%d/RZ/Interpret/Success/Or", "The number of times OR was successfully interpreted.");
231 EM_REG_COUNTER_USED(&pStats->StatR3Or, "/EM/CPU%d/R3/Interpret/Success/Or", "The number of times OR was successfully interpreted.");
232 EM_REG_COUNTER_USED(&pStats->StatRZPop, "/EM/CPU%d/RZ/Interpret/Success/Pop", "The number of times POP was successfully interpreted.");
233 EM_REG_COUNTER_USED(&pStats->StatR3Pop, "/EM/CPU%d/R3/Interpret/Success/Pop", "The number of times POP was successfully interpreted.");
234 EM_REG_COUNTER_USED(&pStats->StatRZRdtsc, "/EM/CPU%d/RZ/Interpret/Success/Rdtsc", "The number of times RDTSC was successfully interpreted.");
235 EM_REG_COUNTER_USED(&pStats->StatR3Rdtsc, "/EM/CPU%d/R3/Interpret/Success/Rdtsc", "The number of times RDTSC was successfully interpreted.");
236 EM_REG_COUNTER_USED(&pStats->StatRZRdpmc, "/EM/CPU%d/RZ/Interpret/Success/Rdpmc", "The number of times RDPMC was successfully interpreted.");
237 EM_REG_COUNTER_USED(&pStats->StatR3Rdpmc, "/EM/CPU%d/R3/Interpret/Success/Rdpmc", "The number of times RDPMC was successfully interpreted.");
238 EM_REG_COUNTER_USED(&pStats->StatRZSti, "/EM/CPU%d/RZ/Interpret/Success/Sti", "The number of times STI was successfully interpreted.");
239 EM_REG_COUNTER_USED(&pStats->StatR3Sti, "/EM/CPU%d/R3/Interpret/Success/Sti", "The number of times STI was successfully interpreted.");
240 EM_REG_COUNTER_USED(&pStats->StatRZXchg, "/EM/CPU%d/RZ/Interpret/Success/Xchg", "The number of times XCHG was successfully interpreted.");
241 EM_REG_COUNTER_USED(&pStats->StatR3Xchg, "/EM/CPU%d/R3/Interpret/Success/Xchg", "The number of times XCHG was successfully interpreted.");
242 EM_REG_COUNTER_USED(&pStats->StatRZXor, "/EM/CPU%d/RZ/Interpret/Success/Xor", "The number of times XOR was successfully interpreted.");
243 EM_REG_COUNTER_USED(&pStats->StatR3Xor, "/EM/CPU%d/R3/Interpret/Success/Xor", "The number of times XOR was successfully interpreted.");
244 EM_REG_COUNTER_USED(&pStats->StatRZMonitor, "/EM/CPU%d/RZ/Interpret/Success/Monitor", "The number of times MONITOR was successfully interpreted.");
245 EM_REG_COUNTER_USED(&pStats->StatR3Monitor, "/EM/CPU%d/R3/Interpret/Success/Monitor", "The number of times MONITOR was successfully interpreted.");
246 EM_REG_COUNTER_USED(&pStats->StatRZMWait, "/EM/CPU%d/RZ/Interpret/Success/MWait", "The number of times MWAIT was successfully interpreted.");
247 EM_REG_COUNTER_USED(&pStats->StatR3MWait, "/EM/CPU%d/R3/Interpret/Success/MWait", "The number of times MWAIT was successfully interpreted.");
248 EM_REG_COUNTER_USED(&pStats->StatRZBtr, "/EM/CPU%d/RZ/Interpret/Success/Btr", "The number of times BTR was successfully interpreted.");
249 EM_REG_COUNTER_USED(&pStats->StatR3Btr, "/EM/CPU%d/R3/Interpret/Success/Btr", "The number of times BTR was successfully interpreted.");
250 EM_REG_COUNTER_USED(&pStats->StatRZBts, "/EM/CPU%d/RZ/Interpret/Success/Bts", "The number of times BTS was successfully interpreted.");
251 EM_REG_COUNTER_USED(&pStats->StatR3Bts, "/EM/CPU%d/R3/Interpret/Success/Bts", "The number of times BTS was successfully interpreted.");
252 EM_REG_COUNTER_USED(&pStats->StatRZBtc, "/EM/CPU%d/RZ/Interpret/Success/Btc", "The number of times BTC was successfully interpreted.");
253 EM_REG_COUNTER_USED(&pStats->StatR3Btc, "/EM/CPU%d/R3/Interpret/Success/Btc", "The number of times BTC was successfully interpreted.");
254 EM_REG_COUNTER_USED(&pStats->StatRZCmpXchg, "/EM/CPU%d/RZ/Interpret/Success/CmpXchg", "The number of times CMPXCHG was successfully interpreted.");
255 EM_REG_COUNTER_USED(&pStats->StatR3CmpXchg, "/EM/CPU%d/R3/Interpret/Success/CmpXchg", "The number of times CMPXCHG was successfully interpreted.");
256 EM_REG_COUNTER_USED(&pStats->StatRZCmpXchg8b, "/EM/CPU%d/RZ/Interpret/Success/CmpXchg8b", "The number of times CMPXCHG8B was successfully interpreted.");
257 EM_REG_COUNTER_USED(&pStats->StatR3CmpXchg8b, "/EM/CPU%d/R3/Interpret/Success/CmpXchg8b", "The number of times CMPXCHG8B was successfully interpreted.");
258 EM_REG_COUNTER_USED(&pStats->StatRZXAdd, "/EM/CPU%d/RZ/Interpret/Success/XAdd", "The number of times XADD was successfully interpreted.");
259 EM_REG_COUNTER_USED(&pStats->StatR3XAdd, "/EM/CPU%d/R3/Interpret/Success/XAdd", "The number of times XADD was successfully interpreted.");
260 EM_REG_COUNTER_USED(&pStats->StatR3Rdmsr, "/EM/CPU%d/R3/Interpret/Success/Rdmsr", "The number of times RDMSR was successfully interpreted.");
261 EM_REG_COUNTER_USED(&pStats->StatRZRdmsr, "/EM/CPU%d/RZ/Interpret/Success/Rdmsr", "The number of times RDMSR was successfully interpreted.");
262 EM_REG_COUNTER_USED(&pStats->StatR3Wrmsr, "/EM/CPU%d/R3/Interpret/Success/Wrmsr", "The number of times WRMSR was successfully interpreted.");
263 EM_REG_COUNTER_USED(&pStats->StatRZWrmsr, "/EM/CPU%d/RZ/Interpret/Success/Wrmsr", "The number of times WRMSR was successfully interpreted.");
264 EM_REG_COUNTER_USED(&pStats->StatR3StosWD, "/EM/CPU%d/R3/Interpret/Success/Stoswd", "The number of times STOSWD was successfully interpreted.");
265 EM_REG_COUNTER_USED(&pStats->StatRZStosWD, "/EM/CPU%d/RZ/Interpret/Success/Stoswd", "The number of times STOSWD was successfully interpreted.");
266 EM_REG_COUNTER_USED(&pStats->StatRZWbInvd, "/EM/CPU%d/RZ/Interpret/Success/WbInvd", "The number of times WBINVD was successfully interpreted.");
267 EM_REG_COUNTER_USED(&pStats->StatR3WbInvd, "/EM/CPU%d/R3/Interpret/Success/WbInvd", "The number of times WBINVD was successfully interpreted.");
268 EM_REG_COUNTER_USED(&pStats->StatRZLmsw, "/EM/CPU%d/RZ/Interpret/Success/Lmsw", "The number of times LMSW was successfully interpreted.");
269 EM_REG_COUNTER_USED(&pStats->StatR3Lmsw, "/EM/CPU%d/R3/Interpret/Success/Lmsw", "The number of times LMSW was successfully interpreted.");
270
271 EM_REG_COUNTER(&pStats->StatRZInterpretFailed, "/EM/CPU%d/RZ/Interpret/Failed", "The number of times an instruction was not interpreted.");
272 EM_REG_COUNTER(&pStats->StatR3InterpretFailed, "/EM/CPU%d/R3/Interpret/Failed", "The number of times an instruction was not interpreted.");
273
274 EM_REG_COUNTER_USED(&pStats->StatRZFailedAnd, "/EM/CPU%d/RZ/Interpret/Failed/And", "The number of times AND was not interpreted.");
275 EM_REG_COUNTER_USED(&pStats->StatR3FailedAnd, "/EM/CPU%d/R3/Interpret/Failed/And", "The number of times AND was not interpreted.");
276 EM_REG_COUNTER_USED(&pStats->StatRZFailedCpuId, "/EM/CPU%d/RZ/Interpret/Failed/CpuId", "The number of times CPUID was not interpreted.");
277 EM_REG_COUNTER_USED(&pStats->StatR3FailedCpuId, "/EM/CPU%d/R3/Interpret/Failed/CpuId", "The number of times CPUID was not interpreted.");
278 EM_REG_COUNTER_USED(&pStats->StatRZFailedDec, "/EM/CPU%d/RZ/Interpret/Failed/Dec", "The number of times DEC was not interpreted.");
279 EM_REG_COUNTER_USED(&pStats->StatR3FailedDec, "/EM/CPU%d/R3/Interpret/Failed/Dec", "The number of times DEC was not interpreted.");
280 EM_REG_COUNTER_USED(&pStats->StatRZFailedHlt, "/EM/CPU%d/RZ/Interpret/Failed/Hlt", "The number of times HLT was not interpreted.");
281 EM_REG_COUNTER_USED(&pStats->StatR3FailedHlt, "/EM/CPU%d/R3/Interpret/Failed/Hlt", "The number of times HLT was not interpreted.");
282 EM_REG_COUNTER_USED(&pStats->StatRZFailedInc, "/EM/CPU%d/RZ/Interpret/Failed/Inc", "The number of times INC was not interpreted.");
283 EM_REG_COUNTER_USED(&pStats->StatR3FailedInc, "/EM/CPU%d/R3/Interpret/Failed/Inc", "The number of times INC was not interpreted.");
284 EM_REG_COUNTER_USED(&pStats->StatRZFailedInvlPg, "/EM/CPU%d/RZ/Interpret/Failed/InvlPg", "The number of times INVLPG was not interpreted.");
285 EM_REG_COUNTER_USED(&pStats->StatR3FailedInvlPg, "/EM/CPU%d/R3/Interpret/Failed/InvlPg", "The number of times INVLPG was not interpreted.");
286 EM_REG_COUNTER_USED(&pStats->StatRZFailedIret, "/EM/CPU%d/RZ/Interpret/Failed/Iret", "The number of times IRET was not interpreted.");
287 EM_REG_COUNTER_USED(&pStats->StatR3FailedIret, "/EM/CPU%d/R3/Interpret/Failed/Iret", "The number of times IRET was not interpreted.");
288 EM_REG_COUNTER_USED(&pStats->StatRZFailedLLdt, "/EM/CPU%d/RZ/Interpret/Failed/LLdt", "The number of times LLDT was not interpreted.");
289 EM_REG_COUNTER_USED(&pStats->StatR3FailedLLdt, "/EM/CPU%d/R3/Interpret/Failed/LLdt", "The number of times LLDT was not interpreted.");
290 EM_REG_COUNTER_USED(&pStats->StatRZFailedLIdt, "/EM/CPU%d/RZ/Interpret/Failed/LIdt", "The number of times LIDT was not interpreted.");
291 EM_REG_COUNTER_USED(&pStats->StatR3FailedLIdt, "/EM/CPU%d/R3/Interpret/Failed/LIdt", "The number of times LIDT was not interpreted.");
292 EM_REG_COUNTER_USED(&pStats->StatRZFailedLGdt, "/EM/CPU%d/RZ/Interpret/Failed/LGdt", "The number of times LGDT was not interpreted.");
293 EM_REG_COUNTER_USED(&pStats->StatR3FailedLGdt, "/EM/CPU%d/R3/Interpret/Failed/LGdt", "The number of times LGDT was not interpreted.");
294 EM_REG_COUNTER_USED(&pStats->StatRZFailedMov, "/EM/CPU%d/RZ/Interpret/Failed/Mov", "The number of times MOV was not interpreted.");
295 EM_REG_COUNTER_USED(&pStats->StatR3FailedMov, "/EM/CPU%d/R3/Interpret/Failed/Mov", "The number of times MOV was not interpreted.");
296 EM_REG_COUNTER_USED(&pStats->StatRZFailedMovCRx, "/EM/CPU%d/RZ/Interpret/Failed/MovCRx", "The number of times MOV CRx was not interpreted.");
297 EM_REG_COUNTER_USED(&pStats->StatR3FailedMovCRx, "/EM/CPU%d/R3/Interpret/Failed/MovCRx", "The number of times MOV CRx was not interpreted.");
298 EM_REG_COUNTER_USED(&pStats->StatRZFailedMovDRx, "/EM/CPU%d/RZ/Interpret/Failed/MovDRx", "The number of times MOV DRx was not interpreted.");
299 EM_REG_COUNTER_USED(&pStats->StatR3FailedMovDRx, "/EM/CPU%d/R3/Interpret/Failed/MovDRx", "The number of times MOV DRx was not interpreted.");
300 EM_REG_COUNTER_USED(&pStats->StatRZFailedOr, "/EM/CPU%d/RZ/Interpret/Failed/Or", "The number of times OR was not interpreted.");
301 EM_REG_COUNTER_USED(&pStats->StatR3FailedOr, "/EM/CPU%d/R3/Interpret/Failed/Or", "The number of times OR was not interpreted.");
302 EM_REG_COUNTER_USED(&pStats->StatRZFailedPop, "/EM/CPU%d/RZ/Interpret/Failed/Pop", "The number of times POP was not interpreted.");
303 EM_REG_COUNTER_USED(&pStats->StatR3FailedPop, "/EM/CPU%d/R3/Interpret/Failed/Pop", "The number of times POP was not interpreted.");
304 EM_REG_COUNTER_USED(&pStats->StatRZFailedSti, "/EM/CPU%d/RZ/Interpret/Failed/Sti", "The number of times STI was not interpreted.");
305 EM_REG_COUNTER_USED(&pStats->StatR3FailedSti, "/EM/CPU%d/R3/Interpret/Failed/Sti", "The number of times STI was not interpreted.");
306 EM_REG_COUNTER_USED(&pStats->StatRZFailedXchg, "/EM/CPU%d/RZ/Interpret/Failed/Xchg", "The number of times XCHG was not interpreted.");
307 EM_REG_COUNTER_USED(&pStats->StatR3FailedXchg, "/EM/CPU%d/R3/Interpret/Failed/Xchg", "The number of times XCHG was not interpreted.");
308 EM_REG_COUNTER_USED(&pStats->StatRZFailedXor, "/EM/CPU%d/RZ/Interpret/Failed/Xor", "The number of times XOR was not interpreted.");
309 EM_REG_COUNTER_USED(&pStats->StatR3FailedXor, "/EM/CPU%d/R3/Interpret/Failed/Xor", "The number of times XOR was not interpreted.");
310 EM_REG_COUNTER_USED(&pStats->StatRZFailedMonitor, "/EM/CPU%d/RZ/Interpret/Failed/Monitor", "The number of times MONITOR was not interpreted.");
311 EM_REG_COUNTER_USED(&pStats->StatR3FailedMonitor, "/EM/CPU%d/R3/Interpret/Failed/Monitor", "The number of times MONITOR was not interpreted.");
312 EM_REG_COUNTER_USED(&pStats->StatRZFailedMWait, "/EM/CPU%d/RZ/Interpret/Failed/MWait", "The number of times MWAIT was not interpreted.");
313 EM_REG_COUNTER_USED(&pStats->StatR3FailedMWait, "/EM/CPU%d/R3/Interpret/Failed/MWait", "The number of times MWAIT was not interpreted.");
314 EM_REG_COUNTER_USED(&pStats->StatRZFailedRdtsc, "/EM/CPU%d/RZ/Interpret/Failed/Rdtsc", "The number of times RDTSC was not interpreted.");
315 EM_REG_COUNTER_USED(&pStats->StatR3FailedRdtsc, "/EM/CPU%d/R3/Interpret/Failed/Rdtsc", "The number of times RDTSC was not interpreted.");
316 EM_REG_COUNTER_USED(&pStats->StatRZFailedRdpmc, "/EM/CPU%d/RZ/Interpret/Failed/Rdpmc", "The number of times RDPMC was not interpreted.");
317 EM_REG_COUNTER_USED(&pStats->StatR3FailedRdpmc, "/EM/CPU%d/R3/Interpret/Failed/Rdpmc", "The number of times RDPMC was not interpreted.");
318 EM_REG_COUNTER_USED(&pStats->StatRZFailedRdmsr, "/EM/CPU%d/RZ/Interpret/Failed/Rdmsr", "The number of times RDMSR was not interpreted.");
319 EM_REG_COUNTER_USED(&pStats->StatR3FailedRdmsr, "/EM/CPU%d/R3/Interpret/Failed/Rdmsr", "The number of times RDMSR was not interpreted.");
320 EM_REG_COUNTER_USED(&pStats->StatRZFailedWrmsr, "/EM/CPU%d/RZ/Interpret/Failed/Wrmsr", "The number of times WRMSR was not interpreted.");
321 EM_REG_COUNTER_USED(&pStats->StatR3FailedWrmsr, "/EM/CPU%d/R3/Interpret/Failed/Wrmsr", "The number of times WRMSR was not interpreted.");
322 EM_REG_COUNTER_USED(&pStats->StatRZFailedLmsw, "/EM/CPU%d/RZ/Interpret/Failed/Lmsw", "The number of times LMSW was not interpreted.");
323 EM_REG_COUNTER_USED(&pStats->StatR3FailedLmsw, "/EM/CPU%d/R3/Interpret/Failed/Lmsw", "The number of times LMSW was not interpreted.");
324
325 EM_REG_COUNTER_USED(&pStats->StatRZFailedMisc, "/EM/CPU%d/RZ/Interpret/Failed/Misc", "The number of times some misc instruction was encountered.");
326 EM_REG_COUNTER_USED(&pStats->StatR3FailedMisc, "/EM/CPU%d/R3/Interpret/Failed/Misc", "The number of times some misc instruction was encountered.");
327 EM_REG_COUNTER_USED(&pStats->StatRZFailedAdd, "/EM/CPU%d/RZ/Interpret/Failed/Add", "The number of times ADD was not interpreted.");
328 EM_REG_COUNTER_USED(&pStats->StatR3FailedAdd, "/EM/CPU%d/R3/Interpret/Failed/Add", "The number of times ADD was not interpreted.");
329 EM_REG_COUNTER_USED(&pStats->StatRZFailedAdc, "/EM/CPU%d/RZ/Interpret/Failed/Adc", "The number of times ADC was not interpreted.");
330 EM_REG_COUNTER_USED(&pStats->StatR3FailedAdc, "/EM/CPU%d/R3/Interpret/Failed/Adc", "The number of times ADC was not interpreted.");
331 EM_REG_COUNTER_USED(&pStats->StatRZFailedBtr, "/EM/CPU%d/RZ/Interpret/Failed/Btr", "The number of times BTR was not interpreted.");
332 EM_REG_COUNTER_USED(&pStats->StatR3FailedBtr, "/EM/CPU%d/R3/Interpret/Failed/Btr", "The number of times BTR was not interpreted.");
333 EM_REG_COUNTER_USED(&pStats->StatRZFailedBts, "/EM/CPU%d/RZ/Interpret/Failed/Bts", "The number of times BTS was not interpreted.");
334 EM_REG_COUNTER_USED(&pStats->StatR3FailedBts, "/EM/CPU%d/R3/Interpret/Failed/Bts", "The number of times BTS was not interpreted.");
335 EM_REG_COUNTER_USED(&pStats->StatRZFailedBtc, "/EM/CPU%d/RZ/Interpret/Failed/Btc", "The number of times BTC was not interpreted.");
336 EM_REG_COUNTER_USED(&pStats->StatR3FailedBtc, "/EM/CPU%d/R3/Interpret/Failed/Btc", "The number of times BTC was not interpreted.");
337 EM_REG_COUNTER_USED(&pStats->StatRZFailedCli, "/EM/CPU%d/RZ/Interpret/Failed/Cli", "The number of times CLI was not interpreted.");
338 EM_REG_COUNTER_USED(&pStats->StatR3FailedCli, "/EM/CPU%d/R3/Interpret/Failed/Cli", "The number of times CLI was not interpreted.");
339 EM_REG_COUNTER_USED(&pStats->StatRZFailedCmpXchg, "/EM/CPU%d/RZ/Interpret/Failed/CmpXchg", "The number of times CMPXCHG was not interpreted.");
340 EM_REG_COUNTER_USED(&pStats->StatR3FailedCmpXchg, "/EM/CPU%d/R3/Interpret/Failed/CmpXchg", "The number of times CMPXCHG was not interpreted.");
341 EM_REG_COUNTER_USED(&pStats->StatRZFailedCmpXchg8b, "/EM/CPU%d/RZ/Interpret/Failed/CmpXchg8b", "The number of times CMPXCHG8B was not interpreted.");
342 EM_REG_COUNTER_USED(&pStats->StatR3FailedCmpXchg8b, "/EM/CPU%d/R3/Interpret/Failed/CmpXchg8b", "The number of times CMPXCHG8B was not interpreted.");
343 EM_REG_COUNTER_USED(&pStats->StatRZFailedXAdd, "/EM/CPU%d/RZ/Interpret/Failed/XAdd", "The number of times XADD was not interpreted.");
344 EM_REG_COUNTER_USED(&pStats->StatR3FailedXAdd, "/EM/CPU%d/R3/Interpret/Failed/XAdd", "The number of times XADD was not interpreted.");
345 EM_REG_COUNTER_USED(&pStats->StatRZFailedMovNTPS, "/EM/CPU%d/RZ/Interpret/Failed/MovNTPS", "The number of times MOVNTPS was not interpreted.");
346 EM_REG_COUNTER_USED(&pStats->StatR3FailedMovNTPS, "/EM/CPU%d/R3/Interpret/Failed/MovNTPS", "The number of times MOVNTPS was not interpreted.");
347 EM_REG_COUNTER_USED(&pStats->StatRZFailedStosWD, "/EM/CPU%d/RZ/Interpret/Failed/StosWD", "The number of times STOSWD was not interpreted.");
348 EM_REG_COUNTER_USED(&pStats->StatR3FailedStosWD, "/EM/CPU%d/R3/Interpret/Failed/StosWD", "The number of times STOSWD was not interpreted.");
349 EM_REG_COUNTER_USED(&pStats->StatRZFailedSub, "/EM/CPU%d/RZ/Interpret/Failed/Sub", "The number of times SUB was not interpreted.");
350 EM_REG_COUNTER_USED(&pStats->StatR3FailedSub, "/EM/CPU%d/R3/Interpret/Failed/Sub", "The number of times SUB was not interpreted.");
351 EM_REG_COUNTER_USED(&pStats->StatRZFailedWbInvd, "/EM/CPU%d/RZ/Interpret/Failed/WbInvd", "The number of times WBINVD was not interpreted.");
352 EM_REG_COUNTER_USED(&pStats->StatR3FailedWbInvd, "/EM/CPU%d/R3/Interpret/Failed/WbInvd", "The number of times WBINVD was not interpreted.");
353
354 EM_REG_COUNTER_USED(&pStats->StatRZFailedUserMode, "/EM/CPU%d/RZ/Interpret/Failed/UserMode", "The number of rejections because of CPL.");
355 EM_REG_COUNTER_USED(&pStats->StatR3FailedUserMode, "/EM/CPU%d/R3/Interpret/Failed/UserMode", "The number of rejections because of CPL.");
356 EM_REG_COUNTER_USED(&pStats->StatRZFailedPrefix, "/EM/CPU%d/RZ/Interpret/Failed/Prefix", "The number of rejections because of prefix .");
357 EM_REG_COUNTER_USED(&pStats->StatR3FailedPrefix, "/EM/CPU%d/R3/Interpret/Failed/Prefix", "The number of rejections because of prefix .");
358
359 EM_REG_COUNTER_USED(&pStats->StatCli, "/EM/CPU%d/R3/PrivInst/Cli", "Number of cli instructions.");
360 EM_REG_COUNTER_USED(&pStats->StatSti, "/EM/CPU%d/R3/PrivInst/Sti", "Number of sli instructions.");
361 EM_REG_COUNTER_USED(&pStats->StatIn, "/EM/CPU%d/R3/PrivInst/In", "Number of in instructions.");
362 EM_REG_COUNTER_USED(&pStats->StatOut, "/EM/CPU%d/R3/PrivInst/Out", "Number of out instructions.");
363 EM_REG_COUNTER_USED(&pStats->StatIoRestarted, "/EM/CPU%d/R3/PrivInst/IoRestarted", "Number of restarted i/o instructions.");
364 EM_REG_COUNTER_USED(&pStats->StatHlt, "/EM/CPU%d/R3/PrivInst/Hlt", "Number of hlt instructions not handled in GC because of PATM.");
365 EM_REG_COUNTER_USED(&pStats->StatInvlpg, "/EM/CPU%d/R3/PrivInst/Invlpg", "Number of invlpg instructions.");
366 EM_REG_COUNTER_USED(&pStats->StatMisc, "/EM/CPU%d/R3/PrivInst/Misc", "Number of misc. instructions.");
367 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[0], "/EM/CPU%d/R3/PrivInst/Mov CR0, X", "Number of mov CR0 write instructions.");
368 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[1], "/EM/CPU%d/R3/PrivInst/Mov CR1, X", "Number of mov CR1 write instructions.");
369 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[2], "/EM/CPU%d/R3/PrivInst/Mov CR2, X", "Number of mov CR2 write instructions.");
370 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[3], "/EM/CPU%d/R3/PrivInst/Mov CR3, X", "Number of mov CR3 write instructions.");
371 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[4], "/EM/CPU%d/R3/PrivInst/Mov CR4, X", "Number of mov CR4 write instructions.");
372 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[0], "/EM/CPU%d/R3/PrivInst/Mov X, CR0", "Number of mov CR0 read instructions.");
373 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[1], "/EM/CPU%d/R3/PrivInst/Mov X, CR1", "Number of mov CR1 read instructions.");
374 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[2], "/EM/CPU%d/R3/PrivInst/Mov X, CR2", "Number of mov CR2 read instructions.");
375 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[3], "/EM/CPU%d/R3/PrivInst/Mov X, CR3", "Number of mov CR3 read instructions.");
376 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[4], "/EM/CPU%d/R3/PrivInst/Mov X, CR4", "Number of mov CR4 read instructions.");
377 EM_REG_COUNTER_USED(&pStats->StatMovDRx, "/EM/CPU%d/R3/PrivInst/MovDRx", "Number of mov DRx instructions.");
378 EM_REG_COUNTER_USED(&pStats->StatIret, "/EM/CPU%d/R3/PrivInst/Iret", "Number of iret instructions.");
379 EM_REG_COUNTER_USED(&pStats->StatMovLgdt, "/EM/CPU%d/R3/PrivInst/Lgdt", "Number of lgdt instructions.");
380 EM_REG_COUNTER_USED(&pStats->StatMovLidt, "/EM/CPU%d/R3/PrivInst/Lidt", "Number of lidt instructions.");
381 EM_REG_COUNTER_USED(&pStats->StatMovLldt, "/EM/CPU%d/R3/PrivInst/Lldt", "Number of lldt instructions.");
382 EM_REG_COUNTER_USED(&pStats->StatSysEnter, "/EM/CPU%d/R3/PrivInst/Sysenter", "Number of sysenter instructions.");
383 EM_REG_COUNTER_USED(&pStats->StatSysExit, "/EM/CPU%d/R3/PrivInst/Sysexit", "Number of sysexit instructions.");
384 EM_REG_COUNTER_USED(&pStats->StatSysCall, "/EM/CPU%d/R3/PrivInst/Syscall", "Number of syscall instructions.");
385 EM_REG_COUNTER_USED(&pStats->StatSysRet, "/EM/CPU%d/R3/PrivInst/Sysret", "Number of sysret instructions.");
386
387 EM_REG_COUNTER(&pVCpu->em.s.StatTotalClis, "/EM/CPU%d/Cli/Total", "Total number of cli instructions executed.");
388 pVCpu->em.s.pCliStatTree = 0;
389
390 /* these should be considered for release statistics. */
391 EM_REG_COUNTER(&pVCpu->em.s.StatIOEmu, "/PROF/CPU%d/EM/Emulation/IO", "Profiling of emR3RawExecuteIOInstruction.");
392 EM_REG_COUNTER(&pVCpu->em.s.StatPrivEmu, "/PROF/CPU%d/EM/Emulation/Priv", "Profiling of emR3RawPrivileged.");
393 EM_REG_PROFILE(&pVCpu->em.s.StatHwAccEntry, "/PROF/CPU%d/EM/HwAccEnter", "Profiling Hardware Accelerated Mode entry overhead.");
394 EM_REG_PROFILE(&pVCpu->em.s.StatHwAccExec, "/PROF/CPU%d/EM/HwAccExec", "Profiling Hardware Accelerated Mode execution.");
395 EM_REG_PROFILE(&pVCpu->em.s.StatREMEmu, "/PROF/CPU%d/EM/REMEmuSingle", "Profiling single instruction REM execution.");
396 EM_REG_PROFILE(&pVCpu->em.s.StatREMExec, "/PROF/CPU%d/EM/REMExec", "Profiling REM execution.");
397 EM_REG_PROFILE(&pVCpu->em.s.StatREMSync, "/PROF/CPU%d/EM/REMSync", "Profiling REM context syncing.");
398 EM_REG_PROFILE(&pVCpu->em.s.StatRAWEntry, "/PROF/CPU%d/EM/RAWEnter", "Profiling Raw Mode entry overhead.");
399 EM_REG_PROFILE(&pVCpu->em.s.StatRAWExec, "/PROF/CPU%d/EM/RAWExec", "Profiling Raw Mode execution.");
400 EM_REG_PROFILE(&pVCpu->em.s.StatRAWTail, "/PROF/CPU%d/EM/RAWTail", "Profiling Raw Mode tail overhead.");
401
402#endif /* VBOX_WITH_STATISTICS */
403
404 EM_REG_COUNTER(&pVCpu->em.s.StatForcedActions, "/PROF/CPU%d/EM/ForcedActions", "Profiling forced action execution.");
405 EM_REG_COUNTER(&pVCpu->em.s.StatHalted, "/PROF/CPU%d/EM/Halted", "Profiling halted state (VMR3WaitHalted).");
406 EM_REG_PROFILE_ADV(&pVCpu->em.s.StatCapped, "/PROF/CPU%d/EM/Capped", "Profiling capped state (sleep).");
407 EM_REG_COUNTER(&pVCpu->em.s.StatREMTotal, "/PROF/CPU%d/EM/REMTotal", "Profiling emR3RemExecute (excluding FFs).");
408 EM_REG_COUNTER(&pVCpu->em.s.StatRAWTotal, "/PROF/CPU%d/EM/RAWTotal", "Profiling emR3RawExecute (excluding FFs).");
409
410 EM_REG_PROFILE_ADV(&pVCpu->em.s.StatTotal, "/PROF/CPU%d/EM/Total", "Profiling EMR3ExecuteVM.");
411 }
412
413 return VINF_SUCCESS;
414}
415
416
417/**
418 * Applies relocations to data and code managed by this
419 * component. This function will be called at init and
 * whenever the VMM needs to relocate itself inside the GC.
421 *
422 * @param pVM Pointer to the VM.
423 */
424VMMR3DECL(void) EMR3Relocate(PVM pVM)
425{
426 LogFlow(("EMR3Relocate\n"));
427 for (VMCPUID i = 0; i < pVM->cCpus; i++)
428 {
429 PVMCPU pVCpu = &pVM->aCpus[i];
430 if (pVCpu->em.s.pStatsR3)
431 pVCpu->em.s.pStatsRC = MMHyperR3ToRC(pVM, pVCpu->em.s.pStatsR3);
432 }
433}
434
435
436/**
437 * Reset the EM state for a CPU.
438 *
439 * Called by EMR3Reset and hot plugging.
440 *
441 * @param pVCpu Pointer to the VMCPU.
442 */
443VMMR3DECL(void) EMR3ResetCpu(PVMCPU pVCpu)
444{
445 pVCpu->em.s.fForceRAW = false;
446
447 /* VMR3Reset may return VINF_EM_RESET or VINF_EM_SUSPEND, so transition
448 out of the HALTED state here so that enmPrevState doesn't end up as
449 HALTED when EMR3Execute returns. */
450 if (pVCpu->em.s.enmState == EMSTATE_HALTED)
451 {
452 Log(("EMR3ResetCpu: Cpu#%u %s -> %s\n", pVCpu->idCpu, emR3GetStateName(pVCpu->em.s.enmState), pVCpu->idCpu == 0 ? "EMSTATE_NONE" : "EMSTATE_WAIT_SIPI"));
453 pVCpu->em.s.enmState = pVCpu->idCpu == 0 ? EMSTATE_NONE : EMSTATE_WAIT_SIPI;
454 }
455}
456
457
458/**
459 * Reset notification.
460 *
461 * @param pVM Pointer to the VM.
462 */
463VMMR3DECL(void) EMR3Reset(PVM pVM)
464{
465 Log(("EMR3Reset: \n"));
466 for (VMCPUID i = 0; i < pVM->cCpus; i++)
467 EMR3ResetCpu(&pVM->aCpus[i]);
468}
469
470
471/**
472 * Terminates the EM.
473 *
474 * Termination means cleaning up and freeing all resources,
 * the VM itself is at this point powered off or suspended.
476 *
477 * @returns VBox status code.
478 * @param pVM Pointer to the VM.
479 */
480VMMR3DECL(int) EMR3Term(PVM pVM)
481{
482 AssertMsg(pVM->em.s.offVM, ("bad init order!\n"));
483
484#ifdef VBOX_WITH_REM
485 PDMR3CritSectDelete(&pVM->em.s.CritSectREM);
486#endif
487 return VINF_SUCCESS;
488}
489
490
491/**
492 * Execute state save operation.
493 *
494 * @returns VBox status code.
495 * @param pVM Pointer to the VM.
496 * @param pSSM SSM operation handle.
497 */
498static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM)
499{
500 for (VMCPUID i = 0; i < pVM->cCpus; i++)
501 {
502 PVMCPU pVCpu = &pVM->aCpus[i];
503
504 int rc = SSMR3PutBool(pSSM, pVCpu->em.s.fForceRAW);
505 AssertRCReturn(rc, rc);
506
507 Assert(pVCpu->em.s.enmState == EMSTATE_SUSPENDED);
508 Assert(pVCpu->em.s.enmPrevState != EMSTATE_SUSPENDED);
509 rc = SSMR3PutU32(pSSM, pVCpu->em.s.enmPrevState);
510 AssertRCReturn(rc, rc);
511
512 /* Save mwait state. */
513 rc = SSMR3PutU32(pSSM, pVCpu->em.s.MWait.fWait);
514 AssertRCReturn(rc, rc);
515 rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMWaitRAX);
516 AssertRCReturn(rc, rc);
517 rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMWaitRCX);
518 AssertRCReturn(rc, rc);
519 rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRAX);
520 AssertRCReturn(rc, rc);
521 rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRCX);
522 AssertRCReturn(rc, rc);
523 rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRDX);
524 AssertRCReturn(rc, rc);
525 }
526 return VINF_SUCCESS;
527}
528
529
530/**
531 * Execute state load operation.
532 *
533 * @returns VBox status code.
534 * @param pVM Pointer to the VM.
535 * @param pSSM SSM operation handle.
536 * @param uVersion Data layout version.
537 * @param uPass The data pass.
538 */
static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
{
    /*
     * Validate version.  Three layouts are accepted: the current one and the
     * two older ones predating the mwait and SMP additions.
     */
    if (    uVersion != EM_SAVED_STATE_VERSION
        &&  uVersion != EM_SAVED_STATE_VERSION_PRE_MWAIT
        &&  uVersion != EM_SAVED_STATE_VERSION_PRE_SMP)
    {
        AssertMsgFailed(("emR3Load: Invalid version uVersion=%d (current %d)!\n", uVersion, EM_SAVED_STATE_VERSION));
        return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
    }
    /* EM registers no multi-pass fields, so only the final pass is expected. */
    Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);

    /*
     * Load the saved state.  Fields must be read in exactly the order
     * emR3Save wrote them, one record per VCPU.
     */
    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];

        int rc = SSMR3GetBool(pSSM, &pVCpu->em.s.fForceRAW);
        if (RT_FAILURE(rc))
            pVCpu->em.s.fForceRAW = false;  /* don't leave it indeterminate; we return just below */
        AssertRCReturn(rc, rc);

        if (uVersion > EM_SAVED_STATE_VERSION_PRE_SMP)
        {
            /* enmPrevState was serialized as a 32-bit value; the compile-time
               assertion guards the pointer cast below. */
            AssertCompile(sizeof(pVCpu->em.s.enmPrevState) == sizeof(uint32_t));
            rc = SSMR3GetU32(pSSM, (uint32_t *)&pVCpu->em.s.enmPrevState);
            AssertRCReturn(rc, rc);
            Assert(pVCpu->em.s.enmPrevState != EMSTATE_SUSPENDED);

            /* Loading implies the VM is suspended, matching what emR3Save asserts. */
            pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
        }
        if (uVersion > EM_SAVED_STATE_VERSION_PRE_MWAIT)
        {
            /* Load mwait state. */
            rc = SSMR3GetU32(pSSM, &pVCpu->em.s.MWait.fWait);
            AssertRCReturn(rc, rc);
            rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMWaitRAX);
            AssertRCReturn(rc, rc);
            rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMWaitRCX);
            AssertRCReturn(rc, rc);
            rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRAX);
            AssertRCReturn(rc, rc);
            rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRCX);
            AssertRCReturn(rc, rc);
            rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRDX);
            AssertRCReturn(rc, rc);
        }

        /* The cli statistics tree is not part of the saved state; it must be empty here. */
        Assert(!pVCpu->em.s.pCliStatTree);
    }
    return VINF_SUCCESS;
}
595
596
597/**
598 * Argument packet for emR3SetExecutionPolicy.
599 */
struct EMR3SETEXECPOLICYARGS
{
    EMEXECPOLICY enmPolicy;     /**< The execution policy being changed. */
    bool fEnforce;              /**< Whether to enforce (true) or lift (false) the policy. */
};
605
606
607/**
608 * @callback_method_impl{FNVMMEMTRENDEZVOUS, Rendezvous callback for EMR3SetExecutionPolicy.}
609 */
610static DECLCALLBACK(VBOXSTRICTRC) emR3SetExecutionPolicy(PVM pVM, PVMCPU pVCpu, void *pvUser)
611{
612 /*
613 * Only the first CPU changes the variables.
614 */
615 if (pVCpu->idCpu == 0)
616 {
617 struct EMR3SETEXECPOLICYARGS *pArgs = (struct EMR3SETEXECPOLICYARGS *)pvUser;
618 switch (pArgs->enmPolicy)
619 {
620 case EMEXECPOLICY_RECOMPILE_RING0:
621 pVM->fRecompileSupervisor = pArgs->fEnforce;
622 break;
623 case EMEXECPOLICY_RECOMPILE_RING3:
624 pVM->fRecompileUser = pArgs->fEnforce;
625 break;
626 default:
627 AssertFailedReturn(VERR_INVALID_PARAMETER);
628 }
629 Log(("emR3SetExecutionPolicy: fRecompileUser=%RTbool fRecompileSupervisor=%RTbool\n",
630 pVM->fRecompileUser, pVM->fRecompileSupervisor));
631 }
632
633 /*
634 * Force rescheduling if in RAW, HWACCM or REM.
635 */
636 return pVCpu->em.s.enmState == EMSTATE_RAW
637 || pVCpu->em.s.enmState == EMSTATE_HWACC
638 || pVCpu->em.s.enmState == EMSTATE_REM
639 ? VINF_EM_RESCHEDULE
640 : VINF_SUCCESS;
641}
642
643
644/**
 * Changes the execution scheduling policy.
646 *
647 * This is used to enable or disable raw-mode / hardware-virtualization
648 * execution of user and supervisor code.
649 *
650 * @returns VINF_SUCCESS on success.
 * @returns VINF_EM_RESCHEDULE if a rescheduling might be required.
652 * @returns VERR_INVALID_PARAMETER on an invalid enmMode value.
653 *
654 * @param pVM Pointer to the VM.
655 * @param enmPolicy The scheduling policy to change.
656 * @param fEnforce Whether to enforce the policy or not.
657 */
658VMMR3DECL(int) EMR3SetExecutionPolicy(PVM pVM, EMEXECPOLICY enmPolicy, bool fEnforce)
659{
660 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
661 AssertReturn(enmPolicy > EMEXECPOLICY_INVALID && enmPolicy < EMEXECPOLICY_END, VERR_INVALID_PARAMETER);
662
663 struct EMR3SETEXECPOLICYARGS Args = { enmPolicy, fEnforce };
664 return VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING, emR3SetExecutionPolicy, &Args);
665}
666
667
668/**
669 * Raise a fatal error.
670 *
671 * Safely terminate the VM with full state report and stuff. This function
672 * will naturally never return.
673 *
674 * @param pVCpu Pointer to the VMCPU.
675 * @param rc VBox status code.
676 */
VMMR3DECL(void) EMR3FatalError(PVMCPU pVCpu, int rc)
{
    /* Enter guru meditation and long-jump out of the execution loops via the
       FatalLongJump buffer (armed elsewhere in EM); this never returns. */
    pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
    longjmp(pVCpu->em.s.u.FatalLongJump, rc);
    AssertReleaseMsgFailed(("longjmp returned!\n"));
}
683
684
685#if defined(LOG_ENABLED) || defined(VBOX_STRICT)
686/**
687 * Gets the EM state name.
688 *
689 * @returns pointer to read only state name,
690 * @param enmState The state.
691 */
692static const char *emR3GetStateName(EMSTATE enmState)
693{
694 switch (enmState)
695 {
696 case EMSTATE_NONE: return "EMSTATE_NONE";
697 case EMSTATE_RAW: return "EMSTATE_RAW";
698 case EMSTATE_HWACC: return "EMSTATE_HWACC";
699 case EMSTATE_REM: return "EMSTATE_REM";
700 case EMSTATE_HALTED: return "EMSTATE_HALTED";
701 case EMSTATE_WAIT_SIPI: return "EMSTATE_WAIT_SIPI";
702 case EMSTATE_SUSPENDED: return "EMSTATE_SUSPENDED";
703 case EMSTATE_TERMINATING: return "EMSTATE_TERMINATING";
704 case EMSTATE_DEBUG_GUEST_RAW: return "EMSTATE_DEBUG_GUEST_RAW";
705 case EMSTATE_DEBUG_GUEST_REM: return "EMSTATE_DEBUG_GUEST_REM";
706 case EMSTATE_DEBUG_HYPER: return "EMSTATE_DEBUG_HYPER";
707 case EMSTATE_GURU_MEDITATION: return "EMSTATE_GURU_MEDITATION";
708 default: return "Unknown!";
709 }
710}
711#endif /* LOG_ENABLED || VBOX_STRICT */
712
713
714/**
715 * Debug loop.
716 *
717 * @returns VBox status code for EM.
718 * @param pVM Pointer to the VM.
719 * @param pVCpu Pointer to the VMCPU.
720 * @param rc Current EM VBox status code.
721 */
static int emR3Debug(PVM pVM, PVMCPU pVCpu, int rc)
{
    for (;;)
    {
        Log(("emR3Debug: rc=%Rrc\n", rc));
        const int rcLast = rc;  /* remembered for the VERR_DBGF_NOT_ATTACHED handling below */

        /*
         * Debug related RC.
         */
        switch (rc)
        {
            /*
             * Single step an instruction.
             */
            case VINF_EM_DBG_STEP:
                if (   pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_RAW
                    || pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER
                    || pVCpu->em.s.fForceRAW /* paranoia */)
                    rc = emR3RawStep(pVM, pVCpu);
                else
                {
                    Assert(pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_REM);
                    rc = emR3RemStep(pVM, pVCpu);
                }
                break;

            /*
             * Simple events: stepped, breakpoint, stop/assertion.
             */
            case VINF_EM_DBG_STEPPED:
                rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED);
                break;

            case VINF_EM_DBG_BREAKPOINT:
                rc = DBGFR3EventBreakpoint(pVM, DBGFEVENT_BREAKPOINT);
                break;

            case VINF_EM_DBG_STOP:
                rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, NULL, 0, NULL, NULL);
                break;

            case VINF_EM_DBG_HYPER_STEPPED:
                rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED_HYPER);
                break;

            case VINF_EM_DBG_HYPER_BREAKPOINT:
                rc = DBGFR3EventBreakpoint(pVM, DBGFEVENT_BREAKPOINT_HYPER);
                break;

            case VINF_EM_DBG_HYPER_ASSERTION:
                RTPrintf("\nVINF_EM_DBG_HYPER_ASSERTION:\n%s%s\n", VMMR3GetRZAssertMsg1(pVM), VMMR3GetRZAssertMsg2(pVM));
                rc = DBGFR3EventAssertion(pVM, DBGFEVENT_ASSERTION_HYPER, VMMR3GetRZAssertMsg1(pVM), VMMR3GetRZAssertMsg2(pVM));
                break;

            /*
             * Guru meditation.
             */
            case VERR_VMM_RING0_ASSERTION: /** @todo Make a guru meditation event! */
                rc = DBGFR3EventSrc(pVM, DBGFEVENT_FATAL_ERROR, "VERR_VMM_RING0_ASSERTION", 0, NULL, NULL);
                break;
            case VERR_REM_TOO_MANY_TRAPS: /** @todo Make a guru meditation event! */
                rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, "VERR_REM_TOO_MANY_TRAPS", 0, NULL, NULL);
                break;

            default: /** @todo don't use default for guru, but make special errors code! */
                rc = DBGFR3Event(pVM, DBGFEVENT_FATAL_ERROR);
                break;
        }

        /*
         * Process the result.
         */
        do
        {
            switch (rc)
            {
                /*
                 * Continue the debugging loop.
                 */
                case VINF_EM_DBG_STEP:
                case VINF_EM_DBG_STOP:
                case VINF_EM_DBG_STEPPED:
                case VINF_EM_DBG_BREAKPOINT:
                case VINF_EM_DBG_HYPER_STEPPED:
                case VINF_EM_DBG_HYPER_BREAKPOINT:
                case VINF_EM_DBG_HYPER_ASSERTION:
                    break;

                /*
                 * Resuming execution (in some form) has to be done here if we got
                 * a hypervisor debug event.
                 */
                case VINF_SUCCESS:
                case VINF_EM_RESUME:
                case VINF_EM_SUSPEND:
                case VINF_EM_RESCHEDULE:
                case VINF_EM_RESCHEDULE_RAW:
                case VINF_EM_RESCHEDULE_REM:
                case VINF_EM_HALT:
                    if (pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER)
                    {
                        rc = emR3RawResumeHyper(pVM, pVCpu);
                        /* Note: 'continue' terminates the do-while(false) wrapper, so
                           an informational status re-enters the outer debug loop. */
                        if (rc != VINF_SUCCESS && RT_SUCCESS(rc))
                            continue;
                    }
                    if (rc == VINF_SUCCESS)
                        rc = VINF_EM_RESCHEDULE;
                    return rc;

                /*
                 * The debugger isn't attached.
                 * We'll simply turn the thing off since that's the easiest thing to do.
                 */
                case VERR_DBGF_NOT_ATTACHED:
                    /* Fatal incoming statuses are propagated instead of masked by OFF. */
                    switch (rcLast)
                    {
                        case VINF_EM_DBG_HYPER_STEPPED:
                        case VINF_EM_DBG_HYPER_BREAKPOINT:
                        case VINF_EM_DBG_HYPER_ASSERTION:
                        case VERR_TRPM_PANIC:
                        case VERR_TRPM_DONT_PANIC:
                        case VERR_VMM_RING0_ASSERTION:
                        case VERR_VMM_HYPER_CR3_MISMATCH:
                        case VERR_VMM_RING3_CALL_DISABLED:
                            return rcLast;
                    }
                    return VINF_EM_OFF;

                /*
                 * Status codes terminating the VM in one or another sense.
                 */
                case VINF_EM_TERMINATE:
                case VINF_EM_OFF:
                case VINF_EM_RESET:
                case VINF_EM_NO_MEMORY:
                case VINF_EM_RAW_STALE_SELECTOR:
                case VINF_EM_RAW_IRET_TRAP:
                case VERR_TRPM_PANIC:
                case VERR_TRPM_DONT_PANIC:
                case VERR_IEM_INSTR_NOT_IMPLEMENTED:
                case VERR_IEM_ASPECT_NOT_IMPLEMENTED:
                case VERR_VMM_RING0_ASSERTION:
                case VERR_VMM_HYPER_CR3_MISMATCH:
                case VERR_VMM_RING3_CALL_DISABLED:
                case VERR_INTERNAL_ERROR:
                case VERR_INTERNAL_ERROR_2:
                case VERR_INTERNAL_ERROR_3:
                case VERR_INTERNAL_ERROR_4:
                case VERR_INTERNAL_ERROR_5:
                case VERR_IPE_UNEXPECTED_STATUS:
                case VERR_IPE_UNEXPECTED_INFO_STATUS:
                case VERR_IPE_UNEXPECTED_ERROR_STATUS:
                    return rc;

                /*
                 * The rest is unexpected, and will keep us here.
                 */
                default:
                    AssertMsgFailed(("Unexpected rc %Rrc!\n", rc));
                    break;
            }
        } while (false);
    } /* debug for ever */
}
887
888/**
889 * Steps recompiled code.
890 *
891 * @returns VBox status code. The most important ones are: VINF_EM_STEP_EVENT,
892 * VINF_EM_RESCHEDULE, VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
893 *
894 * @param pVM Pointer to the VM.
895 * @param pVCpu Pointer to the VMCPU.
896 */
static int emR3RemStep(PVM pVM, PVMCPU pVCpu)
{
    LogFlow(("emR3RemStep: cs:eip=%04x:%08x\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));

#ifdef VBOX_WITH_REM
    /* Serialize access to the recompiler while stepping. */
    EMRemLock(pVM);

    /*
     * Switch to REM, step instruction, switch back.
     * The state is only synced back when the forward sync succeeded.
     */
    int rc = REMR3State(pVM, pVCpu);
    if (RT_SUCCESS(rc))
    {
        rc = REMR3Step(pVM, pVCpu);
        REMR3StateBack(pVM, pVCpu);
    }
    EMRemUnlock(pVM);

#else
    /* Without REM, interpret a single instruction with IEM instead. */
    int rc = VBOXSTRICTRC_TODO(IEMExecOne(pVCpu)); NOREF(pVM);
#endif

    LogFlow(("emR3RemStep: returns %Rrc cs:eip=%04x:%08x\n", rc, CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
    return rc;
}
922
923
924/**
925 * emR3RemExecute helper that syncs the state back from REM and leave the REM
926 * critical section.
927 *
928 * @returns false - new fInREMState value.
929 * @param pVM Pointer to the VM.
930 * @param pVCpu Pointer to the VMCPU.
931 */
DECLINLINE(bool) emR3RemExecuteSyncBack(PVM pVM, PVMCPU pVCpu)
{
#ifdef VBOX_WITH_REM
    /* Copy the CPU state back out of the recompiler (timed for the
       REMSync profile counter), then drop the REM lock. */
    STAM_PROFILE_START(&pVCpu->em.s.StatREMSync, a);
    REMR3StateBack(pVM, pVCpu);
    STAM_PROFILE_STOP(&pVCpu->em.s.StatREMSync, a);

    EMRemUnlock(pVM);
#endif
    /* Always false so callers can write: fInREMState = emR3RemExecuteSyncBack(...) */
    return false;
}
943
944
945/**
946 * Executes recompiled code.
947 *
948 * This function contains the recompiler version of the inner
949 * execution loop (the outer loop being in EMR3ExecuteVM()).
950 *
951 * @returns VBox status code. The most important ones are: VINF_EM_RESCHEDULE,
952 * VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
953 *
954 * @param pVM Pointer to the VM.
955 * @param pVCpu Pointer to the VMCPU.
956 * @param pfFFDone Where to store an indicator telling whether or not
957 * FFs were done before returning.
958 *
959 */
static int emR3RemExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
{
#ifdef LOG_ENABLED
    PCPUMCTX pCtx = pVCpu->em.s.pCtx;
    uint32_t cpl = CPUMGetGuestCPL(pVCpu);

    if (pCtx->eflags.Bits.u1VM)
        Log(("EMV86: %04X:%08X IF=%d\n", pCtx->cs.Sel, pCtx->eip, pCtx->eflags.Bits.u1IF));
    else
        Log(("EMR%d: %04X:%08X ESP=%08X IF=%d CR0=%x eflags=%x\n", cpl, pCtx->cs.Sel, pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF, (uint32_t)pCtx->cr0, pCtx->eflags.u));
#endif
    STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatREMTotal, a);

#if defined(VBOX_STRICT) && defined(DEBUG_bird)
    AssertMsg(   VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)
              || !MMHyperIsInsideArea(pVM, CPUMGetGuestEIP(pVCpu)),  /** @todo @bugref{1419} - get flat address. */
              ("cs:eip=%RX16:%RX32\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
#endif

    /*
     * Spin till we get a forced action which returns anything but VINF_SUCCESS
     * or the REM suggests raw-mode execution.
     */
    *pfFFDone = false;
#ifdef VBOX_WITH_REM
    bool fInREMState = false;   /* true while we hold the REM lock and REM owns the CPU state */
#endif
    int rc = VINF_SUCCESS;
    for (;;)
    {
#ifdef VBOX_WITH_REM
        /*
         * Lock REM and update the state if not already in sync.
         *
         * Note! Big lock, but you are not supposed to own any lock when
         * coming in here.
         */
        if (!fInREMState)
        {
            EMRemLock(pVM);
            STAM_PROFILE_START(&pVCpu->em.s.StatREMSync, b);

            /* Flush the recompiler translation blocks if the VCPU has changed,
               also force a full CPU state resync. */
            if (pVM->em.s.idLastRemCpu != pVCpu->idCpu)
            {
                REMFlushTBs(pVM);
                CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
            }
            pVM->em.s.idLastRemCpu = pVCpu->idCpu;

            rc = REMR3State(pVM, pVCpu);

            STAM_PROFILE_STOP(&pVCpu->em.s.StatREMSync, b);
            if (RT_FAILURE(rc))
                break;
            fInREMState = true;

            /*
             * We might have missed the raising of VMREQ, TIMER and some other
             * important FFs while we were busy switching the state. So, check again.
             */
            if (   VM_FF_ISPENDING(pVM, VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_DBGF | VM_FF_CHECK_VM_STATE | VM_FF_RESET)
                || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_TIMER | VMCPU_FF_REQUEST))
            {
                LogFlow(("emR3RemExecute: Skipping run, because FF is set. %#x\n", pVM->fGlobalForcedActions));
                goto l_REMDoForcedActions;
            }
        }
#endif

        /*
         * Execute REM.
         */
        if (RT_LIKELY(EMR3IsExecutionAllowed(pVM, pVCpu)))
        {
            STAM_PROFILE_START(&pVCpu->em.s.StatREMExec, c);
#ifdef VBOX_WITH_REM
            rc = REMR3Run(pVM, pVCpu);
#else
            rc = VBOXSTRICTRC_TODO(IEMExecLots(pVCpu));
#endif
            STAM_PROFILE_STOP(&pVCpu->em.s.StatREMExec, c);
        }
        else
        {
            /* Give up this time slice; virtual time continues */
            STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatCapped, u);
            RTThreadSleep(5);
            STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatCapped, u);
            rc = VINF_SUCCESS;
        }

        /*
         * Deal with high priority post execution FFs before doing anything
         * else. Sync back the state and leave the lock to be on the safe side.
         */
        if (   VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_POST_MASK)
            || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_MASK))
        {
#ifdef VBOX_WITH_REM
            fInREMState = emR3RemExecuteSyncBack(pVM, pVCpu);
#endif
            rc = emR3HighPriorityPostForcedActions(pVM, pVCpu, rc);
        }

        /*
         * Process the returned status code.
         */
        if (rc != VINF_SUCCESS)
        {
            if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
                break;
            if (rc != VINF_REM_INTERRUPED_FF)
            {
                /*
                 * Anything which is not known to us means an internal error
                 * and the termination of the VM!
                 */
                AssertMsg(rc == VERR_REM_TOO_MANY_TRAPS, ("Unknown GC return code: %Rra\n", rc));
                break;
            }
        }


        /*
         * Check and execute forced actions.
         *
         * Sync back the VM state and leave the lock before calling any of
         * these, you never know what's going to happen here.
         */
#ifdef VBOX_HIGH_RES_TIMERS_HACK
        TMTimerPollVoid(pVM, pVCpu);
#endif
        AssertCompile((VMCPU_FF_ALL_REM_MASK & ~(VMCPU_FF_CSAM_PENDING_ACTION | VMCPU_FF_CSAM_SCAN_PAGE)) & VMCPU_FF_TIMER);
        if (   VM_FF_ISPENDING(pVM, VM_FF_ALL_REM_MASK)
            || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_ALL_REM_MASK & ~(VMCPU_FF_CSAM_PENDING_ACTION | VMCPU_FF_CSAM_SCAN_PAGE)))
        {
l_REMDoForcedActions:   /* also entered via goto right after the forward state sync above */
#ifdef VBOX_WITH_REM
            if (fInREMState)
                fInREMState = emR3RemExecuteSyncBack(pVM, pVCpu);
#endif
            STAM_REL_PROFILE_ADV_SUSPEND(&pVCpu->em.s.StatREMTotal, a);
            rc = emR3ForcedActions(pVM, pVCpu, rc);
            VBOXVMM_EM_FF_ALL_RET(pVCpu, rc);
            STAM_REL_PROFILE_ADV_RESUME(&pVCpu->em.s.StatREMTotal, a);
            if (   rc != VINF_SUCCESS
                && rc != VINF_EM_RESCHEDULE_REM)
            {
                *pfFFDone = true;
                break;
            }
        }

    } /* The Inner Loop, recompiled execution mode version. */


#ifdef VBOX_WITH_REM
    /*
     * Returning. Sync back the VM state if required.
     */
    if (fInREMState)
        fInREMState = emR3RemExecuteSyncBack(pVM, pVCpu);
#endif

    STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatREMTotal, a);
    return rc;
}
1129
1130
1131#ifdef DEBUG
1132
1133int emR3SingleStepExecRem(PVM pVM, PVMCPU pVCpu, uint32_t cIterations)
1134{
1135 EMSTATE enmOldState = pVCpu->em.s.enmState;
1136
1137 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_REM;
1138
1139 Log(("Single step BEGIN:\n"));
1140 for (uint32_t i = 0; i < cIterations; i++)
1141 {
1142 DBGFR3PrgStep(pVCpu);
1143 DBGFR3DisasInstrCurrentLog(pVCpu, "RSS: ");
1144 emR3RemStep(pVM, pVCpu);
1145 if (emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx) != EMSTATE_REM)
1146 break;
1147 }
1148 Log(("Single step END:\n"));
1149 CPUMSetGuestEFlags(pVCpu, CPUMGetGuestEFlags(pVCpu) & ~X86_EFL_TF);
1150 pVCpu->em.s.enmState = enmOldState;
1151 return VINF_EM_RESCHEDULE;
1152}
1153
1154#endif /* DEBUG */
1155
1156
1157/**
1158 * Decides whether to execute RAW, HWACC or REM.
1159 *
1160 * @returns new EM state
1161 * @param pVM Pointer to the VM.
1162 * @param pVCpu Pointer to the VMCPU.
1163 * @param pCtx Pointer to the guest CPU context.
1164 */
EMSTATE emR3Reschedule(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
{
#ifdef IEM_VERIFICATION_MODE
    return EMSTATE_REM;
#else

    /*
     * When forcing raw-mode execution, things are simple.
     */
    if (pVCpu->em.s.fForceRAW)
        return EMSTATE_RAW;

    /*
     * We stay in the wait for SIPI state unless explicitly told otherwise.
     */
    if (pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI)
        return EMSTATE_WAIT_SIPI;

    /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
    /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
    /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */

    X86EFLAGS EFlags = pCtx->eflags;
    if (HWACCMIsEnabled(pVM))
    {
        /*
         * Hardware accelerated raw-mode:
         *
         * Typically only 32-bits protected mode, with paging enabled, code is
         * allowed here.
         */
        if (   EMIsHwVirtExecutionEnabled(pVM)
            && HWACCMR3CanExecuteGuest(pVM, pCtx))
            return EMSTATE_HWACC;

        /*
         * Note! Raw mode and hw accelerated mode are incompatible. The latter
         *       turns off monitoring features essential for raw mode!
         */
        return EMSTATE_REM;
    }

    /*
     * Standard raw-mode:
     *
     * Here we only support 16 & 32 bits protected mode ring 3 code that has no IO privileges
     * or 32 bits protected mode ring 0 code
     *
     * The tests are ordered by the likelihood of being true during normal execution.
     */
    if (EFlags.u32 & (X86_EFL_TF /* | HF_INHIBIT_IRQ_MASK*/))
    {
        Log2(("raw mode refused: EFlags=%#x\n", EFlags.u32));
        return EMSTATE_REM;
    }

# ifndef VBOX_RAW_V86
    if (EFlags.u32 & X86_EFL_VM) {
        Log2(("raw mode refused: VM_MASK\n"));
        return EMSTATE_REM;
    }
# endif

    /** @todo check up the X86_CR0_AM flag in respect to raw mode!!! We're probably not emulating it right! */
    uint32_t u32CR0 = pCtx->cr0;
    /* Raw mode requires paged protected mode (both PG and PE set). */
    if ((u32CR0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
    {
        //Log2(("raw mode refused: %s%s%s\n", (u32CR0 & X86_CR0_PG) ? "" : " !PG", (u32CR0 & X86_CR0_PE) ? "" : " !PE", (u32CR0 & X86_CR0_AM) ? "" : " !AM"));
        return EMSTATE_REM;
    }

    /* Guest PAE is only acceptable when the (virtual) CPU reports PAE support. */
    if (pCtx->cr4 & X86_CR4_PAE)
    {
        uint32_t u32Dummy, u32Features;

        CPUMGetGuestCpuId(pVCpu, 1, &u32Dummy, &u32Dummy, &u32Dummy, &u32Features);
        if (!(u32Features & X86_CPUID_FEATURE_EDX_PAE))
            return EMSTATE_REM;
    }

    /* Ring 3 (or V86) vs. ring 0 code take different raw-mode checks below. */
    unsigned uSS = pCtx->ss.Sel;
    if (   pCtx->eflags.Bits.u1VM
        || (uSS & X86_SEL_RPL) == 3)
    {
        if (!EMIsRawRing3Enabled(pVM))
            return EMSTATE_REM;

        if (!(EFlags.u32 & X86_EFL_IF))
        {
            Log2(("raw mode refused: IF (RawR3)\n"));
            return EMSTATE_REM;
        }

        if (!(u32CR0 & X86_CR0_WP) && EMIsRawRing0Enabled(pVM))
        {
            Log2(("raw mode refused: CR0.WP + RawR0\n"));
            return EMSTATE_REM;
        }
    }
    else
    {
        if (!EMIsRawRing0Enabled(pVM))
            return EMSTATE_REM;

        /* Only ring 0 supervisor code. */
        if ((uSS & X86_SEL_RPL) != 0)
        {
            Log2(("raw r0 mode refused: CPL %d\n", uSS & X86_SEL_RPL));
            return EMSTATE_REM;
        }

        // Let's start with pure 32 bits ring 0 code first
        /** @todo What's pure 32-bit mode? flat? */
        if (    !(pCtx->ss.Attr.n.u1DefBig)
            ||  !(pCtx->cs.Attr.n.u1DefBig))
        {
            Log2(("raw r0 mode refused: SS/CS not 32bit\n"));
            return EMSTATE_REM;
        }

        /* Write protection must be turned on, or else the guest can overwrite our hypervisor code and data. */
        if (!(u32CR0 & X86_CR0_WP))
        {
            Log2(("raw r0 mode refused: CR0.WP=0!\n"));
            return EMSTATE_REM;
        }

        if (PATMShouldUseRawMode(pVM, (RTGCPTR)pCtx->eip))
        {
            Log2(("raw r0 mode forced: patch code\n"));
            return EMSTATE_RAW;
        }

# if !defined(VBOX_ALLOW_IF0) && !defined(VBOX_RUN_INTERRUPT_GATE_HANDLERS)
        if (!(EFlags.u32 & X86_EFL_IF))
        {
            ////Log2(("R0: IF=0 VIF=%d %08X\n", eip, pVMeflags));
            //Log2(("RR0: Interrupts turned off; fall back to emulation\n"));
            return EMSTATE_REM;
        }
# endif

        /** @todo still necessary??? */
        if (EFlags.Bits.u2IOPL != 0)
        {
            Log2(("raw r0 mode refused: IOPL %d\n", EFlags.Bits.u2IOPL));
            return EMSTATE_REM;
        }
    }

    /*
     * Stale hidden selectors means raw-mode is unsafe (being very careful).
     */
    if (pCtx->cs.fFlags & CPUMSELREG_FLAGS_STALE)
    {
        Log2(("raw mode refused: stale CS\n"));
        return EMSTATE_REM;
    }
    if (pCtx->ss.fFlags & CPUMSELREG_FLAGS_STALE)
    {
        Log2(("raw mode refused: stale SS\n"));
        return EMSTATE_REM;
    }
    if (pCtx->ds.fFlags & CPUMSELREG_FLAGS_STALE)
    {
        Log2(("raw mode refused: stale DS\n"));
        return EMSTATE_REM;
    }
    if (pCtx->es.fFlags & CPUMSELREG_FLAGS_STALE)
    {
        Log2(("raw mode refused: stale ES\n"));
        return EMSTATE_REM;
    }
    if (pCtx->fs.fFlags & CPUMSELREG_FLAGS_STALE)
    {
        Log2(("raw mode refused: stale FS\n"));
        return EMSTATE_REM;
    }
    if (pCtx->gs.fFlags & CPUMSELREG_FLAGS_STALE)
    {
        Log2(("raw mode refused: stale GS\n"));
        return EMSTATE_REM;
    }

    /*Assert(PGMPhysIsA20Enabled(pVCpu));*/
    /* All checks passed: raw-mode execution is acceptable. */
    return EMSTATE_RAW;
#endif /* !IEM_VERIFICATION_MODE */

}
1354
1355
1356/**
1357 * Executes all high priority post execution force actions.
1358 *
1359 * @returns rc or a fatal status code.
1360 *
1361 * @param pVM Pointer to the VM.
1362 * @param pVCpu Pointer to the VMCPU.
1363 * @param rc The current rc.
1364 */
1365int emR3HighPriorityPostForcedActions(PVM pVM, PVMCPU pVCpu, int rc)
1366{
1367 VBOXVMM_EM_FF_HIGH(pVCpu, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions, rc);
1368
1369 if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PDM_CRITSECT))
1370 PDMCritSectFF(pVCpu);
1371
1372 if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_CSAM_PENDING_ACTION))
1373 CSAMR3DoPendingAction(pVM, pVCpu);
1374
1375 if (VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY))
1376 {
1377 if ( rc > VINF_EM_NO_MEMORY
1378 && rc <= VINF_EM_LAST)
1379 rc = VINF_EM_NO_MEMORY;
1380 }
1381
1382 return rc;
1383}
1384
1385
1386/**
1387 * Executes all pending forced actions.
1388 *
1389 * Forced actions can cause execution delays and execution
1390 * rescheduling. The first we deal with using action priority, so
1391 * that for instance pending timers aren't scheduled and ran until
1392 * right before execution. The rescheduling we deal with using
1393 * return codes. The same goes for VM termination, only in that case
1394 * we exit everything.
1395 *
1396 * @returns VBox status code of equal or greater importance/severity than rc.
1397 * The most important ones are: VINF_EM_RESCHEDULE,
1398 * VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1399 *
1400 * @param pVM Pointer to the VM.
1401 * @param pVCpu Pointer to the VMCPU.
1402 * @param rc The current rc.
1403 *
1404 */
1405int emR3ForcedActions(PVM pVM, PVMCPU pVCpu, int rc)
1406{
1407 STAM_REL_PROFILE_START(&pVCpu->em.s.StatForcedActions, a);
1408#ifdef VBOX_STRICT
1409 int rcIrq = VINF_SUCCESS;
1410#endif
1411 int rc2;
1412#define UPDATE_RC() \
1413 do { \
1414 AssertMsg(rc2 <= 0 || (rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST), ("Invalid FF return code: %Rra\n", rc2)); \
1415 if (rc2 == VINF_SUCCESS || rc < VINF_SUCCESS) \
1416 break; \
1417 if (!rc || rc2 < rc) \
1418 rc = rc2; \
1419 } while (0)
1420 VBOXVMM_EM_FF_ALL(pVCpu, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions, rc);
1421
1422 /*
1423 * Post execution chunk first.
1424 */
1425 if ( VM_FF_ISPENDING(pVM, VM_FF_NORMAL_PRIORITY_POST_MASK)
1426 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_NORMAL_PRIORITY_POST_MASK))
1427 {
1428 /*
1429 * EMT Rendezvous (must be serviced before termination).
1430 */
1431 if (VM_FF_ISPENDING(pVM, VM_FF_EMT_RENDEZVOUS))
1432 {
1433 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
1434 UPDATE_RC();
1435 /** @todo HACK ALERT! The following test is to make sure EM+TM
1436 * thinks the VM is stopped/reset before the next VM state change
1437 * is made. We need a better solution for this, or at least make it
1438 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1439 * VINF_EM_SUSPEND). */
1440 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1441 {
1442 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1443 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1444 return rc;
1445 }
1446 }
1447
1448 /*
1449 * State change request (cleared by vmR3SetStateLocked).
1450 */
1451 if (VM_FF_ISPENDING(pVM, VM_FF_CHECK_VM_STATE))
1452 {
1453 VMSTATE enmState = VMR3GetState(pVM);
1454 switch (enmState)
1455 {
1456 case VMSTATE_FATAL_ERROR:
1457 case VMSTATE_FATAL_ERROR_LS:
1458 Log2(("emR3ForcedActions: %s -> VINF_EM_SUSPEND\n", VMGetStateName(enmState) ));
1459 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1460 return VINF_EM_SUSPEND;
1461
1462 case VMSTATE_DESTROYING:
1463 Log2(("emR3ForcedActions: %s -> VINF_EM_TERMINATE\n", VMGetStateName(enmState) ));
1464 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1465 return VINF_EM_TERMINATE;
1466
1467 default:
1468 AssertMsgFailed(("%s\n", VMGetStateName(enmState)));
1469 }
1470 }
1471
1472 /*
1473 * Debugger Facility polling.
1474 */
1475 if (VM_FF_ISPENDING(pVM, VM_FF_DBGF))
1476 {
1477 rc2 = DBGFR3VMMForcedAction(pVM);
1478 UPDATE_RC();
1479 }
1480
1481 /*
1482 * Postponed reset request.
1483 */
1484 if (VM_FF_TESTANDCLEAR(pVM, VM_FF_RESET))
1485 {
1486 rc2 = VMR3Reset(pVM);
1487 UPDATE_RC();
1488 }
1489
1490 /*
1491 * CSAM page scanning.
1492 */
1493 if ( !VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY)
1494 && VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_CSAM_SCAN_PAGE))
1495 {
1496 PCPUMCTX pCtx = pVCpu->em.s.pCtx;
1497
1498 /** @todo: check for 16 or 32 bits code! (D bit in the code selector) */
1499 Log(("Forced action VMCPU_FF_CSAM_SCAN_PAGE\n"));
1500
1501 CSAMR3CheckCodeEx(pVM, CPUMCTX2CORE(pCtx), pCtx->eip);
1502 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_CSAM_SCAN_PAGE);
1503 }
1504
1505 /*
1506 * Out of memory? Putting this after CSAM as it may in theory cause us to run out of memory.
1507 */
1508 if (VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY))
1509 {
1510 rc2 = PGMR3PhysAllocateHandyPages(pVM);
1511 UPDATE_RC();
1512 if (rc == VINF_EM_NO_MEMORY)
1513 return rc;
1514 }
1515
1516 /* check that we got them all */
1517 AssertCompile(VM_FF_NORMAL_PRIORITY_POST_MASK == (VM_FF_CHECK_VM_STATE | VM_FF_DBGF | VM_FF_RESET | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
1518 AssertCompile(VMCPU_FF_NORMAL_PRIORITY_POST_MASK == VMCPU_FF_CSAM_SCAN_PAGE);
1519 }
1520
1521 /*
1522 * Normal priority then.
1523 * (Executed in no particular order.)
1524 */
1525 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_NORMAL_PRIORITY_MASK, VM_FF_PGM_NO_MEMORY))
1526 {
1527 /*
1528 * PDM Queues are pending.
1529 */
1530 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PDM_QUEUES, VM_FF_PGM_NO_MEMORY))
1531 PDMR3QueueFlushAll(pVM);
1532
1533 /*
1534 * PDM DMA transfers are pending.
1535 */
1536 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PDM_DMA, VM_FF_PGM_NO_MEMORY))
1537 PDMR3DmaRun(pVM);
1538
1539 /*
1540 * EMT Rendezvous (make sure they are handled before the requests).
1541 */
1542 if (VM_FF_ISPENDING(pVM, VM_FF_EMT_RENDEZVOUS))
1543 {
1544 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
1545 UPDATE_RC();
1546 /** @todo HACK ALERT! The following test is to make sure EM+TM
1547 * thinks the VM is stopped/reset before the next VM state change
1548 * is made. We need a better solution for this, or at least make it
1549 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1550 * VINF_EM_SUSPEND). */
1551 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1552 {
1553 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1554 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1555 return rc;
1556 }
1557 }
1558
1559 /*
1560 * Requests from other threads.
1561 */
1562 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_REQUEST, VM_FF_PGM_NO_MEMORY))
1563 {
1564 rc2 = VMR3ReqProcessU(pVM->pUVM, VMCPUID_ANY, false /*fPriorityOnly*/);
1565 if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE) /** @todo this shouldn't be necessary */
1566 {
1567 Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
1568 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1569 return rc2;
1570 }
1571 UPDATE_RC();
1572 /** @todo HACK ALERT! The following test is to make sure EM+TM
1573 * thinks the VM is stopped/reset before the next VM state change
1574 * is made. We need a better solution for this, or at least make it
1575 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1576 * VINF_EM_SUSPEND). */
1577 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1578 {
1579 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1580 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1581 return rc;
1582 }
1583 }
1584
1585#ifdef VBOX_WITH_REM
1586 /* Replay the handler notification changes. */
1587 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_REM_HANDLER_NOTIFY, VM_FF_PGM_NO_MEMORY))
1588 {
1589 /* Try not to cause deadlocks. */
1590 if ( pVM->cCpus == 1
1591 || ( !PGMIsLockOwner(pVM)
1592 && !IOMIsLockOwner(pVM))
1593 )
1594 {
1595 EMRemLock(pVM);
1596 REMR3ReplayHandlerNotifications(pVM);
1597 EMRemUnlock(pVM);
1598 }
1599 }
1600#endif
1601
1602 /* check that we got them all */
1603 AssertCompile(VM_FF_NORMAL_PRIORITY_MASK == (VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA | VM_FF_REM_HANDLER_NOTIFY | VM_FF_EMT_RENDEZVOUS));
1604 }
1605
1606 /*
1607 * Normal priority then. (per-VCPU)
1608 * (Executed in no particular order.)
1609 */
1610 if ( !VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY)
1611 && VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_NORMAL_PRIORITY_MASK))
1612 {
1613 /*
1614 * Requests from other threads.
1615 */
1616 if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_REQUEST))
1617 {
1618 rc2 = VMR3ReqProcessU(pVM->pUVM, pVCpu->idCpu, false /*fPriorityOnly*/);
1619 if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE || rc2 == VINF_EM_RESET)
1620 {
1621 Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
1622 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1623 return rc2;
1624 }
1625 UPDATE_RC();
1626 /** @todo HACK ALERT! The following test is to make sure EM+TM
1627 * thinks the VM is stopped/reset before the next VM state change
1628 * is made. We need a better solution for this, or at least make it
1629 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1630 * VINF_EM_SUSPEND). */
1631 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1632 {
1633 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1634 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1635 return rc;
1636 }
1637 }
1638
1639 /* check that we got them all */
1640 Assert(!(VMCPU_FF_NORMAL_PRIORITY_MASK & ~(VMCPU_FF_REQUEST)));
1641 }
1642
1643 /*
1644 * High priority pre execution chunk last.
1645 * (Executed in ascending priority order.)
1646 */
1647 if ( VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_MASK)
1648 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_MASK))
1649 {
1650 /*
1651 * Timers before interrupts.
1652 */
1653 if ( VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_TIMER)
1654 && !VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY))
1655 TMR3TimerQueuesDo(pVM);
1656
1657 /*
1658 * The instruction following an emulated STI should *always* be executed!
1659 *
1660 * Note! We intentionally don't clear VM_FF_INHIBIT_INTERRUPTS here if
1661 * the eip is the same as the inhibited instr address. Before we
1662 * are able to execute this instruction in raw mode (iret to
1663 * guest code) an external interrupt might force a world switch
1664 * again. Possibly allowing a guest interrupt to be dispatched
1665 * in the process. This could break the guest. Sounds very
1666 * unlikely, but such timing sensitive problem are not as rare as
1667 * you might think.
1668 */
1669 if ( VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
1670 && !VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY))
1671 {
1672 if (CPUMGetGuestRIP(pVCpu) != EMGetInhibitInterruptsPC(pVCpu))
1673 {
1674 Log(("Clearing VMCPU_FF_INHIBIT_INTERRUPTS at %RGv - successor %RGv\n", (RTGCPTR)CPUMGetGuestRIP(pVCpu), EMGetInhibitInterruptsPC(pVCpu)));
1675 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
1676 }
1677 else
1678 Log(("Leaving VMCPU_FF_INHIBIT_INTERRUPTS set at %RGv\n", (RTGCPTR)CPUMGetGuestRIP(pVCpu)));
1679 }
1680
1681 /*
1682 * Interrupts.
1683 */
1684 bool fWakeupPending = false;
1685 if ( !VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY)
1686 && !VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
1687 && (!rc || rc >= VINF_EM_RESCHEDULE_HWACC)
1688 && !TRPMHasTrap(pVCpu) /* an interrupt could already be scheduled for dispatching in the recompiler. */
1689 && PATMAreInterruptsEnabled(pVM)
1690 && !HWACCMR3IsEventPending(pVCpu))
1691 {
1692 Assert(pVCpu->em.s.enmState != EMSTATE_WAIT_SIPI);
1693 if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
1694 {
1695 /* Note: it's important to make sure the return code from TRPMR3InjectEvent isn't ignored! */
1696 /** @todo this really isn't nice, should properly handle this */
1697 rc2 = TRPMR3InjectEvent(pVM, pVCpu, TRPM_HARDWARE_INT);
1698#ifdef VBOX_STRICT
1699 rcIrq = rc2;
1700#endif
1701 UPDATE_RC();
1702 /* Reschedule required: We must not miss the wakeup below! */
1703 fWakeupPending = true;
1704 }
1705#ifdef VBOX_WITH_REM
1706 /** @todo really ugly; if we entered the hlt state when exiting the recompiler and an interrupt was pending, we previously got stuck in the halted state. */
1707 else if (REMR3QueryPendingInterrupt(pVM, pVCpu) != REM_NO_PENDING_IRQ)
1708 {
1709 rc2 = VINF_EM_RESCHEDULE_REM;
1710 UPDATE_RC();
1711 }
1712#endif
1713 }
1714
1715 /*
1716 * Allocate handy pages.
1717 */
1718 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PGM_NEED_HANDY_PAGES, VM_FF_PGM_NO_MEMORY))
1719 {
1720 rc2 = PGMR3PhysAllocateHandyPages(pVM);
1721 UPDATE_RC();
1722 }
1723
1724 /*
1725 * Debugger Facility request.
1726 */
1727 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_DBGF, VM_FF_PGM_NO_MEMORY))
1728 {
1729 rc2 = DBGFR3VMMForcedAction(pVM);
1730 UPDATE_RC();
1731 }
1732
1733 /*
1734 * EMT Rendezvous (must be serviced before termination).
1735 */
1736 if ( !fWakeupPending /* don't miss the wakeup from EMSTATE_HALTED! */
1737 && VM_FF_ISPENDING(pVM, VM_FF_EMT_RENDEZVOUS))
1738 {
1739 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
1740 UPDATE_RC();
1741 /** @todo HACK ALERT! The following test is to make sure EM+TM thinks the VM is
1742 * stopped/reset before the next VM state change is made. We need a better
1743 * solution for this, or at least make it possible to do: (rc >= VINF_EM_FIRST
1744 * && rc >= VINF_EM_SUSPEND). */
1745 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1746 {
1747 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1748 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1749 return rc;
1750 }
1751 }
1752
1753 /*
1754 * State change request (cleared by vmR3SetStateLocked).
1755 */
1756 if ( !fWakeupPending /* don't miss the wakeup from EMSTATE_HALTED! */
1757 && VM_FF_ISPENDING(pVM, VM_FF_CHECK_VM_STATE))
1758 {
1759 VMSTATE enmState = VMR3GetState(pVM);
1760 switch (enmState)
1761 {
1762 case VMSTATE_FATAL_ERROR:
1763 case VMSTATE_FATAL_ERROR_LS:
1764 Log2(("emR3ForcedActions: %s -> VINF_EM_SUSPEND\n", VMGetStateName(enmState) ));
1765 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1766 return VINF_EM_SUSPEND;
1767
1768 case VMSTATE_DESTROYING:
1769 Log2(("emR3ForcedActions: %s -> VINF_EM_TERMINATE\n", VMGetStateName(enmState) ));
1770 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1771 return VINF_EM_TERMINATE;
1772
1773 default:
1774 AssertMsgFailed(("%s\n", VMGetStateName(enmState)));
1775 }
1776 }
1777
1778 /*
1779 * Out of memory? Since most of our fellow high priority actions may cause us
1780 * to run out of memory, we're employing VM_FF_IS_PENDING_EXCEPT and putting this
1781 * at the end rather than the start. Also, VM_FF_TERMINATE has higher priority
1782 * than us since we can terminate without allocating more memory.
1783 */
1784 if (VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY))
1785 {
1786 rc2 = PGMR3PhysAllocateHandyPages(pVM);
1787 UPDATE_RC();
1788 if (rc == VINF_EM_NO_MEMORY)
1789 return rc;
1790 }
1791
1792 /*
1793 * If the virtual sync clock is still stopped, make TM restart it.
1794 */
1795 if (VM_FF_ISPENDING(pVM, VM_FF_TM_VIRTUAL_SYNC))
1796 TMR3VirtualSyncFF(pVM, pVCpu);
1797
1798#ifdef DEBUG
1799 /*
1800 * Debug, pause the VM.
1801 */
1802 if (VM_FF_ISPENDING(pVM, VM_FF_DEBUG_SUSPEND))
1803 {
1804 VM_FF_CLEAR(pVM, VM_FF_DEBUG_SUSPEND);
1805 Log(("emR3ForcedActions: returns VINF_EM_SUSPEND\n"));
1806 return VINF_EM_SUSPEND;
1807 }
1808#endif
1809
1810 /* check that we got them all */
1811 AssertCompile(VM_FF_HIGH_PRIORITY_PRE_MASK == (VM_FF_TM_VIRTUAL_SYNC | VM_FF_DBGF | VM_FF_CHECK_VM_STATE | VM_FF_DEBUG_SUSPEND | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
1812 AssertCompile(VMCPU_FF_HIGH_PRIORITY_PRE_MASK == (VMCPU_FF_TIMER | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_SELM_SYNC_TSS | VMCPU_FF_TRPM_SYNC_IDT | VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT | VMCPU_FF_INHIBIT_INTERRUPTS));
1813 }
1814
1815#undef UPDATE_RC
1816 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1817 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1818 Assert(rcIrq == VINF_SUCCESS || rcIrq == rc);
1819 return rc;
1820}
1821
1822
1823/**
1824 * Check if the preset execution time cap restricts guest execution scheduling.
1825 *
1826 * @returns true if allowed, false otherwise
1827 * @param pVM Pointer to the VM.
1828 * @param pVCpu Pointer to the VMCPU.
1829 *
1830 */
1831VMMR3DECL(bool) EMR3IsExecutionAllowed(PVM pVM, PVMCPU pVCpu)
1832{
1833 uint64_t u64UserTime, u64KernelTime;
1834
1835 if ( pVM->uCpuExecutionCap != 100
1836 && RT_SUCCESS(RTThreadGetExecutionTimeMilli(&u64KernelTime, &u64UserTime)))
1837 {
1838 uint64_t u64TimeNow = RTTimeMilliTS();
1839 if (pVCpu->em.s.u64TimeSliceStart + EM_TIME_SLICE < u64TimeNow)
1840 {
1841 /* New time slice. */
1842 pVCpu->em.s.u64TimeSliceStart = u64TimeNow;
1843 pVCpu->em.s.u64TimeSliceStartExec = u64KernelTime + u64UserTime;
1844 pVCpu->em.s.u64TimeSliceExec = 0;
1845 }
1846 pVCpu->em.s.u64TimeSliceExec = u64KernelTime + u64UserTime - pVCpu->em.s.u64TimeSliceStartExec;
1847
1848 Log2(("emR3IsExecutionAllowed: start=%RX64 startexec=%RX64 exec=%RX64 (cap=%x)\n", pVCpu->em.s.u64TimeSliceStart, pVCpu->em.s.u64TimeSliceStartExec, pVCpu->em.s.u64TimeSliceExec, (EM_TIME_SLICE * pVM->uCpuExecutionCap) / 100));
1849 if (pVCpu->em.s.u64TimeSliceExec >= (EM_TIME_SLICE * pVM->uCpuExecutionCap) / 100)
1850 return false;
1851 }
1852 return true;
1853}
1854
1855
1856/**
1857 * Execute VM.
1858 *
1859 * This function is the main loop of the VM. The emulation thread
1860 * calls this function when the VM has been successfully constructed
1861 * and we're ready for executing the VM.
1862 *
1863 * Returning from this function means that the VM is turned off or
1864 * suspended (state already saved) and deconstruction is next in line.
1865 *
1866 * All interaction from other thread are done using forced actions
1867 * and signaling of the wait object.
1868 *
1869 * @returns VBox status code, informational status codes may indicate failure.
1870 * @param pVM Pointer to the VM.
1871 * @param pVCpu Pointer to the VMCPU.
1872 */
1873VMMR3DECL(int) EMR3ExecuteVM(PVM pVM, PVMCPU pVCpu)
1874{
1875 Log(("EMR3ExecuteVM: pVM=%p enmVMState=%d (%s) enmState=%d (%s) enmPrevState=%d (%s) fForceRAW=%RTbool\n",
1876 pVM,
1877 pVM->enmVMState, VMR3GetStateName(pVM->enmVMState),
1878 pVCpu->em.s.enmState, emR3GetStateName(pVCpu->em.s.enmState),
1879 pVCpu->em.s.enmPrevState, emR3GetStateName(pVCpu->em.s.enmPrevState),
1880 pVCpu->em.s.fForceRAW));
1881 VM_ASSERT_EMT(pVM);
1882 AssertMsg( pVCpu->em.s.enmState == EMSTATE_NONE
1883 || pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI
1884 || pVCpu->em.s.enmState == EMSTATE_SUSPENDED,
1885 ("%s\n", emR3GetStateName(pVCpu->em.s.enmState)));
1886
1887 int rc = setjmp(pVCpu->em.s.u.FatalLongJump);
1888 if (rc == 0)
1889 {
1890 /*
1891 * Start the virtual time.
1892 */
1893 TMR3NotifyResume(pVM, pVCpu);
1894
1895 /*
1896 * The Outer Main Loop.
1897 */
1898 bool fFFDone = false;
1899
1900 /* Reschedule right away to start in the right state. */
1901 rc = VINF_SUCCESS;
1902
1903 /* If resuming after a pause or a state load, restore the previous
1904 state or else we'll start executing code. Else, just reschedule. */
1905 if ( pVCpu->em.s.enmState == EMSTATE_SUSPENDED
1906 && ( pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
1907 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED))
1908 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
1909 else
1910 pVCpu->em.s.enmState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
1911
1912 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
1913 for (;;)
1914 {
1915 /*
1916 * Before we can schedule anything (we're here because
1917 * scheduling is required) we must service any pending
1918 * forced actions to avoid any pending action causing
1919 * immediate rescheduling upon entering an inner loop
1920 *
1921 * Do forced actions.
1922 */
1923 if ( !fFFDone
1924 && rc != VINF_EM_TERMINATE
1925 && rc != VINF_EM_OFF
1926 && ( VM_FF_ISPENDING(pVM, VM_FF_ALL_REM_MASK)
1927 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_ALL_REM_MASK)))
1928 {
1929 rc = emR3ForcedActions(pVM, pVCpu, rc);
1930 VBOXVMM_EM_FF_ALL_RET(pVCpu, rc);
1931 if ( ( rc == VINF_EM_RESCHEDULE_REM
1932 || rc == VINF_EM_RESCHEDULE_HWACC)
1933 && pVCpu->em.s.fForceRAW)
1934 rc = VINF_EM_RESCHEDULE_RAW;
1935 }
1936 else if (fFFDone)
1937 fFFDone = false;
1938
1939 /*
1940 * Now what to do?
1941 */
1942 Log2(("EMR3ExecuteVM: rc=%Rrc\n", rc));
1943 EMSTATE const enmOldState = pVCpu->em.s.enmState;
1944 switch (rc)
1945 {
1946 /*
1947 * Keep doing what we're currently doing.
1948 */
1949 case VINF_SUCCESS:
1950 break;
1951
1952 /*
1953 * Reschedule - to raw-mode execution.
1954 */
1955 case VINF_EM_RESCHEDULE_RAW:
1956 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_RAW: %d -> %d (EMSTATE_RAW)\n", enmOldState, EMSTATE_RAW));
1957 pVCpu->em.s.enmState = EMSTATE_RAW;
1958 break;
1959
1960 /*
1961 * Reschedule - to hardware accelerated raw-mode execution.
1962 */
1963 case VINF_EM_RESCHEDULE_HWACC:
1964 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_HWACC: %d -> %d (EMSTATE_HWACC)\n", enmOldState, EMSTATE_HWACC));
1965 Assert(!pVCpu->em.s.fForceRAW);
1966 pVCpu->em.s.enmState = EMSTATE_HWACC;
1967 break;
1968
1969 /*
1970 * Reschedule - to recompiled execution.
1971 */
1972 case VINF_EM_RESCHEDULE_REM:
1973 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_REM: %d -> %d (EMSTATE_REM)\n", enmOldState, EMSTATE_REM));
1974 pVCpu->em.s.enmState = EMSTATE_REM;
1975 break;
1976
1977 /*
1978 * Resume.
1979 */
1980 case VINF_EM_RESUME:
1981 Log2(("EMR3ExecuteVM: VINF_EM_RESUME: %d -> VINF_EM_RESCHEDULE\n", enmOldState));
1982 /* Don't reschedule in the halted or wait for SIPI case. */
1983 if ( pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
1984 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED)
1985 {
1986 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
1987 break;
1988 }
1989 /* fall through and get scheduled. */
1990
1991 /*
1992 * Reschedule.
1993 */
1994 case VINF_EM_RESCHEDULE:
1995 {
1996 EMSTATE enmState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
1997 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE: %d -> %d (%s)\n", enmOldState, enmState, emR3GetStateName(enmState)));
1998 pVCpu->em.s.enmState = enmState;
1999 break;
2000 }
2001
2002 /*
2003 * Halted.
2004 */
2005 case VINF_EM_HALT:
2006 Log2(("EMR3ExecuteVM: VINF_EM_HALT: %d -> %d\n", enmOldState, EMSTATE_HALTED));
2007 pVCpu->em.s.enmState = EMSTATE_HALTED;
2008 break;
2009
2010 /*
2011 * Switch to the wait for SIPI state (application processor only)
2012 */
2013 case VINF_EM_WAIT_SIPI:
2014 Assert(pVCpu->idCpu != 0);
2015 Log2(("EMR3ExecuteVM: VINF_EM_WAIT_SIPI: %d -> %d\n", enmOldState, EMSTATE_WAIT_SIPI));
2016 pVCpu->em.s.enmState = EMSTATE_WAIT_SIPI;
2017 break;
2018
2019
2020 /*
2021 * Suspend.
2022 */
2023 case VINF_EM_SUSPEND:
2024 Log2(("EMR3ExecuteVM: VINF_EM_SUSPEND: %d -> %d\n", enmOldState, EMSTATE_SUSPENDED));
2025 Assert(enmOldState != EMSTATE_SUSPENDED);
2026 pVCpu->em.s.enmPrevState = enmOldState;
2027 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
2028 break;
2029
2030 /*
2031 * Reset.
2032 * We might end up doing a double reset for now, we'll have to clean up the mess later.
2033 */
2034 case VINF_EM_RESET:
2035 {
2036 if (pVCpu->idCpu == 0)
2037 {
2038 EMSTATE enmState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
2039 Log2(("EMR3ExecuteVM: VINF_EM_RESET: %d -> %d (%s)\n", enmOldState, enmState, emR3GetStateName(enmState)));
2040 pVCpu->em.s.enmState = enmState;
2041 }
2042 else
2043 {
2044 /* All other VCPUs go into the wait for SIPI state. */
2045 pVCpu->em.s.enmState = EMSTATE_WAIT_SIPI;
2046 }
2047 break;
2048 }
2049
2050 /*
2051 * Power Off.
2052 */
2053 case VINF_EM_OFF:
2054 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2055 Log2(("EMR3ExecuteVM: returns VINF_EM_OFF (%d -> %d)\n", enmOldState, EMSTATE_TERMINATING));
2056 TMR3NotifySuspend(pVM, pVCpu);
2057 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2058 return rc;
2059
2060 /*
2061 * Terminate the VM.
2062 */
2063 case VINF_EM_TERMINATE:
2064 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2065 Log(("EMR3ExecuteVM returns VINF_EM_TERMINATE (%d -> %d)\n", enmOldState, EMSTATE_TERMINATING));
2066 if (pVM->enmVMState < VMSTATE_DESTROYING) /* ugly */
2067 TMR3NotifySuspend(pVM, pVCpu);
2068 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2069 return rc;
2070
2071
2072 /*
2073 * Out of memory, suspend the VM and stuff.
2074 */
2075 case VINF_EM_NO_MEMORY:
2076 Log2(("EMR3ExecuteVM: VINF_EM_NO_MEMORY: %d -> %d\n", enmOldState, EMSTATE_SUSPENDED));
2077 Assert(enmOldState != EMSTATE_SUSPENDED);
2078 pVCpu->em.s.enmPrevState = enmOldState;
2079 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
2080 TMR3NotifySuspend(pVM, pVCpu);
2081 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2082
2083 rc = VMSetRuntimeError(pVM, VMSETRTERR_FLAGS_SUSPEND, "HostMemoryLow",
2084 N_("Unable to allocate and lock memory. The virtual machine will be paused. Please close applications to free up memory or close the VM"));
2085 if (rc != VINF_EM_SUSPEND)
2086 {
2087 if (RT_SUCCESS_NP(rc))
2088 {
2089 AssertLogRelMsgFailed(("%Rrc\n", rc));
2090 rc = VERR_EM_INTERNAL_ERROR;
2091 }
2092 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2093 }
2094 return rc;
2095
2096 /*
2097 * Guest debug events.
2098 */
2099 case VINF_EM_DBG_STEPPED:
2100 AssertMsgFailed(("VINF_EM_DBG_STEPPED cannot be here!"));
2101 case VINF_EM_DBG_STOP:
2102 case VINF_EM_DBG_BREAKPOINT:
2103 case VINF_EM_DBG_STEP:
2104 if (enmOldState == EMSTATE_RAW)
2105 {
2106 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_RAW));
2107 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_RAW;
2108 }
2109 else
2110 {
2111 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_REM));
2112 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_REM;
2113 }
2114 break;
2115
2116 /*
2117 * Hypervisor debug events.
2118 */
2119 case VINF_EM_DBG_HYPER_STEPPED:
2120 case VINF_EM_DBG_HYPER_BREAKPOINT:
2121 case VINF_EM_DBG_HYPER_ASSERTION:
2122 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_HYPER));
2123 pVCpu->em.s.enmState = EMSTATE_DEBUG_HYPER;
2124 break;
2125
2126 /*
2127 * Guru mediations.
2128 */
2129 case VERR_VMM_RING0_ASSERTION:
2130 Log(("EMR3ExecuteVM: %Rrc: %d -> %d (EMSTATE_GURU_MEDITATION)\n", rc, enmOldState, EMSTATE_GURU_MEDITATION));
2131 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2132 break;
2133
2134 /*
2135 * Any error code showing up here other than the ones we
2136 * know and process above are considered to be FATAL.
2137 *
2138 * Unknown warnings and informational status codes are also
2139 * included in this.
2140 */
2141 default:
2142 if (RT_SUCCESS_NP(rc))
2143 {
2144 AssertMsgFailed(("Unexpected warning or informational status code %Rra!\n", rc));
2145 rc = VERR_EM_INTERNAL_ERROR;
2146 }
2147 Log(("EMR3ExecuteVM: %Rrc: %d -> %d (EMSTATE_GURU_MEDITATION)\n", rc, enmOldState, EMSTATE_GURU_MEDITATION));
2148 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2149 break;
2150 }
2151
2152 /*
2153 * Act on state transition.
2154 */
2155 EMSTATE const enmNewState = pVCpu->em.s.enmState;
2156 if (enmOldState != enmNewState)
2157 {
2158 VBOXVMM_EM_STATE_CHANGED(pVCpu, enmOldState, enmNewState, rc);
2159
2160 /* Clear MWait flags. */
2161 if ( enmOldState == EMSTATE_HALTED
2162 && (pVCpu->em.s.MWait.fWait & EMMWAIT_FLAG_ACTIVE)
2163 && ( enmNewState == EMSTATE_RAW
2164 || enmNewState == EMSTATE_HWACC
2165 || enmNewState == EMSTATE_REM
2166 || enmNewState == EMSTATE_DEBUG_GUEST_RAW
2167 || enmNewState == EMSTATE_DEBUG_GUEST_HWACC
2168 || enmNewState == EMSTATE_DEBUG_GUEST_REM) )
2169 {
2170 LogFlow(("EMR3ExecuteVM: Clearing MWAIT\n"));
2171 pVCpu->em.s.MWait.fWait &= ~(EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0);
2172 }
2173 }
2174 else
2175 VBOXVMM_EM_STATE_UNCHANGED(pVCpu, enmNewState, rc);
2176
2177 STAM_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x); /* (skip this in release) */
2178 STAM_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2179
2180 /*
2181 * Act on the new state.
2182 */
2183 switch (enmNewState)
2184 {
2185 /*
2186 * Execute raw.
2187 */
2188 case EMSTATE_RAW:
2189#ifndef IEM_VERIFICATION_MODE /* remove later */
2190 rc = emR3RawExecute(pVM, pVCpu, &fFFDone);
2191 break;
2192#endif
2193
2194 /*
2195 * Execute hardware accelerated raw.
2196 */
2197 case EMSTATE_HWACC:
2198#ifndef IEM_VERIFICATION_MODE /* remove later */
2199 rc = emR3HwAccExecute(pVM, pVCpu, &fFFDone);
2200 break;
2201#endif
2202
2203 /*
2204 * Execute recompiled.
2205 */
2206 case EMSTATE_REM:
2207#ifdef IEM_VERIFICATION_MODE
2208# if 1
2209 rc = VBOXSTRICTRC_TODO(IEMExecOne(pVCpu)); fFFDone = false;
2210# else
2211 rc = VBOXSTRICTRC_TODO(REMR3EmulateInstruction(pVM, pVCpu)); fFFDone = false;
2212 if (rc == VINF_EM_RESCHEDULE)
2213 rc = VINF_SUCCESS;
2214# endif
2215#else
2216 rc = emR3RemExecute(pVM, pVCpu, &fFFDone);
2217#endif
2218 Log2(("EMR3ExecuteVM: emR3RemExecute -> %Rrc\n", rc));
2219 break;
2220
2221 /*
2222 * Application processor execution halted until SIPI.
2223 */
2224 case EMSTATE_WAIT_SIPI:
2225 /* no break */
2226 /*
2227 * hlt - execution halted until interrupt.
2228 */
2229 case EMSTATE_HALTED:
2230 {
2231 STAM_REL_PROFILE_START(&pVCpu->em.s.StatHalted, y);
2232 /* MWAIT has a special extension where it's woken up when
2233 an interrupt is pending even when IF=0. */
2234 if ( (pVCpu->em.s.MWait.fWait & (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
2235 == (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
2236 {
2237 rc = VMR3WaitHalted(pVM, pVCpu, false /*fIgnoreInterrupts*/);
2238 if ( rc == VINF_SUCCESS
2239 && VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
2240 {
2241 Log(("EMR3ExecuteVM: Triggering reschedule on pending IRQ after MWAIT\n"));
2242 rc = VINF_EM_RESCHEDULE;
2243 }
2244 }
2245 else
2246 rc = VMR3WaitHalted(pVM, pVCpu, !(CPUMGetGuestEFlags(pVCpu) & X86_EFL_IF));
2247
2248 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatHalted, y);
2249 break;
2250 }
2251
2252 /*
2253 * Suspended - return to VM.cpp.
2254 */
2255 case EMSTATE_SUSPENDED:
2256 TMR3NotifySuspend(pVM, pVCpu);
2257 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2258 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2259 return VINF_EM_SUSPEND;
2260
2261 /*
2262 * Debugging in the guest.
2263 */
2264 case EMSTATE_DEBUG_GUEST_REM:
2265 case EMSTATE_DEBUG_GUEST_RAW:
2266 TMR3NotifySuspend(pVM, pVCpu);
2267 rc = emR3Debug(pVM, pVCpu, rc);
2268 TMR3NotifyResume(pVM, pVCpu);
2269 Log2(("EMR3ExecuteVM: enmr3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
2270 break;
2271
2272 /*
2273 * Debugging in the hypervisor.
2274 */
2275 case EMSTATE_DEBUG_HYPER:
2276 {
2277 TMR3NotifySuspend(pVM, pVCpu);
2278 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2279
2280 rc = emR3Debug(pVM, pVCpu, rc);
2281 Log2(("EMR3ExecuteVM: enmr3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
2282 if (rc != VINF_SUCCESS)
2283 {
2284 /* switch to guru meditation mode */
2285 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2286 VMMR3FatalDump(pVM, pVCpu, rc);
2287 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2288 return rc;
2289 }
2290
2291 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2292 TMR3NotifyResume(pVM, pVCpu);
2293 break;
2294 }
2295
2296 /*
2297 * Guru meditation takes place in the debugger.
2298 */
2299 case EMSTATE_GURU_MEDITATION:
2300 {
2301 TMR3NotifySuspend(pVM, pVCpu);
2302 VMMR3FatalDump(pVM, pVCpu, rc);
2303 emR3Debug(pVM, pVCpu, rc);
2304 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2305 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2306 return rc;
2307 }
2308
2309 /*
2310 * The states we don't expect here.
2311 */
2312 case EMSTATE_NONE:
2313 case EMSTATE_TERMINATING:
2314 default:
2315 AssertMsgFailed(("EMR3ExecuteVM: Invalid state %d!\n", pVCpu->em.s.enmState));
2316 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2317 TMR3NotifySuspend(pVM, pVCpu);
2318 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2319 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2320 return VERR_EM_INTERNAL_ERROR;
2321 }
2322 } /* The Outer Main Loop */
2323 }
2324 else
2325 {
2326 /*
2327 * Fatal error.
2328 */
2329 Log(("EMR3ExecuteVM: returns %Rrc because of longjmp / fatal error; (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(pVCpu->em.s.enmPrevState)));
2330 TMR3NotifySuspend(pVM, pVCpu);
2331 VMMR3FatalDump(pVM, pVCpu, rc);
2332 emR3Debug(pVM, pVCpu, rc);
2333 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2334 /** @todo change the VM state! */
2335 return rc;
2336 }
2337
2338 /* (won't ever get here). */
2339 AssertFailed();
2340}
2341
2342/**
2343 * Notify EM of a state change (used by FTM)
2344 *
2345 * @param pVM Pointer to the VM.
2346 */
2347VMMR3DECL(int) EMR3NotifySuspend(PVM pVM)
2348{
2349 PVMCPU pVCpu = VMMGetCpu(pVM);
2350
2351 TMR3NotifySuspend(pVM, pVCpu); /* Stop the virtual time. */
2352 pVCpu->em.s.enmPrevState = pVCpu->em.s.enmState;
2353 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
2354 return VINF_SUCCESS;
2355}
2356
2357/**
2358 * Notify EM of a state change (used by FTM)
2359 *
2360 * @param pVM Pointer to the VM.
2361 */
2362VMMR3DECL(int) EMR3NotifyResume(PVM pVM)
2363{
2364 PVMCPU pVCpu = VMMGetCpu(pVM);
2365 EMSTATE enmCurState = pVCpu->em.s.enmState;
2366
2367 TMR3NotifyResume(pVM, pVCpu); /* Resume the virtual time. */
2368 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
2369 pVCpu->em.s.enmPrevState = enmCurState;
2370 return VINF_SUCCESS;
2371}
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette