VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/EM.cpp@38838

Last change on this file since 38838 was 38838, checked in by vboxsync, 13 years ago

VMM,++: Try to fix the async reset, suspend and power-off problems in PDM wrt conflicting VMM requests. Split them into priority requests and normal requests. The priority requests can safely be processed when PDM is doing async state change waits; the normal ones cannot. (The problem I bumped into was an unmap-chunk request from PGM being processed during PDMR3Reset, causing a recursive VMMR3EmtRendezvous deadlock.)

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 104.2 KB
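
The change note's priority/normal split can be pictured as two per-EMT request queues, only one of which is drained while PDM is waiting on an async state change. A minimal C sketch (editor's illustration only; the SKETCH* types and the helper below are hypothetical, not the actual VMM/PDM API):

    #include <stddef.h>

    /* Hypothetical request record; the real code uses VMM request packets. */
    typedef struct SKETCHREQ
    {
        struct SKETCHREQ *pNext;
        void (*pfnHandler)(void);
    } SKETCHREQ;

    typedef struct SKETCHEMT
    {
        SKETCHREQ *pPriorityReqs;   /* reset / suspend / power-off      */
        SKETCHREQ *pNormalReqs;     /* e.g. PGM unmap-chunk requests    */
    } SKETCHEMT;

    /* While PDM performs an async state-change wait, only the priority queue
       is processed; normal requests stay queued, which avoids the recursive
       VMMR3EmtRendezvous deadlock described in the change note. */
    static void sketchProcessPriorityRequests(SKETCHEMT *pEmt)
    {
        for (SKETCHREQ *pReq = pEmt->pPriorityReqs; pReq; pReq = pReq->pNext)
            pReq->pfnHandler();
        pEmt->pPriorityReqs = NULL; /* the normal queue is left untouched */
    }
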
/* $Id: EM.cpp 38838 2011-09-23 11:21:55Z vboxsync $ */
/** @file
 * EM - Execution Monitor / Manager.
 */

/*
 * Copyright (C) 2006-2011 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */

/** @page pg_em EM - The Execution Monitor / Manager
 *
 * The Execution Monitor/Manager is responsible for running the VM, scheduling
 * the right kind of execution (Raw-mode, Hardware Assisted, Recompiled or
 * Interpreted), and keeping the CPU states in sync. The function
 * EMR3ExecuteVM() is the 'main-loop' of the VM, while each of the execution
 * modes has different inner loops (emR3RawExecute, emR3HwAccExecute, and
 * emR3RemExecute).
 *
 * The interpreted execution is only used to avoid switching between
 * raw-mode/hwaccm and the recompiler when fielding virtualization traps/faults.
 * The interpretation is thus implemented as part of EM.
 *
 * @see grp_em
 */
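
/*
 * Editor's note: a minimal sketch of the outer-loop dispatch described in the
 * comment above. The function name emR3SketchRunLoop is hypothetical and the
 * body is intentionally simplified (no forced actions, halting or debugging);
 * the real dispatch, with all of that, lives in EMR3ExecuteVM() further down
 * in this file.
 */
#if 0 /* illustration only, never compiled */
static int emR3SketchRunLoop(PVM pVM, PVMCPU pVCpu)
{
    int rc = VINF_SUCCESS;
    for (;;)
    {
        bool fFFDone = false;
        switch (pVCpu->em.s.enmState)
        {
            /* Each execution mode has its own inner loop. */
            case EMSTATE_RAW:   rc = emR3RawExecute(pVM, pVCpu, &fFFDone);   break;
            case EMSTATE_HWACC: rc = emR3HwAccExecute(pVM, pVCpu, &fFFDone); break;
            case EMSTATE_REM:   rc = emR3RemExecute(pVM, pVCpu, &fFFDone);   break;
            default:            return rc; /* halted/suspended/etc. elided */
        }
        /* Pick the next execution mode from the guest state and status code. */
        pVCpu->em.s.enmState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
    }
}
#endif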

/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_EM
#include <VBox/vmm/em.h>
#include <VBox/vmm/vmm.h>
#include <VBox/vmm/patm.h>
#include <VBox/vmm/csam.h>
#include <VBox/vmm/selm.h>
#include <VBox/vmm/trpm.h>
#include <VBox/vmm/iom.h>
#include <VBox/vmm/dbgf.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/rem.h>
#include <VBox/vmm/tm.h>
#include <VBox/vmm/mm.h>
#include <VBox/vmm/ssm.h>
#include <VBox/vmm/pdmapi.h>
#include <VBox/vmm/pdmcritsect.h>
#include <VBox/vmm/pdmqueue.h>
#include <VBox/vmm/hwaccm.h>
#include <VBox/vmm/patm.h>
#ifdef IEM_VERIFICATION_MODE
# include <VBox/vmm/iem.h>
#endif
#include "EMInternal.h"
#include "internal/em.h"
#include <VBox/vmm/vm.h>
#include <VBox/vmm/cpumdis.h>
#include <VBox/dis.h>
#include <VBox/disopcode.h>
#include <VBox/vmm/dbgf.h>

#include <iprt/asm.h>
#include <iprt/string.h>
#include <iprt/stream.h>
#include <iprt/thread.h>


/*******************************************************************************
*   Defined Constants And Macros                                               *
*******************************************************************************/
#if 0 /* Disabled till after 2.1.0 when we've time to test it. */
#define EM_NOTIFY_HWACCM
#endif


/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM);
static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
static const char *emR3GetStateName(EMSTATE enmState);
static int emR3Debug(PVM pVM, PVMCPU pVCpu, int rc);
static int emR3RemStep(PVM pVM, PVMCPU pVCpu);
static int emR3RemExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone);
int emR3HighPriorityPostForcedActions(PVM pVM, PVMCPU pVCpu, int rc);


/**
 * Initializes the EM.
 *
 * @returns VBox status code.
 * @param pVM The VM to operate on.
 */
VMMR3DECL(int) EMR3Init(PVM pVM)
{
    LogFlow(("EMR3Init\n"));
    /*
     * Assert alignment and sizes.
     */
    AssertCompileMemberAlignment(VM, em.s, 32);
    AssertCompile(sizeof(pVM->em.s) <= sizeof(pVM->em.padding));
    AssertCompile(sizeof(pVM->aCpus[0].em.s.u.FatalLongJump) <= sizeof(pVM->aCpus[0].em.s.u.achPaddingFatalLongJump));
    AssertCompileMemberAlignment(EM, CritSectREM, sizeof(uintptr_t));

    /*
     * Init the structure.
     */
    pVM->em.s.offVM = RT_OFFSETOF(VM, em.s);
    bool fEnabled;
    int rc = CFGMR3QueryBool(CFGMR3GetRoot(pVM), "RawR3Enabled", &fEnabled);
    pVM->fRecompileUser = RT_SUCCESS(rc) ? !fEnabled : false;
    rc = CFGMR3QueryBool(CFGMR3GetRoot(pVM), "RawR0Enabled", &fEnabled);
    pVM->fRecompileSupervisor = RT_SUCCESS(rc) ? !fEnabled : false;
    Log(("EMR3Init: fRecompileUser=%RTbool fRecompileSupervisor=%RTbool\n", pVM->fRecompileUser, pVM->fRecompileSupervisor));

    /*
     * Initialize the REM critical section.
     */
    rc = PDMR3CritSectInit(pVM, &pVM->em.s.CritSectREM, RT_SRC_POS, "EM-REM");
    AssertRCReturn(rc, rc);

    /*
     * Saved state.
     */
    rc = SSMR3RegisterInternal(pVM, "em", 0, EM_SAVED_STATE_VERSION, 16,
                               NULL, NULL, NULL,
                               NULL, emR3Save, NULL,
                               NULL, emR3Load, NULL);
    if (RT_FAILURE(rc))
        return rc;

    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];

        pVCpu->em.s.offVMCPU = RT_OFFSETOF(VMCPU, em.s);

        pVCpu->em.s.enmState     = (i == 0) ? EMSTATE_NONE : EMSTATE_WAIT_SIPI;
        pVCpu->em.s.enmPrevState = EMSTATE_NONE;
        pVCpu->em.s.fForceRAW    = false;

        pVCpu->em.s.pCtx         = CPUMQueryGuestCtxPtr(pVCpu);
        pVCpu->em.s.pPatmGCState = PATMR3QueryGCStateHC(pVM);
        AssertMsg(pVCpu->em.s.pPatmGCState, ("PATMR3QueryGCStateHC failed!\n"));

        /* Force reset of the time slice. */
        pVCpu->em.s.u64TimeSliceStart = 0;

# define EM_REG_COUNTER(a, b, c) \
        rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, c, b, i); \
        AssertRC(rc);

# define EM_REG_COUNTER_USED(a, b, c) \
        rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, c, b, i); \
        AssertRC(rc);

# define EM_REG_PROFILE(a, b, c) \
        rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, i); \
        AssertRC(rc);

# define EM_REG_PROFILE_ADV(a, b, c) \
        rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE_ADV, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, i); \
        AssertRC(rc);

        /*
         * Statistics.
         */
#ifdef VBOX_WITH_STATISTICS
        PEMSTATS pStats;
        rc = MMHyperAlloc(pVM, sizeof(*pStats), 0, MM_TAG_EM, (void **)&pStats);
        if (RT_FAILURE(rc))
            return rc;

        pVCpu->em.s.pStatsR3 = pStats;
        pVCpu->em.s.pStatsR0 = MMHyperR3ToR0(pVM, pStats);
        pVCpu->em.s.pStatsRC = MMHyperR3ToRC(pVM, pStats);

        EM_REG_PROFILE(&pStats->StatRZEmulate, "/EM/CPU%d/RZ/Interpret", "Profiling of EMInterpretInstruction.");
        EM_REG_PROFILE(&pStats->StatR3Emulate, "/EM/CPU%d/R3/Interpret", "Profiling of EMInterpretInstruction.");

        EM_REG_PROFILE(&pStats->StatRZInterpretSucceeded, "/EM/CPU%d/RZ/Interpret/Success", "The number of times an instruction was successfully interpreted.");
        EM_REG_PROFILE(&pStats->StatR3InterpretSucceeded, "/EM/CPU%d/R3/Interpret/Success", "The number of times an instruction was successfully interpreted.");

        EM_REG_COUNTER_USED(&pStats->StatRZAnd, "/EM/CPU%d/RZ/Interpret/Success/And", "The number of times AND was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3And, "/EM/CPU%d/R3/Interpret/Success/And", "The number of times AND was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZAdd, "/EM/CPU%d/RZ/Interpret/Success/Add", "The number of times ADD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Add, "/EM/CPU%d/R3/Interpret/Success/Add", "The number of times ADD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZAdc, "/EM/CPU%d/RZ/Interpret/Success/Adc", "The number of times ADC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Adc, "/EM/CPU%d/R3/Interpret/Success/Adc", "The number of times ADC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZSub, "/EM/CPU%d/RZ/Interpret/Success/Sub", "The number of times SUB was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Sub, "/EM/CPU%d/R3/Interpret/Success/Sub", "The number of times SUB was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZCpuId, "/EM/CPU%d/RZ/Interpret/Success/CpuId", "The number of times CPUID was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3CpuId, "/EM/CPU%d/R3/Interpret/Success/CpuId", "The number of times CPUID was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZDec, "/EM/CPU%d/RZ/Interpret/Success/Dec", "The number of times DEC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Dec, "/EM/CPU%d/R3/Interpret/Success/Dec", "The number of times DEC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZHlt, "/EM/CPU%d/RZ/Interpret/Success/Hlt", "The number of times HLT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Hlt, "/EM/CPU%d/R3/Interpret/Success/Hlt", "The number of times HLT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZInc, "/EM/CPU%d/RZ/Interpret/Success/Inc", "The number of times INC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Inc, "/EM/CPU%d/R3/Interpret/Success/Inc", "The number of times INC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZInvlPg, "/EM/CPU%d/RZ/Interpret/Success/Invlpg", "The number of times INVLPG was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3InvlPg, "/EM/CPU%d/R3/Interpret/Success/Invlpg", "The number of times INVLPG was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZIret, "/EM/CPU%d/RZ/Interpret/Success/Iret", "The number of times IRET was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Iret, "/EM/CPU%d/R3/Interpret/Success/Iret", "The number of times IRET was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZLLdt, "/EM/CPU%d/RZ/Interpret/Success/LLdt", "The number of times LLDT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3LLdt, "/EM/CPU%d/R3/Interpret/Success/LLdt", "The number of times LLDT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZLIdt, "/EM/CPU%d/RZ/Interpret/Success/LIdt", "The number of times LIDT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3LIdt, "/EM/CPU%d/R3/Interpret/Success/LIdt", "The number of times LIDT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZLGdt, "/EM/CPU%d/RZ/Interpret/Success/LGdt", "The number of times LGDT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3LGdt, "/EM/CPU%d/R3/Interpret/Success/LGdt", "The number of times LGDT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZMov, "/EM/CPU%d/RZ/Interpret/Success/Mov", "The number of times MOV was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Mov, "/EM/CPU%d/R3/Interpret/Success/Mov", "The number of times MOV was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZMovCRx, "/EM/CPU%d/RZ/Interpret/Success/MovCRx", "The number of times MOV CRx was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3MovCRx, "/EM/CPU%d/R3/Interpret/Success/MovCRx", "The number of times MOV CRx was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZMovDRx, "/EM/CPU%d/RZ/Interpret/Success/MovDRx", "The number of times MOV DRx was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3MovDRx, "/EM/CPU%d/R3/Interpret/Success/MovDRx", "The number of times MOV DRx was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZOr, "/EM/CPU%d/RZ/Interpret/Success/Or", "The number of times OR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Or, "/EM/CPU%d/R3/Interpret/Success/Or", "The number of times OR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZPop, "/EM/CPU%d/RZ/Interpret/Success/Pop", "The number of times POP was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Pop, "/EM/CPU%d/R3/Interpret/Success/Pop", "The number of times POP was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZRdtsc, "/EM/CPU%d/RZ/Interpret/Success/Rdtsc", "The number of times RDTSC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Rdtsc, "/EM/CPU%d/R3/Interpret/Success/Rdtsc", "The number of times RDTSC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZRdpmc, "/EM/CPU%d/RZ/Interpret/Success/Rdpmc", "The number of times RDPMC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Rdpmc, "/EM/CPU%d/R3/Interpret/Success/Rdpmc", "The number of times RDPMC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZSti, "/EM/CPU%d/RZ/Interpret/Success/Sti", "The number of times STI was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Sti, "/EM/CPU%d/R3/Interpret/Success/Sti", "The number of times STI was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZXchg, "/EM/CPU%d/RZ/Interpret/Success/Xchg", "The number of times XCHG was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Xchg, "/EM/CPU%d/R3/Interpret/Success/Xchg", "The number of times XCHG was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZXor, "/EM/CPU%d/RZ/Interpret/Success/Xor", "The number of times XOR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Xor, "/EM/CPU%d/R3/Interpret/Success/Xor", "The number of times XOR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZMonitor, "/EM/CPU%d/RZ/Interpret/Success/Monitor", "The number of times MONITOR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Monitor, "/EM/CPU%d/R3/Interpret/Success/Monitor", "The number of times MONITOR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZMWait, "/EM/CPU%d/RZ/Interpret/Success/MWait", "The number of times MWAIT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3MWait, "/EM/CPU%d/R3/Interpret/Success/MWait", "The number of times MWAIT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZBtr, "/EM/CPU%d/RZ/Interpret/Success/Btr", "The number of times BTR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Btr, "/EM/CPU%d/R3/Interpret/Success/Btr", "The number of times BTR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZBts, "/EM/CPU%d/RZ/Interpret/Success/Bts", "The number of times BTS was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Bts, "/EM/CPU%d/R3/Interpret/Success/Bts", "The number of times BTS was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZBtc, "/EM/CPU%d/RZ/Interpret/Success/Btc", "The number of times BTC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Btc, "/EM/CPU%d/R3/Interpret/Success/Btc", "The number of times BTC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZCmpXchg, "/EM/CPU%d/RZ/Interpret/Success/CmpXchg", "The number of times CMPXCHG was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3CmpXchg, "/EM/CPU%d/R3/Interpret/Success/CmpXchg", "The number of times CMPXCHG was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZCmpXchg8b, "/EM/CPU%d/RZ/Interpret/Success/CmpXchg8b", "The number of times CMPXCHG8B was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3CmpXchg8b, "/EM/CPU%d/R3/Interpret/Success/CmpXchg8b", "The number of times CMPXCHG8B was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZXAdd, "/EM/CPU%d/RZ/Interpret/Success/XAdd", "The number of times XADD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3XAdd, "/EM/CPU%d/R3/Interpret/Success/XAdd", "The number of times XADD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Rdmsr, "/EM/CPU%d/R3/Interpret/Success/Rdmsr", "The number of times RDMSR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZRdmsr, "/EM/CPU%d/RZ/Interpret/Success/Rdmsr", "The number of times RDMSR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Wrmsr, "/EM/CPU%d/R3/Interpret/Success/Wrmsr", "The number of times WRMSR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZWrmsr, "/EM/CPU%d/RZ/Interpret/Success/Wrmsr", "The number of times WRMSR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3StosWD, "/EM/CPU%d/R3/Interpret/Success/Stoswd", "The number of times STOSWD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZStosWD, "/EM/CPU%d/RZ/Interpret/Success/Stoswd", "The number of times STOSWD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZWbInvd, "/EM/CPU%d/RZ/Interpret/Success/WbInvd", "The number of times WBINVD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3WbInvd, "/EM/CPU%d/R3/Interpret/Success/WbInvd", "The number of times WBINVD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZLmsw, "/EM/CPU%d/RZ/Interpret/Success/Lmsw", "The number of times LMSW was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Lmsw, "/EM/CPU%d/R3/Interpret/Success/Lmsw", "The number of times LMSW was successfully interpreted.");

        EM_REG_COUNTER(&pStats->StatRZInterpretFailed, "/EM/CPU%d/RZ/Interpret/Failed", "The number of times an instruction was not interpreted.");
        EM_REG_COUNTER(&pStats->StatR3InterpretFailed, "/EM/CPU%d/R3/Interpret/Failed", "The number of times an instruction was not interpreted.");

        EM_REG_COUNTER_USED(&pStats->StatRZFailedAnd, "/EM/CPU%d/RZ/Interpret/Failed/And", "The number of times AND was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedAnd, "/EM/CPU%d/R3/Interpret/Failed/And", "The number of times AND was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedCpuId, "/EM/CPU%d/RZ/Interpret/Failed/CpuId", "The number of times CPUID was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedCpuId, "/EM/CPU%d/R3/Interpret/Failed/CpuId", "The number of times CPUID was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedDec, "/EM/CPU%d/RZ/Interpret/Failed/Dec", "The number of times DEC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedDec, "/EM/CPU%d/R3/Interpret/Failed/Dec", "The number of times DEC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedHlt, "/EM/CPU%d/RZ/Interpret/Failed/Hlt", "The number of times HLT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedHlt, "/EM/CPU%d/R3/Interpret/Failed/Hlt", "The number of times HLT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedInc, "/EM/CPU%d/RZ/Interpret/Failed/Inc", "The number of times INC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedInc, "/EM/CPU%d/R3/Interpret/Failed/Inc", "The number of times INC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedInvlPg, "/EM/CPU%d/RZ/Interpret/Failed/InvlPg", "The number of times INVLPG was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedInvlPg, "/EM/CPU%d/R3/Interpret/Failed/InvlPg", "The number of times INVLPG was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedIret, "/EM/CPU%d/RZ/Interpret/Failed/Iret", "The number of times IRET was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedIret, "/EM/CPU%d/R3/Interpret/Failed/Iret", "The number of times IRET was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedLLdt, "/EM/CPU%d/RZ/Interpret/Failed/LLdt", "The number of times LLDT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedLLdt, "/EM/CPU%d/R3/Interpret/Failed/LLdt", "The number of times LLDT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedLIdt, "/EM/CPU%d/RZ/Interpret/Failed/LIdt", "The number of times LIDT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedLIdt, "/EM/CPU%d/R3/Interpret/Failed/LIdt", "The number of times LIDT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedLGdt, "/EM/CPU%d/RZ/Interpret/Failed/LGdt", "The number of times LGDT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedLGdt, "/EM/CPU%d/R3/Interpret/Failed/LGdt", "The number of times LGDT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedMov, "/EM/CPU%d/RZ/Interpret/Failed/Mov", "The number of times MOV was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMov, "/EM/CPU%d/R3/Interpret/Failed/Mov", "The number of times MOV was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedMovCRx, "/EM/CPU%d/RZ/Interpret/Failed/MovCRx", "The number of times MOV CRx was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMovCRx, "/EM/CPU%d/R3/Interpret/Failed/MovCRx", "The number of times MOV CRx was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedMovDRx, "/EM/CPU%d/RZ/Interpret/Failed/MovDRx", "The number of times MOV DRx was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMovDRx, "/EM/CPU%d/R3/Interpret/Failed/MovDRx", "The number of times MOV DRx was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedOr, "/EM/CPU%d/RZ/Interpret/Failed/Or", "The number of times OR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedOr, "/EM/CPU%d/R3/Interpret/Failed/Or", "The number of times OR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedPop, "/EM/CPU%d/RZ/Interpret/Failed/Pop", "The number of times POP was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedPop, "/EM/CPU%d/R3/Interpret/Failed/Pop", "The number of times POP was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedSti, "/EM/CPU%d/RZ/Interpret/Failed/Sti", "The number of times STI was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedSti, "/EM/CPU%d/R3/Interpret/Failed/Sti", "The number of times STI was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedXchg, "/EM/CPU%d/RZ/Interpret/Failed/Xchg", "The number of times XCHG was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedXchg, "/EM/CPU%d/R3/Interpret/Failed/Xchg", "The number of times XCHG was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedXor, "/EM/CPU%d/RZ/Interpret/Failed/Xor", "The number of times XOR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedXor, "/EM/CPU%d/R3/Interpret/Failed/Xor", "The number of times XOR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedMonitor, "/EM/CPU%d/RZ/Interpret/Failed/Monitor", "The number of times MONITOR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMonitor, "/EM/CPU%d/R3/Interpret/Failed/Monitor", "The number of times MONITOR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedMWait, "/EM/CPU%d/RZ/Interpret/Failed/MWait", "The number of times MWAIT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMWait, "/EM/CPU%d/R3/Interpret/Failed/MWait", "The number of times MWAIT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedRdtsc, "/EM/CPU%d/RZ/Interpret/Failed/Rdtsc", "The number of times RDTSC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedRdtsc, "/EM/CPU%d/R3/Interpret/Failed/Rdtsc", "The number of times RDTSC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedRdpmc, "/EM/CPU%d/RZ/Interpret/Failed/Rdpmc", "The number of times RDPMC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedRdpmc, "/EM/CPU%d/R3/Interpret/Failed/Rdpmc", "The number of times RDPMC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedRdmsr, "/EM/CPU%d/RZ/Interpret/Failed/Rdmsr", "The number of times RDMSR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedRdmsr, "/EM/CPU%d/R3/Interpret/Failed/Rdmsr", "The number of times RDMSR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedWrmsr, "/EM/CPU%d/RZ/Interpret/Failed/Wrmsr", "The number of times WRMSR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedWrmsr, "/EM/CPU%d/R3/Interpret/Failed/Wrmsr", "The number of times WRMSR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedLmsw, "/EM/CPU%d/RZ/Interpret/Failed/Lmsw", "The number of times LMSW was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedLmsw, "/EM/CPU%d/R3/Interpret/Failed/Lmsw", "The number of times LMSW was not interpreted.");

        EM_REG_COUNTER_USED(&pStats->StatRZFailedMisc, "/EM/CPU%d/RZ/Interpret/Failed/Misc", "The number of times some misc instruction was encountered.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMisc, "/EM/CPU%d/R3/Interpret/Failed/Misc", "The number of times some misc instruction was encountered.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedAdd, "/EM/CPU%d/RZ/Interpret/Failed/Add", "The number of times ADD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedAdd, "/EM/CPU%d/R3/Interpret/Failed/Add", "The number of times ADD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedAdc, "/EM/CPU%d/RZ/Interpret/Failed/Adc", "The number of times ADC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedAdc, "/EM/CPU%d/R3/Interpret/Failed/Adc", "The number of times ADC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedBtr, "/EM/CPU%d/RZ/Interpret/Failed/Btr", "The number of times BTR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedBtr, "/EM/CPU%d/R3/Interpret/Failed/Btr", "The number of times BTR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedBts, "/EM/CPU%d/RZ/Interpret/Failed/Bts", "The number of times BTS was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedBts, "/EM/CPU%d/R3/Interpret/Failed/Bts", "The number of times BTS was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedBtc, "/EM/CPU%d/RZ/Interpret/Failed/Btc", "The number of times BTC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedBtc, "/EM/CPU%d/R3/Interpret/Failed/Btc", "The number of times BTC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedCli, "/EM/CPU%d/RZ/Interpret/Failed/Cli", "The number of times CLI was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedCli, "/EM/CPU%d/R3/Interpret/Failed/Cli", "The number of times CLI was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedCmpXchg, "/EM/CPU%d/RZ/Interpret/Failed/CmpXchg", "The number of times CMPXCHG was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedCmpXchg, "/EM/CPU%d/R3/Interpret/Failed/CmpXchg", "The number of times CMPXCHG was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedCmpXchg8b, "/EM/CPU%d/RZ/Interpret/Failed/CmpXchg8b", "The number of times CMPXCHG8B was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedCmpXchg8b, "/EM/CPU%d/R3/Interpret/Failed/CmpXchg8b", "The number of times CMPXCHG8B was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedXAdd, "/EM/CPU%d/RZ/Interpret/Failed/XAdd", "The number of times XADD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedXAdd, "/EM/CPU%d/R3/Interpret/Failed/XAdd", "The number of times XADD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedMovNTPS, "/EM/CPU%d/RZ/Interpret/Failed/MovNTPS", "The number of times MOVNTPS was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMovNTPS, "/EM/CPU%d/R3/Interpret/Failed/MovNTPS", "The number of times MOVNTPS was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedStosWD, "/EM/CPU%d/RZ/Interpret/Failed/StosWD", "The number of times STOSWD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedStosWD, "/EM/CPU%d/R3/Interpret/Failed/StosWD", "The number of times STOSWD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedSub, "/EM/CPU%d/RZ/Interpret/Failed/Sub", "The number of times SUB was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedSub, "/EM/CPU%d/R3/Interpret/Failed/Sub", "The number of times SUB was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedWbInvd, "/EM/CPU%d/RZ/Interpret/Failed/WbInvd", "The number of times WBINVD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedWbInvd, "/EM/CPU%d/R3/Interpret/Failed/WbInvd", "The number of times WBINVD was not interpreted.");

        EM_REG_COUNTER_USED(&pStats->StatRZFailedUserMode, "/EM/CPU%d/RZ/Interpret/Failed/UserMode", "The number of rejections because of CPL.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedUserMode, "/EM/CPU%d/R3/Interpret/Failed/UserMode", "The number of rejections because of CPL.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedPrefix, "/EM/CPU%d/RZ/Interpret/Failed/Prefix", "The number of rejections because of prefix.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedPrefix, "/EM/CPU%d/R3/Interpret/Failed/Prefix", "The number of rejections because of prefix.");

        EM_REG_COUNTER_USED(&pStats->StatCli, "/EM/CPU%d/R3/PrivInst/Cli", "Number of cli instructions.");
        EM_REG_COUNTER_USED(&pStats->StatSti, "/EM/CPU%d/R3/PrivInst/Sti", "Number of sti instructions.");
        EM_REG_COUNTER_USED(&pStats->StatIn, "/EM/CPU%d/R3/PrivInst/In", "Number of in instructions.");
        EM_REG_COUNTER_USED(&pStats->StatOut, "/EM/CPU%d/R3/PrivInst/Out", "Number of out instructions.");
        EM_REG_COUNTER_USED(&pStats->StatIoRestarted, "/EM/CPU%d/R3/PrivInst/IoRestarted", "Number of restarted i/o instructions.");
        EM_REG_COUNTER_USED(&pStats->StatHlt, "/EM/CPU%d/R3/PrivInst/Hlt", "Number of hlt instructions not handled in GC because of PATM.");
        EM_REG_COUNTER_USED(&pStats->StatInvlpg, "/EM/CPU%d/R3/PrivInst/Invlpg", "Number of invlpg instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMisc, "/EM/CPU%d/R3/PrivInst/Misc", "Number of misc. instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[0], "/EM/CPU%d/R3/PrivInst/Mov CR0, X", "Number of mov CR0 write instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[1], "/EM/CPU%d/R3/PrivInst/Mov CR1, X", "Number of mov CR1 write instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[2], "/EM/CPU%d/R3/PrivInst/Mov CR2, X", "Number of mov CR2 write instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[3], "/EM/CPU%d/R3/PrivInst/Mov CR3, X", "Number of mov CR3 write instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[4], "/EM/CPU%d/R3/PrivInst/Mov CR4, X", "Number of mov CR4 write instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovReadCR[0], "/EM/CPU%d/R3/PrivInst/Mov X, CR0", "Number of mov CR0 read instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovReadCR[1], "/EM/CPU%d/R3/PrivInst/Mov X, CR1", "Number of mov CR1 read instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovReadCR[2], "/EM/CPU%d/R3/PrivInst/Mov X, CR2", "Number of mov CR2 read instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovReadCR[3], "/EM/CPU%d/R3/PrivInst/Mov X, CR3", "Number of mov CR3 read instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovReadCR[4], "/EM/CPU%d/R3/PrivInst/Mov X, CR4", "Number of mov CR4 read instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovDRx, "/EM/CPU%d/R3/PrivInst/MovDRx", "Number of mov DRx instructions.");
        EM_REG_COUNTER_USED(&pStats->StatIret, "/EM/CPU%d/R3/PrivInst/Iret", "Number of iret instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovLgdt, "/EM/CPU%d/R3/PrivInst/Lgdt", "Number of lgdt instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovLidt, "/EM/CPU%d/R3/PrivInst/Lidt", "Number of lidt instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovLldt, "/EM/CPU%d/R3/PrivInst/Lldt", "Number of lldt instructions.");
        EM_REG_COUNTER_USED(&pStats->StatSysEnter, "/EM/CPU%d/R3/PrivInst/Sysenter", "Number of sysenter instructions.");
        EM_REG_COUNTER_USED(&pStats->StatSysExit, "/EM/CPU%d/R3/PrivInst/Sysexit", "Number of sysexit instructions.");
        EM_REG_COUNTER_USED(&pStats->StatSysCall, "/EM/CPU%d/R3/PrivInst/Syscall", "Number of syscall instructions.");
        EM_REG_COUNTER_USED(&pStats->StatSysRet, "/EM/CPU%d/R3/PrivInst/Sysret", "Number of sysret instructions.");

        EM_REG_COUNTER(&pVCpu->em.s.StatTotalClis, "/EM/CPU%d/Cli/Total", "Total number of cli instructions executed.");
        pVCpu->em.s.pCliStatTree = 0;

        /* these should be considered for release statistics. */
        EM_REG_COUNTER(&pVCpu->em.s.StatIOEmu, "/PROF/CPU%d/EM/Emulation/IO", "Profiling of emR3RawExecuteIOInstruction.");
        EM_REG_COUNTER(&pVCpu->em.s.StatPrivEmu, "/PROF/CPU%d/EM/Emulation/Priv", "Profiling of emR3RawPrivileged.");
        EM_REG_PROFILE(&pVCpu->em.s.StatHwAccEntry, "/PROF/CPU%d/EM/HwAccEnter", "Profiling Hardware Accelerated Mode entry overhead.");
        EM_REG_PROFILE(&pVCpu->em.s.StatHwAccExec, "/PROF/CPU%d/EM/HwAccExec", "Profiling Hardware Accelerated Mode execution.");
        EM_REG_PROFILE(&pVCpu->em.s.StatREMEmu, "/PROF/CPU%d/EM/REMEmuSingle", "Profiling single instruction REM execution.");
        EM_REG_PROFILE(&pVCpu->em.s.StatREMExec, "/PROF/CPU%d/EM/REMExec", "Profiling REM execution.");
        EM_REG_PROFILE(&pVCpu->em.s.StatREMSync, "/PROF/CPU%d/EM/REMSync", "Profiling REM context syncing.");
        EM_REG_PROFILE(&pVCpu->em.s.StatRAWEntry, "/PROF/CPU%d/EM/RAWEnter", "Profiling Raw Mode entry overhead.");
        EM_REG_PROFILE(&pVCpu->em.s.StatRAWExec, "/PROF/CPU%d/EM/RAWExec", "Profiling Raw Mode execution.");
        EM_REG_PROFILE(&pVCpu->em.s.StatRAWTail, "/PROF/CPU%d/EM/RAWTail", "Profiling Raw Mode tail overhead.");

#endif /* VBOX_WITH_STATISTICS */

        EM_REG_COUNTER(&pVCpu->em.s.StatForcedActions, "/PROF/CPU%d/EM/ForcedActions", "Profiling forced action execution.");
        EM_REG_COUNTER(&pVCpu->em.s.StatHalted, "/PROF/CPU%d/EM/Halted", "Profiling halted state (VMR3WaitHalted).");
        EM_REG_PROFILE_ADV(&pVCpu->em.s.StatCapped, "/PROF/CPU%d/EM/Capped", "Profiling capped state (sleep).");
        EM_REG_COUNTER(&pVCpu->em.s.StatREMTotal, "/PROF/CPU%d/EM/REMTotal", "Profiling emR3RemExecute (excluding FFs).");
        EM_REG_COUNTER(&pVCpu->em.s.StatRAWTotal, "/PROF/CPU%d/EM/RAWTotal", "Profiling emR3RawExecute (excluding FFs).");

        EM_REG_PROFILE_ADV(&pVCpu->em.s.StatTotal, "/PROF/CPU%d/EM/Total", "Profiling EMR3ExecuteVM.");
    }

    return VINF_SUCCESS;
}


/**
 * Applies relocations to data and code managed by this
 * component. This function will be called at init and
 * whenever the VMM needs to relocate itself inside the GC.
 *
 * @param pVM The VM.
 */
VMMR3DECL(void) EMR3Relocate(PVM pVM)
{
    LogFlow(("EMR3Relocate\n"));
    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];
        if (pVCpu->em.s.pStatsR3)
            pVCpu->em.s.pStatsRC = MMHyperR3ToRC(pVM, pVCpu->em.s.pStatsR3);
    }
}


/**
 * Reset the EM state for a CPU.
 *
 * Called by EMR3Reset and hot plugging.
 *
 * @param pVCpu The virtual CPU.
 */
VMMR3DECL(void) EMR3ResetCpu(PVMCPU pVCpu)
{
    pVCpu->em.s.fForceRAW = false;

    /* VMR3Reset may return VINF_EM_RESET or VINF_EM_SUSPEND, so transition
       out of the HALTED state here so that enmPrevState doesn't end up as
       HALTED when EMR3Execute returns. */
    if (pVCpu->em.s.enmState == EMSTATE_HALTED)
    {
        Log(("EMR3ResetCpu: Cpu#%u %s -> %s\n", pVCpu->idCpu, emR3GetStateName(pVCpu->em.s.enmState), pVCpu->idCpu == 0 ? "EMSTATE_NONE" : "EMSTATE_WAIT_SIPI"));
        pVCpu->em.s.enmState = pVCpu->idCpu == 0 ? EMSTATE_NONE : EMSTATE_WAIT_SIPI;
    }
}


/**
 * Reset notification.
 *
 * @param pVM The VM handle.
 */
VMMR3DECL(void) EMR3Reset(PVM pVM)
{
    Log(("EMR3Reset: \n"));
    for (VMCPUID i = 0; i < pVM->cCpus; i++)
        EMR3ResetCpu(&pVM->aCpus[i]);
}


/**
 * Terminates the EM.
 *
 * Termination means cleaning up and freeing all resources,
 * the VM itself being at this point powered off or suspended.
 *
 * @returns VBox status code.
 * @param pVM The VM to operate on.
 */
VMMR3DECL(int) EMR3Term(PVM pVM)
{
    AssertMsg(pVM->em.s.offVM, ("bad init order!\n"));

    PDMR3CritSectDelete(&pVM->em.s.CritSectREM);
    return VINF_SUCCESS;
}


/**
 * Execute state save operation.
 *
 * @returns VBox status code.
 * @param pVM VM Handle.
 * @param pSSM SSM operation handle.
 */
static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM)
{
    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];

        int rc = SSMR3PutBool(pSSM, pVCpu->em.s.fForceRAW);
        AssertRCReturn(rc, rc);

        Assert(pVCpu->em.s.enmState == EMSTATE_SUSPENDED);
        Assert(pVCpu->em.s.enmPrevState != EMSTATE_SUSPENDED);
        rc = SSMR3PutU32(pSSM, pVCpu->em.s.enmPrevState);
        AssertRCReturn(rc, rc);

        /* Save mwait state. */
        rc = SSMR3PutU32(pSSM, pVCpu->em.s.mwait.fWait);
        AssertRCReturn(rc, rc);
        rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.mwait.uMWaitEAX);
        AssertRCReturn(rc, rc);
        rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.mwait.uMWaitECX);
        AssertRCReturn(rc, rc);
        rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.mwait.uMonitorEAX);
        AssertRCReturn(rc, rc);
        rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.mwait.uMonitorECX);
        AssertRCReturn(rc, rc);
        rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.mwait.uMonitorEDX);
        AssertRCReturn(rc, rc);
    }
    return VINF_SUCCESS;
}


/**
 * Execute state load operation.
 *
 * @returns VBox status code.
 * @param pVM VM Handle.
 * @param pSSM SSM operation handle.
 * @param uVersion Data layout version.
 * @param uPass The data pass.
 */
static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
{
    /*
     * Validate version.
     */
    if (   uVersion != EM_SAVED_STATE_VERSION
        && uVersion != EM_SAVED_STATE_VERSION_PRE_MWAIT
        && uVersion != EM_SAVED_STATE_VERSION_PRE_SMP)
    {
        AssertMsgFailed(("emR3Load: Invalid version uVersion=%d (current %d)!\n", uVersion, EM_SAVED_STATE_VERSION));
        return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
    }
    Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);

    /*
     * Load the saved state.
     */
    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];

        int rc = SSMR3GetBool(pSSM, &pVCpu->em.s.fForceRAW);
        if (RT_FAILURE(rc))
            pVCpu->em.s.fForceRAW = false;
        AssertRCReturn(rc, rc);

        if (uVersion > EM_SAVED_STATE_VERSION_PRE_SMP)
        {
            AssertCompile(sizeof(pVCpu->em.s.enmPrevState) == sizeof(uint32_t));
            rc = SSMR3GetU32(pSSM, (uint32_t *)&pVCpu->em.s.enmPrevState);
            AssertRCReturn(rc, rc);
            Assert(pVCpu->em.s.enmPrevState != EMSTATE_SUSPENDED);

            pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
        }
        if (uVersion > EM_SAVED_STATE_VERSION_PRE_MWAIT)
        {
            /* Load mwait state. */
            rc = SSMR3GetU32(pSSM, &pVCpu->em.s.mwait.fWait);
            AssertRCReturn(rc, rc);
            rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.mwait.uMWaitEAX);
            AssertRCReturn(rc, rc);
            rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.mwait.uMWaitECX);
            AssertRCReturn(rc, rc);
            rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.mwait.uMonitorEAX);
            AssertRCReturn(rc, rc);
            rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.mwait.uMonitorECX);
            AssertRCReturn(rc, rc);
            rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.mwait.uMonitorEDX);
            AssertRCReturn(rc, rc);
        }

        Assert(!pVCpu->em.s.pCliStatTree);
    }
    return VINF_SUCCESS;
}


/**
 * Argument packet for emR3SetExecutionPolicy.
 */
struct EMR3SETEXECPOLICYARGS
{
    EMEXECPOLICY enmPolicy;
    bool fEnforce;
};


/**
 * @callback_method_impl{FNVMMEMTRENDEZVOUS, Rendezvous callback for EMR3SetExecutionPolicy.}
 */
static DECLCALLBACK(VBOXSTRICTRC) emR3SetExecutionPolicy(PVM pVM, PVMCPU pVCpu, void *pvUser)
{
    /*
     * Only the first CPU changes the variables.
     */
    if (pVCpu->idCpu == 0)
    {
        struct EMR3SETEXECPOLICYARGS *pArgs = (struct EMR3SETEXECPOLICYARGS *)pvUser;
        switch (pArgs->enmPolicy)
        {
            case EMEXECPOLICY_RECOMPILE_RING0:
                pVM->fRecompileSupervisor = pArgs->fEnforce;
                break;
            case EMEXECPOLICY_RECOMPILE_RING3:
                pVM->fRecompileUser = pArgs->fEnforce;
                break;
            default:
                AssertFailedReturn(VERR_INVALID_PARAMETER);
        }
        Log(("emR3SetExecutionPolicy: fRecompileUser=%RTbool fRecompileSupervisor=%RTbool\n",
             pVM->fRecompileUser, pVM->fRecompileSupervisor));
    }

    /*
     * Force rescheduling if in RAW, HWACCM or REM.
     */
    return pVCpu->em.s.enmState == EMSTATE_RAW
        || pVCpu->em.s.enmState == EMSTATE_HWACC
        || pVCpu->em.s.enmState == EMSTATE_REM
         ? VINF_EM_RESCHEDULE
         : VINF_SUCCESS;
}


/**
 * Changes the execution scheduling policy.
 *
 * This is used to enable or disable raw-mode / hardware-virtualization
 * execution of user and supervisor code.
 *
 * @returns VINF_SUCCESS on success.
 * @returns VINF_EM_RESCHEDULE if a rescheduling might be required.
 * @returns VERR_INVALID_PARAMETER on an invalid enmPolicy value.
 *
 * @param pVM The VM to operate on.
 * @param enmPolicy The scheduling policy to change.
 * @param fEnforce Whether to enforce the policy or not.
 */
VMMR3DECL(int) EMR3SetExecutionPolicy(PVM pVM, EMEXECPOLICY enmPolicy, bool fEnforce)
{
    VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
    AssertReturn(enmPolicy > EMEXECPOLICY_INVALID && enmPolicy < EMEXECPOLICY_END, VERR_INVALID_PARAMETER);

    struct EMR3SETEXECPOLICYARGS Args = { enmPolicy, fEnforce };
    return VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING, emR3SetExecutionPolicy, &Args);
}
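
/*
 * Editor's note: illustrative usage of EMR3SetExecutionPolicy from some
 * ring-3 caller; a sketch only, not part of the original file.
 */
#if 0 /* illustration only, never compiled */
    /* Force ring-3 (user) guest code through the recompiler. */
    int rc = EMR3SetExecutionPolicy(pVM, EMEXECPOLICY_RECOMPILE_RING3, true /* fEnforce */);
    /* VINF_EM_RESCHEDULE just means an EMT will pick a new execution mode. */
    AssertMsg(rc == VINF_SUCCESS || rc == VINF_EM_RESCHEDULE, ("%Rrc\n", rc));
#endif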


/**
 * Raise a fatal error.
 *
 * Safely terminate the VM with full state report and stuff. This function
 * will naturally never return.
 *
 * @param pVCpu VMCPU handle.
 * @param rc VBox status code.
 */
VMMR3DECL(void) EMR3FatalError(PVMCPU pVCpu, int rc)
{
    pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
    longjmp(pVCpu->em.s.u.FatalLongJump, rc);
    AssertReleaseMsgFailed(("longjmp returned!\n"));
}


/**
 * Gets the EM state name.
 *
 * @returns Pointer to the read-only state name.
 * @param enmState The state.
 */
static const char *emR3GetStateName(EMSTATE enmState)
{
    switch (enmState)
    {
        case EMSTATE_NONE:              return "EMSTATE_NONE";
        case EMSTATE_RAW:               return "EMSTATE_RAW";
        case EMSTATE_HWACC:             return "EMSTATE_HWACC";
        case EMSTATE_REM:               return "EMSTATE_REM";
        case EMSTATE_HALTED:            return "EMSTATE_HALTED";
        case EMSTATE_WAIT_SIPI:         return "EMSTATE_WAIT_SIPI";
        case EMSTATE_SUSPENDED:         return "EMSTATE_SUSPENDED";
        case EMSTATE_TERMINATING:       return "EMSTATE_TERMINATING";
        case EMSTATE_DEBUG_GUEST_RAW:   return "EMSTATE_DEBUG_GUEST_RAW";
        case EMSTATE_DEBUG_GUEST_REM:   return "EMSTATE_DEBUG_GUEST_REM";
        case EMSTATE_DEBUG_HYPER:       return "EMSTATE_DEBUG_HYPER";
        case EMSTATE_GURU_MEDITATION:   return "EMSTATE_GURU_MEDITATION";
        default:                        return "Unknown!";
    }
}

/**
 * Debug loop.
 *
 * @returns VBox status code for EM.
 * @param pVM VM handle.
 * @param pVCpu VMCPU handle.
 * @param rc Current EM VBox status code.
 */
static int emR3Debug(PVM pVM, PVMCPU pVCpu, int rc)
{
    for (;;)
    {
        Log(("emR3Debug: rc=%Rrc\n", rc));
        const int rcLast = rc;

        /*
         * Debug related RC.
         */
        switch (rc)
        {
            /*
             * Single step an instruction.
             */
            case VINF_EM_DBG_STEP:
                if (   pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_RAW
                    || pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER
                    || pVCpu->em.s.fForceRAW /* paranoia */)
                    rc = emR3RawStep(pVM, pVCpu);
                else
                {
                    Assert(pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_REM);
                    rc = emR3RemStep(pVM, pVCpu);
                }
                break;

            /*
             * Simple events: stepped, breakpoint, stop/assertion.
             */
            case VINF_EM_DBG_STEPPED:
                rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED);
                break;

            case VINF_EM_DBG_BREAKPOINT:
                rc = DBGFR3EventBreakpoint(pVM, DBGFEVENT_BREAKPOINT);
                break;

            case VINF_EM_DBG_STOP:
                rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, NULL, 0, NULL, NULL);
                break;

            case VINF_EM_DBG_HYPER_STEPPED:
                rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED_HYPER);
                break;

            case VINF_EM_DBG_HYPER_BREAKPOINT:
                rc = DBGFR3EventBreakpoint(pVM, DBGFEVENT_BREAKPOINT_HYPER);
                break;

            case VINF_EM_DBG_HYPER_ASSERTION:
                RTPrintf("\nVINF_EM_DBG_HYPER_ASSERTION:\n%s%s\n", VMMR3GetRZAssertMsg1(pVM), VMMR3GetRZAssertMsg2(pVM));
                rc = DBGFR3EventAssertion(pVM, DBGFEVENT_ASSERTION_HYPER, VMMR3GetRZAssertMsg1(pVM), VMMR3GetRZAssertMsg2(pVM));
                break;

            /*
             * Guru meditation.
             */
            case VERR_VMM_RING0_ASSERTION: /** @todo Make a guru meditation event! */
                rc = DBGFR3EventSrc(pVM, DBGFEVENT_FATAL_ERROR, "VERR_VMM_RING0_ASSERTION", 0, NULL, NULL);
                break;
            case VERR_REM_TOO_MANY_TRAPS: /** @todo Make a guru meditation event! */
                rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, "VERR_REM_TOO_MANY_TRAPS", 0, NULL, NULL);
                break;

            default: /** @todo don't use default for guru, but make special error codes! */
                rc = DBGFR3Event(pVM, DBGFEVENT_FATAL_ERROR);
                break;
        }

        /*
         * Process the result.
         */
        do
        {
            switch (rc)
            {
                /*
                 * Continue the debugging loop.
                 */
                case VINF_EM_DBG_STEP:
                case VINF_EM_DBG_STOP:
                case VINF_EM_DBG_STEPPED:
                case VINF_EM_DBG_BREAKPOINT:
                case VINF_EM_DBG_HYPER_STEPPED:
                case VINF_EM_DBG_HYPER_BREAKPOINT:
                case VINF_EM_DBG_HYPER_ASSERTION:
                    break;

                /*
                 * Resuming execution (in some form) has to be done here if we got
                 * a hypervisor debug event.
                 */
                case VINF_SUCCESS:
                case VINF_EM_RESUME:
                case VINF_EM_SUSPEND:
                case VINF_EM_RESCHEDULE:
                case VINF_EM_RESCHEDULE_RAW:
                case VINF_EM_RESCHEDULE_REM:
                case VINF_EM_HALT:
                    if (pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER)
                    {
                        rc = emR3RawResumeHyper(pVM, pVCpu);
                        if (rc != VINF_SUCCESS && RT_SUCCESS(rc))
                            continue;
                    }
                    if (rc == VINF_SUCCESS)
                        rc = VINF_EM_RESCHEDULE;
                    return rc;

                /*
                 * The debugger isn't attached.
                 * We'll simply turn the thing off since that's the easiest thing to do.
                 */
                case VERR_DBGF_NOT_ATTACHED:
                    switch (rcLast)
                    {
                        case VINF_EM_DBG_HYPER_STEPPED:
                        case VINF_EM_DBG_HYPER_BREAKPOINT:
                        case VINF_EM_DBG_HYPER_ASSERTION:
                        case VERR_TRPM_PANIC:
                        case VERR_TRPM_DONT_PANIC:
                        case VERR_VMM_RING0_ASSERTION:
                        case VERR_VMM_HYPER_CR3_MISMATCH:
                        case VERR_VMM_RING3_CALL_DISABLED:
                            return rcLast;
                    }
                    return VINF_EM_OFF;

                /*
                 * Status codes terminating the VM in one or another sense.
                 */
                case VINF_EM_TERMINATE:
                case VINF_EM_OFF:
                case VINF_EM_RESET:
                case VINF_EM_NO_MEMORY:
                case VINF_EM_RAW_STALE_SELECTOR:
                case VINF_EM_RAW_IRET_TRAP:
                case VERR_TRPM_PANIC:
                case VERR_TRPM_DONT_PANIC:
                case VERR_VMM_RING0_ASSERTION:
                case VERR_VMM_HYPER_CR3_MISMATCH:
                case VERR_VMM_RING3_CALL_DISABLED:
                case VERR_INTERNAL_ERROR:
                case VERR_INTERNAL_ERROR_2:
                case VERR_INTERNAL_ERROR_3:
                case VERR_INTERNAL_ERROR_4:
                case VERR_INTERNAL_ERROR_5:
                case VERR_IPE_UNEXPECTED_STATUS:
                case VERR_IPE_UNEXPECTED_INFO_STATUS:
                case VERR_IPE_UNEXPECTED_ERROR_STATUS:
                    return rc;

                /*
                 * The rest is unexpected, and will keep us here.
                 */
                default:
                    AssertMsgFailed(("Unexpected rc %Rrc!\n", rc));
                    break;
            }
        } while (false);
    } /* debug for ever */
}

/**
 * Steps recompiled code.
 *
 * @returns VBox status code. The most important ones are: VINF_EM_STEP_EVENT,
 *          VINF_EM_RESCHEDULE, VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
 *
 * @param pVM VM handle.
 * @param pVCpu VMCPU handle.
 */
static int emR3RemStep(PVM pVM, PVMCPU pVCpu)
{
    LogFlow(("emR3RemStep: cs:eip=%04x:%08x\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));

    EMRemLock(pVM);

    /*
     * Switch to REM, step instruction, switch back.
     */
    int rc = REMR3State(pVM, pVCpu);
    if (RT_SUCCESS(rc))
    {
        rc = REMR3Step(pVM, pVCpu);
        REMR3StateBack(pVM, pVCpu);
    }
    EMRemUnlock(pVM);

    LogFlow(("emR3RemStep: returns %Rrc cs:eip=%04x:%08x\n", rc, CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
    return rc;
}


/**
 * emR3RemExecute helper that syncs the state back from REM and leaves the REM
 * critical section.
 *
 * @returns false - new fInREMState value.
 * @param pVM The VM handle.
 * @param pVCpu The virtual CPU handle.
 */
912DECLINLINE(bool) emR3RemExecuteSyncBack(PVM pVM, PVMCPU pVCpu)
913{
914 STAM_PROFILE_START(&pVCpu->em.s.StatREMSync, a);
915 REMR3StateBack(pVM, pVCpu);
916 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMSync, a);
917
918 EMRemUnlock(pVM);
919 return false;
920}
921
922
923/**
924 * Executes recompiled code.
925 *
926 * This function contains the recompiler version of the inner
927 * execution loop (the outer loop being in EMR3ExecuteVM()).
928 *
929 * @returns VBox status code. The most important ones are: VINF_EM_RESCHEDULE,
930 * VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
931 *
932 * @param pVM VM handle.
933 * @param pVCpu VMCPU handle.
934 * @param pfFFDone Where to store an indicator telling whether or not
935 * FFs were done before returning.
936 *
937 */
938static int emR3RemExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
939{
940#ifdef LOG_ENABLED
941 PCPUMCTX pCtx = pVCpu->em.s.pCtx;
942 uint32_t cpl = CPUMGetGuestCPL(pVCpu, CPUMCTX2CORE(pCtx));
943
944 if (pCtx->eflags.Bits.u1VM)
945 Log(("EMV86: %04X:%08X IF=%d\n", pCtx->cs, pCtx->eip, pCtx->eflags.Bits.u1IF));
946 else
947 Log(("EMR%d: %04X:%08X ESP=%08X IF=%d CR0=%x eflags=%x\n", cpl, pCtx->cs, pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF, (uint32_t)pCtx->cr0, pCtx->eflags.u));
948#endif
949 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatREMTotal, a);
950
951#if defined(VBOX_STRICT) && defined(DEBUG_bird)
952 AssertMsg( VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)
953 || !MMHyperIsInsideArea(pVM, CPUMGetGuestEIP(pVCpu)), /** @todo #1419 - get flat address. */
954 ("cs:eip=%RX16:%RX32\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
955#endif
956
957 /*
958 * Spin till we get a forced action which returns anything but VINF_SUCCESS
959 * or the REM suggests raw-mode execution.
960 */
961 *pfFFDone = false;
962 bool fInREMState = false;
963 int rc = VINF_SUCCESS;
964 for (;;)
965 {
966 /*
967 * Lock REM and update the state if not already in sync.
968 *
969 * Note! Big lock, but you are not supposed to own any lock when
970 * coming in here.
971 */
972 if (!fInREMState)
973 {
974 EMRemLock(pVM);
975 STAM_PROFILE_START(&pVCpu->em.s.StatREMSync, b);
976
977 /* Flush the recompiler translation blocks if the VCPU has changed,
978 also force a full CPU state resync. */
979 if (pVM->em.s.idLastRemCpu != pVCpu->idCpu)
980 {
981 REMFlushTBs(pVM);
982 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
983 }
984 pVM->em.s.idLastRemCpu = pVCpu->idCpu;
985
986 rc = REMR3State(pVM, pVCpu);
987
988 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMSync, b);
989 if (RT_FAILURE(rc))
990 break;
991 fInREMState = true;
992
993 /*
994 * We might have missed the raising of VMREQ, TIMER and some other
995 * important FFs while we were busy switching the state. So, check again.
996 */
997 if ( VM_FF_ISPENDING(pVM, VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_DBGF | VM_FF_CHECK_VM_STATE | VM_FF_RESET)
998 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_TIMER | VMCPU_FF_REQUEST))
999 {
1000 LogFlow(("emR3RemExecute: Skipping run, because FF is set. %#x\n", pVM->fGlobalForcedActions));
1001 goto l_REMDoForcedActions;
1002 }
1003 }
1004
1005
1006 /*
1007 * Execute REM.
1008 */
1009 if (RT_LIKELY(EMR3IsExecutionAllowed(pVM, pVCpu)))
1010 {
1011 STAM_PROFILE_START(&pVCpu->em.s.StatREMExec, c);
1012 rc = REMR3Run(pVM, pVCpu);
1013 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMExec, c);
1014 }
1015 else
1016 {
1017 /* Give up this time slice; virtual time continues */
1018 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatCapped, u);
1019 RTThreadSleep(5);
1020 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatCapped, u);
1021 rc = VINF_SUCCESS;
1022 }
1023
1024 /*
1025 * Deal with high priority post execution FFs before doing anything
1026 * else. Sync back the state and leave the lock to be on the safe side.
1027 */
1028 if ( VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_POST_MASK)
1029 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_MASK))
1030 {
1031 fInREMState = emR3RemExecuteSyncBack(pVM, pVCpu);
1032 rc = emR3HighPriorityPostForcedActions(pVM, pVCpu, rc);
1033 }
1034
1035 /*
1036 * Process the returned status code.
1037 */
1038 if (rc != VINF_SUCCESS)
1039 {
1040 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
1041 break;
1042 if (rc != VINF_REM_INTERRUPED_FF)
1043 {
1044 /*
1045 * Anything which is not known to us means an internal error
1046 * and the termination of the VM!
1047 */
1048 AssertMsg(rc == VERR_REM_TOO_MANY_TRAPS, ("Unknown GC return code: %Rra\n", rc));
1049 break;
1050 }
1051 }
1052
1053
1054 /*
1055 * Check and execute forced actions.
1056 *
1057 * Sync back the VM state and leave the lock before calling any of
1058 * these, you never know what's going to happen here.
1059 */
1060#ifdef VBOX_HIGH_RES_TIMERS_HACK
1061 TMTimerPollVoid(pVM, pVCpu);
1062#endif
1063 AssertCompile((VMCPU_FF_ALL_REM_MASK & ~(VMCPU_FF_CSAM_PENDING_ACTION | VMCPU_FF_CSAM_SCAN_PAGE)) & VMCPU_FF_TIMER);
1064 if ( VM_FF_ISPENDING(pVM, VM_FF_ALL_REM_MASK)
1065 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_ALL_REM_MASK & ~(VMCPU_FF_CSAM_PENDING_ACTION | VMCPU_FF_CSAM_SCAN_PAGE)))
1066 {
1067l_REMDoForcedActions:
1068 if (fInREMState)
1069 fInREMState = emR3RemExecuteSyncBack(pVM, pVCpu);
1070 STAM_REL_PROFILE_ADV_SUSPEND(&pVCpu->em.s.StatREMTotal, a);
1071 rc = emR3ForcedActions(pVM, pVCpu, rc);
1072 STAM_REL_PROFILE_ADV_RESUME(&pVCpu->em.s.StatREMTotal, a);
1073 if ( rc != VINF_SUCCESS
1074 && rc != VINF_EM_RESCHEDULE_REM)
1075 {
1076 *pfFFDone = true;
1077 break;
1078 }
1079 }
1080
1081 } /* The Inner Loop, recompiled execution mode version. */
1082
1083
1084 /*
1085 * Returning. Sync back the VM state if required.
1086 */
1087 if (fInREMState)
1088 fInREMState = emR3RemExecuteSyncBack(pVM, pVCpu);
1089
1090 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatREMTotal, a);
1091 return rc;
1092}
1093
1094
1095#ifdef DEBUG
1096
1097int emR3SingleStepExecRem(PVM pVM, PVMCPU pVCpu, uint32_t cIterations)
1098{
1099 EMSTATE enmOldState = pVCpu->em.s.enmState;
1100
1101 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_REM;
1102
1103 Log(("Single step BEGIN:\n"));
1104 for (uint32_t i = 0; i < cIterations; i++)
1105 {
1106 DBGFR3PrgStep(pVCpu);
1107 DBGFR3DisasInstrCurrentLog(pVCpu, "RSS: ");
1108 emR3RemStep(pVM, pVCpu);
1109 if (emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx) != EMSTATE_REM)
1110 break;
1111 }
1112 Log(("Single step END:\n"));
1113 CPUMSetGuestEFlags(pVCpu, CPUMGetGuestEFlags(pVCpu) & ~X86_EFL_TF);
1114 pVCpu->em.s.enmState = enmOldState;
1115 return VINF_EM_RESCHEDULE;
1116}
1117
1118#endif /* DEBUG */
1119
1120
1121/**
1122 * Decides whether to execute RAW, HWACC or REM.
1123 *
1124 * @returns new EM state
1125 * @param pVM The VM.
1126 * @param pVCpu The VMCPU handle.
1127 * @param pCtx The CPU context.
1128 */
1129EMSTATE emR3Reschedule(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
1130{
1131#ifdef IEM_VERIFICATION_MODE
1132 return EMSTATE_REM;
1133#else
1134
1135 /*
1136 * When forcing raw-mode execution, things are simple.
1137 */
1138 if (pVCpu->em.s.fForceRAW)
1139 return EMSTATE_RAW;
1140
1141 /*
1142 * We stay in the wait for SIPI state unless explicitly told otherwise.
1143 */
1144 if (pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI)
1145 return EMSTATE_WAIT_SIPI;
1146
1147 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
1148 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
1149 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
1150
1151 X86EFLAGS EFlags = pCtx->eflags;
1152 if (HWACCMIsEnabled(pVM))
1153 {
1154 /*
1155 * Hardware accelerated raw-mode:
1156 *
1157 * Typically only 32-bit protected mode code, with paging enabled, is
1158 * allowed here.
1159 */
1160 if ( EMIsHwVirtExecutionEnabled(pVM)
1161 && HWACCMR3CanExecuteGuest(pVM, pCtx))
1162 return EMSTATE_HWACC;
1163
1164 /*
1165 * Note! Raw mode and hw accelerated mode are incompatible. The latter
1166 * turns off monitoring features essential for raw mode!
1167 */
1168 return EMSTATE_REM;
1169 }
1170
1171 /*
1172 * Standard raw-mode:
1173 *
1174 * Here we only support 16 & 32 bit protected mode ring-3 code that has no I/O privileges,
1175 * or 32-bit protected mode ring-0 code.
1176 *
1177 * The tests are ordered by the likelihood of being true during normal execution.
1178 */
1179 if (EFlags.u32 & (X86_EFL_TF /* | HF_INHIBIT_IRQ_MASK*/))
1180 {
1181 Log2(("raw mode refused: EFlags=%#x\n", EFlags.u32));
1182 return EMSTATE_REM;
1183 }
1184
1185# ifndef VBOX_RAW_V86
1186 if (EFlags.u32 & X86_EFL_VM) {
1187 Log2(("raw mode refused: VM_MASK\n"));
1188 return EMSTATE_REM;
1189 }
1190# endif
1191
1192 /** @todo check up on the X86_CR0_AM flag with respect to raw mode!!! We're probably not emulating it right! */
1193 uint32_t u32CR0 = pCtx->cr0;
1194 if ((u32CR0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
1195 {
1196 //Log2(("raw mode refused: %s%s%s\n", (u32CR0 & X86_CR0_PG) ? "" : " !PG", (u32CR0 & X86_CR0_PE) ? "" : " !PE", (u32CR0 & X86_CR0_AM) ? "" : " !AM"));
1197 return EMSTATE_REM;
1198 }
1199
1200 if (pCtx->cr4 & X86_CR4_PAE)
1201 {
1202 uint32_t u32Dummy, u32Features;
1203
1204 CPUMGetGuestCpuId(pVCpu, 1, &u32Dummy, &u32Dummy, &u32Dummy, &u32Features);
1205 if (!(u32Features & X86_CPUID_FEATURE_EDX_PAE))
1206 return EMSTATE_REM;
1207 }
1208
1209 unsigned uSS = pCtx->ss;
1210 if ( pCtx->eflags.Bits.u1VM
1211 || (uSS & X86_SEL_RPL) == 3)
1212 {
1213 if (!EMIsRawRing3Enabled(pVM))
1214 return EMSTATE_REM;
1215
1216 if (!(EFlags.u32 & X86_EFL_IF))
1217 {
1218 Log2(("raw mode refused: IF (RawR3)\n"));
1219 return EMSTATE_REM;
1220 }
1221
1222 if (!(u32CR0 & X86_CR0_WP) && EMIsRawRing0Enabled(pVM))
1223 {
1224 Log2(("raw mode refused: CR0.WP + RawR0\n"));
1225 return EMSTATE_REM;
1226 }
1227 }
1228 else
1229 {
1230 if (!EMIsRawRing0Enabled(pVM))
1231 return EMSTATE_REM;
1232
1233 /* Only ring 0 supervisor code. */
1234 if ((uSS & X86_SEL_RPL) != 0)
1235 {
1236 Log2(("raw r0 mode refused: CPL %d\n", uSS & X86_SEL_RPL));
1237 return EMSTATE_REM;
1238 }
1239
1240 // Let's start with pure 32 bits ring 0 code first
1241 /** @todo What's pure 32-bit mode? flat? */
1242 if ( !(pCtx->ssHid.Attr.n.u1DefBig)
1243 || !(pCtx->csHid.Attr.n.u1DefBig))
1244 {
1245 Log2(("raw r0 mode refused: SS/CS not 32bit\n"));
1246 return EMSTATE_REM;
1247 }
1248
1249 /* Write protection must be turned on, or else the guest can overwrite our hypervisor code and data. */
1250 if (!(u32CR0 & X86_CR0_WP))
1251 {
1252 Log2(("raw r0 mode refused: CR0.WP=0!\n"));
1253 return EMSTATE_REM;
1254 }
1255
1256 if (PATMShouldUseRawMode(pVM, (RTGCPTR)pCtx->eip))
1257 {
1258 Log2(("raw r0 mode forced: patch code\n"));
1259 return EMSTATE_RAW;
1260 }
1261
1262# if !defined(VBOX_ALLOW_IF0) && !defined(VBOX_RUN_INTERRUPT_GATE_HANDLERS)
1263 if (!(EFlags.u32 & X86_EFL_IF))
1264 {
1265 ////Log2(("R0: IF=0 VIF=%d %08X\n", eip, pVMeflags));
1266 //Log2(("RR0: Interrupts turned off; fall back to emulation\n"));
1267 return EMSTATE_REM;
1268 }
1269# endif
1270
1271 /** @todo still necessary??? */
1272 if (EFlags.Bits.u2IOPL != 0)
1273 {
1274 Log2(("raw r0 mode refused: IOPL %d\n", EFlags.Bits.u2IOPL));
1275 return EMSTATE_REM;
1276 }
1277 }
1278
1279 Assert(PGMPhysIsA20Enabled(pVCpu));
1280 return EMSTATE_RAW;
1281#endif /* !IEM_VERIFICATION_MODE */
1282
1283}
1284
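/*
 * A worked example of the rules above (illustrative only): 32-bit ring-0
 * guest code with CR0.PG+PE and CR0.WP set, IF=1 and IOPL=0 qualifies for
 * EMSTATE_RAW (when raw ring-0 execution is enabled); the same code in V86
 * mode or with paging disabled falls back to EMSTATE_REM; and any context
 * accepted by HWACCMR3CanExecuteGuest() goes to EMSTATE_HWACC when hardware
 * virtualization is enabled.
 */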
1285
1286/**
1287 * Executes all high priority post execution force actions.
1288 *
1289 * @returns rc or a fatal status code.
1290 *
1291 * @param pVM VM handle.
1292 * @param pVCpu VMCPU handle.
1293 * @param rc The current rc.
1294 */
1295int emR3HighPriorityPostForcedActions(PVM pVM, PVMCPU pVCpu, int rc)
1296{
1297 if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PDM_CRITSECT))
1298 PDMCritSectFF(pVCpu);
1299
1300 if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_CSAM_PENDING_ACTION))
1301 CSAMR3DoPendingAction(pVM, pVCpu);
1302
1303 if (VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY))
1304 {
1305 if ( rc > VINF_EM_NO_MEMORY
1306 && rc <= VINF_EM_LAST)
1307 rc = VINF_EM_NO_MEMORY;
1308 }
1309
1310 return rc;
1311}
1312
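/*
 * Example of the VM_FF_PGM_NO_MEMORY clamping above (illustrative; assumes
 * the usual ordering where the reschedule codes are numerically above
 * VINF_EM_NO_MEMORY): if the caller passes in rc = VINF_EM_RESCHEDULE_REM
 * while the VM is out of memory, rc falls within (VINF_EM_NO_MEMORY,
 * VINF_EM_LAST] and is downgraded to VINF_EM_NO_MEMORY, the more urgent
 * condition.
 */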
1313
1314/**
1315 * Executes all pending forced actions.
1316 *
1317 * Forced actions can cause execution delays and execution
1318 * rescheduling. The first we deal with using action priority, so
1319 * that for instance pending timers aren't scheduled and run until
1320 * right before execution. The rescheduling we deal with using
1321 * return codes. The same goes for VM termination, only in that case
1322 * we exit everything.
1323 *
1324 * @returns VBox status code of equal or greater importance/severity than rc.
1325 * The most important ones are: VINF_EM_RESCHEDULE,
1326 * VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1327 *
1328 * @param pVM VM handle.
1329 * @param pVCpu VMCPU handle.
1330 * @param rc The current rc.
1331 *
1332 */
1333int emR3ForcedActions(PVM pVM, PVMCPU pVCpu, int rc)
1334{
1335 STAM_REL_PROFILE_START(&pVCpu->em.s.StatForcedActions, a);
1336#ifdef VBOX_STRICT
1337 int rcIrq = VINF_SUCCESS;
1338#endif
1339 int rc2;
1340#define UPDATE_RC() \
1341 do { \
1342 AssertMsg(rc2 <= 0 || (rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST), ("Invalid FF return code: %Rra\n", rc2)); \
1343 if (rc2 == VINF_SUCCESS || rc < VINF_SUCCESS) \
1344 break; \
1345 if (!rc || rc2 < rc) \
1346 rc = rc2; \
1347 } while (0)
1348
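/*
 * How UPDATE_RC() merges status codes (illustrative sketch): the
 * informational VINF_EM_* codes are ordered so that numerically lower values
 * are more important. For example:
 *
 *      rc  = VINF_EM_RESCHEDULE;   // pending from an earlier action
 *      rc2 = VINF_EM_SUSPEND;      // returned by the next handler
 *      UPDATE_RC();                // rc2 < rc, so rc becomes VINF_EM_SUSPEND
 *
 * A rc2 of VINF_SUCCESS, or an rc already below VINF_SUCCESS (an error),
 * leaves rc untouched.
 */
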
1349 /*
1350 * Post execution chunk first.
1351 */
1352 if ( VM_FF_ISPENDING(pVM, VM_FF_NORMAL_PRIORITY_POST_MASK)
1353 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_NORMAL_PRIORITY_POST_MASK))
1354 {
1355 /*
1356 * EMT Rendezvous (must be serviced before termination).
1357 */
1358 if (VM_FF_ISPENDING(pVM, VM_FF_EMT_RENDEZVOUS))
1359 {
1360 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
1361 UPDATE_RC();
1362 /** @todo HACK ALERT! The following test is to make sure EM+TM
1363 * thinks the VM is stopped/reset before the next VM state change
1364 * is made. We need a better solution for this, or at least make it
1365 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1366 * VINF_EM_SUSPEND). */
1367 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1368 {
1369 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1370 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1371 return rc;
1372 }
1373 }
1374
1375 /*
1376 * State change request (cleared by vmR3SetStateLocked).
1377 */
1378 if (VM_FF_ISPENDING(pVM, VM_FF_CHECK_VM_STATE))
1379 {
1380 VMSTATE enmState = VMR3GetState(pVM);
1381 switch (enmState)
1382 {
1383 case VMSTATE_FATAL_ERROR:
1384 case VMSTATE_FATAL_ERROR_LS:
1385 Log2(("emR3ForcedActions: %s -> VINF_EM_SUSPEND\n", VMGetStateName(enmState) ));
1386 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1387 return VINF_EM_SUSPEND;
1388
1389 case VMSTATE_DESTROYING:
1390 Log2(("emR3ForcedActions: %s -> VINF_EM_TERMINATE\n", VMGetStateName(enmState) ));
1391 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1392 return VINF_EM_TERMINATE;
1393
1394 default:
1395 AssertMsgFailed(("%s\n", VMGetStateName(enmState)));
1396 }
1397 }
1398
1399 /*
1400 * Debugger Facility polling.
1401 */
1402 if (VM_FF_ISPENDING(pVM, VM_FF_DBGF))
1403 {
1404 rc2 = DBGFR3VMMForcedAction(pVM);
1405 UPDATE_RC();
1406 }
1407
1408 /*
1409 * Postponed reset request.
1410 */
1411 if (VM_FF_TESTANDCLEAR(pVM, VM_FF_RESET))
1412 {
1413 rc2 = VMR3Reset(pVM);
1414 UPDATE_RC();
1415 }
1416
1417 /*
1418 * CSAM page scanning.
1419 */
1420 if ( !VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY)
1421 && VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_CSAM_SCAN_PAGE))
1422 {
1423 PCPUMCTX pCtx = pVCpu->em.s.pCtx;
1424
1425 /** @todo check for 16 or 32 bit code! (D bit in the code selector) */
1426 Log(("Forced action VMCPU_FF_CSAM_SCAN_PAGE\n"));
1427
1428 CSAMR3CheckCodeEx(pVM, CPUMCTX2CORE(pCtx), pCtx->eip);
1429 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_CSAM_SCAN_PAGE);
1430 }
1431
1432 /*
1433 * Out of memory? Putting this after CSAM as it may in theory cause us to run out of memory.
1434 */
1435 if (VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY))
1436 {
1437 rc2 = PGMR3PhysAllocateHandyPages(pVM);
1438 UPDATE_RC();
1439 if (rc == VINF_EM_NO_MEMORY)
1440 return rc;
1441 }
1442
1443 /* check that we got them all */
1444 AssertCompile(VM_FF_NORMAL_PRIORITY_POST_MASK == (VM_FF_CHECK_VM_STATE | VM_FF_DBGF | VM_FF_RESET | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
1445 AssertCompile(VMCPU_FF_NORMAL_PRIORITY_POST_MASK == VMCPU_FF_CSAM_SCAN_PAGE);
1446 }
1447
1448 /*
1449 * Normal priority then.
1450 * (Executed in no particular order.)
1451 */
1452 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_NORMAL_PRIORITY_MASK, VM_FF_PGM_NO_MEMORY))
1453 {
1454 /*
1455 * PDM Queues are pending.
1456 */
1457 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PDM_QUEUES, VM_FF_PGM_NO_MEMORY))
1458 PDMR3QueueFlushAll(pVM);
1459
1460 /*
1461 * PDM DMA transfers are pending.
1462 */
1463 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PDM_DMA, VM_FF_PGM_NO_MEMORY))
1464 PDMR3DmaRun(pVM);
1465
1466 /*
1467 * EMT Rendezvous (make sure they are handled before the requests).
1468 */
1469 if (VM_FF_ISPENDING(pVM, VM_FF_EMT_RENDEZVOUS))
1470 {
1471 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
1472 UPDATE_RC();
1473 /** @todo HACK ALERT! The following test is to make sure EM+TM
1474 * thinks the VM is stopped/reset before the next VM state change
1475 * is made. We need a better solution for this, or at least make it
1476 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1477 * VINF_EM_SUSPEND). */
1478 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1479 {
1480 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1481 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1482 return rc;
1483 }
1484 }
1485
1486 /*
1487 * Requests from other threads.
1488 */
1489 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_REQUEST, VM_FF_PGM_NO_MEMORY))
1490 {
1491 rc2 = VMR3ReqProcessU(pVM->pUVM, VMCPUID_ANY, false /*fPriorityOnly*/);
1492 if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE) /** @todo this shouldn't be necessary */
1493 {
1494 Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
1495 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1496 return rc2;
1497 }
1498 UPDATE_RC();
1499 /** @todo HACK ALERT! The following test is to make sure EM+TM
1500 * thinks the VM is stopped/reset before the next VM state change
1501 * is made. We need a better solution for this, or at least make it
1502 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1503 * VINF_EM_SUSPEND). */
1504 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1505 {
1506 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1507 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1508 return rc;
1509 }
1510 }
1511
1512 /* Replay the handler notification changes. */
1513 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_REM_HANDLER_NOTIFY, VM_FF_PGM_NO_MEMORY))
1514 {
1515 /* Try not to cause deadlocks. */
1516 if ( pVM->cCpus == 1
1517 || ( !PGMIsLockOwner(pVM)
1518 && !IOMIsLockOwner(pVM))
1519 )
1520 {
1521 EMRemLock(pVM);
1522 REMR3ReplayHandlerNotifications(pVM);
1523 EMRemUnlock(pVM);
1524 }
1525 }
1526
1527 /* check that we got them all */
1528 AssertCompile(VM_FF_NORMAL_PRIORITY_MASK == (VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA | VM_FF_REM_HANDLER_NOTIFY | VM_FF_EMT_RENDEZVOUS));
1529 }
1530
1531 /*
1532 * Normal priority then. (per-VCPU)
1533 * (Executed in no particular order.)
1534 */
1535 if ( !VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY)
1536 && VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_NORMAL_PRIORITY_MASK))
1537 {
1538 /*
1539 * Requests from other threads.
1540 */
1541 if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_REQUEST))
1542 {
1543 rc2 = VMR3ReqProcessU(pVM->pUVM, pVCpu->idCpu, false /*fPriorityOnly*/);
1544 if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE || rc2 == VINF_EM_RESET)
1545 {
1546 Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
1547 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1548 return rc2;
1549 }
1550 UPDATE_RC();
1551 /** @todo HACK ALERT! The following test is to make sure EM+TM
1552 * thinks the VM is stopped/reset before the next VM state change
1553 * is made. We need a better solution for this, or at least make it
1554 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1555 * VINF_EM_SUSPEND). */
1556 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1557 {
1558 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1559 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1560 return rc;
1561 }
1562 }
1563
1564 /* check that we got them all */
1565 Assert(!(VMCPU_FF_NORMAL_PRIORITY_MASK & ~(VMCPU_FF_REQUEST)));
1566 }
1567
1568 /*
1569 * High priority pre execution chunk last.
1570 * (Executed in ascending priority order.)
1571 */
1572 if ( VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_MASK)
1573 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_MASK))
1574 {
1575 /*
1576 * Timers before interrupts.
1577 */
1578 if ( VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_TIMER)
1579 && !VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY))
1580 TMR3TimerQueuesDo(pVM);
1581
1582 /*
1583 * The instruction following an emulated STI should *always* be executed!
1584 *
1585 * Note! We intentionally don't clear VM_FF_INHIBIT_INTERRUPTS here if
1586 * the eip is the same as the inhibited instr address. Before we
1587 * are able to execute this instruction in raw mode (iret to
1588 * guest code) an external interrupt might force a world switch
1589 * again, possibly allowing a guest interrupt to be dispatched
1590 * in the process. This could break the guest. It sounds very
1591 * unlikely, but such timing-sensitive problems are not as rare as
1592 * you might think.
1593 */
1594 if ( VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
1595 && !VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY))
1596 {
1597 if (CPUMGetGuestRIP(pVCpu) != EMGetInhibitInterruptsPC(pVCpu))
1598 {
1599 Log(("Clearing VMCPU_FF_INHIBIT_INTERRUPTS at %RGv - successor %RGv\n", (RTGCPTR)CPUMGetGuestRIP(pVCpu), EMGetInhibitInterruptsPC(pVCpu)));
1600 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
1601 }
1602 else
1603 Log(("Leaving VMCPU_FF_INHIBIT_INTERRUPTS set at %RGv\n", (RTGCPTR)CPUMGetGuestRIP(pVCpu)));
1604 }
1605
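/*
 * Concrete example of the inhibition above (illustrative): the classic
 * guest idle sequence
 *
 *      sti
 *      hlt
 *
 * relies on the HLT being executed before any interrupt is dispatched, so
 * the inhibition is only cleared once RIP has moved past the instruction
 * following the STI.
 */
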
1606 /*
1607 * Interrupts.
1608 */
1609 if ( !VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY)
1610 && !VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
1611 && (!rc || rc >= VINF_EM_RESCHEDULE_HWACC)
1612 && !TRPMHasTrap(pVCpu) /* an interrupt could already be scheduled for dispatching in the recompiler. */
1613 && PATMAreInterruptsEnabled(pVM)
1614 && !HWACCMR3IsEventPending(pVCpu))
1615 {
1616 Assert(pVCpu->em.s.enmState != EMSTATE_WAIT_SIPI);
1617 if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
1618 {
1619 /* Note: it's important to make sure the return code from TRPMR3InjectEvent isn't ignored! */
1620 /** @todo this really isn't nice, should properly handle this */
1621 rc2 = TRPMR3InjectEvent(pVM, pVCpu, TRPM_HARDWARE_INT);
1622#ifdef VBOX_STRICT
1623 rcIrq = rc2;
1624#endif
1625 UPDATE_RC();
1626 }
1627 /** @todo really ugly; if we entered the hlt state when exiting the recompiler and an interrupt was pending, we previously got stuck in the halted state. */
1628 else if (REMR3QueryPendingInterrupt(pVM, pVCpu) != REM_NO_PENDING_IRQ)
1629 {
1630 rc2 = VINF_EM_RESCHEDULE_REM;
1631 UPDATE_RC();
1632 }
1633 }
1634
1635 /*
1636 * Allocate handy pages.
1637 */
1638 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PGM_NEED_HANDY_PAGES, VM_FF_PGM_NO_MEMORY))
1639 {
1640 rc2 = PGMR3PhysAllocateHandyPages(pVM);
1641 UPDATE_RC();
1642 }
1643
1644 /*
1645 * Debugger Facility request.
1646 */
1647 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_DBGF, VM_FF_PGM_NO_MEMORY))
1648 {
1649 rc2 = DBGFR3VMMForcedAction(pVM);
1650 UPDATE_RC();
1651 }
1652
1653 /*
1654 * EMT Rendezvous (must be serviced before termination).
1655 */
1656 if (VM_FF_ISPENDING(pVM, VM_FF_EMT_RENDEZVOUS))
1657 {
1658 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
1659 UPDATE_RC();
1660 /** @todo HACK ALERT! The following test is to make sure EM+TM thinks the VM is
1661 * stopped/reset before the next VM state change is made. We need a better
1662 * solution for this, or at least make it possible to do: (rc >= VINF_EM_FIRST
1663 * && rc <= VINF_EM_SUSPEND). */
1664 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1665 {
1666 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1667 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1668 return rc;
1669 }
1670 }
1671
1672 /*
1673 * State change request (cleared by vmR3SetStateLocked).
1674 */
1675 if (VM_FF_ISPENDING(pVM, VM_FF_CHECK_VM_STATE))
1676 {
1677 VMSTATE enmState = VMR3GetState(pVM);
1678 switch (enmState)
1679 {
1680 case VMSTATE_FATAL_ERROR:
1681 case VMSTATE_FATAL_ERROR_LS:
1682 Log2(("emR3ForcedActions: %s -> VINF_EM_SUSPEND\n", VMGetStateName(enmState) ));
1683 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1684 return VINF_EM_SUSPEND;
1685
1686 case VMSTATE_DESTROYING:
1687 Log2(("emR3ForcedActions: %s -> VINF_EM_TERMINATE\n", VMGetStateName(enmState) ));
1688 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1689 return VINF_EM_TERMINATE;
1690
1691 default:
1692 AssertMsgFailed(("%s\n", VMGetStateName(enmState)));
1693 }
1694 }
1695
1696 /*
1697 * Out of memory? Since most of our fellow high priority actions may cause us
1698 * to run out of memory, we're employing VM_FF_IS_PENDING_EXCEPT and putting this
1699 * at the end rather than the start. Also, VM_FF_TERMINATE has higher priority
1700 * than us since we can terminate without allocating more memory.
1701 */
1702 if (VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY))
1703 {
1704 rc2 = PGMR3PhysAllocateHandyPages(pVM);
1705 UPDATE_RC();
1706 if (rc == VINF_EM_NO_MEMORY)
1707 return rc;
1708 }
1709
1710 /*
1711 * If the virtual sync clock is still stopped, make TM restart it.
1712 */
1713 if (VM_FF_ISPENDING(pVM, VM_FF_TM_VIRTUAL_SYNC))
1714 TMR3VirtualSyncFF(pVM, pVCpu);
1715
1716#ifdef DEBUG
1717 /*
1718 * Debug, pause the VM.
1719 */
1720 if (VM_FF_ISPENDING(pVM, VM_FF_DEBUG_SUSPEND))
1721 {
1722 VM_FF_CLEAR(pVM, VM_FF_DEBUG_SUSPEND);
1723 Log(("emR3ForcedActions: returns VINF_EM_SUSPEND\n"));
1724 return VINF_EM_SUSPEND;
1725 }
1726#endif
1727
1728 /* check that we got them all */
1729 AssertCompile(VM_FF_HIGH_PRIORITY_PRE_MASK == (VM_FF_TM_VIRTUAL_SYNC | VM_FF_DBGF | VM_FF_CHECK_VM_STATE | VM_FF_DEBUG_SUSPEND | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
1730 AssertCompile(VMCPU_FF_HIGH_PRIORITY_PRE_MASK == (VMCPU_FF_TIMER | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_SELM_SYNC_TSS | VMCPU_FF_TRPM_SYNC_IDT | VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT | VMCPU_FF_INHIBIT_INTERRUPTS));
1731 }
1732
1733#undef UPDATE_RC
1734 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1735 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1736 Assert(rcIrq == VINF_SUCCESS || rcIrq == rc);
1737 return rc;
1738}
1739
1740
1741/**
1742 * Check if the preset execution time cap restricts guest execution scheduling.
1743 *
1744 * @returns true if allowed, false otherwise
1745 * @param pVM The VM to operate on.
1746 * @param pVCpu The VMCPU to operate on.
1747 *
1748 */
1749VMMR3DECL(bool) EMR3IsExecutionAllowed(PVM pVM, PVMCPU pVCpu)
1750{
1751 uint64_t u64UserTime, u64KernelTime;
1752
1753 if ( pVM->uCpuExecutionCap != 100
1754 && RT_SUCCESS(RTThreadGetExecutionTimeMilli(&u64KernelTime, &u64UserTime)))
1755 {
1756 uint64_t u64TimeNow = RTTimeMilliTS();
1757 if (pVCpu->em.s.u64TimeSliceStart + EM_TIME_SLICE < u64TimeNow)
1758 {
1759 /* New time slice. */
1760 pVCpu->em.s.u64TimeSliceStart = u64TimeNow;
1761 pVCpu->em.s.u64TimeSliceStartExec = u64KernelTime + u64UserTime;
1762 pVCpu->em.s.u64TimeSliceExec = 0;
1763 }
1764 pVCpu->em.s.u64TimeSliceExec = u64KernelTime + u64UserTime - pVCpu->em.s.u64TimeSliceStartExec;
1765
1766 Log2(("emR3IsExecutionAllowed: start=%RX64 startexec=%RX64 exec=%RX64 (cap=%x)\n", pVCpu->em.s.u64TimeSliceStart, pVCpu->em.s.u64TimeSliceStartExec, pVCpu->em.s.u64TimeSliceExec, (EM_TIME_SLICE * pVM->uCpuExecutionCap) / 100));
1767 if (pVCpu->em.s.u64TimeSliceExec >= (EM_TIME_SLICE * pVM->uCpuExecutionCap) / 100)
1768 return false;
1769 }
1770 return true;
1771}
1772
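/*
 * Worked example (illustrative; assumes the default EM_TIME_SLICE of
 * 100 ms): with uCpuExecutionCap = 50, the EMT may accumulate at most
 * 100 * 50 / 100 = 50 ms of kernel+user execution time per time slice;
 * once u64TimeSliceExec reaches that budget, the function returns false
 * until a new slice starts.
 */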
1773
1774/**
1775 * Execute VM.
1776 *
1777 * This function is the main loop of the VM. The emulation thread
1778 * calls this function when the VM has been successfully constructed
1779 * and we're ready to execute the VM.
1780 *
1781 * Returning from this function means that the VM is turned off or
1782 * suspended (state already saved) and deconstruction is next in line.
1783 *
1784 * All interaction from other threads is done using forced actions
1785 * and signaling of the wait object.
1786 *
1787 * @returns VBox status code; informational status codes may indicate failure.
1788 * @param pVM The VM to operate on.
1789 * @param pVCpu The VMCPU to operate on.
1790 */
1791VMMR3DECL(int) EMR3ExecuteVM(PVM pVM, PVMCPU pVCpu)
1792{
1793 Log(("EMR3ExecuteVM: pVM=%p enmVMState=%d (%s) enmState=%d (%s) enmPrevState=%d (%s) fForceRAW=%RTbool\n",
1794 pVM,
1795 pVM->enmVMState, VMR3GetStateName(pVM->enmVMState),
1796 pVCpu->em.s.enmState, emR3GetStateName(pVCpu->em.s.enmState),
1797 pVCpu->em.s.enmPrevState, emR3GetStateName(pVCpu->em.s.enmPrevState),
1798 pVCpu->em.s.fForceRAW));
1799 VM_ASSERT_EMT(pVM);
1800 AssertMsg( pVCpu->em.s.enmState == EMSTATE_NONE
1801 || pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI
1802 || pVCpu->em.s.enmState == EMSTATE_SUSPENDED,
1803 ("%s\n", emR3GetStateName(pVCpu->em.s.enmState)));
1804
1805 int rc = setjmp(pVCpu->em.s.u.FatalLongJump);
1806 if (rc == 0)
1807 {
1808 /*
1809 * Start the virtual time.
1810 */
1811 TMR3NotifyResume(pVM, pVCpu);
1812
1813 /*
1814 * The Outer Main Loop.
1815 */
1816 bool fFFDone = false;
1817
1818 /* Reschedule right away to start in the right state. */
1819 rc = VINF_SUCCESS;
1820
1821 /* If resuming after a pause or a state load, restore the previous
1822 state or else we'll start executing code. Else, just reschedule. */
1823 if ( pVCpu->em.s.enmState == EMSTATE_SUSPENDED
1824 && ( pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
1825 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED))
1826 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
1827 else
1828 pVCpu->em.s.enmState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
1829
1830 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
1831 for (;;)
1832 {
1833 /*
1834 * Before we can schedule anything (we're here because
1835 * scheduling is required) we must service any pending
1836 * forced actions to avoid any pending action causing
1837 * immediate rescheduling upon entering an inner loop.
1838 *
1839 * Do forced actions.
1840 */
1841 if ( !fFFDone
1842 && rc != VINF_EM_TERMINATE
1843 && rc != VINF_EM_OFF
1844 && ( VM_FF_ISPENDING(pVM, VM_FF_ALL_REM_MASK)
1845 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_ALL_REM_MASK)))
1846 {
1847 rc = emR3ForcedActions(pVM, pVCpu, rc);
1848 if ( ( rc == VINF_EM_RESCHEDULE_REM
1849 || rc == VINF_EM_RESCHEDULE_HWACC)
1850 && pVCpu->em.s.fForceRAW)
1851 rc = VINF_EM_RESCHEDULE_RAW;
1852 }
1853 else if (fFFDone)
1854 fFFDone = false;
1855
1856 /*
1857 * Now what to do?
1858 */
1859 Log2(("EMR3ExecuteVM: rc=%Rrc\n", rc));
1860 switch (rc)
1861 {
1862 /*
1863 * Keep doing what we're currently doing.
1864 */
1865 case VINF_SUCCESS:
1866 break;
1867
1868 /*
1869 * Reschedule - to raw-mode execution.
1870 */
1871 case VINF_EM_RESCHEDULE_RAW:
1872 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_RAW: %d -> %d (EMSTATE_RAW)\n", pVCpu->em.s.enmState, EMSTATE_RAW));
1873 pVCpu->em.s.enmState = EMSTATE_RAW;
1874 break;
1875
1876 /*
1877 * Reschedule - to hardware accelerated raw-mode execution.
1878 */
1879 case VINF_EM_RESCHEDULE_HWACC:
1880 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_HWACC: %d -> %d (EMSTATE_HWACC)\n", pVCpu->em.s.enmState, EMSTATE_HWACC));
1881 Assert(!pVCpu->em.s.fForceRAW);
1882 pVCpu->em.s.enmState = EMSTATE_HWACC;
1883 break;
1884
1885 /*
1886 * Reschedule - to recompiled execution.
1887 */
1888 case VINF_EM_RESCHEDULE_REM:
1889 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_REM: %d -> %d (EMSTATE_REM)\n", pVCpu->em.s.enmState, EMSTATE_REM));
1890 pVCpu->em.s.enmState = EMSTATE_REM;
1891 break;
1892
1893 /*
1894 * Resume.
1895 */
1896 case VINF_EM_RESUME:
1897 Log2(("EMR3ExecuteVM: VINF_EM_RESUME: %d -> VINF_EM_RESCHEDULE\n", pVCpu->em.s.enmState));
1898 /* Don't reschedule in the halted or wait for SIPI case. */
1899 if ( pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
1900 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED)
1901 {
1902 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
1903 break;
1904 }
1905 /* fall through and get scheduled. */
1906
1907 /*
1908 * Reschedule.
1909 */
1910 case VINF_EM_RESCHEDULE:
1911 {
1912 EMSTATE enmState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
1913 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE: %d -> %d (%s)\n", pVCpu->em.s.enmState, enmState, emR3GetStateName(enmState)));
1914 pVCpu->em.s.enmState = enmState;
1915 break;
1916 }
1917
1918 /*
1919 * Halted.
1920 */
1921 case VINF_EM_HALT:
1922 Log2(("EMR3ExecuteVM: VINF_EM_HALT: %d -> %d\n", pVCpu->em.s.enmState, EMSTATE_HALTED));
1923 pVCpu->em.s.enmState = EMSTATE_HALTED;
1924 break;
1925
1926 /*
1927 * Switch to the wait for SIPI state (application processor only)
1928 * Switch to the wait for SIPI state (application processor only).
1929 case VINF_EM_WAIT_SIPI:
1930 Assert(pVCpu->idCpu != 0);
1931 Log2(("EMR3ExecuteVM: VINF_EM_WAIT_SIPI: %d -> %d\n", pVCpu->em.s.enmState, EMSTATE_WAIT_SIPI));
1932 pVCpu->em.s.enmState = EMSTATE_WAIT_SIPI;
1933 break;
1934
1935
1936 /*
1937 * Suspend.
1938 */
1939 case VINF_EM_SUSPEND:
1940 Log2(("EMR3ExecuteVM: VINF_EM_SUSPEND: %d -> %d\n", pVCpu->em.s.enmState, EMSTATE_SUSPENDED));
1941 Assert(pVCpu->em.s.enmState != EMSTATE_SUSPENDED);
1942 pVCpu->em.s.enmPrevState = pVCpu->em.s.enmState;
1943 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
1944 break;
1945
1946 /*
1947 * Reset.
1948 * We might end up doing a double reset for now; we'll have to clean up the mess later.
1949 */
1950 case VINF_EM_RESET:
1951 {
1952 if (pVCpu->idCpu == 0)
1953 {
1954 EMSTATE enmState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
1955 Log2(("EMR3ExecuteVM: VINF_EM_RESET: %d -> %d (%s)\n", pVCpu->em.s.enmState, enmState, emR3GetStateName(enmState)));
1956 pVCpu->em.s.enmState = enmState;
1957 }
1958 else
1959 {
1960 /* All other VCPUs go into the wait for SIPI state. */
1961 pVCpu->em.s.enmState = EMSTATE_WAIT_SIPI;
1962 }
1963 break;
1964 }
1965
1966 /*
1967 * Power Off.
1968 */
1969 case VINF_EM_OFF:
1970 Log2(("EMR3ExecuteVM: returns VINF_EM_OFF (%d -> %d)\n", pVCpu->em.s.enmState, EMSTATE_TERMINATING));
1971 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
1972 TMR3NotifySuspend(pVM, pVCpu);
1973 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
1974 return rc;
1975
1976 /*
1977 * Terminate the VM.
1978 */
1979 case VINF_EM_TERMINATE:
1980 Log(("EMR3ExecuteVM returns VINF_EM_TERMINATE (%d -> %d)\n", pVCpu->em.s.enmState, EMSTATE_TERMINATING));
1981 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
1982 if (pVM->enmVMState < VMSTATE_DESTROYING) /* ugly */
1983 TMR3NotifySuspend(pVM, pVCpu);
1984 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
1985 return rc;
1986
1987
1988 /*
1989 * Out of memory, suspend the VM and stuff.
1990 */
1991 case VINF_EM_NO_MEMORY:
1992 Log2(("EMR3ExecuteVM: VINF_EM_NO_MEMORY: %d -> %d\n", pVCpu->em.s.enmState, EMSTATE_SUSPENDED));
1993 Assert(pVCpu->em.s.enmState != EMSTATE_SUSPENDED);
1994 pVCpu->em.s.enmPrevState = pVCpu->em.s.enmState;
1995 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
1996 TMR3NotifySuspend(pVM, pVCpu);
1997 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
1998
1999 rc = VMSetRuntimeError(pVM, VMSETRTERR_FLAGS_SUSPEND, "HostMemoryLow",
2000 N_("Unable to allocate and lock memory. The virtual machine will be paused. Please close applications to free up memory or close the VM"));
2001 if (rc != VINF_EM_SUSPEND)
2002 {
2003 if (RT_SUCCESS_NP(rc))
2004 {
2005 AssertLogRelMsgFailed(("%Rrc\n", rc));
2006 rc = VERR_EM_INTERNAL_ERROR;
2007 }
2008 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2009 }
2010 return rc;
2011
2012 /*
2013 * Guest debug events.
2014 */
2015 case VINF_EM_DBG_STEPPED:
2016 AssertMsgFailed(("VINF_EM_DBG_STEPPED cannot be here!")); /* no break */
2017 case VINF_EM_DBG_STOP:
2018 case VINF_EM_DBG_BREAKPOINT:
2019 case VINF_EM_DBG_STEP:
2020 if (pVCpu->em.s.enmState == EMSTATE_RAW)
2021 {
2022 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, pVCpu->em.s.enmState, EMSTATE_DEBUG_GUEST_RAW));
2023 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_RAW;
2024 }
2025 else
2026 {
2027 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, pVCpu->em.s.enmState, EMSTATE_DEBUG_GUEST_REM));
2028 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_REM;
2029 }
2030 break;
2031
2032 /*
2033 * Hypervisor debug events.
2034 */
2035 case VINF_EM_DBG_HYPER_STEPPED:
2036 case VINF_EM_DBG_HYPER_BREAKPOINT:
2037 case VINF_EM_DBG_HYPER_ASSERTION:
2038 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, pVCpu->em.s.enmState, EMSTATE_DEBUG_HYPER));
2039 pVCpu->em.s.enmState = EMSTATE_DEBUG_HYPER;
2040 break;
2041
2042 /*
2043 * Guru meditations.
2044 */
2045 case VERR_VMM_RING0_ASSERTION:
2046 Log(("EMR3ExecuteVM: %Rrc: %d -> %d (EMSTATE_GURU_MEDITATION)\n", rc, pVCpu->em.s.enmState, EMSTATE_GURU_MEDITATION));
2047 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2048 break;
2049
2050 /*
2051 * Any error code showing up here other than the ones we
2052 * know and process above are considered to be FATAL.
2053 *
2054 * Unknown warnings and informational status codes are also
2055 * included in this.
2056 */
2057 default:
2058 if (RT_SUCCESS_NP(rc))
2059 {
2060 AssertMsgFailed(("Unexpected warning or informational status code %Rra!\n", rc));
2061 rc = VERR_EM_INTERNAL_ERROR;
2062 }
2063 Log(("EMR3ExecuteVM: %Rrc: %d -> %d (EMSTATE_GURU_MEDITATION)\n", rc, pVCpu->em.s.enmState, EMSTATE_GURU_MEDITATION));
2064 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2065 break;
2066 }
2067
2068 STAM_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x); /* (skip this in release) */
2069 STAM_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2070
2071 /*
2072 * Act on the state.
2073 */
2074 switch (pVCpu->em.s.enmState)
2075 {
2076 /*
2077 * Execute raw.
2078 */
2079 case EMSTATE_RAW:
2080#ifndef IEM_VERIFICATION_MODE /* remove later */
2081 rc = emR3RawExecute(pVM, pVCpu, &fFFDone);
2082 break;
2083#endif
2084
2085 /*
2086 * Execute hardware accelerated raw.
2087 */
2088 case EMSTATE_HWACC:
2089#ifndef IEM_VERIFICATION_MODE /* remove later */
2090 rc = emR3HwAccExecute(pVM, pVCpu, &fFFDone);
2091 break;
2092#endif
2093
2094 /*
2095 * Execute recompiled.
2096 */
2097 case EMSTATE_REM:
2098#ifdef IEM_VERIFICATION_MODE
2099# if 1
2100 rc = VBOXSTRICTRC_TODO(IEMExecOne(pVCpu)); fFFDone = false;
2101# else
2102 rc = VBOXSTRICTRC_TODO(REMR3EmulateInstruction(pVM, pVCpu)); fFFDone = false;
2103 if (rc == VINF_EM_RESCHEDULE)
2104 rc = VINF_SUCCESS;
2105# endif
2106#else
2107 rc = emR3RemExecute(pVM, pVCpu, &fFFDone);
2108#endif
2109 Log2(("EMR3ExecuteVM: emR3RemExecute -> %Rrc\n", rc));
2110 break;
2111
2112 /*
2113 * Application processor execution halted until SIPI.
2114 */
2115 case EMSTATE_WAIT_SIPI:
2116 /* no break */
2117 /*
2118 * hlt - execution halted until interrupt.
2119 */
2120 case EMSTATE_HALTED:
2121 {
2122 STAM_REL_PROFILE_START(&pVCpu->em.s.StatHalted, y);
2123 if (pVCpu->em.s.mwait.fWait & EMMWAIT_FLAG_ACTIVE)
2124 {
2125 /* mwait has a special extension where it's woken up when an interrupt is pending even when IF=0. */
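                    /* (This is the MWAIT break-on-interrupt extension, i.e. MWAIT was executed with ECX bit 0 set.) */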
2126 rc = VMR3WaitHalted(pVM, pVCpu, !(pVCpu->em.s.mwait.fWait & EMMWAIT_FLAG_BREAKIRQIF0) && !(CPUMGetGuestEFlags(pVCpu) & X86_EFL_IF));
2127 pVCpu->em.s.mwait.fWait &= ~(EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0);
2128 }
2129 else
2130 rc = VMR3WaitHalted(pVM, pVCpu, !(CPUMGetGuestEFlags(pVCpu) & X86_EFL_IF));
2131
2132 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatHalted, y);
2133 break;
2134 }
2135
2136 /*
2137 * Suspended - return to VM.cpp.
2138 */
2139 case EMSTATE_SUSPENDED:
2140 TMR3NotifySuspend(pVM, pVCpu);
2141 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2142 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(pVCpu->em.s.enmPrevState)));
2143 return VINF_EM_SUSPEND;
2144
2145 /*
2146 * Debugging in the guest.
2147 */
2148 case EMSTATE_DEBUG_GUEST_REM:
2149 case EMSTATE_DEBUG_GUEST_RAW:
2150 TMR3NotifySuspend(pVM, pVCpu);
2151 rc = emR3Debug(pVM, pVCpu, rc);
2152 TMR3NotifyResume(pVM, pVCpu);
2153 Log2(("EMR3ExecuteVM: emR3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
2154 break;
2155
2156 /*
2157 * Debugging in the hypervisor.
2158 */
2159 case EMSTATE_DEBUG_HYPER:
2160 {
2161 TMR3NotifySuspend(pVM, pVCpu);
2162 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2163
2164 rc = emR3Debug(pVM, pVCpu, rc);
2165 Log2(("EMR3ExecuteVM: emR3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
2166 if (rc != VINF_SUCCESS)
2167 {
2168 /* switch to guru meditation mode */
2169 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2170 VMMR3FatalDump(pVM, pVCpu, rc);
2171 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(pVCpu->em.s.enmPrevState)));
2172 return rc;
2173 }
2174
2175 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2176 TMR3NotifyResume(pVM, pVCpu);
2177 break;
2178 }
2179
2180 /*
2181 * Guru meditation takes place in the debugger.
2182 */
2183 case EMSTATE_GURU_MEDITATION:
2184 {
2185 TMR3NotifySuspend(pVM, pVCpu);
2186 VMMR3FatalDump(pVM, pVCpu, rc);
2187 emR3Debug(pVM, pVCpu, rc);
2188 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2189 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(pVCpu->em.s.enmPrevState)));
2190 return rc;
2191 }
2192
2193 /*
2194 * The states we don't expect here.
2195 */
2196 case EMSTATE_NONE:
2197 case EMSTATE_TERMINATING:
2198 default:
2199 AssertMsgFailed(("EMR3ExecuteVM: Invalid state %d!\n", pVCpu->em.s.enmState));
2200 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2201 TMR3NotifySuspend(pVM, pVCpu);
2202 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2203 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(pVCpu->em.s.enmPrevState)));
2204 return VERR_EM_INTERNAL_ERROR;
2205 }
2206 } /* The Outer Main Loop */
2207 }
2208 else
2209 {
2210 /*
2211 * Fatal error.
2212 */
2213 Log(("EMR3ExecuteVM: returns %Rrc because of longjmp / fatal error; (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(pVCpu->em.s.enmPrevState)));
2214 TMR3NotifySuspend(pVM, pVCpu);
2215 VMMR3FatalDump(pVM, pVCpu, rc);
2216 emR3Debug(pVM, pVCpu, rc);
2217 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2218 /** @todo change the VM state! */
2219 return rc;
2220 }
2221
2222 /* (won't ever get here). */
2223 AssertFailed();
2224}
2225
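/*
 * Illustrative sketch (hypothetical caller, not the actual EMT code) of how
 * an emulation thread might drive EMR3ExecuteVM():
 *
 * @code
 *      for (;;)
 *      {
 *          int rc = EMR3ExecuteVM(pVM, pVCpu);  // runs until off/suspend/termination
 *          if (rc == VINF_EM_SUSPEND)
 *              WaitForResumeRequest();          // hypothetical helper
 *          else
 *              break;                           // power off, terminate or fatal error
 *      }
 * @endcode
 */
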
2226/**
2227 * Notify EM of a suspend state change (used by FTM).
2228 *
2229 * @param pVM VM Handle.
2230 */
2231VMMR3DECL(int) EMR3NotifySuspend(PVM pVM)
2232{
2233 PVMCPU pVCpu = VMMGetCpu(pVM);
2234
2235 TMR3NotifySuspend(pVM, pVCpu); /* Stop the virtual time. */
2236 pVCpu->em.s.enmPrevState = pVCpu->em.s.enmState;
2237 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
2238 return VINF_SUCCESS;
2239}
2240
2241/**
2242 * Notify EM of a resume state change (used by FTM).
2243 *
2244 * @param pVM VM Handle.
2245 */
2246VMMR3DECL(int) EMR3NotifyResume(PVM pVM)
2247{
2248 PVMCPU pVCpu = VMMGetCpu(pVM);
2249 EMSTATE enmCurState = pVCpu->em.s.enmState;
2250
2251 TMR3NotifyResume(pVM, pVCpu); /* Resume the virtual time. */
2252 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
2253 pVCpu->em.s.enmPrevState = enmCurState;
2254 return VINF_SUCCESS;
2255}
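/*
 * The two notifications above are meant to be used as a pair (illustrative
 * sketch; the actual FTM call sites may differ):
 *
 * @code
 *      EMR3NotifySuspend(pVM);    // stops virtual time, parks EM in EMSTATE_SUSPENDED
 *      // ... synchronize or copy the VM state ...
 *      EMR3NotifyResume(pVM);     // restores the previous EM state and virtual time
 * @endcode
 */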