VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/EM.cpp@93554

Last change on this file since 93554 was 93204, checked in by vboxsync, 3 years ago

VMM/EM: Some debug stepping hacks.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 116.9 KB
1/* $Id: EM.cpp 93204 2022-01-12 18:37:23Z vboxsync $ */
2/** @file
3 * EM - Execution Monitor / Manager.
4 */
5
6/*
7 * Copyright (C) 2006-2022 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/** @page pg_em EM - The Execution Monitor / Manager
19 *
20 * The Execution Monitor/Manager is responsible for running the VM, scheduling
21 * the right kind of execution (Raw-mode, Hardware Assisted, Recompiled or
22 * Interpreted), and keeping the CPU states in sync. The function
23 * EMR3ExecuteVM() is the 'main-loop' of the VM, while each of the execution
24 * modes has different inner loops (emR3RawExecute, emR3HmExecute, and
25 * emR3RemExecute).
26 *
27 * The interpreted execution is only used to avoid switching between
28 * raw-mode/hm and the recompiler when fielding virtualization traps/faults.
29 * The interpretation is thus implemented as part of EM.
30 *
31 * @see grp_em
32 */
33
34
35/*********************************************************************************************************************************
36* Header Files *
37*********************************************************************************************************************************/
38#define LOG_GROUP LOG_GROUP_EM
39#define VMCPU_INCL_CPUM_GST_CTX /* for CPUM_IMPORT_GUEST_STATE_RET */
40#include <VBox/vmm/em.h>
41#include <VBox/vmm/vmm.h>
42#include <VBox/vmm/selm.h>
43#include <VBox/vmm/trpm.h>
44#include <VBox/vmm/iem.h>
45#include <VBox/vmm/nem.h>
46#include <VBox/vmm/iom.h>
47#include <VBox/vmm/dbgf.h>
48#include <VBox/vmm/pgm.h>
49#include <VBox/vmm/apic.h>
50#include <VBox/vmm/tm.h>
51#include <VBox/vmm/mm.h>
52#include <VBox/vmm/ssm.h>
53#include <VBox/vmm/pdmapi.h>
54#include <VBox/vmm/pdmcritsect.h>
55#include <VBox/vmm/pdmqueue.h>
56#include <VBox/vmm/hm.h>
57#include "EMInternal.h"
58#include <VBox/vmm/vm.h>
59#include <VBox/vmm/uvm.h>
60#include <VBox/vmm/cpumdis.h>
61#include <VBox/dis.h>
62#include <VBox/disopcode.h>
63#include <VBox/err.h>
64#include "VMMTracing.h"
65
66#include <iprt/asm.h>
67#include <iprt/string.h>
68#include <iprt/stream.h>
69#include <iprt/thread.h>
70
71
72/*********************************************************************************************************************************
73* Internal Functions *
74*********************************************************************************************************************************/
75static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM);
76static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
77#if defined(LOG_ENABLED) || defined(VBOX_STRICT)
78static const char *emR3GetStateName(EMSTATE enmState);
79#endif
80static VBOXSTRICTRC emR3Debug(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc);
81#if defined(VBOX_WITH_REM) || defined(DEBUG)
82static int emR3RemStep(PVM pVM, PVMCPU pVCpu);
83#endif
84static int emR3RemExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone);
85
86
87/**
88 * Initializes the EM.
89 *
90 * @returns VBox status code.
91 * @param pVM The cross context VM structure.
92 */
93VMMR3_INT_DECL(int) EMR3Init(PVM pVM)
94{
95 LogFlow(("EMR3Init\n"));
96 /*
97 * Assert alignment and sizes.
98 */
99 AssertCompileMemberAlignment(VM, em.s, 32);
100 AssertCompile(sizeof(pVM->em.s) <= sizeof(pVM->em.padding));
101 AssertCompile(RT_SIZEOFMEMB(VMCPU, em.s.u.FatalLongJump) <= RT_SIZEOFMEMB(VMCPU, em.s.u.achPaddingFatalLongJump));
102 AssertCompile(RT_SIZEOFMEMB(VMCPU, em.s) <= RT_SIZEOFMEMB(VMCPU, em.padding));
103
104 /*
105 * Init the structure.
106 */
107 PCFGMNODE pCfgRoot = CFGMR3GetRoot(pVM);
108 PCFGMNODE pCfgEM = CFGMR3GetChild(pCfgRoot, "EM");
109
110 int rc = CFGMR3QueryBoolDef(pCfgEM, "IemExecutesAll", &pVM->em.s.fIemExecutesAll, false);
111 AssertLogRelRCReturn(rc, rc);
112
113 bool fEnabled;
114 rc = CFGMR3QueryBoolDef(pCfgEM, "TripleFaultReset", &fEnabled, false);
115 AssertLogRelRCReturn(rc, rc);
116 pVM->em.s.fGuruOnTripleFault = !fEnabled;
117 if (!pVM->em.s.fGuruOnTripleFault && pVM->cCpus > 1)
118 {
119 LogRel(("EM: Overriding /EM/TripleFaultReset, must be false on SMP.\n"));
120 pVM->em.s.fGuruOnTripleFault = true;
121 }
122
123 LogRel(("EMR3Init: fIemExecutesAll=%RTbool fGuruOnTripleFault=%RTbool\n", pVM->em.s.fIemExecutesAll, pVM->em.s.fGuruOnTripleFault));
124
125 /** @cfgm{/EM/ExitOptimizationEnabled, bool, true}
126 * Whether to try to correlate exit history in any context, detect hot spots and
127 * try to optimize these using IEM if there are other exits close by. This
128 * overrides the context specific settings. */
129 bool fExitOptimizationEnabled = true;
130 rc = CFGMR3QueryBoolDef(pCfgEM, "ExitOptimizationEnabled", &fExitOptimizationEnabled, true);
131 AssertLogRelRCReturn(rc, rc);
132
133 /** @cfgm{/EM/ExitOptimizationEnabledR0, bool, true}
134 * Whether to optimize exits in ring-0. Setting this to false will also disable
135 * the /EM/ExitOptimizationEnabledR0PreemptDisabled setting. Depending on preemption
136 * capabilities of the host kernel, this optimization may be unavailable. */
137 bool fExitOptimizationEnabledR0 = true;
138 rc = CFGMR3QueryBoolDef(pCfgEM, "ExitOptimizationEnabledR0", &fExitOptimizationEnabledR0, true);
139 AssertLogRelRCReturn(rc, rc);
140 fExitOptimizationEnabledR0 &= fExitOptimizationEnabled;
141
142 /** @cfgm{/EM/ExitOptimizationEnabledR0PreemptDisabled, bool, false}
143 * Whether to optimize exits in ring-0 when preemption is disabled (or preemption
144 * hooks are in effect). */
145 /** @todo change the default to true here */
146 bool fExitOptimizationEnabledR0PreemptDisabled = true;
147 rc = CFGMR3QueryBoolDef(pCfgEM, "ExitOptimizationEnabledR0PreemptDisabled", &fExitOptimizationEnabledR0PreemptDisabled, false);
148 AssertLogRelRCReturn(rc, rc);
149 fExitOptimizationEnabledR0PreemptDisabled &= fExitOptimizationEnabledR0;
150
151 /** @cfgm{/EM/HistoryExecMaxInstructions, integer, 16, 65535, 8192}
152 * Maximum number of instructions to let EMHistoryExec execute in one go. */
153 uint16_t cHistoryExecMaxInstructions = 8192;
154 rc = CFGMR3QueryU16Def(pCfgEM, "HistoryExecMaxInstructions", &cHistoryExecMaxInstructions, cHistoryExecMaxInstructions);
155 AssertLogRelRCReturn(rc, rc);
156 if (cHistoryExecMaxInstructions < 16)
157 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS, "/EM/HistoryExecMaxInstructions value is too small, min 16");
158
159 /** @cfgm{/EM/HistoryProbeMaxInstructionsWithoutExit, integer, 2, 65535, 24 for HM, 32 for NEM}
160 * Maximum number of instructions between exits during probing. */
161 uint16_t cHistoryProbeMaxInstructionsWithoutExit = 24;
162#ifdef RT_OS_WINDOWS
163 if (VM_IS_NEM_ENABLED(pVM))
164 cHistoryProbeMaxInstructionsWithoutExit = 32;
165#endif
166 rc = CFGMR3QueryU16Def(pCfgEM, "HistoryProbeMaxInstructionsWithoutExit", &cHistoryProbeMaxInstructionsWithoutExit,
167 cHistoryProbeMaxInstructionsWithoutExit);
168 AssertLogRelRCReturn(rc, rc);
169 if (cHistoryProbeMaxInstructionsWithoutExit < 2)
170 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS,
171 "/EM/HistoryProbeMaxInstructionsWithoutExit value is too small, min 2");
172
173 /** @cfgm{/EM/HistoryProbMinInstructions, integer, 0, 65535, depends}
174 * The default is (/EM/HistoryProbeMaxInstructionsWithoutExit + 1) * 3. */
175 uint16_t cHistoryProbeMinInstructions = cHistoryProbeMaxInstructionsWithoutExit < 0x5554
176 ? (cHistoryProbeMaxInstructionsWithoutExit + 1) * 3 : 0xffff;
177 rc = CFGMR3QueryU16Def(pCfgEM, "HistoryProbMinInstructions", &cHistoryProbeMinInstructions,
178 cHistoryProbeMinInstructions);
179 AssertLogRelRCReturn(rc, rc);
180
181 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
182 {
183 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
184 pVCpu->em.s.fExitOptimizationEnabled = fExitOptimizationEnabled;
185 pVCpu->em.s.fExitOptimizationEnabledR0 = fExitOptimizationEnabledR0;
186 pVCpu->em.s.fExitOptimizationEnabledR0PreemptDisabled = fExitOptimizationEnabledR0PreemptDisabled;
187 pVCpu->em.s.cHistoryExecMaxInstructions = cHistoryExecMaxInstructions;
188 pVCpu->em.s.cHistoryProbeMinInstructions = cHistoryProbeMinInstructions;
189 pVCpu->em.s.cHistoryProbeMaxInstructionsWithoutExit = cHistoryProbeMaxInstructionsWithoutExit;
190 }
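/* Usage sketch, assuming the standard VBoxInternal extradata mapping onto the
 * CFGM tree: the /EM/ keys queried above can then be overridden from the host
 * side before the VM is started ("MyVM" and the values are placeholders):
 *
 *     VBoxManage setextradata "MyVM" VBoxInternal/EM/TripleFaultReset 1
 *     VBoxManage setextradata "MyVM" VBoxInternal/EM/ExitOptimizationEnabled 0
 */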
191
192 /*
193 * Saved state.
194 */
195 rc = SSMR3RegisterInternal(pVM, "em", 0, EM_SAVED_STATE_VERSION, 16,
196 NULL, NULL, NULL,
197 NULL, emR3Save, NULL,
198 NULL, emR3Load, NULL);
199 if (RT_FAILURE(rc))
200 return rc;
201
202 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
203 {
204 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
205
206 pVCpu->em.s.enmState = idCpu == 0 ? EMSTATE_NONE : EMSTATE_WAIT_SIPI;
207 pVCpu->em.s.enmPrevState = EMSTATE_NONE;
208 pVCpu->em.s.u64TimeSliceStart = 0; /* paranoia */
209 pVCpu->em.s.idxContinueExitRec = UINT16_MAX;
210
211# define EM_REG_COUNTER(a, b, c) \
212 rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, c, b, idCpu); \
213 AssertRC(rc);
214
215# define EM_REG_COUNTER_USED(a, b, c) \
216 rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, c, b, idCpu); \
217 AssertRC(rc);
218
219# define EM_REG_PROFILE(a, b, c) \
220 rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, idCpu); \
221 AssertRC(rc);
222
223# define EM_REG_PROFILE_ADV(a, b, c) \
224 rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE_ADV, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, idCpu); \
225 AssertRC(rc);
226
227 /*
228 * Statistics.
229 */
230#ifdef VBOX_WITH_STATISTICS
231 EM_REG_COUNTER_USED(&pVCpu->em.s.StatIoRestarted, "/EM/CPU%u/R3/PrivInst/IoRestarted", "I/O instructions restarted in ring-3.");
232 EM_REG_COUNTER_USED(&pVCpu->em.s.StatIoIem, "/EM/CPU%u/R3/PrivInst/IoIem", "I/O instructions handed to IEM in ring-3.");
233
234 /* these should be considered for release statistics. */
235 EM_REG_COUNTER(&pVCpu->em.s.StatIOEmu, "/PROF/CPU%u/EM/Emulation/IO", "Profiling of emR3RawExecuteIOInstruction.");
236 EM_REG_COUNTER(&pVCpu->em.s.StatPrivEmu, "/PROF/CPU%u/EM/Emulation/Priv", "Profiling of emR3RawPrivileged.");
237 EM_REG_PROFILE(&pVCpu->em.s.StatHMEntry, "/PROF/CPU%u/EM/HMEnter", "Profiling Hardware Accelerated Mode entry overhead.");
238 EM_REG_PROFILE(&pVCpu->em.s.StatHMExec, "/PROF/CPU%u/EM/HMExec", "Profiling Hardware Accelerated Mode execution.");
239 EM_REG_COUNTER(&pVCpu->em.s.StatHMExecuteCalled, "/PROF/CPU%u/EM/HMExecuteCalled", "Number of times emR3HmExecute is called.");
240 EM_REG_PROFILE(&pVCpu->em.s.StatIEMEmu, "/PROF/CPU%u/EM/IEMEmuSingle", "Profiling single instruction IEM execution.");
241 EM_REG_PROFILE(&pVCpu->em.s.StatIEMThenREM, "/PROF/CPU%u/EM/IEMThenRem", "Profiling IEM-then-REM instruction execution (by IEM).");
242 EM_REG_PROFILE(&pVCpu->em.s.StatNEMEntry, "/PROF/CPU%u/EM/NEMEnter", "Profiling NEM entry overhead.");
243#endif /* VBOX_WITH_STATISTICS */
244 EM_REG_PROFILE(&pVCpu->em.s.StatNEMExec, "/PROF/CPU%u/EM/NEMExec", "Profiling NEM execution.");
245 EM_REG_COUNTER(&pVCpu->em.s.StatNEMExecuteCalled, "/PROF/CPU%u/EM/NEMExecuteCalled", "Number of times emR3NemExecute is called.");
246#ifdef VBOX_WITH_STATISTICS
247 EM_REG_PROFILE(&pVCpu->em.s.StatREMEmu, "/PROF/CPU%u/EM/REMEmuSingle", "Profiling single instruction REM execution.");
248 EM_REG_PROFILE(&pVCpu->em.s.StatREMExec, "/PROF/CPU%u/EM/REMExec", "Profiling REM execution.");
249 EM_REG_PROFILE(&pVCpu->em.s.StatREMSync, "/PROF/CPU%u/EM/REMSync", "Profiling REM context syncing.");
250 EM_REG_PROFILE(&pVCpu->em.s.StatRAWEntry, "/PROF/CPU%u/EM/RAWEnter", "Profiling Raw Mode entry overhead.");
251 EM_REG_PROFILE(&pVCpu->em.s.StatRAWExec, "/PROF/CPU%u/EM/RAWExec", "Profiling Raw Mode execution.");
252 EM_REG_PROFILE(&pVCpu->em.s.StatRAWTail, "/PROF/CPU%u/EM/RAWTail", "Profiling Raw Mode tail overhead.");
253#endif /* VBOX_WITH_STATISTICS */
254
255 EM_REG_COUNTER(&pVCpu->em.s.StatForcedActions, "/PROF/CPU%u/EM/ForcedActions", "Profiling forced action execution.");
256 EM_REG_COUNTER(&pVCpu->em.s.StatHalted, "/PROF/CPU%u/EM/Halted", "Profiling halted state (VMR3WaitHalted).");
257 EM_REG_PROFILE_ADV(&pVCpu->em.s.StatCapped, "/PROF/CPU%u/EM/Capped", "Profiling capped state (sleep).");
258 EM_REG_COUNTER(&pVCpu->em.s.StatREMTotal, "/PROF/CPU%u/EM/REMTotal", "Profiling emR3RemExecute (excluding FFs).");
259 EM_REG_COUNTER(&pVCpu->em.s.StatRAWTotal, "/PROF/CPU%u/EM/RAWTotal", "Profiling emR3RawExecute (excluding FFs).");
260
261 EM_REG_PROFILE_ADV(&pVCpu->em.s.StatTotal, "/PROF/CPU%u/EM/Total", "Profiling EMR3ExecuteVM.");
262
263 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.iNextExit, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
264 "Number of recorded exits.", "/PROF/CPU%u/EM/RecordedExits", idCpu);
265 AssertRC(rc);
266
267 /* History record statistics */
268 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.cExitRecordUsed, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
269 "Number of used hash table entries.", "/EM/CPU%u/ExitHashing/Used", idCpu);
270 AssertRC(rc);
271
272 for (uint32_t iStep = 0; iStep < RT_ELEMENTS(pVCpu->em.s.aStatHistoryRecHits); iStep++)
273 {
274 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.aStatHistoryRecHits[iStep], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
275 "Number of hits at this step.", "/EM/CPU%u/ExitHashing/Step%02u-Hits", idCpu, iStep);
276 AssertRC(rc);
277 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.aStatHistoryRecTypeChanged[iStep], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
278 "Number of type changes at this step.", "/EM/CPU%u/ExitHashing/Step%02u-TypeChanges", idCpu, iStep);
279 AssertRC(rc);
280 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.aStatHistoryRecReplaced[iStep], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
281 "Number of replacements at this step.", "/EM/CPU%u/ExitHashing/Step%02u-Replacments", idCpu, iStep);
282 AssertRC(rc);
283 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.aStatHistoryRecNew[iStep], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
284 "Number of new inserts at this step.", "/EM/CPU%u/ExitHashing/Step%02u-NewInserts", idCpu, iStep);
285 AssertRC(rc);
286 }
287
288 EM_REG_PROFILE(&pVCpu->em.s.StatHistoryExec, "/EM/CPU%u/ExitOpt/Exec", "Profiling normal EMHistoryExec operation.");
289 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryExecSavedExits, "/EM/CPU%u/ExitOpt/ExecSavedExit", "Net number of saved exits.");
290 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryExecInstructions, "/EM/CPU%u/ExitOpt/ExecInstructions", "Number of instructions executed during normal operation.");
291 EM_REG_PROFILE(&pVCpu->em.s.StatHistoryProbe, "/EM/CPU%u/ExitOpt/Probe", "Profiling EMHistoryExec when probing.");
292 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryProbeInstructions, "/EM/CPU%u/ExitOpt/ProbeInstructions", "Number of instructions executed during probing.");
293 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryProbedNormal, "/EM/CPU%u/ExitOpt/ProbedNormal", "Number of EMEXITACTION_NORMAL_PROBED results.");
294 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryProbedExecWithMax, "/EM/CPU%u/ExitOpt/ProbedExecWithMax", "Number of EMEXITACTION_EXEC_WITH_MAX results.");
295 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryProbedToRing3, "/EM/CPU%u/ExitOpt/ProbedToRing3", "Number of ring-3 probe continuations.");
296 }
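/* Inspection sketch, assuming the VBoxManage debugvm statistics front-end to
 * STAM: the counters and profiles registered above can be dumped at runtime
 * with a pattern match ("MyVM" is a placeholder):
 *
 *     VBoxManage debugvm "MyVM" statistics --pattern "/EM/*"
 *     VBoxManage debugvm "MyVM" statistics --pattern "/PROF/CPU0/EM/*"
 */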
297
298 emR3InitDbg(pVM);
299 return VINF_SUCCESS;
300}
301
302
303/**
304 * Called when a VM initialization stage is completed.
305 *
306 * @returns VBox status code.
307 * @param pVM The cross context VM structure.
308 * @param enmWhat The initialization state that was completed.
309 */
310VMMR3_INT_DECL(int) EMR3InitCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
311{
312 if (enmWhat == VMINITCOMPLETED_RING0)
313 LogRel(("EM: Exit history optimizations: enabled=%RTbool enabled-r0=%RTbool enabled-r0-no-preemption=%RTbool\n",
314 pVM->apCpusR3[0]->em.s.fExitOptimizationEnabled, pVM->apCpusR3[0]->em.s.fExitOptimizationEnabledR0,
315 pVM->apCpusR3[0]->em.s.fExitOptimizationEnabledR0PreemptDisabled));
316 return VINF_SUCCESS;
317}
318
319
320/**
321 * Applies relocations to data and code managed by this
322 * component. This function will be called at init and
323 * whenever the VMM needs to relocate itself inside the GC.
324 *
325 * @param pVM The cross context VM structure.
326 */
327VMMR3_INT_DECL(void) EMR3Relocate(PVM pVM)
328{
329 LogFlow(("EMR3Relocate\n"));
330 RT_NOREF(pVM);
331}
332
333
334/**
335 * Reset the EM state for a CPU.
336 *
337 * Called by EMR3Reset and hot plugging.
338 *
339 * @param pVCpu The cross context virtual CPU structure.
340 */
341VMMR3_INT_DECL(void) EMR3ResetCpu(PVMCPU pVCpu)
342{
343 /* Reset scheduling state. */
344 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_UNHALT);
345
346 /* VMR3ResetFF may return VINF_EM_RESET or VINF_EM_SUSPEND, so transition
347 out of the HALTED state here so that enmPrevState doesn't end up as
348 HALTED when EMR3Execute returns. */
349 if (pVCpu->em.s.enmState == EMSTATE_HALTED)
350 {
351 Log(("EMR3ResetCpu: Cpu#%u %s -> %s\n", pVCpu->idCpu, emR3GetStateName(pVCpu->em.s.enmState), pVCpu->idCpu == 0 ? "EMSTATE_NONE" : "EMSTATE_WAIT_SIPI"));
352 pVCpu->em.s.enmState = pVCpu->idCpu == 0 ? EMSTATE_NONE : EMSTATE_WAIT_SIPI;
353 }
354}
355
356
357/**
358 * Reset notification.
359 *
360 * @param pVM The cross context VM structure.
361 */
362VMMR3_INT_DECL(void) EMR3Reset(PVM pVM)
363{
364 Log(("EMR3Reset: \n"));
365 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
366 EMR3ResetCpu(pVM->apCpusR3[idCpu]);
367}
368
369
370/**
371 * Terminates the EM.
372 *
373 * Termination means cleaning up and freeing all resources;
374 * the VM itself is at this point powered off or suspended.
375 *
376 * @returns VBox status code.
377 * @param pVM The cross context VM structure.
378 */
379VMMR3_INT_DECL(int) EMR3Term(PVM pVM)
380{
381 RT_NOREF(pVM);
382 return VINF_SUCCESS;
383}
384
385
386/**
387 * Execute state save operation.
388 *
389 * @returns VBox status code.
390 * @param pVM The cross context VM structure.
391 * @param pSSM SSM operation handle.
392 */
393static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM)
394{
395 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
396 {
397 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
398
399 SSMR3PutBool(pSSM, false /*fForceRAW*/);
400
401 Assert(pVCpu->em.s.enmState == EMSTATE_SUSPENDED);
402 Assert(pVCpu->em.s.enmPrevState != EMSTATE_SUSPENDED);
403 SSMR3PutU32(pSSM, pVCpu->em.s.enmPrevState);
404
405 /* Save mwait state. */
406 SSMR3PutU32(pSSM, pVCpu->em.s.MWait.fWait);
407 SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMWaitRAX);
408 SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMWaitRCX);
409 SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRAX);
410 SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRCX);
411 int rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRDX);
412 AssertRCReturn(rc, rc);
413 }
414 return VINF_SUCCESS;
415}
416
417
418/**
419 * Execute state load operation.
420 *
421 * @returns VBox status code.
422 * @param pVM The cross context VM structure.
423 * @param pSSM SSM operation handle.
424 * @param uVersion Data layout version.
425 * @param uPass The data pass.
426 */
427static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
428{
429 /*
430 * Validate version.
431 */
432 if ( uVersion > EM_SAVED_STATE_VERSION
433 || uVersion < EM_SAVED_STATE_VERSION_PRE_SMP)
434 {
435 AssertMsgFailed(("emR3Load: Invalid version uVersion=%d (current %d)!\n", uVersion, EM_SAVED_STATE_VERSION));
436 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
437 }
438 Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);
439
440 /*
441 * Load the saved state.
442 */
443 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
444 {
445 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
446
447 bool fForceRAWIgnored;
448 int rc = SSMR3GetBool(pSSM, &fForceRAWIgnored);
449 AssertRCReturn(rc, rc);
450
451 if (uVersion > EM_SAVED_STATE_VERSION_PRE_SMP)
452 {
453 SSM_GET_ENUM32_RET(pSSM, pVCpu->em.s.enmPrevState, EMSTATE);
454 Assert(pVCpu->em.s.enmPrevState != EMSTATE_SUSPENDED);
455
456 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
457 }
458 if (uVersion > EM_SAVED_STATE_VERSION_PRE_MWAIT)
459 {
460 /* Load mwait state. */
461 rc = SSMR3GetU32(pSSM, &pVCpu->em.s.MWait.fWait);
462 AssertRCReturn(rc, rc);
463 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMWaitRAX);
464 AssertRCReturn(rc, rc);
465 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMWaitRCX);
466 AssertRCReturn(rc, rc);
467 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRAX);
468 AssertRCReturn(rc, rc);
469 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRCX);
470 AssertRCReturn(rc, rc);
471 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRDX);
472 AssertRCReturn(rc, rc);
473 }
474 }
475 return VINF_SUCCESS;
476}
477
478
479/**
480 * Argument packet for emR3SetExecutionPolicy.
481 */
482struct EMR3SETEXECPOLICYARGS
483{
484 EMEXECPOLICY enmPolicy;
485 bool fEnforce;
486};
487
488
489/**
490 * @callback_method_impl{FNVMMEMTRENDEZVOUS, Rendezvous callback for EMR3SetExecutionPolicy.}
491 */
492static DECLCALLBACK(VBOXSTRICTRC) emR3SetExecutionPolicy(PVM pVM, PVMCPU pVCpu, void *pvUser)
493{
494 /*
495 * Only the first CPU changes the variables.
496 */
497 if (pVCpu->idCpu == 0)
498 {
499 struct EMR3SETEXECPOLICYARGS *pArgs = (struct EMR3SETEXECPOLICYARGS *)pvUser;
500 switch (pArgs->enmPolicy)
501 {
502 case EMEXECPOLICY_RECOMPILE_RING0:
503 case EMEXECPOLICY_RECOMPILE_RING3:
504 break;
505 case EMEXECPOLICY_IEM_ALL:
506 pVM->em.s.fIemExecutesAll = pArgs->fEnforce;
507
508 /* For making '.alliem 1' useful during debugging, transition the
509 EMSTATE_DEBUG_GUEST_XXX to EMSTATE_DEBUG_GUEST_IEM. */
510 for (VMCPUID i = 0; i < pVM->cCpus; i++)
511 {
512 PVMCPU pVCpuX = pVM->apCpusR3[i];
513 switch (pVCpuX->em.s.enmState)
514 {
515 case EMSTATE_DEBUG_GUEST_RAW:
516 case EMSTATE_DEBUG_GUEST_HM:
517 case EMSTATE_DEBUG_GUEST_NEM:
518 case EMSTATE_DEBUG_GUEST_REM:
519 Log(("EM: idCpu=%u: %s -> EMSTATE_DEBUG_GUEST_IEM\n", i, emR3GetStateName(pVCpuX->em.s.enmState) ));
520 pVCpuX->em.s.enmState = EMSTATE_DEBUG_GUEST_IEM;
521 break;
522 case EMSTATE_DEBUG_GUEST_IEM:
523 default:
524 break;
525 }
526 }
527 break;
528 default:
529 AssertFailedReturn(VERR_INVALID_PARAMETER);
530 }
531 Log(("EM: Set execution policy (fIemExecutesAll=%RTbool)\n", pVM->em.s.fIemExecutesAll));
532 }
533
534 /*
535 * Force rescheduling if in RAW, HM, NEM, IEM, or REM.
536 */
537 return pVCpu->em.s.enmState == EMSTATE_RAW
538 || pVCpu->em.s.enmState == EMSTATE_HM
539 || pVCpu->em.s.enmState == EMSTATE_NEM
540 || pVCpu->em.s.enmState == EMSTATE_IEM
541 || pVCpu->em.s.enmState == EMSTATE_REM
542 || pVCpu->em.s.enmState == EMSTATE_IEM_THEN_REM
543 ? VINF_EM_RESCHEDULE
544 : VINF_SUCCESS;
545}
546
547
548/**
549 * Changes an execution scheduling policy parameter.
550 *
551 * This is used to enable or disable raw-mode / hardware-virtualization
552 * execution of user and supervisor code.
553 *
554 * @returns VINF_SUCCESS on success.
555 * @returns VINF_EM_RESCHEDULE if a rescheduling might be required.
556 * @returns VERR_INVALID_PARAMETER on an invalid enmPolicy value.
557 *
558 * @param pUVM The user mode VM handle.
559 * @param enmPolicy The scheduling policy to change.
560 * @param fEnforce Whether to enforce the policy or not.
561 */
562VMMR3DECL(int) EMR3SetExecutionPolicy(PUVM pUVM, EMEXECPOLICY enmPolicy, bool fEnforce)
563{
564 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
565 VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE);
566 AssertReturn(enmPolicy > EMEXECPOLICY_INVALID && enmPolicy < EMEXECPOLICY_END, VERR_INVALID_PARAMETER);
567
568 struct EMR3SETEXECPOLICYARGS Args = { enmPolicy, fEnforce };
569 return VMMR3EmtRendezvous(pUVM->pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING, emR3SetExecutionPolicy, &Args);
570}
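/* Caller-side sketch of the policy API above, mirroring what the '.alliem'
 * debugger command mentioned in emR3SetExecutionPolicy does: force everything
 * through IEM while reproducing a problem, then restore the default. pUVM is
 * assumed to be a valid user mode VM handle.
 *
 *     int rc = EMR3SetExecutionPolicy(pUVM, EMEXECPOLICY_IEM_ALL, true);
 *     if (RT_SUCCESS(rc))
 *     {
 *         // ... run the problematic guest code under IEM only ...
 *         rc = EMR3SetExecutionPolicy(pUVM, EMEXECPOLICY_IEM_ALL, false);
 *     }
 */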
571
572
573/**
574 * Queries an execution scheduling policy parameter.
575 *
576 * @returns VBox status code
577 * @param pUVM The user mode VM handle.
578 * @param enmPolicy The scheduling policy to query.
579 * @param pfEnforced Where to return the current value.
580 */
581VMMR3DECL(int) EMR3QueryExecutionPolicy(PUVM pUVM, EMEXECPOLICY enmPolicy, bool *pfEnforced)
582{
583 AssertReturn(enmPolicy > EMEXECPOLICY_INVALID && enmPolicy < EMEXECPOLICY_END, VERR_INVALID_PARAMETER);
584 AssertPtrReturn(pfEnforced, VERR_INVALID_POINTER);
585 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
586 PVM pVM = pUVM->pVM;
587 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
588
589 /* No need to bother EMTs with a query. */
590 switch (enmPolicy)
591 {
592 case EMEXECPOLICY_RECOMPILE_RING0:
593 case EMEXECPOLICY_RECOMPILE_RING3:
594 *pfEnforced = false;
595 break;
596 case EMEXECPOLICY_IEM_ALL:
597 *pfEnforced = pVM->em.s.fIemExecutesAll;
598 break;
599 default:
600 AssertFailedReturn(VERR_INTERNAL_ERROR_2);
601 }
602
603 return VINF_SUCCESS;
604}
605
606
607/**
608 * Queries the main execution engine of the VM.
609 *
610 * @returns VBox status code
611 * @param pUVM The user mode VM handle.
612 * @param pbMainExecutionEngine Where to return the result, VM_EXEC_ENGINE_XXX.
613 */
614VMMR3DECL(int) EMR3QueryMainExecutionEngine(PUVM pUVM, uint8_t *pbMainExecutionEngine)
615{
616 AssertPtrReturn(pbMainExecutionEngine, VERR_INVALID_POINTER);
617 *pbMainExecutionEngine = VM_EXEC_ENGINE_NOT_SET;
618
619 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
620 PVM pVM = pUVM->pVM;
621 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
622
623 *pbMainExecutionEngine = pVM->bMainExecutionEngine;
624 return VINF_SUCCESS;
625}
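/* Query sketch for the getter above; VM_EXEC_ENGINE_NATIVE_API is assumed to
 * be the VM_EXEC_ENGINE_XXX value reported when the NEM backend is active:
 *
 *     uint8_t bEngine = VM_EXEC_ENGINE_NOT_SET;
 *     int rc = EMR3QueryMainExecutionEngine(pUVM, &bEngine);
 *     if (RT_SUCCESS(rc) && bEngine == VM_EXEC_ENGINE_NATIVE_API)
 *         LogRel(("Main execution engine: native API (NEM)\n"));
 */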
626
627
628/**
629 * Raise a fatal error.
630 *
631 * Safely terminate the VM with full state report and stuff. This function
632 * will naturally never return.
633 *
634 * @param pVCpu The cross context virtual CPU structure.
635 * @param rc VBox status code.
636 */
637VMMR3DECL(void) EMR3FatalError(PVMCPU pVCpu, int rc)
638{
639 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
640 longjmp(pVCpu->em.s.u.FatalLongJump, rc);
641}
642
643
644#if defined(LOG_ENABLED) || defined(VBOX_STRICT)
645/**
646 * Gets the EM state name.
647 *
648 * @returns Pointer to a read-only state name.
649 * @param enmState The state.
650 */
651static const char *emR3GetStateName(EMSTATE enmState)
652{
653 switch (enmState)
654 {
655 case EMSTATE_NONE: return "EMSTATE_NONE";
656 case EMSTATE_RAW: return "EMSTATE_RAW";
657 case EMSTATE_HM: return "EMSTATE_HM";
658 case EMSTATE_IEM: return "EMSTATE_IEM";
659 case EMSTATE_REM: return "EMSTATE_REM";
660 case EMSTATE_HALTED: return "EMSTATE_HALTED";
661 case EMSTATE_WAIT_SIPI: return "EMSTATE_WAIT_SIPI";
662 case EMSTATE_SUSPENDED: return "EMSTATE_SUSPENDED";
663 case EMSTATE_TERMINATING: return "EMSTATE_TERMINATING";
664 case EMSTATE_DEBUG_GUEST_RAW: return "EMSTATE_DEBUG_GUEST_RAW";
665 case EMSTATE_DEBUG_GUEST_HM: return "EMSTATE_DEBUG_GUEST_HM";
666 case EMSTATE_DEBUG_GUEST_IEM: return "EMSTATE_DEBUG_GUEST_IEM";
667 case EMSTATE_DEBUG_GUEST_REM: return "EMSTATE_DEBUG_GUEST_REM";
668 case EMSTATE_DEBUG_HYPER: return "EMSTATE_DEBUG_HYPER";
669 case EMSTATE_GURU_MEDITATION: return "EMSTATE_GURU_MEDITATION";
670 case EMSTATE_IEM_THEN_REM: return "EMSTATE_IEM_THEN_REM";
671 case EMSTATE_NEM: return "EMSTATE_NEM";
672 case EMSTATE_DEBUG_GUEST_NEM: return "EMSTATE_DEBUG_GUEST_NEM";
673 default: return "Unknown!";
674 }
675}
676#endif /* LOG_ENABLED || VBOX_STRICT */
677
678
679/**
680 * Handle pending ring-3 I/O port write.
681 *
682 * This is in response to a VINF_EM_PENDING_R3_IOPORT_WRITE status code returned
683 * by EMRZSetPendingIoPortWrite() in ring-0 or raw-mode context.
684 *
685 * @returns Strict VBox status code.
686 * @param pVM The cross context VM structure.
687 * @param pVCpu The cross context virtual CPU structure.
688 */
689VBOXSTRICTRC emR3ExecutePendingIoPortWrite(PVM pVM, PVMCPU pVCpu)
690{
691 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
692
693 /* Get and clear the pending data. */
694 RTIOPORT const uPort = pVCpu->em.s.PendingIoPortAccess.uPort;
695 uint32_t const uValue = pVCpu->em.s.PendingIoPortAccess.uValue;
696 uint8_t const cbValue = pVCpu->em.s.PendingIoPortAccess.cbValue;
697 uint8_t const cbInstr = pVCpu->em.s.PendingIoPortAccess.cbInstr;
698 pVCpu->em.s.PendingIoPortAccess.cbValue = 0;
699
700 /* Assert sanity. */
701 switch (cbValue)
702 {
703 case 1: Assert(!(uValue & UINT32_C(0xffffff00))); break;
704 case 2: Assert(!(uValue & UINT32_C(0xffff0000))); break;
705 case 4: break;
706 default: AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_EM_INTERNAL_ERROR);
707 }
708 AssertReturn(cbInstr <= 15 && cbInstr >= 1, VERR_EM_INTERNAL_ERROR);
709
710 /* Do the work.*/
711 VBOXSTRICTRC rcStrict = IOMIOPortWrite(pVM, pVCpu, uPort, uValue, cbValue);
712 LogFlow(("EM/OUT: %#x, %#x LB %u -> %Rrc\n", uPort, uValue, cbValue, VBOXSTRICTRC_VAL(rcStrict) ));
713 if (IOM_SUCCESS(rcStrict))
714 {
715 pVCpu->cpum.GstCtx.rip += cbInstr;
716 pVCpu->cpum.GstCtx.rflags.Bits.u1RF = 0;
717 }
718 return rcStrict;
719}
720
721
722/**
723 * Handle pending ring-3 I/O port read.
724 *
725 * This is in response to a VINF_EM_PENDING_R3_IOPORT_READ status code returned
726 * by EMRZSetPendingIoPortRead() in ring-0 or raw-mode context.
727 *
728 * @returns Strict VBox status code.
729 * @param pVM The cross context VM structure.
730 * @param pVCpu The cross context virtual CPU structure.
731 */
732VBOXSTRICTRC emR3ExecutePendingIoPortRead(PVM pVM, PVMCPU pVCpu)
733{
734 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_RAX);
735
736 /* Get and clear the pending data. */
737 RTIOPORT const uPort = pVCpu->em.s.PendingIoPortAccess.uPort;
738 uint8_t const cbValue = pVCpu->em.s.PendingIoPortAccess.cbValue;
739 uint8_t const cbInstr = pVCpu->em.s.PendingIoPortAccess.cbInstr;
740 pVCpu->em.s.PendingIoPortAccess.cbValue = 0;
741
742 /* Assert sanity. */
743 switch (cbValue)
744 {
745 case 1: break;
746 case 2: break;
747 case 4: break;
748 default: AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_EM_INTERNAL_ERROR);
749 }
750 AssertReturn(pVCpu->em.s.PendingIoPortAccess.uValue == UINT32_C(0x52454144) /* READ*/, VERR_EM_INTERNAL_ERROR);
751 AssertReturn(cbInstr <= 15 && cbInstr >= 1, VERR_EM_INTERNAL_ERROR);
752
753 /* Do the work.*/
754 uint32_t uValue = 0;
755 VBOXSTRICTRC rcStrict = IOMIOPortRead(pVM, pVCpu, uPort, &uValue, cbValue);
756 LogFlow(("EM/IN: %#x LB %u -> %Rrc, %#x\n", uPort, cbValue, VBOXSTRICTRC_VAL(rcStrict), uValue ));
757 if (IOM_SUCCESS(rcStrict))
758 {
759 if (cbValue == 4)
760 pVCpu->cpum.GstCtx.rax = uValue;
761 else if (cbValue == 2)
762 pVCpu->cpum.GstCtx.ax = (uint16_t)uValue;
763 else
764 pVCpu->cpum.GstCtx.al = (uint8_t)uValue;
765 pVCpu->cpum.GstCtx.rip += cbInstr;
766 pVCpu->cpum.GstCtx.rflags.Bits.u1RF = 0;
767 }
768 return rcStrict;
769}
770
771
772/**
773 * @callback_method_impl{FNVMMEMTRENDEZVOUS,
774 * Worker for emR3ExecuteSplitLockInstruction}
775 */
776static DECLCALLBACK(VBOXSTRICTRC) emR3ExecuteSplitLockInstructionRendezvous(PVM pVM, PVMCPU pVCpu, void *pvUser)
777{
778 /* Only execute on the specified EMT. */
779 if (pVCpu == (PVMCPU)pvUser)
780 {
781 LogFunc(("\n"));
782 VBOXSTRICTRC rcStrict = IEMExecOneIgnoreLock(pVCpu);
783 LogFunc(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
784 if (rcStrict == VINF_IEM_RAISED_XCPT)
785 rcStrict = VINF_SUCCESS;
786 return rcStrict;
787 }
788 RT_NOREF(pVM);
789 return VINF_SUCCESS;
790}
791
792
793/**
794 * Handle an instruction causing a split cacheline lock access in SMP VMs.
795 *
796 * Generally we only get here if the host has split-lock detection enabled and
797 * this caused an \#AC because of something the guest did. If we interpret the
798 * instruction as-is, we'll likely just repeat the split-lock access and
799 * possibly be killed, get a SIGBUS, or trigger a warning followed by extra MSR
800 * changes on context switching (costs a tiny bit). Assuming these \#ACs are
801 * rare to non-existent, we'll do a rendezvous of all EMTs and tell IEM to
802 * disregard the lock prefix when emulating the instruction.
803 *
804 * Yes, we could probably modify the MSR (or MSRs) controlling the detection
805 * feature when entering guest context, but the support for the feature isn't a
806 * 100% given and we'll need the debug-only supdrvOSMsrProberRead and
807 * supdrvOSMsrProberWrite functionality from SUPDrv.cpp to safely detect it.
808 * Thus the approach is to just deal with the spurious \#ACs first and maybe add
809 * proper detection to SUPDrv later if we find it necessary.
810 *
811 * @see @bugref{10052}
812 *
813 * @returns Strict VBox status code.
814 * @param pVM The cross context VM structure.
815 * @param pVCpu The cross context virtual CPU structure.
816 */
817VBOXSTRICTRC emR3ExecuteSplitLockInstruction(PVM pVM, PVMCPU pVCpu)
818{
819 LogFunc(("\n"));
820 return VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE, emR3ExecuteSplitLockInstructionRendezvous, pVCpu);
821}
822
823
824/**
825 * Debug loop.
826 *
827 * @returns VBox status code for EM.
828 * @param pVM The cross context VM structure.
829 * @param pVCpu The cross context virtual CPU structure.
830 * @param rc Current EM VBox status code.
831 */
832static VBOXSTRICTRC emR3Debug(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc)
833{
834 for (;;)
835 {
836 Log(("emR3Debug: rc=%Rrc\n", VBOXSTRICTRC_VAL(rc)));
837 const VBOXSTRICTRC rcLast = rc;
838
839 /*
840 * Debug related RC.
841 */
842 switch (VBOXSTRICTRC_VAL(rc))
843 {
844 /*
845 * Single step an instruction.
846 */
847 case VINF_EM_DBG_STEP:
848 if ( pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_RAW
849 || pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER)
850 AssertLogRelMsgFailedStmt(("Bad EM state."), VERR_EM_INTERNAL_ERROR);
851 else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_HM)
852 rc = EMR3HmSingleInstruction(pVM, pVCpu, 0 /*fFlags*/);
853 else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_NEM)
854 rc = VBOXSTRICTRC_TODO(emR3NemSingleInstruction(pVM, pVCpu, 0 /*fFlags*/));
855#ifdef VBOX_WITH_REM /** @todo fix me? */
856 else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_REM)
857 rc = emR3RemStep(pVM, pVCpu);
858#endif
859 else
860 {
861 rc = IEMExecOne(pVCpu); /** @todo add dedicated interface... */
862 if (rc == VINF_SUCCESS || rc == VINF_EM_RESCHEDULE)
863 rc = VINF_EM_DBG_STEPPED;
864 }
865 break;
866
867 /*
868 * Simple events: stepped, breakpoint, stop/assertion.
869 */
870 case VINF_EM_DBG_STEPPED:
871 rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED);
872 break;
873
874 case VINF_EM_DBG_BREAKPOINT:
875 rc = DBGFR3BpHit(pVM, pVCpu);
876 break;
877
878 case VINF_EM_DBG_STOP:
879 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, NULL, 0, NULL, NULL);
880 break;
881
882 case VINF_EM_DBG_EVENT:
883 rc = DBGFR3EventHandlePending(pVM, pVCpu);
884 break;
885
886 case VINF_EM_DBG_HYPER_STEPPED:
887 rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED_HYPER);
888 break;
889
890 case VINF_EM_DBG_HYPER_BREAKPOINT:
891 rc = DBGFR3EventBreakpoint(pVM, DBGFEVENT_BREAKPOINT_HYPER);
892 break;
893
894 case VINF_EM_DBG_HYPER_ASSERTION:
895 RTPrintf("\nVINF_EM_DBG_HYPER_ASSERTION:\n%s%s\n", VMMR3GetRZAssertMsg1(pVM), VMMR3GetRZAssertMsg2(pVM));
896 RTLogFlush(NULL);
897 rc = DBGFR3EventAssertion(pVM, DBGFEVENT_ASSERTION_HYPER, VMMR3GetRZAssertMsg1(pVM), VMMR3GetRZAssertMsg2(pVM));
898 break;
899
900 /*
901 * Guru meditation.
902 */
903 case VERR_VMM_RING0_ASSERTION: /** @todo Make a guru meditation event! */
904 rc = DBGFR3EventSrc(pVM, DBGFEVENT_FATAL_ERROR, "VERR_VMM_RING0_ASSERTION", 0, NULL, NULL);
905 break;
906 case VERR_REM_TOO_MANY_TRAPS: /** @todo Make a guru meditation event! */
907 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, "VERR_REM_TOO_MANY_TRAPS", 0, NULL, NULL);
908 break;
909 case VINF_EM_TRIPLE_FAULT: /** @todo Make a guru meditation event! */
910 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, "VINF_EM_TRIPLE_FAULT", 0, NULL, NULL);
911 break;
912
913 default: /** @todo don't use default for guru, but make special errors code! */
914 {
915 LogRel(("emR3Debug: rc=%Rrc\n", VBOXSTRICTRC_VAL(rc)));
916 rc = DBGFR3Event(pVM, DBGFEVENT_FATAL_ERROR);
917 break;
918 }
919 }
920
921 /*
922 * Process the result.
923 */
924 switch (VBOXSTRICTRC_VAL(rc))
925 {
926 /*
927 * Continue the debugging loop.
928 */
929 case VINF_EM_DBG_STEP:
930 case VINF_EM_DBG_STOP:
931 case VINF_EM_DBG_EVENT:
932 case VINF_EM_DBG_STEPPED:
933 case VINF_EM_DBG_BREAKPOINT:
934 case VINF_EM_DBG_HYPER_STEPPED:
935 case VINF_EM_DBG_HYPER_BREAKPOINT:
936 case VINF_EM_DBG_HYPER_ASSERTION:
937 break;
938
939 /*
940 * Resuming execution (in some form) has to be done here if we got
941 * a hypervisor debug event.
942 */
943 case VINF_SUCCESS:
944 case VINF_EM_RESUME:
945 case VINF_EM_SUSPEND:
946 case VINF_EM_RESCHEDULE:
947 case VINF_EM_RESCHEDULE_RAW:
948 case VINF_EM_RESCHEDULE_REM:
949 case VINF_EM_HALT:
950 if (pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER)
951 AssertLogRelMsgFailedReturn(("Not implemented\n"), VERR_EM_INTERNAL_ERROR);
952 if (rc == VINF_SUCCESS)
953 rc = VINF_EM_RESCHEDULE;
954 return rc;
955
956 /*
957 * The debugger isn't attached.
958 * We'll simply turn the thing off since that's the easiest thing to do.
959 */
960 case VERR_DBGF_NOT_ATTACHED:
961 switch (VBOXSTRICTRC_VAL(rcLast))
962 {
963 case VINF_EM_DBG_HYPER_STEPPED:
964 case VINF_EM_DBG_HYPER_BREAKPOINT:
965 case VINF_EM_DBG_HYPER_ASSERTION:
966 case VERR_TRPM_PANIC:
967 case VERR_TRPM_DONT_PANIC:
968 case VERR_VMM_RING0_ASSERTION:
969 case VERR_VMM_HYPER_CR3_MISMATCH:
970 case VERR_VMM_RING3_CALL_DISABLED:
971 return rcLast;
972 }
973 return VINF_EM_OFF;
974
975 /*
976 * Status codes terminating the VM in one or another sense.
977 */
978 case VINF_EM_TERMINATE:
979 case VINF_EM_OFF:
980 case VINF_EM_RESET:
981 case VINF_EM_NO_MEMORY:
982 case VINF_EM_RAW_STALE_SELECTOR:
983 case VINF_EM_RAW_IRET_TRAP:
984 case VERR_TRPM_PANIC:
985 case VERR_TRPM_DONT_PANIC:
986 case VERR_IEM_INSTR_NOT_IMPLEMENTED:
987 case VERR_IEM_ASPECT_NOT_IMPLEMENTED:
988 case VERR_VMM_RING0_ASSERTION:
989 case VERR_VMM_HYPER_CR3_MISMATCH:
990 case VERR_VMM_RING3_CALL_DISABLED:
991 case VERR_INTERNAL_ERROR:
992 case VERR_INTERNAL_ERROR_2:
993 case VERR_INTERNAL_ERROR_3:
994 case VERR_INTERNAL_ERROR_4:
995 case VERR_INTERNAL_ERROR_5:
996 case VERR_IPE_UNEXPECTED_STATUS:
997 case VERR_IPE_UNEXPECTED_INFO_STATUS:
998 case VERR_IPE_UNEXPECTED_ERROR_STATUS:
999 return rc;
1000
1001 /*
1002 * The rest is unexpected, and will keep us here.
1003 */
1004 default:
1005 AssertMsgFailed(("Unexpected rc %Rrc!\n", VBOXSTRICTRC_VAL(rc)));
1006 break;
1007 }
1008 } /* debug for ever */
1009}
1010
1011
1012#if defined(VBOX_WITH_REM) || defined(DEBUG)
1013/**
1014 * Steps recompiled code.
1015 *
1016 * @returns VBox status code. The most important ones are: VINF_EM_STEP_EVENT,
1017 * VINF_EM_RESCHEDULE, VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1018 *
1019 * @param pVM The cross context VM structure.
1020 * @param pVCpu The cross context virtual CPU structure.
1021 */
1022static int emR3RemStep(PVM pVM, PVMCPU pVCpu)
1023{
1024 Log3(("emR3RemStep: cs:eip=%04x:%08x\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
1025
1026 int rc = VBOXSTRICTRC_TODO(IEMExecOne(pVCpu)); NOREF(pVM);
1027
1028 Log3(("emR3RemStep: returns %Rrc cs:eip=%04x:%08x\n", rc, CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
1029 return rc;
1030}
1031#endif /* VBOX_WITH_REM || DEBUG */
1032
1033
1034/**
1035 * Executes recompiled code.
1036 *
1037 * This function contains the recompiler version of the inner
1038 * execution loop (the outer loop being in EMR3ExecuteVM()).
1039 *
1040 * @returns VBox status code. The most important ones are: VINF_EM_RESCHEDULE,
1041 * VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1042 *
1043 * @param pVM The cross context VM structure.
1044 * @param pVCpu The cross context virtual CPU structure.
1045 * @param pfFFDone Where to store an indicator telling whether or not
1046 * FFs were done before returning.
1047 *
1048 */
1049static int emR3RemExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
1050{
1051#ifdef LOG_ENABLED
1052 uint32_t cpl = CPUMGetGuestCPL(pVCpu);
1053
1054 if (pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
1055 Log(("EMV86: %04X:%08X IF=%d\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.Bits.u1IF));
1056 else
1057 Log(("EMR%d: %04X:%08X ESP=%08X IF=%d CR0=%x eflags=%x\n", cpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.eflags.Bits.u1IF, (uint32_t)pVCpu->cpum.GstCtx.cr0, pVCpu->cpum.GstCtx.eflags.u));
1058#endif
1059 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatREMTotal, a);
1060
1061#if defined(VBOX_STRICT) && defined(DEBUG_bird)
1062 AssertMsg( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)
1063 || !MMHyperIsInsideArea(pVM, CPUMGetGuestEIP(pVCpu)), /** @todo @bugref{1419} - get flat address. */
1064 ("cs:eip=%RX16:%RX32\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
1065#endif
1066
1067 /*
1068 * Spin till we get a forced action which returns anything but VINF_SUCCESS
1069 * or the REM suggests raw-mode execution.
1070 */
1071 *pfFFDone = false;
1072 uint32_t cLoops = 0;
1073 int rc = VINF_SUCCESS;
1074 for (;;)
1075 {
1076 /*
1077 * Execute REM.
1078 */
1079 if (RT_LIKELY(emR3IsExecutionAllowed(pVM, pVCpu)))
1080 {
1081 STAM_PROFILE_START(&pVCpu->em.s.StatREMExec, c);
1082 rc = VBOXSTRICTRC_TODO(IEMExecLots(pVCpu, 8192 /*cMaxInstructions*/, 4095 /*cPollRate*/, NULL /*pcInstructions*/));
1083 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMExec, c);
1084 }
1085 else
1086 {
1087 /* Give up this time slice; virtual time continues */
1088 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatCapped, u);
1089 RTThreadSleep(5);
1090 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatCapped, u);
1091 rc = VINF_SUCCESS;
1092 }
1093
1094 /*
1095 * Deal with high priority post execution FFs before doing anything
1096 * else. Sync back the state and leave the lock to be on the safe side.
1097 */
1098 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_HIGH_PRIORITY_POST_MASK)
1099 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_MASK))
1100 rc = VBOXSTRICTRC_TODO(emR3HighPriorityPostForcedActions(pVM, pVCpu, rc));
1101
1102 /*
1103 * Process the returned status code.
1104 */
1105 if (rc != VINF_SUCCESS)
1106 {
1107 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
1108 break;
1109 if (rc != VINF_REM_INTERRUPED_FF)
1110 {
1111 /* Try to dodge unimplemented IEM trouble by rescheduling. */
1112 if ( rc == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1113 || rc == VERR_IEM_INSTR_NOT_IMPLEMENTED)
1114 {
1115 EMSTATE enmNewState = emR3Reschedule(pVM, pVCpu);
1116 if (enmNewState != EMSTATE_REM && enmNewState != EMSTATE_IEM_THEN_REM)
1117 {
1118 rc = VINF_EM_RESCHEDULE;
1119 break;
1120 }
1121 }
1122
1123 /*
1124 * Anything which is not known to us means an internal error
1125 * and the termination of the VM!
1126 */
1127 AssertMsg(rc == VERR_REM_TOO_MANY_TRAPS, ("Unknown GC return code: %Rra\n", rc));
1128 break;
1129 }
1130 }
1131
1132
1133 /*
1134 * Check and execute forced actions.
1135 *
1136 * Sync back the VM state and leave the lock before calling any of
1137 * these, you never know what's going to happen here.
1138 */
1139#ifdef VBOX_HIGH_RES_TIMERS_HACK
1140 TMTimerPollVoid(pVM, pVCpu);
1141#endif
1142 AssertCompile(VMCPU_FF_ALL_REM_MASK & VMCPU_FF_TIMER);
1143 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_REM_MASK)
1144 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_ALL_REM_MASK) )
1145 {
1146 STAM_REL_PROFILE_ADV_SUSPEND(&pVCpu->em.s.StatREMTotal, a);
1147 rc = emR3ForcedActions(pVM, pVCpu, rc);
1148 VBOXVMM_EM_FF_ALL_RET(pVCpu, rc);
1149 STAM_REL_PROFILE_ADV_RESUME(&pVCpu->em.s.StatREMTotal, a);
1150 if ( rc != VINF_SUCCESS
1151 && rc != VINF_EM_RESCHEDULE_REM)
1152 {
1153 *pfFFDone = true;
1154 break;
1155 }
1156 }
1157
1158 /*
1159 * Have to check if we can get back to fast execution mode every so often.
1160 */
1161 if (!(++cLoops & 7))
1162 {
1163 EMSTATE enmCheck = emR3Reschedule(pVM, pVCpu);
1164 if ( enmCheck != EMSTATE_REM
1165 && enmCheck != EMSTATE_IEM_THEN_REM)
1166 {
1167 LogFlow(("emR3RemExecute: emR3Reschedule -> %d -> VINF_EM_RESCHEDULE\n", enmCheck));
1168 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatREMTotal, a);
1169 return VINF_EM_RESCHEDULE;
1170 }
1171 Log2(("emR3RemExecute: emR3Reschedule -> %d\n", enmCheck));
1172 }
1173
1174 } /* The Inner Loop, recompiled execution mode version. */
1175
1176 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatREMTotal, a);
1177 return rc;
1178}
1179
1180
1181#ifdef DEBUG
1182
1183int emR3SingleStepExecRem(PVM pVM, PVMCPU pVCpu, uint32_t cIterations)
1184{
1185 EMSTATE enmOldState = pVCpu->em.s.enmState;
1186
1187 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_REM;
1188
1189 Log(("Single step BEGIN:\n"));
1190 for (uint32_t i = 0; i < cIterations; i++)
1191 {
1192 DBGFR3PrgStep(pVCpu);
1193 DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, "RSS");
1194 emR3RemStep(pVM, pVCpu);
1195 if (emR3Reschedule(pVM, pVCpu) != EMSTATE_REM)
1196 break;
1197 }
1198 Log(("Single step END:\n"));
1199 CPUMSetGuestEFlags(pVCpu, CPUMGetGuestEFlags(pVCpu) & ~X86_EFL_TF);
1200 pVCpu->em.s.enmState = enmOldState;
1201 return VINF_EM_RESCHEDULE;
1202}
1203
1204#endif /* DEBUG */
1205
1206
1207/**
1208 * Try to execute the problematic code in IEM first, then fall back on REM if there
1209 * is too much of it or if IEM doesn't implement something.
1210 *
1211 * @returns Strict VBox status code from IEMExecLots.
1212 * @param pVM The cross context VM structure.
1213 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1214 * @param pfFFDone Force flags done indicator.
1215 *
1216 * @thread EMT(pVCpu)
1217 */
1218static VBOXSTRICTRC emR3ExecuteIemThenRem(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
1219{
1220 LogFlow(("emR3ExecuteIemThenRem: %04x:%RGv\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestRIP(pVCpu)));
1221 *pfFFDone = false;
1222
1223 /*
1224 * Execute in IEM for a while.
1225 */
1226 while (pVCpu->em.s.cIemThenRemInstructions < 1024)
1227 {
1228 uint32_t cInstructions;
1229 VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu, 1024 - pVCpu->em.s.cIemThenRemInstructions /*cMaxInstructions*/,
1230 UINT32_MAX/2 /*cPollRate*/, &cInstructions);
1231 pVCpu->em.s.cIemThenRemInstructions += cInstructions;
1232 if (rcStrict != VINF_SUCCESS)
1233 {
1234 if ( rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1235 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
1236 break;
1237
1238 Log(("emR3ExecuteIemThenRem: returns %Rrc after %u instructions\n",
1239 VBOXSTRICTRC_VAL(rcStrict), pVCpu->em.s.cIemThenRemInstructions));
1240 return rcStrict;
1241 }
1242
1243 EMSTATE enmNewState = emR3Reschedule(pVM, pVCpu);
1244 if (enmNewState != EMSTATE_REM && enmNewState != EMSTATE_IEM_THEN_REM)
1245 {
1246 LogFlow(("emR3ExecuteIemThenRem: -> %d (%s) after %u instructions\n",
1247 enmNewState, emR3GetStateName(enmNewState), pVCpu->em.s.cIemThenRemInstructions));
1248 pVCpu->em.s.enmPrevState = pVCpu->em.s.enmState;
1249 pVCpu->em.s.enmState = enmNewState;
1250 return VINF_SUCCESS;
1251 }
1252
1253 /*
1254 * Check for pending actions.
1255 */
1256 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_REM_MASK)
1257 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_ALL_REM_MASK & ~VMCPU_FF_UNHALT))
1258 return VINF_SUCCESS;
1259 }
1260
1261 /*
1262 * Switch to REM.
1263 */
1264 Log(("emR3ExecuteIemThenRem: -> EMSTATE_REM (after %u instructions)\n", pVCpu->em.s.cIemThenRemInstructions));
1265 pVCpu->em.s.enmState = EMSTATE_REM;
1266 return VINF_SUCCESS;
1267}
1268
1269
1270/**
1271 * Decides whether to execute RAW, HWACC or REM.
1272 *
1273 * @returns new EM state
1274 * @param pVM The cross context VM structure.
1275 * @param pVCpu The cross context virtual CPU structure.
1276 */
1277EMSTATE emR3Reschedule(PVM pVM, PVMCPU pVCpu)
1278{
1279 /*
1280 * We stay in the wait for SIPI state unless explicitly told otherwise.
1281 */
1282 if (pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI)
1283 return EMSTATE_WAIT_SIPI;
1284
1285 /*
1286 * Execute everything in IEM?
1287 */
1288 if (pVM->em.s.fIemExecutesAll)
1289 return EMSTATE_IEM;
1290
1291 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
1292 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
1293 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
1294
1295 X86EFLAGS EFlags = pVCpu->cpum.GstCtx.eflags;
1296 if (!VM_IS_RAW_MODE_ENABLED(pVM))
1297 {
1298 if (VM_IS_HM_ENABLED(pVM))
1299 {
1300 if (HMCanExecuteGuest(pVM, pVCpu, &pVCpu->cpum.GstCtx))
1301 return EMSTATE_HM;
1302 }
1303 else if (NEMR3CanExecuteGuest(pVM, pVCpu))
1304 return EMSTATE_NEM;
1305
1306 /*
1307 * Note! Raw mode and hw accelerated mode are incompatible. The latter
1308 * turns off monitoring features essential for raw mode!
1309 */
1310 return EMSTATE_IEM_THEN_REM;
1311 }
1312
1313 /*
1314 * Standard raw-mode:
1315 *
1316 * Here we only support 16 and 32-bit protected mode ring-3 code with no I/O privileges,
1317 * or 32-bit protected mode ring-0 code.
1318 *
1319 * The tests are ordered by the likelihood of being true during normal execution.
1320 */
1321 if (EFlags.u32 & (X86_EFL_TF /* | HF_INHIBIT_IRQ_MASK*/))
1322 {
1323 Log2(("raw mode refused: EFlags=%#x\n", EFlags.u32));
1324 return EMSTATE_REM;
1325 }
1326
1327# ifndef VBOX_RAW_V86
1328 if (EFlags.u32 & X86_EFL_VM) {
1329 Log2(("raw mode refused: VM_MASK\n"));
1330 return EMSTATE_REM;
1331 }
1332# endif
1333
1334 /** @todo check up the X86_CR0_AM flag in respect to raw mode!!! We're probably not emulating it right! */
1335 uint32_t u32CR0 = pVCpu->cpum.GstCtx.cr0;
1336 if ((u32CR0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
1337 {
1338 //Log2(("raw mode refused: %s%s%s\n", (u32CR0 & X86_CR0_PG) ? "" : " !PG", (u32CR0 & X86_CR0_PE) ? "" : " !PE", (u32CR0 & X86_CR0_AM) ? "" : " !AM"));
1339 return EMSTATE_REM;
1340 }
1341
1342 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
1343 {
1344 uint32_t u32Dummy, u32Features;
1345
1346 CPUMGetGuestCpuId(pVCpu, 1, 0, &u32Dummy, &u32Dummy, &u32Dummy, &u32Features);
1347 if (!(u32Features & X86_CPUID_FEATURE_EDX_PAE))
1348 return EMSTATE_REM;
1349 }
1350
1351 unsigned uSS = pVCpu->cpum.GstCtx.ss.Sel;
1352 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM
1353 || (uSS & X86_SEL_RPL) == 3)
1354 {
1355 if (!(EFlags.u32 & X86_EFL_IF))
1356 {
1357 Log2(("raw mode refused: IF (RawR3)\n"));
1358 return EMSTATE_REM;
1359 }
1360
1361 if (!(u32CR0 & X86_CR0_WP))
1362 {
1363 Log2(("raw mode refused: CR0.WP + RawR0\n"));
1364 return EMSTATE_REM;
1365 }
1366 }
1367 else
1368 {
1369 /* Only ring 0 supervisor code. */
1370 if ((uSS & X86_SEL_RPL) != 0)
1371 {
1372 Log2(("raw r0 mode refused: CPL %d\n", uSS & X86_SEL_RPL));
1373 return EMSTATE_REM;
1374 }
1375
1376 // Let's start with pure 32 bits ring 0 code first
1377 /** @todo What's pure 32-bit mode? flat? */
1378 if ( !(pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
1379 || !(pVCpu->cpum.GstCtx.cs.Attr.n.u1DefBig))
1380 {
1381 Log2(("raw r0 mode refused: SS/CS not 32bit\n"));
1382 return EMSTATE_REM;
1383 }
1384
1385 /* Write protection must be turned on, or else the guest can overwrite our hypervisor code and data. */
1386 if (!(u32CR0 & X86_CR0_WP))
1387 {
1388 Log2(("raw r0 mode refused: CR0.WP=0!\n"));
1389 return EMSTATE_REM;
1390 }
1391
1392# if !defined(VBOX_ALLOW_IF0) && !defined(VBOX_RUN_INTERRUPT_GATE_HANDLERS)
1393 if (!(EFlags.u32 & X86_EFL_IF))
1394 {
1395 ////Log2(("R0: IF=0 VIF=%d %08X\n", eip, pVMeflags));
1396 //Log2(("RR0: Interrupts turned off; fall back to emulation\n"));
1397 return EMSTATE_REM;
1398 }
1399# endif
1400
1401# ifndef VBOX_WITH_RAW_RING1
1402 /** @todo still necessary??? */
1403 if (EFlags.Bits.u2IOPL != 0)
1404 {
1405 Log2(("raw r0 mode refused: IOPL %d\n", EFlags.Bits.u2IOPL));
1406 return EMSTATE_REM;
1407 }
1408# endif
1409 }
1410
1411 /*
1412 * Stale hidden selectors means raw-mode is unsafe (being very careful).
1413 */
1414 if (pVCpu->cpum.GstCtx.cs.fFlags & CPUMSELREG_FLAGS_STALE)
1415 {
1416 Log2(("raw mode refused: stale CS\n"));
1417 return EMSTATE_REM;
1418 }
1419 if (pVCpu->cpum.GstCtx.ss.fFlags & CPUMSELREG_FLAGS_STALE)
1420 {
1421 Log2(("raw mode refused: stale SS\n"));
1422 return EMSTATE_REM;
1423 }
1424 if (pVCpu->cpum.GstCtx.ds.fFlags & CPUMSELREG_FLAGS_STALE)
1425 {
1426 Log2(("raw mode refused: stale DS\n"));
1427 return EMSTATE_REM;
1428 }
1429 if (pVCpu->cpum.GstCtx.es.fFlags & CPUMSELREG_FLAGS_STALE)
1430 {
1431 Log2(("raw mode refused: stale ES\n"));
1432 return EMSTATE_REM;
1433 }
1434 if (pVCpu->cpum.GstCtx.fs.fFlags & CPUMSELREG_FLAGS_STALE)
1435 {
1436 Log2(("raw mode refused: stale FS\n"));
1437 return EMSTATE_REM;
1438 }
1439 if (pVCpu->cpum.GstCtx.gs.fFlags & CPUMSELREG_FLAGS_STALE)
1440 {
1441 Log2(("raw mode refused: stale GS\n"));
1442 return EMSTATE_REM;
1443 }
1444
1445# ifdef VBOX_WITH_SAFE_STR
1446 if (pVCpu->cpum.GstCtx.tr.Sel == 0)
1447 {
1448 Log(("Raw mode refused -> TR=0\n"));
1449 return EMSTATE_REM;
1450 }
1451# endif
1452
1453 /*Assert(PGMPhysIsA20Enabled(pVCpu));*/
1454 return EMSTATE_RAW;
1455}
1456
1457
1458/**
1459 * Executes all high priority post execution force actions.
1460 *
1461 * @returns Strict VBox status code. Typically @a rc, but may be upgraded to
1462 * fatal error status code.
1463 *
1464 * @param pVM The cross context VM structure.
1465 * @param pVCpu The cross context virtual CPU structure.
1466 * @param rc The current strict VBox status code rc.
1467 */
1468VBOXSTRICTRC emR3HighPriorityPostForcedActions(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc)
1469{
1470 VBOXVMM_EM_FF_HIGH(pVCpu, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions, VBOXSTRICTRC_VAL(rc));
1471
1472 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PDM_CRITSECT))
1473 PDMCritSectBothFF(pVM, pVCpu);
1474
1475 /* Update CR3 (Nested Paging case for HM). */
1476 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
1477 {
1478 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_EFER, rc);
1479 int const rc2 = PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
1480 if (RT_FAILURE(rc2))
1481 return rc2;
1482 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
1483 }
1484
1485 /* IEM has pending work (typically memory write after INS instruction). */
1486 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
1487 rc = IEMR3ProcessForceFlag(pVM, pVCpu, rc);
1488
1489 /* IOM has pending work (committing an I/O or MMIO write). */
1490 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IOM))
1491 {
1492 rc = IOMR3ProcessForceFlag(pVM, pVCpu, rc);
1493 if (pVCpu->em.s.idxContinueExitRec >= RT_ELEMENTS(pVCpu->em.s.aExitRecords))
1494 { /* half likely, or at least it's a line shorter. */ }
1495 else if (rc == VINF_SUCCESS)
1496 rc = VINF_EM_RESUME_R3_HISTORY_EXEC;
1497 else
1498 pVCpu->em.s.idxContinueExitRec = UINT16_MAX;
1499 }
1500
1501 if (VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
1502 {
1503 if ( rc > VINF_EM_NO_MEMORY
1504 && rc <= VINF_EM_LAST)
1505 rc = VINF_EM_NO_MEMORY;
1506 }
1507
1508 return rc;
1509}
1510
1511
1512/**
1513 * Helper for emR3ForcedActions() for VMX external interrupt VM-exit.
1514 *
1515 * @returns VBox status code.
1516 * @retval VINF_NO_CHANGE if the VMX external interrupt intercept was not active.
1517 * @param pVCpu The cross context virtual CPU structure.
1518 */
1519static int emR3VmxNstGstIntrIntercept(PVMCPU pVCpu)
1520{
1521#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1522 /* Handle the "external interrupt" VM-exit intercept. */
1523 if (CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_EXT_INT_EXIT))
1524 {
1525 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, 0 /* uVector */, true /* fIntPending */);
1526 AssertMsg( rcStrict != VINF_VMX_VMEXIT
1527 && rcStrict != VINF_NO_CHANGE, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1528 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
1529 return VBOXSTRICTRC_TODO(rcStrict);
1530 }
1531#else
1532 RT_NOREF(pVCpu);
1533#endif
1534 return VINF_NO_CHANGE;
1535}
1536
1537
1538/**
1539 * Helper for emR3ForcedActions() for SVM interrupt intercept.
1540 *
1541 * @returns VBox status code.
1542 * @retval VINF_NO_CHANGE if the SVM external interrupt intercept was not active.
1543 * @param pVCpu The cross context virtual CPU structure.
1544 */
1545static int emR3SvmNstGstIntrIntercept(PVMCPU pVCpu)
1546{
1547#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1548 /* Handle the physical interrupt intercept (can be masked by the nested hypervisor). */
1549 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, &pVCpu->cpum.GstCtx, SVM_CTRL_INTERCEPT_INTR))
1550 {
1551 CPUM_ASSERT_NOT_EXTRN(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK);
1552 VBOXSTRICTRC rcStrict = IEMExecSvmVmexit(pVCpu, SVM_EXIT_INTR, 0, 0);
1553 if (RT_SUCCESS(rcStrict))
1554 {
1555 AssertMsg( rcStrict != VINF_SVM_VMEXIT
1556 && rcStrict != VINF_NO_CHANGE, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1557 return VBOXSTRICTRC_VAL(rcStrict);
1558 }
1559
1560 AssertMsgFailed(("INTR #VMEXIT failed! rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1561 return VINF_EM_TRIPLE_FAULT;
1562 }
1563#else
1564 NOREF(pVCpu);
1565#endif
1566 return VINF_NO_CHANGE;
1567}
1568
1569
1570/**
1571 * Helper for emR3ForcedActions() for SVM virtual interrupt intercept.
1572 *
1573 * @returns VBox status code.
1574 * @retval VINF_NO_CHANGE if the SVM virtual interrupt intercept was not active.
1575 * @param pVCpu The cross context virtual CPU structure.
1576 */
1577static int emR3SvmNstGstVirtIntrIntercept(PVMCPU pVCpu)
1578{
1579#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1580 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, &pVCpu->cpum.GstCtx, SVM_CTRL_INTERCEPT_VINTR))
1581 {
1582 CPUM_ASSERT_NOT_EXTRN(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK);
1583 VBOXSTRICTRC rcStrict = IEMExecSvmVmexit(pVCpu, SVM_EXIT_VINTR, 0, 0);
1584 if (RT_SUCCESS(rcStrict))
1585 {
1586 Assert(rcStrict != VINF_SVM_VMEXIT);
1587 return VBOXSTRICTRC_VAL(rcStrict);
1588 }
1589 AssertMsgFailed(("VINTR #VMEXIT failed! rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1590 return VINF_EM_TRIPLE_FAULT;
1591 }
1592#else
1593 NOREF(pVCpu);
1594#endif
1595 return VINF_NO_CHANGE;
1596}
1597
1598
1599/**
1600 * Executes all pending forced actions.
1601 *
1602 * Forced actions can cause execution delays and execution
1603 * rescheduling. Delays are handled using action priority, so that,
1604 * for instance, pending timers aren't scheduled and run until right
1605 * before execution. Rescheduling is handled using return codes; the
1606 * same goes for VM termination, except that in that case we exit
1607 * everything.
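 *
 * The actions are serviced in four groups: normal-priority post-execution
 * actions (EMT rendezvous, VM state checks, DBGF polling, postponed resets,
 * handy page allocation), VM-wide normal-priority actions (PDM queues, DMA,
 * cross-thread requests), per-VCPU normal-priority actions (requests targeted
 * at this VCPU), and finally the high-priority pre-execution actions (timers,
 * APIC updates, interrupt shadows and guest event injection).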
1608 *
1609 * @returns VBox status code of equal or greater importance/severity than rc.
1610 * The most important ones are: VINF_EM_RESCHEDULE,
1611 * VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1612 *
1613 * @param pVM The cross context VM structure.
1614 * @param pVCpu The cross context virtual CPU structure.
1615 * @param rc The current rc.
1616 *
1617 */
1618int emR3ForcedActions(PVM pVM, PVMCPU pVCpu, int rc)
1619{
1620 STAM_REL_PROFILE_START(&pVCpu->em.s.StatForcedActions, a);
1621#ifdef VBOX_STRICT
1622 int rcIrq = VINF_SUCCESS;
1623#endif
1624 int rc2;
1625#define UPDATE_RC() \
1626 do { \
1627 AssertMsg(rc2 <= 0 || (rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST), ("Invalid FF return code: %Rra\n", rc2)); \
1628 if (rc2 == VINF_SUCCESS || rc < VINF_SUCCESS) \
1629 break; \
1630 if (!rc || rc2 < rc) \
1631 rc = rc2; \
1632 } while (0)
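    /* Note: UPDATE_RC() keeps the most important (numerically lowest) informational
       EM status code in rc; it never overrides an error status already in rc and it
       ignores rc2 when that is VINF_SUCCESS, so e.g. a suspend or power-off request
       outranks a plain reschedule. */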
1633 VBOXVMM_EM_FF_ALL(pVCpu, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions, rc);
1634
1635 /*
1636 * Post execution chunk first.
1637 */
1638 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_NORMAL_PRIORITY_POST_MASK)
1639 || (VMCPU_FF_NORMAL_PRIORITY_POST_MASK && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_NORMAL_PRIORITY_POST_MASK)) )
1640 {
1641 /*
1642 * EMT Rendezvous (must be serviced before termination).
1643 */
1644 if (VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
1645 {
1646 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1647 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
1648 UPDATE_RC();
1649 /** @todo HACK ALERT! The following test is to make sure EM+TM
1650 * thinks the VM is stopped/reset before the next VM state change
1651 * is made. We need a better solution for this, or at least make it
1652 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1653 * VINF_EM_SUSPEND). */
1654 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1655 {
1656 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1657 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1658 return rc;
1659 }
1660 }
1661
1662 /*
1663 * State change request (cleared by vmR3SetStateLocked).
1664 */
1665 if (VM_FF_IS_SET(pVM, VM_FF_CHECK_VM_STATE))
1666 {
1667 VMSTATE enmState = VMR3GetState(pVM);
1668 switch (enmState)
1669 {
1670 case VMSTATE_FATAL_ERROR:
1671 case VMSTATE_FATAL_ERROR_LS:
1672 case VMSTATE_GURU_MEDITATION:
1673 case VMSTATE_GURU_MEDITATION_LS:
1674 Log2(("emR3ForcedActions: %s -> VINF_EM_SUSPEND\n", VMGetStateName(enmState) ));
1675 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1676 return VINF_EM_SUSPEND;
1677
1678 case VMSTATE_DESTROYING:
1679 Log2(("emR3ForcedActions: %s -> VINF_EM_TERMINATE\n", VMGetStateName(enmState) ));
1680 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1681 return VINF_EM_TERMINATE;
1682
1683 default:
1684 AssertMsgFailed(("%s\n", VMGetStateName(enmState)));
1685 }
1686 }
1687
1688 /*
1689 * Debugger Facility polling.
1690 */
1691 if ( VM_FF_IS_SET(pVM, VM_FF_DBGF)
1692 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_DBGF) )
1693 {
1694 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1695 rc2 = DBGFR3VMMForcedAction(pVM, pVCpu);
1696 /** @todo why that VINF_EM_DBG_EVENT here? Duplicate info, should be handled
1697 * somewhere before we get here, I would think. */
1698 if (rc == VINF_EM_DBG_EVENT) /* HACK! We should've handled pending debug event. */
1699 rc = rc2;
1700 else
1701 UPDATE_RC();
1702 }
1703
1704 /*
1705 * Postponed reset request.
1706 */
1707 if (VM_FF_TEST_AND_CLEAR(pVM, VM_FF_RESET))
1708 {
1709 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1710 rc2 = VBOXSTRICTRC_TODO(VMR3ResetFF(pVM));
1711 UPDATE_RC();
1712 }
1713
1714 /*
1715 * Out of memory? Putting this after CSAM as it may in theory cause us to run out of memory.
1716 */
1717 if (VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
1718 {
1719 rc2 = PGMR3PhysAllocateHandyPages(pVM);
1720 UPDATE_RC();
1721 if (rc == VINF_EM_NO_MEMORY)
1722 return rc;
1723 }
1724
1725 /* check that we got them all */
1726 AssertCompile(VM_FF_NORMAL_PRIORITY_POST_MASK == (VM_FF_CHECK_VM_STATE | VM_FF_DBGF | VM_FF_RESET | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
1727 AssertCompile(VMCPU_FF_NORMAL_PRIORITY_POST_MASK == VMCPU_FF_DBGF);
1728 }
1729
1730 /*
1731 * Normal priority then.
1732 * (Executed in no particular order.)
1733 */
1734 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_NORMAL_PRIORITY_MASK, VM_FF_PGM_NO_MEMORY))
1735 {
1736 /*
1737 * PDM Queues are pending.
1738 */
1739 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PDM_QUEUES, VM_FF_PGM_NO_MEMORY))
1740 PDMR3QueueFlushAll(pVM);
1741
1742 /*
1743 * PDM DMA transfers are pending.
1744 */
1745 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PDM_DMA, VM_FF_PGM_NO_MEMORY))
1746 PDMR3DmaRun(pVM);
1747
1748 /*
1749 * EMT Rendezvous (make sure they are handled before the requests).
1750 */
1751 if (VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
1752 {
1753 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1754 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
1755 UPDATE_RC();
1756 /** @todo HACK ALERT! The following test is to make sure EM+TM
1757 * thinks the VM is stopped/reset before the next VM state change
1758 * is made. We need a better solution for this, or at least make it
1759 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1760 * VINF_EM_SUSPEND). */
1761 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1762 {
1763 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1764 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1765 return rc;
1766 }
1767 }
1768
1769 /*
1770 * Requests from other threads.
1771 */
1772 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_REQUEST, VM_FF_PGM_NO_MEMORY))
1773 {
1774 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1775 rc2 = VMR3ReqProcessU(pVM->pUVM, VMCPUID_ANY, false /*fPriorityOnly*/);
1776 if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE) /** @todo this shouldn't be necessary */
1777 {
1778 Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
1779 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1780 return rc2;
1781 }
1782 UPDATE_RC();
1783 /** @todo HACK ALERT! The following test is to make sure EM+TM
1784 * thinks the VM is stopped/reset before the next VM state change
1785 * is made. We need a better solution for this, or at least make it
1786 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1787 * VINF_EM_SUSPEND). */
1788 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1789 {
1790 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1791 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1792 return rc;
1793 }
1794 }
1795
1796 /* check that we got them all */
1797 AssertCompile(VM_FF_NORMAL_PRIORITY_MASK == (VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA | VM_FF_EMT_RENDEZVOUS));
1798 }
1799
1800 /*
1801 * Normal priority then. (per-VCPU)
1802 * (Executed in no particular order.)
1803 */
1804 if ( !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY)
1805 && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_NORMAL_PRIORITY_MASK))
1806 {
1807 /*
1808 * Requests from other threads.
1809 */
1810 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_REQUEST))
1811 {
1812 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1813 rc2 = VMR3ReqProcessU(pVM->pUVM, pVCpu->idCpu, false /*fPriorityOnly*/);
1814 if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE || rc2 == VINF_EM_RESET)
1815 {
1816 Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
1817 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1818 return rc2;
1819 }
1820 UPDATE_RC();
1821 /** @todo HACK ALERT! The following test is to make sure EM+TM
1822 * thinks the VM is stopped/reset before the next VM state change
1823 * is made. We need a better solution for this, or at least make it
1824 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1825 * VINF_EM_SUSPEND). */
1826 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1827 {
1828 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1829 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1830 return rc;
1831 }
1832 }
1833
1834 /* check that we got them all */
1835 Assert(!(VMCPU_FF_NORMAL_PRIORITY_MASK & ~VMCPU_FF_REQUEST));
1836 }
1837
1838 /*
1839 * High priority pre execution chunk last.
1840 * (Executed in ascending priority order.)
1841 */
1842 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_HIGH_PRIORITY_PRE_MASK)
1843 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_MASK))
1844 {
1845 /*
1846 * Timers before interrupts.
1847 */
1848 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TIMER)
1849 && !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
1850 TMR3TimerQueuesDo(pVM);
1851
1852 /*
1853 * Pick up asynchronously posted interrupts into the APIC.
1854 */
1855 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
1856 APICUpdatePendingInterrupts(pVCpu);
1857
1858 /*
1859 * The instruction following an emulated STI should *always* be executed!
1860 *
1861 * Note! We intentionally don't clear VMCPU_FF_INHIBIT_INTERRUPTS here if
1862 * the eip is the same as the inhibited instr address. Before we
1863 * are able to execute this instruction in raw mode (iret to
1864 * guest code) an external interrupt might force a world switch
1865 * again. Possibly allowing a guest interrupt to be dispatched
1866 * in the process. This could break the guest. Sounds very
1867 * unlikely, but such timing-sensitive problems are not as rare as
1868 * you might think.
1869 */
1870 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
1871 && !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
1872 {
1873 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP);
1874 if (CPUMGetGuestRIP(pVCpu) != EMGetInhibitInterruptsPC(pVCpu))
1875 {
1876 Log(("Clearing VMCPU_FF_INHIBIT_INTERRUPTS at %RGv - successor %RGv\n", (RTGCPTR)CPUMGetGuestRIP(pVCpu), EMGetInhibitInterruptsPC(pVCpu)));
1877 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
1878 }
1879 else
1880 Log(("Leaving VMCPU_FF_INHIBIT_INTERRUPTS set at %RGv\n", (RTGCPTR)CPUMGetGuestRIP(pVCpu)));
1881 }
1882
1883 /** @todo SMIs. If we implement SMIs, this is where they will have to be
1884 * delivered. */
1885
1886#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1887 if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER))
1888 {
1889 /*
1890 * VMX Nested-guest APIC-write pending (can cause VM-exits).
1891 * Takes priority over even SMI and INIT signals.
1892 * See Intel spec. 29.4.3.2 "APIC-Write Emulation".
1893 */
1894 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
1895 {
1896 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexitApicWrite(pVCpu));
1897 if (rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
1898 UPDATE_RC();
1899 }
1900
1901 /*
1902 * VMX Nested-guest monitor-trap flag (MTF) VM-exit.
1903 * Takes priority over "Traps on the previous instruction".
1904 * See Intel spec. 6.9 "Priority Among Simultaneous Exceptions And Interrupts".
1905 */
1906 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF))
1907 {
1908 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* uExitQual */));
1909 Assert(rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE);
1910 UPDATE_RC();
1911 }
1912
1913 /*
1914 * VMX Nested-guest preemption timer VM-exit.
1915 * Takes priority over NMI-window VM-exits.
1916 */
1917 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
1918 {
1919 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexitPreemptTimer(pVCpu));
1920 Assert(rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE);
1921 UPDATE_RC();
1922 }
1923 }
1924#endif
1925
1926 /*
1927 * Guest event injection.
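 *
 * Events are considered in priority order: VMX NMI-window VM-exits, NMIs,
 * VMX interrupt-window VM-exits, and then external (APIC/PIC and
 * nested-guest) interrupts. All of this is gated on the GIF being set, no
 * interrupt shadow being active, no event already pending in TRPM, and the
 * VM not being out of memory.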
1928 */
1929 Assert(!(pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI)));
1930 bool fWakeupPending = false;
1931 if ( !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY)
1932 && (!rc || rc >= VINF_EM_RESCHEDULE_HM)
1933 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS) /* Interrupt shadows block both NMIs and interrupts. */
1934 && !TRPMHasTrap(pVCpu)) /* An event could already be scheduled for dispatching. */
1935 {
1936 bool fInVmxNonRootMode;
1937 bool fInSvmHwvirtMode;
1938 bool const fInNestedGuest = CPUMIsGuestInNestedHwvirtMode(&pVCpu->cpum.GstCtx);
1939 if (fInNestedGuest)
1940 {
1941 fInVmxNonRootMode = CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx);
1942 fInSvmHwvirtMode = CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.GstCtx);
1943 }
1944 else
1945 {
1946 fInVmxNonRootMode = false;
1947 fInSvmHwvirtMode = false;
1948 }
1949
1950 bool fGif = CPUMGetGuestGif(&pVCpu->cpum.GstCtx);
1951 if (fGif)
1952 {
1953#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1954 /*
1955 * VMX NMI-window VM-exit.
1956 * Takes priority over non-maskable interrupts (NMIs).
1957 * Interrupt shadows block NMI-window VM-exits.
1958 * Any event that is already in TRPM (e.g. injected during VM-entry) takes priority.
1959 *
1960 * See Intel spec. 25.2 "Other Causes Of VM Exits".
1961 * See Intel spec. 26.7.6 "NMI-Window Exiting".
1962 */
1963 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
1964 && !CPUMIsGuestVmxVirtNmiBlocking(&pVCpu->cpum.GstCtx))
1965 {
1966 Assert(CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_NMI_WINDOW_EXIT));
1967 Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
1968 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* uExitQual */));
1969 AssertMsg( rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE
1970 && rc2 != VINF_VMX_VMEXIT
1971 && rc2 != VINF_NO_CHANGE, ("%Rrc\n", rc2));
1972 UPDATE_RC();
1973 }
1974 else
1975#endif
1976 /*
1977 * NMIs (take priority over external interrupts).
1978 */
1979 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI)
1980 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
1981 {
1982#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1983 if ( fInVmxNonRootMode
1984 && CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_NMI_EXIT))
1985 {
1986 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexitXcptNmi(pVCpu));
1987 Assert(rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE);
1988 UPDATE_RC();
1989 }
1990 else
1991#endif
1992#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1993 if ( fInSvmHwvirtMode
1994 && CPUMIsGuestSvmCtrlInterceptSet(pVCpu, &pVCpu->cpum.GstCtx, SVM_CTRL_INTERCEPT_NMI))
1995 {
1996 rc2 = VBOXSTRICTRC_VAL(IEMExecSvmVmexit(pVCpu, SVM_EXIT_NMI, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */));
1997 AssertMsg( rc2 != VINF_SVM_VMEXIT
1998 && rc2 != VINF_NO_CHANGE, ("%Rrc\n", rc2));
1999 UPDATE_RC();
2000 }
2001 else
2002#endif
2003 {
2004 rc2 = TRPMAssertTrap(pVCpu, X86_XCPT_NMI, TRPM_TRAP);
2005 if (rc2 == VINF_SUCCESS)
2006 {
2007 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
2008 fWakeupPending = true;
2009 if (pVM->em.s.fIemExecutesAll)
2010 rc2 = VINF_EM_RESCHEDULE;
2011 else
2012 {
2013 rc2 = HMR3IsActive(pVCpu) ? VINF_EM_RESCHEDULE_HM
2014 : VM_IS_NEM_ENABLED(pVM) ? VINF_EM_RESCHEDULE
2015 : VINF_EM_RESCHEDULE_REM;
2016 }
2017 }
2018 UPDATE_RC();
2019 }
2020 }
2021#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
2022 /*
2023 * VMX Interrupt-window VM-exits.
2024 * Takes priority over external interrupts.
2025 */
2026 else if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
2027 && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
2028 {
2029 Assert(CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INT_WINDOW_EXIT));
2030 Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
2031 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* uExitQual */));
2032 AssertMsg( rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE
2033 && rc2 != VINF_VMX_VMEXIT
2034 && rc2 != VINF_NO_CHANGE, ("%Rrc\n", rc2));
2035 UPDATE_RC();
2036 }
2037#endif
2038#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
2039 /** @todo NSTSVM: Handle this for SVM here too later not when an interrupt is
2040 * actually pending like we currently do. */
2041#endif
2042 /*
2043 * External interrupts.
2044 */
2045 else
2046 {
2047 /*
2048 * VMX: virtual interrupts take priority over physical interrupts.
2049 * SVM: physical interrupts take priority over virtual interrupts.
2050 */
2051 if ( fInVmxNonRootMode
2052 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST)
2053 && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
2054 {
2055 /** @todo NSTVMX: virtual-interrupt delivery. */
2056 rc2 = VINF_SUCCESS;
2057 }
2058 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
2059 && CPUMIsGuestPhysIntrEnabled(pVCpu))
2060 {
2061 Assert(pVCpu->em.s.enmState != EMSTATE_WAIT_SIPI);
2062 if (fInVmxNonRootMode)
2063 rc2 = emR3VmxNstGstIntrIntercept(pVCpu);
2064 else if (fInSvmHwvirtMode)
2065 rc2 = emR3SvmNstGstIntrIntercept(pVCpu);
2066 else
2067 rc2 = VINF_NO_CHANGE;
2068
2069 if (rc2 == VINF_NO_CHANGE)
2070 {
2071 bool fInjected = false;
2072 CPUM_IMPORT_EXTRN_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2073 /** @todo this really isn't nice, should properly handle this */
2074 /* Note! This can still cause a VM-exit (on Intel). */
2075 LogFlow(("Calling TRPMR3InjectEvent: %04x:%08RX64 efl=%#x\n",
2076 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eflags));
2077 rc2 = TRPMR3InjectEvent(pVM, pVCpu, TRPM_HARDWARE_INT, &fInjected);
2078 fWakeupPending = true;
2079 if ( pVM->em.s.fIemExecutesAll
2080 && ( rc2 == VINF_EM_RESCHEDULE_REM
2081 || rc2 == VINF_EM_RESCHEDULE_HM
2082 || rc2 == VINF_EM_RESCHEDULE_RAW))
2083 {
2084 rc2 = VINF_EM_RESCHEDULE;
2085 }
2086#ifdef VBOX_STRICT
2087 if (fInjected)
2088 rcIrq = rc2;
2089#endif
2090 }
2091 UPDATE_RC();
2092 }
2093 else if ( fInSvmHwvirtMode
2094 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST)
2095 && CPUMIsGuestSvmVirtIntrEnabled(pVCpu, &pVCpu->cpum.GstCtx))
2096 {
2097 rc2 = emR3SvmNstGstVirtIntrIntercept(pVCpu);
2098 if (rc2 == VINF_NO_CHANGE)
2099 {
2100 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST);
2101 uint8_t const uNstGstVector = CPUMGetGuestSvmVirtIntrVector(&pVCpu->cpum.GstCtx);
2102 AssertMsg(uNstGstVector > 0 && uNstGstVector <= X86_XCPT_LAST, ("Invalid VINTR %#x\n", uNstGstVector));
2103 TRPMAssertTrap(pVCpu, uNstGstVector, TRPM_HARDWARE_INT);
2104 Log(("EM: Asserting nested-guest virt. hardware intr: %#x\n", uNstGstVector));
2105 rc2 = VINF_EM_RESCHEDULE;
2106#ifdef VBOX_STRICT
2107 rcIrq = rc2;
2108#endif
2109 }
2110 UPDATE_RC();
2111 }
2112 }
2113 }
2114 }
2115
2116 /*
2117 * Allocate handy pages.
2118 */
2119 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PGM_NEED_HANDY_PAGES, VM_FF_PGM_NO_MEMORY))
2120 {
2121 rc2 = PGMR3PhysAllocateHandyPages(pVM);
2122 UPDATE_RC();
2123 }
2124
2125 /*
2126 * Debugger Facility request.
2127 */
2128 if ( ( VM_FF_IS_SET(pVM, VM_FF_DBGF)
2129 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_DBGF) )
2130 && !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY) )
2131 {
2132 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
2133 rc2 = DBGFR3VMMForcedAction(pVM, pVCpu);
2134 UPDATE_RC();
2135 }
2136
2137 /*
2138 * EMT Rendezvous (must be serviced before termination).
2139 */
2140 if ( !fWakeupPending /* don't miss the wakeup from EMSTATE_HALTED! */
2141 && VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
2142 {
2143 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
2144 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
2145 UPDATE_RC();
2146 /** @todo HACK ALERT! The following test is to make sure EM+TM thinks the VM is
2147 * stopped/reset before the next VM state change is made. We need a better
2148 * solution for this, or at least make it possible to do: (rc >= VINF_EM_FIRST
2149 * && rc <= VINF_EM_SUSPEND). */
2150 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
2151 {
2152 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2153 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2154 return rc;
2155 }
2156 }
2157
2158 /*
2159 * State change request (cleared by vmR3SetStateLocked).
2160 */
2161 if ( !fWakeupPending /* don't miss the wakeup from EMSTATE_HALTED! */
2162 && VM_FF_IS_SET(pVM, VM_FF_CHECK_VM_STATE))
2163 {
2164 VMSTATE enmState = VMR3GetState(pVM);
2165 switch (enmState)
2166 {
2167 case VMSTATE_FATAL_ERROR:
2168 case VMSTATE_FATAL_ERROR_LS:
2169 case VMSTATE_GURU_MEDITATION:
2170 case VMSTATE_GURU_MEDITATION_LS:
2171 Log2(("emR3ForcedActions: %s -> VINF_EM_SUSPEND\n", VMGetStateName(enmState) ));
2172 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2173 return VINF_EM_SUSPEND;
2174
2175 case VMSTATE_DESTROYING:
2176 Log2(("emR3ForcedActions: %s -> VINF_EM_TERMINATE\n", VMGetStateName(enmState) ));
2177 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2178 return VINF_EM_TERMINATE;
2179
2180 default:
2181 AssertMsgFailed(("%s\n", VMGetStateName(enmState)));
2182 }
2183 }
2184
2185 /*
2186 * Out of memory? Since most of our fellow high priority actions may cause us
2187 * to run out of memory, we're employing VM_FF_IS_PENDING_EXCEPT and putting this
2188 * at the end rather than the start. Also, VM_FF_TERMINATE has higher priority
2189 * than us since we can terminate without allocating more memory.
2190 */
2191 if (VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
2192 {
2193 rc2 = PGMR3PhysAllocateHandyPages(pVM);
2194 UPDATE_RC();
2195 if (rc == VINF_EM_NO_MEMORY)
2196 return rc;
2197 }
2198
2199 /*
2200 * If the virtual sync clock is still stopped, make TM restart it.
2201 */
2202 if (VM_FF_IS_SET(pVM, VM_FF_TM_VIRTUAL_SYNC))
2203 TMR3VirtualSyncFF(pVM, pVCpu);
2204
2205#ifdef DEBUG
2206 /*
2207 * Debug, pause the VM.
2208 */
2209 if (VM_FF_IS_SET(pVM, VM_FF_DEBUG_SUSPEND))
2210 {
2211 VM_FF_CLEAR(pVM, VM_FF_DEBUG_SUSPEND);
2212 Log(("emR3ForcedActions: returns VINF_EM_SUSPEND\n"));
2213 return VINF_EM_SUSPEND;
2214 }
2215#endif
2216
2217 /* check that we got them all */
2218 AssertCompile(VM_FF_HIGH_PRIORITY_PRE_MASK == (VM_FF_TM_VIRTUAL_SYNC | VM_FF_DBGF | VM_FF_CHECK_VM_STATE | VM_FF_DEBUG_SUSPEND | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
2219 AssertCompile(VMCPU_FF_HIGH_PRIORITY_PRE_MASK == (VMCPU_FF_TIMER | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_INHIBIT_INTERRUPTS | VMCPU_FF_DBGF | VMCPU_FF_INTERRUPT_NESTED_GUEST | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_PREEMPT_TIMER | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW));
2220 }
2221
2222#undef UPDATE_RC
2223 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2224 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2225 Assert(rcIrq == VINF_SUCCESS || rcIrq == rc);
2226 return rc;
2227}
2228
2229
2230/**
2231 * Check whether the preset execution time cap still allows guest execution to be scheduled.
2232 *
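 * When uCpuExecutionCap is not 100, the EMT's accumulated kernel+user CPU
 * time is tracked per EM_TIME_SLICE millisecond window; execution is denied
 * once it exceeds (EM_TIME_SLICE * uCpuExecutionCap) / 100 within the current
 * slice.
 *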
2233 * @returns true if allowed, false otherwise
2234 * @param pVM The cross context VM structure.
2235 * @param pVCpu The cross context virtual CPU structure.
2236 */
2237bool emR3IsExecutionAllowed(PVM pVM, PVMCPU pVCpu)
2238{
2239 uint64_t u64UserTime, u64KernelTime;
2240
2241 if ( pVM->uCpuExecutionCap != 100
2242 && RT_SUCCESS(RTThreadGetExecutionTimeMilli(&u64KernelTime, &u64UserTime)))
2243 {
2244 uint64_t u64TimeNow = RTTimeMilliTS();
2245 if (pVCpu->em.s.u64TimeSliceStart + EM_TIME_SLICE < u64TimeNow)
2246 {
2247 /* New time slice. */
2248 pVCpu->em.s.u64TimeSliceStart = u64TimeNow;
2249 pVCpu->em.s.u64TimeSliceStartExec = u64KernelTime + u64UserTime;
2250 pVCpu->em.s.u64TimeSliceExec = 0;
2251 }
2252 pVCpu->em.s.u64TimeSliceExec = u64KernelTime + u64UserTime - pVCpu->em.s.u64TimeSliceStartExec;
2253
2254 Log2(("emR3IsExecutionAllowed: start=%RX64 startexec=%RX64 exec=%RX64 (cap=%x)\n", pVCpu->em.s.u64TimeSliceStart, pVCpu->em.s.u64TimeSliceStartExec, pVCpu->em.s.u64TimeSliceExec, (EM_TIME_SLICE * pVM->uCpuExecutionCap) / 100));
2255 if (pVCpu->em.s.u64TimeSliceExec >= (EM_TIME_SLICE * pVM->uCpuExecutionCap) / 100)
2256 return false;
2257 }
2258 return true;
2259}
2260
2261
2262/**
2263 * Execute VM.
2264 *
2265 * This function is the main loop of the VM. The emulation thread
2266 * calls this function when the VM has been successfully constructed
2267 * and we're ready to execute the VM.
2268 *
2269 * Returning from this function means that the VM is turned off or
2270 * suspended (state already saved) and deconstruction is next in line.
2271 *
2272 * All interaction from other threads is done using forced actions
2273 * and signalling of the wait object.
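 *
 * The outer loop below services forced actions, switches between the
 * execution engines (HM, NEM, IEM and IEM-then-REM) according to the
 * returned status codes, and only returns on power off, termination,
 * suspend, an out-of-memory condition, guru meditation or a fatal error.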
2274 *
2275 * @returns VBox status code; informational status codes may indicate failure.
2276 * @param pVM The cross context VM structure.
2277 * @param pVCpu The cross context virtual CPU structure.
2278 */
2279VMMR3_INT_DECL(int) EMR3ExecuteVM(PVM pVM, PVMCPU pVCpu)
2280{
2281 Log(("EMR3ExecuteVM: pVM=%p enmVMState=%d (%s) enmState=%d (%s) enmPrevState=%d (%s)\n",
2282 pVM,
2283 pVM->enmVMState, VMR3GetStateName(pVM->enmVMState),
2284 pVCpu->em.s.enmState, emR3GetStateName(pVCpu->em.s.enmState),
2285 pVCpu->em.s.enmPrevState, emR3GetStateName(pVCpu->em.s.enmPrevState) ));
2286 VM_ASSERT_EMT(pVM);
2287 AssertMsg( pVCpu->em.s.enmState == EMSTATE_NONE
2288 || pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI
2289 || pVCpu->em.s.enmState == EMSTATE_SUSPENDED,
2290 ("%s\n", emR3GetStateName(pVCpu->em.s.enmState)));
2291
2292 int rc = setjmp(pVCpu->em.s.u.FatalLongJump);
2293 if (rc == 0)
2294 {
2295 /*
2296 * Start the virtual time.
2297 */
2298 TMR3NotifyResume(pVM, pVCpu);
2299
2300 /*
2301 * The Outer Main Loop.
2302 */
2303 bool fFFDone = false;
2304
2305 /* Reschedule right away to start in the right state. */
2306 rc = VINF_SUCCESS;
2307
2308 /* If resuming after a pause or a state load, restore the previous
2309 state or else we'll start executing code. Else, just reschedule. */
2310 if ( pVCpu->em.s.enmState == EMSTATE_SUSPENDED
2311 && ( pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
2312 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED))
2313 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
2314 else
2315 pVCpu->em.s.enmState = emR3Reschedule(pVM, pVCpu);
2316 pVCpu->em.s.cIemThenRemInstructions = 0;
2317 Log(("EMR3ExecuteVM: enmState=%s\n", emR3GetStateName(pVCpu->em.s.enmState)));
2318
2319 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2320 for (;;)
2321 {
2322 /*
2323 * Before we can schedule anything (we're here because
2324 * scheduling is required) we must service any pending
2325 * forced actions to avoid any pending action causing
2326 * immediate rescheduling upon entering an inner loop.
2327 *
2328 * Do forced actions.
2329 */
2330 if ( !fFFDone
2331 && RT_SUCCESS(rc)
2332 && rc != VINF_EM_TERMINATE
2333 && rc != VINF_EM_OFF
2334 && ( VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_REM_MASK)
2335 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_ALL_REM_MASK & ~VMCPU_FF_UNHALT)))
2336 {
2337 rc = emR3ForcedActions(pVM, pVCpu, rc);
2338 VBOXVMM_EM_FF_ALL_RET(pVCpu, rc);
2339 }
2340 else if (fFFDone)
2341 fFFDone = false;
2342
2343 /*
2344 * Now what to do?
2345 */
2346 Log2(("EMR3ExecuteVM: rc=%Rrc\n", rc));
2347 EMSTATE const enmOldState = pVCpu->em.s.enmState;
2348 switch (rc)
2349 {
2350 /*
2351 * Keep doing what we're currently doing.
2352 */
2353 case VINF_SUCCESS:
2354 break;
2355
2356 /*
2357 * Reschedule - to raw-mode execution.
2358 */
2359/** @todo r=bird: consider merging VINF_EM_RESCHEDULE_RAW with VINF_EM_RESCHEDULE_HM, they serve the same purpose here at least. */
2360 case VINF_EM_RESCHEDULE_RAW:
2361 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2362 if (VM_IS_RAW_MODE_ENABLED(pVM))
2363 {
2364 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_RAW: %d -> %d (EMSTATE_RAW)\n", enmOldState, EMSTATE_RAW));
2365 pVCpu->em.s.enmState = EMSTATE_RAW;
2366 }
2367 else
2368 {
2369 AssertLogRelFailed();
2370 pVCpu->em.s.enmState = EMSTATE_NONE;
2371 }
2372 break;
2373
2374 /*
2375 * Reschedule - to HM or NEM.
2376 */
2377 case VINF_EM_RESCHEDULE_HM:
2378 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2379 if (VM_IS_HM_ENABLED(pVM))
2380 {
2381 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_HM: %d -> %d (EMSTATE_HM)\n", enmOldState, EMSTATE_HM));
2382 pVCpu->em.s.enmState = EMSTATE_HM;
2383 }
2384 else if (VM_IS_NEM_ENABLED(pVM))
2385 {
2386 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_HM: %d -> %d (EMSTATE_NEM)\n", enmOldState, EMSTATE_NEM));
2387 pVCpu->em.s.enmState = EMSTATE_NEM;
2388 }
2389 else
2390 {
2391 AssertLogRelFailed();
2392 pVCpu->em.s.enmState = EMSTATE_NONE;
2393 }
2394 break;
2395
2396 /*
2397 * Reschedule - to recompiled execution.
2398 */
2399 case VINF_EM_RESCHEDULE_REM:
2400 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2401 if (!VM_IS_RAW_MODE_ENABLED(pVM))
2402 {
2403 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_REM: %d -> %d (EMSTATE_IEM_THEN_REM)\n",
2404 enmOldState, EMSTATE_IEM_THEN_REM));
2405 if (pVCpu->em.s.enmState != EMSTATE_IEM_THEN_REM)
2406 {
2407 pVCpu->em.s.enmState = EMSTATE_IEM_THEN_REM;
2408 pVCpu->em.s.cIemThenRemInstructions = 0;
2409 }
2410 }
2411 else
2412 {
2413 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_REM: %d -> %d (EMSTATE_REM)\n", enmOldState, EMSTATE_REM));
2414 pVCpu->em.s.enmState = EMSTATE_REM;
2415 }
2416 break;
2417
2418 /*
2419 * Resume.
2420 */
2421 case VINF_EM_RESUME:
2422 Log2(("EMR3ExecuteVM: VINF_EM_RESUME: %d -> VINF_EM_RESCHEDULE\n", enmOldState));
2423 /* Don't reschedule in the halted or wait for SIPI case. */
2424 if ( pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
2425 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED)
2426 {
2427 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
2428 break;
2429 }
2430 /* fall through and get scheduled. */
2431 RT_FALL_THRU();
2432
2433 /*
2434 * Reschedule.
2435 */
2436 case VINF_EM_RESCHEDULE:
2437 {
2438 EMSTATE enmState = emR3Reschedule(pVM, pVCpu);
2439 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE: %d -> %d (%s)\n", enmOldState, enmState, emR3GetStateName(enmState)));
2440 if (pVCpu->em.s.enmState != enmState && enmState == EMSTATE_IEM_THEN_REM)
2441 pVCpu->em.s.cIemThenRemInstructions = 0;
2442 pVCpu->em.s.enmState = enmState;
2443 break;
2444 }
2445
2446 /*
2447 * Halted.
2448 */
2449 case VINF_EM_HALT:
2450 Log2(("EMR3ExecuteVM: VINF_EM_HALT: %d -> %d\n", enmOldState, EMSTATE_HALTED));
2451 pVCpu->em.s.enmState = EMSTATE_HALTED;
2452 break;
2453
2454 /*
2455 * Switch to the wait for SIPI state (application processor only)
2456 */
2457 case VINF_EM_WAIT_SIPI:
2458 Assert(pVCpu->idCpu != 0);
2459 Log2(("EMR3ExecuteVM: VINF_EM_WAIT_SIPI: %d -> %d\n", enmOldState, EMSTATE_WAIT_SIPI));
2460 pVCpu->em.s.enmState = EMSTATE_WAIT_SIPI;
2461 break;
2462
2463
2464 /*
2465 * Suspend.
2466 */
2467 case VINF_EM_SUSPEND:
2468 Log2(("EMR3ExecuteVM: VINF_EM_SUSPEND: %d -> %d\n", enmOldState, EMSTATE_SUSPENDED));
2469 Assert(enmOldState != EMSTATE_SUSPENDED);
2470 pVCpu->em.s.enmPrevState = enmOldState;
2471 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
2472 break;
2473
2474 /*
2475 * Reset.
2476 * We might end up doing a double reset for now; we'll have to clean up the mess later.
2477 */
2478 case VINF_EM_RESET:
2479 {
2480 if (pVCpu->idCpu == 0)
2481 {
2482 EMSTATE enmState = emR3Reschedule(pVM, pVCpu);
2483 Log2(("EMR3ExecuteVM: VINF_EM_RESET: %d -> %d (%s)\n", enmOldState, enmState, emR3GetStateName(enmState)));
2484 if (pVCpu->em.s.enmState != enmState && enmState == EMSTATE_IEM_THEN_REM)
2485 pVCpu->em.s.cIemThenRemInstructions = 0;
2486 pVCpu->em.s.enmState = enmState;
2487 }
2488 else
2489 {
2490 /* All other VCPUs go into the wait for SIPI state. */
2491 pVCpu->em.s.enmState = EMSTATE_WAIT_SIPI;
2492 }
2493 break;
2494 }
2495
2496 /*
2497 * Power Off.
2498 */
2499 case VINF_EM_OFF:
2500 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2501 Log2(("EMR3ExecuteVM: returns VINF_EM_OFF (%d -> %d)\n", enmOldState, EMSTATE_TERMINATING));
2502 TMR3NotifySuspend(pVM, pVCpu);
2503 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2504 return rc;
2505
2506 /*
2507 * Terminate the VM.
2508 */
2509 case VINF_EM_TERMINATE:
2510 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2511 Log(("EMR3ExecuteVM returns VINF_EM_TERMINATE (%d -> %d)\n", enmOldState, EMSTATE_TERMINATING));
2512 if (pVM->enmVMState < VMSTATE_DESTROYING) /* ugly */
2513 TMR3NotifySuspend(pVM, pVCpu);
2514 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2515 return rc;
2516
2517
2518 /*
2519 * Out of memory, suspend the VM and stuff.
2520 */
2521 case VINF_EM_NO_MEMORY:
2522 Log2(("EMR3ExecuteVM: VINF_EM_NO_MEMORY: %d -> %d\n", enmOldState, EMSTATE_SUSPENDED));
2523 Assert(enmOldState != EMSTATE_SUSPENDED);
2524 pVCpu->em.s.enmPrevState = enmOldState;
2525 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
2526 TMR3NotifySuspend(pVM, pVCpu);
2527 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2528
2529 rc = VMSetRuntimeError(pVM, VMSETRTERR_FLAGS_SUSPEND, "HostMemoryLow",
2530 N_("Unable to allocate and lock memory. The virtual machine will be paused. Please close applications to free up memory or close the VM"));
2531 if (rc != VINF_EM_SUSPEND)
2532 {
2533 if (RT_SUCCESS_NP(rc))
2534 {
2535 AssertLogRelMsgFailed(("%Rrc\n", rc));
2536 rc = VERR_EM_INTERNAL_ERROR;
2537 }
2538 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2539 }
2540 return rc;
2541
2542 /*
2543 * Guest debug events.
2544 */
2545 case VINF_EM_DBG_STEPPED:
2546 case VINF_EM_DBG_STOP:
2547 case VINF_EM_DBG_EVENT:
2548 case VINF_EM_DBG_BREAKPOINT:
2549 case VINF_EM_DBG_STEP:
2550 if (enmOldState == EMSTATE_RAW)
2551 {
2552 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_RAW));
2553 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_RAW;
2554 }
2555 else if (enmOldState == EMSTATE_HM)
2556 {
2557 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_HM));
2558 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_HM;
2559 }
2560 else if (enmOldState == EMSTATE_NEM)
2561 {
2562 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_NEM));
2563 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_NEM;
2564 }
2565 else if (enmOldState == EMSTATE_REM)
2566 {
2567 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_REM));
2568 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_REM;
2569 }
2570 else
2571 {
2572 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_IEM));
2573 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_IEM;
2574 }
2575 break;
2576
2577 /*
2578 * Hypervisor debug events.
2579 */
2580 case VINF_EM_DBG_HYPER_STEPPED:
2581 case VINF_EM_DBG_HYPER_BREAKPOINT:
2582 case VINF_EM_DBG_HYPER_ASSERTION:
2583 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_HYPER));
2584 pVCpu->em.s.enmState = EMSTATE_DEBUG_HYPER;
2585 break;
2586
2587 /*
2588 * Triple fault.
2589 */
2590 case VINF_EM_TRIPLE_FAULT:
2591 if (!pVM->em.s.fGuruOnTripleFault)
2592 {
2593 Log(("EMR3ExecuteVM: VINF_EM_TRIPLE_FAULT: CPU reset...\n"));
2594 rc = VBOXSTRICTRC_TODO(VMR3ResetTripleFault(pVM));
2595 Log2(("EMR3ExecuteVM: VINF_EM_TRIPLE_FAULT: %d -> %d (rc=%Rrc)\n", enmOldState, pVCpu->em.s.enmState, rc));
2596 continue;
2597 }
2598 /* Else fall through and trigger a guru. */
2599 RT_FALL_THRU();
2600
2601 case VERR_VMM_RING0_ASSERTION:
2602 Log(("EMR3ExecuteVM: %Rrc: %d -> %d (EMSTATE_GURU_MEDITATION)\n", rc, enmOldState, EMSTATE_GURU_MEDITATION));
2603 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2604 break;
2605
2606 /*
2607 * Any error code showing up here other than the ones we
2608 * know and process above is considered to be FATAL.
2609 *
2610 * Unknown warnings and informational status codes are also
2611 * included in this.
2612 */
2613 default:
2614 if (RT_SUCCESS_NP(rc))
2615 {
2616 AssertMsgFailed(("Unexpected warning or informational status code %Rra!\n", rc));
2617 rc = VERR_EM_INTERNAL_ERROR;
2618 }
2619 Log(("EMR3ExecuteVM: %Rrc: %d -> %d (EMSTATE_GURU_MEDITATION)\n", rc, enmOldState, EMSTATE_GURU_MEDITATION));
2620 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2621 break;
2622 }
2623
2624 /*
2625 * Act on state transition.
2626 */
2627 EMSTATE const enmNewState = pVCpu->em.s.enmState;
2628 if (enmOldState != enmNewState)
2629 {
2630 VBOXVMM_EM_STATE_CHANGED(pVCpu, enmOldState, enmNewState, rc);
2631
2632 /* Clear MWait flags and the unhalt FF. */
2633 if ( enmOldState == EMSTATE_HALTED
2634 && ( (pVCpu->em.s.MWait.fWait & EMMWAIT_FLAG_ACTIVE)
2635 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_UNHALT))
2636 && ( enmNewState == EMSTATE_RAW
2637 || enmNewState == EMSTATE_HM
2638 || enmNewState == EMSTATE_NEM
2639 || enmNewState == EMSTATE_REM
2640 || enmNewState == EMSTATE_IEM_THEN_REM
2641 || enmNewState == EMSTATE_DEBUG_GUEST_RAW
2642 || enmNewState == EMSTATE_DEBUG_GUEST_HM
2643 || enmNewState == EMSTATE_DEBUG_GUEST_NEM
2644 || enmNewState == EMSTATE_DEBUG_GUEST_IEM
2645 || enmNewState == EMSTATE_DEBUG_GUEST_REM) )
2646 {
2647 if (pVCpu->em.s.MWait.fWait & EMMWAIT_FLAG_ACTIVE)
2648 {
2649 LogFlow(("EMR3ExecuteVM: Clearing MWAIT\n"));
2650 pVCpu->em.s.MWait.fWait &= ~(EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0);
2651 }
2652 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_UNHALT))
2653 {
2654 LogFlow(("EMR3ExecuteVM: Clearing UNHALT\n"));
2655 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_UNHALT);
2656 }
2657 }
2658 }
2659 else
2660 VBOXVMM_EM_STATE_UNCHANGED(pVCpu, enmNewState, rc);
2661
2662 STAM_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x); /* (skip this in release) */
2663 STAM_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2664
2665 /*
2666 * Act on the new state.
2667 */
2668 switch (enmNewState)
2669 {
2670 /*
2671 * Execute raw.
2672 */
2673 case EMSTATE_RAW:
2674 AssertLogRelMsgFailed(("%Rrc\n", rc));
2675 rc = VERR_EM_INTERNAL_ERROR;
2676 break;
2677
2678 /*
2679 * Execute hardware accelerated raw.
2680 */
2681 case EMSTATE_HM:
2682 rc = emR3HmExecute(pVM, pVCpu, &fFFDone);
2683 break;
2684
2685 /*
2686 * Execute using the native execution manager (NEM).
2687 */
2688 case EMSTATE_NEM:
2689 rc = VBOXSTRICTRC_TODO(emR3NemExecute(pVM, pVCpu, &fFFDone));
2690 break;
2691
2692 /*
2693 * Execute recompiled.
2694 */
2695 case EMSTATE_REM:
2696 rc = emR3RemExecute(pVM, pVCpu, &fFFDone);
2697 Log2(("EMR3ExecuteVM: emR3RemExecute -> %Rrc\n", rc));
2698 break;
2699
2700 /*
2701 * Execute in the interpreter.
2702 */
2703 case EMSTATE_IEM:
2704 {
2705 uint32_t cInstructions = 0;
2706#if 0 /* For testing purposes. */
2707 STAM_PROFILE_START(&pVCpu->em.s.StatHmExec, x1);
2708 rc = VBOXSTRICTRC_TODO(EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE));
2709 STAM_PROFILE_STOP(&pVCpu->em.s.StatHmExec, x1);
2710 if (rc == VINF_EM_DBG_STEPPED || rc == VINF_EM_RESCHEDULE_HM || rc == VINF_EM_RESCHEDULE_REM || rc == VINF_EM_RESCHEDULE_RAW)
2711 rc = VINF_SUCCESS;
2712 else if (rc == VERR_EM_CANNOT_EXEC_GUEST)
2713#endif
2714 rc = VBOXSTRICTRC_TODO(IEMExecLots(pVCpu, 4096 /*cMaxInstructions*/, 2047 /*cPollRate*/, &cInstructions));
2715 if (pVM->em.s.fIemExecutesAll)
2716 {
2717 Assert(rc != VINF_EM_RESCHEDULE_REM);
2718 Assert(rc != VINF_EM_RESCHEDULE_RAW);
2719 Assert(rc != VINF_EM_RESCHEDULE_HM);
2720#ifdef VBOX_HIGH_RES_TIMERS_HACK
2721 if (cInstructions < 2048)
2722 TMTimerPollVoid(pVM, pVCpu);
2723#endif
2724 }
2725 fFFDone = false;
2726 break;
2727 }
2728
2729 /*
2730 * Execute in IEM, hoping we can quickly switch back to HM
2731 * or RAW execution. If our hopes fail, we go to REM.
2732 */
2733 case EMSTATE_IEM_THEN_REM:
2734 {
2735 STAM_PROFILE_START(&pVCpu->em.s.StatIEMThenREM, pIemThenRem);
2736 rc = VBOXSTRICTRC_TODO(emR3ExecuteIemThenRem(pVM, pVCpu, &fFFDone));
2737 STAM_PROFILE_STOP(&pVCpu->em.s.StatIEMThenREM, pIemThenRem);
2738 break;
2739 }
2740
2741 /*
2742 * Application processor execution halted until SIPI.
2743 */
2744 case EMSTATE_WAIT_SIPI:
2745 /* no break */
2746 /*
2747 * hlt - execution halted until interrupt.
2748 */
2749 case EMSTATE_HALTED:
2750 {
2751 STAM_REL_PROFILE_START(&pVCpu->em.s.StatHalted, y);
2752 /* If HM (or someone else) stores a pending interrupt in
2753 TRPM, it must be dispatched ASAP without any halting.
2754 Anything pending in TRPM has been accepted and the CPU
2755 should already be in the right state to receive it. */
2756 if (TRPMHasTrap(pVCpu))
2757 rc = VINF_EM_RESCHEDULE;
2758 /* MWAIT has a special extension where it's woken up when
2759 an interrupt is pending even when IF=0. */
2760 else if ( (pVCpu->em.s.MWait.fWait & (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
2761 == (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
2762 {
2763 rc = VMR3WaitHalted(pVM, pVCpu, false /*fIgnoreInterrupts*/);
2764 if (rc == VINF_SUCCESS)
2765 {
2766 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
2767 APICUpdatePendingInterrupts(pVCpu);
2768
2769 if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC
2770 | VMCPU_FF_INTERRUPT_NESTED_GUEST
2771 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT))
2772 {
2773 Log(("EMR3ExecuteVM: Triggering reschedule on pending IRQ after MWAIT\n"));
2774 rc = VINF_EM_RESCHEDULE;
2775 }
2776 }
2777 }
2778 else
2779 {
2780 rc = VMR3WaitHalted(pVM, pVCpu, !(CPUMGetGuestEFlags(pVCpu) & X86_EFL_IF));
2781 /* We're only interested in NMIs/SMIs here, which have their own FFs, so we don't need to
2782 check VMCPU_FF_UPDATE_APIC here. */
2783 if ( rc == VINF_SUCCESS
2784 && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT))
2785 {
2786 Log(("EMR3ExecuteVM: Triggering reschedule on pending NMI/SMI/UNHALT after HLT\n"));
2787 rc = VINF_EM_RESCHEDULE;
2788 }
2789 }
2790
2791 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatHalted, y);
2792 break;
2793 }
2794
2795 /*
2796 * Suspended - return to VM.cpp.
2797 */
2798 case EMSTATE_SUSPENDED:
2799 TMR3NotifySuspend(pVM, pVCpu);
2800 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2801 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2802 return VINF_EM_SUSPEND;
2803
2804 /*
2805 * Debugging in the guest.
2806 */
2807 case EMSTATE_DEBUG_GUEST_RAW:
2808 case EMSTATE_DEBUG_GUEST_HM:
2809 case EMSTATE_DEBUG_GUEST_NEM:
2810 case EMSTATE_DEBUG_GUEST_IEM:
2811 case EMSTATE_DEBUG_GUEST_REM:
2812 TMR3NotifySuspend(pVM, pVCpu);
2813 rc = VBOXSTRICTRC_TODO(emR3Debug(pVM, pVCpu, rc));
2814 TMR3NotifyResume(pVM, pVCpu);
2815 Log2(("EMR3ExecuteVM: emR3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
2816 break;
2817
2818 /*
2819 * Debugging in the hypervisor.
2820 */
2821 case EMSTATE_DEBUG_HYPER:
2822 {
2823 TMR3NotifySuspend(pVM, pVCpu);
2824 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2825
2826 rc = VBOXSTRICTRC_TODO(emR3Debug(pVM, pVCpu, rc));
2827 Log2(("EMR3ExecuteVM: emR3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
2828 if (rc != VINF_SUCCESS)
2829 {
2830 if (rc == VINF_EM_OFF || rc == VINF_EM_TERMINATE)
2831 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2832 else
2833 {
2834 /* switch to guru meditation mode */
2835 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2836 VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
2837 VMMR3FatalDump(pVM, pVCpu, rc);
2838 }
2839 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2840 return rc;
2841 }
2842
2843 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2844 TMR3NotifyResume(pVM, pVCpu);
2845 break;
2846 }
2847
2848 /*
2849 * Guru meditation takes place in the debugger.
2850 */
2851 case EMSTATE_GURU_MEDITATION:
2852 {
2853 TMR3NotifySuspend(pVM, pVCpu);
2854 VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
2855 VMMR3FatalDump(pVM, pVCpu, rc);
2856 emR3Debug(pVM, pVCpu, rc);
2857 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2858 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2859 return rc;
2860 }
2861
2862 /*
2863 * The states we don't expect here.
2864 */
2865 case EMSTATE_NONE:
2866 case EMSTATE_TERMINATING:
2867 default:
2868 AssertMsgFailed(("EMR3ExecuteVM: Invalid state %d!\n", pVCpu->em.s.enmState));
2869 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2870 TMR3NotifySuspend(pVM, pVCpu);
2871 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2872 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2873 return VERR_EM_INTERNAL_ERROR;
2874 }
2875 } /* The Outer Main Loop */
2876 }
2877 else
2878 {
2879 /*
2880 * Fatal error.
2881 */
2882 Log(("EMR3ExecuteVM: returns %Rrc because of longjmp / fatal error; (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(pVCpu->em.s.enmPrevState)));
2883 TMR3NotifySuspend(pVM, pVCpu);
2884 VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
2885 VMMR3FatalDump(pVM, pVCpu, rc);
2886 emR3Debug(pVM, pVCpu, rc);
2887 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2888 /** @todo change the VM state! */
2889 return rc;
2890 }
2891
2892 /* not reached */
2893}
2894