VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/EM.cpp@ 94425

Last change on this file since 94425 was 93901, checked in by vboxsync, 3 years ago

VMM,Main,++: Removed VM_IS_RAW_MODE_ENABLED/VM_EXEC_ENGINE_RAW_MODE and added VM_IS_EXEC_ENGINE_IEM/VM_EXEC_ENGINE_IEM instead. In IMachineDebugger::getExecutionEngine VMExecutionEngine_RawMode was removed and VMExecutionEngine_Emulated added. Removed dead code and updated frontends accordingly. On darwin.arm64 HM now falls back on IEM execution since neither HM nor NEM is available there. bugref:9898

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 111.5 KB
1/* $Id: EM.cpp 93901 2022-02-23 15:35:26Z vboxsync $ */
2/** @file
3 * EM - Execution Monitor / Manager.
4 */
5
6/*
7 * Copyright (C) 2006-2022 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/** @page pg_em EM - The Execution Monitor / Manager
19 *
20 * The Execution Monitor/Manager is responsible for running the VM, scheduling
21 * the right kind of execution (Raw-mode, Hardware Assisted, Recompiled or
22 * Interpreted), and keeping the CPU states in sync. The function
23 * EMR3ExecuteVM() is the 'main-loop' of the VM, while each of the execution
24 * modes has different inner loops (emR3RawExecute, emR3HmExecute, and
25 * emR3RemExecute).
26 *
27 * The interpreted execution is only used to avoid switching between
28 * raw-mode/hm and the recompiler when fielding virtualization traps/faults.
29 * The interpretation is thus implemented as part of EM.
30 *
31 * @see grp_em
32 */
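/* A minimal sketch of the dispatch described above: the outer loop in
 * EMR3ExecuteVM() picks an EMSTATE and hands control to the matching inner
 * loop.  This is a simplified illustration of that shape, not the actual
 * implementation; only the emR3*Execute names mentioned above come from this
 * file, and rc/fFFDone are assumed locals.
 *
 * @code
 *  for (;;)
 *  {
 *      switch (pVCpu->em.s.enmState)
 *      {
 *          case EMSTATE_HM:           rc = emR3HmExecute(pVM, pVCpu, &fFFDone); break;   // hardware assisted
 *          case EMSTATE_IEM_THEN_REM: rc = VBOXSTRICTRC_TODO(emR3ExecuteIemThenRem(pVM, pVCpu, &fFFDone)); break;
 *          case EMSTATE_REM:          rc = emR3RemExecute(pVM, pVCpu, &fFFDone); break;  // recompiled / IEM loop
 *          case EMSTATE_HALTED:       // wait for an interrupt or forced action
 *          default:                   break;  // debug states, guru meditation, suspend, ...
 *      }
 *      // forced actions and rescheduling decide the next EMSTATE here
 *  }
 * @endcode
 */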
33
34
35/*********************************************************************************************************************************
36* Header Files *
37*********************************************************************************************************************************/
38#define LOG_GROUP LOG_GROUP_EM
39#define VMCPU_INCL_CPUM_GST_CTX /* for CPUM_IMPORT_GUEST_STATE_RET */
40#include <VBox/vmm/em.h>
41#include <VBox/vmm/vmm.h>
42#include <VBox/vmm/selm.h>
43#include <VBox/vmm/trpm.h>
44#include <VBox/vmm/iem.h>
45#include <VBox/vmm/nem.h>
46#include <VBox/vmm/iom.h>
47#include <VBox/vmm/dbgf.h>
48#include <VBox/vmm/pgm.h>
49#include <VBox/vmm/apic.h>
50#include <VBox/vmm/tm.h>
51#include <VBox/vmm/mm.h>
52#include <VBox/vmm/ssm.h>
53#include <VBox/vmm/pdmapi.h>
54#include <VBox/vmm/pdmcritsect.h>
55#include <VBox/vmm/pdmqueue.h>
56#include <VBox/vmm/hm.h>
57#include "EMInternal.h"
58#include <VBox/vmm/vm.h>
59#include <VBox/vmm/uvm.h>
60#include <VBox/vmm/cpumdis.h>
61#include <VBox/dis.h>
62#include <VBox/disopcode.h>
63#include <VBox/err.h>
64#include "VMMTracing.h"
65
66#include <iprt/asm.h>
67#include <iprt/string.h>
68#include <iprt/stream.h>
69#include <iprt/thread.h>
70
71
72/*********************************************************************************************************************************
73* Internal Functions *
74*********************************************************************************************************************************/
75static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM);
76static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
77#if defined(LOG_ENABLED) || defined(VBOX_STRICT)
78static const char *emR3GetStateName(EMSTATE enmState);
79#endif
80static VBOXSTRICTRC emR3Debug(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc);
81#if defined(VBOX_WITH_REM) || defined(DEBUG)
82static int emR3RemStep(PVM pVM, PVMCPU pVCpu);
83#endif
84static int emR3RemExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone);
85
86
87/**
88 * Initializes the EM.
89 *
90 * @returns VBox status code.
91 * @param pVM The cross context VM structure.
92 */
93VMMR3_INT_DECL(int) EMR3Init(PVM pVM)
94{
95 LogFlow(("EMR3Init\n"));
96 /*
97 * Assert alignment and sizes.
98 */
99 AssertCompileMemberAlignment(VM, em.s, 32);
100 AssertCompile(sizeof(pVM->em.s) <= sizeof(pVM->em.padding));
101 AssertCompile(RT_SIZEOFMEMB(VMCPU, em.s.u.FatalLongJump) <= RT_SIZEOFMEMB(VMCPU, em.s.u.achPaddingFatalLongJump));
102 AssertCompile(RT_SIZEOFMEMB(VMCPU, em.s) <= RT_SIZEOFMEMB(VMCPU, em.padding));
103
104 /*
105 * Init the structure.
106 */
107 PCFGMNODE pCfgRoot = CFGMR3GetRoot(pVM);
108 PCFGMNODE pCfgEM = CFGMR3GetChild(pCfgRoot, "EM");
109
110 int rc = CFGMR3QueryBoolDef(pCfgEM, "IemExecutesAll", &pVM->em.s.fIemExecutesAll,
111#if defined(RT_ARCH_ARM64) && defined(RT_OS_DARWIN)
112 true
113#else
114 false
115#endif
116 );
117 AssertLogRelRCReturn(rc, rc);
118
119 bool fEnabled;
120 rc = CFGMR3QueryBoolDef(pCfgEM, "TripleFaultReset", &fEnabled, false);
121 AssertLogRelRCReturn(rc, rc);
122 pVM->em.s.fGuruOnTripleFault = !fEnabled;
123 if (!pVM->em.s.fGuruOnTripleFault && pVM->cCpus > 1)
124 {
125 LogRel(("EM: Overriding /EM/TripleFaultReset, must be false on SMP.\n"));
126 pVM->em.s.fGuruOnTripleFault = true;
127 }
128
129 LogRel(("EMR3Init: fIemExecutesAll=%RTbool fGuruOnTripleFault=%RTbool\n", pVM->em.s.fIemExecutesAll, pVM->em.s.fGuruOnTripleFault));
130
131 /** @cfgm{/EM/ExitOptimizationEnabled, bool, true}
132 * Whether to try to correlate exit history in any context, detect hot spots and
133 * try to optimize these using IEM if there are other exits close by. This
134 * overrides the context specific settings. */
135 bool fExitOptimizationEnabled = true;
136 rc = CFGMR3QueryBoolDef(pCfgEM, "ExitOptimizationEnabled", &fExitOptimizationEnabled, true);
137 AssertLogRelRCReturn(rc, rc);
138
139 /** @cfgm{/EM/ExitOptimizationEnabledR0, bool, true}
140 * Whether to optimize exits in ring-0. Setting this to false will also disable
141 * the /EM/ExitOptimizationEnabledR0PreemptDisabled setting. Depending on preemption
142 * capabilities of the host kernel, this optimization may be unavailable. */
143 bool fExitOptimizationEnabledR0 = true;
144 rc = CFGMR3QueryBoolDef(pCfgEM, "ExitOptimizationEnabledR0", &fExitOptimizationEnabledR0, true);
145 AssertLogRelRCReturn(rc, rc);
146 fExitOptimizationEnabledR0 &= fExitOptimizationEnabled;
147
148 /** @cfgm{/EM/ExitOptimizationEnabledR0PreemptDisabled, bool, false}
149 * Whether to optimize exits in ring-0 when preemption is disabled (or preemption
150 * hooks are in effect). */
151 /** @todo change the default to true here */
152 bool fExitOptimizationEnabledR0PreemptDisabled = true;
153 rc = CFGMR3QueryBoolDef(pCfgEM, "ExitOptimizationEnabledR0PreemptDisabled", &fExitOptimizationEnabledR0PreemptDisabled, false);
154 AssertLogRelRCReturn(rc, rc);
155 fExitOptimizationEnabledR0PreemptDisabled &= fExitOptimizationEnabledR0;
156
157 /** @cfgm{/EM/HistoryExecMaxInstructions, integer, 16, 65535, 8192}
158 * Maximum number of instructions to let EMHistoryExec execute in one go. */
159 uint16_t cHistoryExecMaxInstructions = 8192;
160 rc = CFGMR3QueryU16Def(pCfgEM, "HistoryExecMaxInstructions", &cHistoryExecMaxInstructions, cHistoryExecMaxInstructions);
161 AssertLogRelRCReturn(rc, rc);
162 if (cHistoryExecMaxInstructions < 16)
163 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS, "/EM/HistoryExecMaxInstructions value is too small, min 16");
164
165 /** @cfgm{/EM/HistoryProbeMaxInstructionsWithoutExit, integer, 2, 65535, 24 for HM, 32 for NEM}
166 * Maximum number of instructions between exits during probing. */
167 uint16_t cHistoryProbeMaxInstructionsWithoutExit = 24;
168#ifdef RT_OS_WINDOWS
169 if (VM_IS_NEM_ENABLED(pVM))
170 cHistoryProbeMaxInstructionsWithoutExit = 32;
171#endif
172 rc = CFGMR3QueryU16Def(pCfgEM, "HistoryProbeMaxInstructionsWithoutExit", &cHistoryProbeMaxInstructionsWithoutExit,
173 cHistoryProbeMaxInstructionsWithoutExit);
174 AssertLogRelRCReturn(rc, rc);
175 if (cHistoryProbeMaxInstructionsWithoutExit < 2)
176 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS,
177 "/EM/HistoryProbeMaxInstructionsWithoutExit value is too small, min 16");
178
179 /** @cfgm{/EM/HistoryProbMinInstructions, integer, 0, 65535, depends}
180 * The default is (/EM/HistoryProbeMaxInstructionsWithoutExit + 1) * 3. */
181 uint16_t cHistoryProbeMinInstructions = cHistoryProbeMaxInstructionsWithoutExit < 0x5554
182 ? (cHistoryProbeMaxInstructionsWithoutExit + 1) * 3 : 0xffff;
183 rc = CFGMR3QueryU16Def(pCfgEM, "HistoryProbMinInstructions", &cHistoryProbeMinInstructions,
184 cHistoryProbeMinInstructions);
185 AssertLogRelRCReturn(rc, rc);
186
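/* Note: the /EM/ keys queried above live under the VM's CFGM root, so they can
 * normally be overridden from the host through VBoxManage's extradata
 * mechanism, whose "VBoxInternal/" prefix maps onto the CFGM tree.  A hedged
 * example, assuming a VM named "testvm":
 *
 * @code
 *  VBoxManage setextradata "testvm" "VBoxInternal/EM/IemExecutesAll"             1
 *  VBoxManage setextradata "testvm" "VBoxInternal/EM/ExitOptimizationEnabled"    0
 *  VBoxManage setextradata "testvm" "VBoxInternal/EM/HistoryExecMaxInstructions" 4096
 * @endcode
 */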
187 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
188 {
189 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
190 pVCpu->em.s.fExitOptimizationEnabled = fExitOptimizationEnabled;
191 pVCpu->em.s.fExitOptimizationEnabledR0 = fExitOptimizationEnabledR0;
192 pVCpu->em.s.fExitOptimizationEnabledR0PreemptDisabled = fExitOptimizationEnabledR0PreemptDisabled;
193 pVCpu->em.s.cHistoryExecMaxInstructions = cHistoryExecMaxInstructions;
194 pVCpu->em.s.cHistoryProbeMinInstructions = cHistoryProbeMinInstructions;
195 pVCpu->em.s.cHistoryProbeMaxInstructionsWithoutExit = cHistoryProbeMaxInstructionsWithoutExit;
196 }
197
198 /*
199 * Saved state.
200 */
201 rc = SSMR3RegisterInternal(pVM, "em", 0, EM_SAVED_STATE_VERSION, 16,
202 NULL, NULL, NULL,
203 NULL, emR3Save, NULL,
204 NULL, emR3Load, NULL);
205 if (RT_FAILURE(rc))
206 return rc;
207
208 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
209 {
210 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
211
212 pVCpu->em.s.enmState = idCpu == 0 ? EMSTATE_NONE : EMSTATE_WAIT_SIPI;
213 pVCpu->em.s.enmPrevState = EMSTATE_NONE;
214 pVCpu->em.s.u64TimeSliceStart = 0; /* paranoia */
215 pVCpu->em.s.idxContinueExitRec = UINT16_MAX;
216
217# define EM_REG_COUNTER(a, b, c) \
218 rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, c, b, idCpu); \
219 AssertRC(rc);
220
221# define EM_REG_COUNTER_USED(a, b, c) \
222 rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, c, b, idCpu); \
223 AssertRC(rc);
224
225# define EM_REG_PROFILE(a, b, c) \
226 rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, idCpu); \
227 AssertRC(rc);
228
229# define EM_REG_PROFILE_ADV(a, b, c) \
230 rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE_ADV, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, idCpu); \
231 AssertRC(rc);
232
233 /*
234 * Statistics.
235 */
236#ifdef VBOX_WITH_STATISTICS
237 EM_REG_COUNTER_USED(&pVCpu->em.s.StatIoRestarted, "/EM/CPU%u/R3/PrivInst/IoRestarted", "I/O instructions restarted in ring-3.");
238 EM_REG_COUNTER_USED(&pVCpu->em.s.StatIoIem, "/EM/CPU%u/R3/PrivInst/IoIem", "I/O instructions handed to IEM in ring-3.");
239
240 /* these should be considered for release statistics. */
241 EM_REG_COUNTER(&pVCpu->em.s.StatIOEmu, "/PROF/CPU%u/EM/Emulation/IO", "Profiling of emR3RawExecuteIOInstruction.");
242 EM_REG_COUNTER(&pVCpu->em.s.StatPrivEmu, "/PROF/CPU%u/EM/Emulation/Priv", "Profiling of emR3RawPrivileged.");
243 EM_REG_PROFILE(&pVCpu->em.s.StatHMEntry, "/PROF/CPU%u/EM/HMEnter", "Profiling Hardware Accelerated Mode entry overhead.");
244 EM_REG_PROFILE(&pVCpu->em.s.StatHMExec, "/PROF/CPU%u/EM/HMExec", "Profiling Hardware Accelerated Mode execution.");
245 EM_REG_COUNTER(&pVCpu->em.s.StatHMExecuteCalled, "/PROF/CPU%u/EM/HMExecuteCalled", "Number of times emR3HmExecute is called.");
246 EM_REG_PROFILE(&pVCpu->em.s.StatIEMEmu, "/PROF/CPU%u/EM/IEMEmuSingle", "Profiling single instruction IEM execution.");
247 EM_REG_PROFILE(&pVCpu->em.s.StatIEMThenREM, "/PROF/CPU%u/EM/IEMThenRem", "Profiling IEM-then-REM instruction execution (by IEM).");
248 EM_REG_PROFILE(&pVCpu->em.s.StatNEMEntry, "/PROF/CPU%u/EM/NEMEnter", "Profiling NEM entry overhead.");
249#endif /* VBOX_WITH_STATISTICS */
250 EM_REG_PROFILE(&pVCpu->em.s.StatNEMExec, "/PROF/CPU%u/EM/NEMExec", "Profiling NEM execution.");
251 EM_REG_COUNTER(&pVCpu->em.s.StatNEMExecuteCalled, "/PROF/CPU%u/EM/NEMExecuteCalled", "Number of times emR3NemExecute is called.");
252#ifdef VBOX_WITH_STATISTICS
253 EM_REG_PROFILE(&pVCpu->em.s.StatREMEmu, "/PROF/CPU%u/EM/REMEmuSingle", "Profiling single instruction REM execution.");
254 EM_REG_PROFILE(&pVCpu->em.s.StatREMExec, "/PROF/CPU%u/EM/REMExec", "Profiling REM execution.");
255 EM_REG_PROFILE(&pVCpu->em.s.StatREMSync, "/PROF/CPU%u/EM/REMSync", "Profiling REM context syncing.");
256 EM_REG_PROFILE(&pVCpu->em.s.StatRAWEntry, "/PROF/CPU%u/EM/RAWEnter", "Profiling Raw Mode entry overhead.");
257 EM_REG_PROFILE(&pVCpu->em.s.StatRAWExec, "/PROF/CPU%u/EM/RAWExec", "Profiling Raw Mode execution.");
258 EM_REG_PROFILE(&pVCpu->em.s.StatRAWTail, "/PROF/CPU%u/EM/RAWTail", "Profiling Raw Mode tail overhead.");
259#endif /* VBOX_WITH_STATISTICS */
260
261 EM_REG_COUNTER(&pVCpu->em.s.StatForcedActions, "/PROF/CPU%u/EM/ForcedActions", "Profiling forced action execution.");
262 EM_REG_COUNTER(&pVCpu->em.s.StatHalted, "/PROF/CPU%u/EM/Halted", "Profiling halted state (VMR3WaitHalted).");
263 EM_REG_PROFILE_ADV(&pVCpu->em.s.StatCapped, "/PROF/CPU%u/EM/Capped", "Profiling capped state (sleep).");
264 EM_REG_COUNTER(&pVCpu->em.s.StatREMTotal, "/PROF/CPU%u/EM/REMTotal", "Profiling emR3RemExecute (excluding FFs).");
265 EM_REG_COUNTER(&pVCpu->em.s.StatRAWTotal, "/PROF/CPU%u/EM/RAWTotal", "Profiling emR3RawExecute (excluding FFs).");
266
267 EM_REG_PROFILE_ADV(&pVCpu->em.s.StatTotal, "/PROF/CPU%u/EM/Total", "Profiling EMR3ExecuteVM.");
268
269 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.iNextExit, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
270 "Number of recorded exits.", "/PROF/CPU%u/EM/RecordedExits", idCpu);
271 AssertRC(rc);
272
273 /* History record statistics */
274 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.cExitRecordUsed, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
275 "Number of used hash table entries.", "/EM/CPU%u/ExitHashing/Used", idCpu);
276 AssertRC(rc);
277
278 for (uint32_t iStep = 0; iStep < RT_ELEMENTS(pVCpu->em.s.aStatHistoryRecHits); iStep++)
279 {
280 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.aStatHistoryRecHits[iStep], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
281 "Number of hits at this step.", "/EM/CPU%u/ExitHashing/Step%02u-Hits", idCpu, iStep);
282 AssertRC(rc);
283 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.aStatHistoryRecTypeChanged[iStep], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
284 "Number of type changes at this step.", "/EM/CPU%u/ExitHashing/Step%02u-TypeChanges", idCpu, iStep);
285 AssertRC(rc);
286 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.aStatHistoryRecReplaced[iStep], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
287 "Number of replacements at this step.", "/EM/CPU%u/ExitHashing/Step%02u-Replacments", idCpu, iStep);
288 AssertRC(rc);
289 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.aStatHistoryRecNew[iStep], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
290 "Number of new inserts at this step.", "/EM/CPU%u/ExitHashing/Step%02u-NewInserts", idCpu, iStep);
291 AssertRC(rc);
292 }
293
294 EM_REG_PROFILE(&pVCpu->em.s.StatHistoryExec, "/EM/CPU%u/ExitOpt/Exec", "Profiling normal EMHistoryExec operation.");
295 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryExecSavedExits, "/EM/CPU%u/ExitOpt/ExecSavedExit", "Net number of saved exits.");
296 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryExecInstructions, "/EM/CPU%u/ExitOpt/ExecInstructions", "Number of instructions executed during normal operation.");
297 EM_REG_PROFILE(&pVCpu->em.s.StatHistoryProbe, "/EM/CPU%u/ExitOpt/Probe", "Profiling EMHistoryExec when probing.");
298 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryProbeInstructions, "/EM/CPU%u/ExitOpt/ProbeInstructions", "Number of instructions executed during probing.");
299 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryProbedNormal, "/EM/CPU%u/ExitOpt/ProbedNormal", "Number of EMEXITACTION_NORMAL_PROBED results.");
300 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryProbedExecWithMax, "/EM/CPU%u/ExitOpt/ProbedExecWithMax", "Number of EMEXITACTION_EXEC_WITH_MAX results.");
301 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryProbedToRing3, "/EM/CPU%u/ExitOpt/ProbedToRing3", "Number of ring-3 probe continuations.");
302 }
303
304 emR3InitDbg(pVM);
305 return VINF_SUCCESS;
306}
307
308
309/**
310 * Called when a VM initialization stage is completed.
311 *
312 * @returns VBox status code.
313 * @param pVM The cross context VM structure.
314 * @param enmWhat The initialization state that was completed.
315 */
316VMMR3_INT_DECL(int) EMR3InitCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
317{
318 if (enmWhat == VMINITCOMPLETED_RING0)
319 LogRel(("EM: Exit history optimizations: enabled=%RTbool enabled-r0=%RTbool enabled-r0-no-preemption=%RTbool\n",
320 pVM->apCpusR3[0]->em.s.fExitOptimizationEnabled, pVM->apCpusR3[0]->em.s.fExitOptimizationEnabledR0,
321 pVM->apCpusR3[0]->em.s.fExitOptimizationEnabledR0PreemptDisabled));
322 return VINF_SUCCESS;
323}
324
325
326/**
327 * Applies relocations to data and code managed by this
328 * component. This function will be called at init and
329 * whenever the VMM needs to relocate itself inside the GC.
330 *
331 * @param pVM The cross context VM structure.
332 */
333VMMR3_INT_DECL(void) EMR3Relocate(PVM pVM)
334{
335 LogFlow(("EMR3Relocate\n"));
336 RT_NOREF(pVM);
337}
338
339
340/**
341 * Reset the EM state for a CPU.
342 *
343 * Called by EMR3Reset and hot plugging.
344 *
345 * @param pVCpu The cross context virtual CPU structure.
346 */
347VMMR3_INT_DECL(void) EMR3ResetCpu(PVMCPU pVCpu)
348{
349 /* Reset scheduling state. */
350 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_UNHALT);
351
352 /* VMR3ResetFF may return VINF_EM_RESET or VINF_EM_SUSPEND, so transition
353 out of the HALTED state here so that enmPrevState doesn't end up as
354 HALTED when EMR3Execute returns. */
355 if (pVCpu->em.s.enmState == EMSTATE_HALTED)
356 {
357 Log(("EMR3ResetCpu: Cpu#%u %s -> %s\n", pVCpu->idCpu, emR3GetStateName(pVCpu->em.s.enmState), pVCpu->idCpu == 0 ? "EMSTATE_NONE" : "EMSTATE_WAIT_SIPI"));
358 pVCpu->em.s.enmState = pVCpu->idCpu == 0 ? EMSTATE_NONE : EMSTATE_WAIT_SIPI;
359 }
360}
361
362
363/**
364 * Reset notification.
365 *
366 * @param pVM The cross context VM structure.
367 */
368VMMR3_INT_DECL(void) EMR3Reset(PVM pVM)
369{
370 Log(("EMR3Reset: \n"));
371 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
372 EMR3ResetCpu(pVM->apCpusR3[idCpu]);
373}
374
375
376/**
377 * Terminates the EM.
378 *
379 * Termination means cleaning up and freeing all resources;
380 * the VM itself is at this point powered off or suspended.
381 *
382 * @returns VBox status code.
383 * @param pVM The cross context VM structure.
384 */
385VMMR3_INT_DECL(int) EMR3Term(PVM pVM)
386{
387 RT_NOREF(pVM);
388 return VINF_SUCCESS;
389}
390
391
392/**
393 * Execute state save operation.
394 *
395 * @returns VBox status code.
396 * @param pVM The cross context VM structure.
397 * @param pSSM SSM operation handle.
398 */
399static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM)
400{
401 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
402 {
403 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
404
405 SSMR3PutBool(pSSM, false /*fForceRAW*/);
406
407 Assert(pVCpu->em.s.enmState == EMSTATE_SUSPENDED);
408 Assert(pVCpu->em.s.enmPrevState != EMSTATE_SUSPENDED);
409 SSMR3PutU32(pSSM, pVCpu->em.s.enmPrevState);
410
411 /* Save mwait state. */
412 SSMR3PutU32(pSSM, pVCpu->em.s.MWait.fWait);
413 SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMWaitRAX);
414 SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMWaitRCX);
415 SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRAX);
416 SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRCX);
417 int rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRDX);
418 AssertRCReturn(rc, rc);
419 }
420 return VINF_SUCCESS;
421}
422
423
424/**
425 * Execute state load operation.
426 *
427 * @returns VBox status code.
428 * @param pVM The cross context VM structure.
429 * @param pSSM SSM operation handle.
430 * @param uVersion Data layout version.
431 * @param uPass The data pass.
432 */
433static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
434{
435 /*
436 * Validate version.
437 */
438 if ( uVersion > EM_SAVED_STATE_VERSION
439 || uVersion < EM_SAVED_STATE_VERSION_PRE_SMP)
440 {
441 AssertMsgFailed(("emR3Load: Invalid version uVersion=%d (current %d)!\n", uVersion, EM_SAVED_STATE_VERSION));
442 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
443 }
444 Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);
445
446 /*
447 * Load the saved state.
448 */
449 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
450 {
451 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
452
453 bool fForceRAWIgnored;
454 int rc = SSMR3GetBool(pSSM, &fForceRAWIgnored);
455 AssertRCReturn(rc, rc);
456
457 if (uVersion > EM_SAVED_STATE_VERSION_PRE_SMP)
458 {
459 SSM_GET_ENUM32_RET(pSSM, pVCpu->em.s.enmPrevState, EMSTATE);
460 Assert(pVCpu->em.s.enmPrevState != EMSTATE_SUSPENDED);
461
462 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
463 }
464 if (uVersion > EM_SAVED_STATE_VERSION_PRE_MWAIT)
465 {
466 /* Load mwait state. */
467 rc = SSMR3GetU32(pSSM, &pVCpu->em.s.MWait.fWait);
468 AssertRCReturn(rc, rc);
469 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMWaitRAX);
470 AssertRCReturn(rc, rc);
471 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMWaitRCX);
472 AssertRCReturn(rc, rc);
473 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRAX);
474 AssertRCReturn(rc, rc);
475 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRCX);
476 AssertRCReturn(rc, rc);
477 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRDX);
478 AssertRCReturn(rc, rc);
479 }
480 }
481 return VINF_SUCCESS;
482}
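/* A summary sketch of the "em" saved state unit produced by emR3Save() above
 * and consumed by emR3Load(); this only restates the stream layout implied by
 * the SSMR3Put/Get calls above (types as implied by those calls), per VCPU and
 * for the current saved state version.  Older versions omit the trailing
 * fields, as the version checks in emR3Load() show.
 *
 * @code
 *  bool     fForceRAW;        // always saved as false, read and ignored on load
 *  uint32_t enmPrevState;     // EMSTATE before suspend (absent in pre-SMP saves)
 *  uint32_t fWait;            // MWait flags            (absent in pre-MWAIT saves)
 *  RTGCPTR  uMWaitRAX, uMWaitRCX;
 *  RTGCPTR  uMonitorRAX, uMonitorRCX, uMonitorRDX;
 * @endcode
 */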
483
484
485/**
486 * Argument packet for emR3SetExecutionPolicy.
487 */
488struct EMR3SETEXECPOLICYARGS
489{
490 EMEXECPOLICY enmPolicy;
491 bool fEnforce;
492};
493
494
495/**
496 * @callback_method_impl{FNVMMEMTRENDEZVOUS, Rendezvous callback for EMR3SetExecutionPolicy.}
497 */
498static DECLCALLBACK(VBOXSTRICTRC) emR3SetExecutionPolicy(PVM pVM, PVMCPU pVCpu, void *pvUser)
499{
500 /*
501 * Only the first CPU changes the variables.
502 */
503 if (pVCpu->idCpu == 0)
504 {
505 struct EMR3SETEXECPOLICYARGS *pArgs = (struct EMR3SETEXECPOLICYARGS *)pvUser;
506 switch (pArgs->enmPolicy)
507 {
508 case EMEXECPOLICY_RECOMPILE_RING0:
509 case EMEXECPOLICY_RECOMPILE_RING3:
510 break;
511 case EMEXECPOLICY_IEM_ALL:
512 pVM->em.s.fIemExecutesAll = pArgs->fEnforce;
513
514 /* For making '.alliem 1' useful during debugging, transition the
515 EMSTATE_DEBUG_GUEST_XXX to EMSTATE_DEBUG_GUEST_IEM. */
516 for (VMCPUID i = 0; i < pVM->cCpus; i++)
517 {
518 PVMCPU pVCpuX = pVM->apCpusR3[i];
519 switch (pVCpuX->em.s.enmState)
520 {
521 case EMSTATE_DEBUG_GUEST_RAW:
522 case EMSTATE_DEBUG_GUEST_HM:
523 case EMSTATE_DEBUG_GUEST_NEM:
524 case EMSTATE_DEBUG_GUEST_REM:
525 Log(("EM: idCpu=%u: %s -> EMSTATE_DEBUG_GUEST_IEM\n", i, emR3GetStateName(pVCpuX->em.s.enmState) ));
526 pVCpuX->em.s.enmState = EMSTATE_DEBUG_GUEST_IEM;
527 break;
528 case EMSTATE_DEBUG_GUEST_IEM:
529 default:
530 break;
531 }
532 }
533 break;
534 default:
535 AssertFailedReturn(VERR_INVALID_PARAMETER);
536 }
537 Log(("EM: Set execution policy (fIemExecutesAll=%RTbool)\n", pVM->em.s.fIemExecutesAll));
538 }
539
540 /*
541 * Force rescheduling if in RAW, HM, NEM, IEM, or REM.
542 */
543 return pVCpu->em.s.enmState == EMSTATE_RAW
544 || pVCpu->em.s.enmState == EMSTATE_HM
545 || pVCpu->em.s.enmState == EMSTATE_NEM
546 || pVCpu->em.s.enmState == EMSTATE_IEM
547 || pVCpu->em.s.enmState == EMSTATE_REM
548 || pVCpu->em.s.enmState == EMSTATE_IEM_THEN_REM
549 ? VINF_EM_RESCHEDULE
550 : VINF_SUCCESS;
551}
552
553
554/**
555 * Changes an execution scheduling policy parameter.
556 *
557 * This is used to enable or disable raw-mode / hardware-virtualization
558 * execution of user and supervisor code.
559 *
560 * @returns VINF_SUCCESS on success.
561 * @returns VINF_EM_RESCHEDULE if a rescheduling might be required.
562 * @returns VERR_INVALID_PARAMETER on an invalid enmPolicy value.
563 *
564 * @param pUVM The user mode VM handle.
565 * @param enmPolicy The scheduling policy to change.
566 * @param fEnforce Whether to enforce the policy or not.
567 */
568VMMR3DECL(int) EMR3SetExecutionPolicy(PUVM pUVM, EMEXECPOLICY enmPolicy, bool fEnforce)
569{
570 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
571 VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE);
572 AssertReturn(enmPolicy > EMEXECPOLICY_INVALID && enmPolicy < EMEXECPOLICY_END, VERR_INVALID_PARAMETER);
573
574 struct EMR3SETEXECPOLICYARGS Args = { enmPolicy, fEnforce };
575 return VMMR3EmtRendezvous(pUVM->pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING, emR3SetExecutionPolicy, &Args);
576}
577
578
579/**
580 * Queries an execution scheduling policy parameter.
581 *
582 * @returns VBox status code
583 * @param pUVM The user mode VM handle.
584 * @param enmPolicy The scheduling policy to query.
585 * @param pfEnforced Where to return the current value.
586 */
587VMMR3DECL(int) EMR3QueryExecutionPolicy(PUVM pUVM, EMEXECPOLICY enmPolicy, bool *pfEnforced)
588{
589 AssertReturn(enmPolicy > EMEXECPOLICY_INVALID && enmPolicy < EMEXECPOLICY_END, VERR_INVALID_PARAMETER);
590 AssertPtrReturn(pfEnforced, VERR_INVALID_POINTER);
591 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
592 PVM pVM = pUVM->pVM;
593 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
594
595 /* No need to bother EMTs with a query. */
596 switch (enmPolicy)
597 {
598 case EMEXECPOLICY_RECOMPILE_RING0:
599 case EMEXECPOLICY_RECOMPILE_RING3:
600 *pfEnforced = false;
601 break;
602 case EMEXECPOLICY_IEM_ALL:
603 *pfEnforced = pVM->em.s.fIemExecutesAll;
604 break;
605 default:
606 AssertFailedReturn(VERR_INTERNAL_ERROR_2);
607 }
608
609 return VINF_SUCCESS;
610}
611
612
613/**
614 * Queries the main execution engine of the VM.
615 *
616 * @returns VBox status code
617 * @param pUVM The user mode VM handle.
618 * @param pbMainExecutionEngine Where to return the result, VM_EXEC_ENGINE_XXX.
619 */
620VMMR3DECL(int) EMR3QueryMainExecutionEngine(PUVM pUVM, uint8_t *pbMainExecutionEngine)
621{
622 AssertPtrReturn(pbMainExecutionEngine, VERR_INVALID_POINTER);
623 *pbMainExecutionEngine = VM_EXEC_ENGINE_NOT_SET;
624
625 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
626 PVM pVM = pUVM->pVM;
627 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
628
629 *pbMainExecutionEngine = pVM->bMainExecutionEngine;
630 return VINF_SUCCESS;
631}
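/* A minimal usage sketch for the query above.  VM_EXEC_ENGINE_NOT_SET is used
 * in the function itself and VM_EXEC_ENGINE_IEM is named in the change log;
 * VM_EXEC_ENGINE_HW_VIRT and VM_EXEC_ENGINE_NATIVE_API are assumed from the
 * usual vm.h naming and should be verified there.
 *
 * @code
 *  uint8_t bEngine = VM_EXEC_ENGINE_NOT_SET;
 *  if (RT_SUCCESS(EMR3QueryMainExecutionEngine(pUVM, &bEngine)))
 *      LogRel(("Main execution engine: %s\n",
 *                bEngine == VM_EXEC_ENGINE_HW_VIRT    ? "HM (VT-x/AMD-V)"
 *              : bEngine == VM_EXEC_ENGINE_NATIVE_API ? "NEM (native API)"
 *              : bEngine == VM_EXEC_ENGINE_IEM        ? "IEM" : "not set"));
 * @endcode
 */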
632
633
634/**
635 * Raise a fatal error.
636 *
637 * Safely terminate the VM with full state report and stuff. This function
638 * will naturally never return.
639 *
640 * @param pVCpu The cross context virtual CPU structure.
641 * @param rc VBox status code.
642 */
643VMMR3DECL(void) EMR3FatalError(PVMCPU pVCpu, int rc)
644{
645 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
646 longjmp(pVCpu->em.s.u.FatalLongJump, rc);
647}
648
649
650#if defined(LOG_ENABLED) || defined(VBOX_STRICT)
651/**
652 * Gets the EM state name.
653 *
654 * @returns pointer to the read-only state name.
655 * @param enmState The state.
656 */
657static const char *emR3GetStateName(EMSTATE enmState)
658{
659 switch (enmState)
660 {
661 case EMSTATE_NONE: return "EMSTATE_NONE";
662 case EMSTATE_RAW: return "EMSTATE_RAW";
663 case EMSTATE_HM: return "EMSTATE_HM";
664 case EMSTATE_IEM: return "EMSTATE_IEM";
665 case EMSTATE_REM: return "EMSTATE_REM";
666 case EMSTATE_HALTED: return "EMSTATE_HALTED";
667 case EMSTATE_WAIT_SIPI: return "EMSTATE_WAIT_SIPI";
668 case EMSTATE_SUSPENDED: return "EMSTATE_SUSPENDED";
669 case EMSTATE_TERMINATING: return "EMSTATE_TERMINATING";
670 case EMSTATE_DEBUG_GUEST_RAW: return "EMSTATE_DEBUG_GUEST_RAW";
671 case EMSTATE_DEBUG_GUEST_HM: return "EMSTATE_DEBUG_GUEST_HM";
672 case EMSTATE_DEBUG_GUEST_IEM: return "EMSTATE_DEBUG_GUEST_IEM";
673 case EMSTATE_DEBUG_GUEST_REM: return "EMSTATE_DEBUG_GUEST_REM";
674 case EMSTATE_DEBUG_HYPER: return "EMSTATE_DEBUG_HYPER";
675 case EMSTATE_GURU_MEDITATION: return "EMSTATE_GURU_MEDITATION";
676 case EMSTATE_IEM_THEN_REM: return "EMSTATE_IEM_THEN_REM";
677 case EMSTATE_NEM: return "EMSTATE_NEM";
678 case EMSTATE_DEBUG_GUEST_NEM: return "EMSTATE_DEBUG_GUEST_NEM";
679 default: return "Unknown!";
680 }
681}
682#endif /* LOG_ENABLED || VBOX_STRICT */
683
684
685/**
686 * Handle pending ring-3 I/O port write.
687 *
688 * This is in response to a VINF_EM_PENDING_R3_IOPORT_WRITE status code returned
689 * by EMRZSetPendingIoPortWrite() in ring-0 or raw-mode context.
690 *
691 * @returns Strict VBox status code.
692 * @param pVM The cross context VM structure.
693 * @param pVCpu The cross context virtual CPU structure.
694 */
695VBOXSTRICTRC emR3ExecutePendingIoPortWrite(PVM pVM, PVMCPU pVCpu)
696{
697 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
698
699 /* Get and clear the pending data. */
700 RTIOPORT const uPort = pVCpu->em.s.PendingIoPortAccess.uPort;
701 uint32_t const uValue = pVCpu->em.s.PendingIoPortAccess.uValue;
702 uint8_t const cbValue = pVCpu->em.s.PendingIoPortAccess.cbValue;
703 uint8_t const cbInstr = pVCpu->em.s.PendingIoPortAccess.cbInstr;
704 pVCpu->em.s.PendingIoPortAccess.cbValue = 0;
705
706 /* Assert sanity. */
707 switch (cbValue)
708 {
709 case 1: Assert(!(uValue & UINT32_C(0xffffff00))); break;
710 case 2: Assert(!(uValue & UINT32_C(0xffff0000))); break;
711 case 4: break;
712 default: AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_EM_INTERNAL_ERROR);
713 }
714 AssertReturn(cbInstr <= 15 && cbInstr >= 1, VERR_EM_INTERNAL_ERROR);
715
716 /* Do the work.*/
717 VBOXSTRICTRC rcStrict = IOMIOPortWrite(pVM, pVCpu, uPort, uValue, cbValue);
718 LogFlow(("EM/OUT: %#x, %#x LB %u -> %Rrc\n", uPort, uValue, cbValue, VBOXSTRICTRC_VAL(rcStrict) ));
719 if (IOM_SUCCESS(rcStrict))
720 {
721 pVCpu->cpum.GstCtx.rip += cbInstr;
722 pVCpu->cpum.GstCtx.rflags.Bits.u1RF = 0;
723 }
724 return rcStrict;
725}
726
727
728/**
729 * Handle pending ring-3 I/O port read.
730 *
731 * This is in response to a VINF_EM_PENDING_R3_IOPORT_READ status code returned
732 * by EMRZSetPendingIoPortRead() in ring-0 or raw-mode context.
733 *
734 * @returns Strict VBox status code.
735 * @param pVM The cross context VM structure.
736 * @param pVCpu The cross context virtual CPU structure.
737 */
738VBOXSTRICTRC emR3ExecutePendingIoPortRead(PVM pVM, PVMCPU pVCpu)
739{
740 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_RAX);
741
742 /* Get and clear the pending data. */
743 RTIOPORT const uPort = pVCpu->em.s.PendingIoPortAccess.uPort;
744 uint8_t const cbValue = pVCpu->em.s.PendingIoPortAccess.cbValue;
745 uint8_t const cbInstr = pVCpu->em.s.PendingIoPortAccess.cbInstr;
746 pVCpu->em.s.PendingIoPortAccess.cbValue = 0;
747
748 /* Assert sanity. */
749 switch (cbValue)
750 {
751 case 1: break;
752 case 2: break;
753 case 4: break;
754 default: AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_EM_INTERNAL_ERROR);
755 }
756 AssertReturn(pVCpu->em.s.PendingIoPortAccess.uValue == UINT32_C(0x52454144) /* READ*/, VERR_EM_INTERNAL_ERROR);
757 AssertReturn(cbInstr <= 15 && cbInstr >= 1, VERR_EM_INTERNAL_ERROR);
758
759 /* Do the work.*/
760 uint32_t uValue = 0;
761 VBOXSTRICTRC rcStrict = IOMIOPortRead(pVM, pVCpu, uPort, &uValue, cbValue);
762 LogFlow(("EM/IN: %#x LB %u -> %Rrc, %#x\n", uPort, cbValue, VBOXSTRICTRC_VAL(rcStrict), uValue ));
763 if (IOM_SUCCESS(rcStrict))
764 {
765 if (cbValue == 4)
766 pVCpu->cpum.GstCtx.rax = uValue;
767 else if (cbValue == 2)
768 pVCpu->cpum.GstCtx.ax = (uint16_t)uValue;
769 else
770 pVCpu->cpum.GstCtx.al = (uint8_t)uValue;
771 pVCpu->cpum.GstCtx.rip += cbInstr;
772 pVCpu->cpum.GstCtx.rflags.Bits.u1RF = 0;
773 }
774 return rcStrict;
775}
776
777
778/**
779 * @callback_method_impl{FNVMMEMTRENDEZVOUS,
780 * Worker for emR3ExecuteSplitLockInstruction}
781 */
782static DECLCALLBACK(VBOXSTRICTRC) emR3ExecuteSplitLockInstructionRendezvous(PVM pVM, PVMCPU pVCpu, void *pvUser)
783{
784 /* Only execute on the specified EMT. */
785 if (pVCpu == (PVMCPU)pvUser)
786 {
787 LogFunc(("\n"));
788 VBOXSTRICTRC rcStrict = IEMExecOneIgnoreLock(pVCpu);
789 LogFunc(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
790 if (rcStrict == VINF_IEM_RAISED_XCPT)
791 rcStrict = VINF_SUCCESS;
792 return rcStrict;
793 }
794 RT_NOREF(pVM);
795 return VINF_SUCCESS;
796}
797
798
799/**
800 * Handle an instruction causing a split cacheline lock access in SMP VMs.
801 *
802 * Generally we only get here if the host has split-lock detection enabled and
803 * this caused an \#AC because of something the guest did. If we interpret the
804 * instruction as-is, we'll likely just repeat the split-lock access and
805 * possibly be killed, get a SIGBUS, or trigger a warning followed by extra MSR
806 * changes on context switching (costs a tiny bit). Assuming these \#ACs are
807 * rare to non-existent, we'll do a rendezvous of all EMTs and tell IEM to
808 * disregard the lock prefix when emulating the instruction.
809 *
810 * Yes, we could probably modify the MSR (or MSRs) controlling the detection
811 * feature when entering guest context, but the support for the feature isn't a
812 * 100% given and we'll need the debug-only supdrvOSMsrProberRead and
813 * supdrvOSMsrProberWrite functionality from SUPDrv.cpp to safely detect it.
814 * Thus the approach is to just deal with the spurious \#ACs first and maybe add
815 * propert detection to SUPDrv later if we find it necessary.
816 *
817 * @see @bugref{10052}
818 *
819 * @returns Strict VBox status code.
820 * @param pVM The cross context VM structure.
821 * @param pVCpu The cross context virtual CPU structure.
822 */
823VBOXSTRICTRC emR3ExecuteSplitLockInstruction(PVM pVM, PVMCPU pVCpu)
824{
825 LogFunc(("\n"));
826 return VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE, emR3ExecuteSplitLockInstructionRendezvous, pVCpu);
827}
828
829
830/**
831 * Debug loop.
832 *
833 * @returns VBox status code for EM.
834 * @param pVM The cross context VM structure.
835 * @param pVCpu The cross context virtual CPU structure.
836 * @param rc Current EM VBox status code.
837 */
838static VBOXSTRICTRC emR3Debug(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc)
839{
840 for (;;)
841 {
842 Log(("emR3Debug: rc=%Rrc\n", VBOXSTRICTRC_VAL(rc)));
843 const VBOXSTRICTRC rcLast = rc;
844
845 /*
846 * Debug related RC.
847 */
848 switch (VBOXSTRICTRC_VAL(rc))
849 {
850 /*
851 * Single step an instruction.
852 */
853 case VINF_EM_DBG_STEP:
854 if ( pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_RAW
855 || pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER)
856 AssertLogRelMsgFailedStmt(("Bad EM state."), VERR_EM_INTERNAL_ERROR);
857 else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_HM)
858 rc = EMR3HmSingleInstruction(pVM, pVCpu, 0 /*fFlags*/);
859 else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_NEM)
860 rc = VBOXSTRICTRC_TODO(emR3NemSingleInstruction(pVM, pVCpu, 0 /*fFlags*/));
861#ifdef VBOX_WITH_REM /** @todo fix me? */
862 else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_REM)
863 rc = emR3RemStep(pVM, pVCpu);
864#endif
865 else
866 {
867 rc = IEMExecOne(pVCpu); /** @todo add dedicated interface... */
868 if (rc == VINF_SUCCESS || rc == VINF_EM_RESCHEDULE)
869 rc = VINF_EM_DBG_STEPPED;
870 }
871 break;
872
873 /*
874 * Simple events: stepped, breakpoint, stop/assertion.
875 */
876 case VINF_EM_DBG_STEPPED:
877 rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED);
878 break;
879
880 case VINF_EM_DBG_BREAKPOINT:
881 rc = DBGFR3BpHit(pVM, pVCpu);
882 break;
883
884 case VINF_EM_DBG_STOP:
885 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, NULL, 0, NULL, NULL);
886 break;
887
888 case VINF_EM_DBG_EVENT:
889 rc = DBGFR3EventHandlePending(pVM, pVCpu);
890 break;
891
892 case VINF_EM_DBG_HYPER_STEPPED:
893 rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED_HYPER);
894 break;
895
896 case VINF_EM_DBG_HYPER_BREAKPOINT:
897 rc = DBGFR3EventBreakpoint(pVM, DBGFEVENT_BREAKPOINT_HYPER);
898 break;
899
900 case VINF_EM_DBG_HYPER_ASSERTION:
901 RTPrintf("\nVINF_EM_DBG_HYPER_ASSERTION:\n%s%s\n", VMMR3GetRZAssertMsg1(pVM), VMMR3GetRZAssertMsg2(pVM));
902 RTLogFlush(NULL);
903 rc = DBGFR3EventAssertion(pVM, DBGFEVENT_ASSERTION_HYPER, VMMR3GetRZAssertMsg1(pVM), VMMR3GetRZAssertMsg2(pVM));
904 break;
905
906 /*
907 * Guru meditation.
908 */
909 case VERR_VMM_RING0_ASSERTION: /** @todo Make a guru meditation event! */
910 rc = DBGFR3EventSrc(pVM, DBGFEVENT_FATAL_ERROR, "VERR_VMM_RING0_ASSERTION", 0, NULL, NULL);
911 break;
912 case VERR_REM_TOO_MANY_TRAPS: /** @todo Make a guru meditation event! */
913 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, "VERR_REM_TOO_MANY_TRAPS", 0, NULL, NULL);
914 break;
915 case VINF_EM_TRIPLE_FAULT: /** @todo Make a guru meditation event! */
916 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, "VINF_EM_TRIPLE_FAULT", 0, NULL, NULL);
917 break;
918
919 default: /** @todo don't use default for guru, but make special errors code! */
920 {
921 LogRel(("emR3Debug: rc=%Rrc\n", VBOXSTRICTRC_VAL(rc)));
922 rc = DBGFR3Event(pVM, DBGFEVENT_FATAL_ERROR);
923 break;
924 }
925 }
926
927 /*
928 * Process the result.
929 */
930 switch (VBOXSTRICTRC_VAL(rc))
931 {
932 /*
933 * Continue the debugging loop.
934 */
935 case VINF_EM_DBG_STEP:
936 case VINF_EM_DBG_STOP:
937 case VINF_EM_DBG_EVENT:
938 case VINF_EM_DBG_STEPPED:
939 case VINF_EM_DBG_BREAKPOINT:
940 case VINF_EM_DBG_HYPER_STEPPED:
941 case VINF_EM_DBG_HYPER_BREAKPOINT:
942 case VINF_EM_DBG_HYPER_ASSERTION:
943 break;
944
945 /*
946 * Resuming execution (in some form) has to be done here if we got
947 * a hypervisor debug event.
948 */
949 case VINF_SUCCESS:
950 case VINF_EM_RESUME:
951 case VINF_EM_SUSPEND:
952 case VINF_EM_RESCHEDULE:
953 case VINF_EM_RESCHEDULE_RAW:
954 case VINF_EM_RESCHEDULE_REM:
955 case VINF_EM_HALT:
956 if (pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER)
957 AssertLogRelMsgFailedReturn(("Not implemented\n"), VERR_EM_INTERNAL_ERROR);
958 if (rc == VINF_SUCCESS)
959 rc = VINF_EM_RESCHEDULE;
960 return rc;
961
962 /*
963 * The debugger isn't attached.
964 * We'll simply turn the thing off since that's the easiest thing to do.
965 */
966 case VERR_DBGF_NOT_ATTACHED:
967 switch (VBOXSTRICTRC_VAL(rcLast))
968 {
969 case VINF_EM_DBG_HYPER_STEPPED:
970 case VINF_EM_DBG_HYPER_BREAKPOINT:
971 case VINF_EM_DBG_HYPER_ASSERTION:
972 case VERR_TRPM_PANIC:
973 case VERR_TRPM_DONT_PANIC:
974 case VERR_VMM_RING0_ASSERTION:
975 case VERR_VMM_HYPER_CR3_MISMATCH:
976 case VERR_VMM_RING3_CALL_DISABLED:
977 return rcLast;
978 }
979 return VINF_EM_OFF;
980
981 /*
982 * Status codes terminating the VM in one or another sense.
983 */
984 case VINF_EM_TERMINATE:
985 case VINF_EM_OFF:
986 case VINF_EM_RESET:
987 case VINF_EM_NO_MEMORY:
988 case VINF_EM_RAW_STALE_SELECTOR:
989 case VINF_EM_RAW_IRET_TRAP:
990 case VERR_TRPM_PANIC:
991 case VERR_TRPM_DONT_PANIC:
992 case VERR_IEM_INSTR_NOT_IMPLEMENTED:
993 case VERR_IEM_ASPECT_NOT_IMPLEMENTED:
994 case VERR_VMM_RING0_ASSERTION:
995 case VERR_VMM_HYPER_CR3_MISMATCH:
996 case VERR_VMM_RING3_CALL_DISABLED:
997 case VERR_INTERNAL_ERROR:
998 case VERR_INTERNAL_ERROR_2:
999 case VERR_INTERNAL_ERROR_3:
1000 case VERR_INTERNAL_ERROR_4:
1001 case VERR_INTERNAL_ERROR_5:
1002 case VERR_IPE_UNEXPECTED_STATUS:
1003 case VERR_IPE_UNEXPECTED_INFO_STATUS:
1004 case VERR_IPE_UNEXPECTED_ERROR_STATUS:
1005 return rc;
1006
1007 /*
1008 * The rest is unexpected, and will keep us here.
1009 */
1010 default:
1011 AssertMsgFailed(("Unexpected rc %Rrc!\n", VBOXSTRICTRC_VAL(rc)));
1012 break;
1013 }
1014 } /* debug for ever */
1015}
1016
1017
1018#if defined(VBOX_WITH_REM) || defined(DEBUG)
1019/**
1020 * Steps recompiled code.
1021 *
1022 * @returns VBox status code. The most important ones are: VINF_EM_STEP_EVENT,
1023 * VINF_EM_RESCHEDULE, VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1024 *
1025 * @param pVM The cross context VM structure.
1026 * @param pVCpu The cross context virtual CPU structure.
1027 */
1028static int emR3RemStep(PVM pVM, PVMCPU pVCpu)
1029{
1030 Log3(("emR3RemStep: cs:eip=%04x:%08x\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
1031
1032 int rc = VBOXSTRICTRC_TODO(IEMExecOne(pVCpu)); NOREF(pVM);
1033
1034 Log3(("emR3RemStep: returns %Rrc cs:eip=%04x:%08x\n", rc, CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
1035 return rc;
1036}
1037#endif /* VBOX_WITH_REM || DEBUG */
1038
1039
1040/**
1041 * Executes recompiled code.
1042 *
1043 * This function contains the recompiler version of the inner
1044 * execution loop (the outer loop being in EMR3ExecuteVM()).
1045 *
1046 * @returns VBox status code. The most important ones are: VINF_EM_RESCHEDULE,
1047 * VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1048 *
1049 * @param pVM The cross context VM structure.
1050 * @param pVCpu The cross context virtual CPU structure.
1051 * @param pfFFDone Where to store an indicator telling whether or not
1052 * FFs were done before returning.
1053 *
1054 */
1055static int emR3RemExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
1056{
1057#ifdef LOG_ENABLED
1058 uint32_t cpl = CPUMGetGuestCPL(pVCpu);
1059
1060 if (pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
1061 Log(("EMV86: %04X:%08X IF=%d\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.Bits.u1IF));
1062 else
1063 Log(("EMR%d: %04X:%08X ESP=%08X IF=%d CR0=%x eflags=%x\n", cpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.eflags.Bits.u1IF, (uint32_t)pVCpu->cpum.GstCtx.cr0, pVCpu->cpum.GstCtx.eflags.u));
1064#endif
1065 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatREMTotal, a);
1066
1067 /*
1068 * Spin till we get a forced action which returns anything but VINF_SUCCESS
1069 * or the REM suggests raw-mode execution.
1070 */
1071 *pfFFDone = false;
1072 uint32_t cLoops = 0;
1073 int rc = VINF_SUCCESS;
1074 for (;;)
1075 {
1076 /*
1077 * Execute REM.
1078 */
1079 if (RT_LIKELY(emR3IsExecutionAllowed(pVM, pVCpu)))
1080 {
1081 STAM_PROFILE_START(&pVCpu->em.s.StatREMExec, c);
1082 rc = VBOXSTRICTRC_TODO(IEMExecLots(pVCpu, 8192 /*cMaxInstructions*/, 4095 /*cPollRate*/, NULL /*pcInstructions*/));
1083 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMExec, c);
1084 }
1085 else
1086 {
1087 /* Give up this time slice; virtual time continues */
1088 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatCapped, u);
1089 RTThreadSleep(5);
1090 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatCapped, u);
1091 rc = VINF_SUCCESS;
1092 }
1093
1094 /*
1095 * Deal with high priority post execution FFs before doing anything
1096 * else. Sync back the state and leave the lock to be on the safe side.
1097 */
1098 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_HIGH_PRIORITY_POST_MASK)
1099 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_MASK))
1100 rc = VBOXSTRICTRC_TODO(emR3HighPriorityPostForcedActions(pVM, pVCpu, rc));
1101
1102 /*
1103 * Process the returned status code.
1104 */
1105 if (rc != VINF_SUCCESS)
1106 {
1107 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
1108 break;
1109 if (rc != VINF_REM_INTERRUPED_FF)
1110 {
1111 /* Try to dodge unimplemented IEM trouble by rescheduling. */
1112 if ( rc == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1113 || rc == VERR_IEM_INSTR_NOT_IMPLEMENTED)
1114 {
1115 EMSTATE enmNewState = emR3Reschedule(pVM, pVCpu);
1116 if (enmNewState != EMSTATE_REM && enmNewState != EMSTATE_IEM_THEN_REM)
1117 {
1118 rc = VINF_EM_RESCHEDULE;
1119 break;
1120 }
1121 }
1122
1123 /*
1124 * Anything which is not known to us means an internal error
1125 * and the termination of the VM!
1126 */
1127 AssertMsg(rc == VERR_REM_TOO_MANY_TRAPS, ("Unknown GC return code: %Rra\n", rc));
1128 break;
1129 }
1130 }
1131
1132
1133 /*
1134 * Check and execute forced actions.
1135 *
1136 * Sync back the VM state and leave the lock before calling any of
1137 * these, you never know what's going to happen here.
1138 */
1139#ifdef VBOX_HIGH_RES_TIMERS_HACK
1140 TMTimerPollVoid(pVM, pVCpu);
1141#endif
1142 AssertCompile(VMCPU_FF_ALL_REM_MASK & VMCPU_FF_TIMER);
1143 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_REM_MASK)
1144 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_ALL_REM_MASK) )
1145 {
1146 STAM_REL_PROFILE_ADV_SUSPEND(&pVCpu->em.s.StatREMTotal, a);
1147 rc = emR3ForcedActions(pVM, pVCpu, rc);
1148 VBOXVMM_EM_FF_ALL_RET(pVCpu, rc);
1149 STAM_REL_PROFILE_ADV_RESUME(&pVCpu->em.s.StatREMTotal, a);
1150 if ( rc != VINF_SUCCESS
1151 && rc != VINF_EM_RESCHEDULE_REM)
1152 {
1153 *pfFFDone = true;
1154 break;
1155 }
1156 }
1157
1158 /*
1159 * Have to check if we can get back to fast execution mode every so often.
1160 */
1161 if (!(++cLoops & 7))
1162 {
1163 EMSTATE enmCheck = emR3Reschedule(pVM, pVCpu);
1164 if ( enmCheck != EMSTATE_REM
1165 && enmCheck != EMSTATE_IEM_THEN_REM)
1166 {
1167 LogFlow(("emR3RemExecute: emR3Reschedule -> %d -> VINF_EM_RESCHEDULE\n", enmCheck));
1168 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatREMTotal, a);
1169 return VINF_EM_RESCHEDULE;
1170 }
1171 Log2(("emR3RemExecute: emR3Reschedule -> %d\n", enmCheck));
1172 }
1173
1174 } /* The Inner Loop, recompiled execution mode version. */
1175
1176 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatREMTotal, a);
1177 return rc;
1178}
1179
1180
1181#ifdef DEBUG
1182
1183int emR3SingleStepExecRem(PVM pVM, PVMCPU pVCpu, uint32_t cIterations)
1184{
1185 EMSTATE enmOldState = pVCpu->em.s.enmState;
1186
1187 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_REM;
1188
1189 Log(("Single step BEGIN:\n"));
1190 for (uint32_t i = 0; i < cIterations; i++)
1191 {
1192 DBGFR3PrgStep(pVCpu);
1193 DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, "RSS");
1194 emR3RemStep(pVM, pVCpu);
1195 if (emR3Reschedule(pVM, pVCpu) != EMSTATE_REM)
1196 break;
1197 }
1198 Log(("Single step END:\n"));
1199 CPUMSetGuestEFlags(pVCpu, CPUMGetGuestEFlags(pVCpu) & ~X86_EFL_TF);
1200 pVCpu->em.s.enmState = enmOldState;
1201 return VINF_EM_RESCHEDULE;
1202}
1203
1204#endif /* DEBUG */
1205
1206
1207/**
1208 * Try to execute the problematic code in IEM first, then fall back on REM if there
1209 * is too much of it or if IEM doesn't implement something.
1210 *
1211 * @returns Strict VBox status code from IEMExecLots.
1212 * @param pVM The cross context VM structure.
1213 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1214 * @param pfFFDone Force flags done indicator.
1215 *
1216 * @thread EMT(pVCpu)
1217 */
1218static VBOXSTRICTRC emR3ExecuteIemThenRem(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
1219{
1220 LogFlow(("emR3ExecuteIemThenRem: %04x:%RGv\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestRIP(pVCpu)));
1221 *pfFFDone = false;
1222
1223 /*
1224 * Execute in IEM for a while.
1225 */
1226 while (pVCpu->em.s.cIemThenRemInstructions < 1024)
1227 {
1228 uint32_t cInstructions;
1229 VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu, 1024 - pVCpu->em.s.cIemThenRemInstructions /*cMaxInstructions*/,
1230 UINT32_MAX/2 /*cPollRate*/, &cInstructions);
1231 pVCpu->em.s.cIemThenRemInstructions += cInstructions;
1232 if (rcStrict != VINF_SUCCESS)
1233 {
1234 if ( rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1235 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
1236 break;
1237
1238 Log(("emR3ExecuteIemThenRem: returns %Rrc after %u instructions\n",
1239 VBOXSTRICTRC_VAL(rcStrict), pVCpu->em.s.cIemThenRemInstructions));
1240 return rcStrict;
1241 }
1242
1243 EMSTATE enmNewState = emR3Reschedule(pVM, pVCpu);
1244 if (enmNewState != EMSTATE_REM && enmNewState != EMSTATE_IEM_THEN_REM)
1245 {
1246 LogFlow(("emR3ExecuteIemThenRem: -> %d (%s) after %u instructions\n",
1247 enmNewState, emR3GetStateName(enmNewState), pVCpu->em.s.cIemThenRemInstructions));
1248 pVCpu->em.s.enmPrevState = pVCpu->em.s.enmState;
1249 pVCpu->em.s.enmState = enmNewState;
1250 return VINF_SUCCESS;
1251 }
1252
1253 /*
1254 * Check for pending actions.
1255 */
1256 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_REM_MASK)
1257 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_ALL_REM_MASK & ~VMCPU_FF_UNHALT))
1258 return VINF_SUCCESS;
1259 }
1260
1261 /*
1262 * Switch to REM.
1263 */
1264 Log(("emR3ExecuteIemThenRem: -> EMSTATE_REM (after %u instructions)\n", pVCpu->em.s.cIemThenRemInstructions));
1265 pVCpu->em.s.enmState = EMSTATE_REM;
1266 return VINF_SUCCESS;
1267}
1268
1269
1270/**
1271 * Decides whether to execute RAW, HWACC or REM.
1272 *
1273 * @returns new EM state
1274 * @param pVM The cross context VM structure.
1275 * @param pVCpu The cross context virtual CPU structure.
1276 */
1277EMSTATE emR3Reschedule(PVM pVM, PVMCPU pVCpu)
1278{
1279 /*
1280 * We stay in the wait for SIPI state unless explicitly told otherwise.
1281 */
1282 if (pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI)
1283 return EMSTATE_WAIT_SIPI;
1284
1285 /*
1286 * Execute everything in IEM?
1287 */
1288 if ( pVM->em.s.fIemExecutesAll
1289 || VM_IS_EXEC_ENGINE_IEM(pVM))
1290 return EMSTATE_IEM;
1291
1292 if (VM_IS_HM_ENABLED(pVM))
1293 {
1294 if (HMCanExecuteGuest(pVM, pVCpu, &pVCpu->cpum.GstCtx))
1295 return EMSTATE_HM;
1296 }
1297 else if (NEMR3CanExecuteGuest(pVM, pVCpu))
1298 return EMSTATE_NEM;
1299
1300 /*
1301 * Note! Raw mode and hw accelerated mode are incompatible. The latter
1302 * turns off monitoring features essential for raw mode!
1303 */
1304 return EMSTATE_IEM_THEN_REM;
1305}
1306
1307
1308/**
1309 * Executes all high priority post execution force actions.
1310 *
1311 * @returns Strict VBox status code. Typically @a rc, but may be upgraded to
1312 * fatal error status code.
1313 *
1314 * @param pVM The cross context VM structure.
1315 * @param pVCpu The cross context virtual CPU structure.
1316 * @param rc The current strict VBox status code rc.
1317 */
1318VBOXSTRICTRC emR3HighPriorityPostForcedActions(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc)
1319{
1320 VBOXVMM_EM_FF_HIGH(pVCpu, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions, VBOXSTRICTRC_VAL(rc));
1321
1322 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PDM_CRITSECT))
1323 PDMCritSectBothFF(pVM, pVCpu);
1324
1325 /* Update CR3 (Nested Paging case for HM). */
1326 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
1327 {
1328 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_EFER, rc);
1329 int const rc2 = PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
1330 if (RT_FAILURE(rc2))
1331 return rc2;
1332 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
1333 }
1334
1335 /* IEM has pending work (typically memory write after INS instruction). */
1336 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
1337 rc = IEMR3ProcessForceFlag(pVM, pVCpu, rc);
1338
1339 /* IOM has pending work (committing an I/O or MMIO write). */
1340 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IOM))
1341 {
1342 rc = IOMR3ProcessForceFlag(pVM, pVCpu, rc);
1343 if (pVCpu->em.s.idxContinueExitRec >= RT_ELEMENTS(pVCpu->em.s.aExitRecords))
1344 { /* half likely, or at least it's a line shorter. */ }
1345 else if (rc == VINF_SUCCESS)
1346 rc = VINF_EM_RESUME_R3_HISTORY_EXEC;
1347 else
1348 pVCpu->em.s.idxContinueExitRec = UINT16_MAX;
1349 }
1350
1351 if (VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
1352 {
1353 if ( rc > VINF_EM_NO_MEMORY
1354 && rc <= VINF_EM_LAST)
1355 rc = VINF_EM_NO_MEMORY;
1356 }
1357
1358 return rc;
1359}
1360
1361
1362/**
1363 * Helper for emR3ForcedActions() for VMX external interrupt VM-exit.
1364 *
1365 * @returns VBox status code.
1366 * @retval VINF_NO_CHANGE if the VMX external interrupt intercept was not active.
1367 * @param pVCpu The cross context virtual CPU structure.
1368 */
1369static int emR3VmxNstGstIntrIntercept(PVMCPU pVCpu)
1370{
1371#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1372 /* Handle the "external interrupt" VM-exit intercept. */
1373 if (CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_EXT_INT_EXIT))
1374 {
1375 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, 0 /* uVector */, true /* fIntPending */);
1376 AssertMsg( rcStrict != VINF_VMX_VMEXIT
1377 && rcStrict != VINF_NO_CHANGE, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1378 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
1379 return VBOXSTRICTRC_TODO(rcStrict);
1380 }
1381#else
1382 RT_NOREF(pVCpu);
1383#endif
1384 return VINF_NO_CHANGE;
1385}
1386
1387
1388/**
1389 * Helper for emR3ForcedActions() for SVM interrupt intercept.
1390 *
1391 * @returns VBox status code.
1392 * @retval VINF_NO_CHANGE if the SVM external interrupt intercept was not active.
1393 * @param pVCpu The cross context virtual CPU structure.
1394 */
1395static int emR3SvmNstGstIntrIntercept(PVMCPU pVCpu)
1396{
1397#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1398 /* Handle the physical interrupt intercept (can be masked by the nested hypervisor). */
1399 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, &pVCpu->cpum.GstCtx, SVM_CTRL_INTERCEPT_INTR))
1400 {
1401 CPUM_ASSERT_NOT_EXTRN(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK);
1402 VBOXSTRICTRC rcStrict = IEMExecSvmVmexit(pVCpu, SVM_EXIT_INTR, 0, 0);
1403 if (RT_SUCCESS(rcStrict))
1404 {
1405 AssertMsg( rcStrict != VINF_SVM_VMEXIT
1406 && rcStrict != VINF_NO_CHANGE, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1407 return VBOXSTRICTRC_VAL(rcStrict);
1408 }
1409
1410 AssertMsgFailed(("INTR #VMEXIT failed! rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1411 return VINF_EM_TRIPLE_FAULT;
1412 }
1413#else
1414 NOREF(pVCpu);
1415#endif
1416 return VINF_NO_CHANGE;
1417}
1418
1419
1420/**
1421 * Helper for emR3ForcedActions() for SVM virtual interrupt intercept.
1422 *
1423 * @returns VBox status code.
1424 * @retval VINF_NO_CHANGE if the SVM virtual interrupt intercept was not active.
1425 * @param pVCpu The cross context virtual CPU structure.
1426 */
1427static int emR3SvmNstGstVirtIntrIntercept(PVMCPU pVCpu)
1428{
1429#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1430 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, &pVCpu->cpum.GstCtx, SVM_CTRL_INTERCEPT_VINTR))
1431 {
1432 CPUM_ASSERT_NOT_EXTRN(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK);
1433 VBOXSTRICTRC rcStrict = IEMExecSvmVmexit(pVCpu, SVM_EXIT_VINTR, 0, 0);
1434 if (RT_SUCCESS(rcStrict))
1435 {
1436 Assert(rcStrict != VINF_SVM_VMEXIT);
1437 return VBOXSTRICTRC_VAL(rcStrict);
1438 }
1439 AssertMsgFailed(("VINTR #VMEXIT failed! rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1440 return VINF_EM_TRIPLE_FAULT;
1441 }
1442#else
1443 NOREF(pVCpu);
1444#endif
1445 return VINF_NO_CHANGE;
1446}
1447
1448
1449/**
1450 * Executes all pending forced actions.
1451 *
1452 * Forced actions can cause execution delays and execution
1453 * rescheduling. The first we deal with using action priority, so
1454 * that for instance pending timers aren't scheduled and run until
1455 * right before execution. The rescheduling we deal with using
1456 * return codes. The same goes for VM termination, only in that case
1457 * we exit everything.
1458 *
1459 * @returns VBox status code of equal or greater importance/severity than rc.
1460 * The most important ones are: VINF_EM_RESCHEDULE,
1461 * VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1462 *
1463 * @param pVM The cross context VM structure.
1464 * @param pVCpu The cross context virtual CPU structure.
1465 * @param rc The current rc.
1466 *
1467 */
1468int emR3ForcedActions(PVM pVM, PVMCPU pVCpu, int rc)
1469{
1470 STAM_REL_PROFILE_START(&pVCpu->em.s.StatForcedActions, a);
1471#ifdef VBOX_STRICT
1472 int rcIrq = VINF_SUCCESS;
1473#endif
1474 int rc2;
1475#define UPDATE_RC() \
1476 do { \
1477 AssertMsg(rc2 <= 0 || (rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST), ("Invalid FF return code: %Rra\n", rc2)); \
1478 if (rc2 == VINF_SUCCESS || rc < VINF_SUCCESS) \
1479 break; \
1480 if (!rc || rc2 < rc) \
1481 rc = rc2; \
1482 } while (0)
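/* Worked illustration of the UPDATE_RC() priority rule above: errors already
 * in rc are never overwritten, and among the strictly positive EM status codes
 * the numerically smaller (more important) one wins.  The relative order of
 * the two codes below is an assumption consistent with the importance note in
 * the function documentation.
 *
 * @code
 *  rc  = VINF_EM_RESCHEDULE;       // from an earlier forced action
 *  rc2 = VINF_EM_OFF;              // power-off request, numerically smaller
 *  UPDATE_RC();                    // rc is now VINF_EM_OFF
 *
 *  rc  = VERR_EM_INTERNAL_ERROR;   // negative status: errors stick
 *  rc2 = VINF_EM_RESCHEDULE;
 *  UPDATE_RC();                    // rc is unchanged
 * @endcode
 */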
1483 VBOXVMM_EM_FF_ALL(pVCpu, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions, rc);
1484
1485 /*
1486 * Post execution chunk first.
1487 */
1488 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_NORMAL_PRIORITY_POST_MASK)
1489 || (VMCPU_FF_NORMAL_PRIORITY_POST_MASK && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_NORMAL_PRIORITY_POST_MASK)) )
1490 {
1491 /*
1492 * EMT Rendezvous (must be serviced before termination).
1493 */
1494 if (VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
1495 {
1496 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1497 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
1498 UPDATE_RC();
1499 /** @todo HACK ALERT! The following test is to make sure EM+TM
1500 * thinks the VM is stopped/reset before the next VM state change
1501 * is made. We need a better solution for this, or at least make it
1502 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1503 * VINF_EM_SUSPEND). */
1504 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1505 {
1506 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1507 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1508 return rc;
1509 }
1510 }
1511
1512 /*
1513 * State change request (cleared by vmR3SetStateLocked).
1514 */
1515 if (VM_FF_IS_SET(pVM, VM_FF_CHECK_VM_STATE))
1516 {
1517 VMSTATE enmState = VMR3GetState(pVM);
1518 switch (enmState)
1519 {
1520 case VMSTATE_FATAL_ERROR:
1521 case VMSTATE_FATAL_ERROR_LS:
1522 case VMSTATE_GURU_MEDITATION:
1523 case VMSTATE_GURU_MEDITATION_LS:
1524 Log2(("emR3ForcedActions: %s -> VINF_EM_SUSPEND\n", VMGetStateName(enmState) ));
1525 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1526 return VINF_EM_SUSPEND;
1527
1528 case VMSTATE_DESTROYING:
1529 Log2(("emR3ForcedActions: %s -> VINF_EM_TERMINATE\n", VMGetStateName(enmState) ));
1530 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1531 return VINF_EM_TERMINATE;
1532
1533 default:
1534 AssertMsgFailed(("%s\n", VMGetStateName(enmState)));
1535 }
1536 }
1537
1538 /*
1539 * Debugger Facility polling.
1540 */
1541 if ( VM_FF_IS_SET(pVM, VM_FF_DBGF)
1542 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_DBGF) )
1543 {
1544 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1545 rc2 = DBGFR3VMMForcedAction(pVM, pVCpu);
1546 /** @todo why that VINF_EM_DBG_EVENT here? Duplicate info, should be handled
1547 * somewhere before we get here, I would think. */
1548 if (rc == VINF_EM_DBG_EVENT) /* HACK! We should've handled pending debug event. */
1549 rc = rc2;
1550 else
1551 UPDATE_RC();
1552 }
1553
1554 /*
1555 * Postponed reset request.
1556 */
1557 if (VM_FF_TEST_AND_CLEAR(pVM, VM_FF_RESET))
1558 {
1559 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1560 rc2 = VBOXSTRICTRC_TODO(VMR3ResetFF(pVM));
1561 UPDATE_RC();
1562 }
1563
1564 /*
1565 * Out of memory? Putting this after CSAM as it may in theory cause us to run out of memory.
1566 */
1567 if (VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
1568 {
1569 rc2 = PGMR3PhysAllocateHandyPages(pVM);
1570 UPDATE_RC();
1571 if (rc == VINF_EM_NO_MEMORY)
1572 return rc;
1573 }
1574
1575 /* check that we got them all */
1576 AssertCompile(VM_FF_NORMAL_PRIORITY_POST_MASK == (VM_FF_CHECK_VM_STATE | VM_FF_DBGF | VM_FF_RESET | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
1577 AssertCompile(VMCPU_FF_NORMAL_PRIORITY_POST_MASK == VMCPU_FF_DBGF);
1578 }
1579
1580 /*
1581 * Normal priority then.
1582 * (Executed in no particular order.)
1583 */
1584 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_NORMAL_PRIORITY_MASK, VM_FF_PGM_NO_MEMORY))
1585 {
1586 /*
1587 * PDM Queues are pending.
1588 */
1589 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PDM_QUEUES, VM_FF_PGM_NO_MEMORY))
1590 PDMR3QueueFlushAll(pVM);
1591
1592 /*
1593 * PDM DMA transfers are pending.
1594 */
1595 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PDM_DMA, VM_FF_PGM_NO_MEMORY))
1596 PDMR3DmaRun(pVM);
1597
1598 /*
1599 * EMT Rendezvous (make sure they are handled before the requests).
1600 */
1601 if (VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
1602 {
1603 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1604 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
1605 UPDATE_RC();
1606 /** @todo HACK ALERT! The following test is to make sure EM+TM
1607 * thinks the VM is stopped/reset before the next VM state change
1608 * is made. We need a better solution for this, or at least make it
1609 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1610 * VINF_EM_SUSPEND). */
1611 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1612 {
1613 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1614 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1615 return rc;
1616 }
1617 }
1618
1619 /*
1620 * Requests from other threads.
1621 */
1622 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_REQUEST, VM_FF_PGM_NO_MEMORY))
1623 {
1624 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1625 rc2 = VMR3ReqProcessU(pVM->pUVM, VMCPUID_ANY, false /*fPriorityOnly*/);
1626 if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE) /** @todo this shouldn't be necessary */
1627 {
1628 Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
1629 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1630 return rc2;
1631 }
1632 UPDATE_RC();
1633 /** @todo HACK ALERT! The following test is to make sure EM+TM
1634 * thinks the VM is stopped/reset before the next VM state change
1635 * is made. We need a better solution for this, or at least make it
1636 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1637 * VINF_EM_SUSPEND). */
1638 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1639 {
1640 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1641 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1642 return rc;
1643 }
1644 }
1645
1646 /* check that we got them all */
1647 AssertCompile(VM_FF_NORMAL_PRIORITY_MASK == (VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA | VM_FF_EMT_RENDEZVOUS));
1648 }
1649
1650 /*
1651 * Normal priority then. (per-VCPU)
1652 * (Executed in no particular order.)
1653 */
1654 if ( !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY)
1655 && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_NORMAL_PRIORITY_MASK))
1656 {
1657 /*
1658 * Requests from other threads.
1659 */
1660 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_REQUEST))
1661 {
1662 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1663 rc2 = VMR3ReqProcessU(pVM->pUVM, pVCpu->idCpu, false /*fPriorityOnly*/);
1664 if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE || rc2 == VINF_EM_RESET)
1665 {
1666 Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
1667 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1668 return rc2;
1669 }
1670 UPDATE_RC();
1671 /** @todo HACK ALERT! The following test is to make sure EM+TM
1672 * thinks the VM is stopped/reset before the next VM state change
1673 * is made. We need a better solution for this, or at least make it
1674 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1675 * VINF_EM_SUSPEND). */
1676 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1677 {
1678 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1679 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1680 return rc;
1681 }
1682 }
1683
1684 /* check that we got them all */
1685 Assert(!(VMCPU_FF_NORMAL_PRIORITY_MASK & ~VMCPU_FF_REQUEST));
1686 }
1687
1688 /*
1689 * High priority pre execution chunk last.
1690 * (Executed in ascending priority order.)
1691 */
1692 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_HIGH_PRIORITY_PRE_MASK)
1693 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_MASK))
1694 {
1695 /*
1696 * Timers before interrupts.
1697 */
1698 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TIMER)
1699 && !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
1700 TMR3TimerQueuesDo(pVM);
1701
1702 /*
1703 * Pick up asynchronously posted interrupts into the APIC.
1704 */
1705 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
1706 APICUpdatePendingInterrupts(pVCpu);
1707
1708 /*
1709 * The instruction following an emulated STI should *always* be executed!
1710 *
1711 * Note! We intentionally don't clear VMCPU_FF_INHIBIT_INTERRUPTS here if
1712 * the eip is the same as the inhibited instr address. Before we
1713 * are able to execute this instruction in raw mode (iret to
1714 * guest code) an external interrupt might force a world switch
1715 * again. Possibly allowing a guest interrupt to be dispatched
1716 * in the process. This could break the guest. Sounds very
1717 * unlikely, but such timing-sensitive problems are not as rare as
1718 * you might think.
1719 */
1720 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
1721 && !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
1722 {
1723 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP);
1724 if (CPUMGetGuestRIP(pVCpu) != EMGetInhibitInterruptsPC(pVCpu))
1725 {
1726 Log(("Clearing VMCPU_FF_INHIBIT_INTERRUPTS at %RGv - successor %RGv\n", (RTGCPTR)CPUMGetGuestRIP(pVCpu), EMGetInhibitInterruptsPC(pVCpu)));
1727 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
1728 }
1729 else
1730 Log(("Leaving VMCPU_FF_INHIBIT_INTERRUPTS set at %RGv\n", (RTGCPTR)CPUMGetGuestRIP(pVCpu)));
1731 }
1732
1733 /** @todo SMIs. If we implement SMIs, this is where they will have to be
1734 * delivered. */
1735
1736#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1737 if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER))
1738 {
1739 /*
1740 * VMX Nested-guest APIC-write pending (can cause VM-exits).
1741 * Takes priority over even SMI and INIT signals.
1742 * See Intel spec. 29.4.3.2 "APIC-Write Emulation".
1743 */
1744 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
1745 {
1746 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexitApicWrite(pVCpu));
1747 if (rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
1748 UPDATE_RC();
1749 }
1750
1751 /*
1752 * VMX Nested-guest monitor-trap flag (MTF) VM-exit.
1753 * Takes priority over "Traps on the previous instruction".
1754 * See Intel spec. 6.9 "Priority Among Simultaneous Exceptions And Interrupts".
1755 */
1756 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF))
1757 {
1758 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* uExitQual */));
1759 Assert(rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE);
1760 UPDATE_RC();
1761 }
1762
1763 /*
1764 * VMX Nested-guest preemption timer VM-exit.
1765 * Takes priority over NMI-window VM-exits.
1766 */
1767 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
1768 {
1769 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexitPreemptTimer(pVCpu));
1770 Assert(rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE);
1771 UPDATE_RC();
1772 }
1773 }
1774#endif
1775
1776 /*
1777 * Guest event injection.
1778 */
1779 Assert(!(pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI)));
1780 bool fWakeupPending = false;
1781 if ( !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY)
1782 && (!rc || rc >= VINF_EM_RESCHEDULE_HM)
1783 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS) /* Interrupt shadows block both NMIs and interrupts. */
1784 && !TRPMHasTrap(pVCpu)) /* An event could already be scheduled for dispatching. */
1785 {
1786 bool fInVmxNonRootMode;
1787 bool fInSvmHwvirtMode;
1788 bool const fInNestedGuest = CPUMIsGuestInNestedHwvirtMode(&pVCpu->cpum.GstCtx);
1789 if (fInNestedGuest)
1790 {
1791 fInVmxNonRootMode = CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx);
1792 fInSvmHwvirtMode = CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.GstCtx);
1793 }
1794 else
1795 {
1796 fInVmxNonRootMode = false;
1797 fInSvmHwvirtMode = false;
1798 }
1799
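            /* GIF is the AMD-V global interrupt flag; outside SVM nested
               hardware virtualization it is presumably always reported as set,
               so this check only gates injection for SVM nested guests that
               have executed CLGI. */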
1800 bool fGif = CPUMGetGuestGif(&pVCpu->cpum.GstCtx);
1801 if (fGif)
1802 {
1803#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1804 /*
1805 * VMX NMI-window VM-exit.
1806 * Takes priority over non-maskable interrupts (NMIs).
1807 * Interrupt shadows block NMI-window VM-exits.
1808 * Any event that is already in TRPM (e.g. injected during VM-entry) takes priority.
1809 *
1810 * See Intel spec. 25.2 "Other Causes Of VM Exits".
1811 * See Intel spec. 26.7.6 "NMI-Window Exiting".
1812 */
1813 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
1814 && !CPUMIsGuestVmxVirtNmiBlocking(&pVCpu->cpum.GstCtx))
1815 {
1816 Assert(CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_NMI_WINDOW_EXIT));
1817 Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
1818 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* uExitQual */));
1819 AssertMsg( rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE
1820 && rc2 != VINF_VMX_VMEXIT
1821 && rc2 != VINF_NO_CHANGE, ("%Rrc\n", rc2));
1822 UPDATE_RC();
1823 }
1824 else
1825#endif
1826 /*
1827 * NMIs (take priority over external interrupts).
1828 */
1829 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI)
1830 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
1831 {
1832#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1833 if ( fInVmxNonRootMode
1834 && CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_NMI_EXIT))
1835 {
1836 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexitXcptNmi(pVCpu));
1837 Assert(rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE);
1838 UPDATE_RC();
1839 }
1840 else
1841#endif
1842#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1843 if ( fInSvmHwvirtMode
1844 && CPUMIsGuestSvmCtrlInterceptSet(pVCpu, &pVCpu->cpum.GstCtx, SVM_CTRL_INTERCEPT_NMI))
1845 {
1846 rc2 = VBOXSTRICTRC_VAL(IEMExecSvmVmexit(pVCpu, SVM_EXIT_NMI, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */));
1847 AssertMsg( rc2 != VINF_SVM_VMEXIT
1848 && rc2 != VINF_NO_CHANGE, ("%Rrc\n", rc2));
1849 UPDATE_RC();
1850 }
1851 else
1852#endif
1853 {
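                    /* No nested-guest intercept applies, so queue the NMI in
                       TRPM for injection and reschedule into whichever engine
                       will actually deliver it (HM, NEM, or IEM/REM). */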
1854 rc2 = TRPMAssertTrap(pVCpu, X86_XCPT_NMI, TRPM_TRAP);
1855 if (rc2 == VINF_SUCCESS)
1856 {
1857 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
1858 fWakeupPending = true;
1859 if (pVM->em.s.fIemExecutesAll)
1860 rc2 = VINF_EM_RESCHEDULE;
1861 else
1862 {
1863 rc2 = HMR3IsActive(pVCpu) ? VINF_EM_RESCHEDULE_HM
1864 : VM_IS_NEM_ENABLED(pVM) ? VINF_EM_RESCHEDULE
1865 : VINF_EM_RESCHEDULE_REM;
1866 }
1867 }
1868 UPDATE_RC();
1869 }
1870 }
1871#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1872 /*
1873 * VMX Interrupt-window VM-exits.
1874 * Takes priority over external interrupts.
1875 */
1876 else if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
1877 && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
1878 {
1879 Assert(CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INT_WINDOW_EXIT));
1880 Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
1881 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* uExitQual */));
1882 AssertMsg( rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE
1883 && rc2 != VINF_VMX_VMEXIT
1884 && rc2 != VINF_NO_CHANGE, ("%Rrc\n", rc2));
1885 UPDATE_RC();
1886 }
1887#endif
1888#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1889 /** @todo NSTSVM: Handle this for SVM here too later not when an interrupt is
1890 * actually pending like we currently do. */
1891#endif
1892 /*
1893 * External interrupts.
1894 */
1895 else
1896 {
1897 /*
1898 * VMX: virtual interrupts take priority over physical interrupts.
1899 * SVM: physical interrupts take priority over virtual interrupts.
1900 */
1901 if ( fInVmxNonRootMode
1902 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST)
1903 && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
1904 {
1905 /** @todo NSTVMX: virtual-interrupt delivery. */
1906 rc2 = VINF_SUCCESS;
1907 }
1908 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
1909 && CPUMIsGuestPhysIntrEnabled(pVCpu))
1910 {
1911 Assert(pVCpu->em.s.enmState != EMSTATE_WAIT_SIPI);
1912 if (fInVmxNonRootMode)
1913 rc2 = emR3VmxNstGstIntrIntercept(pVCpu);
1914 else if (fInSvmHwvirtMode)
1915 rc2 = emR3SvmNstGstIntrIntercept(pVCpu);
1916 else
1917 rc2 = VINF_NO_CHANGE;
1918
1919 if (rc2 == VINF_NO_CHANGE)
1920 {
1921 bool fInjected = false;
1922 CPUM_IMPORT_EXTRN_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
1923 /** @todo this really isn't nice, should properly handle this */
1924 /* Note! This can still cause a VM-exit (on Intel). */
1925 LogFlow(("Calling TRPMR3InjectEvent: %04x:%08RX64 efl=%#x\n",
1926 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eflags));
1927 rc2 = TRPMR3InjectEvent(pVM, pVCpu, TRPM_HARDWARE_INT, &fInjected);
1928 fWakeupPending = true;
1929 if ( pVM->em.s.fIemExecutesAll
1930 && ( rc2 == VINF_EM_RESCHEDULE_REM
1931 || rc2 == VINF_EM_RESCHEDULE_HM
1932 || rc2 == VINF_EM_RESCHEDULE_RAW))
1933 {
1934 rc2 = VINF_EM_RESCHEDULE;
1935 }
1936#ifdef VBOX_STRICT
1937 if (fInjected)
1938 rcIrq = rc2;
1939#endif
1940 }
1941 UPDATE_RC();
1942 }
1943 else if ( fInSvmHwvirtMode
1944 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST)
1945 && CPUMIsGuestSvmVirtIntrEnabled(pVCpu, &pVCpu->cpum.GstCtx))
1946 {
1947 rc2 = emR3SvmNstGstVirtIntrIntercept(pVCpu);
1948 if (rc2 == VINF_NO_CHANGE)
1949 {
1950 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST);
1951 uint8_t const uNstGstVector = CPUMGetGuestSvmVirtIntrVector(&pVCpu->cpum.GstCtx);
1952 AssertMsg(uNstGstVector > 0 && uNstGstVector <= X86_XCPT_LAST, ("Invalid VINTR %#x\n", uNstGstVector));
1953 TRPMAssertTrap(pVCpu, uNstGstVector, TRPM_HARDWARE_INT);
1954 Log(("EM: Asserting nested-guest virt. hardware intr: %#x\n", uNstGstVector));
1955 rc2 = VINF_EM_RESCHEDULE;
1956#ifdef VBOX_STRICT
1957 rcIrq = rc2;
1958#endif
1959 }
1960 UPDATE_RC();
1961 }
1962 }
1963 }
1964 }
1965
1966 /*
1967 * Allocate handy pages.
1968 */
1969 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PGM_NEED_HANDY_PAGES, VM_FF_PGM_NO_MEMORY))
1970 {
1971 rc2 = PGMR3PhysAllocateHandyPages(pVM);
1972 UPDATE_RC();
1973 }
1974
1975 /*
1976 * Debugger Facility request.
1977 */
1978 if ( ( VM_FF_IS_SET(pVM, VM_FF_DBGF)
1979 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_DBGF) )
1980 && !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY) )
1981 {
1982 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1983 rc2 = DBGFR3VMMForcedAction(pVM, pVCpu);
1984 UPDATE_RC();
1985 }
1986
1987 /*
1988 * EMT Rendezvous (must be serviced before termination).
1989 */
1990 if ( !fWakeupPending /* don't miss the wakeup from EMSTATE_HALTED! */
1991 && VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
1992 {
1993 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1994 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
1995 UPDATE_RC();
1996 /** @todo HACK ALERT! The following test is to make sure EM+TM thinks the VM is
1997 * stopped/reset before the next VM state change is made. We need a better
1998 * solution for this, or at least make it possible to do: (rc >= VINF_EM_FIRST
1999 * && rc <= VINF_EM_SUSPEND). */
2000 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
2001 {
2002 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2003 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2004 return rc;
2005 }
2006 }
2007
2008 /*
2009 * State change request (cleared by vmR3SetStateLocked).
2010 */
2011 if ( !fWakeupPending /* don't miss the wakeup from EMSTATE_HALTED! */
2012 && VM_FF_IS_SET(pVM, VM_FF_CHECK_VM_STATE))
2013 {
2014 VMSTATE enmState = VMR3GetState(pVM);
2015 switch (enmState)
2016 {
2017 case VMSTATE_FATAL_ERROR:
2018 case VMSTATE_FATAL_ERROR_LS:
2019 case VMSTATE_GURU_MEDITATION:
2020 case VMSTATE_GURU_MEDITATION_LS:
2021 Log2(("emR3ForcedActions: %s -> VINF_EM_SUSPEND\n", VMGetStateName(enmState) ));
2022 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2023 return VINF_EM_SUSPEND;
2024
2025 case VMSTATE_DESTROYING:
2026 Log2(("emR3ForcedActions: %s -> VINF_EM_TERMINATE\n", VMGetStateName(enmState) ));
2027 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2028 return VINF_EM_TERMINATE;
2029
2030 default:
2031 AssertMsgFailed(("%s\n", VMGetStateName(enmState)));
2032 }
2033 }
2034
2035 /*
2036 * Out of memory? Since most of our fellow high priority actions may cause us
2037 * to run out of memory, we're employing VM_FF_IS_PENDING_EXCEPT and putting this
2038 * at the end rather than the start. Also, VM_FF_TERMINATE has higher priority
2039 * than us since we can terminate without allocating more memory.
2040 */
2041 if (VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
2042 {
2043 rc2 = PGMR3PhysAllocateHandyPages(pVM);
2044 UPDATE_RC();
2045 if (rc == VINF_EM_NO_MEMORY)
2046 return rc;
2047 }
2048
2049 /*
2050 * If the virtual sync clock is still stopped, make TM restart it.
2051 */
2052 if (VM_FF_IS_SET(pVM, VM_FF_TM_VIRTUAL_SYNC))
2053 TMR3VirtualSyncFF(pVM, pVCpu);
2054
2055#ifdef DEBUG
2056 /*
2057 * Debug, pause the VM.
2058 */
2059 if (VM_FF_IS_SET(pVM, VM_FF_DEBUG_SUSPEND))
2060 {
2061 VM_FF_CLEAR(pVM, VM_FF_DEBUG_SUSPEND);
2062 Log(("emR3ForcedActions: returns VINF_EM_SUSPEND\n"));
2063 return VINF_EM_SUSPEND;
2064 }
2065#endif
2066
2067 /* check that we got them all */
2068 AssertCompile(VM_FF_HIGH_PRIORITY_PRE_MASK == (VM_FF_TM_VIRTUAL_SYNC | VM_FF_DBGF | VM_FF_CHECK_VM_STATE | VM_FF_DEBUG_SUSPEND | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
2069 AssertCompile(VMCPU_FF_HIGH_PRIORITY_PRE_MASK == (VMCPU_FF_TIMER | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_INHIBIT_INTERRUPTS | VMCPU_FF_DBGF | VMCPU_FF_INTERRUPT_NESTED_GUEST | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_PREEMPT_TIMER | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW));
2070 }
2071
2072#undef UPDATE_RC
2073 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2074 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2075 Assert(rcIrq == VINF_SUCCESS || rcIrq == rc);
2076 return rc;
2077}
2078
2079
2080/**
2081 * Check if the preset execution time cap restricts guest execution scheduling.
2082 *
2083 * @returns true if allowed, false otherwise
2084 * @param pVM The cross context VM structure.
2085 * @param pVCpu The cross context virtual CPU structure.
2086 */
2087bool emR3IsExecutionAllowed(PVM pVM, PVMCPU pVCpu)
2088{
2089 uint64_t u64UserTime, u64KernelTime;
2090
2091 if ( pVM->uCpuExecutionCap != 100
2092 && RT_SUCCESS(RTThreadGetExecutionTimeMilli(&u64KernelTime, &u64UserTime)))
2093 {
2094 uint64_t u64TimeNow = RTTimeMilliTS();
2095 if (pVCpu->em.s.u64TimeSliceStart + EM_TIME_SLICE < u64TimeNow)
2096 {
2097 /* New time slice. */
2098 pVCpu->em.s.u64TimeSliceStart = u64TimeNow;
2099 pVCpu->em.s.u64TimeSliceStartExec = u64KernelTime + u64UserTime;
2100 pVCpu->em.s.u64TimeSliceExec = 0;
2101 }
2102 pVCpu->em.s.u64TimeSliceExec = u64KernelTime + u64UserTime - pVCpu->em.s.u64TimeSliceStartExec;
2103
2104 Log2(("emR3IsExecutionAllowed: start=%RX64 startexec=%RX64 exec=%RX64 (cap=%x)\n", pVCpu->em.s.u64TimeSliceStart, pVCpu->em.s.u64TimeSliceStartExec, pVCpu->em.s.u64TimeSliceExec, (EM_TIME_SLICE * pVM->uCpuExecutionCap) / 100));
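        /* A worked example (assuming EM_TIME_SLICE is 100 ms): with
           uCpuExecutionCap=50 the guest may burn at most 100 * 50 / 100 = 50 ms
           of combined kernel+user CPU time per slice before this function
           starts returning false and execution is throttled. */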
2105 if (pVCpu->em.s.u64TimeSliceExec >= (EM_TIME_SLICE * pVM->uCpuExecutionCap) / 100)
2106 return false;
2107 }
2108 return true;
2109}
2110
2111
2112/**
2113 * Execute VM.
2114 *
2115 * This function is the main loop of the VM. The emulation thread
2116 * calls this function when the VM has been successfully constructed
2117 * and we're ready to execute the VM.
2118 *
2119 * Returning from this function means that the VM is turned off or
2120 * suspended (state already saved) and deconstruction is next in line.
2121 *
2122 * All interaction from other threads is done using forced actions
2123 * and signalling of the wait object.
2124 *
2125 * @returns VBox status code; informational status codes may indicate failure.
2126 * @param pVM The cross context VM structure.
2127 * @param pVCpu The cross context virtual CPU structure.
2128 */
2129VMMR3_INT_DECL(int) EMR3ExecuteVM(PVM pVM, PVMCPU pVCpu)
2130{
2131 Log(("EMR3ExecuteVM: pVM=%p enmVMState=%d (%s) enmState=%d (%s) enmPrevState=%d (%s)\n",
2132 pVM,
2133 pVM->enmVMState, VMR3GetStateName(pVM->enmVMState),
2134 pVCpu->em.s.enmState, emR3GetStateName(pVCpu->em.s.enmState),
2135 pVCpu->em.s.enmPrevState, emR3GetStateName(pVCpu->em.s.enmPrevState) ));
2136 VM_ASSERT_EMT(pVM);
2137 AssertMsg( pVCpu->em.s.enmState == EMSTATE_NONE
2138 || pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI
2139 || pVCpu->em.s.enmState == EMSTATE_SUSPENDED,
2140 ("%s\n", emR3GetStateName(pVCpu->em.s.enmState)));
2141
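    /* Fatal conditions deep inside the execution loops longjmp back to this
       point (presumably via EMR3FatalError), in which case rc is non-zero and
       we take the guru meditation / fatal dump path in the else branch below. */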
2142 int rc = setjmp(pVCpu->em.s.u.FatalLongJump);
2143 if (rc == 0)
2144 {
2145 /*
2146 * Start the virtual time.
2147 */
2148 TMR3NotifyResume(pVM, pVCpu);
2149
2150 /*
2151 * The Outer Main Loop.
2152 */
2153 bool fFFDone = false;
2154
2155 /* Reschedule right away to start in the right state. */
2156 rc = VINF_SUCCESS;
2157
2158 /* If resuming after a pause or a state load, restore the previous
2159 state (or else we'd start executing code); otherwise, just reschedule. */
2160 if ( pVCpu->em.s.enmState == EMSTATE_SUSPENDED
2161 && ( pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
2162 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED))
2163 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
2164 else
2165 pVCpu->em.s.enmState = emR3Reschedule(pVM, pVCpu);
2166 pVCpu->em.s.cIemThenRemInstructions = 0;
2167 Log(("EMR3ExecuteVM: enmState=%s\n", emR3GetStateName(pVCpu->em.s.enmState)));
2168
2169 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2170 for (;;)
2171 {
2172 /*
2173 * Before we can schedule anything (we're here because
2174 * scheduling is required) we must service any pending
2175 * forced actions to avoid any pending action causing
2176 * immediate rescheduling upon entering an inner loop.
2177 *
2178 * Do forced actions.
2179 */
2180 if ( !fFFDone
2181 && RT_SUCCESS(rc)
2182 && rc != VINF_EM_TERMINATE
2183 && rc != VINF_EM_OFF
2184 && ( VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_REM_MASK)
2185 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_ALL_REM_MASK & ~VMCPU_FF_UNHALT)))
2186 {
2187 rc = emR3ForcedActions(pVM, pVCpu, rc);
2188 VBOXVMM_EM_FF_ALL_RET(pVCpu, rc);
2189 }
2190 else if (fFFDone)
2191 fFFDone = false;
2192
2193 /*
2194 * Now what to do?
2195 */
2196 Log2(("EMR3ExecuteVM: rc=%Rrc\n", rc));
2197 EMSTATE const enmOldState = pVCpu->em.s.enmState;
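            /* First map the pending status code onto the next EM state; the
               second switch further down ("Act on the new state") then enters
               the matching inner execution loop. */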
2198 switch (rc)
2199 {
2200 /*
2201 * Keep doing what we're currently doing.
2202 */
2203 case VINF_SUCCESS:
2204 break;
2205
2206 /*
2207 * Reschedule - to raw-mode execution.
2208 */
2209/** @todo r=bird: consider merging VINF_EM_RESCHEDULE_RAW with VINF_EM_RESCHEDULE_HM, they serve the same purpose here at least. */
2210 case VINF_EM_RESCHEDULE_RAW:
2211 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2212 AssertLogRelFailed();
2213 pVCpu->em.s.enmState = EMSTATE_NONE;
2214 break;
2215
2216 /*
2217 * Reschedule - to HM or NEM.
2218 */
2219 case VINF_EM_RESCHEDULE_HM:
2220 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2221 if (VM_IS_HM_ENABLED(pVM))
2222 {
2223 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_HM: %d -> %d (EMSTATE_HM)\n", enmOldState, EMSTATE_HM));
2224 pVCpu->em.s.enmState = EMSTATE_HM;
2225 }
2226 else if (VM_IS_NEM_ENABLED(pVM))
2227 {
2228 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_HM: %d -> %d (EMSTATE_NEM)\n", enmOldState, EMSTATE_NEM));
2229 pVCpu->em.s.enmState = EMSTATE_NEM;
2230 }
2231 else
2232 {
2233 AssertLogRelFailed();
2234 pVCpu->em.s.enmState = EMSTATE_NONE;
2235 }
2236 break;
2237
2238 /*
2239 * Reschedule - to recompiled execution.
2240 */
2241 case VINF_EM_RESCHEDULE_REM:
2242 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2243 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_REM: %d -> %d (EMSTATE_IEM_THEN_REM)\n",
2244 enmOldState, EMSTATE_IEM_THEN_REM));
2245 if (pVCpu->em.s.enmState != EMSTATE_IEM_THEN_REM)
2246 {
2247 pVCpu->em.s.enmState = EMSTATE_IEM_THEN_REM;
2248 pVCpu->em.s.cIemThenRemInstructions = 0;
2249 }
2250 break;
2251
2252 /*
2253 * Resume.
2254 */
2255 case VINF_EM_RESUME:
2256 Log2(("EMR3ExecuteVM: VINF_EM_RESUME: %d -> VINF_EM_RESCHEDULE\n", enmOldState));
2257 /* Don't reschedule in the halted or wait for SIPI case. */
2258 if ( pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
2259 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED)
2260 {
2261 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
2262 break;
2263 }
2264 /* fall through and get scheduled. */
2265 RT_FALL_THRU();
2266
2267 /*
2268 * Reschedule.
2269 */
2270 case VINF_EM_RESCHEDULE:
2271 {
2272 EMSTATE enmState = emR3Reschedule(pVM, pVCpu);
2273 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE: %d -> %d (%s)\n", enmOldState, enmState, emR3GetStateName(enmState)));
2274 if (pVCpu->em.s.enmState != enmState && enmState == EMSTATE_IEM_THEN_REM)
2275 pVCpu->em.s.cIemThenRemInstructions = 0;
2276 pVCpu->em.s.enmState = enmState;
2277 break;
2278 }
2279
2280 /*
2281 * Halted.
2282 */
2283 case VINF_EM_HALT:
2284 Log2(("EMR3ExecuteVM: VINF_EM_HALT: %d -> %d\n", enmOldState, EMSTATE_HALTED));
2285 pVCpu->em.s.enmState = EMSTATE_HALTED;
2286 break;
2287
2288 /*
2289 * Switch to the wait for SIPI state (application processor only)
2290 */
2291 case VINF_EM_WAIT_SIPI:
2292 Assert(pVCpu->idCpu != 0);
2293 Log2(("EMR3ExecuteVM: VINF_EM_WAIT_SIPI: %d -> %d\n", enmOldState, EMSTATE_WAIT_SIPI));
2294 pVCpu->em.s.enmState = EMSTATE_WAIT_SIPI;
2295 break;
2296
2297
2298 /*
2299 * Suspend.
2300 */
2301 case VINF_EM_SUSPEND:
2302 Log2(("EMR3ExecuteVM: VINF_EM_SUSPEND: %d -> %d\n", enmOldState, EMSTATE_SUSPENDED));
2303 Assert(enmOldState != EMSTATE_SUSPENDED);
2304 pVCpu->em.s.enmPrevState = enmOldState;
2305 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
2306 break;
2307
2308 /*
2309 * Reset.
2310 * We might end up doing a double reset for now, we'll have to clean up the mess later.
2311 */
2312 case VINF_EM_RESET:
2313 {
2314 if (pVCpu->idCpu == 0)
2315 {
2316 EMSTATE enmState = emR3Reschedule(pVM, pVCpu);
2317 Log2(("EMR3ExecuteVM: VINF_EM_RESET: %d -> %d (%s)\n", enmOldState, enmState, emR3GetStateName(enmState)));
2318 if (pVCpu->em.s.enmState != enmState && enmState == EMSTATE_IEM_THEN_REM)
2319 pVCpu->em.s.cIemThenRemInstructions = 0;
2320 pVCpu->em.s.enmState = enmState;
2321 }
2322 else
2323 {
2324 /* All other VCPUs go into the wait for SIPI state. */
2325 pVCpu->em.s.enmState = EMSTATE_WAIT_SIPI;
2326 }
2327 break;
2328 }
2329
2330 /*
2331 * Power Off.
2332 */
2333 case VINF_EM_OFF:
2334 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2335 Log2(("EMR3ExecuteVM: returns VINF_EM_OFF (%d -> %d)\n", enmOldState, EMSTATE_TERMINATING));
2336 TMR3NotifySuspend(pVM, pVCpu);
2337 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2338 return rc;
2339
2340 /*
2341 * Terminate the VM.
2342 */
2343 case VINF_EM_TERMINATE:
2344 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2345 Log(("EMR3ExecuteVM returns VINF_EM_TERMINATE (%d -> %d)\n", enmOldState, EMSTATE_TERMINATING));
2346 if (pVM->enmVMState < VMSTATE_DESTROYING) /* ugly */
2347 TMR3NotifySuspend(pVM, pVCpu);
2348 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2349 return rc;
2350
2351
2352 /*
2353 * Out of memory, suspend the VM and stuff.
2354 */
2355 case VINF_EM_NO_MEMORY:
2356 Log2(("EMR3ExecuteVM: VINF_EM_NO_MEMORY: %d -> %d\n", enmOldState, EMSTATE_SUSPENDED));
2357 Assert(enmOldState != EMSTATE_SUSPENDED);
2358 pVCpu->em.s.enmPrevState = enmOldState;
2359 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
2360 TMR3NotifySuspend(pVM, pVCpu);
2361 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2362
2363 rc = VMSetRuntimeError(pVM, VMSETRTERR_FLAGS_SUSPEND, "HostMemoryLow",
2364 N_("Unable to allocate and lock memory. The virtual machine will be paused. Please close applications to free up memory or close the VM"));
2365 if (rc != VINF_EM_SUSPEND)
2366 {
2367 if (RT_SUCCESS_NP(rc))
2368 {
2369 AssertLogRelMsgFailed(("%Rrc\n", rc));
2370 rc = VERR_EM_INTERNAL_ERROR;
2371 }
2372 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2373 }
2374 return rc;
2375
2376 /*
2377 * Guest debug events.
2378 */
2379 case VINF_EM_DBG_STEPPED:
2380 case VINF_EM_DBG_STOP:
2381 case VINF_EM_DBG_EVENT:
2382 case VINF_EM_DBG_BREAKPOINT:
2383 case VINF_EM_DBG_STEP:
2384 if (enmOldState == EMSTATE_RAW)
2385 {
2386 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_RAW));
2387 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_RAW;
2388 }
2389 else if (enmOldState == EMSTATE_HM)
2390 {
2391 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_HM));
2392 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_HM;
2393 }
2394 else if (enmOldState == EMSTATE_NEM)
2395 {
2396 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_NEM));
2397 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_NEM;
2398 }
2399 else if (enmOldState == EMSTATE_REM)
2400 {
2401 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_REM));
2402 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_REM;
2403 }
2404 else
2405 {
2406 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_IEM));
2407 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_IEM;
2408 }
2409 break;
2410
2411 /*
2412 * Hypervisor debug events.
2413 */
2414 case VINF_EM_DBG_HYPER_STEPPED:
2415 case VINF_EM_DBG_HYPER_BREAKPOINT:
2416 case VINF_EM_DBG_HYPER_ASSERTION:
2417 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_HYPER));
2418 pVCpu->em.s.enmState = EMSTATE_DEBUG_HYPER;
2419 break;
2420
2421 /*
2422 * Triple fault.
2423 */
2424 case VINF_EM_TRIPLE_FAULT:
2425 if (!pVM->em.s.fGuruOnTripleFault)
2426 {
2427 Log(("EMR3ExecuteVM: VINF_EM_TRIPLE_FAULT: CPU reset...\n"));
2428 rc = VBOXSTRICTRC_TODO(VMR3ResetTripleFault(pVM));
2429 Log2(("EMR3ExecuteVM: VINF_EM_TRIPLE_FAULT: %d -> %d (rc=%Rrc)\n", enmOldState, pVCpu->em.s.enmState, rc));
2430 continue;
2431 }
2432 /* Else fall through and trigger a guru. */
2433 RT_FALL_THRU();
2434
2435 case VERR_VMM_RING0_ASSERTION:
2436 Log(("EMR3ExecuteVM: %Rrc: %d -> %d (EMSTATE_GURU_MEDITATION)\n", rc, enmOldState, EMSTATE_GURU_MEDITATION));
2437 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2438 break;
2439
2440 /*
2441 * Any error code showing up here other than the ones we
2442 * know and process above is considered to be FATAL.
2443 *
2444 * Unknown warnings and informational status codes are also
2445 * included in this.
2446 */
2447 default:
2448 if (RT_SUCCESS_NP(rc))
2449 {
2450 AssertMsgFailed(("Unexpected warning or informational status code %Rra!\n", rc));
2451 rc = VERR_EM_INTERNAL_ERROR;
2452 }
2453 Log(("EMR3ExecuteVM: %Rrc: %d -> %d (EMSTATE_GURU_MEDITATION)\n", rc, enmOldState, EMSTATE_GURU_MEDITATION));
2454 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2455 break;
2456 }
2457
2458 /*
2459 * Act on state transition.
2460 */
2461 EMSTATE const enmNewState = pVCpu->em.s.enmState;
2462 if (enmOldState != enmNewState)
2463 {
2464 VBOXVMM_EM_STATE_CHANGED(pVCpu, enmOldState, enmNewState, rc);
2465
2466 /* Clear MWait flags and the unhalt FF. */
2467 if ( enmOldState == EMSTATE_HALTED
2468 && ( (pVCpu->em.s.MWait.fWait & EMMWAIT_FLAG_ACTIVE)
2469 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_UNHALT))
2470 && ( enmNewState == EMSTATE_RAW
2471 || enmNewState == EMSTATE_HM
2472 || enmNewState == EMSTATE_NEM
2473 || enmNewState == EMSTATE_REM
2474 || enmNewState == EMSTATE_IEM_THEN_REM
2475 || enmNewState == EMSTATE_DEBUG_GUEST_RAW
2476 || enmNewState == EMSTATE_DEBUG_GUEST_HM
2477 || enmNewState == EMSTATE_DEBUG_GUEST_NEM
2478 || enmNewState == EMSTATE_DEBUG_GUEST_IEM
2479 || enmNewState == EMSTATE_DEBUG_GUEST_REM) )
2480 {
2481 if (pVCpu->em.s.MWait.fWait & EMMWAIT_FLAG_ACTIVE)
2482 {
2483 LogFlow(("EMR3ExecuteVM: Clearing MWAIT\n"));
2484 pVCpu->em.s.MWait.fWait &= ~(EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0);
2485 }
2486 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_UNHALT))
2487 {
2488 LogFlow(("EMR3ExecuteVM: Clearing UNHALT\n"));
2489 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_UNHALT);
2490 }
2491 }
2492 }
2493 else
2494 VBOXVMM_EM_STATE_UNCHANGED(pVCpu, enmNewState, rc);
2495
2496 STAM_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x); /* (skip this in release) */
2497 STAM_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2498
2499 /*
2500 * Act on the new state.
2501 */
2502 switch (enmNewState)
2503 {
2504 /*
2505 * Execute raw.
2506 */
2507 case EMSTATE_RAW:
2508 AssertLogRelMsgFailed(("%Rrc\n", rc));
2509 rc = VERR_EM_INTERNAL_ERROR;
2510 break;
2511
2512 /*
2513 * Execute hardware accelerated raw.
2514 */
2515 case EMSTATE_HM:
2516 rc = emR3HmExecute(pVM, pVCpu, &fFFDone);
2517 break;
2518
2519 /*
2520 * Execute using the native hypervisor API (NEM).
2521 */
2522 case EMSTATE_NEM:
2523 rc = VBOXSTRICTRC_TODO(emR3NemExecute(pVM, pVCpu, &fFFDone));
2524 break;
2525
2526 /*
2527 * Execute recompiled.
2528 */
2529 case EMSTATE_REM:
2530 rc = emR3RemExecute(pVM, pVCpu, &fFFDone);
2531 Log2(("EMR3ExecuteVM: emR3RemExecute -> %Rrc\n", rc));
2532 break;
2533
2534 /*
2535 * Execute in the interpreter.
2536 */
2537 case EMSTATE_IEM:
2538 {
2539 uint32_t cInstructions = 0;
2540#if 0 /* For testing purposes. */
2541 STAM_PROFILE_START(&pVCpu->em.s.StatHmExec, x1);
2542 rc = VBOXSTRICTRC_TODO(EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE));
2543 STAM_PROFILE_STOP(&pVCpu->em.s.StatHmExec, x1);
2544 if (rc == VINF_EM_DBG_STEPPED || rc == VINF_EM_RESCHEDULE_HM || rc == VINF_EM_RESCHEDULE_REM || rc == VINF_EM_RESCHEDULE_RAW)
2545 rc = VINF_SUCCESS;
2546 else if (rc == VERR_EM_CANNOT_EXEC_GUEST)
2547#endif
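                    /* Presumably the tuning below means: interpret up to 4096
                       instructions per call, checking for pending FFs / timer
                       polls roughly every 2047 instructions so timers don't
                       starve while we stay in the interpreter. */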
2548 rc = VBOXSTRICTRC_TODO(IEMExecLots(pVCpu, 4096 /*cMaxInstructions*/, 2047 /*cPollRate*/, &cInstructions));
2549 if (pVM->em.s.fIemExecutesAll)
2550 {
2551 Assert(rc != VINF_EM_RESCHEDULE_REM);
2552 Assert(rc != VINF_EM_RESCHEDULE_RAW);
2553 Assert(rc != VINF_EM_RESCHEDULE_HM);
2554#ifdef VBOX_HIGH_RES_TIMERS_HACK
2555 if (cInstructions < 2048)
2556 TMTimerPollVoid(pVM, pVCpu);
2557#endif
2558 }
2559 fFFDone = false;
2560 break;
2561 }
2562
2563 /*
2564 * Execute in IEM, hoping we can quickly switch back to HM
2565 * or RAW execution. If our hopes fail, we go to REM.
2566 */
2567 case EMSTATE_IEM_THEN_REM:
2568 {
2569 STAM_PROFILE_START(&pVCpu->em.s.StatIEMThenREM, pIemThenRem);
2570 rc = VBOXSTRICTRC_TODO(emR3ExecuteIemThenRem(pVM, pVCpu, &fFFDone));
2571 STAM_PROFILE_STOP(&pVCpu->em.s.StatIEMThenREM, pIemThenRem);
2572 break;
2573 }
2574
2575 /*
2576 * Application processor execution halted until SIPI.
2577 */
2578 case EMSTATE_WAIT_SIPI:
2579 /* no break */
2580 /*
2581 * hlt - execution halted until interrupt.
2582 */
2583 case EMSTATE_HALTED:
2584 {
2585 STAM_REL_PROFILE_START(&pVCpu->em.s.StatHalted, y);
2586 /* If HM (or someone else) stores a pending interrupt in
2587 TRPM, it must be dispatched ASAP without any halting.
2588 Anything pending in TRPM has been accepted and the CPU
2589 should already be in the right state to receive it. */
2590 if (TRPMHasTrap(pVCpu))
2591 rc = VINF_EM_RESCHEDULE;
2592 /* MWAIT has a special extension where it's woken up when
2593 an interrupt is pending even when IF=0. */
2594 else if ( (pVCpu->em.s.MWait.fWait & (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
2595 == (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
2596 {
2597 rc = VMR3WaitHalted(pVM, pVCpu, false /*fIgnoreInterrupts*/);
2598 if (rc == VINF_SUCCESS)
2599 {
2600 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
2601 APICUpdatePendingInterrupts(pVCpu);
2602
2603 if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC
2604 | VMCPU_FF_INTERRUPT_NESTED_GUEST
2605 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT))
2606 {
2607 Log(("EMR3ExecuteVM: Triggering reschedule on pending IRQ after MWAIT\n"));
2608 rc = VINF_EM_RESCHEDULE;
2609 }
2610 }
2611 }
2612 else
2613 {
2614 rc = VMR3WaitHalted(pVM, pVCpu, !(CPUMGetGuestEFlags(pVCpu) & X86_EFL_IF));
2615 /* We're only interested in NMI/SMIs here which have their own FFs, so we don't need to
2616 check VMCPU_FF_UPDATE_APIC here. */
2617 if ( rc == VINF_SUCCESS
2618 && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT))
2619 {
2620 Log(("EMR3ExecuteVM: Triggering reschedule on pending NMI/SMI/UNHALT after HLT\n"));
2621 rc = VINF_EM_RESCHEDULE;
2622 }
2623 }
2624
2625 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatHalted, y);
2626 break;
2627 }
2628
2629 /*
2630 * Suspended - return to VM.cpp.
2631 */
2632 case EMSTATE_SUSPENDED:
2633 TMR3NotifySuspend(pVM, pVCpu);
2634 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2635 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2636 return VINF_EM_SUSPEND;
2637
2638 /*
2639 * Debugging in the guest.
2640 */
2641 case EMSTATE_DEBUG_GUEST_RAW:
2642 case EMSTATE_DEBUG_GUEST_HM:
2643 case EMSTATE_DEBUG_GUEST_NEM:
2644 case EMSTATE_DEBUG_GUEST_IEM:
2645 case EMSTATE_DEBUG_GUEST_REM:
2646 TMR3NotifySuspend(pVM, pVCpu);
2647 rc = VBOXSTRICTRC_TODO(emR3Debug(pVM, pVCpu, rc));
2648 TMR3NotifyResume(pVM, pVCpu);
2649 Log2(("EMR3ExecuteVM: emR3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
2650 break;
2651
2652 /*
2653 * Debugging in the hypervisor.
2654 */
2655 case EMSTATE_DEBUG_HYPER:
2656 {
2657 TMR3NotifySuspend(pVM, pVCpu);
2658 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2659
2660 rc = VBOXSTRICTRC_TODO(emR3Debug(pVM, pVCpu, rc));
2661 Log2(("EMR3ExecuteVM: emR3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
2662 if (rc != VINF_SUCCESS)
2663 {
2664 if (rc == VINF_EM_OFF || rc == VINF_EM_TERMINATE)
2665 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2666 else
2667 {
2668 /* switch to guru meditation mode */
2669 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2670 VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
2671 VMMR3FatalDump(pVM, pVCpu, rc);
2672 }
2673 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2674 return rc;
2675 }
2676
2677 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2678 TMR3NotifyResume(pVM, pVCpu);
2679 break;
2680 }
2681
2682 /*
2683 * Guru meditation takes place in the debugger.
2684 */
2685 case EMSTATE_GURU_MEDITATION:
2686 {
2687 TMR3NotifySuspend(pVM, pVCpu);
2688 VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
2689 VMMR3FatalDump(pVM, pVCpu, rc);
2690 emR3Debug(pVM, pVCpu, rc);
2691 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2692 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2693 return rc;
2694 }
2695
2696 /*
2697 * The states we don't expect here.
2698 */
2699 case EMSTATE_NONE:
2700 case EMSTATE_TERMINATING:
2701 default:
2702 AssertMsgFailed(("EMR3ExecuteVM: Invalid state %d!\n", pVCpu->em.s.enmState));
2703 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2704 TMR3NotifySuspend(pVM, pVCpu);
2705 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2706 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2707 return VERR_EM_INTERNAL_ERROR;
2708 }
2709 } /* The Outer Main Loop */
2710 }
2711 else
2712 {
2713 /*
2714 * Fatal error.
2715 */
2716 Log(("EMR3ExecuteVM: returns %Rrc because of longjmp / fatal error; (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(pVCpu->em.s.enmPrevState)));
2717 TMR3NotifySuspend(pVM, pVCpu);
2718 VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
2719 VMMR3FatalDump(pVM, pVCpu, rc);
2720 emR3Debug(pVM, pVCpu, rc);
2721 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2722 /** @todo change the VM state! */
2723 return rc;
2724 }
2725
2726 /* not reached */
2727}
2728