VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/EM.cpp@ 100140

Last change on this file since 100140 was 100140, checked in by vboxsync, 17 months ago

VMM/EM: Do not do scheduling based on whether HM has been used and is 'active', because that's not a reliable property (especially after restoring saved state) and it's not correct to go to the recompiler all the time after HM was unable to execute a piece of code. This is probably a problem resurfacing after kicking out the IEM_THEN_REM state from EM and resurrecting the REM state. bugref:10369

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 112.9 KB
1/* $Id: EM.cpp 100140 2023-06-09 14:54:38Z vboxsync $ */
2/** @file
3 * EM - Execution Monitor / Manager.
4 */
5
6/*
7 * Copyright (C) 2006-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28/** @page pg_em EM - The Execution Monitor / Manager
29 *
30 * The Execution Monitor/Manager is responsible for running the VM, scheduling
31 * the right kind of execution (Raw-mode, Hardware Assisted, Recompiled or
32 * Interpreted), and keeping the CPU states in sync. The function
33 * EMR3ExecuteVM() is the 'main-loop' of the VM, while each of the execution
34 * modes has different inner loops (emR3RawExecute, emR3HmExecute, and
35 * emR3RemExecute).
36 *
37 * The interpreted execution is only used to avoid switching between
38 * raw-mode/hm and the recompiler when fielding virtualization traps/faults.
39 * The interpretation is thus implemented as part of EM.
40 *
41 * @see grp_em
42 */
43
44
45/*********************************************************************************************************************************
46* Header Files *
47*********************************************************************************************************************************/
48#define LOG_GROUP LOG_GROUP_EM
49#define VMCPU_INCL_CPUM_GST_CTX /* for CPUM_IMPORT_GUEST_STATE_RET & interrupt injection */
50#include <VBox/vmm/em.h>
51#include <VBox/vmm/vmm.h>
52#include <VBox/vmm/selm.h>
53#include <VBox/vmm/trpm.h>
54#include <VBox/vmm/iem.h>
55#include <VBox/vmm/nem.h>
56#include <VBox/vmm/iom.h>
57#include <VBox/vmm/dbgf.h>
58#include <VBox/vmm/pgm.h>
59#include <VBox/vmm/apic.h>
60#include <VBox/vmm/tm.h>
61#include <VBox/vmm/mm.h>
62#include <VBox/vmm/ssm.h>
63#include <VBox/vmm/pdmapi.h>
64#include <VBox/vmm/pdmcritsect.h>
65#include <VBox/vmm/pdmqueue.h>
66#include <VBox/vmm/hm.h>
67#include "EMInternal.h"
68#include <VBox/vmm/vm.h>
69#include <VBox/vmm/uvm.h>
70#include <VBox/vmm/cpumdis.h>
71#include <VBox/dis.h>
72#include <VBox/err.h>
73#include "VMMTracing.h"
74
75#include <iprt/asm.h>
76#include <iprt/string.h>
77#include <iprt/stream.h>
78#include <iprt/thread.h>
79
80#include "EMInline.h"
81
82
83/*********************************************************************************************************************************
84* Internal Functions *
85*********************************************************************************************************************************/
86static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM);
87static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
88#if defined(LOG_ENABLED) || defined(VBOX_STRICT)
89static const char *emR3GetStateName(EMSTATE enmState);
90#endif
91static VBOXSTRICTRC emR3Debug(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc);
92
93
94/**
95 * Initializes the EM.
96 *
97 * @returns VBox status code.
98 * @param pVM The cross context VM structure.
99 */
100VMMR3_INT_DECL(int) EMR3Init(PVM pVM)
101{
102 LogFlow(("EMR3Init\n"));
103 /*
104 * Assert alignment and sizes.
105 */
106 AssertCompileMemberAlignment(VM, em.s, 32);
107 AssertCompile(sizeof(pVM->em.s) <= sizeof(pVM->em.padding));
108 AssertCompile(RT_SIZEOFMEMB(VMCPU, em.s.u.FatalLongJump) <= RT_SIZEOFMEMB(VMCPU, em.s.u.achPaddingFatalLongJump));
109 AssertCompile(RT_SIZEOFMEMB(VMCPU, em.s) <= RT_SIZEOFMEMB(VMCPU, em.padding));
110
111 /*
112 * Init the structure.
113 */
114 PCFGMNODE pCfgRoot = CFGMR3GetRoot(pVM);
115 PCFGMNODE pCfgEM = CFGMR3GetChild(pCfgRoot, "EM");
116
117 int rc = CFGMR3QueryBoolDef(pCfgEM, "IemExecutesAll", &pVM->em.s.fIemExecutesAll,
118#if defined(RT_ARCH_ARM64) && defined(RT_OS_DARWIN) && !defined(VBOX_VMM_TARGET_ARMV8)
119 true
120#else
121 false
122#endif
123 );
124 AssertLogRelRCReturn(rc, rc);
125
126 bool fEnabled;
127 rc = CFGMR3QueryBoolDef(pCfgEM, "TripleFaultReset", &fEnabled, false);
128 AssertLogRelRCReturn(rc, rc);
129 pVM->em.s.fGuruOnTripleFault = !fEnabled;
130 if (!pVM->em.s.fGuruOnTripleFault && pVM->cCpus > 1)
131 {
132 LogRel(("EM: Overriding /EM/TripleFaultReset, must be false on SMP.\n"));
133 pVM->em.s.fGuruOnTripleFault = true;
134 }
135
136 LogRel(("EMR3Init: fIemExecutesAll=%RTbool fGuruOnTripleFault=%RTbool\n", pVM->em.s.fIemExecutesAll, pVM->em.s.fGuruOnTripleFault));
137
138 /** @cfgm{/EM/ExitOptimizationEnabled, bool, true}
139 * Whether to try correlate exit history in any context, detect hot spots and
140 * try optimize these using IEM if there are other exits close by. This
141 * overrides the context specific settings. */
142 bool fExitOptimizationEnabled = true;
143 rc = CFGMR3QueryBoolDef(pCfgEM, "ExitOptimizationEnabled", &fExitOptimizationEnabled, true);
144 AssertLogRelRCReturn(rc, rc);
145
146 /** @cfgm{/EM/ExitOptimizationEnabledR0, bool, true}
147 * Whether to optimize exits in ring-0. Setting this to false will also disable
148 * the /EM/ExitOptimizationEnabledR0PreemptDisabled setting. Depending on preemption
149 * capabilities of the host kernel, this optimization may be unavailable. */
150 bool fExitOptimizationEnabledR0 = true;
151 rc = CFGMR3QueryBoolDef(pCfgEM, "ExitOptimizationEnabledR0", &fExitOptimizationEnabledR0, true);
152 AssertLogRelRCReturn(rc, rc);
153 fExitOptimizationEnabledR0 &= fExitOptimizationEnabled;
154
155 /** @cfgm{/EM/ExitOptimizationEnabledR0PreemptDisabled, bool, false}
156 * Whether to optimize exits in ring-0 when preemption is disabled (or preemption
157 * hooks are in effect). */
158 /** @todo change the default to true here */
159 bool fExitOptimizationEnabledR0PreemptDisabled = true;
160 rc = CFGMR3QueryBoolDef(pCfgEM, "ExitOptimizationEnabledR0PreemptDisabled", &fExitOptimizationEnabledR0PreemptDisabled, false);
161 AssertLogRelRCReturn(rc, rc);
162 fExitOptimizationEnabledR0PreemptDisabled &= fExitOptimizationEnabledR0;
163
164 /** @cfgm{/EM/HistoryExecMaxInstructions, integer, 16, 65535, 8192}
165 * Maximum number of instructions to let EMHistoryExec execute in one go. */
166 uint16_t cHistoryExecMaxInstructions = 8192;
167 rc = CFGMR3QueryU16Def(pCfgEM, "HistoryExecMaxInstructions", &cHistoryExecMaxInstructions, cHistoryExecMaxInstructions);
168 AssertLogRelRCReturn(rc, rc);
169 if (cHistoryExecMaxInstructions < 16)
170 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS, "/EM/HistoryExecMaxInstructions value is too small, min 16");
171
172 /** @cfgm{/EM/HistoryProbeMaxInstructionsWithoutExit, integer, 2, 65535, 24 for HM, 32 for NEM}
173 * Maximum number of instructions between exits during probing. */
174 uint16_t cHistoryProbeMaxInstructionsWithoutExit = 24;
175#ifdef RT_OS_WINDOWS
176 if (VM_IS_NEM_ENABLED(pVM))
177 cHistoryProbeMaxInstructionsWithoutExit = 32;
178#endif
179 rc = CFGMR3QueryU16Def(pCfgEM, "HistoryProbeMaxInstructionsWithoutExit", &cHistoryProbeMaxInstructionsWithoutExit,
180 cHistoryProbeMaxInstructionsWithoutExit);
181 AssertLogRelRCReturn(rc, rc);
182 if (cHistoryProbeMaxInstructionsWithoutExit < 2)
183 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS,
184 "/EM/HistoryProbeMaxInstructionsWithoutExit value is too small, min 2");
185
186 /** @cfgm{/EM/HistoryProbMinInstructions, integer, 0, 65535, depends}
187 * The default is (/EM/HistoryProbeMaxInstructionsWithoutExit + 1) * 3. */
188 uint16_t cHistoryProbeMinInstructions = cHistoryProbeMaxInstructionsWithoutExit < 0x5554
189 ? (cHistoryProbeMaxInstructionsWithoutExit + 1) * 3 : 0xffff;
190 rc = CFGMR3QueryU16Def(pCfgEM, "HistoryProbMinInstructions", &cHistoryProbeMinInstructions,
191 cHistoryProbeMinInstructions);
192 AssertLogRelRCReturn(rc, rc);
193
194 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
195 {
196 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
197 pVCpu->em.s.fExitOptimizationEnabled = fExitOptimizationEnabled;
198 pVCpu->em.s.fExitOptimizationEnabledR0 = fExitOptimizationEnabledR0;
199 pVCpu->em.s.fExitOptimizationEnabledR0PreemptDisabled = fExitOptimizationEnabledR0PreemptDisabled;
200 pVCpu->em.s.cHistoryExecMaxInstructions = cHistoryExecMaxInstructions;
201 pVCpu->em.s.cHistoryProbeMinInstructions = cHistoryProbeMinInstructions;
202 pVCpu->em.s.cHistoryProbeMaxInstructionsWithoutExit = cHistoryProbeMaxInstructionsWithoutExit;
203 }
204
205 /*
206 * Saved state.
207 */
208 rc = SSMR3RegisterInternal(pVM, "em", 0, EM_SAVED_STATE_VERSION, 16,
209 NULL, NULL, NULL,
210 NULL, emR3Save, NULL,
211 NULL, emR3Load, NULL);
212 if (RT_FAILURE(rc))
213 return rc;
214
215 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
216 {
217 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
218
219 pVCpu->em.s.enmState = idCpu == 0 ? EMSTATE_NONE : EMSTATE_WAIT_SIPI;
220 pVCpu->em.s.enmPrevState = EMSTATE_NONE;
221 pVCpu->em.s.msTimeSliceStart = 0; /* paranoia */
222 pVCpu->em.s.idxContinueExitRec = UINT16_MAX;
223
224# define EM_REG_COUNTER(a, b, c) \
225 rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, c, b, idCpu); \
226 AssertRC(rc);
227
228# define EM_REG_COUNTER_USED(a, b, c) \
229 rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, c, b, idCpu); \
230 AssertRC(rc);
231
232# define EM_REG_PROFILE(a, b, c) \
233 rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, idCpu); \
234 AssertRC(rc);
235
236# define EM_REG_PROFILE_ADV(a, b, c) \
237 rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE_ADV, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, idCpu); \
238 AssertRC(rc);
239
240 /*
241 * Statistics.
242 */
243#ifdef VBOX_WITH_STATISTICS
244 EM_REG_COUNTER_USED(&pVCpu->em.s.StatIoRestarted, "/EM/CPU%u/R3/PrivInst/IoRestarted", "I/O instructions restarted in ring-3.");
245 EM_REG_COUNTER_USED(&pVCpu->em.s.StatIoIem, "/EM/CPU%u/R3/PrivInst/IoIem", "I/O instructions handed to IEM in ring-3.");
246
247 /* these should be considered for release statistics. */
248 EM_REG_COUNTER(&pVCpu->em.s.StatIOEmu, "/PROF/CPU%u/EM/Emulation/IO", "Profiling of emR3RawExecuteIOInstruction.");
249 EM_REG_COUNTER(&pVCpu->em.s.StatPrivEmu, "/PROF/CPU%u/EM/Emulation/Priv", "Profiling of emR3RawPrivileged.");
250 EM_REG_PROFILE(&pVCpu->em.s.StatHMEntry, "/PROF/CPU%u/EM/HMEnter", "Profiling Hardware Accelerated Mode entry overhead.");
251#endif
252 EM_REG_PROFILE(&pVCpu->em.s.StatHMExec, "/PROF/CPU%u/EM/HMExec", "Profiling Hardware Accelerated Mode execution.");
253 EM_REG_COUNTER(&pVCpu->em.s.StatHMExecuteCalled, "/PROF/CPU%u/EM/HMExecuteCalled", "Number of times emR3HmExecute is called.");
254#ifdef VBOX_WITH_STATISTICS
255 EM_REG_PROFILE(&pVCpu->em.s.StatIEMEmu, "/PROF/CPU%u/EM/IEMEmuSingle", "Profiling single instruction IEM execution.");
256 EM_REG_PROFILE(&pVCpu->em.s.StatIEMThenREM, "/PROF/CPU%u/EM/IEMThenRem", "Profiling IEM-then-REM instruction execution (by IEM).");
257 EM_REG_PROFILE(&pVCpu->em.s.StatNEMEntry, "/PROF/CPU%u/EM/NEMEnter", "Profiling NEM entry overhead.");
258#endif
259 EM_REG_PROFILE(&pVCpu->em.s.StatNEMExec, "/PROF/CPU%u/EM/NEMExec", "Profiling NEM execution.");
260 EM_REG_COUNTER(&pVCpu->em.s.StatNEMExecuteCalled, "/PROF/CPU%u/EM/NEMExecuteCalled", "Number of times emR3NemExecute is called.");
261#ifdef VBOX_WITH_STATISTICS
262 EM_REG_PROFILE(&pVCpu->em.s.StatREMExec, "/PROF/CPU%u/EM/REMExec", "Profiling REM execution.");
263#endif
264
265 EM_REG_COUNTER(&pVCpu->em.s.StatForcedActions, "/PROF/CPU%u/EM/ForcedActions", "Profiling forced action execution.");
266 EM_REG_COUNTER(&pVCpu->em.s.StatHalted, "/PROF/CPU%u/EM/Halted", "Profiling halted state (VMR3WaitHalted).");
267 EM_REG_PROFILE_ADV(&pVCpu->em.s.StatCapped, "/PROF/CPU%u/EM/Capped", "Profiling capped state (sleep).");
268 EM_REG_COUNTER(&pVCpu->em.s.StatREMTotal, "/PROF/CPU%u/EM/REMTotal", "Profiling emR3RecompilerExecute (excluding FFs).");
269
270 EM_REG_PROFILE_ADV(&pVCpu->em.s.StatTotal, "/PROF/CPU%u/EM/Total", "Profiling EMR3ExecuteVM.");
271
272 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.iNextExit, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
273 "Number of recorded exits.", "/PROF/CPU%u/EM/RecordedExits", idCpu);
274 AssertRC(rc);
275
276 /* History record statistics */
277 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.cExitRecordUsed, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
278 "Number of used hash table entries.", "/EM/CPU%u/ExitHashing/Used", idCpu);
279 AssertRC(rc);
280
281 for (uint32_t iStep = 0; iStep < RT_ELEMENTS(pVCpu->em.s.aStatHistoryRecHits); iStep++)
282 {
283 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.aStatHistoryRecHits[iStep], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
284 "Number of hits at this step.", "/EM/CPU%u/ExitHashing/Step%02u-Hits", idCpu, iStep);
285 AssertRC(rc);
286 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.aStatHistoryRecTypeChanged[iStep], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
287 "Number of type changes at this step.", "/EM/CPU%u/ExitHashing/Step%02u-TypeChanges", idCpu, iStep);
288 AssertRC(rc);
289 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.aStatHistoryRecReplaced[iStep], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
290 "Number of replacements at this step.", "/EM/CPU%u/ExitHashing/Step%02u-Replacements", idCpu, iStep);
291 AssertRC(rc);
292 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.aStatHistoryRecNew[iStep], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
293 "Number of new inserts at this step.", "/EM/CPU%u/ExitHashing/Step%02u-NewInserts", idCpu, iStep);
294 AssertRC(rc);
295 }
296
297 EM_REG_PROFILE(&pVCpu->em.s.StatHistoryExec, "/EM/CPU%u/ExitOpt/Exec", "Profiling normal EMHistoryExec operation.");
298 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryExecSavedExits, "/EM/CPU%u/ExitOpt/ExecSavedExit", "Net number of saved exits.");
299 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryExecInstructions, "/EM/CPU%u/ExitOpt/ExecInstructions", "Number of instructions executed during normal operation.");
300 EM_REG_PROFILE(&pVCpu->em.s.StatHistoryProbe, "/EM/CPU%u/ExitOpt/Probe", "Profiling EMHistoryExec when probing.");
301 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryProbeInstructions, "/EM/CPU%u/ExitOpt/ProbeInstructions", "Number of instructions executed during probing.");
302 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryProbedNormal, "/EM/CPU%u/ExitOpt/ProbedNormal", "Number of EMEXITACTION_NORMAL_PROBED results.");
303 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryProbedExecWithMax, "/EM/CPU%u/ExitOpt/ProbedExecWithMax", "Number of EMEXITACTION_EXEC_WITH_MAX results.");
304 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryProbedToRing3, "/EM/CPU%u/ExitOpt/ProbedToRing3", "Number of ring-3 probe continuations.");
305 }
306
307 emR3InitDbg(pVM);
308 return VINF_SUCCESS;
309}
310
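The /EM/ keys queried in EMR3Init above live in the VM's CFGM configuration tree. Below is a minimal sketch of how a frontend or test harness might seed them before EMR3Init runs; it assumes the standard CFGMR3InsertNode/CFGMR3InsertInteger helpers and the fact that boolean keys are stored as integers (which is what CFGMR3QueryBoolDef reads back). The function name is illustrative only and not part of this file.

/* Sketch: seed /EM/ configuration keys before VM init (needs <VBox/vmm/cfgm.h> and <VBox/vmm/vm.h>). */
static int exampleSeedEmConfig(PVM pVM)
{
    /* Locate the /EM node under the CFGM root, creating it if absent. */
    PCFGMNODE pRoot  = CFGMR3GetRoot(pVM);
    PCFGMNODE pCfgEM = CFGMR3GetChild(pRoot, "EM");
    int rc = VINF_SUCCESS;
    if (!pCfgEM)
        rc = CFGMR3InsertNode(pRoot, "EM", &pCfgEM);

    /* Booleans are stored as integers; EMR3Init reads them via CFGMR3QueryBoolDef. */
    if (RT_SUCCESS(rc))
        rc = CFGMR3InsertInteger(pCfgEM, "IemExecutesAll", 1);   /* force all guest code through IEM */
    if (RT_SUCCESS(rc))
        rc = CFGMR3InsertInteger(pCfgEM, "TripleFaultReset", 1); /* reset instead of guru meditation (UP only) */
    return rc;
}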
311
312/**
313 * Called when a VM initialization stage is completed.
314 *
315 * @returns VBox status code.
316 * @param pVM The cross context VM structure.
317 * @param enmWhat The initialization state that was completed.
318 */
319VMMR3_INT_DECL(int) EMR3InitCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
320{
321 if (enmWhat == VMINITCOMPLETED_RING0)
322 LogRel(("EM: Exit history optimizations: enabled=%RTbool enabled-r0=%RTbool enabled-r0-no-preemption=%RTbool\n",
323 pVM->apCpusR3[0]->em.s.fExitOptimizationEnabled, pVM->apCpusR3[0]->em.s.fExitOptimizationEnabledR0,
324 pVM->apCpusR3[0]->em.s.fExitOptimizationEnabledR0PreemptDisabled));
325 return VINF_SUCCESS;
326}
327
328
329/**
330 * Applies relocations to data and code managed by this
331 * component. This function will be called at init and
332 * whenever the VMM needs to relocate itself inside the GC.
333 *
334 * @param pVM The cross context VM structure.
335 */
336VMMR3_INT_DECL(void) EMR3Relocate(PVM pVM)
337{
338 LogFlow(("EMR3Relocate\n"));
339 RT_NOREF(pVM);
340}
341
342
343/**
344 * Reset the EM state for a CPU.
345 *
346 * Called by EMR3Reset and hot plugging.
347 *
348 * @param pVCpu The cross context virtual CPU structure.
349 */
350VMMR3_INT_DECL(void) EMR3ResetCpu(PVMCPU pVCpu)
351{
352 /* Reset scheduling state. */
353 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_UNHALT);
354
355 /* VMR3ResetFF may return VINF_EM_RESET or VINF_EM_SUSPEND, so transition
356 out of the HALTED state here so that enmPrevState doesn't end up as
357 HALTED when EMR3Execute returns. */
358 if (pVCpu->em.s.enmState == EMSTATE_HALTED)
359 {
360 Log(("EMR3ResetCpu: Cpu#%u %s -> %s\n", pVCpu->idCpu, emR3GetStateName(pVCpu->em.s.enmState), pVCpu->idCpu == 0 ? "EMSTATE_NONE" : "EMSTATE_WAIT_SIPI"));
361 pVCpu->em.s.enmState = pVCpu->idCpu == 0 ? EMSTATE_NONE : EMSTATE_WAIT_SIPI;
362 }
363}
364
365
366/**
367 * Reset notification.
368 *
369 * @param pVM The cross context VM structure.
370 */
371VMMR3_INT_DECL(void) EMR3Reset(PVM pVM)
372{
373 Log(("EMR3Reset: \n"));
374 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
375 EMR3ResetCpu(pVM->apCpusR3[idCpu]);
376}
377
378
379/**
380 * Terminates the EM.
381 *
382 * Termination means cleaning up and freeing all resources,
383 * the VM itself is at this point powered off or suspended.
384 *
385 * @returns VBox status code.
386 * @param pVM The cross context VM structure.
387 */
388VMMR3_INT_DECL(int) EMR3Term(PVM pVM)
389{
390 RT_NOREF(pVM);
391 return VINF_SUCCESS;
392}
393
394
395/**
396 * Execute state save operation.
397 *
398 * @returns VBox status code.
399 * @param pVM The cross context VM structure.
400 * @param pSSM SSM operation handle.
401 */
402static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM)
403{
404 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
405 {
406 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
407
408 SSMR3PutBool(pSSM, false /*fForceRAW*/);
409
410 Assert(pVCpu->em.s.enmState == EMSTATE_SUSPENDED);
411 Assert(pVCpu->em.s.enmPrevState != EMSTATE_SUSPENDED);
412 SSMR3PutU32(pSSM,
413 pVCpu->em.s.enmPrevState == EMSTATE_NONE
414 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED
415 || pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
416 ? pVCpu->em.s.enmPrevState : EMSTATE_NONE);
417
418 /* Save mwait state. */
419 SSMR3PutU32(pSSM, pVCpu->em.s.MWait.fWait);
420 SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMWaitRAX);
421 SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMWaitRCX);
422 SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRAX);
423 SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRCX);
424 int rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRDX);
425 AssertRCReturn(rc, rc);
426 }
427 return VINF_SUCCESS;
428}
429
430
431/**
432 * Execute state load operation.
433 *
434 * @returns VBox status code.
435 * @param pVM The cross context VM structure.
436 * @param pSSM SSM operation handle.
437 * @param uVersion Data layout version.
438 * @param uPass The data pass.
439 */
440static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
441{
442 /*
443 * Validate version.
444 */
445 if ( uVersion > EM_SAVED_STATE_VERSION
446 || uVersion < EM_SAVED_STATE_VERSION_PRE_SMP)
447 {
448 AssertMsgFailed(("emR3Load: Invalid version uVersion=%d (current %d)!\n", uVersion, EM_SAVED_STATE_VERSION));
449 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
450 }
451 Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);
452
453 /*
454 * Load the saved state.
455 */
456 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
457 {
458 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
459
460 bool fForceRAWIgnored;
461 int rc = SSMR3GetBool(pSSM, &fForceRAWIgnored);
462 AssertRCReturn(rc, rc);
463
464 if (uVersion > EM_SAVED_STATE_VERSION_PRE_SMP)
465 {
466 /* We are only interested in two enmPrevState values for use when
467 EMR3ExecuteVM is called.
468 Since ~r157540, only these two and EMSTATE_NONE are saved. */
469 SSM_GET_ENUM32_RET(pSSM, pVCpu->em.s.enmPrevState, EMSTATE);
470 Assert(pVCpu->em.s.enmPrevState != EMSTATE_SUSPENDED);
471 if ( pVCpu->em.s.enmPrevState != EMSTATE_WAIT_SIPI
472 && pVCpu->em.s.enmPrevState != EMSTATE_HALTED)
473 pVCpu->em.s.enmPrevState = EMSTATE_NONE;
474
475 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
476 }
477 if (uVersion > EM_SAVED_STATE_VERSION_PRE_MWAIT)
478 {
479 /* Load mwait state. */
480 rc = SSMR3GetU32(pSSM, &pVCpu->em.s.MWait.fWait);
481 AssertRCReturn(rc, rc);
482 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMWaitRAX);
483 AssertRCReturn(rc, rc);
484 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMWaitRCX);
485 AssertRCReturn(rc, rc);
486 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRAX);
487 AssertRCReturn(rc, rc);
488 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRCX);
489 AssertRCReturn(rc, rc);
490 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRDX);
491 AssertRCReturn(rc, rc);
492 }
493 }
494 return VINF_SUCCESS;
495}
496
497
498/**
499 * Argument packet for emR3SetExecutionPolicy.
500 */
501struct EMR3SETEXECPOLICYARGS
502{
503 EMEXECPOLICY enmPolicy;
504 bool fEnforce;
505};
506
507
508/**
509 * @callback_method_impl{FNVMMEMTRENDEZVOUS, Rendezvous callback for EMR3SetExecutionPolicy.}
510 */
511static DECLCALLBACK(VBOXSTRICTRC) emR3SetExecutionPolicy(PVM pVM, PVMCPU pVCpu, void *pvUser)
512{
513 /*
514 * Only the first CPU changes the variables.
515 */
516 if (pVCpu->idCpu == 0)
517 {
518 struct EMR3SETEXECPOLICYARGS *pArgs = (struct EMR3SETEXECPOLICYARGS *)pvUser;
519 switch (pArgs->enmPolicy)
520 {
521 case EMEXECPOLICY_IEM_ALL:
522 pVM->em.s.fIemExecutesAll = pArgs->fEnforce;
523
524 /* For making '.alliem 1' useful during debugging, transition the
525 EMSTATE_DEBUG_GUEST_XXX to EMSTATE_DEBUG_GUEST_IEM. */
526 for (VMCPUID i = 0; i < pVM->cCpus; i++)
527 {
528 PVMCPU pVCpuX = pVM->apCpusR3[i];
529 switch (pVCpuX->em.s.enmState)
530 {
531 case EMSTATE_DEBUG_GUEST_RECOMPILER:
532 if (pVM->em.s.fIemRecompiled)
533 break;
534 RT_FALL_THROUGH();
535 case EMSTATE_DEBUG_GUEST_RAW:
536 case EMSTATE_DEBUG_GUEST_HM:
537 case EMSTATE_DEBUG_GUEST_NEM:
538 Log(("EM: idCpu=%u: %s -> EMSTATE_DEBUG_GUEST_IEM\n", i, emR3GetStateName(pVCpuX->em.s.enmState) ));
539 pVCpuX->em.s.enmState = EMSTATE_DEBUG_GUEST_IEM;
540 break;
541 case EMSTATE_DEBUG_GUEST_IEM:
542 default:
543 break;
544 }
545 }
546 break;
547
548 case EMEXECPOLICY_IEM_RECOMPILED:
549 pVM->em.s.fIemRecompiled = pArgs->fEnforce;
550 break;
551
552 default:
553 AssertFailedReturn(VERR_INVALID_PARAMETER);
554 }
555 Log(("EM: Set execution policy: fIemExecutesAll=%RTbool fIemRecompiled=%RTbool\n",
556 pVM->em.s.fIemExecutesAll, pVM->em.s.fIemRecompiled));
557 }
558
559 /*
560 * Force rescheduling if in HM, NEM, IEM/interpreter or IEM/recompiler.
561 */
562 Assert(pVCpu->em.s.enmState != EMSTATE_RAW_OBSOLETE);
563 return pVCpu->em.s.enmState == EMSTATE_HM
564 || pVCpu->em.s.enmState == EMSTATE_NEM
565 || pVCpu->em.s.enmState == EMSTATE_IEM
566 || pVCpu->em.s.enmState == EMSTATE_RECOMPILER
567 ? VINF_EM_RESCHEDULE
568 : VINF_SUCCESS;
569}
570
571
572/**
573 * Changes an execution scheduling policy parameter.
574 *
575 * This is used to enable or disable raw-mode / hardware-virtualization
576 * execution of user and supervisor code.
577 *
578 * @returns VINF_SUCCESS on success.
579 * @returns VINF_EM_RESCHEDULE if a rescheduling might be required.
580 * @returns VERR_INVALID_PARAMETER on an invalid enmMode value.
581 *
582 * @param pUVM The user mode VM handle.
583 * @param enmPolicy The scheduling policy to change.
584 * @param fEnforce Whether to enforce the policy or not.
585 */
586VMMR3DECL(int) EMR3SetExecutionPolicy(PUVM pUVM, EMEXECPOLICY enmPolicy, bool fEnforce)
587{
588 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
589 VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE);
590 AssertReturn(enmPolicy > EMEXECPOLICY_INVALID && enmPolicy < EMEXECPOLICY_END, VERR_INVALID_PARAMETER);
591
592 struct EMR3SETEXECPOLICYARGS Args = { enmPolicy, fEnforce };
593 return VMMR3EmtRendezvous(pUVM->pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING, emR3SetExecutionPolicy, &Args);
594}
595
596
597/**
598 * Queries an execution scheduling policy parameter.
599 *
600 * @returns VBox status code
601 * @param pUVM The user mode VM handle.
602 * @param enmPolicy The scheduling policy to query.
603 * @param pfEnforced Where to return the current value.
604 */
605VMMR3DECL(int) EMR3QueryExecutionPolicy(PUVM pUVM, EMEXECPOLICY enmPolicy, bool *pfEnforced)
606{
607 AssertReturn(enmPolicy > EMEXECPOLICY_INVALID && enmPolicy < EMEXECPOLICY_END, VERR_INVALID_PARAMETER);
608 AssertPtrReturn(pfEnforced, VERR_INVALID_POINTER);
609 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
610 PVM pVM = pUVM->pVM;
611 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
612
613 /* No need to bother EMTs with a query. */
614 switch (enmPolicy)
615 {
616 case EMEXECPOLICY_IEM_ALL:
617 *pfEnforced = pVM->em.s.fIemExecutesAll;
618 break;
619 case EMEXECPOLICY_IEM_RECOMPILED:
620 *pfEnforced = pVM->em.s.fIemRecompiled;
621 break;
622 default:
623 AssertFailedReturn(VERR_INTERNAL_ERROR_2);
624 }
625
626 return VINF_SUCCESS;
627}
628
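The two policy APIs above are the public ring-3 entry points for toggling execution policy (used, for example, by the '.alliem' debugger command mentioned earlier). A minimal usage sketch follows, assuming only a valid PUVM handle obtained elsewhere; the wrapper function name is illustrative:

static int exampleForceIemAll(PUVM pUVM)
{
    /* Enforce interpreting/recompiling all guest code (triggers an EMT rendezvous). */
    int rc = EMR3SetExecutionPolicy(pUVM, EMEXECPOLICY_IEM_ALL, true /*fEnforce*/);
    if (RT_FAILURE(rc))
        return rc;

    /* Read the setting back; the query does not need to bother the EMTs. */
    bool fEnforced = false;
    rc = EMR3QueryExecutionPolicy(pUVM, EMEXECPOLICY_IEM_ALL, &fEnforced);
    Assert(RT_FAILURE(rc) || fEnforced);
    return rc;
}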
629
630/**
631 * Queries the main execution engine of the VM.
632 *
633 * @returns VBox status code
634 * @param pUVM The user mode VM handle.
635 * @param pbMainExecutionEngine Where to return the result, VM_EXEC_ENGINE_XXX.
636 */
637VMMR3DECL(int) EMR3QueryMainExecutionEngine(PUVM pUVM, uint8_t *pbMainExecutionEngine)
638{
639 AssertPtrReturn(pbMainExecutionEngine, VERR_INVALID_POINTER);
640 *pbMainExecutionEngine = VM_EXEC_ENGINE_NOT_SET;
641
642 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
643 PVM pVM = pUVM->pVM;
644 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
645
646 *pbMainExecutionEngine = pVM->bMainExecutionEngine;
647 return VINF_SUCCESS;
648}
649
650
651/**
652 * Raise a fatal error.
653 *
654 * Safely terminate the VM with full state report and stuff. This function
655 * will naturally never return.
656 *
657 * @param pVCpu The cross context virtual CPU structure.
658 * @param rc VBox status code.
659 */
660VMMR3DECL(void) EMR3FatalError(PVMCPU pVCpu, int rc)
661{
662 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
663 longjmp(pVCpu->em.s.u.FatalLongJump, rc);
664}
665
666
667#if defined(LOG_ENABLED) || defined(VBOX_STRICT)
668/**
669 * Gets the EM state name.
670 *
671 * @returns pointer to read-only state name.
672 * @param enmState The state.
673 */
674static const char *emR3GetStateName(EMSTATE enmState)
675{
676 switch (enmState)
677 {
678 case EMSTATE_NONE: return "EMSTATE_NONE";
679 case EMSTATE_RAW_OBSOLETE: return "EMSTATE_RAW_OBSOLETE";
680 case EMSTATE_HM: return "EMSTATE_HM";
681 case EMSTATE_IEM: return "EMSTATE_IEM";
682 case EMSTATE_RECOMPILER: return "EMSTATE_RECOMPILER";
683 case EMSTATE_HALTED: return "EMSTATE_HALTED";
684 case EMSTATE_WAIT_SIPI: return "EMSTATE_WAIT_SIPI";
685 case EMSTATE_SUSPENDED: return "EMSTATE_SUSPENDED";
686 case EMSTATE_TERMINATING: return "EMSTATE_TERMINATING";
687 case EMSTATE_DEBUG_GUEST_RAW: return "EMSTATE_DEBUG_GUEST_RAW";
688 case EMSTATE_DEBUG_GUEST_HM: return "EMSTATE_DEBUG_GUEST_HM";
689 case EMSTATE_DEBUG_GUEST_IEM: return "EMSTATE_DEBUG_GUEST_IEM";
690 case EMSTATE_DEBUG_GUEST_RECOMPILER: return "EMSTATE_DEBUG_GUEST_RECOMPILER";
691 case EMSTATE_DEBUG_HYPER: return "EMSTATE_DEBUG_HYPER";
692 case EMSTATE_GURU_MEDITATION: return "EMSTATE_GURU_MEDITATION";
693 case EMSTATE_IEM_THEN_REM_OBSOLETE: return "EMSTATE_IEM_THEN_REM_OBSOLETE";
694 case EMSTATE_NEM: return "EMSTATE_NEM";
695 case EMSTATE_DEBUG_GUEST_NEM: return "EMSTATE_DEBUG_GUEST_NEM";
696 default: return "Unknown!";
697 }
698}
699#endif /* LOG_ENABLED || VBOX_STRICT */
700
701
702#if !defined(VBOX_VMM_TARGET_ARMV8)
703/**
704 * Handle pending ring-3 I/O port write.
705 *
706 * This is in response to a VINF_EM_PENDING_R3_IOPORT_WRITE status code returned
707 * by EMRZSetPendingIoPortWrite() in ring-0 or raw-mode context.
708 *
709 * @returns Strict VBox status code.
710 * @param pVM The cross context VM structure.
711 * @param pVCpu The cross context virtual CPU structure.
712 */
713VBOXSTRICTRC emR3ExecutePendingIoPortWrite(PVM pVM, PVMCPU pVCpu)
714{
715 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
716
717 /* Get and clear the pending data. */
718 RTIOPORT const uPort = pVCpu->em.s.PendingIoPortAccess.uPort;
719 uint32_t const uValue = pVCpu->em.s.PendingIoPortAccess.uValue;
720 uint8_t const cbValue = pVCpu->em.s.PendingIoPortAccess.cbValue;
721 uint8_t const cbInstr = pVCpu->em.s.PendingIoPortAccess.cbInstr;
722 pVCpu->em.s.PendingIoPortAccess.cbValue = 0;
723
724 /* Assert sanity. */
725 switch (cbValue)
726 {
727 case 1: Assert(!(uValue & UINT32_C(0xffffff00))); break;
728 case 2: Assert(!(uValue & UINT32_C(0xffff0000))); break;
729 case 4: break;
730 default: AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_EM_INTERNAL_ERROR);
731 }
732 AssertReturn(cbInstr <= 15 && cbInstr >= 1, VERR_EM_INTERNAL_ERROR);
733
734 /* Do the work.*/
735 VBOXSTRICTRC rcStrict = IOMIOPortWrite(pVM, pVCpu, uPort, uValue, cbValue);
736 LogFlow(("EM/OUT: %#x, %#x LB %u -> %Rrc\n", uPort, uValue, cbValue, VBOXSTRICTRC_VAL(rcStrict) ));
737 if (IOM_SUCCESS(rcStrict))
738 {
739 pVCpu->cpum.GstCtx.rip += cbInstr;
740 pVCpu->cpum.GstCtx.rflags.Bits.u1RF = 0;
741 }
742 return rcStrict;
743}
744
745
746/**
747 * Handle pending ring-3 I/O port read.
748 *
749 * This is in response to a VINF_EM_PENDING_R3_IOPORT_READ status code returned
750 * by EMRZSetPendingIoPortRead() in ring-0 or raw-mode context.
751 *
752 * @returns Strict VBox status code.
753 * @param pVM The cross context VM structure.
754 * @param pVCpu The cross context virtual CPU structure.
755 */
756VBOXSTRICTRC emR3ExecutePendingIoPortRead(PVM pVM, PVMCPU pVCpu)
757{
758 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_RAX);
759
760 /* Get and clear the pending data. */
761 RTIOPORT const uPort = pVCpu->em.s.PendingIoPortAccess.uPort;
762 uint8_t const cbValue = pVCpu->em.s.PendingIoPortAccess.cbValue;
763 uint8_t const cbInstr = pVCpu->em.s.PendingIoPortAccess.cbInstr;
764 pVCpu->em.s.PendingIoPortAccess.cbValue = 0;
765
766 /* Assert sanity. */
767 switch (cbValue)
768 {
769 case 1: break;
770 case 2: break;
771 case 4: break;
772 default: AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_EM_INTERNAL_ERROR);
773 }
774 AssertReturn(pVCpu->em.s.PendingIoPortAccess.uValue == UINT32_C(0x52454144) /* READ*/, VERR_EM_INTERNAL_ERROR);
775 AssertReturn(cbInstr <= 15 && cbInstr >= 1, VERR_EM_INTERNAL_ERROR);
776
777 /* Do the work.*/
778 uint32_t uValue = 0;
779 VBOXSTRICTRC rcStrict = IOMIOPortRead(pVM, pVCpu, uPort, &uValue, cbValue);
780 LogFlow(("EM/IN: %#x LB %u -> %Rrc, %#x\n", uPort, cbValue, VBOXSTRICTRC_VAL(rcStrict), uValue ));
781 if (IOM_SUCCESS(rcStrict))
782 {
783 if (cbValue == 4)
784 pVCpu->cpum.GstCtx.rax = uValue;
785 else if (cbValue == 2)
786 pVCpu->cpum.GstCtx.ax = (uint16_t)uValue;
787 else
788 pVCpu->cpum.GstCtx.al = (uint8_t)uValue;
789 pVCpu->cpum.GstCtx.rip += cbInstr;
790 pVCpu->cpum.GstCtx.rflags.Bits.u1RF = 0;
791 }
792 return rcStrict;
793}
794
795
796/**
797 * @callback_method_impl{FNVMMEMTRENDEZVOUS,
798 * Worker for emR3ExecuteSplitLockInstruction}
799 */
800static DECLCALLBACK(VBOXSTRICTRC) emR3ExecuteSplitLockInstructionRendezvous(PVM pVM, PVMCPU pVCpu, void *pvUser)
801{
802 /* Only execute on the specified EMT. */
803 if (pVCpu == (PVMCPU)pvUser)
804 {
805 LogFunc(("\n"));
806 VBOXSTRICTRC rcStrict = IEMExecOneIgnoreLock(pVCpu);
807 LogFunc(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
808 if (rcStrict == VINF_IEM_RAISED_XCPT)
809 rcStrict = VINF_SUCCESS;
810 return rcStrict;
811 }
812 RT_NOREF(pVM);
813 return VINF_SUCCESS;
814}
815
816
817/**
818 * Handle an instruction causing a split cacheline lock access in SMP VMs.
819 *
820 * Generally we only get here if the host has split-lock detection enabled and
821 * this caused an \#AC because of something the guest did. If we interpret the
822 * instruction as-is, we'll likely just repeat the split-lock access and
823 * possibly be killed, get a SIGBUS, or trigger a warning followed by extra MSR
824 * changes on context switching (costs a tiny bit). Assuming these \#ACs are
825 * rare to non-existing, we'll do a rendezvous of all EMTs and tell IEM to
826 * disregard the lock prefix when emulating the instruction.
827 *
828 * Yes, we could probably modify the MSR (or MSRs) controlling the detection
829 * feature when entering guest context, but the support for the feature isn't a
830 * 100% given and we'll need the debug-only supdrvOSMsrProberRead and
831 * supdrvOSMsrProberWrite functionality from SUPDrv.cpp to safely detect it.
832 * Thus the approach is to just deal with the spurious \#ACs first and maybe add
834 * proper detection to SUPDrv later if we find it necessary.
834 *
835 * @see @bugref{10052}
836 *
837 * @returns Strict VBox status code.
838 * @param pVM The cross context VM structure.
839 * @param pVCpu The cross context virtual CPU structure.
840 */
841VBOXSTRICTRC emR3ExecuteSplitLockInstruction(PVM pVM, PVMCPU pVCpu)
842{
843 LogFunc(("\n"));
844 return VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE, emR3ExecuteSplitLockInstructionRendezvous, pVCpu);
845}
846#endif /* VBOX_VMM_TARGET_ARMV8 */
847
848
849/**
850 * Debug loop.
851 *
852 * @returns VBox status code for EM.
853 * @param pVM The cross context VM structure.
854 * @param pVCpu The cross context virtual CPU structure.
855 * @param rc Current EM VBox status code.
856 */
857static VBOXSTRICTRC emR3Debug(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc)
858{
859 for (;;)
860 {
861 Log(("emR3Debug: rc=%Rrc\n", VBOXSTRICTRC_VAL(rc)));
862 const VBOXSTRICTRC rcLast = rc;
863
864 /*
865 * Debug related RC.
866 */
867 switch (VBOXSTRICTRC_VAL(rc))
868 {
869 /*
870 * Single step an instruction.
871 */
872 case VINF_EM_DBG_STEP:
873 if ( pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_RAW
874 || pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER)
875 AssertLogRelMsgFailedStmt(("Bad EM state."), VERR_EM_INTERNAL_ERROR);
876#if !defined(VBOX_VMM_TARGET_ARMV8)
877 else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_HM)
878 rc = EMR3HmSingleInstruction(pVM, pVCpu, 0 /*fFlags*/);
879#endif
880 else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_NEM)
881 rc = VBOXSTRICTRC_TODO(emR3NemSingleInstruction(pVM, pVCpu, 0 /*fFlags*/));
882 else
883 {
884 rc = IEMExecOne(pVCpu); /** @todo add dedicated interface... */
885 if (rc == VINF_SUCCESS || rc == VINF_EM_RESCHEDULE)
886 rc = VINF_EM_DBG_STEPPED;
887 }
888 break;
889
890 /*
891 * Simple events: stepped, breakpoint, stop/assertion.
892 */
893 case VINF_EM_DBG_STEPPED:
894 rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED);
895 break;
896
897 case VINF_EM_DBG_BREAKPOINT:
898 rc = DBGFR3BpHit(pVM, pVCpu);
899 break;
900
901 case VINF_EM_DBG_STOP:
902 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, NULL, 0, NULL, NULL);
903 break;
904
905 case VINF_EM_DBG_EVENT:
906 rc = DBGFR3EventHandlePending(pVM, pVCpu);
907 break;
908
909 case VINF_EM_DBG_HYPER_STEPPED:
910 rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED_HYPER);
911 break;
912
913 case VINF_EM_DBG_HYPER_BREAKPOINT:
914 rc = DBGFR3EventBreakpoint(pVM, DBGFEVENT_BREAKPOINT_HYPER);
915 break;
916
917 case VINF_EM_DBG_HYPER_ASSERTION:
918 RTPrintf("\nVINF_EM_DBG_HYPER_ASSERTION:\n%s%s\n", VMMR3GetRZAssertMsg1(pVM), VMMR3GetRZAssertMsg2(pVM));
919 RTLogFlush(NULL);
920 rc = DBGFR3EventAssertion(pVM, DBGFEVENT_ASSERTION_HYPER, VMMR3GetRZAssertMsg1(pVM), VMMR3GetRZAssertMsg2(pVM));
921 break;
922
923 /*
924 * Guru meditation.
925 */
926 case VERR_VMM_RING0_ASSERTION: /** @todo Make a guru meditation event! */
927 rc = DBGFR3EventSrc(pVM, DBGFEVENT_FATAL_ERROR, "VERR_VMM_RING0_ASSERTION", 0, NULL, NULL);
928 break;
929 case VINF_EM_TRIPLE_FAULT: /** @todo Make a guru meditation event! */
930 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, "VINF_EM_TRIPLE_FAULT", 0, NULL, NULL);
931 break;
932
933 default: /** @todo don't use default for guru, but make special errors code! */
934 {
935 LogRel(("emR3Debug: rc=%Rrc\n", VBOXSTRICTRC_VAL(rc)));
936 rc = DBGFR3Event(pVM, DBGFEVENT_FATAL_ERROR);
937 break;
938 }
939 }
940
941 /*
942 * Process the result.
943 */
944 switch (VBOXSTRICTRC_VAL(rc))
945 {
946 /*
947 * Continue the debugging loop.
948 */
949 case VINF_EM_DBG_STEP:
950 case VINF_EM_DBG_STOP:
951 case VINF_EM_DBG_EVENT:
952 case VINF_EM_DBG_STEPPED:
953 case VINF_EM_DBG_BREAKPOINT:
954 case VINF_EM_DBG_HYPER_STEPPED:
955 case VINF_EM_DBG_HYPER_BREAKPOINT:
956 case VINF_EM_DBG_HYPER_ASSERTION:
957 break;
958
959 /*
960 * Resuming execution (in some form) has to be done here if we got
961 * a hypervisor debug event.
962 */
963 case VINF_SUCCESS:
964 case VINF_EM_RESUME:
965 case VINF_EM_SUSPEND:
966 case VINF_EM_RESCHEDULE:
967 case VINF_EM_RESCHEDULE_RAW:
968 case VINF_EM_RESCHEDULE_REM:
969 case VINF_EM_HALT:
970 if (pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER)
971 AssertLogRelMsgFailedReturn(("Not implemented\n"), VERR_EM_INTERNAL_ERROR);
972 if (rc == VINF_SUCCESS)
973 rc = VINF_EM_RESCHEDULE;
974 return rc;
975
976 /*
977 * The debugger isn't attached.
978 * We'll simply turn the thing off since that's the easiest thing to do.
979 */
980 case VERR_DBGF_NOT_ATTACHED:
981 switch (VBOXSTRICTRC_VAL(rcLast))
982 {
983 case VINF_EM_DBG_HYPER_STEPPED:
984 case VINF_EM_DBG_HYPER_BREAKPOINT:
985 case VINF_EM_DBG_HYPER_ASSERTION:
986 case VERR_TRPM_PANIC:
987 case VERR_TRPM_DONT_PANIC:
988 case VERR_VMM_RING0_ASSERTION:
989 case VERR_VMM_HYPER_CR3_MISMATCH:
990 case VERR_VMM_RING3_CALL_DISABLED:
991 return rcLast;
992 }
993 return VINF_EM_OFF;
994
995 /*
996 * Status codes terminating the VM in one or another sense.
997 */
998 case VINF_EM_TERMINATE:
999 case VINF_EM_OFF:
1000 case VINF_EM_RESET:
1001 case VINF_EM_NO_MEMORY:
1002 case VINF_EM_RAW_STALE_SELECTOR:
1003 case VINF_EM_RAW_IRET_TRAP:
1004 case VERR_TRPM_PANIC:
1005 case VERR_TRPM_DONT_PANIC:
1006 case VERR_IEM_INSTR_NOT_IMPLEMENTED:
1007 case VERR_IEM_ASPECT_NOT_IMPLEMENTED:
1008 case VERR_VMM_RING0_ASSERTION:
1009 case VERR_VMM_HYPER_CR3_MISMATCH:
1010 case VERR_VMM_RING3_CALL_DISABLED:
1011 case VERR_INTERNAL_ERROR:
1012 case VERR_INTERNAL_ERROR_2:
1013 case VERR_INTERNAL_ERROR_3:
1014 case VERR_INTERNAL_ERROR_4:
1015 case VERR_INTERNAL_ERROR_5:
1016 case VERR_IPE_UNEXPECTED_STATUS:
1017 case VERR_IPE_UNEXPECTED_INFO_STATUS:
1018 case VERR_IPE_UNEXPECTED_ERROR_STATUS:
1019 return rc;
1020
1021 /*
1022 * The rest is unexpected, and will keep us here.
1023 */
1024 default:
1025 AssertMsgFailed(("Unexpected rc %Rrc!\n", VBOXSTRICTRC_VAL(rc)));
1026 break;
1027 }
1028 } /* debug for ever */
1029}
1030
1031
1032/**
1033 * Executes recompiled code.
1034 *
1035 * This function contains the recompiler version of the inner
1036 * execution loop (the outer loop being in EMR3ExecuteVM()).
1037 *
1038 * @returns VBox status code. The most important ones are: VINF_EM_RESCHEDULE,
1039 * VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1040 *
1041 * @param pVM The cross context VM structure.
1042 * @param pVCpu The cross context virtual CPU structure.
1043 * @param pfFFDone Where to store an indicator telling whether or not
1044 * FFs were done before returning.
1045 *
1046 */
1047static VBOXSTRICTRC emR3RecompilerExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
1048{
1049 STAM_REL_PROFILE_START(&pVCpu->em.s.StatREMTotal, a);
1050#ifdef VBOX_VMM_TARGET_ARMV8
1051 LogFlow(("emR3RecompilerExecute/%u: (pc=%RGv)\n", pVCpu->idCpu, (RTGCPTR)pVCpu->cpum.GstCtx.Pc.u64));
1052#else
1053 LogFlow(("emR3RecompilerExecute/%u: (cs:eip=%04x:%RGv)\n", pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, (RTGCPTR)pVCpu->cpum.GstCtx.rip));
1054#endif
1055
1056 /*
1057 * Loop till we get a forced action which returns anything but VINF_SUCCESS.
1058 */
1059 *pfFFDone = false;
1060 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
1061 for (;;)
1062 {
1063#ifdef LOG_ENABLED
1064# if defined(VBOX_VMM_TARGET_ARMV8)
1065 Log3(("EM: pc=%08x\n", CPUMGetGuestFlatPC(pVCpu)));
1066# else
1067 if (!pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
1068 Log(("EMR%d: %04X:%08RX64 RSP=%08RX64 IF=%d CR0=%x eflags=%x\n", CPUMGetGuestCPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel,
1069 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.Bits.u1IF,
1070 (uint32_t)pVCpu->cpum.GstCtx.cr0, pVCpu->cpum.GstCtx.eflags.u));
1071 else
1072 Log(("EMV86: %04X:%08X IF=%d\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.Bits.u1IF));
1073# endif
1074#endif
1075
1076 /*
1077 * Execute.
1078 */
1079 if (RT_LIKELY(emR3IsExecutionAllowed(pVM, pVCpu)))
1080 {
1081 STAM_PROFILE_START(&pVCpu->em.s.StatREMExec, c);
1082#ifdef VBOX_WITH_IEM_RECOMPILER
1083 if (pVM->em.s.fIemRecompiled)
1084 rcStrict = IEMExecRecompilerThreaded(pVM, pVCpu);
1085 else
1086#endif
1087 rcStrict = IEMExecLots(pVCpu, 8192 /*cMaxInstructions*/, 4095 /*cPollRate*/, NULL /*pcInstructions*/);
1088 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMExec, c);
1089 }
1090 else
1091 {
1092 /* Give up this time slice; virtual time continues */
1093 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatCapped, u);
1094 RTThreadSleep(5);
1095 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatCapped, u);
1096 rcStrict = VINF_SUCCESS;
1097 }
1098
1099 /*
1100 * Deal with high priority post execution FFs before doing anything
1101 * else. Sync back the state and leave the lock to be on the safe side.
1102 */
1103 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_HIGH_PRIORITY_POST_MASK)
1104 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_MASK))
1105 rcStrict = emR3HighPriorityPostForcedActions(pVM, pVCpu, rcStrict);
1106
1107 /*
1108 * Process the returned status code.
1109 */
1110 if (rcStrict != VINF_SUCCESS)
1111 {
1112#if 0
1113 if (RT_LIKELY(rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST))
1114 break;
1115 /* Fatal error: */
1116#endif
1117 break;
1118 }
1119
1120
1121 /*
1122 * Check and execute forced actions.
1123 *
1124 * Sync back the VM state and leave the lock before calling any of
1125 * these, you never know what's going to happen here.
1126 */
1127#ifdef VBOX_HIGH_RES_TIMERS_HACK
1128 TMTimerPollVoid(pVM, pVCpu);
1129#endif
1130 AssertCompile(VMCPU_FF_ALL_REM_MASK & VMCPU_FF_TIMER);
1131 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_REM_MASK)
1132 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_ALL_REM_MASK) )
1133 {
1134 rcStrict = emR3ForcedActions(pVM, pVCpu, VBOXSTRICTRC_TODO(rcStrict));
1135 VBOXVMM_EM_FF_ALL_RET(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1136 if ( rcStrict != VINF_SUCCESS
1137 && rcStrict != VINF_EM_RESCHEDULE_REM)
1138 {
1139 *pfFFDone = true;
1140 break;
1141 }
1142 }
1143
1144 } /* The Inner Loop, recompiled execution mode version. */
1145
1146 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatREMTotal, a);
1147 return rcStrict;
1148}
1149
1150
1151/**
1152 * Decides whether to execute HM, NEM, IEM/interpreter or IEM/recompiler.
1153 *
1154 * @returns new EM state
1155 * @param pVM The cross context VM structure.
1156 * @param pVCpu The cross context virtual CPU structure.
1157 */
1158EMSTATE emR3Reschedule(PVM pVM, PVMCPU pVCpu)
1159{
1160 /*
1161 * We stay in the wait for SIPI state unless explicitly told otherwise.
1162 */
1163 if (pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI)
1164 return EMSTATE_WAIT_SIPI;
1165
1166 /*
1167 * Execute everything in IEM?
1168 */
1169 if ( pVM->em.s.fIemExecutesAll
1170 || VM_IS_EXEC_ENGINE_IEM(pVM))
1171#ifdef VBOX_WITH_IEM_RECOMPILER
1172 return pVM->em.s.fIemRecompiled ? EMSTATE_RECOMPILER : EMSTATE_IEM;
1173#else
1174 return EMSTATE_IEM;
1175#endif
1176
1177#if !defined(VBOX_VMM_TARGET_ARMV8)
1178 if (VM_IS_HM_ENABLED(pVM))
1179 {
1180 if (HMCanExecuteGuest(pVM, pVCpu, &pVCpu->cpum.GstCtx))
1181 return EMSTATE_HM;
1182 }
1183 else
1184#endif
1185 if (NEMR3CanExecuteGuest(pVM, pVCpu))
1186 return EMSTATE_NEM;
1187
1188 /*
1189 * Note! Raw mode and hw accelerated mode are incompatible. The latter
1190 * turns off monitoring features essential for raw mode!
1191 */
1192#ifdef VBOX_WITH_IEM_RECOMPILER
1193 return pVM->em.s.fIemRecompiled ? EMSTATE_RECOMPILER : EMSTATE_IEM;
1194#else
1195 return EMSTATE_IEM;
1196#endif
1197}
1198
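emR3Reschedule only computes the next EM state; acting on it is left to the caller, i.e. the outer loop in EMR3ExecuteVM, which is not part of this excerpt. The following is a simplified, illustrative sketch of how that decision could be consumed, under the assumption that the caller simply adopts the returned state (the real loop also deals with debug states, halting and forced actions):

static void exampleApplySchedulingDecision(PVM pVM, PVMCPU pVCpu)
{
    /* Pick the execution engine for the next chunk of guest code. */
    EMSTATE const enmNewState = emR3Reschedule(pVM, pVCpu);

    /* Adopt the decision; the outer loop then enters the matching inner loop
       (emR3HmExecute, emR3NemExecute or emR3RecompilerExecute). */
    if (   enmNewState == EMSTATE_HM
        || enmNewState == EMSTATE_NEM
        || enmNewState == EMSTATE_IEM
        || enmNewState == EMSTATE_RECOMPILER
        || enmNewState == EMSTATE_WAIT_SIPI)
        pVCpu->em.s.enmState = enmNewState;
}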
1199
1200/**
1201 * Executes all high priority post execution force actions.
1202 *
1203 * @returns Strict VBox status code. Typically @a rc, but may be upgraded to
1204 * fatal error status code.
1205 *
1206 * @param pVM The cross context VM structure.
1207 * @param pVCpu The cross context virtual CPU structure.
1208 * @param rc The current strict VBox status code rc.
1209 */
1210VBOXSTRICTRC emR3HighPriorityPostForcedActions(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc)
1211{
1212 VBOXVMM_EM_FF_HIGH(pVCpu, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions, VBOXSTRICTRC_VAL(rc));
1213
1214 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PDM_CRITSECT))
1215 PDMCritSectBothFF(pVM, pVCpu);
1216
1217#if !defined(VBOX_VMM_TARGET_ARMV8)
1218 /* Update CR3 (Nested Paging case for HM). */
1219 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
1220 {
1221 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_EFER, rc);
1222 int const rc2 = PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
1223 if (RT_FAILURE(rc2))
1224 return rc2;
1225 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
1226 }
1227#endif
1228
1229 /* IEM has pending work (typically memory write after INS instruction). */
1230 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
1231 rc = IEMR3ProcessForceFlag(pVM, pVCpu, rc);
1232
1233 /* IOM has pending work (committing an I/O or MMIO write). */
1234 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IOM))
1235 {
1236 rc = IOMR3ProcessForceFlag(pVM, pVCpu, rc);
1237 if (pVCpu->em.s.idxContinueExitRec >= RT_ELEMENTS(pVCpu->em.s.aExitRecords))
1238 { /* half likely, or at least it's a line shorter. */ }
1239 else if (rc == VINF_SUCCESS)
1240 rc = VINF_EM_RESUME_R3_HISTORY_EXEC;
1241 else
1242 pVCpu->em.s.idxContinueExitRec = UINT16_MAX;
1243 }
1244
1245 if (VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
1246 {
1247 if ( rc > VINF_EM_NO_MEMORY
1248 && rc <= VINF_EM_LAST)
1249 rc = VINF_EM_NO_MEMORY;
1250 }
1251
1252 return rc;
1253}
1254
1255
1256#if !defined(VBOX_VMM_TARGET_ARMV8)
1257/**
1258 * Helper for emR3ForcedActions() for VMX external interrupt VM-exit.
1259 *
1260 * @returns VBox status code.
1261 * @retval VINF_NO_CHANGE if the VMX external interrupt intercept was not active.
1262 * @param pVCpu The cross context virtual CPU structure.
1263 */
1264static int emR3VmxNstGstIntrIntercept(PVMCPU pVCpu)
1265{
1266#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1267 /* Handle the "external interrupt" VM-exit intercept. */
1268 if ( CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_EXT_INT_EXIT)
1269 && !CPUMIsGuestVmxExitCtlsSet(&pVCpu->cpum.GstCtx, VMX_EXIT_CTLS_ACK_EXT_INT))
1270 {
1271 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, 0 /* uVector */, true /* fIntPending */);
1272 AssertMsg( rcStrict != VINF_VMX_VMEXIT /* VM-exit should have been converted to VINF_SUCCESS. */
1273 && rcStrict != VINF_NO_CHANGE
1274 && rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1275 return VBOXSTRICTRC_VAL(rcStrict);
1276 }
1277#else
1278 RT_NOREF(pVCpu);
1279#endif
1280 return VINF_NO_CHANGE;
1281}
1282
1283
1284/**
1285 * Helper for emR3ForcedActions() for SVM interrupt intercept.
1286 *
1287 * @returns VBox status code.
1288 * @retval VINF_NO_CHANGE if the SVM external interrupt intercept was not active.
1289 * @param pVCpu The cross context virtual CPU structure.
1290 */
1291static int emR3SvmNstGstIntrIntercept(PVMCPU pVCpu)
1292{
1293#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1294 /* Handle the physical interrupt intercept (can be masked by the nested hypervisor). */
1295 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, &pVCpu->cpum.GstCtx, SVM_CTRL_INTERCEPT_INTR))
1296 {
1297 CPUM_ASSERT_NOT_EXTRN(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK);
1298 VBOXSTRICTRC rcStrict = IEMExecSvmVmexit(pVCpu, SVM_EXIT_INTR, 0, 0);
1299 if (RT_SUCCESS(rcStrict))
1300 {
1301 AssertMsg( rcStrict != VINF_SVM_VMEXIT
1302 && rcStrict != VINF_NO_CHANGE, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1303 return VBOXSTRICTRC_VAL(rcStrict);
1304 }
1305
1306 AssertMsgFailed(("INTR #VMEXIT failed! rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1307 return VINF_EM_TRIPLE_FAULT;
1308 }
1309#else
1310 NOREF(pVCpu);
1311#endif
1312 return VINF_NO_CHANGE;
1313}
1314
1315
1316/**
1317 * Helper for emR3ForcedActions() for SVM virtual interrupt intercept.
1318 *
1319 * @returns VBox status code.
1320 * @retval VINF_NO_CHANGE if the SVM virtual interrupt intercept was not active.
1321 * @param pVCpu The cross context virtual CPU structure.
1322 */
1323static int emR3SvmNstGstVirtIntrIntercept(PVMCPU pVCpu)
1324{
1325#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1326 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, &pVCpu->cpum.GstCtx, SVM_CTRL_INTERCEPT_VINTR))
1327 {
1328 CPUM_ASSERT_NOT_EXTRN(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK);
1329 VBOXSTRICTRC rcStrict = IEMExecSvmVmexit(pVCpu, SVM_EXIT_VINTR, 0, 0);
1330 if (RT_SUCCESS(rcStrict))
1331 {
1332 Assert(rcStrict != VINF_SVM_VMEXIT);
1333 return VBOXSTRICTRC_VAL(rcStrict);
1334 }
1335 AssertMsgFailed(("VINTR #VMEXIT failed! rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1336 return VINF_EM_TRIPLE_FAULT;
1337 }
1338#else
1339 NOREF(pVCpu);
1340#endif
1341 return VINF_NO_CHANGE;
1342}
1343#endif
1344
1345
1346/**
1347 * Executes all pending forced actions.
1348 *
1349 * Forced actions can cause execution delays and execution
1350 * rescheduling. The first we deal with using action priority, so
1351 * that for instance pending timers aren't scheduled and ran until
1352 * right before execution. The rescheduling we deal with using
1353 * return codes. The same goes for VM termination, only in that case
1354 * we exit everything.
1355 *
1356 * @returns VBox status code of equal or greater importance/severity than rc.
1357 * The most important ones are: VINF_EM_RESCHEDULE,
1358 * VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1359 *
1360 * @param pVM The cross context VM structure.
1361 * @param pVCpu The cross context virtual CPU structure.
1362 * @param rc The current rc.
1363 *
1364 */
1365int emR3ForcedActions(PVM pVM, PVMCPU pVCpu, int rc)
1366{
1367 STAM_REL_PROFILE_START(&pVCpu->em.s.StatForcedActions, a);
1368#ifdef VBOX_STRICT
1369 int rcIrq = VINF_SUCCESS;
1370#endif
1371 int rc2;
1372#define UPDATE_RC() \
1373 do { \
1374 AssertMsg(rc2 <= 0 || (rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST), ("Invalid FF return code: %Rra\n", rc2)); \
1375 if (rc2 == VINF_SUCCESS || rc < VINF_SUCCESS) \
1376 break; \
1377 if (!rc || rc2 < rc) \
1378 rc = rc2; \
1379 } while (0)
1380 VBOXVMM_EM_FF_ALL(pVCpu, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions, rc);
1381
1382 /*
1383 * Post execution chunk first.
1384 */
1385 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_NORMAL_PRIORITY_POST_MASK)
1386 || (VMCPU_FF_NORMAL_PRIORITY_POST_MASK && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_NORMAL_PRIORITY_POST_MASK)) )
1387 {
1388 /*
1389 * EMT Rendezvous (must be serviced before termination).
1390 */
1391 if (VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
1392 {
1393 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1394 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
1395 UPDATE_RC();
1396 /** @todo HACK ALERT! The following test is to make sure EM+TM
1397 * thinks the VM is stopped/reset before the next VM state change
1398 * is made. We need a better solution for this, or at least make it
1399 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1400 * VINF_EM_SUSPEND). */
1401 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1402 {
1403 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1404 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1405 return rc;
1406 }
1407 }
1408
1409 /*
1410 * State change request (cleared by vmR3SetStateLocked).
1411 */
1412 if (VM_FF_IS_SET(pVM, VM_FF_CHECK_VM_STATE))
1413 {
1414 VMSTATE enmState = VMR3GetState(pVM);
1415 switch (enmState)
1416 {
1417 case VMSTATE_FATAL_ERROR:
1418 case VMSTATE_FATAL_ERROR_LS:
1419 case VMSTATE_GURU_MEDITATION:
1420 case VMSTATE_GURU_MEDITATION_LS:
1421 Log2(("emR3ForcedActions: %s -> VINF_EM_SUSPEND\n", VMGetStateName(enmState) ));
1422 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1423 return VINF_EM_SUSPEND;
1424
1425 case VMSTATE_DESTROYING:
1426 Log2(("emR3ForcedActions: %s -> VINF_EM_TERMINATE\n", VMGetStateName(enmState) ));
1427 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1428 return VINF_EM_TERMINATE;
1429
1430 default:
1431 AssertMsgFailed(("%s\n", VMGetStateName(enmState)));
1432 }
1433 }
1434
1435 /*
1436 * Debugger Facility polling.
1437 */
1438 if ( VM_FF_IS_SET(pVM, VM_FF_DBGF)
1439 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_DBGF) )
1440 {
1441 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1442 rc2 = DBGFR3VMMForcedAction(pVM, pVCpu);
1443 /** @todo why that VINF_EM_DBG_EVENT here? Duplicate info, should be handled
1444 * somewhere before we get here, I would think. */
1445 if (rc == VINF_EM_DBG_EVENT) /* HACK! We should've handled pending debug event. */
1446 rc = rc2;
1447 else
1448 UPDATE_RC();
1449 }
1450
1451 /*
1452 * Postponed reset request.
1453 */
1454 if (VM_FF_TEST_AND_CLEAR(pVM, VM_FF_RESET))
1455 {
1456 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1457 rc2 = VBOXSTRICTRC_TODO(VMR3ResetFF(pVM));
1458 UPDATE_RC();
1459 }
1460
1461 /*
1462 * Out of memory? Putting this after CSAM as it may in theory cause us to run out of memory.
1463 */
1464 if (VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
1465 {
1466 rc2 = PGMR3PhysAllocateHandyPages(pVM);
1467 UPDATE_RC();
1468 if (rc == VINF_EM_NO_MEMORY)
1469 return rc;
1470 }
1471
1472 /* check that we got them all */
1473 AssertCompile(VM_FF_NORMAL_PRIORITY_POST_MASK == (VM_FF_CHECK_VM_STATE | VM_FF_DBGF | VM_FF_RESET | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
1474 AssertCompile(VMCPU_FF_NORMAL_PRIORITY_POST_MASK == VMCPU_FF_DBGF);
1475 }
1476
1477 /*
1478 * Normal priority then.
1479 * (Executed in no particular order.)
1480 */
1481 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_NORMAL_PRIORITY_MASK, VM_FF_PGM_NO_MEMORY))
1482 {
1483 /*
1484 * PDM Queues are pending.
1485 */
1486 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PDM_QUEUES, VM_FF_PGM_NO_MEMORY))
1487 PDMR3QueueFlushAll(pVM);
1488
1489 /*
1490 * PDM DMA transfers are pending.
1491 */
1492 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PDM_DMA, VM_FF_PGM_NO_MEMORY))
1493 PDMR3DmaRun(pVM);
1494
1495 /*
1496 * EMT Rendezvous (make sure they are handled before the requests).
1497 */
1498 if (VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
1499 {
1500 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1501 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
1502 UPDATE_RC();
1503 /** @todo HACK ALERT! The following test is to make sure EM+TM
1504 * thinks the VM is stopped/reset before the next VM state change
1505 * is made. We need a better solution for this, or at least make it
1506 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1507 * VINF_EM_SUSPEND). */
1508 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1509 {
1510 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1511 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1512 return rc;
1513 }
1514 }
1515
1516 /*
1517 * Requests from other threads.
1518 */
1519 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_REQUEST, VM_FF_PGM_NO_MEMORY))
1520 {
1521 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1522 rc2 = VMR3ReqProcessU(pVM->pUVM, VMCPUID_ANY, false /*fPriorityOnly*/);
1523 if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE) /** @todo this shouldn't be necessary */
1524 {
1525 Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
1526 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1527 return rc2;
1528 }
1529 UPDATE_RC();
1530 /** @todo HACK ALERT! The following test is to make sure EM+TM
1531 * thinks the VM is stopped/reset before the next VM state change
1532 * is made. We need a better solution for this, or at least make it
1533 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1534 * VINF_EM_SUSPEND). */
1535 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1536 {
1537 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1538 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1539 return rc;
1540 }
1541 }
1542
1543 /* check that we got them all */
1544 AssertCompile(VM_FF_NORMAL_PRIORITY_MASK == (VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA | VM_FF_EMT_RENDEZVOUS));
1545 }
1546
1547 /*
1548 * Normal priority then. (per-VCPU)
1549 * (Executed in no particular order.)
1550 */
1551 if ( !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY)
1552 && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_NORMAL_PRIORITY_MASK))
1553 {
1554 /*
1555 * Requests from other threads.
1556 */
1557 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_REQUEST))
1558 {
1559 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1560 rc2 = VMR3ReqProcessU(pVM->pUVM, pVCpu->idCpu, false /*fPriorityOnly*/);
1561 if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE || rc2 == VINF_EM_RESET)
1562 {
1563 Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
1564 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1565 return rc2;
1566 }
1567 UPDATE_RC();
1568 /** @todo HACK ALERT! The following test is to make sure EM+TM
1569 * thinks the VM is stopped/reset before the next VM state change
1570 * is made. We need a better solution for this, or at least make it
1571 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1572 * VINF_EM_SUSPEND). */
1573 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1574 {
1575 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1576 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1577 return rc;
1578 }
1579 }
1580
1581 /* check that we got them all */
1582 Assert(!(VMCPU_FF_NORMAL_PRIORITY_MASK & ~VMCPU_FF_REQUEST));
1583 }
1584
1585 /*
1586 * High priority pre execution chunk last.
1587 * (Executed in ascending priority order.)
1588 */
1589 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_HIGH_PRIORITY_PRE_MASK)
1590 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_MASK))
1591 {
1592 /*
1593 * Timers before interrupts.
1594 */
1595 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TIMER)
1596 && !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
1597 TMR3TimerQueuesDo(pVM);
1598
1599#if !defined(VBOX_VMM_TARGET_ARMV8)
1600 /*
1601 * Pick up asynchronously posted interrupts into the APIC.
1602 */
1603 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
1604 APICUpdatePendingInterrupts(pVCpu);
1605
1606 /*
1607 * The instruction following an emulated STI should *always* be executed!
1608 *
1609 * Note! We intentionally don't clear CPUMCTX_INHIBIT_INT here if
1610 * the eip is the same as the inhibited instr address. Before we
1611 * are able to execute this instruction in raw mode (iret to
1612 * guest code) an external interrupt might force a world switch
1613 * again. Possibly allowing a guest interrupt to be dispatched
1614 * in the process. This could break the guest. Sounds very
1615         * unlikely, but such timing-sensitive problems are not as rare as
1616 * you might think.
1617 *
1618 * Note! This used to be a force action flag. Can probably ditch this code.
1619 */
1620 if ( CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
1621 && !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
1622 {
1623 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_INHIBIT_INT);
1624 if (CPUMGetGuestRIP(pVCpu) != pVCpu->cpum.GstCtx.uRipInhibitInt)
1625 {
1626 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx);
1627 Log(("Clearing CPUMCTX_INHIBIT_INT at %RGv - successor %RGv\n",
1628 (RTGCPTR)CPUMGetGuestRIP(pVCpu), (RTGCPTR)pVCpu->cpum.GstCtx.uRipInhibitInt));
1629 }
1630 else
1631 Log(("Leaving CPUMCTX_INHIBIT_INT set at %RGv\n", (RTGCPTR)CPUMGetGuestRIP(pVCpu)));
1632 }
1633
1634 /** @todo SMIs. If we implement SMIs, this is where they will have to be
1635 * delivered. */
1636
1637# ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1638 if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
1639 | VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW))
1640 {
1641 /*
1642 * VMX Nested-guest APIC-write pending (can cause VM-exits).
1643 * Takes priority over even SMI and INIT signals.
1644 * See Intel spec. 29.4.3.2 "APIC-Write Emulation".
1645 */
1646 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
1647 {
1648 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexitApicWrite(pVCpu));
1649 if (rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
1650 UPDATE_RC();
1651 }
1652
1653 /*
1654          * APIC write emulation MAY have caused a VM-exit.
1655 * If it did cause a VM-exit, there's no point checking the other VMX non-root mode FFs here.
1656 */
1657 if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
1658 {
1659 /*
1660 * VMX Nested-guest monitor-trap flag (MTF) VM-exit.
1661 * Takes priority over "Traps on the previous instruction".
1662 * See Intel spec. 6.9 "Priority Among Simultaneous Exceptions And Interrupts".
1663 */
1664 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF))
1665 {
1666 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* uExitQual */));
1667 Assert(rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE);
1668 UPDATE_RC();
1669 }
1670 /*
1671 * VMX Nested-guest preemption timer VM-exit.
1672 * Takes priority over NMI-window VM-exits.
1673 */
1674 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
1675 {
1676 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexitPreemptTimer(pVCpu));
1677 Assert(rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE);
1678 UPDATE_RC();
1679 }
1680 /*
1681 * VMX interrupt-window and NMI-window VM-exits.
1682 * Takes priority over non-maskable interrupts (NMIs) and external interrupts respectively.
1683          * If we are in an interrupt shadow or if we are already in the process of delivering
1684 * an event then these VM-exits cannot occur.
1685 *
1686 * Interrupt shadows block NMI-window VM-exits.
1687 * Any event that is already in TRPM (e.g. injected during VM-entry) takes priority.
1688 *
1689 * See Intel spec. 25.2 "Other Causes Of VM Exits".
1690 * See Intel spec. 26.7.6 "NMI-Window Exiting".
1691 * See Intel spec. 6.7 "Nonmaskable Interrupt (NMI)".
1692 */
1693 else if ( !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
1694 && !CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx)
1695 && !TRPMHasTrap(pVCpu))
1696 {
1697 /*
1698 * VMX NMI-window VM-exit.
1699 */
1700 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
1701 && !CPUMIsGuestVmxVirtNmiBlocking(&pVCpu->cpum.GstCtx))
1702 {
1703 Assert(CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_NMI_WINDOW_EXIT));
1704 Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
1705 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* uExitQual */));
1706 AssertMsg( rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE
1707 && rc2 != VINF_VMX_VMEXIT
1708 && rc2 != VINF_NO_CHANGE, ("%Rrc\n", rc2));
1709 UPDATE_RC();
1710 }
1711 /*
1712 * VMX interrupt-window VM-exit.
1713 * This is a bit messy with the way the code below is currently structured,
1714 * but checking VMCPU_FF_INTERRUPT_NMI here (combined with CPUMAreInterruptsInhibitedByNmi
1715 * already checked at this point) should allow a pending NMI to be delivered prior to
1716 * causing an interrupt-window VM-exit.
1717 */
1718 /** @todo Restructure this later to happen after injecting NMI/causing NMI-exit, see
1719 * code in VMX R0 event delivery. */
1720 else if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
1721 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI)
1722 && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
1723 {
1724 Assert(CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INT_WINDOW_EXIT));
1725 Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
1726 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* uExitQual */));
1727 AssertMsg( rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE
1728 && rc2 != VINF_VMX_VMEXIT
1729 && rc2 != VINF_NO_CHANGE, ("%Rrc\n", rc2));
1730 UPDATE_RC();
1731 }
1732 }
1733 }
1734
1735 /*
1736 * Interrupt-window and NMI-window force flags might still be pending if we didn't actually cause
1737 * a VM-exit above. They will get cleared eventually when ANY nested-guest VM-exit occurs.
1738 * However, the force flags asserted below MUST have been cleared at this point.
1739 */
1740 Assert(!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER));
1741 }
1742# endif
1743
1744 /*
1745 * Guest event injection.
1746 */
1747 Assert(!(pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI)));
1748 bool fWakeupPending = false;
1749 if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW
1750 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_NESTED_GUEST
1751 | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
1752 && !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY)
1753 && (!rc || rc >= VINF_EM_RESCHEDULE_HM)
1754 && !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx) /* Interrupt shadows block both NMIs and interrupts. */
1755 && !TRPMHasTrap(pVCpu)) /* An event could already be scheduled for dispatching. */
1756 {
1757 if (CPUMGetGuestGif(&pVCpu->cpum.GstCtx))
1758 {
1759 bool fInVmxNonRootMode;
1760 bool fInSvmHwvirtMode;
1761 if (!CPUMIsGuestInNestedHwvirtMode(&pVCpu->cpum.GstCtx))
1762 {
1763 fInVmxNonRootMode = false;
1764 fInSvmHwvirtMode = false;
1765 }
1766 else
1767 {
1768 fInVmxNonRootMode = CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx);
1769 fInSvmHwvirtMode = CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.GstCtx);
1770 }
1771
1772 /*
1773 * NMIs (take priority over external interrupts).
1774 */
1775 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI)
1776 && !CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
1777 {
1778# ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1779 if ( fInVmxNonRootMode
1780 && CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_NMI_EXIT))
1781 {
1782 /* We MUST clear the NMI force-flag here, see @bugref{10318#c19}. */
1783 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
1784 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexitXcptNmi(pVCpu));
1785 Assert(rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE);
1786 UPDATE_RC();
1787 }
1788 else
1789# endif
1790# ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1791 if ( fInSvmHwvirtMode
1792 && CPUMIsGuestSvmCtrlInterceptSet(pVCpu, &pVCpu->cpum.GstCtx, SVM_CTRL_INTERCEPT_NMI))
1793 {
1794 rc2 = VBOXSTRICTRC_VAL(IEMExecSvmVmexit(pVCpu, SVM_EXIT_NMI, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */));
1795 AssertMsg( rc2 != VINF_SVM_VMEXIT
1796 && rc2 != VINF_NO_CHANGE, ("%Rrc\n", rc2));
1797 UPDATE_RC();
1798 }
1799 else
1800# endif
1801 {
1802 rc2 = TRPMAssertTrap(pVCpu, X86_XCPT_NMI, TRPM_TRAP);
1803 if (rc2 == VINF_SUCCESS)
1804 {
1805 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
1806 fWakeupPending = true;
1807# if 0 /* HMR3IsActive is not reliable (esp. after restore), just return VINF_EM_RESCHEDULE. */
1808 if (pVM->em.s.fIemExecutesAll)
1809 rc2 = VINF_EM_RESCHEDULE;
1810 else
1811 {
1812 rc2 = HMR3IsActive(pVCpu) ? VINF_EM_RESCHEDULE_HM
1813 : VM_IS_NEM_ENABLED(pVM) ? VINF_EM_RESCHEDULE
1814 : VINF_EM_RESCHEDULE_REM;
1815 }
1816# else
1817 rc2 = VINF_EM_RESCHEDULE;
1818# endif
1819 }
1820 UPDATE_RC();
1821 }
1822 }
1823# ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1824             /** @todo NSTSVM: Handle this for SVM here too later, not when an interrupt is
1825 * actually pending like we currently do. */
1826# endif
1827 /*
1828 * External interrupts.
1829 */
1830 else
1831 {
1832 /*
1833                  * VMX: virtual interrupts take priority over physical interrupts.
1834                  * SVM: physical interrupts take priority over virtual interrupts.
1835 */
1836 if ( fInVmxNonRootMode
1837 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST)
1838 && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
1839 {
1840 /** @todo NSTVMX: virtual-interrupt delivery. */
1841 rc2 = VINF_SUCCESS;
1842 }
1843 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
1844 && CPUMIsGuestPhysIntrEnabled(pVCpu))
1845 {
1846 Assert(pVCpu->em.s.enmState != EMSTATE_WAIT_SIPI);
1847 if (fInVmxNonRootMode)
1848 rc2 = emR3VmxNstGstIntrIntercept(pVCpu);
1849 else if (fInSvmHwvirtMode)
1850 rc2 = emR3SvmNstGstIntrIntercept(pVCpu);
1851 else
1852 rc2 = VINF_NO_CHANGE;
1853
1854 if (rc2 == VINF_NO_CHANGE)
1855 {
1856 bool fInjected = false;
1857 CPUM_IMPORT_EXTRN_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
1858 /** @todo this really isn't nice, should properly handle this */
1859 /* Note! This can still cause a VM-exit (on Intel). */
1860 LogFlow(("Calling TRPMR3InjectEvent: %04x:%08RX64 efl=%#x\n",
1861 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eflags));
1862 rc2 = TRPMR3InjectEvent(pVM, pVCpu, TRPM_HARDWARE_INT, &fInjected);
1863 fWakeupPending = true;
1864 if ( pVM->em.s.fIemExecutesAll
1865 && ( rc2 == VINF_EM_RESCHEDULE_REM
1866 || rc2 == VINF_EM_RESCHEDULE_HM
1867 || rc2 == VINF_EM_RESCHEDULE_RAW))
1868 {
1869 rc2 = VINF_EM_RESCHEDULE;
1870 }
1871# ifdef VBOX_STRICT
1872 if (fInjected)
1873 rcIrq = rc2;
1874# endif
1875 }
1876 UPDATE_RC();
1877 }
1878 else if ( fInSvmHwvirtMode
1879 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST)
1880 && CPUMIsGuestSvmVirtIntrEnabled(pVCpu, &pVCpu->cpum.GstCtx))
1881 {
1882 rc2 = emR3SvmNstGstVirtIntrIntercept(pVCpu);
1883 if (rc2 == VINF_NO_CHANGE)
1884 {
1885 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST);
1886 uint8_t const uNstGstVector = CPUMGetGuestSvmVirtIntrVector(&pVCpu->cpum.GstCtx);
1887 AssertMsg(uNstGstVector > 0 && uNstGstVector <= X86_XCPT_LAST, ("Invalid VINTR %#x\n", uNstGstVector));
1888 TRPMAssertTrap(pVCpu, uNstGstVector, TRPM_HARDWARE_INT);
1889 Log(("EM: Asserting nested-guest virt. hardware intr: %#x\n", uNstGstVector));
1890 rc2 = VINF_EM_RESCHEDULE;
1891# ifdef VBOX_STRICT
1892 rcIrq = rc2;
1893# endif
1894 }
1895 UPDATE_RC();
1896 }
1897 }
1898 } /* CPUMGetGuestGif */
1899 }
1900
1901#else /* VBOX_VMM_TARGET_ARMV8 */
1902 bool fWakeupPending = false;
1903
1904 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VTIMER_ACTIVATED))
1905 {
1906 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_VTIMER_ACTIVATED);
1907
1908 fWakeupPending = true;
1909 rc2 = VINF_EM_RESCHEDULE;
1910 }
1911#endif /* VBOX_VMM_TARGET_ARMV8 */
1912
1913 /*
1914 * Allocate handy pages.
1915 */
1916 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PGM_NEED_HANDY_PAGES, VM_FF_PGM_NO_MEMORY))
1917 {
1918 rc2 = PGMR3PhysAllocateHandyPages(pVM);
1919 UPDATE_RC();
1920 }
1921
1922 /*
1923 * Debugger Facility request.
1924 */
1925 if ( ( VM_FF_IS_SET(pVM, VM_FF_DBGF)
1926 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_DBGF) )
1927 && !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY) )
1928 {
1929 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1930 rc2 = DBGFR3VMMForcedAction(pVM, pVCpu);
1931 UPDATE_RC();
1932 }
1933
1934 /*
1935 * EMT Rendezvous (must be serviced before termination).
1936 */
1937 if ( !fWakeupPending /* don't miss the wakeup from EMSTATE_HALTED! */
1938 && VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
1939 {
1940 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1941 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
1942 UPDATE_RC();
1943 /** @todo HACK ALERT! The following test is to make sure EM+TM thinks the VM is
1944 * stopped/reset before the next VM state change is made. We need a better
1945 * solution for this, or at least make it possible to do: (rc >= VINF_EM_FIRST
1946              * && rc <= VINF_EM_SUSPEND). */
1947 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1948 {
1949 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1950 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1951 return rc;
1952 }
1953 }
1954
1955 /*
1956 * State change request (cleared by vmR3SetStateLocked).
1957 */
1958 if ( !fWakeupPending /* don't miss the wakeup from EMSTATE_HALTED! */
1959 && VM_FF_IS_SET(pVM, VM_FF_CHECK_VM_STATE))
1960 {
1961 VMSTATE enmState = VMR3GetState(pVM);
1962 switch (enmState)
1963 {
1964 case VMSTATE_FATAL_ERROR:
1965 case VMSTATE_FATAL_ERROR_LS:
1966 case VMSTATE_GURU_MEDITATION:
1967 case VMSTATE_GURU_MEDITATION_LS:
1968 Log2(("emR3ForcedActions: %s -> VINF_EM_SUSPEND\n", VMGetStateName(enmState) ));
1969 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1970 return VINF_EM_SUSPEND;
1971
1972 case VMSTATE_DESTROYING:
1973 Log2(("emR3ForcedActions: %s -> VINF_EM_TERMINATE\n", VMGetStateName(enmState) ));
1974 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1975 return VINF_EM_TERMINATE;
1976
1977 default:
1978 AssertMsgFailed(("%s\n", VMGetStateName(enmState)));
1979 }
1980 }
1981
1982 /*
1983 * Out of memory? Since most of our fellow high priority actions may cause us
1984 * to run out of memory, we're employing VM_FF_IS_PENDING_EXCEPT and putting this
1985 * at the end rather than the start. Also, VM_FF_TERMINATE has higher priority
1986 * than us since we can terminate without allocating more memory.
1987 */
1988 if (VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
1989 {
1990 rc2 = PGMR3PhysAllocateHandyPages(pVM);
1991 UPDATE_RC();
1992 if (rc == VINF_EM_NO_MEMORY)
1993 return rc;
1994 }
1995
1996 /*
1997 * If the virtual sync clock is still stopped, make TM restart it.
1998 */
1999 if (VM_FF_IS_SET(pVM, VM_FF_TM_VIRTUAL_SYNC))
2000 TMR3VirtualSyncFF(pVM, pVCpu);
2001
2002#ifdef DEBUG
2003 /*
2004 * Debug, pause the VM.
2005 */
2006 if (VM_FF_IS_SET(pVM, VM_FF_DEBUG_SUSPEND))
2007 {
2008 VM_FF_CLEAR(pVM, VM_FF_DEBUG_SUSPEND);
2009 Log(("emR3ForcedActions: returns VINF_EM_SUSPEND\n"));
2010 return VINF_EM_SUSPEND;
2011 }
2012#endif
2013
2014 /* check that we got them all */
2015 AssertCompile(VM_FF_HIGH_PRIORITY_PRE_MASK == (VM_FF_TM_VIRTUAL_SYNC | VM_FF_DBGF | VM_FF_CHECK_VM_STATE | VM_FF_DEBUG_SUSPEND | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
2016#if defined(VBOX_VMM_TARGET_ARMV8)
2017 AssertCompile(VMCPU_FF_HIGH_PRIORITY_PRE_MASK == (VMCPU_FF_TIMER | VMCPU_FF_INTERRUPT_IRQ | VMCPU_FF_INTERRUPT_FIQ | VMCPU_FF_DBGF));
2018#else
2019 AssertCompile(VMCPU_FF_HIGH_PRIORITY_PRE_MASK == (VMCPU_FF_TIMER | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_DBGF | VMCPU_FF_INTERRUPT_NESTED_GUEST | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_PREEMPT_TIMER | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW));
2020#endif
2021 }
2022
2023#undef UPDATE_RC
2024 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2025 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2026 Assert(rcIrq == VINF_SUCCESS || rcIrq == rc);
2027 return rc;
2028}
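/*
 * Illustrative sketch (editor's example, not part of EM.cpp): the status-code
 * merging idea behind the rc/rc2 + UPDATE_RC() pattern used throughout
 * emR3ForcedActions() above. The real UPDATE_RC() macro is defined earlier in
 * this file and is not reproduced here; the code below only assumes, for the
 * example, that more urgent VINF_EM_* style codes compare lower than less
 * urgent ones and that success is zero. The EX_* values are made up.
 */
#include <stdio.h>

#define EX_SUCCESS        0   /* hypothetical "nothing pending" */
#define EX_EM_SUSPEND     3   /* hypothetical, more urgent (lower value) */
#define EX_EM_RESCHEDULE  10  /* hypothetical, less urgent */

/* Merge a new pending status (rc2) into the accumulated one (rc), keeping the more urgent code. */
static int exMergeRc(int rc, int rc2)
{
    if (rc2 != EX_SUCCESS && (rc == EX_SUCCESS || rc2 < rc))
        return rc2;
    return rc;
}

int main(void)
{
    int rc = EX_SUCCESS;
    rc = exMergeRc(rc, EX_EM_RESCHEDULE); /* one action requests a reschedule */
    rc = exMergeRc(rc, EX_EM_SUSPEND);    /* a later action requests a suspend; being more urgent, it wins */
    printf("final rc = %d\n", rc);        /* prints 3 */
    return 0;
}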
2029
2030
2031/**
2032 * Check whether the configured CPU execution-time cap still allows guest execution in the current time slice.
2033 *
2034 * @returns true if allowed, false otherwise
2035 * @param pVM The cross context VM structure.
2036 * @param pVCpu The cross context virtual CPU structure.
2037 */
2038bool emR3IsExecutionAllowedSlow(PVM pVM, PVMCPU pVCpu)
2039{
2040 Assert(pVM->uCpuExecutionCap != 100);
2041 uint64_t cMsUserTime;
2042 uint64_t cMsKernelTime;
2043 if (RT_SUCCESS(RTThreadGetExecutionTimeMilli(&cMsKernelTime, &cMsUserTime)))
2044 {
2045 uint64_t const msTimeNow = RTTimeMilliTS();
2046 if (pVCpu->em.s.msTimeSliceStart + EM_TIME_SLICE < msTimeNow)
2047 {
2048 /* New time slice. */
2049 pVCpu->em.s.msTimeSliceStart = msTimeNow;
2050 pVCpu->em.s.cMsTimeSliceStartExec = cMsKernelTime + cMsUserTime;
2051 pVCpu->em.s.cMsTimeSliceExec = 0;
2052 }
2053 pVCpu->em.s.cMsTimeSliceExec = cMsKernelTime + cMsUserTime - pVCpu->em.s.cMsTimeSliceStartExec;
2054
2055 bool const fRet = pVCpu->em.s.cMsTimeSliceExec < (EM_TIME_SLICE * pVM->uCpuExecutionCap) / 100;
2056 Log2(("emR3IsExecutionAllowed: start=%RX64 startexec=%RX64 exec=%RX64 (cap=%x)\n", pVCpu->em.s.msTimeSliceStart,
2057 pVCpu->em.s.cMsTimeSliceStartExec, pVCpu->em.s.cMsTimeSliceExec, (EM_TIME_SLICE * pVM->uCpuExecutionCap) / 100));
2058 return fRet;
2059 }
2060 return true;
2061}
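/*
 * Worked example (editor's sketch, not part of EM.cpp): the per-slice budget
 * computed above is (EM_TIME_SLICE * uCpuExecutionCap) / 100 milliseconds of
 * combined kernel+user CPU time. The 100 ms slice length used below is an
 * assumed value for the example; only the formula mirrors the function above.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool exIsExecutionAllowed(uint64_t cMsExecInSlice, uint32_t uCpuExecutionCap, uint64_t cMsSlice)
{
    /* e.g. cMsSlice=100, cap=50  ->  budget of 50 ms of CPU time per 100 ms slice. */
    uint64_t const cMsBudget = cMsSlice * uCpuExecutionCap / 100;
    return cMsExecInSlice < cMsBudget;
}

int main(void)
{
    printf("35 ms used, cap 50%%: %s\n", exIsExecutionAllowed(35, 50, 100) ? "allowed" : "throttled");
    printf("60 ms used, cap 50%%: %s\n", exIsExecutionAllowed(60, 50, 100) ? "allowed" : "throttled");
    return 0;
}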
2062
2063
2064/**
2065 * Execute VM.
2066 *
2067 * This function is the main loop of the VM. The emulation thread
2068 * calls this function when the VM has been successfully constructed
2069 * and we're ready for executing the VM.
2070 * and we're ready to execute the VM.
2071 * Returning from this function means that the VM is turned off or
2072 * suspended (state already saved) and deconstruction is next in line.
2073 *
2074 * All interaction from other threads is done using forced actions
2075 * and signalling of the wait object.
2076 *
2077 * @returns VBox status code; informational status codes may indicate failure.
2078 * @param pVM The cross context VM structure.
2079 * @param pVCpu The cross context virtual CPU structure.
2080 */
2081VMMR3_INT_DECL(int) EMR3ExecuteVM(PVM pVM, PVMCPU pVCpu)
2082{
2083 Log(("EMR3ExecuteVM: pVM=%p enmVMState=%d (%s) enmState=%d (%s) enmPrevState=%d (%s)\n",
2084 pVM,
2085 pVM->enmVMState, VMR3GetStateName(pVM->enmVMState),
2086 pVCpu->em.s.enmState, emR3GetStateName(pVCpu->em.s.enmState),
2087 pVCpu->em.s.enmPrevState, emR3GetStateName(pVCpu->em.s.enmPrevState) ));
2088 VM_ASSERT_EMT(pVM);
2089 AssertMsg( pVCpu->em.s.enmState == EMSTATE_NONE
2090 || pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI
2091 || pVCpu->em.s.enmState == EMSTATE_SUSPENDED,
2092 ("%s\n", emR3GetStateName(pVCpu->em.s.enmState)));
2093
2094 int rc = setjmp(pVCpu->em.s.u.FatalLongJump);
2095 if (rc == 0)
2096 {
2097 /*
2098 * Start the virtual time.
2099 */
2100 TMR3NotifyResume(pVM, pVCpu);
2101
2102 /*
2103 * The Outer Main Loop.
2104 */
2105 bool fFFDone = false;
2106
2107 /* Reschedule right away to start in the right state. */
2108 rc = VINF_SUCCESS;
2109
2110         /* If resuming after a pause or a state load, restore the previous
2111            state so we don't start executing code prematurely; otherwise, just reschedule. */
2112 if ( pVCpu->em.s.enmState == EMSTATE_SUSPENDED
2113 && ( pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
2114 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED))
2115 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
2116 else
2117 pVCpu->em.s.enmState = emR3Reschedule(pVM, pVCpu);
2118 Log(("EMR3ExecuteVM: enmState=%s\n", emR3GetStateName(pVCpu->em.s.enmState)));
2119
2120 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2121 for (;;)
2122 {
2123 /*
2124 * Before we can schedule anything (we're here because
2125 * scheduling is required) we must service any pending
2126 * forced actions to avoid any pending action causing
2127              * immediate rescheduling upon entering an inner loop.
2128 *
2129 * Do forced actions.
2130 */
2131 if ( !fFFDone
2132 && RT_SUCCESS(rc)
2133 && rc != VINF_EM_TERMINATE
2134 && rc != VINF_EM_OFF
2135 && ( VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_REM_MASK)
2136 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_ALL_REM_MASK & ~VMCPU_FF_UNHALT)))
2137 {
2138 rc = emR3ForcedActions(pVM, pVCpu, rc);
2139 VBOXVMM_EM_FF_ALL_RET(pVCpu, rc);
2140 }
2141 else if (fFFDone)
2142 fFFDone = false;
2143
2144#if defined(VBOX_STRICT) && !defined(VBOX_VMM_TARGET_ARMV8)
2145 CPUMAssertGuestRFlagsCookie(pVM, pVCpu);
2146#endif
2147
2148 /*
2149 * Now what to do?
2150 */
2151 Log2(("EMR3ExecuteVM: rc=%Rrc\n", rc));
2152 EMSTATE const enmOldState = pVCpu->em.s.enmState;
2153 switch (rc)
2154 {
2155 /*
2156 * Keep doing what we're currently doing.
2157 */
2158 case VINF_SUCCESS:
2159 break;
2160
2161 /*
2162 * Reschedule - to raw-mode execution.
2163 */
2164/** @todo r=bird: consider merging VINF_EM_RESCHEDULE_RAW with VINF_EM_RESCHEDULE_HM, they serve the same purpose here at least. */
2165 case VINF_EM_RESCHEDULE_RAW:
2166 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2167 AssertLogRelFailed();
2168 pVCpu->em.s.enmState = EMSTATE_NONE;
2169 break;
2170
2171 /*
2172 * Reschedule - to HM or NEM.
2173 */
2174 case VINF_EM_RESCHEDULE_HM:
2175 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2176#if !defined(VBOX_VMM_TARGET_ARMV8)
2177 if (VM_IS_HM_ENABLED(pVM))
2178 {
2179 if (HMCanExecuteGuest(pVM, pVCpu, &pVCpu->cpum.GstCtx))
2180 {
2181 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_HM: %d -> %d (EMSTATE_HM)\n", enmOldState, EMSTATE_HM));
2182 pVCpu->em.s.enmState = EMSTATE_HM;
2183 }
2184 else
2185 {
2186 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_HM: %d -> %d (EMSTATE_RECOMPILER)\n", enmOldState, EMSTATE_RECOMPILER));
2187 pVCpu->em.s.enmState = EMSTATE_RECOMPILER;
2188 }
2189 }
2190 else
2191#endif
2192 if (VM_IS_NEM_ENABLED(pVM))
2193 {
2194 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_HM: %d -> %d (EMSTATE_NEM)\n", enmOldState, EMSTATE_NEM));
2195 pVCpu->em.s.enmState = EMSTATE_NEM;
2196 }
2197 else
2198 {
2199 AssertLogRelFailed();
2200 pVCpu->em.s.enmState = EMSTATE_NONE;
2201 }
2202 break;
2203
2204 /*
2205 * Reschedule - to recompiled execution.
2206 */
2207 case VINF_EM_RESCHEDULE_REM:
2208 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2209 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_REM: %d -> %d (EMSTATE_REM)\n",
2210 enmOldState, EMSTATE_RECOMPILER));
2211 pVCpu->em.s.enmState = EMSTATE_RECOMPILER;
2212 break;
2213
2214 /*
2215 * Resume.
2216 */
2217 case VINF_EM_RESUME:
2218 Log2(("EMR3ExecuteVM: VINF_EM_RESUME: %d -> VINF_EM_RESCHEDULE\n", enmOldState));
2219 /* Don't reschedule in the halted or wait for SIPI case. */
2220 if ( pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
2221 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED)
2222 {
2223 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
2224 break;
2225 }
2226 /* fall through and get scheduled. */
2227 RT_FALL_THRU();
2228
2229 /*
2230 * Reschedule.
2231 */
2232 case VINF_EM_RESCHEDULE:
2233 {
2234 EMSTATE enmState = emR3Reschedule(pVM, pVCpu);
2235 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE: %d -> %d (%s)\n", enmOldState, enmState, emR3GetStateName(enmState)));
2236 pVCpu->em.s.enmState = enmState;
2237 break;
2238 }
2239
2240 /*
2241 * Halted.
2242 */
2243 case VINF_EM_HALT:
2244 Log2(("EMR3ExecuteVM: VINF_EM_HALT: %d -> %d\n", enmOldState, EMSTATE_HALTED));
2245 pVCpu->em.s.enmState = EMSTATE_HALTED;
2246 break;
2247
2248 /*
2249 * Switch to the wait for SIPI state (application processor only)
2250 */
2251 case VINF_EM_WAIT_SIPI:
2252 Assert(pVCpu->idCpu != 0);
2253 Log2(("EMR3ExecuteVM: VINF_EM_WAIT_SIPI: %d -> %d\n", enmOldState, EMSTATE_WAIT_SIPI));
2254 pVCpu->em.s.enmState = EMSTATE_WAIT_SIPI;
2255 break;
2256
2257
2258 /*
2259 * Suspend.
2260 */
2261 case VINF_EM_SUSPEND:
2262 Log2(("EMR3ExecuteVM: VINF_EM_SUSPEND: %d -> %d\n", enmOldState, EMSTATE_SUSPENDED));
2263 Assert(enmOldState != EMSTATE_SUSPENDED);
2264 pVCpu->em.s.enmPrevState = enmOldState;
2265 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
2266 break;
2267
2268 /*
2269 * Reset.
2270              * We might end up doing a double reset for now; we'll have to clean up the mess later.
2271 */
2272 case VINF_EM_RESET:
2273 {
2274 if (pVCpu->idCpu == 0)
2275 {
2276 EMSTATE enmState = emR3Reschedule(pVM, pVCpu);
2277 Log2(("EMR3ExecuteVM: VINF_EM_RESET: %d -> %d (%s)\n", enmOldState, enmState, emR3GetStateName(enmState)));
2278 pVCpu->em.s.enmState = enmState;
2279 }
2280 else
2281 {
2282 /* All other VCPUs go into the wait for SIPI state. */
2283 pVCpu->em.s.enmState = EMSTATE_WAIT_SIPI;
2284 }
2285 break;
2286 }
2287
2288 /*
2289 * Power Off.
2290 */
2291 case VINF_EM_OFF:
2292 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2293 Log2(("EMR3ExecuteVM: returns VINF_EM_OFF (%d -> %d)\n", enmOldState, EMSTATE_TERMINATING));
2294 TMR3NotifySuspend(pVM, pVCpu);
2295 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2296 return rc;
2297
2298 /*
2299 * Terminate the VM.
2300 */
2301 case VINF_EM_TERMINATE:
2302 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2303 Log(("EMR3ExecuteVM returns VINF_EM_TERMINATE (%d -> %d)\n", enmOldState, EMSTATE_TERMINATING));
2304 if (pVM->enmVMState < VMSTATE_DESTROYING) /* ugly */
2305 TMR3NotifySuspend(pVM, pVCpu);
2306 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2307 return rc;
2308
2309
2310 /*
2311 * Out of memory, suspend the VM and stuff.
2312 */
2313 case VINF_EM_NO_MEMORY:
2314 Log2(("EMR3ExecuteVM: VINF_EM_NO_MEMORY: %d -> %d\n", enmOldState, EMSTATE_SUSPENDED));
2315 Assert(enmOldState != EMSTATE_SUSPENDED);
2316 pVCpu->em.s.enmPrevState = enmOldState;
2317 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
2318 TMR3NotifySuspend(pVM, pVCpu);
2319 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2320
2321 rc = VMSetRuntimeError(pVM, VMSETRTERR_FLAGS_SUSPEND, "HostMemoryLow",
2322 N_("Unable to allocate and lock memory. The virtual machine will be paused. Please close applications to free up memory or close the VM"));
2323 if (rc != VINF_EM_SUSPEND)
2324 {
2325 if (RT_SUCCESS_NP(rc))
2326 {
2327 AssertLogRelMsgFailed(("%Rrc\n", rc));
2328 rc = VERR_EM_INTERNAL_ERROR;
2329 }
2330 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2331 }
2332 return rc;
2333
2334 /*
2335 * Guest debug events.
2336 */
2337 case VINF_EM_DBG_STEPPED:
2338 case VINF_EM_DBG_STOP:
2339 case VINF_EM_DBG_EVENT:
2340 case VINF_EM_DBG_BREAKPOINT:
2341 case VINF_EM_DBG_STEP:
2342 if (enmOldState == EMSTATE_HM)
2343 {
2344 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_HM));
2345 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_HM;
2346 }
2347 else if (enmOldState == EMSTATE_NEM)
2348 {
2349 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_NEM));
2350 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_NEM;
2351 }
2352 else if (enmOldState == EMSTATE_RECOMPILER)
2353 {
2354 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_RECOMPILER));
2355 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_RECOMPILER;
2356 }
2357 else
2358 {
2359 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_IEM));
2360 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_IEM;
2361 }
2362 break;
2363
2364 /*
2365 * Hypervisor debug events.
2366 */
2367 case VINF_EM_DBG_HYPER_STEPPED:
2368 case VINF_EM_DBG_HYPER_BREAKPOINT:
2369 case VINF_EM_DBG_HYPER_ASSERTION:
2370 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_HYPER));
2371 pVCpu->em.s.enmState = EMSTATE_DEBUG_HYPER;
2372 break;
2373
2374 /*
2375 * Triple fault.
2376 */
2377 case VINF_EM_TRIPLE_FAULT:
2378 if (!pVM->em.s.fGuruOnTripleFault)
2379 {
2380 Log(("EMR3ExecuteVM: VINF_EM_TRIPLE_FAULT: CPU reset...\n"));
2381 rc = VBOXSTRICTRC_TODO(VMR3ResetTripleFault(pVM));
2382 Log2(("EMR3ExecuteVM: VINF_EM_TRIPLE_FAULT: %d -> %d (rc=%Rrc)\n", enmOldState, pVCpu->em.s.enmState, rc));
2383 continue;
2384 }
2385 /* Else fall through and trigger a guru. */
2386 RT_FALL_THRU();
2387
2388 case VERR_VMM_RING0_ASSERTION:
2389 Log(("EMR3ExecuteVM: %Rrc: %d -> %d (EMSTATE_GURU_MEDITATION)\n", rc, enmOldState, EMSTATE_GURU_MEDITATION));
2390 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2391 break;
2392
2393 /*
2394 * Any error code showing up here other than the ones we
2395              * know and process above is considered to be FATAL.
2396 *
2397 * Unknown warnings and informational status codes are also
2398 * included in this.
2399 */
2400 default:
2401 if (RT_SUCCESS_NP(rc))
2402 {
2403 AssertMsgFailed(("Unexpected warning or informational status code %Rra!\n", rc));
2404 rc = VERR_EM_INTERNAL_ERROR;
2405 }
2406 Log(("EMR3ExecuteVM: %Rrc: %d -> %d (EMSTATE_GURU_MEDITATION)\n", rc, enmOldState, EMSTATE_GURU_MEDITATION));
2407 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2408 break;
2409 }
2410
2411 /*
2412 * Act on state transition.
2413 */
2414 EMSTATE const enmNewState = pVCpu->em.s.enmState;
2415 if (enmOldState != enmNewState)
2416 {
2417 VBOXVMM_EM_STATE_CHANGED(pVCpu, enmOldState, enmNewState, rc);
2418
2419 /* Clear MWait flags and the unhalt FF. */
2420 if ( enmOldState == EMSTATE_HALTED
2421 && ( (pVCpu->em.s.MWait.fWait & EMMWAIT_FLAG_ACTIVE)
2422 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_UNHALT))
2423 && ( enmNewState == EMSTATE_HM
2424 || enmNewState == EMSTATE_NEM
2425 || enmNewState == EMSTATE_RECOMPILER
2426 || enmNewState == EMSTATE_DEBUG_GUEST_HM
2427 || enmNewState == EMSTATE_DEBUG_GUEST_NEM
2428 || enmNewState == EMSTATE_DEBUG_GUEST_IEM
2429 || enmNewState == EMSTATE_DEBUG_GUEST_RECOMPILER) )
2430 {
2431 if (pVCpu->em.s.MWait.fWait & EMMWAIT_FLAG_ACTIVE)
2432 {
2433 LogFlow(("EMR3ExecuteVM: Clearing MWAIT\n"));
2434 pVCpu->em.s.MWait.fWait &= ~(EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0);
2435 }
2436 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_UNHALT))
2437 {
2438 LogFlow(("EMR3ExecuteVM: Clearing UNHALT\n"));
2439 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_UNHALT);
2440 }
2441 }
2442 }
2443 else
2444 VBOXVMM_EM_STATE_UNCHANGED(pVCpu, enmNewState, rc);
2445
2446 STAM_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x); /* (skip this in release) */
2447 STAM_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2448
2449 /*
2450 * Act on the new state.
2451 */
2452 switch (enmNewState)
2453 {
2454 /*
2455 * Execute hardware accelerated raw.
2456 */
2457 case EMSTATE_HM:
2458#if defined(VBOX_VMM_TARGET_ARMV8)
2459 AssertReleaseFailed(); /* Should never get here. */
2460#else
2461 rc = emR3HmExecute(pVM, pVCpu, &fFFDone);
2462#endif
2463 break;
2464
2465 /*
2466              * Execute using the native execution manager (NEM).
2467 */
2468 case EMSTATE_NEM:
2469 rc = VBOXSTRICTRC_TODO(emR3NemExecute(pVM, pVCpu, &fFFDone));
2470 break;
2471
2472 /*
2473 * Execute recompiled.
2474 */
2475 case EMSTATE_RECOMPILER:
2476 rc = VBOXSTRICTRC_TODO(emR3RecompilerExecute(pVM, pVCpu, &fFFDone));
2477 Log2(("EMR3ExecuteVM: emR3RecompilerExecute -> %Rrc\n", rc));
2478 break;
2479
2480 /*
2481 * Execute in the interpreter.
2482 */
2483 case EMSTATE_IEM:
2484 {
2485#if 0 /* For comparing HM and IEM (@bugref{10464}). */
2486 PCPUMCTX const pCtx = &pVCpu->cpum.GstCtx;
2487 PCX86FXSTATE const pX87 = &pCtx->XState.x87;
2488 Log11(("eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
2489 "eip=%08x esp=%08x ebp=%08x eflags=%08x\n"
2490 "cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x\n"
2491 "fsw=%04x fcw=%04x ftw=%02x top=%u%s%s%s%s%s%s%s%s%s\n"
2492 "st0=%.10Rhxs st1=%.10Rhxs st2=%.10Rhxs st3=%.10Rhxs\n"
2493 "st4=%.10Rhxs st5=%.10Rhxs st6=%.10Rhxs st7=%.10Rhxs\n",
2494 pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->edi, pCtx->edi,
2495 pCtx->eip, pCtx->esp, pCtx->ebp, pCtx->eflags.u,
2496 pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ds.Sel, pCtx->es.Sel, pCtx->fs.Sel, pCtx->gs.Sel,
2497 pX87->FSW, pX87->FCW, pX87->FTW, X86_FSW_TOP_GET(pX87->FSW),
2498 pX87->FSW & X86_FSW_ES ? " ES!" : "",
2499 pX87->FSW & X86_FSW_IE ? " IE" : "",
2500 pX87->FSW & X86_FSW_DE ? " DE" : "",
2501 pX87->FSW & X86_FSW_SF ? " SF" : "",
2502 pX87->FSW & X86_FSW_B ? " B!" : "",
2503 pX87->FSW & X86_FSW_C0 ? " C0" : "",
2504 pX87->FSW & X86_FSW_C1 ? " C1" : "",
2505 pX87->FSW & X86_FSW_C2 ? " C2" : "",
2506 pX87->FSW & X86_FSW_C3 ? " C3" : "",
2507 &pX87->aRegs[/*X86_FSW_TOP_GET_ST(pVCpu->cpum.GstCtx.XState.x87.FSW,*/(0)],
2508 &pX87->aRegs[/*X86_FSW_TOP_GET_ST(pVCpu->cpum.GstCtx.XState.x87.FSW,*/(1)],
2509 &pX87->aRegs[/*X86_FSW_TOP_GET_ST(pVCpu->cpum.GstCtx.XState.x87.FSW,*/(2)],
2510 &pX87->aRegs[/*X86_FSW_TOP_GET_ST(pVCpu->cpum.GstCtx.XState.x87.FSW,*/(3)],
2511 &pX87->aRegs[/*X86_FSW_TOP_GET_ST(pVCpu->cpum.GstCtx.XState.x87.FSW,*/(4)],
2512 &pX87->aRegs[/*X86_FSW_TOP_GET_ST(pVCpu->cpum.GstCtx.XState.x87.FSW,*/(5)],
2513 &pX87->aRegs[/*X86_FSW_TOP_GET_ST(pVCpu->cpum.GstCtx.XState.x87.FSW,*/(6)],
2514 &pX87->aRegs[/*X86_FSW_TOP_GET_ST(pVCpu->cpum.GstCtx.XState.x87.FSW,*/(7)]));
2515 DBGFR3DisasInstrCurrentLogInternal(pVCpu, NULL);
2516#endif
2517
2518 uint32_t cInstructions = 0;
2519#if 0 /* For testing purposes. */
2520 //STAM_PROFILE_START(&pVCpu->em.s.StatHmExec, x1);
2521 rc = VBOXSTRICTRC_TODO(EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE));
2522 //STAM_PROFILE_STOP(&pVCpu->em.s.StatHmExec, x1);
2523 if (rc == VINF_EM_DBG_STEPPED || rc == VINF_EM_RESCHEDULE_HM || rc == VINF_EM_RESCHEDULE_REM || rc == VINF_EM_RESCHEDULE_RAW)
2524 rc = VINF_SUCCESS;
2525 else if (rc == VERR_EM_CANNOT_EXEC_GUEST)
2526#endif
2527 rc = VBOXSTRICTRC_TODO(IEMExecLots(pVCpu, 4096 /*cMaxInstructions*/, 2047 /*cPollRate*/, &cInstructions));
2528 if (pVM->em.s.fIemExecutesAll)
2529 {
2530 Assert(rc != VINF_EM_RESCHEDULE_REM);
2531 Assert(rc != VINF_EM_RESCHEDULE_RAW);
2532 Assert(rc != VINF_EM_RESCHEDULE_HM);
2533#ifdef VBOX_HIGH_RES_TIMERS_HACK
2534 if (cInstructions < 2048)
2535 TMTimerPollVoid(pVM, pVCpu);
2536#endif
2537 }
2538 fFFDone = false;
2539 break;
2540 }
2541
2542 /*
2543 * Application processor execution halted until SIPI.
2544 */
2545 case EMSTATE_WAIT_SIPI:
2546 /* no break */
2547 /*
2548 * hlt - execution halted until interrupt.
2549 */
2550 case EMSTATE_HALTED:
2551 {
2552 STAM_REL_PROFILE_START(&pVCpu->em.s.StatHalted, y);
2553                     /* If HM (or someone else) stores a pending interrupt in
2554                        TRPM, it must be dispatched ASAP without any halting.
2555                        Anything pending in TRPM has been accepted and the CPU
2556                        should already be in the right state to receive it. */
2557 if (TRPMHasTrap(pVCpu))
2558 rc = VINF_EM_RESCHEDULE;
2559#if !defined(VBOX_VMM_TARGET_ARMV8)
2560 /* MWAIT has a special extension where it's woken up when
2561 an interrupt is pending even when IF=0. */
2562 else if ( (pVCpu->em.s.MWait.fWait & (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
2563 == (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
2564 {
2565 rc = VMR3WaitHalted(pVM, pVCpu, 0 /*fFlags*/);
2566 if (rc == VINF_SUCCESS)
2567 {
2568 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
2569 APICUpdatePendingInterrupts(pVCpu);
2570
2571 if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC
2572 | VMCPU_FF_INTERRUPT_NESTED_GUEST
2573 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT))
2574 {
2575 Log(("EMR3ExecuteVM: Triggering reschedule on pending IRQ after MWAIT\n"));
2576 rc = VINF_EM_RESCHEDULE;
2577 }
2578
2579 }
2580 }
2581#endif
2582 else
2583 {
2584#if defined(VBOX_VMM_TARGET_ARMV8)
2585 const uint32_t fWaitHalted = 0; /* WFI/WFE always return when an interrupt happens. */
2586#else
2587 const uint32_t fWaitHalted = (CPUMGetGuestEFlags(pVCpu) & X86_EFL_IF) ? 0 : VMWAITHALTED_F_IGNORE_IRQS;
2588#endif
2589 rc = VMR3WaitHalted(pVM, pVCpu, fWaitHalted);
2590                         /* We're only interested in NMIs/SMIs here, which have their own FFs, so we don't need to
2591                            check VMCPU_FF_UPDATE_APIC. */
2592 if ( rc == VINF_SUCCESS
2593#if defined(VBOX_VMM_TARGET_ARMV8)
2594 && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_VTIMER_ACTIVATED
2595 | VMCPU_FF_INTERRUPT_FIQ | VMCPU_FF_INTERRUPT_IRQ)
2596#else
2597 && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT)
2598#endif
2599 )
2600 {
2601 Log(("EMR3ExecuteVM: Triggering reschedule on pending NMI/SMI/UNHALT after HLT\n"));
2602 rc = VINF_EM_RESCHEDULE;
2603 }
2604 }
2605
2606 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatHalted, y);
2607 break;
2608 }
2609
2610 /*
2611 * Suspended - return to VM.cpp.
2612 */
2613 case EMSTATE_SUSPENDED:
2614 TMR3NotifySuspend(pVM, pVCpu);
2615 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2616 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2617 return VINF_EM_SUSPEND;
2618
2619 /*
2620 * Debugging in the guest.
2621 */
2622 case EMSTATE_DEBUG_GUEST_RAW:
2623 case EMSTATE_DEBUG_GUEST_HM:
2624 case EMSTATE_DEBUG_GUEST_NEM:
2625 case EMSTATE_DEBUG_GUEST_IEM:
2626 case EMSTATE_DEBUG_GUEST_RECOMPILER:
2627 TMR3NotifySuspend(pVM, pVCpu);
2628 rc = VBOXSTRICTRC_TODO(emR3Debug(pVM, pVCpu, rc));
2629 TMR3NotifyResume(pVM, pVCpu);
2630 Log2(("EMR3ExecuteVM: emR3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
2631 break;
2632
2633 /*
2634 * Debugging in the hypervisor.
2635 */
2636 case EMSTATE_DEBUG_HYPER:
2637 {
2638 TMR3NotifySuspend(pVM, pVCpu);
2639 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2640
2641 rc = VBOXSTRICTRC_TODO(emR3Debug(pVM, pVCpu, rc));
2642 Log2(("EMR3ExecuteVM: emR3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
2643 if (rc != VINF_SUCCESS)
2644 {
2645 if (rc == VINF_EM_OFF || rc == VINF_EM_TERMINATE)
2646 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2647 else
2648 {
2649 /* switch to guru meditation mode */
2650 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2651 VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
2652 VMMR3FatalDump(pVM, pVCpu, rc);
2653 }
2654 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2655 return rc;
2656 }
2657
2658 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2659 TMR3NotifyResume(pVM, pVCpu);
2660 break;
2661 }
2662
2663 /*
2664 * Guru meditation takes place in the debugger.
2665 */
2666 case EMSTATE_GURU_MEDITATION:
2667 {
2668 TMR3NotifySuspend(pVM, pVCpu);
2669 VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
2670 VMMR3FatalDump(pVM, pVCpu, rc);
2671 emR3Debug(pVM, pVCpu, rc);
2672 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2673 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2674 return rc;
2675 }
2676
2677 /*
2678 * The states we don't expect here.
2679 */
2680 case EMSTATE_NONE:
2681 case EMSTATE_RAW_OBSOLETE:
2682 case EMSTATE_IEM_THEN_REM_OBSOLETE:
2683 case EMSTATE_TERMINATING:
2684 default:
2685 AssertMsgFailed(("EMR3ExecuteVM: Invalid state %d!\n", pVCpu->em.s.enmState));
2686 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2687 TMR3NotifySuspend(pVM, pVCpu);
2688 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2689 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2690 return VERR_EM_INTERNAL_ERROR;
2691 }
2692 } /* The Outer Main Loop */
2693 }
2694 else
2695 {
2696 /*
2697 * Fatal error.
2698 */
2699 Log(("EMR3ExecuteVM: returns %Rrc because of longjmp / fatal error; (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(pVCpu->em.s.enmPrevState)));
2700 TMR3NotifySuspend(pVM, pVCpu);
2701 VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
2702 VMMR3FatalDump(pVM, pVCpu, rc);
2703 emR3Debug(pVM, pVCpu, rc);
2704 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2705 /** @todo change the VM state! */
2706 return rc;
2707 }
2708
2709 /* not reached */
2710}
2711
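/*
 * Schematic toy model (editor's sketch, assumptions only - not VirtualBox code):
 * the shape of the outer main loop above reduced to its two switches: service
 * forced actions, map the resulting status code to an execution state, run the
 * matching inner executor, and feed its status back into the next iteration.
 * All TOY_* names and the stubbed executors are invented for the example.
 */
#include <stdio.h>

typedef enum { TOY_STATE_HM, TOY_STATE_RECOMPILER, TOY_STATE_HALTED, TOY_STATE_TERMINATING } TOYSTATE;
typedef enum { TOY_RC_SUCCESS, TOY_RC_RESCHEDULE, TOY_RC_HALT, TOY_RC_OFF } TOYRC;

static TOYRC    toyForcedActions(void)     { return TOY_RC_SUCCESS; }       /* stub: nothing pending */
static TOYRC    toyExecuteHw(void)         { return TOY_RC_HALT; }          /* stub: guest executed HLT */
static TOYRC    toyExecuteRecompiler(void) { return TOY_RC_OFF; }           /* stub: guest powered off */
static TOYRC    toyWaitHalted(void)        { return TOY_RC_RESCHEDULE; }    /* stub: interrupt woke us */
static TOYSTATE toyReschedule(void)        { return TOY_STATE_RECOMPILER; } /* stub scheduler */

int main(void)
{
    TOYSTATE enmState = TOY_STATE_HM;
    TOYRC    rc       = TOY_RC_SUCCESS;
    for (;;)
    {
        if (toyForcedActions() == TOY_RC_OFF)       /* forced actions first, like emR3ForcedActions() */
            rc = TOY_RC_OFF;

        switch (rc)                                 /* first switch: status code -> next state */
        {
            case TOY_RC_RESCHEDULE: enmState = toyReschedule();       break;
            case TOY_RC_HALT:       enmState = TOY_STATE_HALTED;      break;
            case TOY_RC_OFF:        enmState = TOY_STATE_TERMINATING; break;
            default:                                                  break;
        }
        if (enmState == TOY_STATE_TERMINATING)
        {
            printf("toy VM terminated\n");
            return 0;
        }

        switch (enmState)                           /* second switch: run the inner executor */
        {
            case TOY_STATE_HM:         rc = toyExecuteHw();         break;
            case TOY_STATE_RECOMPILER: rc = toyExecuteRecompiler(); break;
            case TOY_STATE_HALTED:     rc = toyWaitHalted();        break;
            default:                   return 1;
        }
    }
}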