VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/EM.cpp@ 100101

Last change on this file since 100101 was 100012, checked in by vboxsync, 18 months ago

VMM/EM: Long #else/#endif should be marked. bugref:10389

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 113.0 KB
1/* $Id: EM.cpp 100012 2023-05-30 11:50:41Z vboxsync $ */
2/** @file
3 * EM - Execution Monitor / Manager.
4 */
5
6/*
7 * Copyright (C) 2006-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28/** @page pg_em EM - The Execution Monitor / Manager
29 *
30 * The Execution Monitor/Manager is responsible for running the VM, scheduling
31 * the right kind of execution (Raw-mode, Hardware Assisted, Recompiled or
32 * Interpreted), and keeping the CPU states in sync. The function
33 * EMR3ExecuteVM() is the 'main-loop' of the VM, while each of the execution
34 * modes has different inner loops (emR3RawExecute, emR3HmExecute, and
35 * emR3RemExecute).
36 *
37 * The interpreted execution is only used to avoid switching between
38 * raw-mode/hm and the recompiler when fielding virtualization traps/faults.
39 * The interpretation is thus implemented as part of EM.
40 *
41 * @see grp_em
42 */
43
44
45/*********************************************************************************************************************************
46* Header Files *
47*********************************************************************************************************************************/
48#define LOG_GROUP LOG_GROUP_EM
49#define VMCPU_INCL_CPUM_GST_CTX /* for CPUM_IMPORT_GUEST_STATE_RET & interrupt injection */
50#include <VBox/vmm/em.h>
51#include <VBox/vmm/vmm.h>
52#include <VBox/vmm/selm.h>
53#include <VBox/vmm/trpm.h>
54#include <VBox/vmm/iem.h>
55#include <VBox/vmm/nem.h>
56#include <VBox/vmm/iom.h>
57#include <VBox/vmm/dbgf.h>
58#include <VBox/vmm/pgm.h>
59#include <VBox/vmm/apic.h>
60#include <VBox/vmm/tm.h>
61#include <VBox/vmm/mm.h>
62#include <VBox/vmm/ssm.h>
63#include <VBox/vmm/pdmapi.h>
64#include <VBox/vmm/pdmcritsect.h>
65#include <VBox/vmm/pdmqueue.h>
66#include <VBox/vmm/hm.h>
67#include "EMInternal.h"
68#include <VBox/vmm/vm.h>
69#include <VBox/vmm/uvm.h>
70#include <VBox/vmm/cpumdis.h>
71#include <VBox/dis.h>
72#include <VBox/err.h>
73#include "VMMTracing.h"
74
75#include <iprt/asm.h>
76#include <iprt/string.h>
77#include <iprt/stream.h>
78#include <iprt/thread.h>
79
80#include "EMInline.h"
81
82
83/*********************************************************************************************************************************
84* Internal Functions *
85*********************************************************************************************************************************/
86static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM);
87static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
88#if defined(LOG_ENABLED) || defined(VBOX_STRICT)
89static const char *emR3GetStateName(EMSTATE enmState);
90#endif
91static VBOXSTRICTRC emR3Debug(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc);
92
93
94/**
95 * Initializes the EM.
96 *
97 * @returns VBox status code.
98 * @param pVM The cross context VM structure.
99 */
100VMMR3_INT_DECL(int) EMR3Init(PVM pVM)
101{
102 LogFlow(("EMR3Init\n"));
103 /*
104 * Assert alignment and sizes.
105 */
106 AssertCompileMemberAlignment(VM, em.s, 32);
107 AssertCompile(sizeof(pVM->em.s) <= sizeof(pVM->em.padding));
108 AssertCompile(RT_SIZEOFMEMB(VMCPU, em.s.u.FatalLongJump) <= RT_SIZEOFMEMB(VMCPU, em.s.u.achPaddingFatalLongJump));
109 AssertCompile(RT_SIZEOFMEMB(VMCPU, em.s) <= RT_SIZEOFMEMB(VMCPU, em.padding));
110
111 /*
112 * Init the structure.
113 */
114 PCFGMNODE pCfgRoot = CFGMR3GetRoot(pVM);
115 PCFGMNODE pCfgEM = CFGMR3GetChild(pCfgRoot, "EM");
116
117 int rc = CFGMR3QueryBoolDef(pCfgEM, "IemExecutesAll", &pVM->em.s.fIemExecutesAll,
118#if defined(RT_ARCH_ARM64) && defined(RT_OS_DARWIN) && !defined(VBOX_VMM_TARGET_ARMV8)
119 true
120#else
121 false
122#endif
123 );
124 AssertLogRelRCReturn(rc, rc);
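    /* Note: the default is true only on darwin.arm64 hosts building the x86 VMM
       target, where guest code cannot use HM/NEM and must go through IEM anyway. */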
125
126 bool fEnabled;
127 rc = CFGMR3QueryBoolDef(pCfgEM, "TripleFaultReset", &fEnabled, false);
128 AssertLogRelRCReturn(rc, rc);
129 pVM->em.s.fGuruOnTripleFault = !fEnabled;
130 if (!pVM->em.s.fGuruOnTripleFault && pVM->cCpus > 1)
131 {
132 LogRel(("EM: Overriding /EM/TripleFaultReset, must be false on SMP.\n"));
133 pVM->em.s.fGuruOnTripleFault = true;
134 }
135
136 LogRel(("EMR3Init: fIemExecutesAll=%RTbool fGuruOnTripleFault=%RTbool\n", pVM->em.s.fIemExecutesAll, pVM->em.s.fGuruOnTripleFault));
137
138 /** @cfgm{/EM/ExitOptimizationEnabled, bool, true}
139 * Whether to try to correlate exit history in any context, detect hot spots and
140 * try to optimize these using IEM if there are other exits close by. This
141 * overrides the context specific settings. */
142 bool fExitOptimizationEnabled = true;
143 rc = CFGMR3QueryBoolDef(pCfgEM, "ExitOptimizationEnabled", &fExitOptimizationEnabled, true);
144 AssertLogRelRCReturn(rc, rc);
145
146 /** @cfgm{/EM/ExitOptimizationEnabledR0, bool, true}
147 * Whether to optimize exits in ring-0. Setting this to false will also disable
148 * the /EM/ExitOptimizationEnabledR0PreemptDisabled setting. Depending on preemption
149 * capabilities of the host kernel, this optimization may be unavailable. */
150 bool fExitOptimizationEnabledR0 = true;
151 rc = CFGMR3QueryBoolDef(pCfgEM, "ExitOptimizationEnabledR0", &fExitOptimizationEnabledR0, true);
152 AssertLogRelRCReturn(rc, rc);
153 fExitOptimizationEnabledR0 &= fExitOptimizationEnabled;
154
155 /** @cfgm{/EM/ExitOptimizationEnabledR0PreemptDisabled, bool, false}
156 * Whether to optimize exits in ring-0 when preemption is disabled (or preemption
157 * hooks are in effect). */
158 /** @todo change the default to true here */
159 bool fExitOptimizationEnabledR0PreemptDisabled = true;
160 rc = CFGMR3QueryBoolDef(pCfgEM, "ExitOptimizationEnabledR0PreemptDisabled", &fExitOptimizationEnabledR0PreemptDisabled, false);
161 AssertLogRelRCReturn(rc, rc);
162 fExitOptimizationEnabledR0PreemptDisabled &= fExitOptimizationEnabledR0;
163
164 /** @cfgm{/EM/HistoryExecMaxInstructions, integer, 16, 65535, 8192}
165 * Maximum number of instructions to let EMHistoryExec execute in one go. */
166 uint16_t cHistoryExecMaxInstructions = 8192;
167 rc = CFGMR3QueryU16Def(pCfgEM, "HistoryExecMaxInstructions", &cHistoryExecMaxInstructions, cHistoryExecMaxInstructions);
168 AssertLogRelRCReturn(rc, rc);
169 if (cHistoryExecMaxInstructions < 16)
170 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS, "/EM/HistoryExecMaxInstructions value is too small, min 16");
171
172 /** @cfgm{/EM/HistoryProbeMaxInstructionsWithoutExit, integer, 2, 65535, 24 for HM, 32 for NEM}
173 * Maximum number of instructions between exits during probing. */
174 uint16_t cHistoryProbeMaxInstructionsWithoutExit = 24;
175#ifdef RT_OS_WINDOWS
176 if (VM_IS_NEM_ENABLED(pVM))
177 cHistoryProbeMaxInstructionsWithoutExit = 32;
178#endif
179 rc = CFGMR3QueryU16Def(pCfgEM, "HistoryProbeMaxInstructionsWithoutExit", &cHistoryProbeMaxInstructionsWithoutExit,
180 cHistoryProbeMaxInstructionsWithoutExit);
181 AssertLogRelRCReturn(rc, rc);
182 if (cHistoryProbeMaxInstructionsWithoutExit < 2)
183 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS,
184 "/EM/HistoryProbeMaxInstructionsWithoutExit value is too small, min 16");
185
186 /** @cfgm{/EM/HistoryProbMinInstructions, integer, 0, 65535, depends}
187 * The default is (/EM/HistoryProbeMaxInstructionsWithoutExit + 1) * 3. */
188 uint16_t cHistoryProbeMinInstructions = cHistoryProbeMaxInstructionsWithoutExit < 0x5554
189 ? (cHistoryProbeMaxInstructionsWithoutExit + 1) * 3 : 0xffff;
190 rc = CFGMR3QueryU16Def(pCfgEM, "HistoryProbMinInstructions", &cHistoryProbeMinInstructions,
191 cHistoryProbeMinInstructions);
192 AssertLogRelRCReturn(rc, rc);
193
194 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
195 {
196 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
197 pVCpu->em.s.fExitOptimizationEnabled = fExitOptimizationEnabled;
198 pVCpu->em.s.fExitOptimizationEnabledR0 = fExitOptimizationEnabledR0;
199 pVCpu->em.s.fExitOptimizationEnabledR0PreemptDisabled = fExitOptimizationEnabledR0PreemptDisabled;
200 pVCpu->em.s.cHistoryExecMaxInstructions = cHistoryExecMaxInstructions;
201 pVCpu->em.s.cHistoryProbeMinInstructions = cHistoryProbeMinInstructions;
202 pVCpu->em.s.cHistoryProbeMaxInstructionsWithoutExit = cHistoryProbeMaxInstructionsWithoutExit;
203 }
204
205 /*
206 * Saved state.
207 */
208 rc = SSMR3RegisterInternal(pVM, "em", 0, EM_SAVED_STATE_VERSION, 16,
209 NULL, NULL, NULL,
210 NULL, emR3Save, NULL,
211 NULL, emR3Load, NULL);
212 if (RT_FAILURE(rc))
213 return rc;
214
215 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
216 {
217 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
218
219 pVCpu->em.s.enmState = idCpu == 0 ? EMSTATE_NONE : EMSTATE_WAIT_SIPI;
220 pVCpu->em.s.enmPrevState = EMSTATE_NONE;
221 pVCpu->em.s.msTimeSliceStart = 0; /* paranoia */
222 pVCpu->em.s.idxContinueExitRec = UINT16_MAX;
223
224# define EM_REG_COUNTER(a, b, c) \
225 rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, c, b, idCpu); \
226 AssertRC(rc);
227
228# define EM_REG_COUNTER_USED(a, b, c) \
229 rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, c, b, idCpu); \
230 AssertRC(rc);
231
232# define EM_REG_PROFILE(a, b, c) \
233 rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, idCpu); \
234 AssertRC(rc);
235
236# define EM_REG_PROFILE_ADV(a, b, c) \
237 rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE_ADV, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, idCpu); \
238 AssertRC(rc);
239
240 /*
241 * Statistics.
242 */
243#ifdef VBOX_WITH_STATISTICS
244 EM_REG_COUNTER_USED(&pVCpu->em.s.StatIoRestarted, "/EM/CPU%u/R3/PrivInst/IoRestarted", "I/O instructions restarted in ring-3.");
245 EM_REG_COUNTER_USED(&pVCpu->em.s.StatIoIem, "/EM/CPU%u/R3/PrivInst/IoIem", "I/O instructions handed to IEM in ring-3.");
246
247 /* these should be considered for release statistics. */
248 EM_REG_COUNTER(&pVCpu->em.s.StatIOEmu, "/PROF/CPU%u/EM/Emulation/IO", "Profiling of emR3RawExecuteIOInstruction.");
249 EM_REG_COUNTER(&pVCpu->em.s.StatPrivEmu, "/PROF/CPU%u/EM/Emulation/Priv", "Profiling of emR3RawPrivileged.");
250 EM_REG_PROFILE(&pVCpu->em.s.StatHMEntry, "/PROF/CPU%u/EM/HMEnter", "Profiling Hardware Accelerated Mode entry overhead.");
251#endif
252 EM_REG_PROFILE(&pVCpu->em.s.StatHMExec, "/PROF/CPU%u/EM/HMExec", "Profiling Hardware Accelerated Mode execution.");
253 EM_REG_COUNTER(&pVCpu->em.s.StatHMExecuteCalled, "/PROF/CPU%u/EM/HMExecuteCalled", "Number of times enmR3HMExecute is called.");
254#ifdef VBOX_WITH_STATISTICS
255 EM_REG_PROFILE(&pVCpu->em.s.StatIEMEmu, "/PROF/CPU%u/EM/IEMEmuSingle", "Profiling single instruction IEM execution.");
256 EM_REG_PROFILE(&pVCpu->em.s.StatIEMThenREM, "/PROF/CPU%u/EM/IEMThenRem", "Profiling IEM-then-REM instruction execution (by IEM).");
257 EM_REG_PROFILE(&pVCpu->em.s.StatNEMEntry, "/PROF/CPU%u/EM/NEMEnter", "Profiling NEM entry overhead.");
258#endif
259 EM_REG_PROFILE(&pVCpu->em.s.StatNEMExec, "/PROF/CPU%u/EM/NEMExec", "Profiling NEM execution.");
260 EM_REG_COUNTER(&pVCpu->em.s.StatNEMExecuteCalled, "/PROF/CPU%u/EM/NEMExecuteCalled", "Number of times enmR3NEMExecute is called.");
261#ifdef VBOX_WITH_STATISTICS
262 EM_REG_PROFILE(&pVCpu->em.s.StatREMExec, "/PROF/CPU%u/EM/REMExec", "Profiling REM execution.");
263#endif
264
265 EM_REG_COUNTER(&pVCpu->em.s.StatForcedActions, "/PROF/CPU%u/EM/ForcedActions", "Profiling forced action execution.");
266 EM_REG_COUNTER(&pVCpu->em.s.StatHalted, "/PROF/CPU%u/EM/Halted", "Profiling halted state (VMR3WaitHalted).");
267 EM_REG_PROFILE_ADV(&pVCpu->em.s.StatCapped, "/PROF/CPU%u/EM/Capped", "Profiling capped state (sleep).");
268 EM_REG_COUNTER(&pVCpu->em.s.StatREMTotal, "/PROF/CPU%u/EM/REMTotal", "Profiling emR3RecompilerExecute (excluding FFs).");
269
270 EM_REG_PROFILE_ADV(&pVCpu->em.s.StatTotal, "/PROF/CPU%u/EM/Total", "Profiling EMR3ExecuteVM.");
271
272 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.iNextExit, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
273 "Number of recorded exits.", "/PROF/CPU%u/EM/RecordedExits", idCpu);
274 AssertRC(rc);
275
276 /* History record statistics */
277 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.cExitRecordUsed, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
278 "Number of used hash table entries.", "/EM/CPU%u/ExitHashing/Used", idCpu);
279 AssertRC(rc);
280
281 for (uint32_t iStep = 0; iStep < RT_ELEMENTS(pVCpu->em.s.aStatHistoryRecHits); iStep++)
282 {
283 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.aStatHistoryRecHits[iStep], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
284 "Number of hits at this step.", "/EM/CPU%u/ExitHashing/Step%02u-Hits", idCpu, iStep);
285 AssertRC(rc);
286 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.aStatHistoryRecTypeChanged[iStep], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
287 "Number of type changes at this step.", "/EM/CPU%u/ExitHashing/Step%02u-TypeChanges", idCpu, iStep);
288 AssertRC(rc);
289 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.aStatHistoryRecReplaced[iStep], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
290 "Number of replacements at this step.", "/EM/CPU%u/ExitHashing/Step%02u-Replacements", idCpu, iStep);
291 AssertRC(rc);
292 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.aStatHistoryRecNew[iStep], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
293 "Number of new inserts at this step.", "/EM/CPU%u/ExitHashing/Step%02u-NewInserts", idCpu, iStep);
294 AssertRC(rc);
295 }
296
297 EM_REG_PROFILE(&pVCpu->em.s.StatHistoryExec, "/EM/CPU%u/ExitOpt/Exec", "Profiling normal EMHistoryExec operation.");
298 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryExecSavedExits, "/EM/CPU%u/ExitOpt/ExecSavedExit", "Net number of saved exits.");
299 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryExecInstructions, "/EM/CPU%u/ExitOpt/ExecInstructions", "Number of instructions executed during normal operation.");
300 EM_REG_PROFILE(&pVCpu->em.s.StatHistoryProbe, "/EM/CPU%u/ExitOpt/Probe", "Profiling EMHistoryExec when probing.");
301 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryProbeInstructions, "/EM/CPU%u/ExitOpt/ProbeInstructions", "Number of instructions executed during probing.");
302 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryProbedNormal, "/EM/CPU%u/ExitOpt/ProbedNormal", "Number of EMEXITACTION_NORMAL_PROBED results.");
303 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryProbedExecWithMax, "/EM/CPU%u/ExitOpt/ProbedExecWithMax", "Number of EMEXITACTION_EXEC_WITH_MAX results.");
304 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryProbedToRing3, "/EM/CPU%u/ExitOpt/ProbedToRing3", "Number of ring-3 probe continuations.");
305 }
306
307 emR3InitDbg(pVM);
308 return VINF_SUCCESS;
309}
310
311
312/**
313 * Called when a VM initialization stage is completed.
314 *
315 * @returns VBox status code.
316 * @param pVM The cross context VM structure.
317 * @param enmWhat The initialization state that was completed.
318 */
319VMMR3_INT_DECL(int) EMR3InitCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
320{
321 if (enmWhat == VMINITCOMPLETED_RING0)
322 LogRel(("EM: Exit history optimizations: enabled=%RTbool enabled-r0=%RTbool enabled-r0-no-preemption=%RTbool\n",
323 pVM->apCpusR3[0]->em.s.fExitOptimizationEnabled, pVM->apCpusR3[0]->em.s.fExitOptimizationEnabledR0,
324 pVM->apCpusR3[0]->em.s.fExitOptimizationEnabledR0PreemptDisabled));
325 return VINF_SUCCESS;
326}
327
328
329/**
330 * Applies relocations to data and code managed by this
331 * component. This function will be called at init and
332 * whenever the VMM needs to relocate itself inside the GC.
333 *
334 * @param pVM The cross context VM structure.
335 */
336VMMR3_INT_DECL(void) EMR3Relocate(PVM pVM)
337{
338 LogFlow(("EMR3Relocate\n"));
339 RT_NOREF(pVM);
340}
341
342
343/**
344 * Reset the EM state for a CPU.
345 *
346 * Called by EMR3Reset and hot plugging.
347 *
348 * @param pVCpu The cross context virtual CPU structure.
349 */
350VMMR3_INT_DECL(void) EMR3ResetCpu(PVMCPU pVCpu)
351{
352 /* Reset scheduling state. */
353 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_UNHALT);
354
355 /* VMR3ResetFF may return VINF_EM_RESET or VINF_EM_SUSPEND, so transition
356 out of the HALTED state here so that enmPrevState doesn't end up as
357 HALTED when EMR3Execute returns. */
358 if (pVCpu->em.s.enmState == EMSTATE_HALTED)
359 {
360 Log(("EMR3ResetCpu: Cpu#%u %s -> %s\n", pVCpu->idCpu, emR3GetStateName(pVCpu->em.s.enmState), pVCpu->idCpu == 0 ? "EMSTATE_NONE" : "EMSTATE_WAIT_SIPI"));
361 pVCpu->em.s.enmState = pVCpu->idCpu == 0 ? EMSTATE_NONE : EMSTATE_WAIT_SIPI;
362 }
363}
364
365
366/**
367 * Reset notification.
368 *
369 * @param pVM The cross context VM structure.
370 */
371VMMR3_INT_DECL(void) EMR3Reset(PVM pVM)
372{
373 Log(("EMR3Reset: \n"));
374 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
375 EMR3ResetCpu(pVM->apCpusR3[idCpu]);
376}
377
378
379/**
380 * Terminates the EM.
381 *
382 * Termination means cleaning up and freeing all resources;
383 * the VM itself is at this point powered off or suspended.
384 *
385 * @returns VBox status code.
386 * @param pVM The cross context VM structure.
387 */
388VMMR3_INT_DECL(int) EMR3Term(PVM pVM)
389{
390 RT_NOREF(pVM);
391 return VINF_SUCCESS;
392}
393
394
395/**
396 * Execute state save operation.
397 *
398 * @returns VBox status code.
399 * @param pVM The cross context VM structure.
400 * @param pSSM SSM operation handle.
401 */
402static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM)
403{
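    /* Per-VCPU record layout: a legacy fForceRAW bool (always false now), the
       previous EM state restricted to NONE/HALTED/WAIT_SIPI as a 32-bit value,
       and finally the MONITOR/MWAIT tracking state. */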
404 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
405 {
406 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
407
408 SSMR3PutBool(pSSM, false /*fForceRAW*/);
409
410 Assert(pVCpu->em.s.enmState == EMSTATE_SUSPENDED);
411 Assert(pVCpu->em.s.enmPrevState != EMSTATE_SUSPENDED);
412 SSMR3PutU32(pSSM,
413 pVCpu->em.s.enmPrevState == EMSTATE_NONE
414 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED
415 || pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
416 ? pVCpu->em.s.enmPrevState : EMSTATE_NONE);
417
418 /* Save mwait state. */
419 SSMR3PutU32(pSSM, pVCpu->em.s.MWait.fWait);
420 SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMWaitRAX);
421 SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMWaitRCX);
422 SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRAX);
423 SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRCX);
424 int rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRDX);
425 AssertRCReturn(rc, rc);
426 }
427 return VINF_SUCCESS;
428}
429
430
431/**
432 * Execute state load operation.
433 *
434 * @returns VBox status code.
435 * @param pVM The cross context VM structure.
436 * @param pSSM SSM operation handle.
437 * @param uVersion Data layout version.
438 * @param uPass The data pass.
439 */
440static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
441{
442 /*
443 * Validate version.
444 */
445 if ( uVersion > EM_SAVED_STATE_VERSION
446 || uVersion < EM_SAVED_STATE_VERSION_PRE_SMP)
447 {
448 AssertMsgFailed(("emR3Load: Invalid version uVersion=%d (current %d)!\n", uVersion, EM_SAVED_STATE_VERSION));
449 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
450 }
451 Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);
452
453 /*
454 * Load the saved state.
455 */
456 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
457 {
458 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
459
460 bool fForceRAWIgnored;
461 int rc = SSMR3GetBool(pSSM, &fForceRAWIgnored);
462 AssertRCReturn(rc, rc);
463
464 if (uVersion > EM_SAVED_STATE_VERSION_PRE_SMP)
465 {
466 /* We are only interested in two enmPrevState values for use when
467 EMR3ExecuteVM is called.
468 Since ~r157540, only these two and EMSTATE_NONE are saved. */
469 SSM_GET_ENUM32_RET(pSSM, pVCpu->em.s.enmPrevState, EMSTATE);
470 Assert(pVCpu->em.s.enmPrevState != EMSTATE_SUSPENDED);
471 if ( pVCpu->em.s.enmPrevState != EMSTATE_WAIT_SIPI
472 && pVCpu->em.s.enmPrevState != EMSTATE_HALTED)
473 pVCpu->em.s.enmPrevState = EMSTATE_NONE;
474
475 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
476 }
477 if (uVersion > EM_SAVED_STATE_VERSION_PRE_MWAIT)
478 {
479 /* Load mwait state. */
480 rc = SSMR3GetU32(pSSM, &pVCpu->em.s.MWait.fWait);
481 AssertRCReturn(rc, rc);
482 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMWaitRAX);
483 AssertRCReturn(rc, rc);
484 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMWaitRCX);
485 AssertRCReturn(rc, rc);
486 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRAX);
487 AssertRCReturn(rc, rc);
488 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRCX);
489 AssertRCReturn(rc, rc);
490 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRDX);
491 AssertRCReturn(rc, rc);
492 }
493 }
494 return VINF_SUCCESS;
495}
496
497
498/**
499 * Argument packet for emR3SetExecutionPolicy.
500 */
501struct EMR3SETEXECPOLICYARGS
502{
503 EMEXECPOLICY enmPolicy;
504 bool fEnforce;
505};
506
507
508/**
509 * @callback_method_impl{FNVMMEMTRENDEZVOUS, Rendezvous callback for EMR3SetExecutionPolicy.}
510 */
511static DECLCALLBACK(VBOXSTRICTRC) emR3SetExecutionPolicy(PVM pVM, PVMCPU pVCpu, void *pvUser)
512{
513 /*
514 * Only the first CPU changes the variables.
515 */
516 if (pVCpu->idCpu == 0)
517 {
518 struct EMR3SETEXECPOLICYARGS *pArgs = (struct EMR3SETEXECPOLICYARGS *)pvUser;
519 switch (pArgs->enmPolicy)
520 {
521 case EMEXECPOLICY_IEM_ALL:
522 pVM->em.s.fIemExecutesAll = pArgs->fEnforce;
523
524 /* For making '.alliem 1' useful during debugging, transition the
525 EMSTATE_DEBUG_GUEST_XXX to EMSTATE_DEBUG_GUEST_IEM. */
526 for (VMCPUID i = 0; i < pVM->cCpus; i++)
527 {
528 PVMCPU pVCpuX = pVM->apCpusR3[i];
529 switch (pVCpuX->em.s.enmState)
530 {
531 case EMSTATE_DEBUG_GUEST_RECOMPILER:
532 if (pVM->em.s.fIemRecompiled)
533 break;
534 RT_FALL_THROUGH();
535 case EMSTATE_DEBUG_GUEST_RAW:
536 case EMSTATE_DEBUG_GUEST_HM:
537 case EMSTATE_DEBUG_GUEST_NEM:
538 Log(("EM: idCpu=%u: %s -> EMSTATE_DEBUG_GUEST_IEM\n", i, emR3GetStateName(pVCpuX->em.s.enmState) ));
539 pVCpuX->em.s.enmState = EMSTATE_DEBUG_GUEST_IEM;
540 break;
541 case EMSTATE_DEBUG_GUEST_IEM:
542 default:
543 break;
544 }
545 }
546 break;
547
548 case EMEXECPOLICY_IEM_RECOMPILED:
549 pVM->em.s.fIemRecompiled = pArgs->fEnforce;
550 break;
551
552 default:
553 AssertFailedReturn(VERR_INVALID_PARAMETER);
554 }
555 Log(("EM: Set execution policy: fIemExecutesAll=%RTbool fIemRecompiled=%RTbool\n",
556 pVM->em.s.fIemExecutesAll, pVM->em.s.fIemRecompiled));
557 }
558
559 /*
560 * Force rescheduling if in HM, NEM, IEM/interpreter or IEM/recompiler.
561 */
562 Assert(pVCpu->em.s.enmState != EMSTATE_RAW_OBSOLETE);
563 return pVCpu->em.s.enmState == EMSTATE_HM
564 || pVCpu->em.s.enmState == EMSTATE_NEM
565 || pVCpu->em.s.enmState == EMSTATE_IEM
566 || pVCpu->em.s.enmState == EMSTATE_RECOMPILER
567 ? VINF_EM_RESCHEDULE
568 : VINF_SUCCESS;
569}
570
571
572/**
573 * Changes an execution scheduling policy parameter.
574 *
575 * This is used to enable or disable raw-mode / hardware-virtualization
576 * execution of user and supervisor code.
577 *
578 * @returns VINF_SUCCESS on success.
579 * @returns VINF_EM_RESCHEDULE if a rescheduling might be required.
580 * @returns VERR_INVALID_PARAMETER on an invalid enmMode value.
581 *
582 * @param pUVM The user mode VM handle.
583 * @param enmPolicy The scheduling policy to change.
584 * @param fEnforce Whether to enforce the policy or not.
585 */
586VMMR3DECL(int) EMR3SetExecutionPolicy(PUVM pUVM, EMEXECPOLICY enmPolicy, bool fEnforce)
587{
588 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
589 VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE);
590 AssertReturn(enmPolicy > EMEXECPOLICY_INVALID && enmPolicy < EMEXECPOLICY_END, VERR_INVALID_PARAMETER);
591
592 struct EMR3SETEXECPOLICYARGS Args = { enmPolicy, fEnforce };
593 return VMMR3EmtRendezvous(pUVM->pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING, emR3SetExecutionPolicy, &Args);
594}
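/* Usage sketch (hypothetical caller, e.g. behind the '.alliem' debugger command
   mentioned above) for forcing all guest code through IEM:
       int rc = EMR3SetExecutionPolicy(pUVM, EMEXECPOLICY_IEM_ALL, true /*fEnforce*/);
   The rendezvous returns VINF_EM_RESCHEDULE for EMTs currently in HM, NEM, IEM or
   recompiler state, so the new policy takes effect on the next scheduling round. */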
595
596
597/**
598 * Queries an execution scheduling policy parameter.
599 *
600 * @returns VBox status code
601 * @param pUVM The user mode VM handle.
602 * @param enmPolicy The scheduling policy to query.
603 * @param pfEnforced Where to return the current value.
604 */
605VMMR3DECL(int) EMR3QueryExecutionPolicy(PUVM pUVM, EMEXECPOLICY enmPolicy, bool *pfEnforced)
606{
607 AssertReturn(enmPolicy > EMEXECPOLICY_INVALID && enmPolicy < EMEXECPOLICY_END, VERR_INVALID_PARAMETER);
608 AssertPtrReturn(pfEnforced, VERR_INVALID_POINTER);
609 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
610 PVM pVM = pUVM->pVM;
611 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
612
613 /* No need to bother EMTs with a query. */
614 switch (enmPolicy)
615 {
616 case EMEXECPOLICY_IEM_ALL:
617 *pfEnforced = pVM->em.s.fIemExecutesAll;
618 break;
619 case EMEXECPOLICY_IEM_RECOMPILED:
620 *pfEnforced = pVM->em.s.fIemRecompiled;
621 break;
622 default:
623 AssertFailedReturn(VERR_INTERNAL_ERROR_2);
624 }
625
626 return VINF_SUCCESS;
627}
628
629
630/**
631 * Queries the main execution engine of the VM.
632 *
633 * @returns VBox status code
634 * @param pUVM The user mode VM handle.
635 * @param pbMainExecutionEngine Where to return the result, VM_EXEC_ENGINE_XXX.
636 */
637VMMR3DECL(int) EMR3QueryMainExecutionEngine(PUVM pUVM, uint8_t *pbMainExecutionEngine)
638{
639 AssertPtrReturn(pbMainExecutionEngine, VERR_INVALID_POINTER);
640 *pbMainExecutionEngine = VM_EXEC_ENGINE_NOT_SET;
641
642 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
643 PVM pVM = pUVM->pVM;
644 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
645
646 *pbMainExecutionEngine = pVM->bMainExecutionEngine;
647 return VINF_SUCCESS;
648}
649
650
651/**
652 * Raise a fatal error.
653 *
654 * Safely terminate the VM with full state report and stuff. This function
655 * will naturally never return.
656 *
657 * @param pVCpu The cross context virtual CPU structure.
658 * @param rc VBox status code.
659 */
660VMMR3DECL(void) EMR3FatalError(PVMCPU pVCpu, int rc)
661{
662 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
663 longjmp(pVCpu->em.s.u.FatalLongJump, rc);
664}
665
666
667#if defined(LOG_ENABLED) || defined(VBOX_STRICT)
668/**
669 * Gets the EM state name.
670 *
671 * @returns pointer to read only state name,
672 * @param enmState The state.
673 */
674static const char *emR3GetStateName(EMSTATE enmState)
675{
676 switch (enmState)
677 {
678 case EMSTATE_NONE: return "EMSTATE_NONE";
679 case EMSTATE_RAW_OBSOLETE: return "EMSTATE_RAW_OBSOLETE";
680 case EMSTATE_HM: return "EMSTATE_HM";
681 case EMSTATE_IEM: return "EMSTATE_IEM";
682 case EMSTATE_RECOMPILER: return "EMSTATE_RECOMPILER";
683 case EMSTATE_HALTED: return "EMSTATE_HALTED";
684 case EMSTATE_WAIT_SIPI: return "EMSTATE_WAIT_SIPI";
685 case EMSTATE_SUSPENDED: return "EMSTATE_SUSPENDED";
686 case EMSTATE_TERMINATING: return "EMSTATE_TERMINATING";
687 case EMSTATE_DEBUG_GUEST_RAW: return "EMSTATE_DEBUG_GUEST_RAW";
688 case EMSTATE_DEBUG_GUEST_HM: return "EMSTATE_DEBUG_GUEST_HM";
689 case EMSTATE_DEBUG_GUEST_IEM: return "EMSTATE_DEBUG_GUEST_IEM";
690 case EMSTATE_DEBUG_GUEST_RECOMPILER: return "EMSTATE_DEBUG_GUEST_RECOMPILER";
691 case EMSTATE_DEBUG_HYPER: return "EMSTATE_DEBUG_HYPER";
692 case EMSTATE_GURU_MEDITATION: return "EMSTATE_GURU_MEDITATION";
693 case EMSTATE_IEM_THEN_REM_OBSOLETE: return "EMSTATE_IEM_THEN_REM_OBSOLETE";
694 case EMSTATE_NEM: return "EMSTATE_NEM";
695 case EMSTATE_DEBUG_GUEST_NEM: return "EMSTATE_DEBUG_GUEST_NEM";
696 default: return "Unknown!";
697 }
698}
699#endif /* LOG_ENABLED || VBOX_STRICT */
700
701
702#if !defined(VBOX_VMM_TARGET_ARMV8)
703/**
704 * Handle pending ring-3 I/O port write.
705 *
706 * This is in response to a VINF_EM_PENDING_R3_IOPORT_WRITE status code returned
707 * by EMRZSetPendingIoPortWrite() in ring-0 or raw-mode context.
708 *
709 * @returns Strict VBox status code.
710 * @param pVM The cross context VM structure.
711 * @param pVCpu The cross context virtual CPU structure.
712 */
713VBOXSTRICTRC emR3ExecutePendingIoPortWrite(PVM pVM, PVMCPU pVCpu)
714{
715 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
716
717 /* Get and clear the pending data. */
718 RTIOPORT const uPort = pVCpu->em.s.PendingIoPortAccess.uPort;
719 uint32_t const uValue = pVCpu->em.s.PendingIoPortAccess.uValue;
720 uint8_t const cbValue = pVCpu->em.s.PendingIoPortAccess.cbValue;
721 uint8_t const cbInstr = pVCpu->em.s.PendingIoPortAccess.cbInstr;
722 pVCpu->em.s.PendingIoPortAccess.cbValue = 0;
723
724 /* Assert sanity. */
725 switch (cbValue)
726 {
727 case 1: Assert(!(uValue & UINT32_C(0xffffff00))); break;
728 case 2: Assert(!(uValue & UINT32_C(0xffff0000))); break;
729 case 4: break;
730 default: AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_EM_INTERNAL_ERROR);
731 }
732 AssertReturn(cbInstr <= 15 && cbInstr >= 1, VERR_EM_INTERNAL_ERROR);
733
734 /* Do the work.*/
735 VBOXSTRICTRC rcStrict = IOMIOPortWrite(pVM, pVCpu, uPort, uValue, cbValue);
736 LogFlow(("EM/OUT: %#x, %#x LB %u -> %Rrc\n", uPort, uValue, cbValue, VBOXSTRICTRC_VAL(rcStrict) ));
737 if (IOM_SUCCESS(rcStrict))
738 {
739 pVCpu->cpum.GstCtx.rip += cbInstr;
740 pVCpu->cpum.GstCtx.rflags.Bits.u1RF = 0;
741 }
742 return rcStrict;
743}
744
745
746/**
747 * Handle pending ring-3 I/O port read.
748 *
749 * This is in response to a VINF_EM_PENDING_R3_IOPORT_READ status code returned
750 * by EMRZSetPendingIoPortRead() in ring-0 or raw-mode context.
751 *
752 * @returns Strict VBox status code.
753 * @param pVM The cross context VM structure.
754 * @param pVCpu The cross context virtual CPU structure.
755 */
756VBOXSTRICTRC emR3ExecutePendingIoPortRead(PVM pVM, PVMCPU pVCpu)
757{
758 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_RAX);
759
760 /* Get and clear the pending data. */
761 RTIOPORT const uPort = pVCpu->em.s.PendingIoPortAccess.uPort;
762 uint8_t const cbValue = pVCpu->em.s.PendingIoPortAccess.cbValue;
763 uint8_t const cbInstr = pVCpu->em.s.PendingIoPortAccess.cbInstr;
764 pVCpu->em.s.PendingIoPortAccess.cbValue = 0;
765
766 /* Assert sanity. */
767 switch (cbValue)
768 {
769 case 1: break;
770 case 2: break;
771 case 4: break;
772 default: AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_EM_INTERNAL_ERROR);
773 }
774 AssertReturn(pVCpu->em.s.PendingIoPortAccess.uValue == UINT32_C(0x52454144) /* READ*/, VERR_EM_INTERNAL_ERROR);
775 AssertReturn(cbInstr <= 15 && cbInstr >= 1, VERR_EM_INTERNAL_ERROR);
776
777 /* Do the work.*/
778 uint32_t uValue = 0;
779 VBOXSTRICTRC rcStrict = IOMIOPortRead(pVM, pVCpu, uPort, &uValue, cbValue);
780 LogFlow(("EM/IN: %#x LB %u -> %Rrc, %#x\n", uPort, cbValue, VBOXSTRICTRC_VAL(rcStrict), uValue ));
781 if (IOM_SUCCESS(rcStrict))
782 {
783 if (cbValue == 4)
784 pVCpu->cpum.GstCtx.rax = uValue;
785 else if (cbValue == 2)
786 pVCpu->cpum.GstCtx.ax = (uint16_t)uValue;
787 else
788 pVCpu->cpum.GstCtx.al = (uint8_t)uValue;
789 pVCpu->cpum.GstCtx.rip += cbInstr;
790 pVCpu->cpum.GstCtx.rflags.Bits.u1RF = 0;
791 }
792 return rcStrict;
793}
794
795
796/**
797 * @callback_method_impl{FNVMMEMTRENDEZVOUS,
798 * Worker for emR3ExecuteSplitLockInstruction}
799 */
800static DECLCALLBACK(VBOXSTRICTRC) emR3ExecuteSplitLockInstructionRendezvous(PVM pVM, PVMCPU pVCpu, void *pvUser)
801{
802 /* Only execute on the specified EMT. */
803 if (pVCpu == (PVMCPU)pvUser)
804 {
805 LogFunc(("\n"));
806 VBOXSTRICTRC rcStrict = IEMExecOneIgnoreLock(pVCpu);
807 LogFunc(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
808 if (rcStrict == VINF_IEM_RAISED_XCPT)
809 rcStrict = VINF_SUCCESS;
810 return rcStrict;
811 }
812 RT_NOREF(pVM);
813 return VINF_SUCCESS;
814}
815
816
817/**
818 * Handle an instruction causing a split cacheline lock access in SMP VMs.
819 *
820 * Generally we only get here if the host has split-lock detection enabled and
821 * this caused an \#AC because of something the guest did. If we interpret the
822 * instruction as-is, we'll likely just repeat the split-lock access and
823 * possibly be killed, get a SIGBUS, or trigger a warning followed by extra MSR
824 * changes on context switching (costs a tiny bit). Assuming these \#ACs are
825 * rare to non-existing, we'll do a rendezvous of all EMTs and tell IEM to
826 * disregard the lock prefix when emulating the instruction.
827 *
828 * Yes, we could probably modify the MSR (or MSRs) controlling the detection
829 * feature when entering guest context, but the support for the feature isn't a
830 * 100% given and we'll need the debug-only supdrvOSMsrProberRead and
831 * supdrvOSMsrProberWrite functionality from SUPDrv.cpp to safely detect it.
832 * Thus the approach is to just deal with the spurious \#ACs first and maybe add
834 * proper detection to SUPDrv later if we find it necessary.
834 *
835 * @see @bugref{10052}
836 *
837 * @returns Strict VBox status code.
838 * @param pVM The cross context VM structure.
839 * @param pVCpu The cross context virtual CPU structure.
840 */
841VBOXSTRICTRC emR3ExecuteSplitLockInstruction(PVM pVM, PVMCPU pVCpu)
842{
843 LogFunc(("\n"));
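    /* All-at-once rendezvous: no other EMT runs guest code while the worker above
       emulates the offending instruction on this EMT with its LOCK prefix ignored. */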
844 return VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE, emR3ExecuteSplitLockInstructionRendezvous, pVCpu);
845}
846#endif /* !VBOX_VMM_TARGET_ARMV8 */
847
848
849/**
850 * Debug loop.
851 *
852 * @returns VBox status code for EM.
853 * @param pVM The cross context VM structure.
854 * @param pVCpu The cross context virtual CPU structure.
855 * @param rc Current EM VBox status code.
856 */
857static VBOXSTRICTRC emR3Debug(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc)
858{
859 for (;;)
860 {
861 Log(("emR3Debug: rc=%Rrc\n", VBOXSTRICTRC_VAL(rc)));
862 const VBOXSTRICTRC rcLast = rc;
863
864 /*
865 * Debug related RC.
866 */
867 switch (VBOXSTRICTRC_VAL(rc))
868 {
869 /*
870 * Single step an instruction.
871 */
872 case VINF_EM_DBG_STEP:
873 if ( pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_RAW
874 || pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER)
875 AssertLogRelMsgFailedStmt(("Bad EM state."), VERR_EM_INTERNAL_ERROR);
876#if !defined(VBOX_VMM_TARGET_ARMV8)
877 else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_HM)
878 rc = EMR3HmSingleInstruction(pVM, pVCpu, 0 /*fFlags*/);
879#endif
880 else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_NEM)
881 rc = VBOXSTRICTRC_TODO(emR3NemSingleInstruction(pVM, pVCpu, 0 /*fFlags*/));
882 else
883 {
884 rc = IEMExecOne(pVCpu); /** @todo add dedicated interface... */
885 if (rc == VINF_SUCCESS || rc == VINF_EM_RESCHEDULE)
886 rc = VINF_EM_DBG_STEPPED;
887 }
888 break;
889
890 /*
891 * Simple events: stepped, breakpoint, stop/assertion.
892 */
893 case VINF_EM_DBG_STEPPED:
894 rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED);
895 break;
896
897 case VINF_EM_DBG_BREAKPOINT:
898 rc = DBGFR3BpHit(pVM, pVCpu);
899 break;
900
901 case VINF_EM_DBG_STOP:
902 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, NULL, 0, NULL, NULL);
903 break;
904
905 case VINF_EM_DBG_EVENT:
906 rc = DBGFR3EventHandlePending(pVM, pVCpu);
907 break;
908
909 case VINF_EM_DBG_HYPER_STEPPED:
910 rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED_HYPER);
911 break;
912
913 case VINF_EM_DBG_HYPER_BREAKPOINT:
914 rc = DBGFR3EventBreakpoint(pVM, DBGFEVENT_BREAKPOINT_HYPER);
915 break;
916
917 case VINF_EM_DBG_HYPER_ASSERTION:
918 RTPrintf("\nVINF_EM_DBG_HYPER_ASSERTION:\n%s%s\n", VMMR3GetRZAssertMsg1(pVM), VMMR3GetRZAssertMsg2(pVM));
919 RTLogFlush(NULL);
920 rc = DBGFR3EventAssertion(pVM, DBGFEVENT_ASSERTION_HYPER, VMMR3GetRZAssertMsg1(pVM), VMMR3GetRZAssertMsg2(pVM));
921 break;
922
923 /*
924 * Guru meditation.
925 */
926 case VERR_VMM_RING0_ASSERTION: /** @todo Make a guru meditation event! */
927 rc = DBGFR3EventSrc(pVM, DBGFEVENT_FATAL_ERROR, "VERR_VMM_RING0_ASSERTION", 0, NULL, NULL);
928 break;
929 case VINF_EM_TRIPLE_FAULT: /** @todo Make a guru meditation event! */
930 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, "VINF_EM_TRIPLE_FAULT", 0, NULL, NULL);
931 break;
932
933 default: /** @todo don't use default for guru, but make special errors code! */
934 {
935 LogRel(("emR3Debug: rc=%Rrc\n", VBOXSTRICTRC_VAL(rc)));
936 rc = DBGFR3Event(pVM, DBGFEVENT_FATAL_ERROR);
937 break;
938 }
939 }
940
941 /*
942 * Process the result.
943 */
944 switch (VBOXSTRICTRC_VAL(rc))
945 {
946 /*
947 * Continue the debugging loop.
948 */
949 case VINF_EM_DBG_STEP:
950 case VINF_EM_DBG_STOP:
951 case VINF_EM_DBG_EVENT:
952 case VINF_EM_DBG_STEPPED:
953 case VINF_EM_DBG_BREAKPOINT:
954 case VINF_EM_DBG_HYPER_STEPPED:
955 case VINF_EM_DBG_HYPER_BREAKPOINT:
956 case VINF_EM_DBG_HYPER_ASSERTION:
957 break;
958
959 /*
960 * Resuming execution (in some form) has to be done here if we got
961 * a hypervisor debug event.
962 */
963 case VINF_SUCCESS:
964 case VINF_EM_RESUME:
965 case VINF_EM_SUSPEND:
966 case VINF_EM_RESCHEDULE:
967 case VINF_EM_RESCHEDULE_RAW:
968 case VINF_EM_RESCHEDULE_REM:
969 case VINF_EM_HALT:
970 if (pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER)
971 AssertLogRelMsgFailedReturn(("Not implemented\n"), VERR_EM_INTERNAL_ERROR);
972 if (rc == VINF_SUCCESS)
973 rc = VINF_EM_RESCHEDULE;
974 return rc;
975
976 /*
977 * The debugger isn't attached.
978 * We'll simply turn the thing off since that's the easiest thing to do.
979 */
980 case VERR_DBGF_NOT_ATTACHED:
981 switch (VBOXSTRICTRC_VAL(rcLast))
982 {
983 case VINF_EM_DBG_HYPER_STEPPED:
984 case VINF_EM_DBG_HYPER_BREAKPOINT:
985 case VINF_EM_DBG_HYPER_ASSERTION:
986 case VERR_TRPM_PANIC:
987 case VERR_TRPM_DONT_PANIC:
988 case VERR_VMM_RING0_ASSERTION:
989 case VERR_VMM_HYPER_CR3_MISMATCH:
990 case VERR_VMM_RING3_CALL_DISABLED:
991 return rcLast;
992 }
993 return VINF_EM_OFF;
994
995 /*
996 * Status codes terminating the VM in one or another sense.
997 */
998 case VINF_EM_TERMINATE:
999 case VINF_EM_OFF:
1000 case VINF_EM_RESET:
1001 case VINF_EM_NO_MEMORY:
1002 case VINF_EM_RAW_STALE_SELECTOR:
1003 case VINF_EM_RAW_IRET_TRAP:
1004 case VERR_TRPM_PANIC:
1005 case VERR_TRPM_DONT_PANIC:
1006 case VERR_IEM_INSTR_NOT_IMPLEMENTED:
1007 case VERR_IEM_ASPECT_NOT_IMPLEMENTED:
1008 case VERR_VMM_RING0_ASSERTION:
1009 case VERR_VMM_HYPER_CR3_MISMATCH:
1010 case VERR_VMM_RING3_CALL_DISABLED:
1011 case VERR_INTERNAL_ERROR:
1012 case VERR_INTERNAL_ERROR_2:
1013 case VERR_INTERNAL_ERROR_3:
1014 case VERR_INTERNAL_ERROR_4:
1015 case VERR_INTERNAL_ERROR_5:
1016 case VERR_IPE_UNEXPECTED_STATUS:
1017 case VERR_IPE_UNEXPECTED_INFO_STATUS:
1018 case VERR_IPE_UNEXPECTED_ERROR_STATUS:
1019 return rc;
1020
1021 /*
1022 * The rest is unexpected, and will keep us here.
1023 */
1024 default:
1025 AssertMsgFailed(("Unexpected rc %Rrc!\n", VBOXSTRICTRC_VAL(rc)));
1026 break;
1027 }
1028 } /* debug for ever */
1029}
1030
1031
1032/**
1033 * Executes recompiled code.
1034 *
1035 * This function contains the recompiler version of the inner
1036 * execution loop (the outer loop being in EMR3ExecuteVM()).
1037 *
1038 * @returns VBox status code. The most important ones are: VINF_EM_RESCHEDULE,
1039 * VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1040 *
1041 * @param pVM The cross context VM structure.
1042 * @param pVCpu The cross context virtual CPU structure.
1043 * @param pfFFDone Where to store an indicator telling whether or not
1044 * FFs were done before returning.
1045 *
1046 */
1047static VBOXSTRICTRC emR3RecompilerExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
1048{
1049 STAM_REL_PROFILE_START(&pVCpu->em.s.StatREMTotal, a);
1050#ifdef VBOX_VMM_TARGET_ARMV8
1051 LogFlow(("emR3RecompilerExecute/%u: (pc=%RGv)\n", pVCpu->idCpu, (RTGCPTR)pVCpu->cpum.GstCtx.Pc.u64));
1052#else
1053 LogFlow(("emR3RecompilerExecute/%u: (cs:eip=%04x:%RGv)\n", pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, (RTGCPTR)pVCpu->cpum.GstCtx.rip));
1054#endif
1055
1056 /*
1057 * Loop till we get a forced action which returns anything but VINF_SUCCESS.
1058 */
1059 *pfFFDone = false;
1060 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
1061 for (;;)
1062 {
1063#ifdef LOG_ENABLED
1064# if defined(VBOX_VMM_TARGET_ARMV8)
1065 Log3(("EM: pc=%08x\n", CPUMGetGuestFlatPC(pVCpu)));
1066# else
1067 if (!pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
1068 Log(("EMR%d: %04X:%08RX64 RSP=%08RX64 IF=%d CR0=%x eflags=%x\n", CPUMGetGuestCPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel,
1069 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.Bits.u1IF,
1070 (uint32_t)pVCpu->cpum.GstCtx.cr0, pVCpu->cpum.GstCtx.eflags.u));
1071 else
1072 Log(("EMV86: %04X:%08X IF=%d\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.Bits.u1IF));
1073# endif
1074#endif
1075
1076 /*
1077 * Execute.
1078 */
1079 if (RT_LIKELY(emR3IsExecutionAllowed(pVM, pVCpu)))
1080 {
1081 STAM_PROFILE_START(&pVCpu->em.s.StatREMExec, c);
1082#ifdef VBOX_WITH_IEM_RECOMPILER
1083 if (pVM->em.s.fIemRecompiled)
1084 rcStrict = IEMExecRecompilerThreaded(pVM, pVCpu);
1085 else
1086#endif
1087 rcStrict = IEMExecLots(pVCpu, 8192 /*cMaxInstructions*/, 4095 /*cPollRate*/, NULL /*pcInstructions*/);
1088 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMExec, c);
1089 }
1090 else
1091 {
1092 /* Give up this time slice; virtual time continues */
1093 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatCapped, u);
1094 RTThreadSleep(5);
1095 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatCapped, u);
1096 rcStrict = VINF_SUCCESS;
1097 }
1098
1099 /*
1100 * Deal with high priority post execution FFs before doing anything
1101 * else. Sync back the state and leave the lock to be on the safe side.
1102 */
1103 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_HIGH_PRIORITY_POST_MASK)
1104 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_MASK))
1105 rcStrict = emR3HighPriorityPostForcedActions(pVM, pVCpu, rcStrict);
1106
1107 /*
1108 * Process the returned status code.
1109 */
1110 if (rcStrict != VINF_SUCCESS)
1111 {
1112#if 0
1113 if (RT_LIKELY(rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST))
1114 break;
1115 /* Fatal error: */
1116#endif
1117 break;
1118 }
1119
1120
1121 /*
1122 * Check and execute forced actions.
1123 *
1124 * Sync back the VM state and leave the lock before calling any of
1125 * these, you never know what's going to happen here.
1126 */
1127#ifdef VBOX_HIGH_RES_TIMERS_HACK
1128 TMTimerPollVoid(pVM, pVCpu);
1129#endif
1130 AssertCompile(VMCPU_FF_ALL_REM_MASK & VMCPU_FF_TIMER);
1131 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_REM_MASK)
1132 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_ALL_REM_MASK) )
1133 {
1134 rcStrict = emR3ForcedActions(pVM, pVCpu, VBOXSTRICTRC_TODO(rcStrict));
1135 VBOXVMM_EM_FF_ALL_RET(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1136 if ( rcStrict != VINF_SUCCESS
1137 && rcStrict != VINF_EM_RESCHEDULE_REM)
1138 {
1139 *pfFFDone = true;
1140 break;
1141 }
1142 }
1143
1144 } /* The Inner Loop, recompiled execution mode version. */
1145
1146 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatREMTotal, a);
1147 return rcStrict;
1148}
1149
1150
1151/**
1152 * Decides whether to execute HM, NEM, IEM/interpreter or IEM/recompiler.
1153 *
1154 * @returns new EM state
1155 * @param pVM The cross context VM structure.
1156 * @param pVCpu The cross context virtual CPU structure.
1157 */
1158EMSTATE emR3Reschedule(PVM pVM, PVMCPU pVCpu)
1159{
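    /* Decision order: stick with WAIT_SIPI, then forced IEM (config or IEM-only
       exec engine), then HM when it can handle the current guest state, then NEM,
       and finally the IEM interpreter/recompiler as the fallback. */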
1160 /*
1161 * We stay in the wait for SIPI state unless explicitly told otherwise.
1162 */
1163 if (pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI)
1164 return EMSTATE_WAIT_SIPI;
1165
1166 /*
1167 * Execute everything in IEM?
1168 */
1169 if ( pVM->em.s.fIemExecutesAll
1170 || VM_IS_EXEC_ENGINE_IEM(pVM))
1171#ifdef VBOX_WITH_IEM_RECOMPILER
1172 return pVM->em.s.fIemRecompiled ? EMSTATE_RECOMPILER : EMSTATE_IEM;
1173#else
1174 return EMSTATE_IEM;
1175#endif
1176
1177#if !defined(VBOX_VMM_TARGET_ARMV8)
1178 if (VM_IS_HM_ENABLED(pVM))
1179 {
1180 if (HMCanExecuteGuest(pVM, pVCpu, &pVCpu->cpum.GstCtx))
1181 return EMSTATE_HM;
1182 }
1183 else
1184#endif
1185 if (NEMR3CanExecuteGuest(pVM, pVCpu))
1186 return EMSTATE_NEM;
1187
1188 /*
1189 * Note! Raw mode and hw accelerated mode are incompatible. The latter
1190 * turns off monitoring features essential for raw mode!
1191 */
1192#ifdef VBOX_WITH_IEM_RECOMPILER
1193 return pVM->em.s.fIemRecompiled ? EMSTATE_RECOMPILER : EMSTATE_IEM;
1194#else
1195 return EMSTATE_IEM;
1196#endif
1197}
1198
1199
1200/**
1201 * Executes all high priority post execution force actions.
1202 *
1203 * @returns Strict VBox status code. Typically @a rc, but may be upgraded to
1204 * fatal error status code.
1205 *
1206 * @param pVM The cross context VM structure.
1207 * @param pVCpu The cross context virtual CPU structure.
1208 * @param rc The current strict VBox status code rc.
1209 */
1210VBOXSTRICTRC emR3HighPriorityPostForcedActions(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc)
1211{
1212 VBOXVMM_EM_FF_HIGH(pVCpu, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions, VBOXSTRICTRC_VAL(rc));
1213
1214 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PDM_CRITSECT))
1215 PDMCritSectBothFF(pVM, pVCpu);
1216
1217#if !defined(VBOX_VMM_TARGET_ARMV8)
1218 /* Update CR3 (Nested Paging case for HM). */
1219 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
1220 {
1221 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_EFER, rc);
1222 int const rc2 = PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
1223 if (RT_FAILURE(rc2))
1224 return rc2;
1225 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
1226 }
1227#endif
1228
1229 /* IEM has pending work (typically memory write after INS instruction). */
1230 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
1231 rc = IEMR3ProcessForceFlag(pVM, pVCpu, rc);
1232
1233 /* IOM has pending work (committing an I/O or MMIO write). */
1234 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IOM))
1235 {
1236 rc = IOMR3ProcessForceFlag(pVM, pVCpu, rc);
1237 if (pVCpu->em.s.idxContinueExitRec >= RT_ELEMENTS(pVCpu->em.s.aExitRecords))
1238 { /* half likely, or at least it's a line shorter. */ }
1239 else if (rc == VINF_SUCCESS)
1240 rc = VINF_EM_RESUME_R3_HISTORY_EXEC;
1241 else
1242 pVCpu->em.s.idxContinueExitRec = UINT16_MAX;
1243 }
1244
1245 if (VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
1246 {
1247 if ( rc > VINF_EM_NO_MEMORY
1248 && rc <= VINF_EM_LAST)
1249 rc = VINF_EM_NO_MEMORY;
1250 }
1251
1252 return rc;
1253}
1254
1255
1256#if !defined(VBOX_VMM_TARGET_ARMV8)
1257/**
1258 * Helper for emR3ForcedActions() for VMX external interrupt VM-exit.
1259 *
1260 * @returns VBox status code.
1261 * @retval VINF_NO_CHANGE if the VMX external interrupt intercept was not active.
1262 * @param pVCpu The cross context virtual CPU structure.
1263 */
1264static int emR3VmxNstGstIntrIntercept(PVMCPU pVCpu)
1265{
1266#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1267 /* Handle the "external interrupt" VM-exit intercept. */
1268 if ( CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_EXT_INT_EXIT)
1269 && !CPUMIsGuestVmxExitCtlsSet(&pVCpu->cpum.GstCtx, VMX_EXIT_CTLS_ACK_EXT_INT))
1270 {
1271 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, 0 /* uVector */, true /* fIntPending */);
1272 AssertMsg( rcStrict != VINF_VMX_VMEXIT /* VM-exit should have been converted to VINF_SUCCESS. */
1273 && rcStrict != VINF_NO_CHANGE
1274 && rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1275 return VBOXSTRICTRC_VAL(rcStrict);
1276 }
1277#else
1278 RT_NOREF(pVCpu);
1279#endif
1280 return VINF_NO_CHANGE;
1281}
1282
1283
1284/**
1285 * Helper for emR3ForcedActions() for SVM interrupt intercept.
1286 *
1287 * @returns VBox status code.
1288 * @retval VINF_NO_CHANGE if the SVM external interrupt intercept was not active.
1289 * @param pVCpu The cross context virtual CPU structure.
1290 */
1291static int emR3SvmNstGstIntrIntercept(PVMCPU pVCpu)
1292{
1293#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1294 /* Handle the physical interrupt intercept (can be masked by the nested hypervisor). */
1295 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, &pVCpu->cpum.GstCtx, SVM_CTRL_INTERCEPT_INTR))
1296 {
1297 CPUM_ASSERT_NOT_EXTRN(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK);
1298 VBOXSTRICTRC rcStrict = IEMExecSvmVmexit(pVCpu, SVM_EXIT_INTR, 0, 0);
1299 if (RT_SUCCESS(rcStrict))
1300 {
1301 AssertMsg( rcStrict != VINF_SVM_VMEXIT
1302 && rcStrict != VINF_NO_CHANGE, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1303 return VBOXSTRICTRC_VAL(rcStrict);
1304 }
1305
1306 AssertMsgFailed(("INTR #VMEXIT failed! rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1307 return VINF_EM_TRIPLE_FAULT;
1308 }
1309#else
1310 NOREF(pVCpu);
1311#endif
1312 return VINF_NO_CHANGE;
1313}
1314
1315
1316/**
1317 * Helper for emR3ForcedActions() for SVM virtual interrupt intercept.
1318 *
1319 * @returns VBox status code.
1320 * @retval VINF_NO_CHANGE if the SVM virtual interrupt intercept was not active.
1321 * @param pVCpu The cross context virtual CPU structure.
1322 */
1323static int emR3SvmNstGstVirtIntrIntercept(PVMCPU pVCpu)
1324{
1325#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1326 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, &pVCpu->cpum.GstCtx, SVM_CTRL_INTERCEPT_VINTR))
1327 {
1328 CPUM_ASSERT_NOT_EXTRN(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK);
1329 VBOXSTRICTRC rcStrict = IEMExecSvmVmexit(pVCpu, SVM_EXIT_VINTR, 0, 0);
1330 if (RT_SUCCESS(rcStrict))
1331 {
1332 Assert(rcStrict != VINF_SVM_VMEXIT);
1333 return VBOXSTRICTRC_VAL(rcStrict);
1334 }
1335 AssertMsgFailed(("VINTR #VMEXIT failed! rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1336 return VINF_EM_TRIPLE_FAULT;
1337 }
1338#else
1339 NOREF(pVCpu);
1340#endif
1341 return VINF_NO_CHANGE;
1342}
1343#endif
1344
1345
1346/**
1347 * Executes all pending forced actions.
1348 *
1349 * Forced actions can cause execution delays and execution
1350 * rescheduling. The first we deal with using action priority, so
1351 * that for instance pending timers aren't scheduled and ran until
1352 * right before execution. The rescheduling we deal with using
1353 * return codes. The same goes for VM termination, only in that case
1354 * we exit everything.
1355 *
1356 * @returns VBox status code of equal or greater importance/severity than rc.
1357 * The most important ones are: VINF_EM_RESCHEDULE,
1358 * VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1359 *
1360 * @param pVM The cross context VM structure.
1361 * @param pVCpu The cross context virtual CPU structure.
1362 * @param rc The current rc.
1363 *
1364 */
1365int emR3ForcedActions(PVM pVM, PVMCPU pVCpu, int rc)
1366{
1367 STAM_REL_PROFILE_START(&pVCpu->em.s.StatForcedActions, a);
1368#ifdef VBOX_STRICT
1369 int rcIrq = VINF_SUCCESS;
1370#endif
1371 int rc2;
1372#define UPDATE_RC() \
1373 do { \
1374 AssertMsg(rc2 <= 0 || (rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST), ("Invalid FF return code: %Rra\n", rc2)); \
1375 if (rc2 == VINF_SUCCESS || rc < VINF_SUCCESS) \
1376 break; \
1377 if (!rc || rc2 < rc) \
1378 rc = rc2; \
1379 } while (0)
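    /* UPDATE_RC() merges rc2 into rc, keeping the more important status: an error
       already in rc is never overwritten, and among informational VINF_EM_* codes
       the numerically smaller (higher priority) one wins. */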
1380 VBOXVMM_EM_FF_ALL(pVCpu, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions, rc);
1381
1382 /*
1383 * Post execution chunk first.
1384 */
1385 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_NORMAL_PRIORITY_POST_MASK)
1386 || (VMCPU_FF_NORMAL_PRIORITY_POST_MASK && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_NORMAL_PRIORITY_POST_MASK)) )
1387 {
1388 /*
1389 * EMT Rendezvous (must be serviced before termination).
1390 */
1391 if (VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
1392 {
1393 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1394 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
1395 UPDATE_RC();
1396 /** @todo HACK ALERT! The following test is to make sure EM+TM
1397 * thinks the VM is stopped/reset before the next VM state change
1398 * is made. We need a better solution for this, or at least make it
1399 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1400 * VINF_EM_SUSPEND). */
1401 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1402 {
1403 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1404 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1405 return rc;
1406 }
1407 }
1408
1409 /*
1410 * State change request (cleared by vmR3SetStateLocked).
1411 */
1412 if (VM_FF_IS_SET(pVM, VM_FF_CHECK_VM_STATE))
1413 {
1414 VMSTATE enmState = VMR3GetState(pVM);
1415 switch (enmState)
1416 {
1417 case VMSTATE_FATAL_ERROR:
1418 case VMSTATE_FATAL_ERROR_LS:
1419 case VMSTATE_GURU_MEDITATION:
1420 case VMSTATE_GURU_MEDITATION_LS:
1421 Log2(("emR3ForcedActions: %s -> VINF_EM_SUSPEND\n", VMGetStateName(enmState) ));
1422 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1423 return VINF_EM_SUSPEND;
1424
1425 case VMSTATE_DESTROYING:
1426 Log2(("emR3ForcedActions: %s -> VINF_EM_TERMINATE\n", VMGetStateName(enmState) ));
1427 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1428 return VINF_EM_TERMINATE;
1429
1430 default:
1431 AssertMsgFailed(("%s\n", VMGetStateName(enmState)));
1432 }
1433 }
1434
1435 /*
1436 * Debugger Facility polling.
1437 */
1438 if ( VM_FF_IS_SET(pVM, VM_FF_DBGF)
1439 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_DBGF) )
1440 {
1441 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1442 rc2 = DBGFR3VMMForcedAction(pVM, pVCpu);
1443 /** @todo why that VINF_EM_DBG_EVENT here? Duplicate info, should be handled
1444 * somewhere before we get here, I would think. */
1445 if (rc == VINF_EM_DBG_EVENT) /* HACK! We should've handled pending debug event. */
1446 rc = rc2;
1447 else
1448 UPDATE_RC();
1449 }
1450
1451 /*
1452 * Postponed reset request.
1453 */
1454 if (VM_FF_TEST_AND_CLEAR(pVM, VM_FF_RESET))
1455 {
1456 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1457 rc2 = VBOXSTRICTRC_TODO(VMR3ResetFF(pVM));
1458 UPDATE_RC();
1459 }
1460
1461 /*
1462 * Out of memory? Putting this after CSAM as it may in theory cause us to run out of memory.
1463 */
1464 if (VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
1465 {
1466 rc2 = PGMR3PhysAllocateHandyPages(pVM);
1467 UPDATE_RC();
1468 if (rc == VINF_EM_NO_MEMORY)
1469 return rc;
1470 }
1471
1472 /* check that we got them all */
1473 AssertCompile(VM_FF_NORMAL_PRIORITY_POST_MASK == (VM_FF_CHECK_VM_STATE | VM_FF_DBGF | VM_FF_RESET | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
1474 AssertCompile(VMCPU_FF_NORMAL_PRIORITY_POST_MASK == VMCPU_FF_DBGF);
1475 }
1476
1477 /*
1478 * Normal priority then.
1479 * (Executed in no particular order.)
1480 */
1481 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_NORMAL_PRIORITY_MASK, VM_FF_PGM_NO_MEMORY))
1482 {
1483 /*
1484 * PDM Queues are pending.
1485 */
1486 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PDM_QUEUES, VM_FF_PGM_NO_MEMORY))
1487 PDMR3QueueFlushAll(pVM);
1488
1489 /*
1490 * PDM DMA transfers are pending.
1491 */
1492 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PDM_DMA, VM_FF_PGM_NO_MEMORY))
1493 PDMR3DmaRun(pVM);
1494
1495 /*
1496 * EMT Rendezvous (make sure they are handled before the requests).
1497 */
1498 if (VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
1499 {
1500 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1501 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
1502 UPDATE_RC();
1503 /** @todo HACK ALERT! The following test is to make sure EM+TM
1504 * thinks the VM is stopped/reset before the next VM state change
1505 * is made. We need a better solution for this, or at least make it
1506 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1507 * VINF_EM_SUSPEND). */
1508 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1509 {
1510 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1511 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1512 return rc;
1513 }
1514 }
1515
1516 /*
1517 * Requests from other threads.
1518 */
1519 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_REQUEST, VM_FF_PGM_NO_MEMORY))
1520 {
1521 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1522 rc2 = VMR3ReqProcessU(pVM->pUVM, VMCPUID_ANY, false /*fPriorityOnly*/);
1523 if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE) /** @todo this shouldn't be necessary */
1524 {
1525 Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
1526 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1527 return rc2;
1528 }
1529 UPDATE_RC();
1530 /** @todo HACK ALERT! The following test is to make sure EM+TM
1531 * thinks the VM is stopped/reset before the next VM state change
1532 * is made. We need a better solution for this, or at least make it
1533 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1534 * VINF_EM_SUSPEND). */
1535 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1536 {
1537 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1538 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1539 return rc;
1540 }
1541 }
1542
1543 /* check that we got them all */
1544 AssertCompile(VM_FF_NORMAL_PRIORITY_MASK == (VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA | VM_FF_EMT_RENDEZVOUS));
1545 }
1546
1547 /*
1548 * Normal priority then. (per-VCPU)
1549 * (Executed in no particular order.)
1550 */
1551 if ( !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY)
1552 && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_NORMAL_PRIORITY_MASK))
1553 {
1554 /*
1555 * Requests from other threads.
1556 */
1557 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_REQUEST))
1558 {
1559 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1560 rc2 = VMR3ReqProcessU(pVM->pUVM, pVCpu->idCpu, false /*fPriorityOnly*/);
1561 if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE || rc2 == VINF_EM_RESET)
1562 {
1563 Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
1564 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1565 return rc2;
1566 }
1567 UPDATE_RC();
1568 /** @todo HACK ALERT! The following test is to make sure EM+TM
1569 * thinks the VM is stopped/reset before the next VM state change
1570 * is made. We need a better solution for this, or at least make it
1571 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1572 * VINF_EM_SUSPEND). */
1573 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1574 {
1575 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1576 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1577 return rc;
1578 }
1579 }
1580
1581 /* check that we got them all */
1582 Assert(!(VMCPU_FF_NORMAL_PRIORITY_MASK & ~VMCPU_FF_REQUEST));
1583 }
1584
1585 /*
1586 * High priority pre execution chunk last.
1587 * (Executed in ascending priority order.)
1588 */
1589 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_HIGH_PRIORITY_PRE_MASK)
1590 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_MASK))
1591 {
1592 /*
1593 * Timers before interrupts.
1594 */
1595 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TIMER)
1596 && !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
1597 TMR3TimerQueuesDo(pVM);
1598
1599#if !defined(VBOX_VMM_TARGET_ARMV8)
1600 /*
1601 * Pick up asynchronously posted interrupts into the APIC.
1602 */
1603 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
1604 APICUpdatePendingInterrupts(pVCpu);
1605
1606 /*
1607 * The instruction following an emulated STI should *always* be executed!
1608 *
1609 * Note! We intentionally don't clear CPUMCTX_INHIBIT_INT here if
1610 * the eip is the same as the inhibited instr address. Before we
1611 * are able to execute this instruction in raw mode (iret to
1612 * guest code) an external interrupt might force a world switch
1613 * again. Possibly allowing a guest interrupt to be dispatched
1614 * in the process. This could break the guest. Sounds very
1615          * unlikely, but such timing-sensitive problems are not as rare as
1616 * you might think.
1617 *
1618 * Note! This used to be a force action flag. Can probably ditch this code.
1619 */
1620 if ( CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
1621 && !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
1622 {
1623 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_INHIBIT_INT);
1624 if (CPUMGetGuestRIP(pVCpu) != pVCpu->cpum.GstCtx.uRipInhibitInt)
1625 {
1626 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx);
1627 Log(("Clearing CPUMCTX_INHIBIT_INT at %RGv - successor %RGv\n",
1628 (RTGCPTR)CPUMGetGuestRIP(pVCpu), (RTGCPTR)pVCpu->cpum.GstCtx.uRipInhibitInt));
1629 }
1630 else
1631 Log(("Leaving CPUMCTX_INHIBIT_INT set at %RGv\n", (RTGCPTR)CPUMGetGuestRIP(pVCpu)));
1632 }
1633
1634 /** @todo SMIs. If we implement SMIs, this is where they will have to be
1635 * delivered. */
1636
1637# ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1638 if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
1639 | VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW))
1640 {
1641 /*
1642 * VMX Nested-guest APIC-write pending (can cause VM-exits).
1643 * Takes priority over even SMI and INIT signals.
1644 * See Intel spec. 29.4.3.2 "APIC-Write Emulation".
1645 */
1646 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
1647 {
1648 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexitApicWrite(pVCpu));
1649 if (rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
1650 UPDATE_RC();
1651 }
1652
1653 /*
1654              * APIC write emulation MAY have caused a VM-exit.
1655 * If it did cause a VM-exit, there's no point checking the other VMX non-root mode FFs here.
1656 */
1657 if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
1658 {
1659 /*
1660 * VMX Nested-guest monitor-trap flag (MTF) VM-exit.
1661 * Takes priority over "Traps on the previous instruction".
1662 * See Intel spec. 6.9 "Priority Among Simultaneous Exceptions And Interrupts".
1663 */
1664 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF))
1665 {
1666 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* uExitQual */));
1667 Assert(rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE);
1668 UPDATE_RC();
1669 }
1670 /*
1671 * VMX Nested-guest preemption timer VM-exit.
1672 * Takes priority over NMI-window VM-exits.
1673 */
1674 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
1675 {
1676 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexitPreemptTimer(pVCpu));
1677 Assert(rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE);
1678 UPDATE_RC();
1679 }
1680 /*
1681 * VMX interrupt-window and NMI-window VM-exits.
1682 * Takes priority over non-maskable interrupts (NMIs) and external interrupts respectively.
1683                  * If we are in an interrupt shadow or if we are already in the process of delivering
1684 * an event then these VM-exits cannot occur.
1685 *
1686 * Interrupt shadows block NMI-window VM-exits.
1687 * Any event that is already in TRPM (e.g. injected during VM-entry) takes priority.
1688 *
1689 * See Intel spec. 25.2 "Other Causes Of VM Exits".
1690 * See Intel spec. 26.7.6 "NMI-Window Exiting".
1691 * See Intel spec. 6.7 "Nonmaskable Interrupt (NMI)".
1692 */
1693 else if ( !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
1694 && !CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx)
1695 && !TRPMHasTrap(pVCpu))
1696 {
1697 /*
1698 * VMX NMI-window VM-exit.
1699 */
1700 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
1701 && !CPUMIsGuestVmxVirtNmiBlocking(&pVCpu->cpum.GstCtx))
1702 {
1703 Assert(CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_NMI_WINDOW_EXIT));
1704 Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
1705 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* uExitQual */));
1706 AssertMsg( rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE
1707 && rc2 != VINF_VMX_VMEXIT
1708 && rc2 != VINF_NO_CHANGE, ("%Rrc\n", rc2));
1709 UPDATE_RC();
1710 }
1711 /*
1712 * VMX interrupt-window VM-exit.
1713 * This is a bit messy with the way the code below is currently structured,
1714 * but checking VMCPU_FF_INTERRUPT_NMI here (combined with CPUMAreInterruptsInhibitedByNmi
1715 * already checked at this point) should allow a pending NMI to be delivered prior to
1716 * causing an interrupt-window VM-exit.
1717 */
1718 /** @todo Restructure this later to happen after injecting NMI/causing NMI-exit, see
1719 * code in VMX R0 event delivery. */
1720 else if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
1721 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI)
1722 && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
1723 {
1724 Assert(CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INT_WINDOW_EXIT));
1725 Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
1726 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* uExitQual */));
1727 AssertMsg( rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE
1728 && rc2 != VINF_VMX_VMEXIT
1729 && rc2 != VINF_NO_CHANGE, ("%Rrc\n", rc2));
1730 UPDATE_RC();
1731 }
1732 }
1733 }
1734
1735 /*
1736 * Interrupt-window and NMI-window force flags might still be pending if we didn't actually cause
1737 * a VM-exit above. They will get cleared eventually when ANY nested-guest VM-exit occurs.
1738 * However, the force flags asserted below MUST have been cleared at this point.
1739 */
1740 Assert(!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER));
1741 }
1742# endif
1743
1744 /*
1745 * Guest event injection.
1746 */
1747 Assert(!(pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI)));
1748 bool fWakeupPending = false;
1749 if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW
1750 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_NESTED_GUEST
1751 | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
1752 && !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY)
1753 && (!rc || rc >= VINF_EM_RESCHEDULE_HM)
1754 && !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx) /* Interrupt shadows block both NMIs and interrupts. */
1755 && !TRPMHasTrap(pVCpu)) /* An event could already be scheduled for dispatching. */
1756 {
1757 if (CPUMGetGuestGif(&pVCpu->cpum.GstCtx))
1758 {
1759 bool fInVmxNonRootMode;
1760 bool fInSvmHwvirtMode;
1761 if (!CPUMIsGuestInNestedHwvirtMode(&pVCpu->cpum.GstCtx))
1762 {
1763 fInVmxNonRootMode = false;
1764 fInSvmHwvirtMode = false;
1765 }
1766 else
1767 {
1768 fInVmxNonRootMode = CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx);
1769 fInSvmHwvirtMode = CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.GstCtx);
1770 }
1771
1772 /*
1773 * NMIs (take priority over external interrupts).
1774 */
1775 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI)
1776 && !CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
1777 {
1778# ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1779 if ( fInVmxNonRootMode
1780 && CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_NMI_EXIT))
1781 {
1782 /* We MUST clear the NMI force-flag here, see @bugref{10318#c19}. */
1783 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
1784 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexitXcptNmi(pVCpu));
1785 Assert(rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE);
1786 UPDATE_RC();
1787 }
1788 else
1789# endif
1790# ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1791 if ( fInSvmHwvirtMode
1792 && CPUMIsGuestSvmCtrlInterceptSet(pVCpu, &pVCpu->cpum.GstCtx, SVM_CTRL_INTERCEPT_NMI))
1793 {
1794 rc2 = VBOXSTRICTRC_VAL(IEMExecSvmVmexit(pVCpu, SVM_EXIT_NMI, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */));
1795 AssertMsg( rc2 != VINF_SVM_VMEXIT
1796 && rc2 != VINF_NO_CHANGE, ("%Rrc\n", rc2));
1797 UPDATE_RC();
1798 }
1799 else
1800# endif
1801 {
1802 rc2 = TRPMAssertTrap(pVCpu, X86_XCPT_NMI, TRPM_TRAP);
1803 if (rc2 == VINF_SUCCESS)
1804 {
1805 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
1806 fWakeupPending = true;
1807 if (pVM->em.s.fIemExecutesAll)
1808 rc2 = VINF_EM_RESCHEDULE;
1809 else
1810 {
1811 rc2 = HMR3IsActive(pVCpu) ? VINF_EM_RESCHEDULE_HM
1812 : VM_IS_NEM_ENABLED(pVM) ? VINF_EM_RESCHEDULE
1813 : VINF_EM_RESCHEDULE_REM;
1814 }
1815 }
1816 UPDATE_RC();
1817 }
1818 }
1819# ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1820                 /** @todo NSTSVM: Handle this for SVM here too later, not when an interrupt is
1821 * actually pending like we currently do. */
1822# endif
1823 /*
1824 * External interrupts.
1825 */
1826 else
1827 {
1828 /*
1829                      * VMX: virtual interrupts take priority over physical interrupts.
1830                      * SVM: physical interrupts take priority over virtual interrupts.
1831 */
1832 if ( fInVmxNonRootMode
1833 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST)
1834 && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
1835 {
1836 /** @todo NSTVMX: virtual-interrupt delivery. */
1837 rc2 = VINF_SUCCESS;
1838 }
1839 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
1840 && CPUMIsGuestPhysIntrEnabled(pVCpu))
1841 {
1842 Assert(pVCpu->em.s.enmState != EMSTATE_WAIT_SIPI);
1843 if (fInVmxNonRootMode)
1844 rc2 = emR3VmxNstGstIntrIntercept(pVCpu);
1845 else if (fInSvmHwvirtMode)
1846 rc2 = emR3SvmNstGstIntrIntercept(pVCpu);
1847 else
1848 rc2 = VINF_NO_CHANGE;
1849
1850 if (rc2 == VINF_NO_CHANGE)
1851 {
1852 bool fInjected = false;
1853 CPUM_IMPORT_EXTRN_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
1854 /** @todo this really isn't nice, should properly handle this */
1855 /* Note! This can still cause a VM-exit (on Intel). */
1856 LogFlow(("Calling TRPMR3InjectEvent: %04x:%08RX64 efl=%#x\n",
1857 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eflags));
1858 rc2 = TRPMR3InjectEvent(pVM, pVCpu, TRPM_HARDWARE_INT, &fInjected);
1859 fWakeupPending = true;
1860 if ( pVM->em.s.fIemExecutesAll
1861 && ( rc2 == VINF_EM_RESCHEDULE_REM
1862 || rc2 == VINF_EM_RESCHEDULE_HM
1863 || rc2 == VINF_EM_RESCHEDULE_RAW))
1864 {
1865 rc2 = VINF_EM_RESCHEDULE;
1866 }
1867# ifdef VBOX_STRICT
1868 if (fInjected)
1869 rcIrq = rc2;
1870# endif
1871 }
1872 UPDATE_RC();
1873 }
1874 else if ( fInSvmHwvirtMode
1875 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST)
1876 && CPUMIsGuestSvmVirtIntrEnabled(pVCpu, &pVCpu->cpum.GstCtx))
1877 {
1878 rc2 = emR3SvmNstGstVirtIntrIntercept(pVCpu);
1879 if (rc2 == VINF_NO_CHANGE)
1880 {
1881 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST);
1882 uint8_t const uNstGstVector = CPUMGetGuestSvmVirtIntrVector(&pVCpu->cpum.GstCtx);
1883 AssertMsg(uNstGstVector > 0 && uNstGstVector <= X86_XCPT_LAST, ("Invalid VINTR %#x\n", uNstGstVector));
1884 TRPMAssertTrap(pVCpu, uNstGstVector, TRPM_HARDWARE_INT);
1885 Log(("EM: Asserting nested-guest virt. hardware intr: %#x\n", uNstGstVector));
1886 rc2 = VINF_EM_RESCHEDULE;
1887# ifdef VBOX_STRICT
1888 rcIrq = rc2;
1889# endif
1890 }
1891 UPDATE_RC();
1892 }
1893 }
1894 } /* CPUMGetGuestGif */
1895 }
1896
1897#else /* VBOX_VMM_TARGET_ARMV8 */
1898 bool fWakeupPending = false;
1899
1900 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VTIMER_ACTIVATED))
1901 {
1902 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_VTIMER_ACTIVATED);
1903
1904 fWakeupPending = true;
1905 if (pVM->em.s.fIemExecutesAll)
1906 rc2 = VINF_EM_RESCHEDULE;
1907 else
1908 rc2 = HMR3IsActive(pVCpu) ? VINF_EM_RESCHEDULE_HM
1909 : VM_IS_NEM_ENABLED(pVM) ? VINF_EM_RESCHEDULE
1910 : VINF_EM_RESCHEDULE_REM;
1911 }
1912#endif /* VBOX_VMM_TARGET_ARMV8 */
1913
1914 /*
1915 * Allocate handy pages.
1916 */
1917 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PGM_NEED_HANDY_PAGES, VM_FF_PGM_NO_MEMORY))
1918 {
1919 rc2 = PGMR3PhysAllocateHandyPages(pVM);
1920 UPDATE_RC();
1921 }
1922
1923 /*
1924 * Debugger Facility request.
1925 */
1926 if ( ( VM_FF_IS_SET(pVM, VM_FF_DBGF)
1927 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_DBGF) )
1928 && !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY) )
1929 {
1930 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1931 rc2 = DBGFR3VMMForcedAction(pVM, pVCpu);
1932 UPDATE_RC();
1933 }
1934
1935 /*
1936 * EMT Rendezvous (must be serviced before termination).
1937 */
1938 if ( !fWakeupPending /* don't miss the wakeup from EMSTATE_HALTED! */
1939 && VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
1940 {
1941 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1942 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
1943 UPDATE_RC();
1944 /** @todo HACK ALERT! The following test is to make sure EM+TM thinks the VM is
1945 * stopped/reset before the next VM state change is made. We need a better
1946 * solution for this, or at least make it possible to do: (rc >= VINF_EM_FIRST
1947          * && rc <= VINF_EM_SUSPEND). */
1948 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1949 {
1950 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1951 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1952 return rc;
1953 }
1954 }
1955
1956 /*
1957 * State change request (cleared by vmR3SetStateLocked).
1958 */
1959 if ( !fWakeupPending /* don't miss the wakeup from EMSTATE_HALTED! */
1960 && VM_FF_IS_SET(pVM, VM_FF_CHECK_VM_STATE))
1961 {
1962 VMSTATE enmState = VMR3GetState(pVM);
1963 switch (enmState)
1964 {
1965 case VMSTATE_FATAL_ERROR:
1966 case VMSTATE_FATAL_ERROR_LS:
1967 case VMSTATE_GURU_MEDITATION:
1968 case VMSTATE_GURU_MEDITATION_LS:
1969 Log2(("emR3ForcedActions: %s -> VINF_EM_SUSPEND\n", VMGetStateName(enmState) ));
1970 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1971 return VINF_EM_SUSPEND;
1972
1973 case VMSTATE_DESTROYING:
1974 Log2(("emR3ForcedActions: %s -> VINF_EM_TERMINATE\n", VMGetStateName(enmState) ));
1975 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1976 return VINF_EM_TERMINATE;
1977
1978 default:
1979 AssertMsgFailed(("%s\n", VMGetStateName(enmState)));
1980 }
1981 }
1982
1983 /*
1984 * Out of memory? Since most of our fellow high priority actions may cause us
1985 * to run out of memory, we're employing VM_FF_IS_PENDING_EXCEPT and putting this
1986 * at the end rather than the start. Also, VM_FF_TERMINATE has higher priority
1987 * than us since we can terminate without allocating more memory.
1988 */
1989 if (VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
1990 {
1991 rc2 = PGMR3PhysAllocateHandyPages(pVM);
1992 UPDATE_RC();
1993 if (rc == VINF_EM_NO_MEMORY)
1994 return rc;
1995 }
1996
1997 /*
1998 * If the virtual sync clock is still stopped, make TM restart it.
1999 */
2000 if (VM_FF_IS_SET(pVM, VM_FF_TM_VIRTUAL_SYNC))
2001 TMR3VirtualSyncFF(pVM, pVCpu);
2002
2003#ifdef DEBUG
2004 /*
2005 * Debug, pause the VM.
2006 */
2007 if (VM_FF_IS_SET(pVM, VM_FF_DEBUG_SUSPEND))
2008 {
2009 VM_FF_CLEAR(pVM, VM_FF_DEBUG_SUSPEND);
2010 Log(("emR3ForcedActions: returns VINF_EM_SUSPEND\n"));
2011 return VINF_EM_SUSPEND;
2012 }
2013#endif
2014
2015 /* check that we got them all */
2016 AssertCompile(VM_FF_HIGH_PRIORITY_PRE_MASK == (VM_FF_TM_VIRTUAL_SYNC | VM_FF_DBGF | VM_FF_CHECK_VM_STATE | VM_FF_DEBUG_SUSPEND | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
2017#if defined(VBOX_VMM_TARGET_ARMV8)
2018 AssertCompile(VMCPU_FF_HIGH_PRIORITY_PRE_MASK == (VMCPU_FF_TIMER | VMCPU_FF_INTERRUPT_IRQ | VMCPU_FF_INTERRUPT_FIQ | VMCPU_FF_DBGF));
2019#else
2020 AssertCompile(VMCPU_FF_HIGH_PRIORITY_PRE_MASK == (VMCPU_FF_TIMER | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_DBGF | VMCPU_FF_INTERRUPT_NESTED_GUEST | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_PREEMPT_TIMER | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW));
2021#endif
2022 }
2023
2024#undef UPDATE_RC
2025 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2026 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2027 Assert(rcIrq == VINF_SUCCESS || rcIrq == rc);
2028 return rc;
2029}
2030
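/*
 * Illustrative sketch, not part of the original source: the forced-action
 * processing above repeatedly folds each handler status (rc2) into the
 * accumulated status (rc) via the UPDATE_RC() macro defined earlier in
 * emR3ForcedActions (not shown here).  A minimal version of that merge,
 * assuming the usual VBox convention that lower non-zero VINF_EM_* values
 * carry higher scheduling priority, could look like the helper below; the
 * real macro additionally asserts on unexpected status codes.
 */
#if 0 /* reading aid only */
DECLINLINE(int) emR3MergeRcSketch(int rcAccum, int rcNew)
{
    if (rcNew == VINF_SUCCESS)      /* nothing new to fold in */
        return rcAccum;
    if (rcAccum == VINF_SUCCESS)    /* first interesting status wins */
        return rcNew;
    return RT_MIN(rcAccum, rcNew);  /* lower VINF_EM_* value = higher priority */
}
#endif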
2031
2032/**
2033 * Check if the preset execution time cap restricts guest execution scheduling.
2034 *
2035 * @returns true if allowed, false otherwise
2036 * @param pVM The cross context VM structure.
2037 * @param pVCpu The cross context virtual CPU structure.
2038 */
2039bool emR3IsExecutionAllowedSlow(PVM pVM, PVMCPU pVCpu)
2040{
2041 Assert(pVM->uCpuExecutionCap != 100);
2042 uint64_t cMsUserTime;
2043 uint64_t cMsKernelTime;
2044 if (RT_SUCCESS(RTThreadGetExecutionTimeMilli(&cMsKernelTime, &cMsUserTime)))
2045 {
2046 uint64_t const msTimeNow = RTTimeMilliTS();
2047 if (pVCpu->em.s.msTimeSliceStart + EM_TIME_SLICE < msTimeNow)
2048 {
2049 /* New time slice. */
2050 pVCpu->em.s.msTimeSliceStart = msTimeNow;
2051 pVCpu->em.s.cMsTimeSliceStartExec = cMsKernelTime + cMsUserTime;
2052 pVCpu->em.s.cMsTimeSliceExec = 0;
2053 }
2054 pVCpu->em.s.cMsTimeSliceExec = cMsKernelTime + cMsUserTime - pVCpu->em.s.cMsTimeSliceStartExec;
2055
2056 bool const fRet = pVCpu->em.s.cMsTimeSliceExec < (EM_TIME_SLICE * pVM->uCpuExecutionCap) / 100;
2057 Log2(("emR3IsExecutionAllowed: start=%RX64 startexec=%RX64 exec=%RX64 (cap=%x)\n", pVCpu->em.s.msTimeSliceStart,
2058 pVCpu->em.s.cMsTimeSliceStartExec, pVCpu->em.s.cMsTimeSliceExec, (EM_TIME_SLICE * pVM->uCpuExecutionCap) / 100));
2059 return fRet;
2060 }
2061 return true;
2062}
2063
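/*
 * Illustrative numbers, not part of the original source: with an execution
 * cap of uCpuExecutionCap = 50 and a slice length of EM_TIME_SLICE = 100 ms
 * (the constant lives in EM's internal header; 100 ms is an assumption
 * here), the EMT above may accumulate at most 100 * 50 / 100 = 50 ms of
 * combined kernel+user thread time per slice.  Once cMsTimeSliceExec
 * reaches that budget, emR3IsExecutionAllowedSlow() returns false until
 * msTimeSliceStart + EM_TIME_SLICE has passed and a new slice begins.
 */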
2064
2065/**
2066 * Execute VM.
2067 *
2068 * This function is the main loop of the VM. The emulation thread
2069 * calls this function when the VM has been successfully constructed
2070 * and we're ready to execute the VM.
2071 *
2072 * Returning from this function means that the VM is turned off or
2073 * suspended (state already saved) and deconstruction is next in line.
2074 *
2075 * All interaction from other threads is done using forced actions
2076 * and signalling of the wait object.
2077 *
2078 * @returns VBox status code, informational status codes may indicate failure.
2079 * @param pVM The cross context VM structure.
2080 * @param pVCpu The cross context virtual CPU structure.
2081 */
2082VMMR3_INT_DECL(int) EMR3ExecuteVM(PVM pVM, PVMCPU pVCpu)
2083{
2084 Log(("EMR3ExecuteVM: pVM=%p enmVMState=%d (%s) enmState=%d (%s) enmPrevState=%d (%s)\n",
2085 pVM,
2086 pVM->enmVMState, VMR3GetStateName(pVM->enmVMState),
2087 pVCpu->em.s.enmState, emR3GetStateName(pVCpu->em.s.enmState),
2088 pVCpu->em.s.enmPrevState, emR3GetStateName(pVCpu->em.s.enmPrevState) ));
2089 VM_ASSERT_EMT(pVM);
2090 AssertMsg( pVCpu->em.s.enmState == EMSTATE_NONE
2091 || pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI
2092 || pVCpu->em.s.enmState == EMSTATE_SUSPENDED,
2093 ("%s\n", emR3GetStateName(pVCpu->em.s.enmState)));
2094
2095 int rc = setjmp(pVCpu->em.s.u.FatalLongJump);
2096 if (rc == 0)
2097 {
2098 /*
2099 * Start the virtual time.
2100 */
2101 TMR3NotifyResume(pVM, pVCpu);
2102
2103 /*
2104 * The Outer Main Loop.
2105 */
2106 bool fFFDone = false;
2107
2108 /* Reschedule right away to start in the right state. */
2109 rc = VINF_SUCCESS;
2110
2111         /* If resuming after a pause or a state load, restore the previous halted or
2112            wait-for-SIPI state so we don't start executing code right away. Otherwise, just reschedule. */
2113 if ( pVCpu->em.s.enmState == EMSTATE_SUSPENDED
2114 && ( pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
2115 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED))
2116 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
2117 else
2118 pVCpu->em.s.enmState = emR3Reschedule(pVM, pVCpu);
2119 Log(("EMR3ExecuteVM: enmState=%s\n", emR3GetStateName(pVCpu->em.s.enmState)));
2120
2121 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2122 for (;;)
2123 {
2124 /*
2125 * Before we can schedule anything (we're here because
2126 * scheduling is required) we must service any pending
2127 * forced actions to avoid any pending action causing
2128              * immediate rescheduling upon entering an inner loop.
2129 *
2130 * Do forced actions.
2131 */
2132 if ( !fFFDone
2133 && RT_SUCCESS(rc)
2134 && rc != VINF_EM_TERMINATE
2135 && rc != VINF_EM_OFF
2136 && ( VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_REM_MASK)
2137 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_ALL_REM_MASK & ~VMCPU_FF_UNHALT)))
2138 {
2139 rc = emR3ForcedActions(pVM, pVCpu, rc);
2140 VBOXVMM_EM_FF_ALL_RET(pVCpu, rc);
2141 }
2142 else if (fFFDone)
2143 fFFDone = false;
2144
2145#if defined(VBOX_STRICT) && !defined(VBOX_VMM_TARGET_ARMV8)
2146 CPUMAssertGuestRFlagsCookie(pVM, pVCpu);
2147#endif
2148
2149 /*
2150 * Now what to do?
2151 */
2152 Log2(("EMR3ExecuteVM: rc=%Rrc\n", rc));
2153 EMSTATE const enmOldState = pVCpu->em.s.enmState;
2154 switch (rc)
2155 {
2156 /*
2157 * Keep doing what we're currently doing.
2158 */
2159 case VINF_SUCCESS:
2160 break;
2161
2162 /*
2163 * Reschedule - to raw-mode execution.
2164 */
2165/** @todo r=bird: consider merging VINF_EM_RESCHEDULE_RAW with VINF_EM_RESCHEDULE_HM, they serve the same purpose here at least. */
2166 case VINF_EM_RESCHEDULE_RAW:
2167 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2168 AssertLogRelFailed();
2169 pVCpu->em.s.enmState = EMSTATE_NONE;
2170 break;
2171
2172 /*
2173 * Reschedule - to HM or NEM.
2174 */
2175 case VINF_EM_RESCHEDULE_HM:
2176 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2177#if !defined(VBOX_VMM_TARGET_ARMV8)
2178 if (VM_IS_HM_ENABLED(pVM))
2179 {
2180 if (HMCanExecuteGuest(pVM, pVCpu, &pVCpu->cpum.GstCtx))
2181 {
2182 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_HM: %d -> %d (EMSTATE_HM)\n", enmOldState, EMSTATE_HM));
2183 pVCpu->em.s.enmState = EMSTATE_HM;
2184 }
2185 else
2186 {
2187 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_HM: %d -> %d (EMSTATE_RECOMPILER)\n", enmOldState, EMSTATE_RECOMPILER));
2188 pVCpu->em.s.enmState = EMSTATE_RECOMPILER;
2189 }
2190 }
2191 else
2192#endif
2193 if (VM_IS_NEM_ENABLED(pVM))
2194 {
2195 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_HM: %d -> %d (EMSTATE_NEM)\n", enmOldState, EMSTATE_NEM));
2196 pVCpu->em.s.enmState = EMSTATE_NEM;
2197 }
2198 else
2199 {
2200 AssertLogRelFailed();
2201 pVCpu->em.s.enmState = EMSTATE_NONE;
2202 }
2203 break;
2204
2205 /*
2206 * Reschedule - to recompiled execution.
2207 */
2208 case VINF_EM_RESCHEDULE_REM:
2209 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2210 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_REM: %d -> %d (EMSTATE_REM)\n",
2211 enmOldState, EMSTATE_RECOMPILER));
2212 pVCpu->em.s.enmState = EMSTATE_RECOMPILER;
2213 break;
2214
2215 /*
2216 * Resume.
2217 */
2218 case VINF_EM_RESUME:
2219 Log2(("EMR3ExecuteVM: VINF_EM_RESUME: %d -> VINF_EM_RESCHEDULE\n", enmOldState));
2220 /* Don't reschedule in the halted or wait for SIPI case. */
2221 if ( pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
2222 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED)
2223 {
2224 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
2225 break;
2226 }
2227 /* fall through and get scheduled. */
2228 RT_FALL_THRU();
2229
2230 /*
2231 * Reschedule.
2232 */
2233 case VINF_EM_RESCHEDULE:
2234 {
2235 EMSTATE enmState = emR3Reschedule(pVM, pVCpu);
2236 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE: %d -> %d (%s)\n", enmOldState, enmState, emR3GetStateName(enmState)));
2237 pVCpu->em.s.enmState = enmState;
2238 break;
2239 }
2240
2241 /*
2242 * Halted.
2243 */
2244 case VINF_EM_HALT:
2245 Log2(("EMR3ExecuteVM: VINF_EM_HALT: %d -> %d\n", enmOldState, EMSTATE_HALTED));
2246 pVCpu->em.s.enmState = EMSTATE_HALTED;
2247 break;
2248
2249 /*
2250 * Switch to the wait for SIPI state (application processor only)
2251 */
2252 case VINF_EM_WAIT_SIPI:
2253 Assert(pVCpu->idCpu != 0);
2254 Log2(("EMR3ExecuteVM: VINF_EM_WAIT_SIPI: %d -> %d\n", enmOldState, EMSTATE_WAIT_SIPI));
2255 pVCpu->em.s.enmState = EMSTATE_WAIT_SIPI;
2256 break;
2257
2258
2259 /*
2260 * Suspend.
2261 */
2262 case VINF_EM_SUSPEND:
2263 Log2(("EMR3ExecuteVM: VINF_EM_SUSPEND: %d -> %d\n", enmOldState, EMSTATE_SUSPENDED));
2264 Assert(enmOldState != EMSTATE_SUSPENDED);
2265 pVCpu->em.s.enmPrevState = enmOldState;
2266 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
2267 break;
2268
2269 /*
2270 * Reset.
2271                  * We might end up doing a double reset for now; we'll have to clean up the mess later.
2272 */
2273 case VINF_EM_RESET:
2274 {
2275 if (pVCpu->idCpu == 0)
2276 {
2277 EMSTATE enmState = emR3Reschedule(pVM, pVCpu);
2278 Log2(("EMR3ExecuteVM: VINF_EM_RESET: %d -> %d (%s)\n", enmOldState, enmState, emR3GetStateName(enmState)));
2279 pVCpu->em.s.enmState = enmState;
2280 }
2281 else
2282 {
2283 /* All other VCPUs go into the wait for SIPI state. */
2284 pVCpu->em.s.enmState = EMSTATE_WAIT_SIPI;
2285 }
2286 break;
2287 }
2288
2289 /*
2290 * Power Off.
2291 */
2292 case VINF_EM_OFF:
2293 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2294 Log2(("EMR3ExecuteVM: returns VINF_EM_OFF (%d -> %d)\n", enmOldState, EMSTATE_TERMINATING));
2295 TMR3NotifySuspend(pVM, pVCpu);
2296 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2297 return rc;
2298
2299 /*
2300 * Terminate the VM.
2301 */
2302 case VINF_EM_TERMINATE:
2303 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2304 Log(("EMR3ExecuteVM returns VINF_EM_TERMINATE (%d -> %d)\n", enmOldState, EMSTATE_TERMINATING));
2305 if (pVM->enmVMState < VMSTATE_DESTROYING) /* ugly */
2306 TMR3NotifySuspend(pVM, pVCpu);
2307 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2308 return rc;
2309
2310
2311 /*
2312 * Out of memory, suspend the VM and stuff.
2313 */
2314 case VINF_EM_NO_MEMORY:
2315 Log2(("EMR3ExecuteVM: VINF_EM_NO_MEMORY: %d -> %d\n", enmOldState, EMSTATE_SUSPENDED));
2316 Assert(enmOldState != EMSTATE_SUSPENDED);
2317 pVCpu->em.s.enmPrevState = enmOldState;
2318 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
2319 TMR3NotifySuspend(pVM, pVCpu);
2320 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2321
2322 rc = VMSetRuntimeError(pVM, VMSETRTERR_FLAGS_SUSPEND, "HostMemoryLow",
2323 N_("Unable to allocate and lock memory. The virtual machine will be paused. Please close applications to free up memory or close the VM"));
2324 if (rc != VINF_EM_SUSPEND)
2325 {
2326 if (RT_SUCCESS_NP(rc))
2327 {
2328 AssertLogRelMsgFailed(("%Rrc\n", rc));
2329 rc = VERR_EM_INTERNAL_ERROR;
2330 }
2331 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2332 }
2333 return rc;
2334
2335 /*
2336 * Guest debug events.
2337 */
2338 case VINF_EM_DBG_STEPPED:
2339 case VINF_EM_DBG_STOP:
2340 case VINF_EM_DBG_EVENT:
2341 case VINF_EM_DBG_BREAKPOINT:
2342 case VINF_EM_DBG_STEP:
2343 if (enmOldState == EMSTATE_HM)
2344 {
2345 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_HM));
2346 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_HM;
2347 }
2348 else if (enmOldState == EMSTATE_NEM)
2349 {
2350 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_NEM));
2351 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_NEM;
2352 }
2353 else if (enmOldState == EMSTATE_RECOMPILER)
2354 {
2355 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_RECOMPILER));
2356 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_RECOMPILER;
2357 }
2358 else
2359 {
2360 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_IEM));
2361 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_IEM;
2362 }
2363 break;
2364
2365 /*
2366 * Hypervisor debug events.
2367 */
2368 case VINF_EM_DBG_HYPER_STEPPED:
2369 case VINF_EM_DBG_HYPER_BREAKPOINT:
2370 case VINF_EM_DBG_HYPER_ASSERTION:
2371 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_HYPER));
2372 pVCpu->em.s.enmState = EMSTATE_DEBUG_HYPER;
2373 break;
2374
2375 /*
2376 * Triple fault.
2377 */
2378 case VINF_EM_TRIPLE_FAULT:
2379 if (!pVM->em.s.fGuruOnTripleFault)
2380 {
2381 Log(("EMR3ExecuteVM: VINF_EM_TRIPLE_FAULT: CPU reset...\n"));
2382 rc = VBOXSTRICTRC_TODO(VMR3ResetTripleFault(pVM));
2383 Log2(("EMR3ExecuteVM: VINF_EM_TRIPLE_FAULT: %d -> %d (rc=%Rrc)\n", enmOldState, pVCpu->em.s.enmState, rc));
2384 continue;
2385 }
2386 /* Else fall through and trigger a guru. */
2387 RT_FALL_THRU();
2388
2389 case VERR_VMM_RING0_ASSERTION:
2390 Log(("EMR3ExecuteVM: %Rrc: %d -> %d (EMSTATE_GURU_MEDITATION)\n", rc, enmOldState, EMSTATE_GURU_MEDITATION));
2391 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2392 break;
2393
2394 /*
2395 * Any error code showing up here other than the ones we
2396 * know and process above are considered to be FATAL.
2397 *
2398 * Unknown warnings and informational status codes are also
2399 * included in this.
2400 */
2401 default:
2402 if (RT_SUCCESS_NP(rc))
2403 {
2404 AssertMsgFailed(("Unexpected warning or informational status code %Rra!\n", rc));
2405 rc = VERR_EM_INTERNAL_ERROR;
2406 }
2407 Log(("EMR3ExecuteVM: %Rrc: %d -> %d (EMSTATE_GURU_MEDITATION)\n", rc, enmOldState, EMSTATE_GURU_MEDITATION));
2408 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2409 break;
2410 }
2411
2412 /*
2413 * Act on state transition.
2414 */
2415 EMSTATE const enmNewState = pVCpu->em.s.enmState;
2416 if (enmOldState != enmNewState)
2417 {
2418 VBOXVMM_EM_STATE_CHANGED(pVCpu, enmOldState, enmNewState, rc);
2419
2420 /* Clear MWait flags and the unhalt FF. */
2421 if ( enmOldState == EMSTATE_HALTED
2422 && ( (pVCpu->em.s.MWait.fWait & EMMWAIT_FLAG_ACTIVE)
2423 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_UNHALT))
2424 && ( enmNewState == EMSTATE_HM
2425 || enmNewState == EMSTATE_NEM
2426 || enmNewState == EMSTATE_RECOMPILER
2427 || enmNewState == EMSTATE_DEBUG_GUEST_HM
2428 || enmNewState == EMSTATE_DEBUG_GUEST_NEM
2429 || enmNewState == EMSTATE_DEBUG_GUEST_IEM
2430 || enmNewState == EMSTATE_DEBUG_GUEST_RECOMPILER) )
2431 {
2432 if (pVCpu->em.s.MWait.fWait & EMMWAIT_FLAG_ACTIVE)
2433 {
2434 LogFlow(("EMR3ExecuteVM: Clearing MWAIT\n"));
2435 pVCpu->em.s.MWait.fWait &= ~(EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0);
2436 }
2437 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_UNHALT))
2438 {
2439 LogFlow(("EMR3ExecuteVM: Clearing UNHALT\n"));
2440 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_UNHALT);
2441 }
2442 }
2443 }
2444 else
2445 VBOXVMM_EM_STATE_UNCHANGED(pVCpu, enmNewState, rc);
2446
2447 STAM_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x); /* (skip this in release) */
2448 STAM_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2449
2450 /*
2451 * Act on the new state.
2452 */
2453 switch (enmNewState)
2454 {
2455 /*
2456 * Execute hardware accelerated raw.
2457 */
2458 case EMSTATE_HM:
2459#if defined(VBOX_VMM_TARGET_ARMV8)
2460 AssertReleaseFailed(); /* Should never get here. */
2461#else
2462 rc = emR3HmExecute(pVM, pVCpu, &fFFDone);
2463#endif
2464 break;
2465
2466 /*
2467              * Execute using the native execution manager (NEM).
2468 */
2469 case EMSTATE_NEM:
2470 rc = VBOXSTRICTRC_TODO(emR3NemExecute(pVM, pVCpu, &fFFDone));
2471 break;
2472
2473 /*
2474 * Execute recompiled.
2475 */
2476 case EMSTATE_RECOMPILER:
2477 rc = VBOXSTRICTRC_TODO(emR3RecompilerExecute(pVM, pVCpu, &fFFDone));
2478 Log2(("EMR3ExecuteVM: emR3RecompilerExecute -> %Rrc\n", rc));
2479 break;
2480
2481 /*
2482 * Execute in the interpreter.
2483 */
2484 case EMSTATE_IEM:
2485 {
2486#if 0 /* For comparing HM and IEM (@bugref{10464}). */
2487 PCPUMCTX const pCtx = &pVCpu->cpum.GstCtx;
2488 PCX86FXSTATE const pX87 = &pCtx->XState.x87;
2489 Log11(("eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
2490 "eip=%08x esp=%08x ebp=%08x eflags=%08x\n"
2491 "cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x\n"
2492 "fsw=%04x fcw=%04x ftw=%02x top=%u%s%s%s%s%s%s%s%s%s\n"
2493 "st0=%.10Rhxs st1=%.10Rhxs st2=%.10Rhxs st3=%.10Rhxs\n"
2494 "st4=%.10Rhxs st5=%.10Rhxs st6=%.10Rhxs st7=%.10Rhxs\n",
2495                    pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
2496 pCtx->eip, pCtx->esp, pCtx->ebp, pCtx->eflags.u,
2497 pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ds.Sel, pCtx->es.Sel, pCtx->fs.Sel, pCtx->gs.Sel,
2498 pX87->FSW, pX87->FCW, pX87->FTW, X86_FSW_TOP_GET(pX87->FSW),
2499 pX87->FSW & X86_FSW_ES ? " ES!" : "",
2500 pX87->FSW & X86_FSW_IE ? " IE" : "",
2501 pX87->FSW & X86_FSW_DE ? " DE" : "",
2502 pX87->FSW & X86_FSW_SF ? " SF" : "",
2503 pX87->FSW & X86_FSW_B ? " B!" : "",
2504 pX87->FSW & X86_FSW_C0 ? " C0" : "",
2505 pX87->FSW & X86_FSW_C1 ? " C1" : "",
2506 pX87->FSW & X86_FSW_C2 ? " C2" : "",
2507 pX87->FSW & X86_FSW_C3 ? " C3" : "",
2508 &pX87->aRegs[/*X86_FSW_TOP_GET_ST(pVCpu->cpum.GstCtx.XState.x87.FSW,*/(0)],
2509 &pX87->aRegs[/*X86_FSW_TOP_GET_ST(pVCpu->cpum.GstCtx.XState.x87.FSW,*/(1)],
2510 &pX87->aRegs[/*X86_FSW_TOP_GET_ST(pVCpu->cpum.GstCtx.XState.x87.FSW,*/(2)],
2511 &pX87->aRegs[/*X86_FSW_TOP_GET_ST(pVCpu->cpum.GstCtx.XState.x87.FSW,*/(3)],
2512 &pX87->aRegs[/*X86_FSW_TOP_GET_ST(pVCpu->cpum.GstCtx.XState.x87.FSW,*/(4)],
2513 &pX87->aRegs[/*X86_FSW_TOP_GET_ST(pVCpu->cpum.GstCtx.XState.x87.FSW,*/(5)],
2514 &pX87->aRegs[/*X86_FSW_TOP_GET_ST(pVCpu->cpum.GstCtx.XState.x87.FSW,*/(6)],
2515 &pX87->aRegs[/*X86_FSW_TOP_GET_ST(pVCpu->cpum.GstCtx.XState.x87.FSW,*/(7)]));
2516 DBGFR3DisasInstrCurrentLogInternal(pVCpu, NULL);
2517#endif
2518
2519 uint32_t cInstructions = 0;
2520#if 0 /* For testing purposes. */
2521 //STAM_PROFILE_START(&pVCpu->em.s.StatHmExec, x1);
2522 rc = VBOXSTRICTRC_TODO(EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE));
2523 //STAM_PROFILE_STOP(&pVCpu->em.s.StatHmExec, x1);
2524 if (rc == VINF_EM_DBG_STEPPED || rc == VINF_EM_RESCHEDULE_HM || rc == VINF_EM_RESCHEDULE_REM || rc == VINF_EM_RESCHEDULE_RAW)
2525 rc = VINF_SUCCESS;
2526 else if (rc == VERR_EM_CANNOT_EXEC_GUEST)
2527#endif
2528 rc = VBOXSTRICTRC_TODO(IEMExecLots(pVCpu, 4096 /*cMaxInstructions*/, 2047 /*cPollRate*/, &cInstructions));
2529 if (pVM->em.s.fIemExecutesAll)
2530 {
2531 Assert(rc != VINF_EM_RESCHEDULE_REM);
2532 Assert(rc != VINF_EM_RESCHEDULE_RAW);
2533 Assert(rc != VINF_EM_RESCHEDULE_HM);
2534#ifdef VBOX_HIGH_RES_TIMERS_HACK
2535 if (cInstructions < 2048)
2536 TMTimerPollVoid(pVM, pVCpu);
2537#endif
2538 }
2539 fFFDone = false;
2540 break;
2541 }
2542
2543 /*
2544 * Application processor execution halted until SIPI.
2545 */
2546 case EMSTATE_WAIT_SIPI:
2547 /* no break */
2548 /*
2549 * hlt - execution halted until interrupt.
2550 */
2551 case EMSTATE_HALTED:
2552 {
2553 STAM_REL_PROFILE_START(&pVCpu->em.s.StatHalted, y);
2554                 /* If HM (or someone else) stores a pending interrupt in
2555                    TRPM, it must be dispatched ASAP without any halting.
2556                    Anything pending in TRPM has been accepted and the CPU
2557                    should already be in the right state to receive it. */
2558 if (TRPMHasTrap(pVCpu))
2559 rc = VINF_EM_RESCHEDULE;
2560#if !defined(VBOX_VMM_TARGET_ARMV8)
2561 /* MWAIT has a special extension where it's woken up when
2562 an interrupt is pending even when IF=0. */
2563 else if ( (pVCpu->em.s.MWait.fWait & (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
2564 == (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
2565 {
2566 rc = VMR3WaitHalted(pVM, pVCpu, 0 /*fFlags*/);
2567 if (rc == VINF_SUCCESS)
2568 {
2569 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
2570 APICUpdatePendingInterrupts(pVCpu);
2571
2572 if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC
2573 | VMCPU_FF_INTERRUPT_NESTED_GUEST
2574 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT))
2575 {
2576 Log(("EMR3ExecuteVM: Triggering reschedule on pending IRQ after MWAIT\n"));
2577 rc = VINF_EM_RESCHEDULE;
2578 }
2579
2580 }
2581 }
2582#endif
2583 else
2584 {
2585#if defined(VBOX_VMM_TARGET_ARMV8)
2586 const uint32_t fWaitHalted = 0; /* WFI/WFE always return when an interrupt happens. */
2587#else
2588 const uint32_t fWaitHalted = (CPUMGetGuestEFlags(pVCpu) & X86_EFL_IF) ? 0 : VMWAITHALTED_F_IGNORE_IRQS;
2589#endif
2590 rc = VMR3WaitHalted(pVM, pVCpu, fWaitHalted);
2591 /* We're only interested in NMI/SMIs here which have their own FFs, so we don't need to
2592 check VMCPU_FF_UPDATE_APIC here. */
2593 if ( rc == VINF_SUCCESS
2594#if defined(VBOX_VMM_TARGET_ARMV8)
2595 && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_VTIMER_ACTIVATED
2596 | VMCPU_FF_INTERRUPT_FIQ | VMCPU_FF_INTERRUPT_IRQ)
2597#else
2598 && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT)
2599#endif
2600 )
2601 {
2602 Log(("EMR3ExecuteVM: Triggering reschedule on pending NMI/SMI/UNHALT after HLT\n"));
2603 rc = VINF_EM_RESCHEDULE;
2604 }
2605 }
2606
2607 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatHalted, y);
2608 break;
2609 }
2610
2611 /*
2612 * Suspended - return to VM.cpp.
2613 */
2614 case EMSTATE_SUSPENDED:
2615 TMR3NotifySuspend(pVM, pVCpu);
2616 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2617 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2618 return VINF_EM_SUSPEND;
2619
2620 /*
2621 * Debugging in the guest.
2622 */
2623 case EMSTATE_DEBUG_GUEST_RAW:
2624 case EMSTATE_DEBUG_GUEST_HM:
2625 case EMSTATE_DEBUG_GUEST_NEM:
2626 case EMSTATE_DEBUG_GUEST_IEM:
2627 case EMSTATE_DEBUG_GUEST_RECOMPILER:
2628 TMR3NotifySuspend(pVM, pVCpu);
2629 rc = VBOXSTRICTRC_TODO(emR3Debug(pVM, pVCpu, rc));
2630 TMR3NotifyResume(pVM, pVCpu);
2631 Log2(("EMR3ExecuteVM: emR3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
2632 break;
2633
2634 /*
2635 * Debugging in the hypervisor.
2636 */
2637 case EMSTATE_DEBUG_HYPER:
2638 {
2639 TMR3NotifySuspend(pVM, pVCpu);
2640 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2641
2642 rc = VBOXSTRICTRC_TODO(emR3Debug(pVM, pVCpu, rc));
2643 Log2(("EMR3ExecuteVM: emR3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
2644 if (rc != VINF_SUCCESS)
2645 {
2646 if (rc == VINF_EM_OFF || rc == VINF_EM_TERMINATE)
2647 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2648 else
2649 {
2650 /* switch to guru meditation mode */
2651 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2652 VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
2653 VMMR3FatalDump(pVM, pVCpu, rc);
2654 }
2655 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2656 return rc;
2657 }
2658
2659 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2660 TMR3NotifyResume(pVM, pVCpu);
2661 break;
2662 }
2663
2664 /*
2665 * Guru meditation takes place in the debugger.
2666 */
2667 case EMSTATE_GURU_MEDITATION:
2668 {
2669 TMR3NotifySuspend(pVM, pVCpu);
2670 VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
2671 VMMR3FatalDump(pVM, pVCpu, rc);
2672 emR3Debug(pVM, pVCpu, rc);
2673 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2674 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2675 return rc;
2676 }
2677
2678 /*
2679 * The states we don't expect here.
2680 */
2681 case EMSTATE_NONE:
2682 case EMSTATE_RAW_OBSOLETE:
2683 case EMSTATE_IEM_THEN_REM_OBSOLETE:
2684 case EMSTATE_TERMINATING:
2685 default:
2686 AssertMsgFailed(("EMR3ExecuteVM: Invalid state %d!\n", pVCpu->em.s.enmState));
2687 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2688 TMR3NotifySuspend(pVM, pVCpu);
2689 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2690 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2691 return VERR_EM_INTERNAL_ERROR;
2692 }
2693 } /* The Outer Main Loop */
2694 }
2695 else
2696 {
2697 /*
2698 * Fatal error.
2699 */
2700 Log(("EMR3ExecuteVM: returns %Rrc because of longjmp / fatal error; (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(pVCpu->em.s.enmPrevState)));
2701 TMR3NotifySuspend(pVM, pVCpu);
2702 VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
2703 VMMR3FatalDump(pVM, pVCpu, rc);
2704 emR3Debug(pVM, pVCpu, rc);
2705 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2706 /** @todo change the VM state! */
2707 return rc;
2708 }
2709
2710 /* not reached */
2711}
2712
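/*
 * Illustrative sketch, not part of the original source: EMR3ExecuteVM() is
 * only ever called on the emulation thread (EMT) of the given VCPU (see the
 * VM_ASSERT_EMT above), once the VM has been constructed.  A deliberately
 * simplified caller, assuming the surrounding EMT loop acts on the returned
 * status, could look like this (emtExecuteSketch is a hypothetical name,
 * not a VBox API):
 */
#if 0 /* reading aid only */
static int emtExecuteSketch(PVM pVM, PVMCPU pVCpu)
{
    /* Blocks until the VM is powered off, terminated or suspended. */
    int rc = EMR3ExecuteVM(pVM, pVCpu);
    LogRel(("EMT%u: EMR3ExecuteVM returned %Rrc\n", pVCpu->idCpu, rc));
    return rc; /* the EMT loop then drives suspension or teardown based on rc */
}
#endif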