VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/EM.cpp@96441

Last change on this file since 96441 was 96407, checked in by vboxsync, 2 years ago

scm copyright and license note update

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 111.7 KB
1/* $Id: EM.cpp 96407 2022-08-22 17:43:14Z vboxsync $ */
2/** @file
3 * EM - Execution Monitor / Manager.
4 */
5
6/*
7 * Copyright (C) 2006-2022 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28/** @page pg_em EM - The Execution Monitor / Manager
29 *
30 * The Execution Monitor/Manager is responsible for running the VM, scheduling
31 * the right kind of execution (Raw-mode, Hardware Assisted, Recompiled or
32 * Interpreted), and keeping the CPU states in sync. The function
33 * EMR3ExecuteVM() is the 'main-loop' of the VM, while each of the execution
34 * modes has different inner loops (emR3RawExecute, emR3HmExecute, and
35 * emR3RemExecute).
36 *
37 * The interpreted execution is only used to avoid switching between
38 * raw-mode/hm and the recompiler when fielding virtualization traps/faults.
39 * The interpretation is thus implemented as part of EM.
40 *
41 * @see grp_em
42 */
43
44
45/*********************************************************************************************************************************
46* Header Files *
47*********************************************************************************************************************************/
48#define LOG_GROUP LOG_GROUP_EM
49#define VMCPU_INCL_CPUM_GST_CTX /* for CPUM_IMPORT_GUEST_STATE_RET */
50#include <VBox/vmm/em.h>
51#include <VBox/vmm/vmm.h>
52#include <VBox/vmm/selm.h>
53#include <VBox/vmm/trpm.h>
54#include <VBox/vmm/iem.h>
55#include <VBox/vmm/nem.h>
56#include <VBox/vmm/iom.h>
57#include <VBox/vmm/dbgf.h>
58#include <VBox/vmm/pgm.h>
59#include <VBox/vmm/apic.h>
60#include <VBox/vmm/tm.h>
61#include <VBox/vmm/mm.h>
62#include <VBox/vmm/ssm.h>
63#include <VBox/vmm/pdmapi.h>
64#include <VBox/vmm/pdmcritsect.h>
65#include <VBox/vmm/pdmqueue.h>
66#include <VBox/vmm/hm.h>
67#include "EMInternal.h"
68#include <VBox/vmm/vm.h>
69#include <VBox/vmm/uvm.h>
70#include <VBox/vmm/cpumdis.h>
71#include <VBox/dis.h>
72#include <VBox/disopcode.h>
73#include <VBox/err.h>
74#include "VMMTracing.h"
75
76#include <iprt/asm.h>
77#include <iprt/string.h>
78#include <iprt/stream.h>
79#include <iprt/thread.h>
80
81
82/*********************************************************************************************************************************
83* Internal Functions *
84*********************************************************************************************************************************/
85static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM);
86static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
87#if defined(LOG_ENABLED) || defined(VBOX_STRICT)
88static const char *emR3GetStateName(EMSTATE enmState);
89#endif
90static VBOXSTRICTRC emR3Debug(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc);
91#if defined(VBOX_WITH_REM) || defined(DEBUG)
92static int emR3RemStep(PVM pVM, PVMCPU pVCpu);
93#endif
94static int emR3RemExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone);
95
96
97/**
98 * Initializes the EM.
99 *
100 * @returns VBox status code.
101 * @param pVM The cross context VM structure.
102 */
103VMMR3_INT_DECL(int) EMR3Init(PVM pVM)
104{
105 LogFlow(("EMR3Init\n"));
106 /*
107 * Assert alignment and sizes.
108 */
109 AssertCompileMemberAlignment(VM, em.s, 32);
110 AssertCompile(sizeof(pVM->em.s) <= sizeof(pVM->em.padding));
111 AssertCompile(RT_SIZEOFMEMB(VMCPU, em.s.u.FatalLongJump) <= RT_SIZEOFMEMB(VMCPU, em.s.u.achPaddingFatalLongJump));
112 AssertCompile(RT_SIZEOFMEMB(VMCPU, em.s) <= RT_SIZEOFMEMB(VMCPU, em.padding));
113
114 /*
115 * Init the structure.
116 */
117 PCFGMNODE pCfgRoot = CFGMR3GetRoot(pVM);
118 PCFGMNODE pCfgEM = CFGMR3GetChild(pCfgRoot, "EM");
119
120 int rc = CFGMR3QueryBoolDef(pCfgEM, "IemExecutesAll", &pVM->em.s.fIemExecutesAll,
121#if defined(RT_ARCH_ARM64) && defined(RT_OS_DARWIN)
122 true
123#else
124 false
125#endif
126 );
127 AssertLogRelRCReturn(rc, rc);
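/* Note: the default for /EM/IemExecutesAll is platform dependent; the #if above makes it
   true on darwin.arm64 builds (where the guest is run entirely through IEM) and false
   everywhere else. */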
128
129 bool fEnabled;
130 rc = CFGMR3QueryBoolDef(pCfgEM, "TripleFaultReset", &fEnabled, false);
131 AssertLogRelRCReturn(rc, rc);
132 pVM->em.s.fGuruOnTripleFault = !fEnabled;
133 if (!pVM->em.s.fGuruOnTripleFault && pVM->cCpus > 1)
134 {
135 LogRel(("EM: Overriding /EM/TripleFaultReset, must be false on SMP.\n"));
136 pVM->em.s.fGuruOnTripleFault = true;
137 }
138
139 LogRel(("EMR3Init: fIemExecutesAll=%RTbool fGuruOnTripleFault=%RTbool\n", pVM->em.s.fIemExecutesAll, pVM->em.s.fGuruOnTripleFault));
140
141 /** @cfgm{/EM/ExitOptimizationEnabled, bool, true}
142 * Whether to try to correlate exit history in any context, detect hot spots and
143 * try to optimize these using IEM if there are other exits close by. This
144 * overrides the context-specific settings. */
145 bool fExitOptimizationEnabled = true;
146 rc = CFGMR3QueryBoolDef(pCfgEM, "ExitOptimizationEnabled", &fExitOptimizationEnabled, true);
147 AssertLogRelRCReturn(rc, rc);
148
149 /** @cfgm{/EM/ExitOptimizationEnabledR0, bool, true}
150 * Whether to optimize exits in ring-0. Setting this to false will also disable
151 * the /EM/ExitOptimizationEnabledR0PreemptDisabled setting. Depending on preemption
152 * capabilities of the host kernel, this optimization may be unavailable. */
153 bool fExitOptimizationEnabledR0 = true;
154 rc = CFGMR3QueryBoolDef(pCfgEM, "ExitOptimizationEnabledR0", &fExitOptimizationEnabledR0, true);
155 AssertLogRelRCReturn(rc, rc);
156 fExitOptimizationEnabledR0 &= fExitOptimizationEnabled;
157
158 /** @cfgm{/EM/ExitOptimizationEnabledR0PreemptDisabled, bool, false}
159 * Whether to optimize exits in ring-0 when preemption is disabled (or preemption
160 * hooks are in effect). */
161 /** @todo change the default to true here */
162 bool fExitOptimizationEnabledR0PreemptDisabled = true;
163 rc = CFGMR3QueryBoolDef(pCfgEM, "ExitOptimizationEnabledR0PreemptDisabled", &fExitOptimizationEnabledR0PreemptDisabled, false);
164 AssertLogRelRCReturn(rc, rc);
165 fExitOptimizationEnabledR0PreemptDisabled &= fExitOptimizationEnabledR0;
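/* These three settings cascade: ring-0 exit optimization requires the global flag above,
   and the preemption-disabled variant additionally requires the ring-0 flag. */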
166
167 /** @cfgm{/EM/HistoryExecMaxInstructions, integer, 16, 65535, 8192}
168 * Maximum number of instructions to let EMHistoryExec execute in one go. */
169 uint16_t cHistoryExecMaxInstructions = 8192;
170 rc = CFGMR3QueryU16Def(pCfgEM, "HistoryExecMaxInstructions", &cHistoryExecMaxInstructions, cHistoryExecMaxInstructions);
171 AssertLogRelRCReturn(rc, rc);
172 if (cHistoryExecMaxInstructions < 16)
173 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS, "/EM/HistoryExecMaxInstructions value is too small, min 16");
174
175 /** @cfgm{/EM/HistoryProbeMaxInstructionsWithoutExit, integer, 2, 65535, 24 for HM, 32 for NEM}
176 * Maximum number of instructions between exits during probing. */
177 uint16_t cHistoryProbeMaxInstructionsWithoutExit = 24;
178#ifdef RT_OS_WINDOWS
179 if (VM_IS_NEM_ENABLED(pVM))
180 cHistoryProbeMaxInstructionsWithoutExit = 32;
181#endif
182 rc = CFGMR3QueryU16Def(pCfgEM, "HistoryProbeMaxInstructionsWithoutExit", &cHistoryProbeMaxInstructionsWithoutExit,
183 cHistoryProbeMaxInstructionsWithoutExit);
184 AssertLogRelRCReturn(rc, rc);
185 if (cHistoryProbeMaxInstructionsWithoutExit < 2)
186 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS,
187 "/EM/HistoryProbeMaxInstructionsWithoutExit value is too small, min 2");
188
189 /** @cfgm{/EM/HistoryProbMinInstructions, integer, 0, 65535, depends}
190 * The default is (/EM/HistoryProbeMaxInstructionsWithoutExit + 1) * 3. */
191 uint16_t cHistoryProbeMinInstructions = cHistoryProbeMaxInstructionsWithoutExit < 0x5554
192 ? (cHistoryProbeMaxInstructionsWithoutExit + 1) * 3 : 0xffff;
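/* The 0x5554 guard keeps the computed default inside uint16_t range: (0x5554 + 1) * 3 = 0xffff. */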
193 rc = CFGMR3QueryU16Def(pCfgEM, "HistoryProbMinInstructions", &cHistoryProbeMinInstructions,
194 cHistoryProbeMinInstructions);
195 AssertLogRelRCReturn(rc, rc);
196
197 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
198 {
199 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
200 pVCpu->em.s.fExitOptimizationEnabled = fExitOptimizationEnabled;
201 pVCpu->em.s.fExitOptimizationEnabledR0 = fExitOptimizationEnabledR0;
202 pVCpu->em.s.fExitOptimizationEnabledR0PreemptDisabled = fExitOptimizationEnabledR0PreemptDisabled;
203 pVCpu->em.s.cHistoryExecMaxInstructions = cHistoryExecMaxInstructions;
204 pVCpu->em.s.cHistoryProbeMinInstructions = cHistoryProbeMinInstructions;
205 pVCpu->em.s.cHistoryProbeMaxInstructionsWithoutExit = cHistoryProbeMaxInstructionsWithoutExit;
206 }
207
208 /*
209 * Saved state.
210 */
211 rc = SSMR3RegisterInternal(pVM, "em", 0, EM_SAVED_STATE_VERSION, 16,
212 NULL, NULL, NULL,
213 NULL, emR3Save, NULL,
214 NULL, emR3Load, NULL);
215 if (RT_FAILURE(rc))
216 return rc;
217
218 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
219 {
220 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
221
222 pVCpu->em.s.enmState = idCpu == 0 ? EMSTATE_NONE : EMSTATE_WAIT_SIPI;
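/* The bootstrap processor (CPU 0) starts out in EMSTATE_NONE; application processors wait in
   EMSTATE_WAIT_SIPI until the guest sends them a startup IPI. */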
223 pVCpu->em.s.enmPrevState = EMSTATE_NONE;
224 pVCpu->em.s.u64TimeSliceStart = 0; /* paranoia */
225 pVCpu->em.s.idxContinueExitRec = UINT16_MAX;
226
227# define EM_REG_COUNTER(a, b, c) \
228 rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, c, b, idCpu); \
229 AssertRC(rc);
230
231# define EM_REG_COUNTER_USED(a, b, c) \
232 rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, c, b, idCpu); \
233 AssertRC(rc);
234
235# define EM_REG_PROFILE(a, b, c) \
236 rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, idCpu); \
237 AssertRC(rc);
238
239# define EM_REG_PROFILE_ADV(a, b, c) \
240 rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE_ADV, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, idCpu); \
241 AssertRC(rc);
242
243 /*
244 * Statistics.
245 */
246#ifdef VBOX_WITH_STATISTICS
247 EM_REG_COUNTER_USED(&pVCpu->em.s.StatIoRestarted, "/EM/CPU%u/R3/PrivInst/IoRestarted", "I/O instructions restarted in ring-3.");
248 EM_REG_COUNTER_USED(&pVCpu->em.s.StatIoIem, "/EM/CPU%u/R3/PrivInst/IoIem", "I/O instructions handed to IEM in ring-3.");
249
250 /* these should be considered for release statistics. */
251 EM_REG_COUNTER(&pVCpu->em.s.StatIOEmu, "/PROF/CPU%u/EM/Emulation/IO", "Profiling of emR3RawExecuteIOInstruction.");
252 EM_REG_COUNTER(&pVCpu->em.s.StatPrivEmu, "/PROF/CPU%u/EM/Emulation/Priv", "Profiling of emR3RawPrivileged.");
253 EM_REG_PROFILE(&pVCpu->em.s.StatHMEntry, "/PROF/CPU%u/EM/HMEnter", "Profiling Hardware Accelerated Mode entry overhead.");
254 EM_REG_PROFILE(&pVCpu->em.s.StatHMExec, "/PROF/CPU%u/EM/HMExec", "Profiling Hardware Accelerated Mode execution.");
255 EM_REG_COUNTER(&pVCpu->em.s.StatHMExecuteCalled, "/PROF/CPU%u/EM/HMExecuteCalled", "Number of times emR3HmExecute is called.");
256 EM_REG_PROFILE(&pVCpu->em.s.StatIEMEmu, "/PROF/CPU%u/EM/IEMEmuSingle", "Profiling single instruction IEM execution.");
257 EM_REG_PROFILE(&pVCpu->em.s.StatIEMThenREM, "/PROF/CPU%u/EM/IEMThenRem", "Profiling IEM-then-REM instruction execution (by IEM).");
258 EM_REG_PROFILE(&pVCpu->em.s.StatNEMEntry, "/PROF/CPU%u/EM/NEMEnter", "Profiling NEM entry overhead.");
259#endif /* VBOX_WITH_STATISTICS */
260 EM_REG_PROFILE(&pVCpu->em.s.StatNEMExec, "/PROF/CPU%u/EM/NEMExec", "Profiling NEM execution.");
261 EM_REG_COUNTER(&pVCpu->em.s.StatNEMExecuteCalled, "/PROF/CPU%u/EM/NEMExecuteCalled", "Number of times emR3NemExecute is called.");
262#ifdef VBOX_WITH_STATISTICS
263 EM_REG_PROFILE(&pVCpu->em.s.StatREMEmu, "/PROF/CPU%u/EM/REMEmuSingle", "Profiling single instruction REM execution.");
264 EM_REG_PROFILE(&pVCpu->em.s.StatREMExec, "/PROF/CPU%u/EM/REMExec", "Profiling REM execution.");
265 EM_REG_PROFILE(&pVCpu->em.s.StatREMSync, "/PROF/CPU%u/EM/REMSync", "Profiling REM context syncing.");
266 EM_REG_PROFILE(&pVCpu->em.s.StatRAWEntry, "/PROF/CPU%u/EM/RAWEnter", "Profiling Raw Mode entry overhead.");
267 EM_REG_PROFILE(&pVCpu->em.s.StatRAWExec, "/PROF/CPU%u/EM/RAWExec", "Profiling Raw Mode execution.");
268 EM_REG_PROFILE(&pVCpu->em.s.StatRAWTail, "/PROF/CPU%u/EM/RAWTail", "Profiling Raw Mode tail overhead.");
269#endif /* VBOX_WITH_STATISTICS */
270
271 EM_REG_COUNTER(&pVCpu->em.s.StatForcedActions, "/PROF/CPU%u/EM/ForcedActions", "Profiling forced action execution.");
272 EM_REG_COUNTER(&pVCpu->em.s.StatHalted, "/PROF/CPU%u/EM/Halted", "Profiling halted state (VMR3WaitHalted).");
273 EM_REG_PROFILE_ADV(&pVCpu->em.s.StatCapped, "/PROF/CPU%u/EM/Capped", "Profiling capped state (sleep).");
274 EM_REG_COUNTER(&pVCpu->em.s.StatREMTotal, "/PROF/CPU%u/EM/REMTotal", "Profiling emR3RemExecute (excluding FFs).");
275 EM_REG_COUNTER(&pVCpu->em.s.StatRAWTotal, "/PROF/CPU%u/EM/RAWTotal", "Profiling emR3RawExecute (excluding FFs).");
276
277 EM_REG_PROFILE_ADV(&pVCpu->em.s.StatTotal, "/PROF/CPU%u/EM/Total", "Profiling EMR3ExecuteVM.");
278
279 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.iNextExit, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
280 "Number of recorded exits.", "/PROF/CPU%u/EM/RecordedExits", idCpu);
281 AssertRC(rc);
282
283 /* History record statistics */
284 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.cExitRecordUsed, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
285 "Number of used hash table entries.", "/EM/CPU%u/ExitHashing/Used", idCpu);
286 AssertRC(rc);
287
288 for (uint32_t iStep = 0; iStep < RT_ELEMENTS(pVCpu->em.s.aStatHistoryRecHits); iStep++)
289 {
290 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.aStatHistoryRecHits[iStep], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
291 "Number of hits at this step.", "/EM/CPU%u/ExitHashing/Step%02u-Hits", idCpu, iStep);
292 AssertRC(rc);
293 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.aStatHistoryRecTypeChanged[iStep], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
294 "Number of type changes at this step.", "/EM/CPU%u/ExitHashing/Step%02u-TypeChanges", idCpu, iStep);
295 AssertRC(rc);
296 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.aStatHistoryRecReplaced[iStep], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
297 "Number of replacements at this step.", "/EM/CPU%u/ExitHashing/Step%02u-Replacments", idCpu, iStep);
298 AssertRC(rc);
299 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.aStatHistoryRecNew[iStep], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
300 "Number of new inserts at this step.", "/EM/CPU%u/ExitHashing/Step%02u-NewInserts", idCpu, iStep);
301 AssertRC(rc);
302 }
303
304 EM_REG_PROFILE(&pVCpu->em.s.StatHistoryExec, "/EM/CPU%u/ExitOpt/Exec", "Profiling normal EMHistoryExec operation.");
305 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryExecSavedExits, "/EM/CPU%u/ExitOpt/ExecSavedExit", "Net number of saved exits.");
306 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryExecInstructions, "/EM/CPU%u/ExitOpt/ExecInstructions", "Number of instructions executed during normal operation.");
307 EM_REG_PROFILE(&pVCpu->em.s.StatHistoryProbe, "/EM/CPU%u/ExitOpt/Probe", "Profiling EMHistoryExec when probing.");
308 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryProbeInstructions, "/EM/CPU%u/ExitOpt/ProbeInstructions", "Number of instructions executed during probing.");
309 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryProbedNormal, "/EM/CPU%u/ExitOpt/ProbedNormal", "Number of EMEXITACTION_NORMAL_PROBED results.");
310 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryProbedExecWithMax, "/EM/CPU%u/ExitOpt/ProbedExecWithMax", "Number of EMEXITACTION_EXEC_WITH_MAX results.");
311 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryProbedToRing3, "/EM/CPU%u/ExitOpt/ProbedToRing3", "Number of ring-3 probe continuations.");
312 }
313
314 emR3InitDbg(pVM);
315 return VINF_SUCCESS;
316}
317
318
319/**
320 * Called when a VM initialization stage is completed.
321 *
322 * @returns VBox status code.
323 * @param pVM The cross context VM structure.
324 * @param enmWhat The initialization state that was completed.
325 */
326VMMR3_INT_DECL(int) EMR3InitCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
327{
328 if (enmWhat == VMINITCOMPLETED_RING0)
329 LogRel(("EM: Exit history optimizations: enabled=%RTbool enabled-r0=%RTbool enabled-r0-no-preemption=%RTbool\n",
330 pVM->apCpusR3[0]->em.s.fExitOptimizationEnabled, pVM->apCpusR3[0]->em.s.fExitOptimizationEnabledR0,
331 pVM->apCpusR3[0]->em.s.fExitOptimizationEnabledR0PreemptDisabled));
332 return VINF_SUCCESS;
333}
334
335
336/**
337 * Applies relocations to data and code managed by this
338 * component. This function will be called at init and
339 * whenever the VMM needs to relocate itself inside the GC.
340 *
341 * @param pVM The cross context VM structure.
342 */
343VMMR3_INT_DECL(void) EMR3Relocate(PVM pVM)
344{
345 LogFlow(("EMR3Relocate\n"));
346 RT_NOREF(pVM);
347}
348
349
350/**
351 * Reset the EM state for a CPU.
352 *
353 * Called by EMR3Reset and hot plugging.
354 *
355 * @param pVCpu The cross context virtual CPU structure.
356 */
357VMMR3_INT_DECL(void) EMR3ResetCpu(PVMCPU pVCpu)
358{
359 /* Reset scheduling state. */
360 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_UNHALT);
361
362 /* VMR3ResetFF may return VINF_EM_RESET or VINF_EM_SUSPEND, so transition
363 out of the HALTED state here so that enmPrevState doesn't end up as
364 HALTED when EMR3Execute returns. */
365 if (pVCpu->em.s.enmState == EMSTATE_HALTED)
366 {
367 Log(("EMR3ResetCpu: Cpu#%u %s -> %s\n", pVCpu->idCpu, emR3GetStateName(pVCpu->em.s.enmState), pVCpu->idCpu == 0 ? "EMSTATE_NONE" : "EMSTATE_WAIT_SIPI"));
368 pVCpu->em.s.enmState = pVCpu->idCpu == 0 ? EMSTATE_NONE : EMSTATE_WAIT_SIPI;
369 }
370}
371
372
373/**
374 * Reset notification.
375 *
376 * @param pVM The cross context VM structure.
377 */
378VMMR3_INT_DECL(void) EMR3Reset(PVM pVM)
379{
380 Log(("EMR3Reset: \n"));
381 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
382 EMR3ResetCpu(pVM->apCpusR3[idCpu]);
383}
384
385
386/**
387 * Terminates the EM.
388 *
389 * Termination means cleaning up and freeing all resources;
390 * the VM itself is at this point powered off or suspended.
391 *
392 * @returns VBox status code.
393 * @param pVM The cross context VM structure.
394 */
395VMMR3_INT_DECL(int) EMR3Term(PVM pVM)
396{
397 RT_NOREF(pVM);
398 return VINF_SUCCESS;
399}
400
401
402/**
403 * Execute state save operation.
404 *
405 * @returns VBox status code.
406 * @param pVM The cross context VM structure.
407 * @param pSSM SSM operation handle.
408 */
409static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM)
410{
411 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
412 {
413 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
414
415 SSMR3PutBool(pSSM, false /*fForceRAW*/);
416
417 Assert(pVCpu->em.s.enmState == EMSTATE_SUSPENDED);
418 Assert(pVCpu->em.s.enmPrevState != EMSTATE_SUSPENDED);
419 SSMR3PutU32(pSSM, pVCpu->em.s.enmPrevState);
420
421 /* Save mwait state. */
422 SSMR3PutU32(pSSM, pVCpu->em.s.MWait.fWait);
423 SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMWaitRAX);
424 SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMWaitRCX);
425 SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRAX);
426 SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRCX);
427 int rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRDX);
428 AssertRCReturn(rc, rc);
429 }
430 return VINF_SUCCESS;
431}
432
433
434/**
435 * Execute state load operation.
436 *
437 * @returns VBox status code.
438 * @param pVM The cross context VM structure.
439 * @param pSSM SSM operation handle.
440 * @param uVersion Data layout version.
441 * @param uPass The data pass.
442 */
443static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
444{
445 /*
446 * Validate version.
447 */
448 if ( uVersion > EM_SAVED_STATE_VERSION
449 || uVersion < EM_SAVED_STATE_VERSION_PRE_SMP)
450 {
451 AssertMsgFailed(("emR3Load: Invalid version uVersion=%d (current %d)!\n", uVersion, EM_SAVED_STATE_VERSION));
452 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
453 }
454 Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);
455
456 /*
457 * Load the saved state.
458 */
459 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
460 {
461 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
462
463 bool fForceRAWIgnored;
464 int rc = SSMR3GetBool(pSSM, &fForceRAWIgnored);
465 AssertRCReturn(rc, rc);
466
467 if (uVersion > EM_SAVED_STATE_VERSION_PRE_SMP)
468 {
469 SSM_GET_ENUM32_RET(pSSM, pVCpu->em.s.enmPrevState, EMSTATE);
470 Assert(pVCpu->em.s.enmPrevState != EMSTATE_SUSPENDED);
471
472 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
473 }
474 if (uVersion > EM_SAVED_STATE_VERSION_PRE_MWAIT)
475 {
476 /* Load mwait state. */
477 rc = SSMR3GetU32(pSSM, &pVCpu->em.s.MWait.fWait);
478 AssertRCReturn(rc, rc);
479 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMWaitRAX);
480 AssertRCReturn(rc, rc);
481 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMWaitRCX);
482 AssertRCReturn(rc, rc);
483 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRAX);
484 AssertRCReturn(rc, rc);
485 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRCX);
486 AssertRCReturn(rc, rc);
487 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRDX);
488 AssertRCReturn(rc, rc);
489 }
490 }
491 return VINF_SUCCESS;
492}
493
494
495/**
496 * Argument packet for emR3SetExecutionPolicy.
497 */
498struct EMR3SETEXECPOLICYARGS
499{
500 EMEXECPOLICY enmPolicy;
501 bool fEnforce;
502};
503
504
505/**
506 * @callback_method_impl{FNVMMEMTRENDEZVOUS, Rendezvous callback for EMR3SetExecutionPolicy.}
507 */
508static DECLCALLBACK(VBOXSTRICTRC) emR3SetExecutionPolicy(PVM pVM, PVMCPU pVCpu, void *pvUser)
509{
510 /*
511 * Only the first CPU changes the variables.
512 */
513 if (pVCpu->idCpu == 0)
514 {
515 struct EMR3SETEXECPOLICYARGS *pArgs = (struct EMR3SETEXECPOLICYARGS *)pvUser;
516 switch (pArgs->enmPolicy)
517 {
518 case EMEXECPOLICY_RECOMPILE_RING0:
519 case EMEXECPOLICY_RECOMPILE_RING3:
520 break;
521 case EMEXECPOLICY_IEM_ALL:
522 pVM->em.s.fIemExecutesAll = pArgs->fEnforce;
523
524 /* For making '.alliem 1' useful during debugging, transition the
525 EMSTATE_DEBUG_GUEST_XXX to EMSTATE_DEBUG_GUEST_IEM. */
526 for (VMCPUID i = 0; i < pVM->cCpus; i++)
527 {
528 PVMCPU pVCpuX = pVM->apCpusR3[i];
529 switch (pVCpuX->em.s.enmState)
530 {
531 case EMSTATE_DEBUG_GUEST_RAW:
532 case EMSTATE_DEBUG_GUEST_HM:
533 case EMSTATE_DEBUG_GUEST_NEM:
534 case EMSTATE_DEBUG_GUEST_REM:
535 Log(("EM: idCpu=%u: %s -> EMSTATE_DEBUG_GUEST_IEM\n", i, emR3GetStateName(pVCpuX->em.s.enmState) ));
536 pVCpuX->em.s.enmState = EMSTATE_DEBUG_GUEST_IEM;
537 break;
538 case EMSTATE_DEBUG_GUEST_IEM:
539 default:
540 break;
541 }
542 }
543 break;
544 default:
545 AssertFailedReturn(VERR_INVALID_PARAMETER);
546 }
547 Log(("EM: Set execution policy (fIemExecutesAll=%RTbool)\n", pVM->em.s.fIemExecutesAll));
548 }
549
550 /*
551 * Force rescheduling if in RAW, HM, NEM, IEM, or REM.
552 */
553 return pVCpu->em.s.enmState == EMSTATE_RAW
554 || pVCpu->em.s.enmState == EMSTATE_HM
555 || pVCpu->em.s.enmState == EMSTATE_NEM
556 || pVCpu->em.s.enmState == EMSTATE_IEM
557 || pVCpu->em.s.enmState == EMSTATE_REM
558 || pVCpu->em.s.enmState == EMSTATE_IEM_THEN_REM
559 ? VINF_EM_RESCHEDULE
560 : VINF_SUCCESS;
561}
562
563
564/**
565 * Changes an execution scheduling policy parameter.
566 *
567 * This is used to enable or disable raw-mode / hardware-virtualization
568 * execution of user and supervisor code.
569 *
570 * @returns VINF_SUCCESS on success.
571 * @returns VINF_EM_RESCHEDULE if a rescheduling might be required.
572 * @returns VERR_INVALID_PARAMETER on an invalid enmPolicy value.
573 *
574 * @param pUVM The user mode VM handle.
575 * @param enmPolicy The scheduling policy to change.
576 * @param fEnforce Whether to enforce the policy or not.
577 */
578VMMR3DECL(int) EMR3SetExecutionPolicy(PUVM pUVM, EMEXECPOLICY enmPolicy, bool fEnforce)
579{
580 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
581 VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE);
582 AssertReturn(enmPolicy > EMEXECPOLICY_INVALID && enmPolicy < EMEXECPOLICY_END, VERR_INVALID_PARAMETER);
583
584 struct EMR3SETEXECPOLICYARGS Args = { enmPolicy, fEnforce };
585 return VMMR3EmtRendezvous(pUVM->pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING, emR3SetExecutionPolicy, &Args);
586}
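/* Usage sketch (hypothetical caller, e.g. behind the '.alliem' debugger command mentioned in
 * emR3SetExecutionPolicy above): calling EMR3SetExecutionPolicy(pUVM, EMEXECPOLICY_IEM_ALL, true)
 * makes every EMT reschedule into IEM-only execution; calling it with false reverts to the
 * normal scheduling decisions. */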
587
588
589/**
590 * Queries an execution scheduling policy parameter.
591 *
592 * @returns VBox status code
593 * @param pUVM The user mode VM handle.
594 * @param enmPolicy The scheduling policy to query.
595 * @param pfEnforced Where to return the current value.
596 */
597VMMR3DECL(int) EMR3QueryExecutionPolicy(PUVM pUVM, EMEXECPOLICY enmPolicy, bool *pfEnforced)
598{
599 AssertReturn(enmPolicy > EMEXECPOLICY_INVALID && enmPolicy < EMEXECPOLICY_END, VERR_INVALID_PARAMETER);
600 AssertPtrReturn(pfEnforced, VERR_INVALID_POINTER);
601 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
602 PVM pVM = pUVM->pVM;
603 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
604
605 /* No need to bother EMTs with a query. */
606 switch (enmPolicy)
607 {
608 case EMEXECPOLICY_RECOMPILE_RING0:
609 case EMEXECPOLICY_RECOMPILE_RING3:
610 *pfEnforced = false;
611 break;
612 case EMEXECPOLICY_IEM_ALL:
613 *pfEnforced = pVM->em.s.fIemExecutesAll;
614 break;
615 default:
616 AssertFailedReturn(VERR_INTERNAL_ERROR_2);
617 }
618
619 return VINF_SUCCESS;
620}
621
622
623/**
624 * Queries the main execution engine of the VM.
625 *
626 * @returns VBox status code
627 * @param pUVM The user mode VM handle.
628 * @param pbMainExecutionEngine Where to return the result, VM_EXEC_ENGINE_XXX.
629 */
630VMMR3DECL(int) EMR3QueryMainExecutionEngine(PUVM pUVM, uint8_t *pbMainExecutionEngine)
631{
632 AssertPtrReturn(pbMainExecutionEngine, VERR_INVALID_POINTER);
633 *pbMainExecutionEngine = VM_EXEC_ENGINE_NOT_SET;
634
635 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
636 PVM pVM = pUVM->pVM;
637 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
638
639 *pbMainExecutionEngine = pVM->bMainExecutionEngine;
640 return VINF_SUCCESS;
641}
642
643
644/**
645 * Raise a fatal error.
646 *
647 * Safely terminate the VM with full state report and stuff. This function
648 * will naturally never return.
649 *
650 * @param pVCpu The cross context virtual CPU structure.
651 * @param rc VBox status code.
652 */
653VMMR3DECL(void) EMR3FatalError(PVMCPU pVCpu, int rc)
654{
655 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
656 longjmp(pVCpu->em.s.u.FatalLongJump, rc);
657}
658
659
660#if defined(LOG_ENABLED) || defined(VBOX_STRICT)
661/**
662 * Gets the EM state name.
663 *
664 * @returns Pointer to the read-only state name.
665 * @param enmState The state.
666 */
667static const char *emR3GetStateName(EMSTATE enmState)
668{
669 switch (enmState)
670 {
671 case EMSTATE_NONE: return "EMSTATE_NONE";
672 case EMSTATE_RAW: return "EMSTATE_RAW";
673 case EMSTATE_HM: return "EMSTATE_HM";
674 case EMSTATE_IEM: return "EMSTATE_IEM";
675 case EMSTATE_REM: return "EMSTATE_REM";
676 case EMSTATE_HALTED: return "EMSTATE_HALTED";
677 case EMSTATE_WAIT_SIPI: return "EMSTATE_WAIT_SIPI";
678 case EMSTATE_SUSPENDED: return "EMSTATE_SUSPENDED";
679 case EMSTATE_TERMINATING: return "EMSTATE_TERMINATING";
680 case EMSTATE_DEBUG_GUEST_RAW: return "EMSTATE_DEBUG_GUEST_RAW";
681 case EMSTATE_DEBUG_GUEST_HM: return "EMSTATE_DEBUG_GUEST_HM";
682 case EMSTATE_DEBUG_GUEST_IEM: return "EMSTATE_DEBUG_GUEST_IEM";
683 case EMSTATE_DEBUG_GUEST_REM: return "EMSTATE_DEBUG_GUEST_REM";
684 case EMSTATE_DEBUG_HYPER: return "EMSTATE_DEBUG_HYPER";
685 case EMSTATE_GURU_MEDITATION: return "EMSTATE_GURU_MEDITATION";
686 case EMSTATE_IEM_THEN_REM: return "EMSTATE_IEM_THEN_REM";
687 case EMSTATE_NEM: return "EMSTATE_NEM";
688 case EMSTATE_DEBUG_GUEST_NEM: return "EMSTATE_DEBUG_GUEST_NEM";
689 default: return "Unknown!";
690 }
691}
692#endif /* LOG_ENABLED || VBOX_STRICT */
693
694
695/**
696 * Handle pending ring-3 I/O port write.
697 *
698 * This is in response to a VINF_EM_PENDING_R3_IOPORT_WRITE status code returned
699 * by EMRZSetPendingIoPortWrite() in ring-0 or raw-mode context.
700 *
701 * @returns Strict VBox status code.
702 * @param pVM The cross context VM structure.
703 * @param pVCpu The cross context virtual CPU structure.
704 */
705VBOXSTRICTRC emR3ExecutePendingIoPortWrite(PVM pVM, PVMCPU pVCpu)
706{
707 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
708
709 /* Get and clear the pending data. */
710 RTIOPORT const uPort = pVCpu->em.s.PendingIoPortAccess.uPort;
711 uint32_t const uValue = pVCpu->em.s.PendingIoPortAccess.uValue;
712 uint8_t const cbValue = pVCpu->em.s.PendingIoPortAccess.cbValue;
713 uint8_t const cbInstr = pVCpu->em.s.PendingIoPortAccess.cbInstr;
714 pVCpu->em.s.PendingIoPortAccess.cbValue = 0;
715
716 /* Assert sanity. */
717 switch (cbValue)
718 {
719 case 1: Assert(!(uValue & UINT32_C(0xffffff00))); break;
720 case 2: Assert(!(uValue & UINT32_C(0xffff0000))); break;
721 case 4: break;
722 default: AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_EM_INTERNAL_ERROR);
723 }
724 AssertReturn(cbInstr <= 15 && cbInstr >= 1, VERR_EM_INTERNAL_ERROR);
725
726 /* Do the work.*/
727 VBOXSTRICTRC rcStrict = IOMIOPortWrite(pVM, pVCpu, uPort, uValue, cbValue);
728 LogFlow(("EM/OUT: %#x, %#x LB %u -> %Rrc\n", uPort, uValue, cbValue, VBOXSTRICTRC_VAL(rcStrict) ));
729 if (IOM_SUCCESS(rcStrict))
730 {
731 pVCpu->cpum.GstCtx.rip += cbInstr;
732 pVCpu->cpum.GstCtx.rflags.Bits.u1RF = 0;
733 }
734 return rcStrict;
735}
736
737
738/**
739 * Handle pending ring-3 I/O port read.
740 *
741 * This is in response to a VINF_EM_PENDING_R3_IOPORT_READ status code returned
742 * by EMRZSetPendingIoPortRead() in ring-0 or raw-mode context.
743 *
744 * @returns Strict VBox status code.
745 * @param pVM The cross context VM structure.
746 * @param pVCpu The cross context virtual CPU structure.
747 */
748VBOXSTRICTRC emR3ExecutePendingIoPortRead(PVM pVM, PVMCPU pVCpu)
749{
750 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_RAX);
751
752 /* Get and clear the pending data. */
753 RTIOPORT const uPort = pVCpu->em.s.PendingIoPortAccess.uPort;
754 uint8_t const cbValue = pVCpu->em.s.PendingIoPortAccess.cbValue;
755 uint8_t const cbInstr = pVCpu->em.s.PendingIoPortAccess.cbInstr;
756 pVCpu->em.s.PendingIoPortAccess.cbValue = 0;
757
758 /* Assert sanity. */
759 switch (cbValue)
760 {
761 case 1: break;
762 case 2: break;
763 case 4: break;
764 default: AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_EM_INTERNAL_ERROR);
765 }
766 AssertReturn(pVCpu->em.s.PendingIoPortAccess.uValue == UINT32_C(0x52454144) /* READ*/, VERR_EM_INTERNAL_ERROR);
767 AssertReturn(cbInstr <= 15 && cbInstr >= 1, VERR_EM_INTERNAL_ERROR);
768
769 /* Do the work.*/
770 uint32_t uValue = 0;
771 VBOXSTRICTRC rcStrict = IOMIOPortRead(pVM, pVCpu, uPort, &uValue, cbValue);
772 LogFlow(("EM/IN: %#x LB %u -> %Rrc, %#x\n", uPort, cbValue, VBOXSTRICTRC_VAL(rcStrict), uValue ));
773 if (IOM_SUCCESS(rcStrict))
774 {
775 if (cbValue == 4)
776 pVCpu->cpum.GstCtx.rax = uValue;
777 else if (cbValue == 2)
778 pVCpu->cpum.GstCtx.ax = (uint16_t)uValue;
779 else
780 pVCpu->cpum.GstCtx.al = (uint8_t)uValue;
781 pVCpu->cpum.GstCtx.rip += cbInstr;
782 pVCpu->cpum.GstCtx.rflags.Bits.u1RF = 0;
783 }
784 return rcStrict;
785}
786
787
788/**
789 * @callback_method_impl{FNVMMEMTRENDEZVOUS,
790 * Worker for emR3ExecuteSplitLockInstruction}
791 */
792static DECLCALLBACK(VBOXSTRICTRC) emR3ExecuteSplitLockInstructionRendezvous(PVM pVM, PVMCPU pVCpu, void *pvUser)
793{
794 /* Only execute on the specified EMT. */
795 if (pVCpu == (PVMCPU)pvUser)
796 {
797 LogFunc(("\n"));
798 VBOXSTRICTRC rcStrict = IEMExecOneIgnoreLock(pVCpu);
799 LogFunc(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
800 if (rcStrict == VINF_IEM_RAISED_XCPT)
801 rcStrict = VINF_SUCCESS;
802 return rcStrict;
803 }
804 RT_NOREF(pVM);
805 return VINF_SUCCESS;
806}
807
808
809/**
810 * Handle an instruction causing a split cacheline lock access in SMP VMs.
811 *
812 * Generally we only get here if the host has split-lock detection enabled and
813 * this caused an \#AC because of something the guest did. If we interpret the
814 * instruction as-is, we'll likely just repeat the split-lock access and
815 * possibly be killed, get a SIGBUS, or trigger a warning followed by extra MSR
816 * changes on context switching (costs a tiny bit). Assuming these \#ACs are
817 * rare to non-existing, we'll do a rendezvous of all EMTs and tell IEM to
818 * disregard the lock prefix when emulating the instruction.
819 *
820 * Yes, we could probably modify the MSR (or MSRs) controlling the detection
821 * feature when entering guest context, but the support for the feature isn't a
822 * 100% given and we'll need the debug-only supdrvOSMsrProberRead and
823 * supdrvOSMsrProberWrite functionality from SUPDrv.cpp to safely detect it.
824 * Thus the approach is to just deal with the spurious \#ACs first and maybe add
825 * proper detection to SUPDrv later if we find it necessary.
826 *
827 * @see @bugref{10052}
828 *
829 * @returns Strict VBox status code.
830 * @param pVM The cross context VM structure.
831 * @param pVCpu The cross context virtual CPU structure.
832 */
833VBOXSTRICTRC emR3ExecuteSplitLockInstruction(PVM pVM, PVMCPU pVCpu)
834{
835 LogFunc(("\n"));
836 return VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE, emR3ExecuteSplitLockInstructionRendezvous, pVCpu);
837}
838
839
840/**
841 * Debug loop.
842 *
843 * @returns VBox status code for EM.
844 * @param pVM The cross context VM structure.
845 * @param pVCpu The cross context virtual CPU structure.
846 * @param rc Current EM VBox status code.
847 */
848static VBOXSTRICTRC emR3Debug(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc)
849{
850 for (;;)
851 {
852 Log(("emR3Debug: rc=%Rrc\n", VBOXSTRICTRC_VAL(rc)));
853 const VBOXSTRICTRC rcLast = rc;
854
855 /*
856 * Debug related RC.
857 */
858 switch (VBOXSTRICTRC_VAL(rc))
859 {
860 /*
861 * Single step an instruction.
862 */
863 case VINF_EM_DBG_STEP:
864 if ( pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_RAW
865 || pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER)
866 AssertLogRelMsgFailedStmt(("Bad EM state."), VERR_EM_INTERNAL_ERROR);
867 else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_HM)
868 rc = EMR3HmSingleInstruction(pVM, pVCpu, 0 /*fFlags*/);
869 else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_NEM)
870 rc = VBOXSTRICTRC_TODO(emR3NemSingleInstruction(pVM, pVCpu, 0 /*fFlags*/));
871#ifdef VBOX_WITH_REM /** @todo fix me? */
872 else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_REM)
873 rc = emR3RemStep(pVM, pVCpu);
874#endif
875 else
876 {
877 rc = IEMExecOne(pVCpu); /** @todo add dedicated interface... */
878 if (rc == VINF_SUCCESS || rc == VINF_EM_RESCHEDULE)
879 rc = VINF_EM_DBG_STEPPED;
880 }
881 break;
882
883 /*
884 * Simple events: stepped, breakpoint, stop/assertion.
885 */
886 case VINF_EM_DBG_STEPPED:
887 rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED);
888 break;
889
890 case VINF_EM_DBG_BREAKPOINT:
891 rc = DBGFR3BpHit(pVM, pVCpu);
892 break;
893
894 case VINF_EM_DBG_STOP:
895 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, NULL, 0, NULL, NULL);
896 break;
897
898 case VINF_EM_DBG_EVENT:
899 rc = DBGFR3EventHandlePending(pVM, pVCpu);
900 break;
901
902 case VINF_EM_DBG_HYPER_STEPPED:
903 rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED_HYPER);
904 break;
905
906 case VINF_EM_DBG_HYPER_BREAKPOINT:
907 rc = DBGFR3EventBreakpoint(pVM, DBGFEVENT_BREAKPOINT_HYPER);
908 break;
909
910 case VINF_EM_DBG_HYPER_ASSERTION:
911 RTPrintf("\nVINF_EM_DBG_HYPER_ASSERTION:\n%s%s\n", VMMR3GetRZAssertMsg1(pVM), VMMR3GetRZAssertMsg2(pVM));
912 RTLogFlush(NULL);
913 rc = DBGFR3EventAssertion(pVM, DBGFEVENT_ASSERTION_HYPER, VMMR3GetRZAssertMsg1(pVM), VMMR3GetRZAssertMsg2(pVM));
914 break;
915
916 /*
917 * Guru meditation.
918 */
919 case VERR_VMM_RING0_ASSERTION: /** @todo Make a guru meditation event! */
920 rc = DBGFR3EventSrc(pVM, DBGFEVENT_FATAL_ERROR, "VERR_VMM_RING0_ASSERTION", 0, NULL, NULL);
921 break;
922 case VERR_REM_TOO_MANY_TRAPS: /** @todo Make a guru meditation event! */
923 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, "VERR_REM_TOO_MANY_TRAPS", 0, NULL, NULL);
924 break;
925 case VINF_EM_TRIPLE_FAULT: /** @todo Make a guru meditation event! */
926 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, "VINF_EM_TRIPLE_FAULT", 0, NULL, NULL);
927 break;
928
929 default: /** @todo don't use default for guru, but make special errors code! */
930 {
931 LogRel(("emR3Debug: rc=%Rrc\n", VBOXSTRICTRC_VAL(rc)));
932 rc = DBGFR3Event(pVM, DBGFEVENT_FATAL_ERROR);
933 break;
934 }
935 }
936
937 /*
938 * Process the result.
939 */
940 switch (VBOXSTRICTRC_VAL(rc))
941 {
942 /*
943 * Continue the debugging loop.
944 */
945 case VINF_EM_DBG_STEP:
946 case VINF_EM_DBG_STOP:
947 case VINF_EM_DBG_EVENT:
948 case VINF_EM_DBG_STEPPED:
949 case VINF_EM_DBG_BREAKPOINT:
950 case VINF_EM_DBG_HYPER_STEPPED:
951 case VINF_EM_DBG_HYPER_BREAKPOINT:
952 case VINF_EM_DBG_HYPER_ASSERTION:
953 break;
954
955 /*
956 * Resuming execution (in some form) has to be done here if we got
957 * a hypervisor debug event.
958 */
959 case VINF_SUCCESS:
960 case VINF_EM_RESUME:
961 case VINF_EM_SUSPEND:
962 case VINF_EM_RESCHEDULE:
963 case VINF_EM_RESCHEDULE_RAW:
964 case VINF_EM_RESCHEDULE_REM:
965 case VINF_EM_HALT:
966 if (pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER)
967 AssertLogRelMsgFailedReturn(("Not implemented\n"), VERR_EM_INTERNAL_ERROR);
968 if (rc == VINF_SUCCESS)
969 rc = VINF_EM_RESCHEDULE;
970 return rc;
971
972 /*
973 * The debugger isn't attached.
974 * We'll simply turn the thing off since that's the easiest thing to do.
975 */
976 case VERR_DBGF_NOT_ATTACHED:
977 switch (VBOXSTRICTRC_VAL(rcLast))
978 {
979 case VINF_EM_DBG_HYPER_STEPPED:
980 case VINF_EM_DBG_HYPER_BREAKPOINT:
981 case VINF_EM_DBG_HYPER_ASSERTION:
982 case VERR_TRPM_PANIC:
983 case VERR_TRPM_DONT_PANIC:
984 case VERR_VMM_RING0_ASSERTION:
985 case VERR_VMM_HYPER_CR3_MISMATCH:
986 case VERR_VMM_RING3_CALL_DISABLED:
987 return rcLast;
988 }
989 return VINF_EM_OFF;
990
991 /*
992 * Status codes terminating the VM in one or another sense.
993 */
994 case VINF_EM_TERMINATE:
995 case VINF_EM_OFF:
996 case VINF_EM_RESET:
997 case VINF_EM_NO_MEMORY:
998 case VINF_EM_RAW_STALE_SELECTOR:
999 case VINF_EM_RAW_IRET_TRAP:
1000 case VERR_TRPM_PANIC:
1001 case VERR_TRPM_DONT_PANIC:
1002 case VERR_IEM_INSTR_NOT_IMPLEMENTED:
1003 case VERR_IEM_ASPECT_NOT_IMPLEMENTED:
1004 case VERR_VMM_RING0_ASSERTION:
1005 case VERR_VMM_HYPER_CR3_MISMATCH:
1006 case VERR_VMM_RING3_CALL_DISABLED:
1007 case VERR_INTERNAL_ERROR:
1008 case VERR_INTERNAL_ERROR_2:
1009 case VERR_INTERNAL_ERROR_3:
1010 case VERR_INTERNAL_ERROR_4:
1011 case VERR_INTERNAL_ERROR_5:
1012 case VERR_IPE_UNEXPECTED_STATUS:
1013 case VERR_IPE_UNEXPECTED_INFO_STATUS:
1014 case VERR_IPE_UNEXPECTED_ERROR_STATUS:
1015 return rc;
1016
1017 /*
1018 * The rest is unexpected, and will keep us here.
1019 */
1020 default:
1021 AssertMsgFailed(("Unexpected rc %Rrc!\n", VBOXSTRICTRC_VAL(rc)));
1022 break;
1023 }
1024 } /* debug for ever */
1025}
1026
1027
1028#if defined(VBOX_WITH_REM) || defined(DEBUG)
1029/**
1030 * Steps recompiled code.
1031 *
1032 * @returns VBox status code. The most important ones are: VINF_EM_STEP_EVENT,
1033 * VINF_EM_RESCHEDULE, VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1034 *
1035 * @param pVM The cross context VM structure.
1036 * @param pVCpu The cross context virtual CPU structure.
1037 */
1038static int emR3RemStep(PVM pVM, PVMCPU pVCpu)
1039{
1040 Log3(("emR3RemStep: cs:eip=%04x:%08x\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
1041
1042 int rc = VBOXSTRICTRC_TODO(IEMExecOne(pVCpu)); NOREF(pVM);
1043
1044 Log3(("emR3RemStep: returns %Rrc cs:eip=%04x:%08x\n", rc, CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
1045 return rc;
1046}
1047#endif /* VBOX_WITH_REM || DEBUG */
1048
1049
1050/**
1051 * Executes recompiled code.
1052 *
1053 * This function contains the recompiler version of the inner
1054 * execution loop (the outer loop being in EMR3ExecuteVM()).
1055 *
1056 * @returns VBox status code. The most important ones are: VINF_EM_RESCHEDULE,
1057 * VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1058 *
1059 * @param pVM The cross context VM structure.
1060 * @param pVCpu The cross context virtual CPU structure.
1061 * @param pfFFDone Where to store an indicator telling whether or not
1062 * FFs were done before returning.
1063 *
1064 */
1065static int emR3RemExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
1066{
1067#ifdef LOG_ENABLED
1068 uint32_t cpl = CPUMGetGuestCPL(pVCpu);
1069
1070 if (pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
1071 Log(("EMV86: %04X:%08X IF=%d\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.Bits.u1IF));
1072 else
1073 Log(("EMR%d: %04X:%08X ESP=%08X IF=%d CR0=%x eflags=%x\n", cpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.eflags.Bits.u1IF, (uint32_t)pVCpu->cpum.GstCtx.cr0, pVCpu->cpum.GstCtx.eflags.u));
1074#endif
1075 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatREMTotal, a);
1076
1077 /*
1078 * Spin till we get a forced action which returns anything but VINF_SUCCESS
1079 * or the REM suggests raw-mode execution.
1080 */
1081 *pfFFDone = false;
1082 uint32_t cLoops = 0;
1083 int rc = VINF_SUCCESS;
1084 for (;;)
1085 {
1086 /*
1087 * Execute REM.
1088 */
1089 if (RT_LIKELY(emR3IsExecutionAllowed(pVM, pVCpu)))
1090 {
1091 STAM_PROFILE_START(&pVCpu->em.s.StatREMExec, c);
1092 rc = VBOXSTRICTRC_TODO(IEMExecLots(pVCpu, 8192 /*cMaxInstructions*/, 4095 /*cPollRate*/, NULL /*pcInstructions*/));
1093 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMExec, c);
1094 }
1095 else
1096 {
1097 /* Give up this time slice; virtual time continues */
1098 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatCapped, u);
1099 RTThreadSleep(5);
1100 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatCapped, u);
1101 rc = VINF_SUCCESS;
1102 }
1103
1104 /*
1105 * Deal with high priority post execution FFs before doing anything
1106 * else. Sync back the state and leave the lock to be on the safe side.
1107 */
1108 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_HIGH_PRIORITY_POST_MASK)
1109 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_MASK))
1110 rc = VBOXSTRICTRC_TODO(emR3HighPriorityPostForcedActions(pVM, pVCpu, rc));
1111
1112 /*
1113 * Process the returned status code.
1114 */
1115 if (rc != VINF_SUCCESS)
1116 {
1117 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
1118 break;
1119 if (rc != VINF_REM_INTERRUPED_FF)
1120 {
1121 /* Try to dodge unimplemented IEM trouble by rescheduling. */
1122 if ( rc == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1123 || rc == VERR_IEM_INSTR_NOT_IMPLEMENTED)
1124 {
1125 EMSTATE enmNewState = emR3Reschedule(pVM, pVCpu);
1126 if (enmNewState != EMSTATE_REM && enmNewState != EMSTATE_IEM_THEN_REM)
1127 {
1128 rc = VINF_EM_RESCHEDULE;
1129 break;
1130 }
1131 }
1132
1133 /*
1134 * Anything which is not known to us means an internal error
1135 * and the termination of the VM!
1136 */
1137 AssertMsg(rc == VERR_REM_TOO_MANY_TRAPS, ("Unknown GC return code: %Rra\n", rc));
1138 break;
1139 }
1140 }
1141
1142
1143 /*
1144 * Check and execute forced actions.
1145 *
1146 * Sync back the VM state and leave the lock before calling any of
1147 * these, you never know what's going to happen here.
1148 */
1149#ifdef VBOX_HIGH_RES_TIMERS_HACK
1150 TMTimerPollVoid(pVM, pVCpu);
1151#endif
1152 AssertCompile(VMCPU_FF_ALL_REM_MASK & VMCPU_FF_TIMER);
1153 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_REM_MASK)
1154 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_ALL_REM_MASK) )
1155 {
1156 STAM_REL_PROFILE_ADV_SUSPEND(&pVCpu->em.s.StatREMTotal, a);
1157 rc = emR3ForcedActions(pVM, pVCpu, rc);
1158 VBOXVMM_EM_FF_ALL_RET(pVCpu, rc);
1159 STAM_REL_PROFILE_ADV_RESUME(&pVCpu->em.s.StatREMTotal, a);
1160 if ( rc != VINF_SUCCESS
1161 && rc != VINF_EM_RESCHEDULE_REM)
1162 {
1163 *pfFFDone = true;
1164 break;
1165 }
1166 }
1167
1168 /*
1169 * Have to check if we can get back to fast execution mode every so often.
1170 */
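/* Only every 8th iteration (cLoops & 7) so this rescheduling check stays cheap. */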
1171 if (!(++cLoops & 7))
1172 {
1173 EMSTATE enmCheck = emR3Reschedule(pVM, pVCpu);
1174 if ( enmCheck != EMSTATE_REM
1175 && enmCheck != EMSTATE_IEM_THEN_REM)
1176 {
1177 LogFlow(("emR3RemExecute: emR3Reschedule -> %d -> VINF_EM_RESCHEDULE\n", enmCheck));
1178 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatREMTotal, a);
1179 return VINF_EM_RESCHEDULE;
1180 }
1181 Log2(("emR3RemExecute: emR3Reschedule -> %d\n", enmCheck));
1182 }
1183
1184 } /* The Inner Loop, recompiled execution mode version. */
1185
1186 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatREMTotal, a);
1187 return rc;
1188}
1189
1190
1191#ifdef DEBUG
1192
1193int emR3SingleStepExecRem(PVM pVM, PVMCPU pVCpu, uint32_t cIterations)
1194{
1195 EMSTATE enmOldState = pVCpu->em.s.enmState;
1196
1197 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_REM;
1198
1199 Log(("Single step BEGIN:\n"));
1200 for (uint32_t i = 0; i < cIterations; i++)
1201 {
1202 DBGFR3PrgStep(pVCpu);
1203 DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, "RSS");
1204 emR3RemStep(pVM, pVCpu);
1205 if (emR3Reschedule(pVM, pVCpu) != EMSTATE_REM)
1206 break;
1207 }
1208 Log(("Single step END:\n"));
1209 CPUMSetGuestEFlags(pVCpu, CPUMGetGuestEFlags(pVCpu) & ~X86_EFL_TF);
1210 pVCpu->em.s.enmState = enmOldState;
1211 return VINF_EM_RESCHEDULE;
1212}
1213
1214#endif /* DEBUG */
1215
1216
1217/**
1218 * Try to execute the problematic code in IEM first, then fall back on REM if there
1219 * is too much of it or if IEM doesn't implement something.
1220 *
1221 * @returns Strict VBox status code from IEMExecLots.
1222 * @param pVM The cross context VM structure.
1223 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1224 * @param pfFFDone Force flags done indicator.
1225 *
1226 * @thread EMT(pVCpu)
1227 */
1228static VBOXSTRICTRC emR3ExecuteIemThenRem(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
1229{
1230 LogFlow(("emR3ExecuteIemThenRem: %04x:%RGv\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestRIP(pVCpu)));
1231 *pfFFDone = false;
1232
1233 /*
1234 * Execute in IEM for a while.
1235 */
1236 while (pVCpu->em.s.cIemThenRemInstructions < 1024)
1237 {
1238 uint32_t cInstructions;
1239 VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu, 1024 - pVCpu->em.s.cIemThenRemInstructions /*cMaxInstructions*/,
1240 UINT32_MAX/2 /*cPollRate*/, &cInstructions);
1241 pVCpu->em.s.cIemThenRemInstructions += cInstructions;
1242 if (rcStrict != VINF_SUCCESS)
1243 {
1244 if ( rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1245 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
1246 break;
1247
1248 Log(("emR3ExecuteIemThenRem: returns %Rrc after %u instructions\n",
1249 VBOXSTRICTRC_VAL(rcStrict), pVCpu->em.s.cIemThenRemInstructions));
1250 return rcStrict;
1251 }
1252
1253 EMSTATE enmNewState = emR3Reschedule(pVM, pVCpu);
1254 if (enmNewState != EMSTATE_REM && enmNewState != EMSTATE_IEM_THEN_REM)
1255 {
1256 LogFlow(("emR3ExecuteIemThenRem: -> %d (%s) after %u instructions\n",
1257 enmNewState, emR3GetStateName(enmNewState), pVCpu->em.s.cIemThenRemInstructions));
1258 pVCpu->em.s.enmPrevState = pVCpu->em.s.enmState;
1259 pVCpu->em.s.enmState = enmNewState;
1260 return VINF_SUCCESS;
1261 }
1262
1263 /*
1264 * Check for pending actions.
1265 */
1266 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_REM_MASK)
1267 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_ALL_REM_MASK & ~VMCPU_FF_UNHALT))
1268 return VINF_SUCCESS;
1269 }
1270
1271 /*
1272 * Switch to REM.
1273 */
1274 Log(("emR3ExecuteIemThenRem: -> EMSTATE_REM (after %u instructions)\n", pVCpu->em.s.cIemThenRemInstructions));
1275 pVCpu->em.s.enmState = EMSTATE_REM;
1276 return VINF_SUCCESS;
1277}
1278
1279
1280/**
1281 * Decides whether to execute RAW, HWACC or REM.
1282 *
1283 * @returns new EM state
1284 * @param pVM The cross context VM structure.
1285 * @param pVCpu The cross context virtual CPU structure.
1286 */
1287EMSTATE emR3Reschedule(PVM pVM, PVMCPU pVCpu)
1288{
1289 /*
1290 * We stay in the wait for SIPI state unless explicitly told otherwise.
1291 */
1292 if (pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI)
1293 return EMSTATE_WAIT_SIPI;
1294
1295 /*
1296 * Execute everything in IEM?
1297 */
1298 if ( pVM->em.s.fIemExecutesAll
1299 || VM_IS_EXEC_ENGINE_IEM(pVM))
1300 return EMSTATE_IEM;
1301
1302 if (VM_IS_HM_ENABLED(pVM))
1303 {
1304 if (HMCanExecuteGuest(pVM, pVCpu, &pVCpu->cpum.GstCtx))
1305 return EMSTATE_HM;
1306 }
1307 else if (NEMR3CanExecuteGuest(pVM, pVCpu))
1308 return EMSTATE_NEM;
1309
1310 /*
1311 * Note! Raw mode and hw accelerated mode are incompatible. The latter
1312 * turns off monitoring features essential for raw mode!
1313 */
1314 return EMSTATE_IEM_THEN_REM;
1315}
1316
1317
1318/**
1319 * Executes all high priority post execution force actions.
1320 *
1321 * @returns Strict VBox status code. Typically @a rc, but may be upgraded to
1322 * fatal error status code.
1323 *
1324 * @param pVM The cross context VM structure.
1325 * @param pVCpu The cross context virtual CPU structure.
1326 * @param rc The current strict VBox status code rc.
1327 */
1328VBOXSTRICTRC emR3HighPriorityPostForcedActions(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc)
1329{
1330 VBOXVMM_EM_FF_HIGH(pVCpu, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions, VBOXSTRICTRC_VAL(rc));
1331
1332 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PDM_CRITSECT))
1333 PDMCritSectBothFF(pVM, pVCpu);
1334
1335 /* Update CR3 (Nested Paging case for HM). */
1336 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
1337 {
1338 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_EFER, rc);
1339 int const rc2 = PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
1340 if (RT_FAILURE(rc2))
1341 return rc2;
1342 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
1343 }
1344
1345 /* IEM has pending work (typically memory write after INS instruction). */
1346 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
1347 rc = IEMR3ProcessForceFlag(pVM, pVCpu, rc);
1348
1349 /* IOM has pending work (committing an I/O or MMIO write). */
1350 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IOM))
1351 {
1352 rc = IOMR3ProcessForceFlag(pVM, pVCpu, rc);
1353 if (pVCpu->em.s.idxContinueExitRec >= RT_ELEMENTS(pVCpu->em.s.aExitRecords))
1354 { /* half likely, or at least it's a line shorter. */ }
1355 else if (rc == VINF_SUCCESS)
1356 rc = VINF_EM_RESUME_R3_HISTORY_EXEC;
1357 else
1358 pVCpu->em.s.idxContinueExitRec = UINT16_MAX;
1359 }
1360
1361 if (VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
1362 {
1363 if ( rc > VINF_EM_NO_MEMORY
1364 && rc <= VINF_EM_LAST)
1365 rc = VINF_EM_NO_MEMORY;
1366 }
1367
1368 return rc;
1369}
1370
1371
1372/**
1373 * Helper for emR3ForcedActions() for VMX external interrupt VM-exit.
1374 *
1375 * @returns VBox status code.
1376 * @retval VINF_NO_CHANGE if the VMX external interrupt intercept was not active.
1377 * @param pVCpu The cross context virtual CPU structure.
1378 */
1379static int emR3VmxNstGstIntrIntercept(PVMCPU pVCpu)
1380{
1381#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1382 /* Handle the "external interrupt" VM-exit intercept. */
1383 if (CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_EXT_INT_EXIT))
1384 {
1385 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, 0 /* uVector */, true /* fIntPending */);
1386 AssertMsg( rcStrict != VINF_VMX_VMEXIT
1387 && rcStrict != VINF_NO_CHANGE, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1388 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
1389 return VBOXSTRICTRC_TODO(rcStrict);
1390 }
1391#else
1392 RT_NOREF(pVCpu);
1393#endif
1394 return VINF_NO_CHANGE;
1395}
1396
1397
1398/**
1399 * Helper for emR3ForcedActions() for SVM interrupt intercept.
1400 *
1401 * @returns VBox status code.
1402 * @retval VINF_NO_CHANGE if the SVM external interrupt intercept was not active.
1403 * @param pVCpu The cross context virtual CPU structure.
1404 */
1405static int emR3SvmNstGstIntrIntercept(PVMCPU pVCpu)
1406{
1407#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1408 /* Handle the physical interrupt intercept (can be masked by the nested hypervisor). */
1409 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, &pVCpu->cpum.GstCtx, SVM_CTRL_INTERCEPT_INTR))
1410 {
1411 CPUM_ASSERT_NOT_EXTRN(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK);
1412 VBOXSTRICTRC rcStrict = IEMExecSvmVmexit(pVCpu, SVM_EXIT_INTR, 0, 0);
1413 if (RT_SUCCESS(rcStrict))
1414 {
1415 AssertMsg( rcStrict != VINF_SVM_VMEXIT
1416 && rcStrict != VINF_NO_CHANGE, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1417 return VBOXSTRICTRC_VAL(rcStrict);
1418 }
1419
1420 AssertMsgFailed(("INTR #VMEXIT failed! rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1421 return VINF_EM_TRIPLE_FAULT;
1422 }
1423#else
1424 NOREF(pVCpu);
1425#endif
1426 return VINF_NO_CHANGE;
1427}
1428
1429
1430/**
1431 * Helper for emR3ForcedActions() for SVM virtual interrupt intercept.
1432 *
1433 * @returns VBox status code.
1434 * @retval VINF_NO_CHANGE if the SVM virtual interrupt intercept was not active.
1435 * @param pVCpu The cross context virtual CPU structure.
1436 */
1437static int emR3SvmNstGstVirtIntrIntercept(PVMCPU pVCpu)
1438{
1439#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1440 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, &pVCpu->cpum.GstCtx, SVM_CTRL_INTERCEPT_VINTR))
1441 {
1442 CPUM_ASSERT_NOT_EXTRN(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK);
1443 VBOXSTRICTRC rcStrict = IEMExecSvmVmexit(pVCpu, SVM_EXIT_VINTR, 0, 0);
1444 if (RT_SUCCESS(rcStrict))
1445 {
1446 Assert(rcStrict != VINF_SVM_VMEXIT);
1447 return VBOXSTRICTRC_VAL(rcStrict);
1448 }
1449 AssertMsgFailed(("VINTR #VMEXIT failed! rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1450 return VINF_EM_TRIPLE_FAULT;
1451 }
1452#else
1453 NOREF(pVCpu);
1454#endif
1455 return VINF_NO_CHANGE;
1456}
1457
1458
1459/**
1460 * Executes all pending forced actions.
1461 *
1462 * Forced actions can cause execution delays and execution
1463 * rescheduling. The first we deal with using action priority, so
1464 * that for instance pending timers aren't scheduled and ran until
1465 * right before execution. The rescheduling we deal with using
1466 * return codes. The same goes for VM termination, only in that case
1467 * we exit everything.
1468 *
1469 * @returns VBox status code of equal or greater importance/severity than rc.
1470 * The most important ones are: VINF_EM_RESCHEDULE,
1471 * VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1472 *
1473 * @param pVM The cross context VM structure.
1474 * @param pVCpu The cross context virtual CPU structure.
1475 * @param rc The current rc.
1476 *
1477 */
1478int emR3ForcedActions(PVM pVM, PVMCPU pVCpu, int rc)
1479{
1480 STAM_REL_PROFILE_START(&pVCpu->em.s.StatForcedActions, a);
1481#ifdef VBOX_STRICT
1482 int rcIrq = VINF_SUCCESS;
1483#endif
1484 int rc2;
1485#define UPDATE_RC() \
1486 do { \
1487 AssertMsg(rc2 <= 0 || (rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST), ("Invalid FF return code: %Rra\n", rc2)); \
1488 if (rc2 == VINF_SUCCESS || rc < VINF_SUCCESS) \
1489 break; \
1490 if (!rc || rc2 < rc) \
1491 rc = rc2; \
1492 } while (0)
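/* UPDATE_RC() folds rc2 into rc: a VINF_SUCCESS rc2 is ignored, an rc that already carries an
   error status is never overwritten, and otherwise the numerically smaller (higher priority)
   VINF_EM_* status wins. */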
1493 VBOXVMM_EM_FF_ALL(pVCpu, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions, rc);
1494
1495 /*
1496 * Post execution chunk first.
1497 */
1498 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_NORMAL_PRIORITY_POST_MASK)
1499 || (VMCPU_FF_NORMAL_PRIORITY_POST_MASK && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_NORMAL_PRIORITY_POST_MASK)) )
1500 {
1501 /*
1502 * EMT Rendezvous (must be serviced before termination).
1503 */
1504 if (VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
1505 {
1506 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1507 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
1508 UPDATE_RC();
1509 /** @todo HACK ALERT! The following test is to make sure EM+TM
1510 * thinks the VM is stopped/reset before the next VM state change
1511 * is made. We need a better solution for this, or at least make it
1512 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1513 * VINF_EM_SUSPEND). */
1514 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1515 {
1516 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1517 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1518 return rc;
1519 }
1520 }
1521
1522 /*
1523 * State change request (cleared by vmR3SetStateLocked).
1524 */
1525 if (VM_FF_IS_SET(pVM, VM_FF_CHECK_VM_STATE))
1526 {
1527 VMSTATE enmState = VMR3GetState(pVM);
1528 switch (enmState)
1529 {
1530 case VMSTATE_FATAL_ERROR:
1531 case VMSTATE_FATAL_ERROR_LS:
1532 case VMSTATE_GURU_MEDITATION:
1533 case VMSTATE_GURU_MEDITATION_LS:
1534 Log2(("emR3ForcedActions: %s -> VINF_EM_SUSPEND\n", VMGetStateName(enmState) ));
1535 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1536 return VINF_EM_SUSPEND;
1537
1538 case VMSTATE_DESTROYING:
1539 Log2(("emR3ForcedActions: %s -> VINF_EM_TERMINATE\n", VMGetStateName(enmState) ));
1540 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1541 return VINF_EM_TERMINATE;
1542
1543 default:
1544 AssertMsgFailed(("%s\n", VMGetStateName(enmState)));
1545 }
1546 }
1547
1548 /*
1549 * Debugger Facility polling.
1550 */
1551 if ( VM_FF_IS_SET(pVM, VM_FF_DBGF)
1552 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_DBGF) )
1553 {
1554 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1555 rc2 = DBGFR3VMMForcedAction(pVM, pVCpu);
1556 /** @todo why that VINF_EM_DBG_EVENT here? Duplicate info, should be handled
1557 * somewhere before we get here, I would think. */
1558 if (rc == VINF_EM_DBG_EVENT) /* HACK! We should've handled pending debug event. */
1559 rc = rc2;
1560 else
1561 UPDATE_RC();
1562 }
1563
1564 /*
1565 * Postponed reset request.
1566 */
1567 if (VM_FF_TEST_AND_CLEAR(pVM, VM_FF_RESET))
1568 {
1569 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1570 rc2 = VBOXSTRICTRC_TODO(VMR3ResetFF(pVM));
1571 UPDATE_RC();
1572 }
1573
1574 /*
1575 * Out of memory? Putting this after CSAM as it may in theory cause us to run out of memory.
1576 */
1577 if (VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
1578 {
1579 rc2 = PGMR3PhysAllocateHandyPages(pVM);
1580 UPDATE_RC();
1581 if (rc == VINF_EM_NO_MEMORY)
1582 return rc;
1583 }
1584
1585 /* check that we got them all */
1586 AssertCompile(VM_FF_NORMAL_PRIORITY_POST_MASK == (VM_FF_CHECK_VM_STATE | VM_FF_DBGF | VM_FF_RESET | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
1587 AssertCompile(VMCPU_FF_NORMAL_PRIORITY_POST_MASK == VMCPU_FF_DBGF);
1588 }
1589
1590 /*
1591 * Normal priority then.
1592 * (Executed in no particular order.)
1593 */
1594 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_NORMAL_PRIORITY_MASK, VM_FF_PGM_NO_MEMORY))
1595 {
1596 /*
1597 * PDM Queues are pending.
1598 */
1599 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PDM_QUEUES, VM_FF_PGM_NO_MEMORY))
1600 PDMR3QueueFlushAll(pVM);
1601
1602 /*
1603 * PDM DMA transfers are pending.
1604 */
1605 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PDM_DMA, VM_FF_PGM_NO_MEMORY))
1606 PDMR3DmaRun(pVM);
1607
1608 /*
1609 * EMT Rendezvous (make sure they are handled before the requests).
1610 */
1611 if (VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
1612 {
1613 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1614 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
1615 UPDATE_RC();
1616 /** @todo HACK ALERT! The following test is to make sure EM+TM
1617 * thinks the VM is stopped/reset before the next VM state change
1618 * is made. We need a better solution for this, or at least make it
1619 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1620 * VINF_EM_SUSPEND). */
1621 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1622 {
1623 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1624 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1625 return rc;
1626 }
1627 }
1628
1629 /*
1630 * Requests from other threads.
1631 */
1632 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_REQUEST, VM_FF_PGM_NO_MEMORY))
1633 {
1634 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1635 rc2 = VMR3ReqProcessU(pVM->pUVM, VMCPUID_ANY, false /*fPriorityOnly*/);
1636 if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE) /** @todo this shouldn't be necessary */
1637 {
1638 Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
1639 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1640 return rc2;
1641 }
1642 UPDATE_RC();
1643 /** @todo HACK ALERT! The following test is to make sure EM+TM
1644 * thinks the VM is stopped/reset before the next VM state change
1645 * is made. We need a better solution for this, or at least make it
1646 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1647 * VINF_EM_SUSPEND). */
1648 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1649 {
1650 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1651 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1652 return rc;
1653 }
1654 }
1655
1656 /* check that we got them all */
1657 AssertCompile(VM_FF_NORMAL_PRIORITY_MASK == (VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA | VM_FF_EMT_RENDEZVOUS));
1658 }
1659
1660 /*
1661 * Normal priority then. (per-VCPU)
1662 * (Executed in no particular order.)
1663 */
1664 if ( !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY)
1665 && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_NORMAL_PRIORITY_MASK))
1666 {
1667 /*
1668 * Requests from other threads.
1669 */
1670 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_REQUEST))
1671 {
1672 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1673 rc2 = VMR3ReqProcessU(pVM->pUVM, pVCpu->idCpu, false /*fPriorityOnly*/);
1674 if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE || rc2 == VINF_EM_RESET)
1675 {
1676 Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
1677 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1678 return rc2;
1679 }
1680 UPDATE_RC();
1681 /** @todo HACK ALERT! The following test is to make sure EM+TM
1682 * thinks the VM is stopped/reset before the next VM state change
1683 * is made. We need a better solution for this, or at least make it
1684 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1685 * VINF_EM_SUSPEND). */
1686 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1687 {
1688 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1689 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1690 return rc;
1691 }
1692 }
1693
1694 /* check that we got them all */
1695 Assert(!(VMCPU_FF_NORMAL_PRIORITY_MASK & ~VMCPU_FF_REQUEST));
1696 }
1697
1698 /*
1699 * High priority pre execution chunk last.
1700 * (Executed in ascending priority order.)
1701 */
1702 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_HIGH_PRIORITY_PRE_MASK)
1703 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_MASK))
1704 {
1705 /*
1706 * Timers before interrupts.
1707 */
1708 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TIMER)
1709 && !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
1710 TMR3TimerQueuesDo(pVM);
1711
1712 /*
1713 * Pick up asynchronously posted interrupts into the APIC.
1714 */
1715 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
1716 APICUpdatePendingInterrupts(pVCpu);
1717
1718 /*
1719 * The instruction following an emulated STI should *always* be executed!
1720 *
1721 * Note! We intentionally don't clear VMCPU_FF_INHIBIT_INTERRUPTS here if
1722 * the eip is the same as the inhibited instr address. Before we
1723 * are able to execute this instruction in raw mode (iret to
1724 * guest code) an external interrupt might force a world switch
1725 * again. Possibly allowing a guest interrupt to be dispatched
1726 * in the process. This could break the guest. Sounds very
1727 * unlikely, but such timing sensitive problems are not as rare as
1728 * you might think.
1729 */
1730 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
1731 && !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
1732 {
1733 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP);
1734 if (CPUMGetGuestRIP(pVCpu) != EMGetInhibitInterruptsPC(pVCpu))
1735 {
1736 Log(("Clearing VMCPU_FF_INHIBIT_INTERRUPTS at %RGv - successor %RGv\n", (RTGCPTR)CPUMGetGuestRIP(pVCpu), EMGetInhibitInterruptsPC(pVCpu)));
1737 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
1738 }
1739 else
1740 Log(("Leaving VMCPU_FF_INHIBIT_INTERRUPTS set at %RGv\n", (RTGCPTR)CPUMGetGuestRIP(pVCpu)));
1741 }
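 /* Note: the interrupt shadow spans exactly one guest instruction, e.g. the
  * classic "sti; hlt" idiom where the HLT must execute before any external
  * interrupt can be delivered; hence the FF is only cleared once RIP has
  * moved off the recorded successor instruction. */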
1742
1743 /** @todo SMIs. If we implement SMIs, this is where they will have to be
1744 * delivered. */
1745
1746#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1747 if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER))
1748 {
1749 /*
1750 * VMX Nested-guest APIC-write pending (can cause VM-exits).
1751 * Takes priority over even SMI and INIT signals.
1752 * See Intel spec. 29.4.3.2 "APIC-Write Emulation".
1753 */
1754 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
1755 {
1756 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexitApicWrite(pVCpu));
1757 if (rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
1758 UPDATE_RC();
1759 }
1760
1761 /*
1762 * VMX Nested-guest monitor-trap flag (MTF) VM-exit.
1763 * Takes priority over "Traps on the previous instruction".
1764 * See Intel spec. 6.9 "Priority Among Simultaneous Exceptions And Interrupts".
1765 */
1766 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF))
1767 {
1768 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* uExitQual */));
1769 Assert(rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE);
1770 UPDATE_RC();
1771 }
1772
1773 /*
1774 * VMX Nested-guest preemption timer VM-exit.
1775 * Takes priority over NMI-window VM-exits.
1776 */
1777 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
1778 {
1779 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexitPreemptTimer(pVCpu));
1780 Assert(rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE);
1781 UPDATE_RC();
1782 }
1783 }
1784#endif
1785
1786 /*
1787 * Guest event injection.
1788 */
1789 Assert(!(pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI)));
1790 bool fWakeupPending = false;
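 /* Note: fWakeupPending is set below when an NMI or external interrupt is
  * actually scheduled for injection; the later rendezvous and state-change
  * checks are skipped in that case so a CPU leaving EMSTATE_HALTED cannot
  * miss its wakeup. */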
1791 if ( !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY)
1792 && (!rc || rc >= VINF_EM_RESCHEDULE_HM)
1793 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS) /* Interrupt shadows block both NMIs and interrupts. */
1794 && !TRPMHasTrap(pVCpu)) /* An event could already be scheduled for dispatching. */
1795 {
1796 bool fInVmxNonRootMode;
1797 bool fInSvmHwvirtMode;
1798 bool const fInNestedGuest = CPUMIsGuestInNestedHwvirtMode(&pVCpu->cpum.GstCtx);
1799 if (fInNestedGuest)
1800 {
1801 fInVmxNonRootMode = CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx);
1802 fInSvmHwvirtMode = CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.GstCtx);
1803 }
1804 else
1805 {
1806 fInVmxNonRootMode = false;
1807 fInSvmHwvirtMode = false;
1808 }
1809
1810 bool fGif = CPUMGetGuestGif(&pVCpu->cpum.GstCtx);
1811 if (fGif)
1812 {
1813#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1814 /*
1815 * VMX NMI-window VM-exit.
1816 * Takes priority over non-maskable interrupts (NMIs).
1817 * Interrupt shadows block NMI-window VM-exits.
1818 * Any event that is already in TRPM (e.g. injected during VM-entry) takes priority.
1819 *
1820 * See Intel spec. 25.2 "Other Causes Of VM Exits".
1821 * See Intel spec. 26.7.6 "NMI-Window Exiting".
1822 */
1823 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
1824 && !CPUMIsGuestVmxVirtNmiBlocking(&pVCpu->cpum.GstCtx))
1825 {
1826 Assert(CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_NMI_WINDOW_EXIT));
1827 Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
1828 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* uExitQual */));
1829 AssertMsg( rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE
1830 && rc2 != VINF_VMX_VMEXIT
1831 && rc2 != VINF_NO_CHANGE, ("%Rrc\n", rc2));
1832 UPDATE_RC();
1833 }
1834 else
1835#endif
1836 /*
1837 * NMIs (take priority over external interrupts).
1838 */
1839 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI)
1840 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
1841 {
1842#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1843 if ( fInVmxNonRootMode
1844 && CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_NMI_EXIT))
1845 {
1846 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexitXcptNmi(pVCpu));
1847 Assert(rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE);
1848 UPDATE_RC();
1849 }
1850 else
1851#endif
1852#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1853 if ( fInSvmHwvirtMode
1854 && CPUMIsGuestSvmCtrlInterceptSet(pVCpu, &pVCpu->cpum.GstCtx, SVM_CTRL_INTERCEPT_NMI))
1855 {
1856 rc2 = VBOXSTRICTRC_VAL(IEMExecSvmVmexit(pVCpu, SVM_EXIT_NMI, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */));
1857 AssertMsg( rc2 != VINF_SVM_VMEXIT
1858 && rc2 != VINF_NO_CHANGE, ("%Rrc\n", rc2));
1859 UPDATE_RC();
1860 }
1861 else
1862#endif
1863 {
1864 rc2 = TRPMAssertTrap(pVCpu, X86_XCPT_NMI, TRPM_TRAP);
1865 if (rc2 == VINF_SUCCESS)
1866 {
1867 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
1868 fWakeupPending = true;
1869 if (pVM->em.s.fIemExecutesAll)
1870 rc2 = VINF_EM_RESCHEDULE;
1871 else
1872 {
1873 rc2 = HMR3IsActive(pVCpu) ? VINF_EM_RESCHEDULE_HM
1874 : VM_IS_NEM_ENABLED(pVM) ? VINF_EM_RESCHEDULE
1875 : VINF_EM_RESCHEDULE_REM;
1876 }
1877 }
1878 UPDATE_RC();
1879 }
1880 }
1881#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1882 /*
1883 * VMX Interrupt-window VM-exits.
1884 * Takes priority over external interrupts.
1885 */
1886 else if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
1887 && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
1888 {
1889 Assert(CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INT_WINDOW_EXIT));
1890 Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
1891 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* uExitQual */));
1892 AssertMsg( rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE
1893 && rc2 != VINF_VMX_VMEXIT
1894 && rc2 != VINF_NO_CHANGE, ("%Rrc\n", rc2));
1895 UPDATE_RC();
1896 }
1897#endif
1898#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1899 /** @todo NSTSVM: Handle this for SVM here too later not when an interrupt is
1900 * actually pending like we currently do. */
1901#endif
1902 /*
1903 * External interrupts.
1904 */
1905 else
1906 {
1907 /*
1908 * VMX: virtual interrupts take priority over physical interrupts.
1909 * SVM: physical interrupts take priority over virtual interrupts.
1910 */
1911 if ( fInVmxNonRootMode
1912 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST)
1913 && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
1914 {
1915 /** @todo NSTVMX: virtual-interrupt delivery. */
1916 rc2 = VINF_SUCCESS;
1917 }
1918 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
1919 && CPUMIsGuestPhysIntrEnabled(pVCpu))
1920 {
1921 Assert(pVCpu->em.s.enmState != EMSTATE_WAIT_SIPI);
1922 if (fInVmxNonRootMode)
1923 rc2 = emR3VmxNstGstIntrIntercept(pVCpu);
1924 else if (fInSvmHwvirtMode)
1925 rc2 = emR3SvmNstGstIntrIntercept(pVCpu);
1926 else
1927 rc2 = VINF_NO_CHANGE;
1928
1929 if (rc2 == VINF_NO_CHANGE)
1930 {
1931 bool fInjected = false;
1932 CPUM_IMPORT_EXTRN_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
1933 /** @todo this really isn't nice, should properly handle this */
1934 /* Note! This can still cause a VM-exit (on Intel). */
1935 LogFlow(("Calling TRPMR3InjectEvent: %04x:%08RX64 efl=%#x\n",
1936 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eflags));
1937 rc2 = TRPMR3InjectEvent(pVM, pVCpu, TRPM_HARDWARE_INT, &fInjected);
1938 fWakeupPending = true;
1939 if ( pVM->em.s.fIemExecutesAll
1940 && ( rc2 == VINF_EM_RESCHEDULE_REM
1941 || rc2 == VINF_EM_RESCHEDULE_HM
1942 || rc2 == VINF_EM_RESCHEDULE_RAW))
1943 {
1944 rc2 = VINF_EM_RESCHEDULE;
1945 }
1946#ifdef VBOX_STRICT
1947 if (fInjected)
1948 rcIrq = rc2;
1949#endif
1950 }
1951 UPDATE_RC();
1952 }
1953 else if ( fInSvmHwvirtMode
1954 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST)
1955 && CPUMIsGuestSvmVirtIntrEnabled(pVCpu, &pVCpu->cpum.GstCtx))
1956 {
1957 rc2 = emR3SvmNstGstVirtIntrIntercept(pVCpu);
1958 if (rc2 == VINF_NO_CHANGE)
1959 {
1960 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST);
1961 uint8_t const uNstGstVector = CPUMGetGuestSvmVirtIntrVector(&pVCpu->cpum.GstCtx);
1962 AssertMsg(uNstGstVector > 0 && uNstGstVector <= X86_XCPT_LAST, ("Invalid VINTR %#x\n", uNstGstVector));
1963 TRPMAssertTrap(pVCpu, uNstGstVector, TRPM_HARDWARE_INT);
1964 Log(("EM: Asserting nested-guest virt. hardware intr: %#x\n", uNstGstVector));
1965 rc2 = VINF_EM_RESCHEDULE;
1966#ifdef VBOX_STRICT
1967 rcIrq = rc2;
1968#endif
1969 }
1970 UPDATE_RC();
1971 }
1972 }
1973 }
1974 }
1975
1976 /*
1977 * Allocate handy pages.
1978 */
1979 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PGM_NEED_HANDY_PAGES, VM_FF_PGM_NO_MEMORY))
1980 {
1981 rc2 = PGMR3PhysAllocateHandyPages(pVM);
1982 UPDATE_RC();
1983 }
1984
1985 /*
1986 * Debugger Facility request.
1987 */
1988 if ( ( VM_FF_IS_SET(pVM, VM_FF_DBGF)
1989 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_DBGF) )
1990 && !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY) )
1991 {
1992 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1993 rc2 = DBGFR3VMMForcedAction(pVM, pVCpu);
1994 UPDATE_RC();
1995 }
1996
1997 /*
1998 * EMT Rendezvous (must be serviced before termination).
1999 */
2000 if ( !fWakeupPending /* don't miss the wakeup from EMSTATE_HALTED! */
2001 && VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
2002 {
2003 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
2004 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
2005 UPDATE_RC();
2006 /** @todo HACK ALERT! The following test is to make sure EM+TM thinks the VM is
2007 * stopped/reset before the next VM state change is made. We need a better
2008 * solution for this, or at least make it possible to do: (rc >= VINF_EM_FIRST
2009 * && rc <= VINF_EM_SUSPEND). */
2010 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
2011 {
2012 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2013 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2014 return rc;
2015 }
2016 }
2017
2018 /*
2019 * State change request (cleared by vmR3SetStateLocked).
2020 */
2021 if ( !fWakeupPending /* don't miss the wakeup from EMSTATE_HALTED! */
2022 && VM_FF_IS_SET(pVM, VM_FF_CHECK_VM_STATE))
2023 {
2024 VMSTATE enmState = VMR3GetState(pVM);
2025 switch (enmState)
2026 {
2027 case VMSTATE_FATAL_ERROR:
2028 case VMSTATE_FATAL_ERROR_LS:
2029 case VMSTATE_GURU_MEDITATION:
2030 case VMSTATE_GURU_MEDITATION_LS:
2031 Log2(("emR3ForcedActions: %s -> VINF_EM_SUSPEND\n", VMGetStateName(enmState) ));
2032 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2033 return VINF_EM_SUSPEND;
2034
2035 case VMSTATE_DESTROYING:
2036 Log2(("emR3ForcedActions: %s -> VINF_EM_TERMINATE\n", VMGetStateName(enmState) ));
2037 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2038 return VINF_EM_TERMINATE;
2039
2040 default:
2041 AssertMsgFailed(("%s\n", VMGetStateName(enmState)));
2042 }
2043 }
2044
2045 /*
2046 * Out of memory? Since most of our fellow high priority actions may cause us
2047 * to run out of memory, we're employing VM_FF_IS_PENDING_EXCEPT and putting this
2048 * at the end rather than the start. Also, VM_FF_TERMINATE has higher priority
2049 * than us since we can terminate without allocating more memory.
2050 */
2051 if (VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
2052 {
2053 rc2 = PGMR3PhysAllocateHandyPages(pVM);
2054 UPDATE_RC();
2055 if (rc == VINF_EM_NO_MEMORY)
2056 return rc;
2057 }
2058
2059 /*
2060 * If the virtual sync clock is still stopped, make TM restart it.
2061 */
2062 if (VM_FF_IS_SET(pVM, VM_FF_TM_VIRTUAL_SYNC))
2063 TMR3VirtualSyncFF(pVM, pVCpu);
2064
2065#ifdef DEBUG
2066 /*
2067 * Debug, pause the VM.
2068 */
2069 if (VM_FF_IS_SET(pVM, VM_FF_DEBUG_SUSPEND))
2070 {
2071 VM_FF_CLEAR(pVM, VM_FF_DEBUG_SUSPEND);
2072 Log(("emR3ForcedActions: returns VINF_EM_SUSPEND\n"));
2073 return VINF_EM_SUSPEND;
2074 }
2075#endif
2076
2077 /* check that we got them all */
2078 AssertCompile(VM_FF_HIGH_PRIORITY_PRE_MASK == (VM_FF_TM_VIRTUAL_SYNC | VM_FF_DBGF | VM_FF_CHECK_VM_STATE | VM_FF_DEBUG_SUSPEND | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
2079 AssertCompile(VMCPU_FF_HIGH_PRIORITY_PRE_MASK == (VMCPU_FF_TIMER | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_INHIBIT_INTERRUPTS | VMCPU_FF_DBGF | VMCPU_FF_INTERRUPT_NESTED_GUEST | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_PREEMPT_TIMER | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW));
2080 }
2081
2082#undef UPDATE_RC
2083 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2084 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2085 Assert(rcIrq == VINF_SUCCESS || rcIrq == rc);
2086 return rc;
2087}
2088
2089
2090/**
2091 * Check if the preset execution time cap restricts guest execution scheduling.
2092 *
2093 * @returns true if allowed, false otherwise
2094 * @param pVM The cross context VM structure.
2095 * @param pVCpu The cross context virtual CPU structure.
2096 */
2097bool emR3IsExecutionAllowed(PVM pVM, PVMCPU pVCpu)
2098{
2099 uint64_t u64UserTime, u64KernelTime;
2100
2101 if ( pVM->uCpuExecutionCap != 100
2102 && RT_SUCCESS(RTThreadGetExecutionTimeMilli(&u64KernelTime, &u64UserTime)))
2103 {
2104 uint64_t u64TimeNow = RTTimeMilliTS();
2105 if (pVCpu->em.s.u64TimeSliceStart + EM_TIME_SLICE < u64TimeNow)
2106 {
2107 /* New time slice. */
2108 pVCpu->em.s.u64TimeSliceStart = u64TimeNow;
2109 pVCpu->em.s.u64TimeSliceStartExec = u64KernelTime + u64UserTime;
2110 pVCpu->em.s.u64TimeSliceExec = 0;
2111 }
2112 pVCpu->em.s.u64TimeSliceExec = u64KernelTime + u64UserTime - pVCpu->em.s.u64TimeSliceStartExec;
2113
2114 Log2(("emR3IsExecutionAllowed: start=%RX64 startexec=%RX64 exec=%RX64 (cap=%x)\n", pVCpu->em.s.u64TimeSliceStart, pVCpu->em.s.u64TimeSliceStartExec, pVCpu->em.s.u64TimeSliceExec, (EM_TIME_SLICE * pVM->uCpuExecutionCap) / 100));
2115 if (pVCpu->em.s.u64TimeSliceExec >= (EM_TIME_SLICE * pVM->uCpuExecutionCap) / 100)
2116 return false;
2117 }
2118 return true;
2119}
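/* Illustration only (a hedged sketch, not part of the build): the cap check
 * above boils down to comparing the CPU time burnt in the current time slice
 * against (EM_TIME_SLICE * uCpuExecutionCap) / 100. The helper name and the
 * example numbers below are made up for illustration. */
#if 0
static bool emR3SketchIsUnderCap(uint64_t cMsExecInSlice, uint32_t uCpuExecutionCap)
{
    /* E.g. a 100 ms slice with a 50% cap allows at most 50 ms of combined
       kernel+user CPU time spent executing guest code per slice. */
    return cMsExecInSlice < (EM_TIME_SLICE * uCpuExecutionCap) / 100;
}
#endif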
2120
2121
2122/**
2123 * Execute VM.
2124 *
2125 * This function is the main loop of the VM. The emulation thread
2126 * calls this function when the VM has been successfully constructed
2127 * and we're ready to execute the VM.
2128 *
2129 * Returning from this function means that the VM is turned off or
2130 * suspended (state already saved) and deconstruction is next in line.
2131 *
2132 * All interaction from other threads is done using forced actions
2133 * and signalling of the wait object.
2134 *
2135 * @returns VBox status code; informational status codes may indicate failure.
2136 * @param pVM The cross context VM structure.
2137 * @param pVCpu The cross context virtual CPU structure.
2138 */
2139VMMR3_INT_DECL(int) EMR3ExecuteVM(PVM pVM, PVMCPU pVCpu)
2140{
2141 Log(("EMR3ExecuteVM: pVM=%p enmVMState=%d (%s) enmState=%d (%s) enmPrevState=%d (%s)\n",
2142 pVM,
2143 pVM->enmVMState, VMR3GetStateName(pVM->enmVMState),
2144 pVCpu->em.s.enmState, emR3GetStateName(pVCpu->em.s.enmState),
2145 pVCpu->em.s.enmPrevState, emR3GetStateName(pVCpu->em.s.enmPrevState) ));
2146 VM_ASSERT_EMT(pVM);
2147 AssertMsg( pVCpu->em.s.enmState == EMSTATE_NONE
2148 || pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI
2149 || pVCpu->em.s.enmState == EMSTATE_SUSPENDED,
2150 ("%s\n", emR3GetStateName(pVCpu->em.s.enmState)));
2151
2152 int rc = setjmp(pVCpu->em.s.u.FatalLongJump);
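 /* Note: setjmp returns 0 on this initial call; a non-zero rc here means some
  * code longjmp'ed back with a fatal status, which is handled by the "Fatal
  * error" branch at the bottom of this function. */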
2153 if (rc == 0)
2154 {
2155 /*
2156 * Start the virtual time.
2157 */
2158 TMR3NotifyResume(pVM, pVCpu);
2159
2160 /*
2161 * The Outer Main Loop.
2162 */
2163 bool fFFDone = false;
2164
2165 /* Reschedule right away to start in the right state. */
2166 rc = VINF_SUCCESS;
2167
2168 /* If resuming after a pause or a state load, restore the previous
2169 state or else we'll start executing code. Else, just reschedule. */
2170 if ( pVCpu->em.s.enmState == EMSTATE_SUSPENDED
2171 && ( pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
2172 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED))
2173 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
2174 else
2175 pVCpu->em.s.enmState = emR3Reschedule(pVM, pVCpu);
2176 pVCpu->em.s.cIemThenRemInstructions = 0;
2177 Log(("EMR3ExecuteVM: enmState=%s\n", emR3GetStateName(pVCpu->em.s.enmState)));
2178
2179 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2180 for (;;)
2181 {
2182 /*
2183 * Before we can schedule anything (we're here because
2184 * scheduling is required) we must service any pending
2185 * forced actions to avoid any pending action causing
2186 * immediate rescheduling upon entering an inner loop.
2187 *
2188 * Do forced actions.
2189 */
2190 if ( !fFFDone
2191 && RT_SUCCESS(rc)
2192 && rc != VINF_EM_TERMINATE
2193 && rc != VINF_EM_OFF
2194 && ( VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_REM_MASK)
2195 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_ALL_REM_MASK & ~VMCPU_FF_UNHALT)))
2196 {
2197 rc = emR3ForcedActions(pVM, pVCpu, rc);
2198 VBOXVMM_EM_FF_ALL_RET(pVCpu, rc);
2199 }
2200 else if (fFFDone)
2201 fFFDone = false;
2202
2203 /*
2204 * Now what to do?
2205 */
2206 Log2(("EMR3ExecuteVM: rc=%Rrc\n", rc));
2207 EMSTATE const enmOldState = pVCpu->em.s.enmState;
2208 switch (rc)
2209 {
2210 /*
2211 * Keep doing what we're currently doing.
2212 */
2213 case VINF_SUCCESS:
2214 break;
2215
2216 /*
2217 * Reschedule - to raw-mode execution.
2218 */
2219/** @todo r=bird: consider merging VINF_EM_RESCHEDULE_RAW with VINF_EM_RESCHEDULE_HM, they serve the same purpose here at least. */
2220 case VINF_EM_RESCHEDULE_RAW:
2221 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2222 AssertLogRelFailed();
2223 pVCpu->em.s.enmState = EMSTATE_NONE;
2224 break;
2225
2226 /*
2227 * Reschedule - to HM or NEM.
2228 */
2229 case VINF_EM_RESCHEDULE_HM:
2230 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2231 if (VM_IS_HM_ENABLED(pVM))
2232 {
2233 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_HM: %d -> %d (EMSTATE_HM)\n", enmOldState, EMSTATE_HM));
2234 pVCpu->em.s.enmState = EMSTATE_HM;
2235 }
2236 else if (VM_IS_NEM_ENABLED(pVM))
2237 {
2238 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_HM: %d -> %d (EMSTATE_NEM)\n", enmOldState, EMSTATE_NEM));
2239 pVCpu->em.s.enmState = EMSTATE_NEM;
2240 }
2241 else
2242 {
2243 AssertLogRelFailed();
2244 pVCpu->em.s.enmState = EMSTATE_NONE;
2245 }
2246 break;
2247
2248 /*
2249 * Reschedule - to recompiled execution.
2250 */
2251 case VINF_EM_RESCHEDULE_REM:
2252 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2253 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_REM: %d -> %d (EMSTATE_IEM_THEN_REM)\n",
2254 enmOldState, EMSTATE_IEM_THEN_REM));
2255 if (pVCpu->em.s.enmState != EMSTATE_IEM_THEN_REM)
2256 {
2257 pVCpu->em.s.enmState = EMSTATE_IEM_THEN_REM;
2258 pVCpu->em.s.cIemThenRemInstructions = 0;
2259 }
2260 break;
2261
2262 /*
2263 * Resume.
2264 */
2265 case VINF_EM_RESUME:
2266 Log2(("EMR3ExecuteVM: VINF_EM_RESUME: %d -> VINF_EM_RESCHEDULE\n", enmOldState));
2267 /* Don't reschedule in the halted or wait for SIPI case. */
2268 if ( pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
2269 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED)
2270 {
2271 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
2272 break;
2273 }
2274 /* fall through and get scheduled. */
2275 RT_FALL_THRU();
2276
2277 /*
2278 * Reschedule.
2279 */
2280 case VINF_EM_RESCHEDULE:
2281 {
2282 EMSTATE enmState = emR3Reschedule(pVM, pVCpu);
2283 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE: %d -> %d (%s)\n", enmOldState, enmState, emR3GetStateName(enmState)));
2284 if (pVCpu->em.s.enmState != enmState && enmState == EMSTATE_IEM_THEN_REM)
2285 pVCpu->em.s.cIemThenRemInstructions = 0;
2286 pVCpu->em.s.enmState = enmState;
2287 break;
2288 }
2289
2290 /*
2291 * Halted.
2292 */
2293 case VINF_EM_HALT:
2294 Log2(("EMR3ExecuteVM: VINF_EM_HALT: %d -> %d\n", enmOldState, EMSTATE_HALTED));
2295 pVCpu->em.s.enmState = EMSTATE_HALTED;
2296 break;
2297
2298 /*
2299 * Switch to the wait for SIPI state (application processor only)
2300 */
2301 case VINF_EM_WAIT_SIPI:
2302 Assert(pVCpu->idCpu != 0);
2303 Log2(("EMR3ExecuteVM: VINF_EM_WAIT_SIPI: %d -> %d\n", enmOldState, EMSTATE_WAIT_SIPI));
2304 pVCpu->em.s.enmState = EMSTATE_WAIT_SIPI;
2305 break;
2306
2307
2308 /*
2309 * Suspend.
2310 */
2311 case VINF_EM_SUSPEND:
2312 Log2(("EMR3ExecuteVM: VINF_EM_SUSPEND: %d -> %d\n", enmOldState, EMSTATE_SUSPENDED));
2313 Assert(enmOldState != EMSTATE_SUSPENDED);
2314 pVCpu->em.s.enmPrevState = enmOldState;
2315 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
2316 break;
2317
2318 /*
2319 * Reset.
2320 * We might end up doing a double reset for now; we'll have to clean up the mess later.
2321 */
2322 case VINF_EM_RESET:
2323 {
2324 if (pVCpu->idCpu == 0)
2325 {
2326 EMSTATE enmState = emR3Reschedule(pVM, pVCpu);
2327 Log2(("EMR3ExecuteVM: VINF_EM_RESET: %d -> %d (%s)\n", enmOldState, enmState, emR3GetStateName(enmState)));
2328 if (pVCpu->em.s.enmState != enmState && enmState == EMSTATE_IEM_THEN_REM)
2329 pVCpu->em.s.cIemThenRemInstructions = 0;
2330 pVCpu->em.s.enmState = enmState;
2331 }
2332 else
2333 {
2334 /* All other VCPUs go into the wait for SIPI state. */
2335 pVCpu->em.s.enmState = EMSTATE_WAIT_SIPI;
2336 }
2337 break;
2338 }
2339
2340 /*
2341 * Power Off.
2342 */
2343 case VINF_EM_OFF:
2344 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2345 Log2(("EMR3ExecuteVM: returns VINF_EM_OFF (%d -> %d)\n", enmOldState, EMSTATE_TERMINATING));
2346 TMR3NotifySuspend(pVM, pVCpu);
2347 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2348 return rc;
2349
2350 /*
2351 * Terminate the VM.
2352 */
2353 case VINF_EM_TERMINATE:
2354 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2355 Log(("EMR3ExecuteVM returns VINF_EM_TERMINATE (%d -> %d)\n", enmOldState, EMSTATE_TERMINATING));
2356 if (pVM->enmVMState < VMSTATE_DESTROYING) /* ugly */
2357 TMR3NotifySuspend(pVM, pVCpu);
2358 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2359 return rc;
2360
2361
2362 /*
2363 * Out of memory, suspend the VM and stuff.
2364 */
2365 case VINF_EM_NO_MEMORY:
2366 Log2(("EMR3ExecuteVM: VINF_EM_NO_MEMORY: %d -> %d\n", enmOldState, EMSTATE_SUSPENDED));
2367 Assert(enmOldState != EMSTATE_SUSPENDED);
2368 pVCpu->em.s.enmPrevState = enmOldState;
2369 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
2370 TMR3NotifySuspend(pVM, pVCpu);
2371 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2372
2373 rc = VMSetRuntimeError(pVM, VMSETRTERR_FLAGS_SUSPEND, "HostMemoryLow",
2374 N_("Unable to allocate and lock memory. The virtual machine will be paused. Please close applications to free up memory or close the VM"));
2375 if (rc != VINF_EM_SUSPEND)
2376 {
2377 if (RT_SUCCESS_NP(rc))
2378 {
2379 AssertLogRelMsgFailed(("%Rrc\n", rc));
2380 rc = VERR_EM_INTERNAL_ERROR;
2381 }
2382 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2383 }
2384 return rc;
2385
2386 /*
2387 * Guest debug events.
2388 */
2389 case VINF_EM_DBG_STEPPED:
2390 case VINF_EM_DBG_STOP:
2391 case VINF_EM_DBG_EVENT:
2392 case VINF_EM_DBG_BREAKPOINT:
2393 case VINF_EM_DBG_STEP:
2394 if (enmOldState == EMSTATE_RAW)
2395 {
2396 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_RAW));
2397 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_RAW;
2398 }
2399 else if (enmOldState == EMSTATE_HM)
2400 {
2401 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_HM));
2402 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_HM;
2403 }
2404 else if (enmOldState == EMSTATE_NEM)
2405 {
2406 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_NEM));
2407 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_NEM;
2408 }
2409 else if (enmOldState == EMSTATE_REM)
2410 {
2411 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_REM));
2412 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_REM;
2413 }
2414 else
2415 {
2416 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_IEM));
2417 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_IEM;
2418 }
2419 break;
2420
2421 /*
2422 * Hypervisor debug events.
2423 */
2424 case VINF_EM_DBG_HYPER_STEPPED:
2425 case VINF_EM_DBG_HYPER_BREAKPOINT:
2426 case VINF_EM_DBG_HYPER_ASSERTION:
2427 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_HYPER));
2428 pVCpu->em.s.enmState = EMSTATE_DEBUG_HYPER;
2429 break;
2430
2431 /*
2432 * Triple fault.
2433 */
2434 case VINF_EM_TRIPLE_FAULT:
2435 if (!pVM->em.s.fGuruOnTripleFault)
2436 {
2437 Log(("EMR3ExecuteVM: VINF_EM_TRIPLE_FAULT: CPU reset...\n"));
2438 rc = VBOXSTRICTRC_TODO(VMR3ResetTripleFault(pVM));
2439 Log2(("EMR3ExecuteVM: VINF_EM_TRIPLE_FAULT: %d -> %d (rc=%Rrc)\n", enmOldState, pVCpu->em.s.enmState, rc));
2440 continue;
2441 }
2442 /* Else fall through and trigger a guru. */
2443 RT_FALL_THRU();
2444
2445 case VERR_VMM_RING0_ASSERTION:
2446 Log(("EMR3ExecuteVM: %Rrc: %d -> %d (EMSTATE_GURU_MEDITATION)\n", rc, enmOldState, EMSTATE_GURU_MEDITATION));
2447 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2448 break;
2449
2450 /*
2451 * Any error code showing up here other than the ones we
2452 * know and process above are considered to be FATAL.
2453 *
2454 * Unknown warnings and informational status codes are also
2455 * included in this.
2456 */
2457 default:
2458 if (RT_SUCCESS_NP(rc))
2459 {
2460 AssertMsgFailed(("Unexpected warning or informational status code %Rra!\n", rc));
2461 rc = VERR_EM_INTERNAL_ERROR;
2462 }
2463 Log(("EMR3ExecuteVM: %Rrc: %d -> %d (EMSTATE_GURU_MEDITATION)\n", rc, enmOldState, EMSTATE_GURU_MEDITATION));
2464 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2465 break;
2466 }
2467
2468 /*
2469 * Act on state transition.
2470 */
2471 EMSTATE const enmNewState = pVCpu->em.s.enmState;
2472 if (enmOldState != enmNewState)
2473 {
2474 VBOXVMM_EM_STATE_CHANGED(pVCpu, enmOldState, enmNewState, rc);
2475
2476 /* Clear MWait flags and the unhalt FF. */
2477 if ( enmOldState == EMSTATE_HALTED
2478 && ( (pVCpu->em.s.MWait.fWait & EMMWAIT_FLAG_ACTIVE)
2479 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_UNHALT))
2480 && ( enmNewState == EMSTATE_RAW
2481 || enmNewState == EMSTATE_HM
2482 || enmNewState == EMSTATE_NEM
2483 || enmNewState == EMSTATE_REM
2484 || enmNewState == EMSTATE_IEM_THEN_REM
2485 || enmNewState == EMSTATE_DEBUG_GUEST_RAW
2486 || enmNewState == EMSTATE_DEBUG_GUEST_HM
2487 || enmNewState == EMSTATE_DEBUG_GUEST_NEM
2488 || enmNewState == EMSTATE_DEBUG_GUEST_IEM
2489 || enmNewState == EMSTATE_DEBUG_GUEST_REM) )
2490 {
2491 if (pVCpu->em.s.MWait.fWait & EMMWAIT_FLAG_ACTIVE)
2492 {
2493 LogFlow(("EMR3ExecuteVM: Clearing MWAIT\n"));
2494 pVCpu->em.s.MWait.fWait &= ~(EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0);
2495 }
2496 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_UNHALT))
2497 {
2498 LogFlow(("EMR3ExecuteVM: Clearing UNHALT\n"));
2499 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_UNHALT);
2500 }
2501 }
2502 }
2503 else
2504 VBOXVMM_EM_STATE_UNCHANGED(pVCpu, enmNewState, rc);
2505
2506 STAM_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x); /* (skip this in release) */
2507 STAM_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2508
2509 /*
2510 * Act on the new state.
2511 */
2512 switch (enmNewState)
2513 {
2514 /*
2515 * Execute raw.
2516 */
2517 case EMSTATE_RAW:
2518 AssertLogRelMsgFailed(("%Rrc\n", rc));
2519 rc = VERR_EM_INTERNAL_ERROR;
2520 break;
2521
2522 /*
2523 * Execute hardware accelerated raw.
2524 */
2525 case EMSTATE_HM:
2526 rc = emR3HmExecute(pVM, pVCpu, &fFFDone);
2527 break;
2528
2529 /*
2530 * Execute using the native execution manager (NEM).
2531 */
2532 case EMSTATE_NEM:
2533 rc = VBOXSTRICTRC_TODO(emR3NemExecute(pVM, pVCpu, &fFFDone));
2534 break;
2535
2536 /*
2537 * Execute recompiled.
2538 */
2539 case EMSTATE_REM:
2540 rc = emR3RemExecute(pVM, pVCpu, &fFFDone);
2541 Log2(("EMR3ExecuteVM: emR3RemExecute -> %Rrc\n", rc));
2542 break;
2543
2544 /*
2545 * Execute in the interpreter.
2546 */
2547 case EMSTATE_IEM:
2548 {
2549 uint32_t cInstructions = 0;
2550#if 0 /* For testing purposes. */
2551 STAM_PROFILE_START(&pVCpu->em.s.StatHmExec, x1);
2552 rc = VBOXSTRICTRC_TODO(EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE));
2553 STAM_PROFILE_STOP(&pVCpu->em.s.StatHmExec, x1);
2554 if (rc == VINF_EM_DBG_STEPPED || rc == VINF_EM_RESCHEDULE_HM || rc == VINF_EM_RESCHEDULE_REM || rc == VINF_EM_RESCHEDULE_RAW)
2555 rc = VINF_SUCCESS;
2556 else if (rc == VERR_EM_CANNOT_EXEC_GUEST)
2557#endif
2558 rc = VBOXSTRICTRC_TODO(IEMExecLots(pVCpu, 4096 /*cMaxInstructions*/, 2047 /*cPollRate*/, &cInstructions));
2559 if (pVM->em.s.fIemExecutesAll)
2560 {
2561 Assert(rc != VINF_EM_RESCHEDULE_REM);
2562 Assert(rc != VINF_EM_RESCHEDULE_RAW);
2563 Assert(rc != VINF_EM_RESCHEDULE_HM);
2564#ifdef VBOX_HIGH_RES_TIMERS_HACK
2565 if (cInstructions < 2048)
2566 TMTimerPollVoid(pVM, pVCpu);
2567#endif
2568 }
2569 fFFDone = false;
2570 break;
2571 }
2572
2573 /*
2574 * Execute in IEM, hoping we can quickly switch back to HM
2575 * or RAW execution. If our hopes fail, we go to REM.
2576 */
2577 case EMSTATE_IEM_THEN_REM:
2578 {
2579 STAM_PROFILE_START(&pVCpu->em.s.StatIEMThenREM, pIemThenRem);
2580 rc = VBOXSTRICTRC_TODO(emR3ExecuteIemThenRem(pVM, pVCpu, &fFFDone));
2581 STAM_PROFILE_STOP(&pVCpu->em.s.StatIEMThenREM, pIemThenRem);
2582 break;
2583 }
2584
2585 /*
2586 * Application processor execution halted until SIPI.
2587 */
2588 case EMSTATE_WAIT_SIPI:
2589 /* no break */
2590 /*
2591 * hlt - execution halted until interrupt.
2592 */
2593 case EMSTATE_HALTED:
2594 {
2595 STAM_REL_PROFILE_START(&pVCpu->em.s.StatHalted, y);
2596 /* If HM (or someone else) stores a pending interrupt in
2597 TRPM, it must be dispatched ASAP without any halting.
2598 Anything pending in TRPM has been accepted and the CPU
2599 should already be in the right state to receive it. */
2600 if (TRPMHasTrap(pVCpu))
2601 rc = VINF_EM_RESCHEDULE;
2602 /* MWAIT has a special extension where it's woken up when
2603 an interrupt is pending even when IF=0. */
2604 else if ( (pVCpu->em.s.MWait.fWait & (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
2605 == (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
2606 {
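 /* Note: EMMWAIT_FLAG_BREAKIRQIF0 corresponds to the MWAIT extension
  * (ECX bit 0 at the time of MWAIT) that treats pending interrupts as
  * break events even while RFLAGS.IF is clear, which is why the wait
  * below does not ignore interrupts. */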
2607 rc = VMR3WaitHalted(pVM, pVCpu, false /*fIgnoreInterrupts*/);
2608 if (rc == VINF_SUCCESS)
2609 {
2610 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
2611 APICUpdatePendingInterrupts(pVCpu);
2612
2613 if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC
2614 | VMCPU_FF_INTERRUPT_NESTED_GUEST
2615 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT))
2616 {
2617 Log(("EMR3ExecuteVM: Triggering reschedule on pending IRQ after MWAIT\n"));
2618 rc = VINF_EM_RESCHEDULE;
2619 }
2620 }
2621 }
2622 else
2623 {
2624 rc = VMR3WaitHalted(pVM, pVCpu, !(CPUMGetGuestEFlags(pVCpu) & X86_EFL_IF));
2625 /* We're only interested in NMI/SMIs here which have their own FFs, so we don't need to
2626 check VMCPU_FF_UPDATE_APIC here. */
2627 if ( rc == VINF_SUCCESS
2628 && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT))
2629 {
2630 Log(("EMR3ExecuteVM: Triggering reschedule on pending NMI/SMI/UNHALT after HLT\n"));
2631 rc = VINF_EM_RESCHEDULE;
2632 }
2633 }
2634
2635 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatHalted, y);
2636 break;
2637 }
2638
2639 /*
2640 * Suspended - return to VM.cpp.
2641 */
2642 case EMSTATE_SUSPENDED:
2643 TMR3NotifySuspend(pVM, pVCpu);
2644 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2645 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2646 return VINF_EM_SUSPEND;
2647
2648 /*
2649 * Debugging in the guest.
2650 */
2651 case EMSTATE_DEBUG_GUEST_RAW:
2652 case EMSTATE_DEBUG_GUEST_HM:
2653 case EMSTATE_DEBUG_GUEST_NEM:
2654 case EMSTATE_DEBUG_GUEST_IEM:
2655 case EMSTATE_DEBUG_GUEST_REM:
2656 TMR3NotifySuspend(pVM, pVCpu);
2657 rc = VBOXSTRICTRC_TODO(emR3Debug(pVM, pVCpu, rc));
2658 TMR3NotifyResume(pVM, pVCpu);
2659 Log2(("EMR3ExecuteVM: emR3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
2660 break;
2661
2662 /*
2663 * Debugging in the hypervisor.
2664 */
2665 case EMSTATE_DEBUG_HYPER:
2666 {
2667 TMR3NotifySuspend(pVM, pVCpu);
2668 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2669
2670 rc = VBOXSTRICTRC_TODO(emR3Debug(pVM, pVCpu, rc));
2671 Log2(("EMR3ExecuteVM: emR3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
2672 if (rc != VINF_SUCCESS)
2673 {
2674 if (rc == VINF_EM_OFF || rc == VINF_EM_TERMINATE)
2675 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2676 else
2677 {
2678 /* switch to guru meditation mode */
2679 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2680 VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
2681 VMMR3FatalDump(pVM, pVCpu, rc);
2682 }
2683 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2684 return rc;
2685 }
2686
2687 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2688 TMR3NotifyResume(pVM, pVCpu);
2689 break;
2690 }
2691
2692 /*
2693 * Guru meditation takes place in the debugger.
2694 */
2695 case EMSTATE_GURU_MEDITATION:
2696 {
2697 TMR3NotifySuspend(pVM, pVCpu);
2698 VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
2699 VMMR3FatalDump(pVM, pVCpu, rc);
2700 emR3Debug(pVM, pVCpu, rc);
2701 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2702 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2703 return rc;
2704 }
2705
2706 /*
2707 * The states we don't expect here.
2708 */
2709 case EMSTATE_NONE:
2710 case EMSTATE_TERMINATING:
2711 default:
2712 AssertMsgFailed(("EMR3ExecuteVM: Invalid state %d!\n", pVCpu->em.s.enmState));
2713 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2714 TMR3NotifySuspend(pVM, pVCpu);
2715 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2716 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2717 return VERR_EM_INTERNAL_ERROR;
2718 }
2719 } /* The Outer Main Loop */
2720 }
2721 else
2722 {
2723 /*
2724 * Fatal error.
2725 */
2726 Log(("EMR3ExecuteVM: returns %Rrc because of longjmp / fatal error; (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(pVCpu->em.s.enmPrevState)));
2727 TMR3NotifySuspend(pVM, pVCpu);
2728 VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
2729 VMMR3FatalDump(pVM, pVCpu, rc);
2730 emR3Debug(pVM, pVCpu, rc);
2731 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2732 /** @todo change the VM state! */
2733 return rc;
2734 }
2735
2736 /* not reached */
2737}
2738