VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/EM.cpp@ 102947

Last change on this file since 102947 was 102947, checked in by vboxsync, 10 months ago

VMM/EM: Check whether it is possible to reschedule after IEMExecLots() has returned when in EMSTATE_IEM or the guest might never get out of IEM under certain circumstances (nested paging disabled for instance which disables unrestricted execution as well). Likely a r157542 regression, bugref:10369

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 113.5 KB
1/* $Id: EM.cpp 102947 2024-01-18 10:21:28Z vboxsync $ */
2/** @file
3 * EM - Execution Monitor / Manager.
4 */
5
6/*
7 * Copyright (C) 2006-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28/** @page pg_em EM - The Execution Monitor / Manager
29 *
30 * The Execution Monitor/Manager is responsible for running the VM, scheduling
31 * the right kind of execution (Raw-mode, Hardware Assisted, Recompiled or
32 * Interpreted), and keeping the CPU states in sync. The function
33 * EMR3ExecuteVM() is the 'main-loop' of the VM, while each of the execution
34 * modes has different inner loops (emR3RawExecute, emR3HmExecute, and
35 * emR3RmExecute).
36 *
37 * The interpreted execution is only used to avoid switching between
38 * raw-mode/hm and the recompiler when fielding virtualization traps/faults.
39 * The interpretation is thus implemented as part of EM.
40 *
41 * @see grp_em
42 */
43
44
45/*********************************************************************************************************************************
46* Header Files *
47*********************************************************************************************************************************/
48#define LOG_GROUP LOG_GROUP_EM
49#define VMCPU_INCL_CPUM_GST_CTX /* for CPUM_IMPORT_GUEST_STATE_RET & interrupt injection */
50#include <VBox/vmm/em.h>
51#include <VBox/vmm/vmm.h>
52#include <VBox/vmm/selm.h>
53#include <VBox/vmm/trpm.h>
54#include <VBox/vmm/iem.h>
55#include <VBox/vmm/nem.h>
56#include <VBox/vmm/iom.h>
57#include <VBox/vmm/dbgf.h>
58#include <VBox/vmm/pgm.h>
59#include <VBox/vmm/apic.h>
60#include <VBox/vmm/tm.h>
61#include <VBox/vmm/mm.h>
62#include <VBox/vmm/ssm.h>
63#include <VBox/vmm/pdmapi.h>
64#include <VBox/vmm/pdmcritsect.h>
65#include <VBox/vmm/pdmqueue.h>
66#include <VBox/vmm/hm.h>
67#include "EMInternal.h"
68#include <VBox/vmm/vm.h>
69#include <VBox/vmm/uvm.h>
70#include <VBox/vmm/cpumdis.h>
71#include <VBox/dis.h>
72#include <VBox/err.h>
73#include "VMMTracing.h"
74
75#include <iprt/asm.h>
76#include <iprt/string.h>
77#include <iprt/stream.h>
78#include <iprt/thread.h>
79
80#include "EMInline.h"
81
82
83/*********************************************************************************************************************************
84* Internal Functions *
85*********************************************************************************************************************************/
86static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM);
87static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
88#if defined(LOG_ENABLED) || defined(VBOX_STRICT)
89static const char *emR3GetStateName(EMSTATE enmState);
90#endif
91static VBOXSTRICTRC emR3Debug(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc);
92
93
94/**
95 * Initializes the EM.
96 *
97 * @returns VBox status code.
98 * @param pVM The cross context VM structure.
99 */
100VMMR3_INT_DECL(int) EMR3Init(PVM pVM)
101{
102 LogFlow(("EMR3Init\n"));
103 /*
104 * Assert alignment and sizes.
105 */
106 AssertCompileMemberAlignment(VM, em.s, 32);
107 AssertCompile(sizeof(pVM->em.s) <= sizeof(pVM->em.padding));
108 AssertCompile(RT_SIZEOFMEMB(VMCPU, em.s.u.FatalLongJump) <= RT_SIZEOFMEMB(VMCPU, em.s.u.achPaddingFatalLongJump));
109 AssertCompile(RT_SIZEOFMEMB(VMCPU, em.s) <= RT_SIZEOFMEMB(VMCPU, em.padding));
110
111 /*
112 * Init the structure.
113 */
114 PCFGMNODE pCfgRoot = CFGMR3GetRoot(pVM);
115 PCFGMNODE pCfgEM = CFGMR3GetChild(pCfgRoot, "EM");
116
117 int rc = CFGMR3QueryBoolDef(pCfgEM, "IemExecutesAll", &pVM->em.s.fIemExecutesAll,
118#if defined(RT_ARCH_ARM64) && defined(RT_OS_DARWIN) && !defined(VBOX_VMM_TARGET_ARMV8)
119 true
120#else
121 false
122#endif
123 );
124 AssertLogRelRCReturn(rc, rc);
125
126 bool fEnabled;
127 rc = CFGMR3QueryBoolDef(pCfgEM, "TripleFaultReset", &fEnabled, false);
128 AssertLogRelRCReturn(rc, rc);
129 pVM->em.s.fGuruOnTripleFault = !fEnabled;
130 if (!pVM->em.s.fGuruOnTripleFault && pVM->cCpus > 1)
131 {
132 LogRel(("EM: Overriding /EM/TripleFaultReset, must be false on SMP.\n"));
133 pVM->em.s.fGuruOnTripleFault = true;
134 }
135
136 LogRel(("EMR3Init: fIemExecutesAll=%RTbool fGuruOnTripleFault=%RTbool\n", pVM->em.s.fIemExecutesAll, pVM->em.s.fGuruOnTripleFault));
137
138 /** @cfgm{/EM/ExitOptimizationEnabled, bool, true}
139 * Whether to try to correlate exit history in any context, detect hot spots and
140 * try to optimize these using IEM if there are other exits close by. This
141 * overrides the context specific settings. */
142 bool fExitOptimizationEnabled = true;
143 rc = CFGMR3QueryBoolDef(pCfgEM, "ExitOptimizationEnabled", &fExitOptimizationEnabled, true);
144 AssertLogRelRCReturn(rc, rc);
145
146 /** @cfgm{/EM/ExitOptimizationEnabledR0, bool, true}
147 * Whether to optimize exits in ring-0. Setting this to false will also disable
148 * the /EM/ExitOptimizationEnabledR0PreemptDisabled setting. Depending on preemption
149 * capabilities of the host kernel, this optimization may be unavailable. */
150 bool fExitOptimizationEnabledR0 = true;
151 rc = CFGMR3QueryBoolDef(pCfgEM, "ExitOptimizationEnabledR0", &fExitOptimizationEnabledR0, true);
152 AssertLogRelRCReturn(rc, rc);
153 fExitOptimizationEnabledR0 &= fExitOptimizationEnabled;
154
155 /** @cfgm{/EM/ExitOptimizationEnabledR0PreemptDisabled, bool, false}
156 * Whether to optimize exits in ring-0 when preemption is disabled (or preemption
157 * hooks are in effect). */
158 /** @todo change the default to true here */
159 bool fExitOptimizationEnabledR0PreemptDisabled = true;
160 rc = CFGMR3QueryBoolDef(pCfgEM, "ExitOptimizationEnabledR0PreemptDisabled", &fExitOptimizationEnabledR0PreemptDisabled, false);
161 AssertLogRelRCReturn(rc, rc);
162 fExitOptimizationEnabledR0PreemptDisabled &= fExitOptimizationEnabledR0;
163
164 /** @cfgm{/EM/HistoryExecMaxInstructions, integer, 16, 65535, 8192}
165 * Maximum number of instructions to let EMHistoryExec execute in one go. */
166 uint16_t cHistoryExecMaxInstructions = 8192;
167 rc = CFGMR3QueryU16Def(pCfgEM, "HistoryExecMaxInstructions", &cHistoryExecMaxInstructions, cHistoryExecMaxInstructions);
168 AssertLogRelRCReturn(rc, rc);
169 if (cHistoryExecMaxInstructions < 16)
170 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS, "/EM/HistoryExecMaxInstructions value is too small, min 16");
171
172 /** @cfgm{/EM/HistoryProbeMaxInstructionsWithoutExit, integer, 2, 65535, 24 for HM, 32 for NEM}
173 * Maximum number of instructions between exits during probing. */
174 uint16_t cHistoryProbeMaxInstructionsWithoutExit = 24;
175#ifdef RT_OS_WINDOWS
176 if (VM_IS_NEM_ENABLED(pVM))
177 cHistoryProbeMaxInstructionsWithoutExit = 32;
178#endif
179 rc = CFGMR3QueryU16Def(pCfgEM, "HistoryProbeMaxInstructionsWithoutExit", &cHistoryProbeMaxInstructionsWithoutExit,
180 cHistoryProbeMaxInstructionsWithoutExit);
181 AssertLogRelRCReturn(rc, rc);
182 if (cHistoryProbeMaxInstructionsWithoutExit < 2)
183 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS,
184 "/EM/HistoryProbeMaxInstructionsWithoutExit value is too small, min 16");
185
186 /** @cfgm{/EM/HistoryProbMinInstructions, integer, 0, 65535, depends}
187 * The default is (/EM/HistoryProbeMaxInstructionsWithoutExit + 1) * 3. */
188 uint16_t cHistoryProbeMinInstructions = cHistoryProbeMaxInstructionsWithoutExit < 0x5554
189 ? (cHistoryProbeMaxInstructionsWithoutExit + 1) * 3 : 0xffff;
190 rc = CFGMR3QueryU16Def(pCfgEM, "HistoryProbMinInstructions", &cHistoryProbeMinInstructions,
191 cHistoryProbeMinInstructions);
192 AssertLogRelRCReturn(rc, rc);
193
194 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
195 {
196 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
197 pVCpu->em.s.fExitOptimizationEnabled = fExitOptimizationEnabled;
198 pVCpu->em.s.fExitOptimizationEnabledR0 = fExitOptimizationEnabledR0;
199 pVCpu->em.s.fExitOptimizationEnabledR0PreemptDisabled = fExitOptimizationEnabledR0PreemptDisabled;
200 pVCpu->em.s.cHistoryExecMaxInstructions = cHistoryExecMaxInstructions;
201 pVCpu->em.s.cHistoryProbeMinInstructions = cHistoryProbeMinInstructions;
202 pVCpu->em.s.cHistoryProbeMaxInstructionsWithoutExit = cHistoryProbeMaxInstructionsWithoutExit;
203 }
204
205#ifdef VBOX_WITH_IEM_RECOMPILER
206 /** @cfgm{/EM/IemRecompiled, bool, true}
207 * Whether IEM bulk execution is recompiled or interpreted. */
208 rc = CFGMR3QueryBoolDef(pCfgEM, "IemRecompiled", &pVM->em.s.fIemRecompiled, true);
209 AssertLogRelRCReturn(rc, rc);
210#endif
211
212 /*
213 * Saved state.
214 */
215 rc = SSMR3RegisterInternal(pVM, "em", 0, EM_SAVED_STATE_VERSION, 16,
216 NULL, NULL, NULL,
217 NULL, emR3Save, NULL,
218 NULL, emR3Load, NULL);
219 if (RT_FAILURE(rc))
220 return rc;
221
222 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
223 {
224 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
225
226 pVCpu->em.s.enmState = idCpu == 0 ? EMSTATE_NONE : EMSTATE_WAIT_SIPI;
227 pVCpu->em.s.enmPrevState = EMSTATE_NONE;
228 pVCpu->em.s.msTimeSliceStart = 0; /* paranoia */
229 pVCpu->em.s.idxContinueExitRec = UINT16_MAX;
230
231# define EM_REG_COUNTER(a, b, c) \
232 rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, c, b, idCpu); \
233 AssertRC(rc);
234
235# define EM_REG_COUNTER_USED(a, b, c) \
236 rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, c, b, idCpu); \
237 AssertRC(rc);
238
239# define EM_REG_PROFILE(a, b, c) \
240 rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, idCpu); \
241 AssertRC(rc);
242
243# define EM_REG_PROFILE_ADV(a, b, c) \
244 rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE_ADV, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, idCpu); \
245 AssertRC(rc);
246
247 /*
248 * Statistics.
249 */
250#ifdef VBOX_WITH_STATISTICS
251 EM_REG_COUNTER_USED(&pVCpu->em.s.StatIoRestarted, "/EM/CPU%u/R3/PrivInst/IoRestarted", "I/O instructions restarted in ring-3.");
252 EM_REG_COUNTER_USED(&pVCpu->em.s.StatIoIem, "/EM/CPU%u/R3/PrivInst/IoIem", "I/O instructions handed to IEM in ring-3.");
253
254 /* these should be considered for release statistics. */
255 EM_REG_COUNTER(&pVCpu->em.s.StatIOEmu, "/PROF/CPU%u/EM/Emulation/IO", "Profiling of emR3RawExecuteIOInstruction.");
256 EM_REG_COUNTER(&pVCpu->em.s.StatPrivEmu, "/PROF/CPU%u/EM/Emulation/Priv", "Profiling of emR3RawPrivileged.");
257 EM_REG_PROFILE(&pVCpu->em.s.StatHMEntry, "/PROF/CPU%u/EM/HMEnter", "Profiling Hardware Accelerated Mode entry overhead.");
258#endif
259 EM_REG_PROFILE(&pVCpu->em.s.StatHMExec, "/PROF/CPU%u/EM/HMExec", "Profiling Hardware Accelerated Mode execution.");
260 EM_REG_COUNTER(&pVCpu->em.s.StatHMExecuteCalled, "/PROF/CPU%u/EM/HMExecuteCalled", "Number of times emR3HmExecute is called.");
261#ifdef VBOX_WITH_STATISTICS
262 EM_REG_PROFILE(&pVCpu->em.s.StatIEMEmu, "/PROF/CPU%u/EM/IEMEmuSingle", "Profiling single instruction IEM execution.");
263 EM_REG_PROFILE(&pVCpu->em.s.StatIEMThenREM, "/PROF/CPU%u/EM/IEMThenRem", "Profiling IEM-then-REM instruction execution (by IEM).");
264 EM_REG_PROFILE(&pVCpu->em.s.StatNEMEntry, "/PROF/CPU%u/EM/NEMEnter", "Profiling NEM entry overhead.");
265#endif
266 EM_REG_PROFILE(&pVCpu->em.s.StatNEMExec, "/PROF/CPU%u/EM/NEMExec", "Profiling NEM execution.");
267 EM_REG_COUNTER(&pVCpu->em.s.StatNEMExecuteCalled, "/PROF/CPU%u/EM/NEMExecuteCalled", "Number of times emR3NemExecute is called.");
268#ifdef VBOX_WITH_STATISTICS
269 EM_REG_PROFILE(&pVCpu->em.s.StatREMExec, "/PROF/CPU%u/EM/REMExec", "Profiling REM execution.");
270#endif
271
272 EM_REG_COUNTER(&pVCpu->em.s.StatForcedActions, "/PROF/CPU%u/EM/ForcedActions", "Profiling forced action execution.");
273 EM_REG_COUNTER(&pVCpu->em.s.StatHalted, "/PROF/CPU%u/EM/Halted", "Profiling halted state (VMR3WaitHalted).");
274 EM_REG_PROFILE_ADV(&pVCpu->em.s.StatCapped, "/PROF/CPU%u/EM/Capped", "Profiling capped state (sleep).");
275 EM_REG_COUNTER(&pVCpu->em.s.StatREMTotal, "/PROF/CPU%u/EM/REMTotal", "Profiling emR3RecompilerExecute (excluding FFs).");
276
277 EM_REG_PROFILE_ADV(&pVCpu->em.s.StatTotal, "/PROF/CPU%u/EM/Total", "Profiling EMR3ExecuteVM.");
278
279 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.iNextExit, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
280 "Number of recorded exits.", "/PROF/CPU%u/EM/RecordedExits", idCpu);
281 AssertRC(rc);
282
283 /* History record statistics */
284 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.cExitRecordUsed, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
285 "Number of used hash table entries.", "/EM/CPU%u/ExitHashing/Used", idCpu);
286 AssertRC(rc);
287
288 for (uint32_t iStep = 0; iStep < RT_ELEMENTS(pVCpu->em.s.aStatHistoryRecHits); iStep++)
289 {
290 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.aStatHistoryRecHits[iStep], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
291 "Number of hits at this step.", "/EM/CPU%u/ExitHashing/Step%02u-Hits", idCpu, iStep);
292 AssertRC(rc);
293 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.aStatHistoryRecTypeChanged[iStep], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
294 "Number of type changes at this step.", "/EM/CPU%u/ExitHashing/Step%02u-TypeChanges", idCpu, iStep);
295 AssertRC(rc);
296 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.aStatHistoryRecReplaced[iStep], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
297 "Number of replacements at this step.", "/EM/CPU%u/ExitHashing/Step%02u-Replacments", idCpu, iStep);
298 AssertRC(rc);
299 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.aStatHistoryRecNew[iStep], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
300 "Number of new inserts at this step.", "/EM/CPU%u/ExitHashing/Step%02u-NewInserts", idCpu, iStep);
301 AssertRC(rc);
302 }
303
304 EM_REG_PROFILE(&pVCpu->em.s.StatHistoryExec, "/EM/CPU%u/ExitOpt/Exec", "Profiling normal EMHistoryExec operation.");
305 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryExecSavedExits, "/EM/CPU%u/ExitOpt/ExecSavedExit", "Net number of saved exits.");
306 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryExecInstructions, "/EM/CPU%u/ExitOpt/ExecInstructions", "Number of instructions executed during normal operation.");
307 EM_REG_PROFILE(&pVCpu->em.s.StatHistoryProbe, "/EM/CPU%u/ExitOpt/Probe", "Profiling EMHistoryExec when probing.");
308 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryProbeInstructions, "/EM/CPU%u/ExitOpt/ProbeInstructions", "Number of instructions executed during probing.");
309 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryProbedNormal, "/EM/CPU%u/ExitOpt/ProbedNormal", "Number of EMEXITACTION_NORMAL_PROBED results.");
310 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryProbedExecWithMax, "/EM/CPU%u/ExitOpt/ProbedExecWithMax", "Number of EMEXITACTION_EXEC_WITH_MAX results.");
311 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryProbedToRing3, "/EM/CPU%u/ExitOpt/ProbedToRing3", "Number of ring-3 probe continuations.");
312 }
313
314 emR3InitDbg(pVM);
315 return VINF_SUCCESS;
316}
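The /EM/ CFGM keys queried above are normally seeded by the VM frontend before EMR3Init() runs. A minimal sketch of doing that through the CFGM insertion API follows; the helper name and the chosen values are illustrative assumptions, not part of EM.cpp.

/* Illustrative only: seed a couple of the /EM/ keys read by EMR3Init() above.
   CFGM stores boolean values as integers (0/1). */
static int exampleSeedEmConfig(PVM pVM)
{
    PCFGMNODE pRoot = CFGMR3GetRoot(pVM);
    PCFGMNODE pEM   = CFGMR3GetChild(pRoot, "EM");
    int rc = VINF_SUCCESS;
    if (!pEM)
        rc = CFGMR3InsertNode(pRoot, "EM", &pEM);
    if (RT_SUCCESS(rc))
        rc = CFGMR3InsertInteger(pEM, "IemExecutesAll", 1);                 /* run everything in IEM */
    if (RT_SUCCESS(rc))
        rc = CFGMR3InsertInteger(pEM, "HistoryExecMaxInstructions", 4096);  /* must be >= 16, see the range check above */
    return rc;
}

From the command line the same keys are typically injected as extradata under the VBoxInternal/ prefix (e.g. VBoxInternal/EM/IemExecutesAll), which lands in the same CFGM tree.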
317
318
319/**
320 * Called when a VM initialization stage is completed.
321 *
322 * @returns VBox status code.
323 * @param pVM The cross context VM structure.
324 * @param enmWhat The initialization state that was completed.
325 */
326VMMR3_INT_DECL(int) EMR3InitCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
327{
328 if (enmWhat == VMINITCOMPLETED_RING0)
329 LogRel(("EM: Exit history optimizations: enabled=%RTbool enabled-r0=%RTbool enabled-r0-no-preemption=%RTbool\n",
330 pVM->apCpusR3[0]->em.s.fExitOptimizationEnabled, pVM->apCpusR3[0]->em.s.fExitOptimizationEnabledR0,
331 pVM->apCpusR3[0]->em.s.fExitOptimizationEnabledR0PreemptDisabled));
332 return VINF_SUCCESS;
333}
334
335
336/**
337 * Applies relocations to data and code managed by this
338 * component. This function will be called at init and
339 * whenever the VMM needs to relocate itself inside the GC.
340 *
341 * @param pVM The cross context VM structure.
342 */
343VMMR3_INT_DECL(void) EMR3Relocate(PVM pVM)
344{
345 LogFlow(("EMR3Relocate\n"));
346 RT_NOREF(pVM);
347}
348
349
350/**
351 * Reset the EM state for a CPU.
352 *
353 * Called by EMR3Reset and hot plugging.
354 *
355 * @param pVCpu The cross context virtual CPU structure.
356 */
357VMMR3_INT_DECL(void) EMR3ResetCpu(PVMCPU pVCpu)
358{
359 /* Reset scheduling state. */
360 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_UNHALT);
361
362 /* VMR3ResetFF may return VINF_EM_RESET or VINF_EM_SUSPEND, so transition
363 out of the HALTED state here so that enmPrevState doesn't end up as
364 HALTED when EMR3Execute returns. */
365 if (pVCpu->em.s.enmState == EMSTATE_HALTED)
366 {
367 Log(("EMR3ResetCpu: Cpu#%u %s -> %s\n", pVCpu->idCpu, emR3GetStateName(pVCpu->em.s.enmState), pVCpu->idCpu == 0 ? "EMSTATE_NONE" : "EMSTATE_WAIT_SIPI"));
368 pVCpu->em.s.enmState = pVCpu->idCpu == 0 ? EMSTATE_NONE : EMSTATE_WAIT_SIPI;
369 }
370}
371
372
373/**
374 * Reset notification.
375 *
376 * @param pVM The cross context VM structure.
377 */
378VMMR3_INT_DECL(void) EMR3Reset(PVM pVM)
379{
380 Log(("EMR3Reset: \n"));
381 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
382 EMR3ResetCpu(pVM->apCpusR3[idCpu]);
383}
384
385
386/**
387 * Terminates the EM.
388 *
389 * Termination means cleaning up and freeing all resources,
390 * the VM itself is at this point powered off or suspended.
391 *
392 * @returns VBox status code.
393 * @param pVM The cross context VM structure.
394 */
395VMMR3_INT_DECL(int) EMR3Term(PVM pVM)
396{
397 RT_NOREF(pVM);
398 return VINF_SUCCESS;
399}
400
401
402/**
403 * Execute state save operation.
404 *
405 * @returns VBox status code.
406 * @param pVM The cross context VM structure.
407 * @param pSSM SSM operation handle.
408 */
409static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM)
410{
411 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
412 {
413 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
414
415 SSMR3PutBool(pSSM, false /*fForceRAW*/);
416
417 Assert(pVCpu->em.s.enmState == EMSTATE_SUSPENDED);
418 Assert(pVCpu->em.s.enmPrevState != EMSTATE_SUSPENDED);
419 SSMR3PutU32(pSSM,
420 pVCpu->em.s.enmPrevState == EMSTATE_NONE
421 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED
422 || pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
423 ? pVCpu->em.s.enmPrevState : EMSTATE_NONE);
424
425 /* Save mwait state. */
426 SSMR3PutU32(pSSM, pVCpu->em.s.MWait.fWait);
427 SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMWaitRAX);
428 SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMWaitRCX);
429 SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRAX);
430 SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRCX);
431 int rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRDX);
432 AssertRCReturn(rc, rc);
433 }
434 return VINF_SUCCESS;
435}
436
437
438/**
439 * Execute state load operation.
440 *
441 * @returns VBox status code.
442 * @param pVM The cross context VM structure.
443 * @param pSSM SSM operation handle.
444 * @param uVersion Data layout version.
445 * @param uPass The data pass.
446 */
447static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
448{
449 /*
450 * Validate version.
451 */
452 if ( uVersion > EM_SAVED_STATE_VERSION
453 || uVersion < EM_SAVED_STATE_VERSION_PRE_SMP)
454 {
455 AssertMsgFailed(("emR3Load: Invalid version uVersion=%d (current %d)!\n", uVersion, EM_SAVED_STATE_VERSION));
456 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
457 }
458 Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);
459
460 /*
461 * Load the saved state.
462 */
463 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
464 {
465 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
466
467 bool fForceRAWIgnored;
468 int rc = SSMR3GetBool(pSSM, &fForceRAWIgnored);
469 AssertRCReturn(rc, rc);
470
471 if (uVersion > EM_SAVED_STATE_VERSION_PRE_SMP)
472 {
473 /* We are only interested in two enmPrevState values for use when
474 EMR3ExecuteVM is called.
475 Since ~r157540, only these two and EMSTATE_NONE are saved. */
476 SSM_GET_ENUM32_RET(pSSM, pVCpu->em.s.enmPrevState, EMSTATE);
477 Assert(pVCpu->em.s.enmPrevState != EMSTATE_SUSPENDED);
478 if ( pVCpu->em.s.enmPrevState != EMSTATE_WAIT_SIPI
479 && pVCpu->em.s.enmPrevState != EMSTATE_HALTED)
480 pVCpu->em.s.enmPrevState = EMSTATE_NONE;
481
482 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
483 }
484 if (uVersion > EM_SAVED_STATE_VERSION_PRE_MWAIT)
485 {
486 /* Load mwait state. */
487 rc = SSMR3GetU32(pSSM, &pVCpu->em.s.MWait.fWait);
488 AssertRCReturn(rc, rc);
489 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMWaitRAX);
490 AssertRCReturn(rc, rc);
491 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMWaitRCX);
492 AssertRCReturn(rc, rc);
493 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRAX);
494 AssertRCReturn(rc, rc);
495 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRCX);
496 AssertRCReturn(rc, rc);
497 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRDX);
498 AssertRCReturn(rc, rc);
499 }
500 }
501 return VINF_SUCCESS;
502}
503
504
505/**
506 * Argument packet for emR3SetExecutionPolicy.
507 */
508struct EMR3SETEXECPOLICYARGS
509{
510 EMEXECPOLICY enmPolicy;
511 bool fEnforce;
512};
513
514
515/**
516 * @callback_method_impl{FNVMMEMTRENDEZVOUS, Rendezvous callback for EMR3SetExecutionPolicy.}
517 */
518static DECLCALLBACK(VBOXSTRICTRC) emR3SetExecutionPolicy(PVM pVM, PVMCPU pVCpu, void *pvUser)
519{
520 /*
521 * Only the first CPU changes the variables.
522 */
523 if (pVCpu->idCpu == 0)
524 {
525 struct EMR3SETEXECPOLICYARGS *pArgs = (struct EMR3SETEXECPOLICYARGS *)pvUser;
526 switch (pArgs->enmPolicy)
527 {
528 case EMEXECPOLICY_IEM_ALL:
529 pVM->em.s.fIemExecutesAll = pArgs->fEnforce;
530
531 /* For making '.alliem 1' useful during debugging, transition the
532 EMSTATE_DEBUG_GUEST_XXX to EMSTATE_DEBUG_GUEST_IEM. */
533 for (VMCPUID i = 0; i < pVM->cCpus; i++)
534 {
535 PVMCPU pVCpuX = pVM->apCpusR3[i];
536 switch (pVCpuX->em.s.enmState)
537 {
538 case EMSTATE_DEBUG_GUEST_RECOMPILER:
539 if (pVM->em.s.fIemRecompiled)
540 break;
541 RT_FALL_THROUGH();
542 case EMSTATE_DEBUG_GUEST_RAW:
543 case EMSTATE_DEBUG_GUEST_HM:
544 case EMSTATE_DEBUG_GUEST_NEM:
545 Log(("EM: idCpu=%u: %s -> EMSTATE_DEBUG_GUEST_IEM\n", i, emR3GetStateName(pVCpuX->em.s.enmState) ));
546 pVCpuX->em.s.enmState = EMSTATE_DEBUG_GUEST_IEM;
547 break;
548 case EMSTATE_DEBUG_GUEST_IEM:
549 default:
550 break;
551 }
552 }
553 break;
554
555 case EMEXECPOLICY_IEM_RECOMPILED:
556 pVM->em.s.fIemRecompiled = pArgs->fEnforce;
557 break;
558
559 default:
560 AssertFailedReturn(VERR_INVALID_PARAMETER);
561 }
562 Log(("EM: Set execution policy: fIemExecutesAll=%RTbool fIemRecompiled=%RTbool\n",
563 pVM->em.s.fIemExecutesAll, pVM->em.s.fIemRecompiled));
564 }
565
566 /*
567 * Force rescheduling if in HM, NEM, IEM/interpreter or IEM/recompiler.
568 */
569 Assert(pVCpu->em.s.enmState != EMSTATE_RAW_OBSOLETE);
570 return pVCpu->em.s.enmState == EMSTATE_HM
571 || pVCpu->em.s.enmState == EMSTATE_NEM
572 || pVCpu->em.s.enmState == EMSTATE_IEM
573 || pVCpu->em.s.enmState == EMSTATE_RECOMPILER
574 ? VINF_EM_RESCHEDULE
575 : VINF_SUCCESS;
576}
577
578
579/**
580 * Changes an execution scheduling policy parameter.
581 *
582 * This is used to enable or disable raw-mode / hardware-virtualization
583 * execution of user and supervisor code.
584 *
585 * @returns VINF_SUCCESS on success.
586 * @returns VINF_EM_RESCHEDULE if a rescheduling might be required.
587 * @returns VERR_INVALID_PARAMETER on an invalid enmPolicy value.
588 *
589 * @param pUVM The user mode VM handle.
590 * @param enmPolicy The scheduling policy to change.
591 * @param fEnforce Whether to enforce the policy or not.
592 */
593VMMR3DECL(int) EMR3SetExecutionPolicy(PUVM pUVM, EMEXECPOLICY enmPolicy, bool fEnforce)
594{
595 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
596 VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE);
597 AssertReturn(enmPolicy > EMEXECPOLICY_INVALID && enmPolicy < EMEXECPOLICY_END, VERR_INVALID_PARAMETER);
598
599 struct EMR3SETEXECPOLICYARGS Args = { enmPolicy, fEnforce };
600 return VMMR3EmtRendezvous(pUVM->pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING, emR3SetExecutionPolicy, &Args);
601}
602
603
604/**
605 * Queries an execution scheduling policy parameter.
606 *
607 * @returns VBox status code
608 * @param pUVM The user mode VM handle.
609 * @param enmPolicy The scheduling policy to query.
610 * @param pfEnforced Where to return the current value.
611 */
612VMMR3DECL(int) EMR3QueryExecutionPolicy(PUVM pUVM, EMEXECPOLICY enmPolicy, bool *pfEnforced)
613{
614 AssertReturn(enmPolicy > EMEXECPOLICY_INVALID && enmPolicy < EMEXECPOLICY_END, VERR_INVALID_PARAMETER);
615 AssertPtrReturn(pfEnforced, VERR_INVALID_POINTER);
616 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
617 PVM pVM = pUVM->pVM;
618 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
619
620 /* No need to bother EMTs with a query. */
621 switch (enmPolicy)
622 {
623 case EMEXECPOLICY_IEM_ALL:
624 *pfEnforced = pVM->em.s.fIemExecutesAll;
625 break;
626 case EMEXECPOLICY_IEM_RECOMPILED:
627 *pfEnforced = pVM->em.s.fIemRecompiled;
628 break;
629 default:
630 AssertFailedReturn(VERR_INTERNAL_ERROR_2);
631 }
632
633 return VINF_SUCCESS;
634}
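EMR3SetExecutionPolicy() and EMR3QueryExecutionPolicy() are the pair behind toggles such as the '.alliem' debugger command mentioned earlier. A minimal usage sketch; the wrapper function is an illustrative assumption, not part of EM.cpp.

/* Illustrative only: force all guest execution into IEM and read the setting back. */
static int exampleForceIemAll(PUVM pUVM)
{
    int rc = EMR3SetExecutionPolicy(pUVM, EMEXECPOLICY_IEM_ALL, true /*fEnforce*/);
    if (RT_SUCCESS(rc))
    {
        bool fEnforced = false;
        rc = EMR3QueryExecutionPolicy(pUVM, EMEXECPOLICY_IEM_ALL, &fEnforced);
        Assert(RT_FAILURE(rc) || fEnforced);
    }
    return rc;
}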
635
636
637/**
638 * Queries the main execution engine of the VM.
639 *
640 * @returns VBox status code
641 * @param pUVM The user mode VM handle.
642 * @param pbMainExecutionEngine Where to return the result, VM_EXEC_ENGINE_XXX.
643 */
644VMMR3DECL(int) EMR3QueryMainExecutionEngine(PUVM pUVM, uint8_t *pbMainExecutionEngine)
645{
646 AssertPtrReturn(pbMainExecutionEngine, VERR_INVALID_POINTER);
647 *pbMainExecutionEngine = VM_EXEC_ENGINE_NOT_SET;
648
649 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
650 PVM pVM = pUVM->pVM;
651 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
652
653 *pbMainExecutionEngine = pVM->bMainExecutionEngine;
654 return VINF_SUCCESS;
655}
656
657
658/**
659 * Raise a fatal error.
660 *
661 * Safely terminate the VM with full state report and stuff. This function
662 * will naturally never return.
663 *
664 * @param pVCpu The cross context virtual CPU structure.
665 * @param rc VBox status code.
666 */
667VMMR3DECL(void) EMR3FatalError(PVMCPU pVCpu, int rc)
668{
669 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
670 longjmp(pVCpu->em.s.u.FatalLongJump, rc);
671}
672
673
674#if defined(LOG_ENABLED) || defined(VBOX_STRICT)
675/**
676 * Gets the EM state name.
677 *
678 * @returns Pointer to read-only state name.
679 * @param enmState The state.
680 */
681static const char *emR3GetStateName(EMSTATE enmState)
682{
683 switch (enmState)
684 {
685 case EMSTATE_NONE: return "EMSTATE_NONE";
686 case EMSTATE_RAW_OBSOLETE: return "EMSTATE_RAW_OBSOLETE";
687 case EMSTATE_HM: return "EMSTATE_HM";
688 case EMSTATE_IEM: return "EMSTATE_IEM";
689 case EMSTATE_RECOMPILER: return "EMSTATE_RECOMPILER";
690 case EMSTATE_HALTED: return "EMSTATE_HALTED";
691 case EMSTATE_WAIT_SIPI: return "EMSTATE_WAIT_SIPI";
692 case EMSTATE_SUSPENDED: return "EMSTATE_SUSPENDED";
693 case EMSTATE_TERMINATING: return "EMSTATE_TERMINATING";
694 case EMSTATE_DEBUG_GUEST_RAW: return "EMSTATE_DEBUG_GUEST_RAW";
695 case EMSTATE_DEBUG_GUEST_HM: return "EMSTATE_DEBUG_GUEST_HM";
696 case EMSTATE_DEBUG_GUEST_IEM: return "EMSTATE_DEBUG_GUEST_IEM";
697 case EMSTATE_DEBUG_GUEST_RECOMPILER: return "EMSTATE_DEBUG_GUEST_RECOMPILER";
698 case EMSTATE_DEBUG_HYPER: return "EMSTATE_DEBUG_HYPER";
699 case EMSTATE_GURU_MEDITATION: return "EMSTATE_GURU_MEDITATION";
700 case EMSTATE_IEM_THEN_REM_OBSOLETE: return "EMSTATE_IEM_THEN_REM_OBSOLETE";
701 case EMSTATE_NEM: return "EMSTATE_NEM";
702 case EMSTATE_DEBUG_GUEST_NEM: return "EMSTATE_DEBUG_GUEST_NEM";
703 default: return "Unknown!";
704 }
705}
706#endif /* LOG_ENABLED || VBOX_STRICT */
707
708
709#if !defined(VBOX_VMM_TARGET_ARMV8)
710/**
711 * Handle pending ring-3 I/O port write.
712 *
713 * This is in response to a VINF_EM_PENDING_R3_IOPORT_WRITE status code returned
714 * by EMRZSetPendingIoPortWrite() in ring-0 or raw-mode context.
715 *
716 * @returns Strict VBox status code.
717 * @param pVM The cross context VM structure.
718 * @param pVCpu The cross context virtual CPU structure.
719 */
720VBOXSTRICTRC emR3ExecutePendingIoPortWrite(PVM pVM, PVMCPU pVCpu)
721{
722 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
723
724 /* Get and clear the pending data. */
725 RTIOPORT const uPort = pVCpu->em.s.PendingIoPortAccess.uPort;
726 uint32_t const uValue = pVCpu->em.s.PendingIoPortAccess.uValue;
727 uint8_t const cbValue = pVCpu->em.s.PendingIoPortAccess.cbValue;
728 uint8_t const cbInstr = pVCpu->em.s.PendingIoPortAccess.cbInstr;
729 pVCpu->em.s.PendingIoPortAccess.cbValue = 0;
730
731 /* Assert sanity. */
732 switch (cbValue)
733 {
734 case 1: Assert(!(uValue & UINT32_C(0xffffff00))); break;
735 case 2: Assert(!(uValue & UINT32_C(0xffff0000))); break;
736 case 4: break;
737 default: AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_EM_INTERNAL_ERROR);
738 }
739 AssertReturn(cbInstr <= 15 && cbInstr >= 1, VERR_EM_INTERNAL_ERROR);
740
741 /* Do the work.*/
742 VBOXSTRICTRC rcStrict = IOMIOPortWrite(pVM, pVCpu, uPort, uValue, cbValue);
743 LogFlow(("EM/OUT: %#x, %#x LB %u -> %Rrc\n", uPort, uValue, cbValue, VBOXSTRICTRC_VAL(rcStrict) ));
744 if (IOM_SUCCESS(rcStrict))
745 {
746 pVCpu->cpum.GstCtx.rip += cbInstr;
747 pVCpu->cpum.GstCtx.rflags.Bits.u1RF = 0;
748 }
749 return rcStrict;
750}
751
752
753/**
754 * Handle pending ring-3 I/O port read.
755 *
756 * This is in response to a VINF_EM_PENDING_R3_IOPORT_READ status code returned
757 * by EMRZSetPendingIoPortRead() in ring-0 or raw-mode context.
758 *
759 * @returns Strict VBox status code.
760 * @param pVM The cross context VM structure.
761 * @param pVCpu The cross context virtual CPU structure.
762 */
763VBOXSTRICTRC emR3ExecutePendingIoPortRead(PVM pVM, PVMCPU pVCpu)
764{
765 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_RAX);
766
767 /* Get and clear the pending data. */
768 RTIOPORT const uPort = pVCpu->em.s.PendingIoPortAccess.uPort;
769 uint8_t const cbValue = pVCpu->em.s.PendingIoPortAccess.cbValue;
770 uint8_t const cbInstr = pVCpu->em.s.PendingIoPortAccess.cbInstr;
771 pVCpu->em.s.PendingIoPortAccess.cbValue = 0;
772
773 /* Assert sanity. */
774 switch (cbValue)
775 {
776 case 1: break;
777 case 2: break;
778 case 4: break;
779 default: AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_EM_INTERNAL_ERROR);
780 }
781 AssertReturn(pVCpu->em.s.PendingIoPortAccess.uValue == UINT32_C(0x52454144) /* READ*/, VERR_EM_INTERNAL_ERROR);
782 AssertReturn(cbInstr <= 15 && cbInstr >= 1, VERR_EM_INTERNAL_ERROR);
783
784 /* Do the work.*/
785 uint32_t uValue = 0;
786 VBOXSTRICTRC rcStrict = IOMIOPortRead(pVM, pVCpu, uPort, &uValue, cbValue);
787 LogFlow(("EM/IN: %#x LB %u -> %Rrc, %#x\n", uPort, cbValue, VBOXSTRICTRC_VAL(rcStrict), uValue ));
788 if (IOM_SUCCESS(rcStrict))
789 {
790 if (cbValue == 4)
791 pVCpu->cpum.GstCtx.rax = uValue;
792 else if (cbValue == 2)
793 pVCpu->cpum.GstCtx.ax = (uint16_t)uValue;
794 else
795 pVCpu->cpum.GstCtx.al = (uint8_t)uValue;
796 pVCpu->cpum.GstCtx.rip += cbInstr;
797 pVCpu->cpum.GstCtx.rflags.Bits.u1RF = 0;
798 }
799 return rcStrict;
800}
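Both handlers above are consumed from the ring-3 run loop once the corresponding status codes bubble up. A sketch of the assumed dispatch; the surrounding loop variables are illustrative, not part of this excerpt.

/* Illustrative only: expected routing of the pending I/O port status codes. */
switch (VBOXSTRICTRC_VAL(rcStrict))
{
    case VINF_EM_PENDING_R3_IOPORT_WRITE:
        rcStrict = emR3ExecutePendingIoPortWrite(pVM, pVCpu);
        break;
    case VINF_EM_PENDING_R3_IOPORT_READ:
        rcStrict = emR3ExecutePendingIoPortRead(pVM, pVCpu);
        break;
    default:
        break;
}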
801
802
803/**
804 * @callback_method_impl{FNVMMEMTRENDEZVOUS,
805 * Worker for emR3ExecuteSplitLockInstruction}
806 */
807static DECLCALLBACK(VBOXSTRICTRC) emR3ExecuteSplitLockInstructionRendezvous(PVM pVM, PVMCPU pVCpu, void *pvUser)
808{
809 /* Only execute on the specified EMT. */
810 if (pVCpu == (PVMCPU)pvUser)
811 {
812 LogFunc(("\n"));
813 VBOXSTRICTRC rcStrict = IEMExecOneIgnoreLock(pVCpu);
814 LogFunc(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
815 if (rcStrict == VINF_IEM_RAISED_XCPT)
816 rcStrict = VINF_SUCCESS;
817 return rcStrict;
818 }
819 RT_NOREF(pVM);
820 return VINF_SUCCESS;
821}
822
823
824/**
825 * Handle an instruction causing a split cacheline lock access in SMP VMs.
826 *
827 * Generally we only get here if the host has split-lock detection enabled and
828 * this caused an \#AC because of something the guest did. If we interpret the
829 * instruction as-is, we'll likely just repeat the split-lock access and
830 * possibly be killed, get a SIGBUS, or trigger a warning followed by extra MSR
831 * changes on context switching (costs a tiny bit). Assuming these \#ACs are
832 * rare to non-existent, we'll do a rendezvous of all EMTs and tell IEM to
833 * disregard the lock prefix when emulating the instruction.
834 *
835 * Yes, we could probably modify the MSR (or MSRs) controlling the detection
836 * feature when entering guest context, but the support for the feature isn't a
837 * 100% given and we'll need the debug-only supdrvOSMsrProberRead and
838 * supdrvOSMsrProberWrite functionality from SUPDrv.cpp to safely detect it.
839 * Thus the approach is to just deal with the spurious \#ACs first and maybe add
840 * proper detection to SUPDrv later if we find it necessary.
841 *
842 * @see @bugref{10052}
843 *
844 * @returns Strict VBox status code.
845 * @param pVM The cross context VM structure.
846 * @param pVCpu The cross context virtual CPU structure.
847 */
848VBOXSTRICTRC emR3ExecuteSplitLockInstruction(PVM pVM, PVMCPU pVCpu)
849{
850 LogFunc(("\n"));
851 return VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE, emR3ExecuteSplitLockInstructionRendezvous, pVCpu);
852}
853#endif /* VBOX_VMM_TARGET_ARMV8 */
854
855
856/**
857 * Debug loop.
858 *
859 * @returns VBox status code for EM.
860 * @param pVM The cross context VM structure.
861 * @param pVCpu The cross context virtual CPU structure.
862 * @param rc Current EM VBox status code.
863 */
864static VBOXSTRICTRC emR3Debug(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc)
865{
866 for (;;)
867 {
868 Log(("emR3Debug: rc=%Rrc\n", VBOXSTRICTRC_VAL(rc)));
869 const VBOXSTRICTRC rcLast = rc;
870
871 /*
872 * Debug related RC.
873 */
874 switch (VBOXSTRICTRC_VAL(rc))
875 {
876 /*
877 * Single step an instruction.
878 */
879 case VINF_EM_DBG_STEP:
880 if ( pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_RAW
881 || pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER)
882 AssertLogRelMsgFailedStmt(("Bad EM state."), VERR_EM_INTERNAL_ERROR);
883#if !defined(VBOX_VMM_TARGET_ARMV8)
884 else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_HM)
885 rc = EMR3HmSingleInstruction(pVM, pVCpu, 0 /*fFlags*/);
886#endif
887 else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_NEM)
888 rc = VBOXSTRICTRC_TODO(emR3NemSingleInstruction(pVM, pVCpu, 0 /*fFlags*/));
889 else
890 {
891 rc = IEMExecOne(pVCpu); /** @todo add dedicated interface... */
892 if (rc == VINF_SUCCESS || rc == VINF_EM_RESCHEDULE)
893 rc = VINF_EM_DBG_STEPPED;
894 }
895 break;
896
897 /*
898 * Simple events: stepped, breakpoint, stop/assertion.
899 */
900 case VINF_EM_DBG_STEPPED:
901 rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED);
902 break;
903
904 case VINF_EM_DBG_BREAKPOINT:
905 rc = DBGFR3BpHit(pVM, pVCpu);
906 break;
907
908 case VINF_EM_DBG_STOP:
909 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, NULL, 0, NULL, NULL);
910 break;
911
912 case VINF_EM_DBG_EVENT:
913 rc = DBGFR3EventHandlePending(pVM, pVCpu);
914 break;
915
916 case VINF_EM_DBG_HYPER_STEPPED:
917 rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED_HYPER);
918 break;
919
920 case VINF_EM_DBG_HYPER_BREAKPOINT:
921 rc = DBGFR3EventBreakpoint(pVM, DBGFEVENT_BREAKPOINT_HYPER);
922 break;
923
924 case VINF_EM_DBG_HYPER_ASSERTION:
925 RTPrintf("\nVINF_EM_DBG_HYPER_ASSERTION:\n%s%s\n", VMMR3GetRZAssertMsg1(pVM), VMMR3GetRZAssertMsg2(pVM));
926 RTLogFlush(NULL);
927 rc = DBGFR3EventAssertion(pVM, DBGFEVENT_ASSERTION_HYPER, VMMR3GetRZAssertMsg1(pVM), VMMR3GetRZAssertMsg2(pVM));
928 break;
929
930 /*
931 * Guru meditation.
932 */
933 case VERR_VMM_RING0_ASSERTION: /** @todo Make a guru meditation event! */
934 rc = DBGFR3EventSrc(pVM, DBGFEVENT_FATAL_ERROR, "VERR_VMM_RING0_ASSERTION", 0, NULL, NULL);
935 break;
936 case VINF_EM_TRIPLE_FAULT: /** @todo Make a guru meditation event! */
937 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, "VINF_EM_TRIPLE_FAULT", 0, NULL, NULL);
938 break;
939
940 default: /** @todo don't use default for guru, but make special errors code! */
941 {
942 LogRel(("emR3Debug: rc=%Rrc\n", VBOXSTRICTRC_VAL(rc)));
943 rc = DBGFR3Event(pVM, DBGFEVENT_FATAL_ERROR);
944 break;
945 }
946 }
947
948 /*
949 * Process the result.
950 */
951 switch (VBOXSTRICTRC_VAL(rc))
952 {
953 /*
954 * Continue the debugging loop.
955 */
956 case VINF_EM_DBG_STEP:
957 case VINF_EM_DBG_STOP:
958 case VINF_EM_DBG_EVENT:
959 case VINF_EM_DBG_STEPPED:
960 case VINF_EM_DBG_BREAKPOINT:
961 case VINF_EM_DBG_HYPER_STEPPED:
962 case VINF_EM_DBG_HYPER_BREAKPOINT:
963 case VINF_EM_DBG_HYPER_ASSERTION:
964 break;
965
966 /*
967 * Resuming execution (in some form) has to be done here if we got
968 * a hypervisor debug event.
969 */
970 case VINF_SUCCESS:
971 case VINF_EM_RESUME:
972 case VINF_EM_SUSPEND:
973 case VINF_EM_RESCHEDULE:
974 case VINF_EM_RESCHEDULE_REM:
975 case VINF_EM_HALT:
976 if (pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER)
977 AssertLogRelMsgFailedReturn(("Not implemented\n"), VERR_EM_INTERNAL_ERROR);
978 if (rc == VINF_SUCCESS)
979 rc = VINF_EM_RESCHEDULE;
980 return rc;
981
982 /*
983 * The debugger isn't attached.
984 * We'll simply turn the thing off since that's the easiest thing to do.
985 */
986 case VERR_DBGF_NOT_ATTACHED:
987 switch (VBOXSTRICTRC_VAL(rcLast))
988 {
989 case VINF_EM_DBG_HYPER_STEPPED:
990 case VINF_EM_DBG_HYPER_BREAKPOINT:
991 case VINF_EM_DBG_HYPER_ASSERTION:
992 case VERR_TRPM_PANIC:
993 case VERR_TRPM_DONT_PANIC:
994 case VERR_VMM_RING0_ASSERTION:
995 case VERR_VMM_HYPER_CR3_MISMATCH:
996 case VERR_VMM_RING3_CALL_DISABLED:
997 return rcLast;
998 }
999 return VINF_EM_OFF;
1000
1001 /*
1002 * Status codes terminating the VM in one or another sense.
1003 */
1004 case VINF_EM_TERMINATE:
1005 case VINF_EM_OFF:
1006 case VINF_EM_RESET:
1007 case VINF_EM_NO_MEMORY:
1008 case VINF_EM_RAW_STALE_SELECTOR:
1009 case VINF_EM_RAW_IRET_TRAP:
1010 case VERR_TRPM_PANIC:
1011 case VERR_TRPM_DONT_PANIC:
1012 case VERR_IEM_INSTR_NOT_IMPLEMENTED:
1013 case VERR_IEM_ASPECT_NOT_IMPLEMENTED:
1014 case VERR_VMM_RING0_ASSERTION:
1015 case VERR_VMM_HYPER_CR3_MISMATCH:
1016 case VERR_VMM_RING3_CALL_DISABLED:
1017 case VERR_INTERNAL_ERROR:
1018 case VERR_INTERNAL_ERROR_2:
1019 case VERR_INTERNAL_ERROR_3:
1020 case VERR_INTERNAL_ERROR_4:
1021 case VERR_INTERNAL_ERROR_5:
1022 case VERR_IPE_UNEXPECTED_STATUS:
1023 case VERR_IPE_UNEXPECTED_INFO_STATUS:
1024 case VERR_IPE_UNEXPECTED_ERROR_STATUS:
1025 return rc;
1026
1027 /*
1028 * The rest is unexpected, and will keep us here.
1029 */
1030 default:
1031 AssertMsgFailed(("Unexpected rc %Rrc!\n", VBOXSTRICTRC_VAL(rc)));
1032 break;
1033 }
1034 } /* debug for ever */
1035}
1036
1037
1038/**
1039 * Executes recompiled code.
1040 *
1041 * This function contains the recompiler version of the inner
1042 * execution loop (the outer loop being in EMR3ExecuteVM()).
1043 *
1044 * @returns VBox status code. The most important ones are: VINF_EM_RESCHEDULE,
1045 * VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1046 *
1047 * @param pVM The cross context VM structure.
1048 * @param pVCpu The cross context virtual CPU structure.
1049 * @param pfFFDone Where to store an indicator telling whether or not
1050 * FFs were done before returning.
1051 *
1052 */
1053static VBOXSTRICTRC emR3RecompilerExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
1054{
1055 STAM_REL_PROFILE_START(&pVCpu->em.s.StatREMTotal, a);
1056#ifdef VBOX_VMM_TARGET_ARMV8
1057 LogFlow(("emR3RecompilerExecute/%u: (pc=%RGv)\n", pVCpu->idCpu, (RTGCPTR)pVCpu->cpum.GstCtx.Pc.u64));
1058#else
1059 LogFlow(("emR3RecompilerExecute/%u: (cs:eip=%04x:%RGv)\n", pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, (RTGCPTR)pVCpu->cpum.GstCtx.rip));
1060#endif
1061
1062 /*
1063 * Loop till we get a forced action which returns anything but VINF_SUCCESS.
1064 */
1065 *pfFFDone = false;
1066 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
1067 for (;;)
1068 {
1069#ifdef LOG_ENABLED
1070# if defined(VBOX_VMM_TARGET_ARMV8)
1071 Log3(("EM: pc=%08x\n", CPUMGetGuestFlatPC(pVCpu)));
1072# else
1073 if (!pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
1074 Log(("EMR%d: %04X:%08RX64 RSP=%08RX64 IF=%d CR0=%x eflags=%x\n", CPUMGetGuestCPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel,
1075 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.Bits.u1IF,
1076 (uint32_t)pVCpu->cpum.GstCtx.cr0, pVCpu->cpum.GstCtx.eflags.u));
1077 else
1078 Log(("EMV86: %04X:%08X IF=%d\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.Bits.u1IF));
1079# endif
1080#endif
1081
1082 /*
1083 * Execute.
1084 */
1085 if (RT_LIKELY(emR3IsExecutionAllowed(pVM, pVCpu)))
1086 {
1087 STAM_PROFILE_START(&pVCpu->em.s.StatREMExec, c);
1088#ifdef VBOX_WITH_IEM_RECOMPILER
1089 if (pVM->em.s.fIemRecompiled)
1090 rcStrict = IEMExecRecompiler(pVM, pVCpu);
1091 else
1092#endif
1093 rcStrict = IEMExecLots(pVCpu, 8192 /*cMaxInstructions*/, 4095 /*cPollRate*/, NULL /*pcInstructions*/);
1094 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMExec, c);
1095 }
1096 else
1097 {
1098 /* Give up this time slice; virtual time continues */
1099 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatCapped, u);
1100 RTThreadSleep(5);
1101 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatCapped, u);
1102 rcStrict = VINF_SUCCESS;
1103 }
1104
1105 /*
1106 * Deal with high priority post execution FFs before doing anything
1107 * else. Sync back the state and leave the lock to be on the safe side.
1108 */
1109 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_HIGH_PRIORITY_POST_MASK)
1110 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_MASK))
1111 rcStrict = emR3HighPriorityPostForcedActions(pVM, pVCpu, rcStrict);
1112
1113 /*
1114 * Process the returned status code.
1115 */
1116 if (rcStrict != VINF_SUCCESS)
1117 {
1118#if 0
1119 if (RT_LIKELY(rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST))
1120 break;
1121 /* Fatal error: */
1122#endif
1123 break;
1124 }
1125
1126
1127 /*
1128 * Check and execute forced actions.
1129 *
1130 * Sync back the VM state and leave the lock before calling any of
1131 * these, you never know what's going to happen here.
1132 */
1133#ifdef VBOX_HIGH_RES_TIMERS_HACK
1134 TMTimerPollVoid(pVM, pVCpu);
1135#endif
1136 AssertCompile(VMCPU_FF_ALL_REM_MASK & VMCPU_FF_TIMER);
1137 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_REM_MASK)
1138 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_ALL_REM_MASK) )
1139 {
1140 rcStrict = emR3ForcedActions(pVM, pVCpu, VBOXSTRICTRC_TODO(rcStrict));
1141 VBOXVMM_EM_FF_ALL_RET(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1142 if ( rcStrict != VINF_SUCCESS
1143 && rcStrict != VINF_EM_RESCHEDULE_REM)
1144 {
1145 *pfFFDone = true;
1146 break;
1147 }
1148 }
1149
1150 /*
1151 * Check if we can switch back to the main execution engine now.
1152 */
1153#if !defined(VBOX_VMM_TARGET_ARMV8)
1154 if (VM_IS_HM_ENABLED(pVM))
1155 {
1156 if (HMCanExecuteGuest(pVM, pVCpu, &pVCpu->cpum.GstCtx))
1157 {
1158 *pfFFDone = true;
1159 rcStrict = VINF_EM_RESCHEDULE_EXEC_ENGINE;
1160 break;
1161 }
1162 }
1163 else
1164#endif
1165 if (VM_IS_NEM_ENABLED(pVM))
1166 {
1167 if (NEMR3CanExecuteGuest(pVM, pVCpu))
1168 {
1169 *pfFFDone = true;
1170 rcStrict = VINF_EM_RESCHEDULE_EXEC_ENGINE;
1171 break;
1172 }
1173 }
1174
1175 } /* The Inner Loop, recompiled execution mode version. */
1176
1177 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatREMTotal, a);
1178 return rcStrict;
1179}
1180
1181
1182/**
1183 * Decides whether to execute HM, NEM, IEM/interpreter or IEM/recompiler.
1184 *
1185 * @returns new EM state
1186 * @param pVM The cross context VM structure.
1187 * @param pVCpu The cross context virtual CPU structure.
1188 */
1189EMSTATE emR3Reschedule(PVM pVM, PVMCPU pVCpu)
1190{
1191 /*
1192 * We stay in the wait for SIPI state unless explicitly told otherwise.
1193 */
1194 if (pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI)
1195 return EMSTATE_WAIT_SIPI;
1196
1197 /*
1198 * Execute everything in IEM?
1199 */
1200 if ( pVM->em.s.fIemExecutesAll
1201 || VM_IS_EXEC_ENGINE_IEM(pVM))
1202#ifdef VBOX_WITH_IEM_RECOMPILER
1203 return pVM->em.s.fIemRecompiled ? EMSTATE_RECOMPILER : EMSTATE_IEM;
1204#else
1205 return EMSTATE_IEM;
1206#endif
1207
1208#if !defined(VBOX_VMM_TARGET_ARMV8)
1209 if (VM_IS_HM_ENABLED(pVM))
1210 {
1211 if (HMCanExecuteGuest(pVM, pVCpu, &pVCpu->cpum.GstCtx))
1212 return EMSTATE_HM;
1213 }
1214 else
1215#endif
1216 if (NEMR3CanExecuteGuest(pVM, pVCpu))
1217 return EMSTATE_NEM;
1218
1219 /*
1220 * Note! Raw mode and hw accelerated mode are incompatible. The latter
1221 * turns off monitoring features essential for raw mode!
1222 */
1223#ifdef VBOX_WITH_IEM_RECOMPILER
1224 return pVM->em.s.fIemRecompiled ? EMSTATE_RECOMPILER : EMSTATE_IEM;
1225#else
1226 return EMSTATE_IEM;
1227#endif
1228}
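A typical use of emR3Reschedule() is from the outer EMR3ExecuteVM() loop when a VINF_EM_RESCHEDULE status asks for a new execution engine. A short sketch under that assumption; the call site shown is illustrative.

/* Illustrative only: pick the new engine and switch the per-VCPU EM state. */
EMSTATE const enmNewState = emR3Reschedule(pVM, pVCpu);
Log(("EM: Rescheduling %s -> %s\n", emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmNewState)));
pVCpu->em.s.enmState = enmNewState;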
1229
1230
1231/**
1232 * Executes all high priority post execution force actions.
1233 *
1234 * @returns Strict VBox status code. Typically @a rc, but may be upgraded to
1235 * fatal error status code.
1236 *
1237 * @param pVM The cross context VM structure.
1238 * @param pVCpu The cross context virtual CPU structure.
1239 * @param rc The current strict VBox status code rc.
1240 */
1241VBOXSTRICTRC emR3HighPriorityPostForcedActions(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc)
1242{
1243 VBOXVMM_EM_FF_HIGH(pVCpu, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions, VBOXSTRICTRC_VAL(rc));
1244
1245 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PDM_CRITSECT))
1246 PDMCritSectBothFF(pVM, pVCpu);
1247
1248#if !defined(VBOX_VMM_TARGET_ARMV8)
1249 /* Update CR3 (Nested Paging case for HM). */
1250 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
1251 {
1252 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_EFER, rc);
1253 int const rc2 = PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
1254 if (RT_FAILURE(rc2))
1255 return rc2;
1256 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
1257 }
1258#endif
1259
1260 /* IEM has pending work (typically memory write after INS instruction). */
1261 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
1262 rc = IEMR3ProcessForceFlag(pVM, pVCpu, rc);
1263
1264 /* IOM has pending work (committing an I/O or MMIO write). */
1265 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IOM))
1266 {
1267 rc = IOMR3ProcessForceFlag(pVM, pVCpu, rc);
1268 if (pVCpu->em.s.idxContinueExitRec >= RT_ELEMENTS(pVCpu->em.s.aExitRecords))
1269 { /* half likely, or at least it's a line shorter. */ }
1270 else if (rc == VINF_SUCCESS)
1271 rc = VINF_EM_RESUME_R3_HISTORY_EXEC;
1272 else
1273 pVCpu->em.s.idxContinueExitRec = UINT16_MAX;
1274 }
1275
1276 if (VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
1277 {
1278 if ( rc > VINF_EM_NO_MEMORY
1279 && rc <= VINF_EM_LAST)
1280 rc = VINF_EM_NO_MEMORY;
1281 }
1282
1283 return rc;
1284}
1285
1286
1287#if !defined(VBOX_VMM_TARGET_ARMV8)
1288/**
1289 * Helper for emR3ForcedActions() for VMX external interrupt VM-exit.
1290 *
1291 * @returns VBox status code.
1292 * @retval VINF_NO_CHANGE if the VMX external interrupt intercept was not active.
1293 * @param pVCpu The cross context virtual CPU structure.
1294 */
1295static int emR3VmxNstGstIntrIntercept(PVMCPU pVCpu)
1296{
1297#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1298 /* Handle the "external interrupt" VM-exit intercept. */
1299 if ( CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_EXT_INT_EXIT)
1300 && !CPUMIsGuestVmxExitCtlsSet(&pVCpu->cpum.GstCtx, VMX_EXIT_CTLS_ACK_EXT_INT))
1301 {
1302 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, 0 /* uVector */, true /* fIntPending */);
1303 AssertMsg( rcStrict != VINF_VMX_VMEXIT /* VM-exit should have been converted to VINF_SUCCESS. */
1304 && rcStrict != VINF_NO_CHANGE
1305 && rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1306 return VBOXSTRICTRC_VAL(rcStrict);
1307 }
1308#else
1309 RT_NOREF(pVCpu);
1310#endif
1311 return VINF_NO_CHANGE;
1312}
1313
1314
1315/**
1316 * Helper for emR3ForcedActions() for SVM interrupt intercept.
1317 *
1318 * @returns VBox status code.
1319 * @retval VINF_NO_CHANGE if the SVM external interrupt intercept was not active.
1320 * @param pVCpu The cross context virtual CPU structure.
1321 */
1322static int emR3SvmNstGstIntrIntercept(PVMCPU pVCpu)
1323{
1324#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1325 /* Handle the physical interrupt intercept (can be masked by the nested hypervisor). */
1326 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, &pVCpu->cpum.GstCtx, SVM_CTRL_INTERCEPT_INTR))
1327 {
1328 CPUM_ASSERT_NOT_EXTRN(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK);
1329 VBOXSTRICTRC rcStrict = IEMExecSvmVmexit(pVCpu, SVM_EXIT_INTR, 0, 0);
1330 if (RT_SUCCESS(rcStrict))
1331 {
1332 AssertMsg( rcStrict != VINF_SVM_VMEXIT
1333 && rcStrict != VINF_NO_CHANGE, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1334 return VBOXSTRICTRC_VAL(rcStrict);
1335 }
1336
1337 AssertMsgFailed(("INTR #VMEXIT failed! rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1338 return VINF_EM_TRIPLE_FAULT;
1339 }
1340#else
1341 NOREF(pVCpu);
1342#endif
1343 return VINF_NO_CHANGE;
1344}
1345
1346
1347/**
1348 * Helper for emR3ForcedActions() for SVM virtual interrupt intercept.
1349 *
1350 * @returns VBox status code.
1351 * @retval VINF_NO_CHANGE if the SVM virtual interrupt intercept was not active.
1352 * @param pVCpu The cross context virtual CPU structure.
1353 */
1354static int emR3SvmNstGstVirtIntrIntercept(PVMCPU pVCpu)
1355{
1356#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1357 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, &pVCpu->cpum.GstCtx, SVM_CTRL_INTERCEPT_VINTR))
1358 {
1359 CPUM_ASSERT_NOT_EXTRN(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK);
1360 VBOXSTRICTRC rcStrict = IEMExecSvmVmexit(pVCpu, SVM_EXIT_VINTR, 0, 0);
1361 if (RT_SUCCESS(rcStrict))
1362 {
1363 Assert(rcStrict != VINF_SVM_VMEXIT);
1364 return VBOXSTRICTRC_VAL(rcStrict);
1365 }
1366 AssertMsgFailed(("VINTR #VMEXIT failed! rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1367 return VINF_EM_TRIPLE_FAULT;
1368 }
1369#else
1370 NOREF(pVCpu);
1371#endif
1372 return VINF_NO_CHANGE;
1373}
1374#endif
1375
1376
1377/**
1378 * Executes all pending forced actions.
1379 *
1380 * Forced actions can cause execution delays and execution
1381 * rescheduling. The first we deal with using action priority, so
1382 * that for instance pending timers aren't scheduled and run until
1383 * right before execution. The rescheduling we deal with using
1384 * return codes. The same goes for VM termination, only in that case
1385 * we exit everything.
1386 *
1387 * @returns VBox status code of equal or greater importance/severity than rc.
1388 * The most important ones are: VINF_EM_RESCHEDULE,
1389 * VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1390 *
1391 * @param pVM The cross context VM structure.
1392 * @param pVCpu The cross context virtual CPU structure.
1393 * @param rc The current rc.
1394 *
1395 */
1396int emR3ForcedActions(PVM pVM, PVMCPU pVCpu, int rc)
1397{
1398 STAM_REL_PROFILE_START(&pVCpu->em.s.StatForcedActions, a);
1399#ifdef VBOX_STRICT
1400 int rcIrq = VINF_SUCCESS;
1401#endif
1402 int rc2;
1403#define UPDATE_RC() \
1404 do { \
1405 AssertMsg(rc2 <= 0 || (rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST), ("Invalid FF return code: %Rra\n", rc2)); \
1406 if (rc2 == VINF_SUCCESS || rc < VINF_SUCCESS) \
1407 break; \
1408 if (!rc || rc2 < rc) \
1409 rc = rc2; \
1410 } while (0)
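 /* Worked example (illustrative): UPDATE_RC() keeps the numerically smallest pending
    VINF_EM_* code, i.e. the most severe one.  Starting from rc = VINF_SUCCESS, a first
    rc2 = VINF_EM_RESCHEDULE makes rc non-zero; a later, lower-numbered rc2 such as
    VINF_EM_SUSPEND replaces it, while a higher-numbered rc2 is ignored.  An error rc
    (rc < VINF_SUCCESS) is never overwritten. */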
1411 VBOXVMM_EM_FF_ALL(pVCpu, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions, rc);
1412
1413 /*
1414 * Post execution chunk first.
1415 */
1416 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_NORMAL_PRIORITY_POST_MASK)
1417 || (VMCPU_FF_NORMAL_PRIORITY_POST_MASK && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_NORMAL_PRIORITY_POST_MASK)) )
1418 {
1419 /*
1420 * EMT Rendezvous (must be serviced before termination).
1421 */
1422 if (VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
1423 {
1424 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1425 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
1426 UPDATE_RC();
1427 /** @todo HACK ALERT! The following test is to make sure EM+TM
1428 * thinks the VM is stopped/reset before the next VM state change
1429 * is made. We need a better solution for this, or at least make it
1430 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1431 * VINF_EM_SUSPEND). */
1432 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1433 {
1434 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1435 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1436 return rc;
1437 }
1438 }
1439
1440 /*
1441 * State change request (cleared by vmR3SetStateLocked).
1442 */
1443 if (VM_FF_IS_SET(pVM, VM_FF_CHECK_VM_STATE))
1444 {
1445 VMSTATE enmState = VMR3GetState(pVM);
1446 switch (enmState)
1447 {
1448 case VMSTATE_FATAL_ERROR:
1449 case VMSTATE_FATAL_ERROR_LS:
1450 case VMSTATE_GURU_MEDITATION:
1451 case VMSTATE_GURU_MEDITATION_LS:
1452 Log2(("emR3ForcedActions: %s -> VINF_EM_SUSPEND\n", VMGetStateName(enmState) ));
1453 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1454 return VINF_EM_SUSPEND;
1455
1456 case VMSTATE_DESTROYING:
1457 Log2(("emR3ForcedActions: %s -> VINF_EM_TERMINATE\n", VMGetStateName(enmState) ));
1458 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1459 return VINF_EM_TERMINATE;
1460
1461 default:
1462 AssertMsgFailed(("%s\n", VMGetStateName(enmState)));
1463 }
1464 }
1465
1466 /*
1467 * Debugger Facility polling.
1468 */
1469 if ( VM_FF_IS_SET(pVM, VM_FF_DBGF)
1470 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_DBGF) )
1471 {
1472 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1473 rc2 = DBGFR3VMMForcedAction(pVM, pVCpu);
1474 /** @todo why that VINF_EM_DBG_EVENT here? Duplicate info, should be handled
1475 * somewhere before we get here, I would think. */
1476 if (rc == VINF_EM_DBG_EVENT) /* HACK! We should've handled pending debug event. */
1477 rc = rc2;
1478 else
1479 UPDATE_RC();
1480 }
1481
1482 /*
1483 * Postponed reset request.
1484 */
1485 if (VM_FF_TEST_AND_CLEAR(pVM, VM_FF_RESET))
1486 {
1487 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1488 rc2 = VBOXSTRICTRC_TODO(VMR3ResetFF(pVM));
1489 UPDATE_RC();
1490 }
1491
1492 /*
1493 * Out of memory? Putting this after CSAM as it may in theory cause us to run out of memory.
1494 */
1495 if (VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
1496 {
1497 rc2 = PGMR3PhysAllocateHandyPages(pVM);
1498 UPDATE_RC();
1499 if (rc == VINF_EM_NO_MEMORY)
1500 return rc;
1501 }
1502
1503 /* check that we got them all */
1504 AssertCompile(VM_FF_NORMAL_PRIORITY_POST_MASK == (VM_FF_CHECK_VM_STATE | VM_FF_DBGF | VM_FF_RESET | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
1505 AssertCompile(VMCPU_FF_NORMAL_PRIORITY_POST_MASK == VMCPU_FF_DBGF);
1506 }
1507
1508 /*
1509 * Normal priority then.
1510 * (Executed in no particular order.)
1511 */
1512 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_NORMAL_PRIORITY_MASK, VM_FF_PGM_NO_MEMORY))
1513 {
1514 /*
1515 * PDM Queues are pending.
1516 */
1517 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PDM_QUEUES, VM_FF_PGM_NO_MEMORY))
1518 PDMR3QueueFlushAll(pVM);
1519
1520 /*
1521 * PDM DMA transfers are pending.
1522 */
1523 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PDM_DMA, VM_FF_PGM_NO_MEMORY))
1524 PDMR3DmaRun(pVM);
1525
1526 /*
1527 * EMT Rendezvous (make sure they are handled before the requests).
1528 */
1529 if (VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
1530 {
1531 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1532 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
1533 UPDATE_RC();
1534 /** @todo HACK ALERT! The following test is to make sure EM+TM
1535 * thinks the VM is stopped/reset before the next VM state change
1536 * is made. We need a better solution for this, or at least make it
1537 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1538 * VINF_EM_SUSPEND). */
1539 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1540 {
1541 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1542 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1543 return rc;
1544 }
1545 }
1546
1547 /*
1548 * Requests from other threads.
1549 */
1550 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_REQUEST, VM_FF_PGM_NO_MEMORY))
1551 {
1552 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1553 rc2 = VMR3ReqProcessU(pVM->pUVM, VMCPUID_ANY, false /*fPriorityOnly*/);
1554 if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE) /** @todo this shouldn't be necessary */
1555 {
1556 Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
1557 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1558 return rc2;
1559 }
1560 UPDATE_RC();
1561 /** @todo HACK ALERT! The following test is to make sure EM+TM
1562 * thinks the VM is stopped/reset before the next VM state change
1563 * is made. We need a better solution for this, or at least make it
1564 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1565 * VINF_EM_SUSPEND). */
1566 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1567 {
1568 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1569 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1570 return rc;
1571 }
1572 }
1573
1574 /* check that we got them all */
1575 AssertCompile(VM_FF_NORMAL_PRIORITY_MASK == (VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA | VM_FF_EMT_RENDEZVOUS));
1576 }
1577
1578 /*
1579 * Normal priority then. (per-VCPU)
1580 * (Executed in no particular order.)
1581 */
1582 if ( !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY)
1583 && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_NORMAL_PRIORITY_MASK))
1584 {
1585 /*
1586 * Requests from other threads.
1587 */
1588 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_REQUEST))
1589 {
1590 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1591 rc2 = VMR3ReqProcessU(pVM->pUVM, pVCpu->idCpu, false /*fPriorityOnly*/);
1592 if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE || rc2 == VINF_EM_RESET)
1593 {
1594 Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
1595 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1596 return rc2;
1597 }
1598 UPDATE_RC();
1599 /** @todo HACK ALERT! The following test is to make sure EM+TM
1600 * thinks the VM is stopped/reset before the next VM state change
1601 * is made. We need a better solution for this, or at least make it
1602 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1603 * VINF_EM_SUSPEND). */
1604 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1605 {
1606 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1607 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1608 return rc;
1609 }
1610 }
1611
1612 /* check that we got them all */
1613 Assert(!(VMCPU_FF_NORMAL_PRIORITY_MASK & ~VMCPU_FF_REQUEST));
1614 }
1615
1616 /*
1617 * High priority pre execution chunk last.
1618 * (Executed in ascending priority order.)
1619 */
1620 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_HIGH_PRIORITY_PRE_MASK)
1621 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_MASK))
1622 {
1623 /*
1624 * Timers before interrupts.
1625 */
1626 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TIMER)
1627 && !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
1628 TMR3TimerQueuesDo(pVM);
1629
1630#if !defined(VBOX_VMM_TARGET_ARMV8)
1631 /*
1632 * Pick up asynchronously posted interrupts into the APIC.
1633 */
1634 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
1635 APICUpdatePendingInterrupts(pVCpu);
1636
1637 /*
1638 * The instruction following an emulated STI should *always* be executed!
1639 *
1640 * Note! We intentionally don't clear CPUMCTX_INHIBIT_INT here if
1641 * the eip is the same as the inhibited instr address. Before we
1642 * are able to execute this instruction in raw mode (iret to
1643 * guest code) an external interrupt might force a world switch
1644 * again. Possibly allowing a guest interrupt to be dispatched
1645 * in the process. This could break the guest. Sounds very
1646             *       unlikely, but such timing-sensitive problems are not as rare as
1647 * you might think.
1648 *
1649 * Note! This used to be a force action flag. Can probably ditch this code.
1650 */
1651 /** @todo r=bird: the clearing case will *never* be taken here as
1652         *        CPUMIsInInterruptShadow already makes sure the RIPs match. */
1653 if ( CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
1654 && !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
1655 {
1656 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_INHIBIT_INT);
1657 if (CPUMGetGuestRIP(pVCpu) != pVCpu->cpum.GstCtx.uRipInhibitInt)
1658 {
1659 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx);
1660 Log(("Clearing CPUMCTX_INHIBIT_INT at %RGv - successor %RGv\n",
1661 (RTGCPTR)CPUMGetGuestRIP(pVCpu), (RTGCPTR)pVCpu->cpum.GstCtx.uRipInhibitInt));
1662 }
1663 else
1664 Log(("Leaving CPUMCTX_INHIBIT_INT set at %RGv\n", (RTGCPTR)CPUMGetGuestRIP(pVCpu)));
1665 }
1666
1667 /** @todo SMIs. If we implement SMIs, this is where they will have to be
1668 * delivered. */
1669
1670# ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1671 if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
1672 | VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW))
1673 {
1674 /*
1675 * VMX Nested-guest APIC-write pending (can cause VM-exits).
1676 * Takes priority over even SMI and INIT signals.
1677 * See Intel spec. 29.4.3.2 "APIC-Write Emulation".
1678 */
1679 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
1680 {
1681 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexitApicWrite(pVCpu));
1682 if (rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
1683 UPDATE_RC();
1684 }
1685
1686 /*
1687                 * APIC write emulation MAY have caused a VM-exit.
1688 * If it did cause a VM-exit, there's no point checking the other VMX non-root mode FFs here.
1689 */
1690 if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
1691 {
1692 /*
1693 * VMX Nested-guest monitor-trap flag (MTF) VM-exit.
1694 * Takes priority over "Traps on the previous instruction".
1695 * See Intel spec. 6.9 "Priority Among Simultaneous Exceptions And Interrupts".
1696 */
1697 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF))
1698 {
1699 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* uExitQual */));
1700 Assert(rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE);
1701 UPDATE_RC();
1702 }
1703 /*
1704 * VMX Nested-guest preemption timer VM-exit.
1705 * Takes priority over NMI-window VM-exits.
1706 */
1707 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
1708 {
1709 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexitPreemptTimer(pVCpu));
1710 Assert(rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE);
1711 UPDATE_RC();
1712 }
1713 /*
1714 * VMX interrupt-window and NMI-window VM-exits.
1715 * Takes priority over non-maskable interrupts (NMIs) and external interrupts respectively.
1716                     * If we are in an interrupt shadow or if we are already in the process of delivering
1717 * an event then these VM-exits cannot occur.
1718 *
1719 * Interrupt shadows block NMI-window VM-exits.
1720 * Any event that is already in TRPM (e.g. injected during VM-entry) takes priority.
1721 *
1722 * See Intel spec. 25.2 "Other Causes Of VM Exits".
1723 * See Intel spec. 26.7.6 "NMI-Window Exiting".
1724 * See Intel spec. 6.7 "Nonmaskable Interrupt (NMI)".
1725 */
1726 else if ( !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
1727 && !CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx)
1728 && !TRPMHasTrap(pVCpu))
1729 {
1730 /*
1731 * VMX NMI-window VM-exit.
1732 */
1733 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
1734 && !CPUMIsGuestVmxVirtNmiBlocking(&pVCpu->cpum.GstCtx))
1735 {
1736 Assert(CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_NMI_WINDOW_EXIT));
1737 Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
1738 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* uExitQual */));
1739 AssertMsg( rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE
1740 && rc2 != VINF_VMX_VMEXIT
1741 && rc2 != VINF_NO_CHANGE, ("%Rrc\n", rc2));
1742 UPDATE_RC();
1743 }
1744 /*
1745 * VMX interrupt-window VM-exit.
1746 * This is a bit messy with the way the code below is currently structured,
1747 * but checking VMCPU_FF_INTERRUPT_NMI here (combined with CPUMAreInterruptsInhibitedByNmi
1748 * already checked at this point) should allow a pending NMI to be delivered prior to
1749 * causing an interrupt-window VM-exit.
1750 */
1751 /** @todo Restructure this later to happen after injecting NMI/causing NMI-exit, see
1752 * code in VMX R0 event delivery. */
1753 else if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
1754 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI)
1755 && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
1756 {
1757 Assert(CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INT_WINDOW_EXIT));
1758 Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
1759 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* uExitQual */));
1760 AssertMsg( rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE
1761 && rc2 != VINF_VMX_VMEXIT
1762 && rc2 != VINF_NO_CHANGE, ("%Rrc\n", rc2));
1763 UPDATE_RC();
1764 }
1765 }
1766 }
1767
1768 /*
1769 * Interrupt-window and NMI-window force flags might still be pending if we didn't actually cause
1770 * a VM-exit above. They will get cleared eventually when ANY nested-guest VM-exit occurs.
1771 * However, the force flags asserted below MUST have been cleared at this point.
1772 */
1773 Assert(!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER));
1774 }
1775# endif
1776
1777 /*
1778 * Guest event injection.
1779 */
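            /* Dispatch order note (summarizing the code below): injection is only
               attempted while the guest GIF is set; NMIs are considered before
               external interrupts; in VMX non-root mode virtual interrupts are
               checked ahead of physical interrupts, whereas in SVM nested mode
               physical interrupts come before virtual interrupts. */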
1780 Assert(!(pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI)));
1781 bool fWakeupPending = false;
1782 if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW
1783 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_NESTED_GUEST
1784 | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
1785 && !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY)
1786 && (!rc || rc >= VINF_EM_RESCHEDULE_EXEC_ENGINE)
1787 && !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx) /* Interrupt shadows block both NMIs and interrupts. */
1788 && !TRPMHasTrap(pVCpu)) /* An event could already be scheduled for dispatching. */
1789 {
1790 if (CPUMGetGuestGif(&pVCpu->cpum.GstCtx))
1791 {
1792 bool fInVmxNonRootMode;
1793 bool fInSvmHwvirtMode;
1794 if (!CPUMIsGuestInNestedHwvirtMode(&pVCpu->cpum.GstCtx))
1795 {
1796 fInVmxNonRootMode = false;
1797 fInSvmHwvirtMode = false;
1798 }
1799 else
1800 {
1801 fInVmxNonRootMode = CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx);
1802 fInSvmHwvirtMode = CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.GstCtx);
1803 }
1804
1805 /*
1806 * NMIs (take priority over external interrupts).
1807 */
1808 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI)
1809 && !CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
1810 {
1811# ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1812 if ( fInVmxNonRootMode
1813 && CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_NMI_EXIT))
1814 {
1815 /* We MUST clear the NMI force-flag here, see @bugref{10318#c19}. */
1816 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
1817 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexitXcptNmi(pVCpu));
1818 Assert(rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE);
1819 UPDATE_RC();
1820 }
1821 else
1822# endif
1823# ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1824 if ( fInSvmHwvirtMode
1825 && CPUMIsGuestSvmCtrlInterceptSet(pVCpu, &pVCpu->cpum.GstCtx, SVM_CTRL_INTERCEPT_NMI))
1826 {
1827 rc2 = VBOXSTRICTRC_VAL(IEMExecSvmVmexit(pVCpu, SVM_EXIT_NMI, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */));
1828 AssertMsg( rc2 != VINF_SVM_VMEXIT
1829 && rc2 != VINF_NO_CHANGE, ("%Rrc\n", rc2));
1830 UPDATE_RC();
1831 }
1832 else
1833# endif
1834 {
1835 rc2 = TRPMAssertTrap(pVCpu, X86_XCPT_NMI, TRPM_TRAP);
1836 if (rc2 == VINF_SUCCESS)
1837 {
1838 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
1839 fWakeupPending = true;
1840# if 0 /* HMR3IsActive is not reliable (esp. after restore), just return VINF_EM_RESCHEDULE. */
1841 if (pVM->em.s.fIemExecutesAll)
1842 rc2 = VINF_EM_RESCHEDULE;
1843 else
1844 {
1845 rc2 = HMR3IsActive(pVCpu) ? VINF_EM_RESCHEDULE_HM
1846 : VM_IS_NEM_ENABLED(pVM) ? VINF_EM_RESCHEDULE
1847 : VINF_EM_RESCHEDULE_REM;
1848 }
1849# else
1850 rc2 = VINF_EM_RESCHEDULE;
1851# endif
1852 }
1853 UPDATE_RC();
1854 }
1855 }
1856# ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1857 /** @todo NSTSVM: Handle this for SVM here too later not when an interrupt is
1858 * actually pending like we currently do. */
1859# endif
1860 /*
1861 * External interrupts.
1862 */
1863 else
1864 {
1865 /*
1866                     * VMX: virtual interrupts take priority over physical interrupts.
1867                     * SVM: physical interrupts take priority over virtual interrupts.
1868 */
1869 if ( fInVmxNonRootMode
1870 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST)
1871 && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
1872 {
1873 /** @todo NSTVMX: virtual-interrupt delivery. */
1874 rc2 = VINF_SUCCESS;
1875 }
1876 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
1877 && CPUMIsGuestPhysIntrEnabled(pVCpu))
1878 {
1879 Assert(pVCpu->em.s.enmState != EMSTATE_WAIT_SIPI);
1880 if (fInVmxNonRootMode)
1881 rc2 = emR3VmxNstGstIntrIntercept(pVCpu);
1882 else if (fInSvmHwvirtMode)
1883 rc2 = emR3SvmNstGstIntrIntercept(pVCpu);
1884 else
1885 rc2 = VINF_NO_CHANGE;
1886
1887 if (rc2 == VINF_NO_CHANGE)
1888 {
1889 bool fInjected = false;
1890 CPUM_IMPORT_EXTRN_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
1891 /** @todo this really isn't nice, should properly handle this */
1892 /* Note! This can still cause a VM-exit (on Intel). */
1893 LogFlow(("Calling TRPMR3InjectEvent: %04x:%08RX64 efl=%#x\n",
1894 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eflags));
1895 rc2 = TRPMR3InjectEvent(pVM, pVCpu, TRPM_HARDWARE_INT, &fInjected);
1896 fWakeupPending = true;
1897 if ( pVM->em.s.fIemExecutesAll
1898 && ( rc2 == VINF_EM_RESCHEDULE_REM
1899 || rc2 == VINF_EM_RESCHEDULE_EXEC_ENGINE))
1900 rc2 = VINF_EM_RESCHEDULE;
1901# ifdef VBOX_STRICT
1902 if (fInjected)
1903 rcIrq = rc2;
1904# endif
1905 }
1906 UPDATE_RC();
1907 }
1908 else if ( fInSvmHwvirtMode
1909 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST)
1910 && CPUMIsGuestSvmVirtIntrEnabled(pVCpu, &pVCpu->cpum.GstCtx))
1911 {
1912 rc2 = emR3SvmNstGstVirtIntrIntercept(pVCpu);
1913 if (rc2 == VINF_NO_CHANGE)
1914 {
1915 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST);
1916 uint8_t const uNstGstVector = CPUMGetGuestSvmVirtIntrVector(&pVCpu->cpum.GstCtx);
1917 AssertMsg(uNstGstVector > 0 && uNstGstVector <= X86_XCPT_LAST, ("Invalid VINTR %#x\n", uNstGstVector));
1918 TRPMAssertTrap(pVCpu, uNstGstVector, TRPM_HARDWARE_INT);
1919 Log(("EM: Asserting nested-guest virt. hardware intr: %#x\n", uNstGstVector));
1920 rc2 = VINF_EM_RESCHEDULE;
1921# ifdef VBOX_STRICT
1922 rcIrq = rc2;
1923# endif
1924 }
1925 UPDATE_RC();
1926 }
1927 }
1928 } /* CPUMGetGuestGif */
1929 }
1930
1931#else /* VBOX_VMM_TARGET_ARMV8 */
1932 bool fWakeupPending = false;
1933
1934 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VTIMER_ACTIVATED))
1935 {
1936 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_VTIMER_ACTIVATED);
1937
1938 fWakeupPending = true;
1939 rc2 = VINF_EM_RESCHEDULE;
1940 UPDATE_RC();
1941 }
1942#endif /* VBOX_VMM_TARGET_ARMV8 */
1943
1944 /*
1945 * Allocate handy pages.
1946 */
1947 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PGM_NEED_HANDY_PAGES, VM_FF_PGM_NO_MEMORY))
1948 {
1949 rc2 = PGMR3PhysAllocateHandyPages(pVM);
1950 UPDATE_RC();
1951 }
1952
1953 /*
1954 * Debugger Facility request.
1955 */
1956 if ( ( VM_FF_IS_SET(pVM, VM_FF_DBGF)
1957 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_DBGF) )
1958 && !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY) )
1959 {
1960 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1961 rc2 = DBGFR3VMMForcedAction(pVM, pVCpu);
1962 UPDATE_RC();
1963 }
1964
1965 /*
1966 * EMT Rendezvous (must be serviced before termination).
1967 */
1968 if ( !fWakeupPending /* don't miss the wakeup from EMSTATE_HALTED! */
1969 && VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
1970 {
1971 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1972 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
1973 UPDATE_RC();
1974 /** @todo HACK ALERT! The following test is to make sure EM+TM thinks the VM is
1975 * stopped/reset before the next VM state change is made. We need a better
1976 * solution for this, or at least make it possible to do: (rc >= VINF_EM_FIRST
1977         *        && rc <= VINF_EM_SUSPEND). */
1978 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1979 {
1980 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1981 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1982 return rc;
1983 }
1984 }
1985
1986 /*
1987 * State change request (cleared by vmR3SetStateLocked).
1988 */
1989 if ( !fWakeupPending /* don't miss the wakeup from EMSTATE_HALTED! */
1990 && VM_FF_IS_SET(pVM, VM_FF_CHECK_VM_STATE))
1991 {
1992 VMSTATE enmState = VMR3GetState(pVM);
1993 switch (enmState)
1994 {
1995 case VMSTATE_FATAL_ERROR:
1996 case VMSTATE_FATAL_ERROR_LS:
1997 case VMSTATE_GURU_MEDITATION:
1998 case VMSTATE_GURU_MEDITATION_LS:
1999 Log2(("emR3ForcedActions: %s -> VINF_EM_SUSPEND\n", VMGetStateName(enmState) ));
2000 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2001 return VINF_EM_SUSPEND;
2002
2003 case VMSTATE_DESTROYING:
2004 Log2(("emR3ForcedActions: %s -> VINF_EM_TERMINATE\n", VMGetStateName(enmState) ));
2005 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2006 return VINF_EM_TERMINATE;
2007
2008 default:
2009 AssertMsgFailed(("%s\n", VMGetStateName(enmState)));
2010 }
2011 }
2012
2013 /*
2014 * Out of memory? Since most of our fellow high priority actions may cause us
2015 * to run out of memory, we're employing VM_FF_IS_PENDING_EXCEPT and putting this
2016 * at the end rather than the start. Also, VM_FF_TERMINATE has higher priority
2017 * than us since we can terminate without allocating more memory.
2018 */
2019 if (VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
2020 {
2021 rc2 = PGMR3PhysAllocateHandyPages(pVM);
2022 UPDATE_RC();
2023 if (rc == VINF_EM_NO_MEMORY)
2024 return rc;
2025 }
2026
2027 /*
2028 * If the virtual sync clock is still stopped, make TM restart it.
2029 */
2030 if (VM_FF_IS_SET(pVM, VM_FF_TM_VIRTUAL_SYNC))
2031 TMR3VirtualSyncFF(pVM, pVCpu);
2032
2033#ifdef DEBUG
2034 /*
2035 * Debug, pause the VM.
2036 */
2037 if (VM_FF_IS_SET(pVM, VM_FF_DEBUG_SUSPEND))
2038 {
2039 VM_FF_CLEAR(pVM, VM_FF_DEBUG_SUSPEND);
2040 Log(("emR3ForcedActions: returns VINF_EM_SUSPEND\n"));
2041 return VINF_EM_SUSPEND;
2042 }
2043#endif
2044
2045 /* check that we got them all */
2046 AssertCompile(VM_FF_HIGH_PRIORITY_PRE_MASK == (VM_FF_TM_VIRTUAL_SYNC | VM_FF_DBGF | VM_FF_CHECK_VM_STATE | VM_FF_DEBUG_SUSPEND | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
2047#if defined(VBOX_VMM_TARGET_ARMV8)
2048 AssertCompile(VMCPU_FF_HIGH_PRIORITY_PRE_MASK == (VMCPU_FF_TIMER | VMCPU_FF_INTERRUPT_IRQ | VMCPU_FF_INTERRUPT_FIQ | VMCPU_FF_DBGF));
2049#else
2050 AssertCompile(VMCPU_FF_HIGH_PRIORITY_PRE_MASK == (VMCPU_FF_TIMER | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_DBGF | VMCPU_FF_INTERRUPT_NESTED_GUEST | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_PREEMPT_TIMER | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW));
2051#endif
2052 }
2053
2054#undef UPDATE_RC
2055 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2056 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2057 Assert(rcIrq == VINF_SUCCESS || rcIrq == rc);
2058 return rc;
2059}
2060
2061
2062/**
2063 * Check if the preset execution time cap restricts guest execution scheduling.
2064 *
2065 * @returns true if allowed, false otherwise
2066 * @param pVM The cross context VM structure.
2067 * @param pVCpu The cross context virtual CPU structure.
2068 */
2069bool emR3IsExecutionAllowedSlow(PVM pVM, PVMCPU pVCpu)
2070{
2071 Assert(pVM->uCpuExecutionCap != 100);
2072 uint64_t cMsUserTime;
2073 uint64_t cMsKernelTime;
2074 if (RT_SUCCESS(RTThreadGetExecutionTimeMilli(&cMsKernelTime, &cMsUserTime)))
2075 {
2076 uint64_t const msTimeNow = RTTimeMilliTS();
2077 if (pVCpu->em.s.msTimeSliceStart + EM_TIME_SLICE < msTimeNow)
2078 {
2079 /* New time slice. */
2080 pVCpu->em.s.msTimeSliceStart = msTimeNow;
2081 pVCpu->em.s.cMsTimeSliceStartExec = cMsKernelTime + cMsUserTime;
2082 pVCpu->em.s.cMsTimeSliceExec = 0;
2083 }
2084 pVCpu->em.s.cMsTimeSliceExec = cMsKernelTime + cMsUserTime - pVCpu->em.s.cMsTimeSliceStartExec;
2085
2086 bool const fRet = pVCpu->em.s.cMsTimeSliceExec < (EM_TIME_SLICE * pVM->uCpuExecutionCap) / 100;
2087 Log2(("emR3IsExecutionAllowed: start=%RX64 startexec=%RX64 exec=%RX64 (cap=%x)\n", pVCpu->em.s.msTimeSliceStart,
2088 pVCpu->em.s.cMsTimeSliceStartExec, pVCpu->em.s.cMsTimeSliceExec, (EM_TIME_SLICE * pVM->uCpuExecutionCap) / 100));
2089 return fRet;
2090 }
2091 return true;
2092}
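/*
 * Worked example (illustrative; assumes EM_TIME_SLICE is 100 ms): with
 * uCpuExecutionCap = 50 the EMT may accumulate at most (100 * 50) / 100 = 50 ms
 * of combined user+kernel execution time per 100 ms wall-clock slice.  Once
 * cMsTimeSliceExec reaches that budget, the function returns false and the
 * caller throttles guest execution until the next slice begins.
 */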
2093
2094
2095/**
2096 * Execute VM.
2097 *
2098 * This function is the main loop of the VM. The emulation thread
2099 * calls this function when the VM has been successfully constructed
2100 * and we're ready to execute the VM.
2101 *
2102 * Returning from this function means that the VM is turned off or
2103 * suspended (state already saved) and deconstruction is next in line.
2104 *
2105 * All interaction from other threads is done using forced actions
2106 * and signalling of the wait object.
2107 *
2108 * @returns VBox status code, informational status codes may indicate failure.
2109 * @param pVM The cross context VM structure.
2110 * @param pVCpu The cross context virtual CPU structure.
2111 */
2112VMMR3_INT_DECL(int) EMR3ExecuteVM(PVM pVM, PVMCPU pVCpu)
2113{
2114 Log(("EMR3ExecuteVM: pVM=%p enmVMState=%d (%s) enmState=%d (%s) enmPrevState=%d (%s)\n",
2115 pVM,
2116 pVM->enmVMState, VMR3GetStateName(pVM->enmVMState),
2117 pVCpu->em.s.enmState, emR3GetStateName(pVCpu->em.s.enmState),
2118 pVCpu->em.s.enmPrevState, emR3GetStateName(pVCpu->em.s.enmPrevState) ));
2119 VM_ASSERT_EMT(pVM);
2120 AssertMsg( pVCpu->em.s.enmState == EMSTATE_NONE
2121 || pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI
2122 || pVCpu->em.s.enmState == EMSTATE_SUSPENDED,
2123 ("%s\n", emR3GetStateName(pVCpu->em.s.enmState)));
2124
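    /* Fatal-error long jump target: inner-loop code that hits an unrecoverable
       condition longjmps back here with a non-zero status, which sends us down
       the fatal error / guru meditation path at the bottom of this function. */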
2125 int rc = setjmp(pVCpu->em.s.u.FatalLongJump);
2126 if (rc == 0)
2127 {
2128 /*
2129 * Start the virtual time.
2130 */
2131 TMR3NotifyResume(pVM, pVCpu);
2132
2133 /*
2134 * The Outer Main Loop.
2135 */
2136 bool fFFDone = false;
2137
2138 /* Reschedule right away to start in the right state. */
2139 rc = VINF_SUCCESS;
2140
2141 /* If resuming after a pause or a state load, restore the previous
2142 state or else we'll start executing code. Else, just reschedule. */
2143 if ( pVCpu->em.s.enmState == EMSTATE_SUSPENDED
2144 && ( pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
2145 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED))
2146 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
2147 else
2148 pVCpu->em.s.enmState = emR3Reschedule(pVM, pVCpu);
2149 Log(("EMR3ExecuteVM: enmState=%s\n", emR3GetStateName(pVCpu->em.s.enmState)));
2150
2151 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2152 for (;;)
2153 {
2154 /*
2155 * Before we can schedule anything (we're here because
2156 * scheduling is required) we must service any pending
2157 * forced actions to avoid any pending action causing
2158             * immediate rescheduling upon entering an inner loop.
2159 *
2160 * Do forced actions.
2161 */
2162 if ( !fFFDone
2163 && RT_SUCCESS(rc)
2164 && rc != VINF_EM_TERMINATE
2165 && rc != VINF_EM_OFF
2166 && ( VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_REM_MASK)
2167 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_ALL_REM_MASK & ~VMCPU_FF_UNHALT)))
2168 {
2169 rc = emR3ForcedActions(pVM, pVCpu, rc);
2170 VBOXVMM_EM_FF_ALL_RET(pVCpu, rc);
2171 }
2172 else if (fFFDone)
2173 fFFDone = false;
2174
2175#if defined(VBOX_STRICT) && !defined(VBOX_VMM_TARGET_ARMV8)
2176 CPUMAssertGuestRFlagsCookie(pVM, pVCpu);
2177#endif
2178
2179 /*
2180 * Now what to do?
2181 */
2182 Log2(("EMR3ExecuteVM: rc=%Rrc\n", rc));
2183 EMSTATE const enmOldState = pVCpu->em.s.enmState;
2184 switch (rc)
2185 {
2186 /*
2187 * Keep doing what we're currently doing.
2188 */
2189 case VINF_SUCCESS:
2190 break;
2191
2192 /*
2193 * Reschedule - to main execution engine (HM, NEM, IEM/REM).
2194 */
2195 case VINF_EM_RESCHEDULE_EXEC_ENGINE:
2196 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2197 if (!pVM->em.s.fIemExecutesAll)
2198 {
2199#if !defined(VBOX_VMM_TARGET_ARMV8)
2200 if (VM_IS_HM_ENABLED(pVM))
2201 {
2202 if (HMCanExecuteGuest(pVM, pVCpu, &pVCpu->cpum.GstCtx))
2203 {
2204 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_EXEC_ENGINE: %d -> %d (EMSTATE_HM)\n", enmOldState, EMSTATE_HM));
2205 pVCpu->em.s.enmState = EMSTATE_HM;
2206 break;
2207 }
2208 }
2209 else
2210#endif
2211 if (VM_IS_NEM_ENABLED(pVM) && NEMR3CanExecuteGuest(pVM, pVCpu))
2212 {
2213 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_EXEC_ENGINE: %d -> %d (EMSTATE_NEM)\n", enmOldState, EMSTATE_NEM));
2214 pVCpu->em.s.enmState = EMSTATE_NEM;
2215 break;
2216 }
2217 }
2218
2219 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_EXEC_ENGINE: %d -> %d (EMSTATE_RECOMPILER)\n", enmOldState, EMSTATE_RECOMPILER));
2220 pVCpu->em.s.enmState = EMSTATE_RECOMPILER;
2221 break;
2222
2223 /*
2224 * Reschedule - to recompiled execution.
2225 */
2226 case VINF_EM_RESCHEDULE_REM:
2227 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2228                     Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_REM: %d -> %d (EMSTATE_RECOMPILER)\n",
2229 enmOldState, EMSTATE_RECOMPILER));
2230 pVCpu->em.s.enmState = EMSTATE_RECOMPILER;
2231 break;
2232
2233 /*
2234 * Resume.
2235 */
2236 case VINF_EM_RESUME:
2237 Log2(("EMR3ExecuteVM: VINF_EM_RESUME: %d -> VINF_EM_RESCHEDULE\n", enmOldState));
2238 /* Don't reschedule in the halted or wait for SIPI case. */
2239 if ( pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
2240 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED)
2241 {
2242 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
2243 break;
2244 }
2245 /* fall through and get scheduled. */
2246 RT_FALL_THRU();
2247
2248 /*
2249 * Reschedule.
2250 */
2251 case VINF_EM_RESCHEDULE:
2252 {
2253 EMSTATE enmState = emR3Reschedule(pVM, pVCpu);
2254 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE: %d -> %d (%s)\n", enmOldState, enmState, emR3GetStateName(enmState)));
2255 pVCpu->em.s.enmState = enmState;
2256 break;
2257 }
2258
2259 /*
2260 * Halted.
2261 */
2262 case VINF_EM_HALT:
2263 Log2(("EMR3ExecuteVM: VINF_EM_HALT: %d -> %d\n", enmOldState, EMSTATE_HALTED));
2264 pVCpu->em.s.enmState = EMSTATE_HALTED;
2265 break;
2266
2267 /*
2268 * Switch to the wait for SIPI state (application processor only)
2269 */
2270 case VINF_EM_WAIT_SIPI:
2271 Assert(pVCpu->idCpu != 0);
2272 Log2(("EMR3ExecuteVM: VINF_EM_WAIT_SIPI: %d -> %d\n", enmOldState, EMSTATE_WAIT_SIPI));
2273 pVCpu->em.s.enmState = EMSTATE_WAIT_SIPI;
2274 break;
2275
2276
2277 /*
2278 * Suspend.
2279 */
2280 case VINF_EM_SUSPEND:
2281 Log2(("EMR3ExecuteVM: VINF_EM_SUSPEND: %d -> %d\n", enmOldState, EMSTATE_SUSPENDED));
2282 Assert(enmOldState != EMSTATE_SUSPENDED);
2283 pVCpu->em.s.enmPrevState = enmOldState;
2284 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
2285 break;
2286
2287 /*
2288 * Reset.
2289 * We might end up doing a double reset for now, we'll have to clean up the mess later.
2290 */
2291 case VINF_EM_RESET:
2292 {
2293 if (pVCpu->idCpu == 0)
2294 {
2295 EMSTATE enmState = emR3Reschedule(pVM, pVCpu);
2296 Log2(("EMR3ExecuteVM: VINF_EM_RESET: %d -> %d (%s)\n", enmOldState, enmState, emR3GetStateName(enmState)));
2297 pVCpu->em.s.enmState = enmState;
2298 }
2299 else
2300 {
2301 /* All other VCPUs go into the wait for SIPI state. */
2302 pVCpu->em.s.enmState = EMSTATE_WAIT_SIPI;
2303 }
2304 break;
2305 }
2306
2307 /*
2308 * Power Off.
2309 */
2310 case VINF_EM_OFF:
2311 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2312 Log2(("EMR3ExecuteVM: returns VINF_EM_OFF (%d -> %d)\n", enmOldState, EMSTATE_TERMINATING));
2313 TMR3NotifySuspend(pVM, pVCpu);
2314 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2315 return rc;
2316
2317 /*
2318 * Terminate the VM.
2319 */
2320 case VINF_EM_TERMINATE:
2321 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2322 Log(("EMR3ExecuteVM returns VINF_EM_TERMINATE (%d -> %d)\n", enmOldState, EMSTATE_TERMINATING));
2323 if (pVM->enmVMState < VMSTATE_DESTROYING) /* ugly */
2324 TMR3NotifySuspend(pVM, pVCpu);
2325 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2326 return rc;
2327
2328
2329 /*
2330 * Out of memory, suspend the VM and stuff.
2331 */
2332 case VINF_EM_NO_MEMORY:
2333 Log2(("EMR3ExecuteVM: VINF_EM_NO_MEMORY: %d -> %d\n", enmOldState, EMSTATE_SUSPENDED));
2334 Assert(enmOldState != EMSTATE_SUSPENDED);
2335 pVCpu->em.s.enmPrevState = enmOldState;
2336 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
2337 TMR3NotifySuspend(pVM, pVCpu);
2338 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2339
2340 rc = VMSetRuntimeError(pVM, VMSETRTERR_FLAGS_SUSPEND, "HostMemoryLow",
2341 N_("Unable to allocate and lock memory. The virtual machine will be paused. Please close applications to free up memory or close the VM"));
2342 if (rc != VINF_EM_SUSPEND)
2343 {
2344 if (RT_SUCCESS_NP(rc))
2345 {
2346 AssertLogRelMsgFailed(("%Rrc\n", rc));
2347 rc = VERR_EM_INTERNAL_ERROR;
2348 }
2349 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2350 }
2351 return rc;
2352
2353 /*
2354 * Guest debug events.
2355 */
2356 case VINF_EM_DBG_STEPPED:
2357 case VINF_EM_DBG_STOP:
2358 case VINF_EM_DBG_EVENT:
2359 case VINF_EM_DBG_BREAKPOINT:
2360 case VINF_EM_DBG_STEP:
2361 if (enmOldState == EMSTATE_HM)
2362 {
2363 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_HM));
2364 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_HM;
2365 }
2366 else if (enmOldState == EMSTATE_NEM)
2367 {
2368 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_NEM));
2369 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_NEM;
2370 }
2371 else if (enmOldState == EMSTATE_RECOMPILER)
2372 {
2373 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_RECOMPILER));
2374 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_RECOMPILER;
2375 }
2376 else
2377 {
2378 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_IEM));
2379 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_IEM;
2380 }
2381 break;
2382
2383 /*
2384 * Hypervisor debug events.
2385 */
2386 case VINF_EM_DBG_HYPER_STEPPED:
2387 case VINF_EM_DBG_HYPER_BREAKPOINT:
2388 case VINF_EM_DBG_HYPER_ASSERTION:
2389 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_HYPER));
2390 pVCpu->em.s.enmState = EMSTATE_DEBUG_HYPER;
2391 break;
2392
2393 /*
2394 * Triple fault.
2395 */
2396 case VINF_EM_TRIPLE_FAULT:
2397 if (!pVM->em.s.fGuruOnTripleFault)
2398 {
2399 Log(("EMR3ExecuteVM: VINF_EM_TRIPLE_FAULT: CPU reset...\n"));
2400 rc = VBOXSTRICTRC_TODO(VMR3ResetTripleFault(pVM));
2401 Log2(("EMR3ExecuteVM: VINF_EM_TRIPLE_FAULT: %d -> %d (rc=%Rrc)\n", enmOldState, pVCpu->em.s.enmState, rc));
2402 continue;
2403 }
2404 /* Else fall through and trigger a guru. */
2405 RT_FALL_THRU();
2406
2407 case VERR_VMM_RING0_ASSERTION:
2408 Log(("EMR3ExecuteVM: %Rrc: %d -> %d (EMSTATE_GURU_MEDITATION)\n", rc, enmOldState, EMSTATE_GURU_MEDITATION));
2409 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2410 break;
2411
2412 /*
2413 * Any error code showing up here other than the ones we
2414 * know and process above are considered to be FATAL.
2415 *
2416 * Unknown warnings and informational status codes are also
2417 * included in this.
2418 */
2419 default:
2420 if (RT_SUCCESS_NP(rc))
2421 {
2422 AssertMsgFailed(("Unexpected warning or informational status code %Rra!\n", rc));
2423 rc = VERR_EM_INTERNAL_ERROR;
2424 }
2425 Log(("EMR3ExecuteVM: %Rrc: %d -> %d (EMSTATE_GURU_MEDITATION)\n", rc, enmOldState, EMSTATE_GURU_MEDITATION));
2426 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2427 break;
2428 }
2429
2430 /*
2431 * Act on state transition.
2432 */
2433 EMSTATE const enmNewState = pVCpu->em.s.enmState;
2434 if (enmOldState != enmNewState)
2435 {
2436 VBOXVMM_EM_STATE_CHANGED(pVCpu, enmOldState, enmNewState, rc);
2437
2438 /* Clear MWait flags and the unhalt FF. */
2439 if ( enmOldState == EMSTATE_HALTED
2440 && ( (pVCpu->em.s.MWait.fWait & EMMWAIT_FLAG_ACTIVE)
2441 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_UNHALT))
2442 && ( enmNewState == EMSTATE_HM
2443 || enmNewState == EMSTATE_NEM
2444 || enmNewState == EMSTATE_RECOMPILER
2445 || enmNewState == EMSTATE_DEBUG_GUEST_HM
2446 || enmNewState == EMSTATE_DEBUG_GUEST_NEM
2447 || enmNewState == EMSTATE_DEBUG_GUEST_IEM
2448 || enmNewState == EMSTATE_DEBUG_GUEST_RECOMPILER) )
2449 {
2450 if (pVCpu->em.s.MWait.fWait & EMMWAIT_FLAG_ACTIVE)
2451 {
2452 LogFlow(("EMR3ExecuteVM: Clearing MWAIT\n"));
2453 pVCpu->em.s.MWait.fWait &= ~(EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0);
2454 }
2455 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_UNHALT))
2456 {
2457 LogFlow(("EMR3ExecuteVM: Clearing UNHALT\n"));
2458 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_UNHALT);
2459 }
2460 }
2461 }
2462 else
2463 VBOXVMM_EM_STATE_UNCHANGED(pVCpu, enmNewState, rc);
2464
2465 STAM_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x); /* (skip this in release) */
2466 STAM_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2467
2468 /*
2469 * Act on the new state.
2470 */
2471 switch (enmNewState)
2472 {
2473 /*
2474 * Execute hardware accelerated raw.
2475 */
2476 case EMSTATE_HM:
2477#if defined(VBOX_VMM_TARGET_ARMV8)
2478 AssertReleaseFailed(); /* Should never get here. */
2479#else
2480 rc = emR3HmExecute(pVM, pVCpu, &fFFDone);
2481#endif
2482 break;
2483
2484 /*
2485                 * Execute using the native execution manager (NEM).
2486 */
2487 case EMSTATE_NEM:
2488 rc = VBOXSTRICTRC_TODO(emR3NemExecute(pVM, pVCpu, &fFFDone));
2489 break;
2490
2491 /*
2492 * Execute recompiled.
2493 */
2494 case EMSTATE_RECOMPILER:
2495 rc = VBOXSTRICTRC_TODO(emR3RecompilerExecute(pVM, pVCpu, &fFFDone));
2496 Log2(("EMR3ExecuteVM: emR3RecompilerExecute -> %Rrc\n", rc));
2497 break;
2498
2499 /*
2500 * Execute in the interpreter.
2501 */
2502 case EMSTATE_IEM:
2503 {
2504#if 0 /* For comparing HM and IEM (@bugref{10464}). */
2505 PCPUMCTX const pCtx = &pVCpu->cpum.GstCtx;
2506 PCX86FXSTATE const pX87 = &pCtx->XState.x87;
2507 Log11(("eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
2508 "eip=%08x esp=%08x ebp=%08x eflags=%08x\n"
2509 "cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x\n"
2510 "fsw=%04x fcw=%04x ftw=%02x top=%u%s%s%s%s%s%s%s%s%s\n"
2511 "st0=%.10Rhxs st1=%.10Rhxs st2=%.10Rhxs st3=%.10Rhxs\n"
2512 "st4=%.10Rhxs st5=%.10Rhxs st6=%.10Rhxs st7=%.10Rhxs\n",
2513                            pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
2514 pCtx->eip, pCtx->esp, pCtx->ebp, pCtx->eflags.u,
2515 pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ds.Sel, pCtx->es.Sel, pCtx->fs.Sel, pCtx->gs.Sel,
2516 pX87->FSW, pX87->FCW, pX87->FTW, X86_FSW_TOP_GET(pX87->FSW),
2517 pX87->FSW & X86_FSW_ES ? " ES!" : "",
2518 pX87->FSW & X86_FSW_IE ? " IE" : "",
2519 pX87->FSW & X86_FSW_DE ? " DE" : "",
2520 pX87->FSW & X86_FSW_SF ? " SF" : "",
2521 pX87->FSW & X86_FSW_B ? " B!" : "",
2522 pX87->FSW & X86_FSW_C0 ? " C0" : "",
2523 pX87->FSW & X86_FSW_C1 ? " C1" : "",
2524 pX87->FSW & X86_FSW_C2 ? " C2" : "",
2525 pX87->FSW & X86_FSW_C3 ? " C3" : "",
2526 &pX87->aRegs[/*X86_FSW_TOP_GET_ST(pVCpu->cpum.GstCtx.XState.x87.FSW,*/(0)],
2527 &pX87->aRegs[/*X86_FSW_TOP_GET_ST(pVCpu->cpum.GstCtx.XState.x87.FSW,*/(1)],
2528 &pX87->aRegs[/*X86_FSW_TOP_GET_ST(pVCpu->cpum.GstCtx.XState.x87.FSW,*/(2)],
2529 &pX87->aRegs[/*X86_FSW_TOP_GET_ST(pVCpu->cpum.GstCtx.XState.x87.FSW,*/(3)],
2530 &pX87->aRegs[/*X86_FSW_TOP_GET_ST(pVCpu->cpum.GstCtx.XState.x87.FSW,*/(4)],
2531 &pX87->aRegs[/*X86_FSW_TOP_GET_ST(pVCpu->cpum.GstCtx.XState.x87.FSW,*/(5)],
2532 &pX87->aRegs[/*X86_FSW_TOP_GET_ST(pVCpu->cpum.GstCtx.XState.x87.FSW,*/(6)],
2533 &pX87->aRegs[/*X86_FSW_TOP_GET_ST(pVCpu->cpum.GstCtx.XState.x87.FSW,*/(7)]));
2534 DBGFR3DisasInstrCurrentLogInternal(pVCpu, NULL);
2535#endif
2536
2537 uint32_t cInstructions = 0;
2538#if 0 /* For testing purposes. */
2539 //STAM_PROFILE_START(&pVCpu->em.s.StatHmExec, x1);
2540 rc = VBOXSTRICTRC_TODO(EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE));
2541 //STAM_PROFILE_STOP(&pVCpu->em.s.StatHmExec, x1);
2542 if (rc == VINF_EM_DBG_STEPPED || rc == VINF_EM_RESCHEDULE_EXEC_ENGINE || rc == VINF_EM_RESCHEDULE_REM)
2543 rc = VINF_SUCCESS;
2544 else if (rc == VERR_EM_CANNOT_EXEC_GUEST)
2545#endif
2546 rc = VBOXSTRICTRC_TODO(IEMExecLots(pVCpu, 4096 /*cMaxInstructions*/, 2047 /*cPollRate*/, &cInstructions));
2547 if (pVM->em.s.fIemExecutesAll)
2548 {
2549 Assert(rc != VINF_EM_RESCHEDULE_REM);
2550 Assert(rc != VINF_EM_RESCHEDULE_EXEC_ENGINE);
2551#ifdef VBOX_HIGH_RES_TIMERS_HACK
2552 if (cInstructions < 2048)
2553 TMTimerPollVoid(pVM, pVCpu);
2554#endif
2555 }
2556 else
2557 rc = VINF_EM_RESCHEDULE; /* Need to check whether we can run in HM or NEM again. */
2558 fFFDone = false;
2559 break;
2560 }
2561
2562 /*
2563 * Application processor execution halted until SIPI.
2564 */
2565 case EMSTATE_WAIT_SIPI:
2566 /* no break */
2567 /*
2568 * hlt - execution halted until interrupt.
2569 */
2570 case EMSTATE_HALTED:
2571 {
2572 STAM_REL_PROFILE_START(&pVCpu->em.s.StatHalted, y);
2573                 /* If HM (or someone else) stores a pending interrupt in
2574                    TRPM, it must be dispatched ASAP without any halting.
2575                    Anything pending in TRPM has been accepted and the CPU
2576                    should already be in the right state to receive it. */
2577 if (TRPMHasTrap(pVCpu))
2578 rc = VINF_EM_RESCHEDULE;
2579#if !defined(VBOX_VMM_TARGET_ARMV8)
2580 /* MWAIT has a special extension where it's woken up when
2581 an interrupt is pending even when IF=0. */
2582 else if ( (pVCpu->em.s.MWait.fWait & (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
2583 == (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
2584 {
2585 rc = VMR3WaitHalted(pVM, pVCpu, 0 /*fFlags*/);
2586 if (rc == VINF_SUCCESS)
2587 {
2588 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
2589 APICUpdatePendingInterrupts(pVCpu);
2590
2591 if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC
2592 | VMCPU_FF_INTERRUPT_NESTED_GUEST
2593 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT))
2594 {
2595 Log(("EMR3ExecuteVM: Triggering reschedule on pending IRQ after MWAIT\n"));
2596 rc = VINF_EM_RESCHEDULE;
2597 }
2598
2599 }
2600 }
2601#endif
2602 else
2603 {
2604#if defined(VBOX_VMM_TARGET_ARMV8)
2605 const uint32_t fWaitHalted = 0; /* WFI/WFE always return when an interrupt happens. */
2606#else
2607 const uint32_t fWaitHalted = (CPUMGetGuestEFlags(pVCpu) & X86_EFL_IF) ? 0 : VMWAITHALTED_F_IGNORE_IRQS;
2608#endif
2609 rc = VMR3WaitHalted(pVM, pVCpu, fWaitHalted);
2610 /* We're only interested in NMI/SMIs here which have their own FFs, so we don't need to
2611 check VMCPU_FF_UPDATE_APIC here. */
2612 if ( rc == VINF_SUCCESS
2613#if defined(VBOX_VMM_TARGET_ARMV8)
2614 && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_VTIMER_ACTIVATED
2615 | VMCPU_FF_INTERRUPT_FIQ | VMCPU_FF_INTERRUPT_IRQ)
2616#else
2617 && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT)
2618#endif
2619 )
2620 {
2621 Log(("EMR3ExecuteVM: Triggering reschedule on pending NMI/SMI/UNHALT after HLT\n"));
2622 rc = VINF_EM_RESCHEDULE;
2623 }
2624 }
2625
2626 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatHalted, y);
2627 break;
2628 }
2629
2630 /*
2631 * Suspended - return to VM.cpp.
2632 */
2633 case EMSTATE_SUSPENDED:
2634 TMR3NotifySuspend(pVM, pVCpu);
2635 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2636 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2637 return VINF_EM_SUSPEND;
2638
2639 /*
2640 * Debugging in the guest.
2641 */
2642 case EMSTATE_DEBUG_GUEST_RAW:
2643 case EMSTATE_DEBUG_GUEST_HM:
2644 case EMSTATE_DEBUG_GUEST_NEM:
2645 case EMSTATE_DEBUG_GUEST_IEM:
2646 case EMSTATE_DEBUG_GUEST_RECOMPILER:
2647 TMR3NotifySuspend(pVM, pVCpu);
2648 rc = VBOXSTRICTRC_TODO(emR3Debug(pVM, pVCpu, rc));
2649 TMR3NotifyResume(pVM, pVCpu);
2650 Log2(("EMR3ExecuteVM: emR3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
2651 break;
2652
2653 /*
2654 * Debugging in the hypervisor.
2655 */
2656 case EMSTATE_DEBUG_HYPER:
2657 {
2658 TMR3NotifySuspend(pVM, pVCpu);
2659 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2660
2661 rc = VBOXSTRICTRC_TODO(emR3Debug(pVM, pVCpu, rc));
2662 Log2(("EMR3ExecuteVM: emR3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
2663 if (rc != VINF_SUCCESS)
2664 {
2665 if (rc == VINF_EM_OFF || rc == VINF_EM_TERMINATE)
2666 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2667 else
2668 {
2669 /* switch to guru meditation mode */
2670 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2671 VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
2672 VMMR3FatalDump(pVM, pVCpu, rc);
2673 }
2674 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2675 return rc;
2676 }
2677
2678 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2679 TMR3NotifyResume(pVM, pVCpu);
2680 break;
2681 }
2682
2683 /*
2684 * Guru meditation takes place in the debugger.
2685 */
2686 case EMSTATE_GURU_MEDITATION:
2687 {
2688 TMR3NotifySuspend(pVM, pVCpu);
2689 VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
2690 VMMR3FatalDump(pVM, pVCpu, rc);
2691 emR3Debug(pVM, pVCpu, rc);
2692 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2693 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2694 return rc;
2695 }
2696
2697 /*
2698 * The states we don't expect here.
2699 */
2700 case EMSTATE_NONE:
2701 case EMSTATE_RAW_OBSOLETE:
2702 case EMSTATE_IEM_THEN_REM_OBSOLETE:
2703 case EMSTATE_TERMINATING:
2704 default:
2705 AssertMsgFailed(("EMR3ExecuteVM: Invalid state %d!\n", pVCpu->em.s.enmState));
2706 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2707 TMR3NotifySuspend(pVM, pVCpu);
2708 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2709 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2710 return VERR_EM_INTERNAL_ERROR;
2711 }
2712 } /* The Outer Main Loop */
2713 }
2714 else
2715 {
2716 /*
2717 * Fatal error.
2718 */
2719 Log(("EMR3ExecuteVM: returns %Rrc because of longjmp / fatal error; (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(pVCpu->em.s.enmPrevState)));
2720 TMR3NotifySuspend(pVM, pVCpu);
2721 VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
2722 VMMR3FatalDump(pVM, pVCpu, rc);
2723 emR3Debug(pVM, pVCpu, rc);
2724 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2725 /** @todo change the VM state! */
2726 return rc;
2727 }
2728
2729 /* not reached */
2730}
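/*
 * Illustrative sketch only (excluded from the build): roughly how an emulation
 * thread (EMT) is expected to drive EMR3ExecuteVM().  The wrapper name
 * emR3IllustrativeEmtBody below is an assumption made for this example and is
 * not the actual EMT entry point.
 */
#if 0
static int emR3IllustrativeEmtBody(PVM pVM, PVMCPU pVCpu)
{
    /* Enter the outer main loop; it only returns once the VM is powered off,
       terminated or suspended (state already saved), per the status handling
       in EMR3ExecuteVM() above. */
    int rc = EMR3ExecuteVM(pVM, pVCpu);

    /* Returning means deconstruction is next in line for the caller. */
    return rc;
}
#endif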
2731