VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/EMAll.cpp@ 97698

Last change on this file since 97698 was 97331, checked in by vboxsync, 2 years ago

VMM/EMAll: Shut up harmless assertion in EMHistoryUpdateFlagsAndTypeAndPC that got triggered by bs3-cpu-weird-1 in VT-x mode.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 38.9 KB
Line 
1/* $Id: EMAll.cpp 97331 2022-10-28 08:42:43Z vboxsync $ */
2/** @file
3 * EM - Execution Monitor(/Manager) - All contexts
4 */
5
6/*
7 * Copyright (C) 2006-2022 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_EM
33#include <VBox/vmm/em.h>
34#include <VBox/vmm/mm.h>
35#include <VBox/vmm/selm.h>
36#include <VBox/vmm/pgm.h>
37#include <VBox/vmm/iem.h>
38#include <VBox/vmm/iom.h>
39#include <VBox/vmm/hm.h>
40#include <VBox/vmm/pdmapi.h>
41#include <VBox/vmm/vmm.h>
42#include <VBox/vmm/stam.h>
43#include "EMInternal.h"
44#include <VBox/vmm/vmcc.h>
45#include <VBox/param.h>
46#include <VBox/err.h>
47#include <VBox/dis.h>
48#include <VBox/disopcode.h>
49#include <VBox/log.h>
50#include <iprt/assert.h>
51#include <iprt/string.h>
52
53
54
55
56/**
57 * Get the current execution manager status.
58 *
59 * @returns Current status.
60 * @param pVCpu The cross context virtual CPU structure.
61 */
62VMM_INT_DECL(EMSTATE) EMGetState(PVMCPU pVCpu)
63{
64 return pVCpu->em.s.enmState;
65}
66
67
68/**
69 * Sets the current execution manager status. (use only when you know what you're doing!)
70 *
71 * @param pVCpu The cross context virtual CPU structure.
72 * @param enmNewState The new state, EMSTATE_WAIT_SIPI or EMSTATE_HALTED.
73 */
74VMM_INT_DECL(void) EMSetState(PVMCPU pVCpu, EMSTATE enmNewState)
75{
76 /* Only allowed combination: */
77 Assert(pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI && enmNewState == EMSTATE_HALTED);
78 pVCpu->em.s.enmState = enmNewState;
79}
80
81
82/**
83 * Enables / disable hypercall instructions.
84 *
85 * This interface is used by GIM to tell the execution monitors whether the
86 * hypercall instruction (VMMCALL & VMCALL) are allowed or should \#UD.
87 *
88 * @param pVCpu The cross context virtual CPU structure this applies to.
89 * @param fEnabled Whether hypercall instructions are enabled (true) or not.
90 */
91VMMDECL(void) EMSetHypercallInstructionsEnabled(PVMCPU pVCpu, bool fEnabled)
92{
93 pVCpu->em.s.fHypercallEnabled = fEnabled;
94}
95
96
97/**
98 * Checks if hypercall instructions (VMMCALL & VMCALL) are enabled or not.
99 *
100 * @returns true if enabled, false if not.
101 * @param pVCpu The cross context virtual CPU structure.
102 *
103 * @note If this call becomes a performance factor, we can make the data
104 * field available thru a read-only view in VMCPU. See VM::cpum.ro.
105 */
106VMMDECL(bool) EMAreHypercallInstructionsEnabled(PVMCPU pVCpu)
107{
108 return pVCpu->em.s.fHypercallEnabled;
109}
110
111
112/**
113 * Prepare an MWAIT - essentials of the MONITOR instruction.
114 *
115 * @returns VINF_SUCCESS
116 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
117 * @param rax The content of RAX.
118 * @param rcx The content of RCX.
119 * @param rdx The content of RDX.
120 * @param GCPhys The physical address corresponding to rax.
121 */
122VMM_INT_DECL(int) EMMonitorWaitPrepare(PVMCPU pVCpu, uint64_t rax, uint64_t rcx, uint64_t rdx, RTGCPHYS GCPhys)
123{
124 pVCpu->em.s.MWait.uMonitorRAX = rax;
125 pVCpu->em.s.MWait.uMonitorRCX = rcx;
126 pVCpu->em.s.MWait.uMonitorRDX = rdx;
127 pVCpu->em.s.MWait.fWait |= EMMWAIT_FLAG_MONITOR_ACTIVE;
128 /** @todo Make use of GCPhys. */
129 NOREF(GCPhys);
130 /** @todo Complete MONITOR implementation. */
131 return VINF_SUCCESS;
132}
133
134
135/**
136 * Checks if the monitor hardware is armed / active.
137 *
138 * @returns true if armed, false otherwise.
139 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
140 */
141VMM_INT_DECL(bool) EMMonitorIsArmed(PVMCPU pVCpu)
142{
143 return RT_BOOL(pVCpu->em.s.MWait.fWait & EMMWAIT_FLAG_MONITOR_ACTIVE);
144}
145
146
147/**
148 * Checks if we're in a MWAIT.
149 *
150 * @retval 1 if regular,
151 * @retval > 1 if MWAIT with EMMWAIT_FLAG_BREAKIRQIF0
152 * @retval 0 if not armed
153 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
154 */
155VMM_INT_DECL(unsigned) EMMonitorWaitIsActive(PVMCPU pVCpu)
156{
157 uint32_t fWait = pVCpu->em.s.MWait.fWait;
158 AssertCompile(EMMWAIT_FLAG_ACTIVE == 1);
159 AssertCompile(EMMWAIT_FLAG_BREAKIRQIF0 == 2);
160 AssertCompile((EMMWAIT_FLAG_ACTIVE << 1) == EMMWAIT_FLAG_BREAKIRQIF0);
161 return fWait & (EMMWAIT_FLAG_ACTIVE | ((fWait & EMMWAIT_FLAG_ACTIVE) << 1));
162}
163
164
/**
 * Performs an MWAIT.
 *
 * @returns VINF_EM_HALT (callers are expected to halt the vCPU).
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 * @param   rax     The content of RAX.
 * @param   rcx     The content of RCX.
 */
VMM_INT_DECL(int) EMMonitorWaitPerform(PVMCPU pVCpu, uint64_t rax, uint64_t rcx)
{
    pVCpu->em.s.MWait.uMWaitRAX = rax;
    pVCpu->em.s.MWait.uMWaitRCX = rcx;
    pVCpu->em.s.MWait.fWait |= EMMWAIT_FLAG_ACTIVE;
    /* Any non-zero RCX arms break-on-interrupt-even-if-IF=0; zero disarms it. */
    if (rcx)
        pVCpu->em.s.MWait.fWait |= EMMWAIT_FLAG_BREAKIRQIF0;
    else
        pVCpu->em.s.MWait.fWait &= ~EMMWAIT_FLAG_BREAKIRQIF0;
    /** @todo not completely correct?? */
    return VINF_EM_HALT;
}
185
186
187/**
188 * Clears any address-range monitoring that is active.
189 *
190 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
191 */
192VMM_INT_DECL(void) EMMonitorWaitClear(PVMCPU pVCpu)
193{
194 LogFlowFunc(("Clearing MWAIT\n"));
195 pVCpu->em.s.MWait.fWait &= ~(EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0);
196}
197
198
/**
 * Determine if we should continue execution in HM after encountering an mwait
 * instruction.
 *
 * Clears MWAIT flags if returning @c true.
 *
 * @returns true if we should continue, false if we should halt.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   pCtx    Current CPU context.
 */
VMM_INT_DECL(bool) EMMonitorWaitShouldContinue(PVMCPU pVCpu, PCPUMCTX pCtx)
{
    /* Nothing can be delivered while the global interrupt flag is clear. */
    if (CPUMGetGuestGif(pCtx))
    {
        /* Interrupts are deliverable either when physical interrupts are
           enabled, when a nested guest has virtual interrupts enabled, or
           when MWAIT was armed with the break-even-if-IF=0 flag. */
        if (   CPUMIsGuestPhysIntrEnabled(pVCpu)
            || (   CPUMIsGuestInNestedHwvirtMode(pCtx)
                && CPUMIsGuestVirtIntrEnabled(pVCpu))
            || (   (pVCpu->em.s.MWait.fWait & (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
                ==                            (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0)) )
        {
            /* Only continue if there actually is an interrupt pending. */
            if (VMCPU_FF_IS_ANY_SET(pVCpu, (  VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC
                                            | VMCPU_FF_INTERRUPT_NESTED_GUEST)))
            {
                /* Disarm the MWAIT before resuming execution. */
                pVCpu->em.s.MWait.fWait &= ~(EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0);
                return true;
            }
        }
    }

    return false;
}
230
231
232/**
233 * Determine if we should continue execution in HM after encountering a hlt
234 * instruction.
235 *
236 * @returns true if we should continue, false if we should halt.
237 * @param pVCpu The cross context virtual CPU structure.
238 * @param pCtx Current CPU context.
239 */
240VMM_INT_DECL(bool) EMShouldContinueAfterHalt(PVMCPU pVCpu, PCPUMCTX pCtx)
241{
242 if (CPUMGetGuestGif(pCtx))
243 {
244 if (CPUMIsGuestPhysIntrEnabled(pVCpu))
245 return VMCPU_FF_IS_ANY_SET(pVCpu, (VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC));
246
247 if ( CPUMIsGuestInNestedHwvirtMode(pCtx)
248 && CPUMIsGuestVirtIntrEnabled(pVCpu))
249 return VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST);
250 }
251 return false;
252}
253
254
/**
 * Unhalts and wakes up the given CPU.
 *
 * This is an API for assisting the KVM hypercall API in implementing KICK_CPU.
 * It sets VMCPU_FF_UNHALT for @a pVCpuDst and makes sure it is woken up.  If
 * the CPU isn't currently in a halt, the next HLT instruction it executes will
 * be affected.
 *
 * @returns GVMMR0SchedWakeUpEx result or VINF_SUCCESS depending on context.
 * @param   pVM             The cross context VM structure.
 * @param   pVCpuDst        The cross context virtual CPU structure of the
 *                          CPU to unhalt and wake up.  This is usually not the
 *                          same as the caller.
 * @thread  EMT
 */
VMM_INT_DECL(int) EMUnhaltAndWakeUp(PVMCC pVM, PVMCPUCC pVCpuDst)
{
    /*
     * Flag the current(/next) HLT to unhalt immediately.
     */
    VMCPU_FF_SET(pVCpuDst, VMCPU_FF_UNHALT);

    /*
     * Wake up the EMT (technically should be abstracted by VMM/VMEmt, but
     * just do it here for now).
     */
#ifdef IN_RING0
    /* We might be here with preemption disabled or enabled (i.e. depending on
       thread-context hooks being used), so don't try obtaining the GVMMR0 used
       lock here. See @bugref{7270#c148}. */
    int rc = GVMMR0SchedWakeUpNoGVMNoLock(pVM, pVCpuDst->idCpu);
    AssertRC(rc);

#elif defined(IN_RING3)
    /* Ring-3: notify via the user-mode per-VCpu structure; cannot fail. */
    VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, 0 /*fFlags*/);
    int rc = VINF_SUCCESS;
    RT_NOREF(pVM);

#else
    /* Nothing to do for raw-mode, shouldn't really be used by raw-mode guests anyway. */
    Assert(pVM->cCpus == 1); NOREF(pVM);
    int rc = VINF_SUCCESS;
#endif
    return rc;
}
300
301#ifndef IN_RING3
302
/**
 * Makes an I/O port write pending for ring-3 processing.
 *
 * @returns VINF_EM_PENDING_R3_IOPORT_WRITE
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   uPort   The I/O port.
 * @param   cbInstr The instruction length (for RIP updating).
 * @param   cbValue The write size.
 * @param   uValue  The value being written.
 * @sa      emR3ExecutePendingIoPortWrite
 *
 * @note    Must not be used when I/O port breakpoints are pending or when single stepping.
 */
VMMRZ_INT_DECL(VBOXSTRICTRC)
EMRZSetPendingIoPortWrite(PVMCPU pVCpu, RTIOPORT uPort, uint8_t cbInstr, uint8_t cbValue, uint32_t uValue)
{
    /* cbValue == 0 means no access is currently pending; a pending one must
       be processed by ring-3 before a new one may be queued. */
    Assert(pVCpu->em.s.PendingIoPortAccess.cbValue == 0);
    pVCpu->em.s.PendingIoPortAccess.uPort   = uPort;
    pVCpu->em.s.PendingIoPortAccess.cbValue = cbValue;
    pVCpu->em.s.PendingIoPortAccess.cbInstr = cbInstr;
    pVCpu->em.s.PendingIoPortAccess.uValue  = uValue;
    return VINF_EM_PENDING_R3_IOPORT_WRITE;
}
326
327
/**
 * Makes an I/O port read pending for ring-3 processing.
 *
 * @returns VINF_EM_PENDING_R3_IOPORT_READ
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   uPort   The I/O port.
 * @param   cbInstr The instruction length (for RIP updating).
 * @param   cbValue The read size.
 * @sa      emR3ExecutePendingIoPortRead
 *
 * @note    Must not be used when I/O port breakpoints are pending or when single stepping.
 */
VMMRZ_INT_DECL(VBOXSTRICTRC)
EMRZSetPendingIoPortRead(PVMCPU pVCpu, RTIOPORT uPort, uint8_t cbInstr, uint8_t cbValue)
{
    /* cbValue == 0 means no access is currently pending (see the write variant). */
    Assert(pVCpu->em.s.PendingIoPortAccess.cbValue == 0);
    pVCpu->em.s.PendingIoPortAccess.uPort   = uPort;
    pVCpu->em.s.PendingIoPortAccess.cbValue = cbValue;
    pVCpu->em.s.PendingIoPortAccess.cbInstr = cbInstr;
    /* Poison value making stray use of the (unused) uValue field easy to spot. */
    pVCpu->em.s.PendingIoPortAccess.uValue  = UINT32_C(0x52454144); /* 'READ' */
    return VINF_EM_PENDING_R3_IOPORT_READ;
}
350
#endif /* !IN_RING3 */
352
353
/**
 * Worker for EMHistoryExec that checks for ring-3 returns and flags
 * continuation of the EMHistoryExec run there.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   rcStrict    The status code returned by IEMExecForExits.
 * @param   pExitRec    The exit record being executed.
 */
DECL_FORCE_INLINE(void) emHistoryExecSetContinueExitRecIdx(PVMCPU pVCpu, VBOXSTRICTRC rcStrict, PCEMEXITREC pExitRec)
{
    /* Default: no continuation (UINT16_MAX = invalid record index). */
    pVCpu->em.s.idxContinueExitRec = UINT16_MAX;
#ifdef IN_RING3
    /* Already in ring-3, nothing to hand over. */
    RT_NOREF_PV(rcStrict); RT_NOREF_PV(pExitRec);
#else
    switch (VBOXSTRICTRC_VAL(rcStrict))
    {
        case VINF_SUCCESS:
        default:
            break;

        /*
         * Only status codes that EMHandleRCTmpl.h will resume EMHistoryExec with.
         */
        case VINF_IOM_R3_IOPORT_READ:           /* -> emR3ExecuteIOInstruction */
        case VINF_IOM_R3_IOPORT_WRITE:          /* -> emR3ExecuteIOInstruction */
        case VINF_IOM_R3_IOPORT_COMMIT_WRITE:   /* -> VMCPU_FF_IOM -> VINF_EM_RESUME_R3_HISTORY_EXEC -> emR3ExecuteIOInstruction */
        case VINF_IOM_R3_MMIO_READ:             /* -> emR3ExecuteInstruction */
        case VINF_IOM_R3_MMIO_WRITE:            /* -> emR3ExecuteInstruction */
        case VINF_IOM_R3_MMIO_READ_WRITE:       /* -> emR3ExecuteInstruction */
        case VINF_IOM_R3_MMIO_COMMIT_WRITE:     /* -> VMCPU_FF_IOM -> VINF_EM_RESUME_R3_HISTORY_EXEC -> emR3ExecuteIOInstruction */
        case VINF_CPUM_R3_MSR_READ:             /* -> emR3ExecuteInstruction */
        case VINF_CPUM_R3_MSR_WRITE:            /* -> emR3ExecuteInstruction */
        case VINF_GIM_R3_HYPERCALL:             /* -> emR3ExecuteInstruction */
            /* Remember which record to resume with once we're back in ring-3. */
            pVCpu->em.s.idxContinueExitRec = (uint16_t)(pExitRec - &pVCpu->em.s.aExitRecords[0]);
            break;
    }
#endif /* !IN_RING3 */
}
388
389
/**
 * Execute using history.
 *
 * This function will be called when EMHistoryAddExit() and friends returns a
 * non-NULL result.  This happens in response to probing or when probing has
 * uncovered adjacent exits which can more effectively be reached by using IEM
 * than restarting execution using the main execution engine and fielding an
 * regular exit.
 *
 * @returns VBox strict status code, see IEMExecForExits.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   pExitRec    The exit record return by a previous history add
 *                      or update call.
 * @param   fWillExit   Flags indicating to IEM what will cause exits, TBD.
 */
VMM_INT_DECL(VBOXSTRICTRC) EMHistoryExec(PVMCPUCC pVCpu, PCEMEXITREC pExitRec, uint32_t fWillExit)
{
    Assert(pExitRec);
    VMCPU_ASSERT_EMT(pVCpu);
    IEMEXECFOREXITSTATS ExecStats;
    switch (pExitRec->enmAction)
    {
        /*
         * Executes multiple instructions, stopping only when we've gone a given
         * number without perceived exits.
         */
        case EMEXITACTION_EXEC_WITH_MAX:
        {
            STAM_REL_PROFILE_START(&pVCpu->em.s.StatHistoryExec, a);
            LogFlow(("EMHistoryExec/EXEC_WITH_MAX: %RX64, max %u\n", pExitRec->uFlatPC, pExitRec->cMaxInstructionsWithoutExit));
            VBOXSTRICTRC rcStrict = IEMExecForExits(pVCpu, fWillExit,
                                                    pExitRec->cMaxInstructionsWithoutExit /* cMinInstructions*/,
                                                    pVCpu->em.s.cHistoryExecMaxInstructions,
                                                    pExitRec->cMaxInstructionsWithoutExit,
                                                    &ExecStats);
            LogFlow(("EMHistoryExec/EXEC_WITH_MAX: %Rrc cExits=%u cMaxExitDistance=%u cInstructions=%u\n",
                     VBOXSTRICTRC_VAL(rcStrict), ExecStats.cExits, ExecStats.cMaxExitDistance, ExecStats.cInstructions));
            /* Flag ring-3 continuation if IEM returned one of the resume codes. */
            emHistoryExecSetContinueExitRecIdx(pVCpu, rcStrict, pExitRec);

            /* Ignore instructions IEM doesn't know about.  (Only suppress the
               error if at least one instruction was actually executed.) */
            if (   (   rcStrict != VERR_IEM_INSTR_NOT_IMPLEMENTED
                    && rcStrict != VERR_IEM_ASPECT_NOT_IMPLEMENTED)
                || ExecStats.cInstructions == 0)
            { /* likely */ }
            else
                rcStrict = VINF_SUCCESS;

            if (ExecStats.cExits > 1)
                STAM_REL_COUNTER_ADD(&pVCpu->em.s.StatHistoryExecSavedExits, ExecStats.cExits - 1);
            STAM_REL_COUNTER_ADD(&pVCpu->em.s.StatHistoryExecInstructions, ExecStats.cInstructions);
            STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatHistoryExec, a);
            return rcStrict;
        }

        /*
         * Probe an exit for nearby exits.
         */
        case EMEXITACTION_EXEC_PROBE:
        {
            STAM_REL_PROFILE_START(&pVCpu->em.s.StatHistoryProbe, b);
            LogFlow(("EMHistoryExec/EXEC_PROBE: %RX64\n", pExitRec->uFlatPC));
            PEMEXITREC   pExitRecUnconst = (PEMEXITREC)pExitRec;
            VBOXSTRICTRC rcStrict = IEMExecForExits(pVCpu, fWillExit,
                                                    pVCpu->em.s.cHistoryProbeMinInstructions,
                                                    pVCpu->em.s.cHistoryExecMaxInstructions,
                                                    pVCpu->em.s.cHistoryProbeMaxInstructionsWithoutExit,
                                                    &ExecStats);
            LogFlow(("EMHistoryExec/EXEC_PROBE: %Rrc cExits=%u cMaxExitDistance=%u cInstructions=%u\n",
                     VBOXSTRICTRC_VAL(rcStrict), ExecStats.cExits, ExecStats.cMaxExitDistance, ExecStats.cInstructions));
            emHistoryExecSetContinueExitRecIdx(pVCpu, rcStrict, pExitRecUnconst);
            /* Probe succeeded and found adjacent exits: promote the record to
               EXEC_WITH_MAX using the observed max exit distance. */
            if (   ExecStats.cExits >= 2
                && RT_SUCCESS(rcStrict))
            {
                Assert(ExecStats.cMaxExitDistance > 0 && ExecStats.cMaxExitDistance <= 32);
                pExitRecUnconst->cMaxInstructionsWithoutExit = ExecStats.cMaxExitDistance;
                pExitRecUnconst->enmAction = EMEXITACTION_EXEC_WITH_MAX;
                LogFlow(("EMHistoryExec/EXEC_PROBE: -> EXEC_WITH_MAX %u\n", ExecStats.cMaxExitDistance));
                STAM_REL_COUNTER_INC(&pVCpu->em.s.StatHistoryProbedExecWithMax);
            }
#ifndef IN_RING3
            /* Forced to ring-3: keep the record as-is and resume probing there. */
            else if (   pVCpu->em.s.idxContinueExitRec != UINT16_MAX
                     && RT_SUCCESS(rcStrict))
            {
                STAM_REL_COUNTER_INC(&pVCpu->em.s.StatHistoryProbedToRing3);
                LogFlow(("EMHistoryExec/EXEC_PROBE: -> ring-3\n"));
            }
#endif
            /* Probe found nothing special: mark the record as a plain, probed exit. */
            else
            {
                pExitRecUnconst->enmAction = EMEXITACTION_NORMAL_PROBED;
                pVCpu->em.s.idxContinueExitRec = UINT16_MAX;
                LogFlow(("EMHistoryExec/EXEC_PROBE: -> PROBED\n"));
                STAM_REL_COUNTER_INC(&pVCpu->em.s.StatHistoryProbedNormal);
                if (   rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED
                    || rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
                    rcStrict = VINF_SUCCESS;
            }
            STAM_REL_COUNTER_ADD(&pVCpu->em.s.StatHistoryProbeInstructions, ExecStats.cInstructions);
            STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatHistoryProbe, b);
            return rcStrict;
        }

        /* We shouldn't ever see these here! */
        case EMEXITACTION_FREE_RECORD:
        case EMEXITACTION_NORMAL:
        case EMEXITACTION_NORMAL_PROBED:
            break;

        /* No default case, want compiler warnings. */
    }
    AssertLogRelFailedReturn(VERR_EM_INTERNAL_ERROR);
}
502
503
504/**
505 * Worker for emHistoryAddOrUpdateRecord.
506 */
507DECL_FORCE_INLINE(PCEMEXITREC) emHistoryRecordInit(PEMEXITREC pExitRec, uint64_t uFlatPC, uint32_t uFlagsAndType, uint64_t uExitNo)
508{
509 pExitRec->uFlatPC = uFlatPC;
510 pExitRec->uFlagsAndType = uFlagsAndType;
511 pExitRec->enmAction = EMEXITACTION_NORMAL;
512 pExitRec->bUnused = 0;
513 pExitRec->cMaxInstructionsWithoutExit = 64;
514 pExitRec->uLastExitNo = uExitNo;
515 pExitRec->cHits = 1;
516 return NULL;
517}
518
519
/**
 * Worker for emHistoryAddOrUpdateRecord - claims a free slot for a new record.
 *
 * @returns NULL (via emHistoryRecordInit).
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   pHistEntry      The history entry to link to the slot.
 * @param   idxSlot         The hash table slot being claimed.
 * @param   pExitRec        The (free) exit record to initialize.
 * @param   uFlatPC         The flattened program counter.
 * @param   uFlagsAndType   Combined flags and type.
 * @param   uExitNo         The current exit number.
 */
DECL_FORCE_INLINE(PCEMEXITREC) emHistoryRecordInitNew(PVMCPU pVCpu, PEMEXITENTRY pHistEntry, uintptr_t idxSlot,
                                                      PEMEXITREC pExitRec, uint64_t uFlatPC,
                                                      uint32_t uFlagsAndType, uint64_t uExitNo)
{
    pHistEntry->idxSlot = (uint32_t)idxSlot;
    /* Bump the usage counter before logging so the log shows the new total. */
    pVCpu->em.s.cExitRecordUsed++;
    LogFlow(("emHistoryRecordInitNew: [%#x] = %#07x %016RX64; (%u of %u used)\n", idxSlot, uFlagsAndType, uFlatPC,
             pVCpu->em.s.cExitRecordUsed, RT_ELEMENTS(pVCpu->em.s.aExitRecords) ));
    return emHistoryRecordInit(pExitRec, uFlatPC, uFlagsAndType, uExitNo);
}
533
534
/**
 * Worker for emHistoryAddOrUpdateRecord - replaces an in-use record (LRU eviction).
 *
 * @returns NULL (via emHistoryRecordInit).
 * @param   pHistEntry      The history entry to link to the slot.
 * @param   idxSlot         The hash table slot being reused.
 * @param   pExitRec        The exit record being overwritten.
 * @param   uFlatPC         The flattened program counter.
 * @param   uFlagsAndType   Combined flags and type.
 * @param   uExitNo         The current exit number.
 */
DECL_FORCE_INLINE(PCEMEXITREC) emHistoryRecordInitReplacement(PEMEXITENTRY pHistEntry, uintptr_t idxSlot,
                                                              PEMEXITREC pExitRec, uint64_t uFlatPC,
                                                              uint32_t uFlagsAndType, uint64_t uExitNo)
{
    pHistEntry->idxSlot = (uint32_t)idxSlot;
    /* Log the old record's details before emHistoryRecordInit overwrites them. */
    LogFlow(("emHistoryRecordInitReplacement: [%#x] = %#07x %016RX64 replacing %#07x %016RX64 with %u hits, %u exits old\n",
             idxSlot, uFlagsAndType, uFlatPC, pExitRec->uFlagsAndType, pExitRec->uFlatPC, pExitRec->cHits,
             uExitNo - pExitRec->uLastExitNo));
    return emHistoryRecordInit(pExitRec, uFlatPC, uFlagsAndType, uExitNo);
}
548
549
/**
 * Adds or updates the EMEXITREC for this PC/type and decide on an action.
 *
 * @returns Pointer to an exit record if special action should be taken using
 *          EMHistoryExec().  Take normal exit action when NULL.
 *
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   uFlagsAndType   Combined flags and type, EMEXIT_F_KIND_EM set and
 *                          both EMEXIT_F_CS_EIP and EMEXIT_F_UNFLATTENED_PC are clear.
 * @param   uFlatPC         The flattened program counter.
 * @param   pHistEntry      The exit history entry.
 * @param   uExitNo         The current exit number.
 */
static PCEMEXITREC emHistoryAddOrUpdateRecord(PVMCPU pVCpu, uint64_t uFlagsAndType, uint64_t uFlatPC,
                                              PEMEXITENTRY pHistEntry, uint64_t uExitNo)
{
# ifdef IN_RING0
    /* Disregard the hm flag. */
    uFlagsAndType &= ~EMEXIT_F_HM;
# endif

    /*
     * Work the hash table.
     *
     * Open addressing: the primary slot is derived from the PC; collisions are
     * resolved by stepping with a second, PC-derived increment (see below).
     */
    AssertCompile(RT_ELEMENTS(pVCpu->em.s.aExitRecords) == 1024);
# define EM_EXIT_RECORDS_IDX_MASK 0x3ff
    uintptr_t  idxSlot  = ((uintptr_t)uFlatPC >> 1) & EM_EXIT_RECORDS_IDX_MASK;
    PEMEXITREC pExitRec = &pVCpu->em.s.aExitRecords[idxSlot];
    if (pExitRec->uFlatPC == uFlatPC)
    {
        /* Primary slot hit. */
        Assert(pExitRec->enmAction != EMEXITACTION_FREE_RECORD);
        pHistEntry->idxSlot = (uint32_t)idxSlot;
        if (pExitRec->uFlagsAndType == uFlagsAndType)
        {
            pExitRec->uLastExitNo = uExitNo;
            STAM_REL_COUNTER_INC(&pVCpu->em.s.aStatHistoryRecHits[0]);
        }
        else
        {
            /* Same PC, different exit type: start over with a fresh record. */
            STAM_REL_COUNTER_INC(&pVCpu->em.s.aStatHistoryRecTypeChanged[0]);
            return emHistoryRecordInit(pExitRec, uFlatPC, uFlagsAndType, uExitNo);
        }
    }
    else if (pExitRec->enmAction == EMEXITACTION_FREE_RECORD)
    {
        /* Primary slot free: claim it. */
        STAM_REL_COUNTER_INC(&pVCpu->em.s.aStatHistoryRecNew[0]);
        return emHistoryRecordInitNew(pVCpu, pHistEntry, idxSlot, pExitRec, uFlatPC, uFlagsAndType, uExitNo);
    }
    else
    {
        /*
         * Collision. We calculate a new hash for stepping away from the first,
         * doing up to 8 steps away before replacing the least recently used record.
         */
        uintptr_t idxOldest     = idxSlot;
        uint64_t  uOldestExitNo = pExitRec->uLastExitNo;
        unsigned  iOldestStep   = 0;
        unsigned  iStep         = 1;
        /* Step increment from different PC bits than the primary hash. */
        uintptr_t const idxAdd  = (uintptr_t)(uFlatPC >> 11) & (EM_EXIT_RECORDS_IDX_MASK / 4);
        for (;;)
        {
            Assert(iStep < RT_ELEMENTS(pVCpu->em.s.aStatHistoryRecHits));
            AssertCompile(RT_ELEMENTS(pVCpu->em.s.aStatHistoryRecNew) == RT_ELEMENTS(pVCpu->em.s.aStatHistoryRecHits));
            AssertCompile(RT_ELEMENTS(pVCpu->em.s.aStatHistoryRecReplaced) == RT_ELEMENTS(pVCpu->em.s.aStatHistoryRecHits));
            AssertCompile(RT_ELEMENTS(pVCpu->em.s.aStatHistoryRecTypeChanged) == RT_ELEMENTS(pVCpu->em.s.aStatHistoryRecHits));

            /* Step to the next slot. */
            idxSlot += idxAdd;
            idxSlot &= EM_EXIT_RECORDS_IDX_MASK;
            pExitRec = &pVCpu->em.s.aExitRecords[idxSlot];

            /* Does it match? */
            if (pExitRec->uFlatPC == uFlatPC)
            {
                Assert(pExitRec->enmAction != EMEXITACTION_FREE_RECORD);
                pHistEntry->idxSlot = (uint32_t)idxSlot;
                if (pExitRec->uFlagsAndType == uFlagsAndType)
                {
                    pExitRec->uLastExitNo = uExitNo;
                    STAM_REL_COUNTER_INC(&pVCpu->em.s.aStatHistoryRecHits[iStep]);
                    break;
                }
                STAM_REL_COUNTER_INC(&pVCpu->em.s.aStatHistoryRecTypeChanged[iStep]);
                return emHistoryRecordInit(pExitRec, uFlatPC, uFlagsAndType, uExitNo);
            }

            /* Is it free? */
            if (pExitRec->enmAction == EMEXITACTION_FREE_RECORD)
            {
                STAM_REL_COUNTER_INC(&pVCpu->em.s.aStatHistoryRecNew[iStep]);
                return emHistoryRecordInitNew(pVCpu, pHistEntry, idxSlot, pExitRec, uFlatPC, uFlagsAndType, uExitNo);
            }

            /* Is it the least recently used one? */
            if (pExitRec->uLastExitNo < uOldestExitNo)
            {
                uOldestExitNo = pExitRec->uLastExitNo;
                idxOldest     = idxSlot;
                iOldestStep   = iStep;
            }

            /* Next iteration? */
            iStep++;
            Assert(iStep < RT_ELEMENTS(pVCpu->em.s.aStatHistoryRecReplaced));
            if (RT_LIKELY(iStep < 8 + 1))
            { /* likely */ }
            else
            {
                /* Replace the least recently used slot. */
                STAM_REL_COUNTER_INC(&pVCpu->em.s.aStatHistoryRecReplaced[iOldestStep]);
                pExitRec = &pVCpu->em.s.aExitRecords[idxOldest];
                return emHistoryRecordInitReplacement(pHistEntry, idxOldest, pExitRec, uFlatPC, uFlagsAndType, uExitNo);
            }
        }
    }

    /*
     * Found an existing record.
     */
    switch (pExitRec->enmAction)
    {
        case EMEXITACTION_NORMAL:
        {
            /* After 256 hits, start probing for nearby exits. */
            uint64_t const cHits = ++pExitRec->cHits;
            if (cHits < 256)
                return NULL;
            LogFlow(("emHistoryAddOrUpdateRecord: [%#x] %#07x %16RX64: -> EXEC_PROBE\n", idxSlot, uFlagsAndType, uFlatPC));
            pExitRec->enmAction = EMEXITACTION_EXEC_PROBE;
            return pExitRec;
        }

        case EMEXITACTION_NORMAL_PROBED:
            pExitRec->cHits += 1;
            return NULL;

        default:
            pExitRec->cHits += 1;
            return pExitRec;

        /* This will happen if the caller ignores or cannot serve the probe
           request (forced to ring-3, whatever).  We retry this 256 times. */
        case EMEXITACTION_EXEC_PROBE:
        {
            uint64_t const cHits = ++pExitRec->cHits;
            if (cHits < 512)
                return pExitRec;
            pExitRec->enmAction = EMEXITACTION_NORMAL_PROBED;
            LogFlow(("emHistoryAddOrUpdateRecord: [%#x] %#07x %16RX64: -> PROBED\n", idxSlot, uFlagsAndType, uFlatPC));
            return NULL;
        }
    }
}
702
703
/**
 * Adds an exit to the history for this CPU.
 *
 * @returns Pointer to an exit record if special action should be taken using
 *          EMHistoryExec().  Take normal exit action when NULL.
 *
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   uFlagsAndType   Combined flags and type (see EMEXIT_MAKE_FT).
 * @param   uFlatPC         The flattened program counter (RIP).  UINT64_MAX if not available.
 * @param   uTimestamp      The TSC value for the exit, 0 if not available.
 * @thread  EMT(pVCpu)
 */
VMM_INT_DECL(PCEMEXITREC) EMHistoryAddExit(PVMCPUCC pVCpu, uint32_t uFlagsAndType, uint64_t uFlatPC, uint64_t uTimestamp)
{
    VMCPU_ASSERT_EMT(pVCpu);

    /*
     * Add the exit history entry (256-entry ring buffer indexed by exit number).
     */
    AssertCompile(RT_ELEMENTS(pVCpu->em.s.aExitHistory) == 256);
    uint64_t     uExitNo    = pVCpu->em.s.iNextExit++;
    PEMEXITENTRY pHistEntry = &pVCpu->em.s.aExitHistory[(uintptr_t)uExitNo & 0xff];
    pHistEntry->uFlatPC       = uFlatPC;
    pHistEntry->uTimestamp    = uTimestamp;
    pHistEntry->uFlagsAndType = uFlagsAndType;
    pHistEntry->idxSlot       = UINT32_MAX;   /* not (yet) in the exit record hash table */

    /*
     * If common exit type, we will insert/update the exit into the exit record hash table.
     */
    if (   (uFlagsAndType & (EMEXIT_F_KIND_MASK | EMEXIT_F_CS_EIP | EMEXIT_F_UNFLATTENED_PC)) == EMEXIT_F_KIND_EM
#ifdef IN_RING0
        && pVCpu->em.s.fExitOptimizationEnabledR0
        && ( !(uFlagsAndType & EMEXIT_F_HM) || pVCpu->em.s.fExitOptimizationEnabledR0PreemptDisabled)
#else
        && pVCpu->em.s.fExitOptimizationEnabled
#endif
        && uFlatPC != UINT64_MAX
       )
        return emHistoryAddOrUpdateRecord(pVCpu, uFlagsAndType, uFlatPC, pHistEntry, uExitNo);
    return NULL;
}
746
747
748/**
749 * Interface that VT-x uses to supply the PC of an exit when CS:RIP is being read.
750 *
751 * @param pVCpu The cross context virtual CPU structure.
752 * @param uFlatPC The flattened program counter (RIP).
753 * @param fFlattened Set if RIP was subjected to CS.BASE, clear if not.
754 */
755VMM_INT_DECL(void) EMHistoryUpdatePC(PVMCPUCC pVCpu, uint64_t uFlatPC, bool fFlattened)
756{
757 VMCPU_ASSERT_EMT(pVCpu);
758
759 AssertCompile(RT_ELEMENTS(pVCpu->em.s.aExitHistory) == 256);
760 uint64_t uExitNo = pVCpu->em.s.iNextExit - 1;
761 PEMEXITENTRY pHistEntry = &pVCpu->em.s.aExitHistory[(uintptr_t)uExitNo & 0xff];
762 pHistEntry->uFlatPC = uFlatPC;
763 if (fFlattened)
764 pHistEntry->uFlagsAndType &= ~EMEXIT_F_UNFLATTENED_PC;
765 else
766 pHistEntry->uFlagsAndType |= EMEXIT_F_UNFLATTENED_PC;
767}
768
769
/**
 * Interface for converting an engine specific exit to a generic one and get guidance.
 *
 * @returns Pointer to an exit record if special action should be taken using
 *          EMHistoryExec().  Take normal exit action when NULL.
 *
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   uFlagsAndType   Combined flags and type (see EMEXIT_MAKE_FLAGS_AND_TYPE).
 * @thread  EMT(pVCpu)
 */
VMM_INT_DECL(PCEMEXITREC) EMHistoryUpdateFlagsAndType(PVMCPUCC pVCpu, uint32_t uFlagsAndType)
{
    VMCPU_ASSERT_EMT(pVCpu);

    /*
     * Do the updating.  Only the flags/type are replaced; the PC-related bits
     * previously recorded for this entry are preserved.
     */
    AssertCompile(RT_ELEMENTS(pVCpu->em.s.aExitHistory) == 256);
    uint64_t     uExitNo    = pVCpu->em.s.iNextExit - 1;
    PEMEXITENTRY pHistEntry = &pVCpu->em.s.aExitHistory[(uintptr_t)uExitNo & 0xff];
    pHistEntry->uFlagsAndType = uFlagsAndType | (pHistEntry->uFlagsAndType & (EMEXIT_F_CS_EIP | EMEXIT_F_UNFLATTENED_PC));

    /*
     * If common exit type, we will insert/update the exit into the exit record hash table.
     */
    if (   (uFlagsAndType & (EMEXIT_F_KIND_MASK | EMEXIT_F_CS_EIP | EMEXIT_F_UNFLATTENED_PC)) == EMEXIT_F_KIND_EM
#ifdef IN_RING0
        && pVCpu->em.s.fExitOptimizationEnabledR0
        && ( !(uFlagsAndType & EMEXIT_F_HM) || pVCpu->em.s.fExitOptimizationEnabledR0PreemptDisabled)
#else
        && pVCpu->em.s.fExitOptimizationEnabled
#endif
        && pHistEntry->uFlatPC != UINT64_MAX
       )
        return emHistoryAddOrUpdateRecord(pVCpu, uFlagsAndType, pHistEntry->uFlatPC, pHistEntry, uExitNo);
    return NULL;
}
807
808
/**
 * Interface for converting an engine specific exit to a generic one and get
 * guidance, supplying flattened PC too.
 *
 * @returns Pointer to an exit record if special action should be taken using
 *          EMHistoryExec().  Take normal exit action when NULL.
 *
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   uFlagsAndType   Combined flags and type (see EMEXIT_MAKE_FLAGS_AND_TYPE).
 * @param   uFlatPC         The flattened program counter (RIP).
 * @thread  EMT(pVCpu)
 */
VMM_INT_DECL(PCEMEXITREC) EMHistoryUpdateFlagsAndTypeAndPC(PVMCPUCC pVCpu, uint32_t uFlagsAndType, uint64_t uFlatPC)
{
    VMCPU_ASSERT_EMT(pVCpu);
    //Assert(uFlatPC != UINT64_MAX); - disable to make the pc wrapping tests in bs3-cpu-weird-1 work.

    /*
     * Do the updating.  Both the flags/type and the PC of the most recent
     * history entry are replaced.
     */
    AssertCompile(RT_ELEMENTS(pVCpu->em.s.aExitHistory) == 256);
    uint64_t     uExitNo    = pVCpu->em.s.iNextExit - 1;
    PEMEXITENTRY pHistEntry = &pVCpu->em.s.aExitHistory[(uintptr_t)uExitNo & 0xff];
    pHistEntry->uFlagsAndType = uFlagsAndType;
    pHistEntry->uFlatPC       = uFlatPC;

    /*
     * If common exit type, we will insert/update the exit into the exit record hash table.
     */
    if (   (uFlagsAndType & (EMEXIT_F_KIND_MASK | EMEXIT_F_CS_EIP | EMEXIT_F_UNFLATTENED_PC)) == EMEXIT_F_KIND_EM
#ifdef IN_RING0
        && pVCpu->em.s.fExitOptimizationEnabledR0
        && ( !(uFlagsAndType & EMEXIT_F_HM) || pVCpu->em.s.fExitOptimizationEnabledR0PreemptDisabled)
#else
        && pVCpu->em.s.fExitOptimizationEnabled
#endif
       )
        return emHistoryAddOrUpdateRecord(pVCpu, uFlagsAndType, uFlatPC, pHistEntry, uExitNo);
    return NULL;
}
849
850
/**
 * @callback_method_impl{FNDISREADBYTES}
 *
 * Reads guest instruction bytes for the disassembler, clamping the read so it
 * does not cross a guest page boundary.
 */
static DECLCALLBACK(int) emReadBytes(PDISCPUSTATE pDis, uint8_t offInstr, uint8_t cbMinRead, uint8_t cbMaxRead)
{
    PVMCPUCC    pVCpu    = (PVMCPUCC)pDis->pvUser;
    RTUINTPTR   uSrcAddr = pDis->uInstrAddr + offInstr;

    /*
     * Figure how much we can or must read.
     * Start with everything up to the end of the current guest page, then
     * clamp to the disassembler's [cbMinRead, cbMaxRead] window.
     */
    size_t      cbToRead = GUEST_PAGE_SIZE - (uSrcAddr & (GUEST_PAGE_SIZE - 1));
    if (cbToRead > cbMaxRead)
        cbToRead = cbMaxRead;
    else if (cbToRead < cbMinRead)
        cbToRead = cbMinRead;   /* may cross into the next page; retried below on failure */

    int rc = PGMPhysSimpleReadGCPtr(pVCpu, &pDis->abInstr[offInstr], uSrcAddr, cbToRead);
    if (RT_FAILURE(rc))
    {
        /* The larger read may have failed on the second page; retry with the minimum. */
        if (cbToRead > cbMinRead)
        {
            cbToRead = cbMinRead;
            rc = PGMPhysSimpleReadGCPtr(pVCpu, &pDis->abInstr[offInstr], uSrcAddr, cbToRead);
        }
        if (RT_FAILURE(rc))
        {
            /*
             * If we fail to find the page via the guest's page tables
             * we invalidate the page in the host TLB (pertaining to
             * the guest in the NestedPaging case). See @bugref{6043}.
             */
            if (rc == VERR_PAGE_TABLE_NOT_PRESENT || rc == VERR_PAGE_NOT_PRESENT)
            {
                HMInvalidatePage(pVCpu, uSrcAddr);
                /* If the read spanned two pages, invalidate the second one too. */
                if (((uSrcAddr + cbToRead - 1) >> GUEST_PAGE_SHIFT) != (uSrcAddr >> GUEST_PAGE_SHIFT))
                    HMInvalidatePage(pVCpu, uSrcAddr + cbToRead - 1);
            }
        }
    }

    /* Tell the disassembler how many bytes are now valid in abInstr. */
    pDis->cbCachedInstr = offInstr + (uint8_t)cbToRead;
    return rc;
}
895
896
897/**
898 * Disassembles the current instruction.
899 *
900 * @returns VBox status code, see SELMToFlatEx and EMInterpretDisasOneEx for
901 * details.
902 *
903 * @param pVCpu The cross context virtual CPU structure.
904 * @param pDis Where to return the parsed instruction info.
905 * @param pcbInstr Where to return the instruction size. (optional)
906 */
907VMM_INT_DECL(int) EMInterpretDisasCurrent(PVMCPUCC pVCpu, PDISCPUSTATE pDis, unsigned *pcbInstr)
908{
909 PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
910 RTGCPTR GCPtrInstr;
911#if 0
912 int rc = SELMToFlatEx(pVCpu, DISSELREG_CS, pCtx, pCtx->rip, 0, &GCPtrInstr);
913#else
914/** @todo Get the CPU mode as well while we're at it! */
915 int rc = SELMValidateAndConvertCSAddr(pVCpu, pCtx->eflags.u, pCtx->ss.Sel, pCtx->cs.Sel, &pCtx->cs, pCtx->rip, &GCPtrInstr);
916#endif
917 if (RT_SUCCESS(rc))
918 return EMInterpretDisasOneEx(pVCpu, (RTGCUINTPTR)GCPtrInstr, pDis, pcbInstr);
919
920 Log(("EMInterpretDisasOne: Failed to convert %RTsel:%RGv (cpl=%d) - rc=%Rrc !!\n",
921 pCtx->cs.Sel, (RTGCPTR)pCtx->rip, pCtx->ss.Sel & X86_SEL_RPL, rc));
922 return rc;
923}
924
925
926/**
927 * Disassembles one instruction.
928 *
929 * This is used by internally by the interpreter and by trap/access handlers.
930 *
931 * @returns VBox status code.
932 *
933 * @param pVCpu The cross context virtual CPU structure.
934 * @param GCPtrInstr The flat address of the instruction.
935 * @param pDis Where to return the parsed instruction info.
936 * @param pcbInstr Where to return the instruction size. (optional)
937 */
938VMM_INT_DECL(int) EMInterpretDisasOneEx(PVMCPUCC pVCpu, RTGCUINTPTR GCPtrInstr, PDISCPUSTATE pDis, unsigned *pcbInstr)
939{
940 DISCPUMODE enmCpuMode = CPUMGetGuestDisMode(pVCpu);
941 /** @todo Deal with too long instruction (=> \#GP), opcode read errors (=>
942 * \#PF, \#GP, \#??), undefined opcodes (=> \#UD), and such. */
943 int rc = DISInstrWithReader(GCPtrInstr, enmCpuMode, emReadBytes, pVCpu, pDis, pcbInstr);
944 if (RT_SUCCESS(rc))
945 return VINF_SUCCESS;
946 AssertMsg(rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT, ("DISCoreOne failed to GCPtrInstr=%RGv rc=%Rrc\n", GCPtrInstr, rc));
947 return rc;
948}
949
950
951/**
952 * Interprets the current instruction.
953 *
954 * @returns VBox status code.
955 * @retval VINF_* Scheduling instructions.
956 * @retval VERR_EM_INTERPRETER Something we can't cope with.
957 * @retval VERR_* Fatal errors.
958 *
959 * @param pVCpu The cross context virtual CPU structure.
960 *
961 * @remark Invalid opcode exceptions have a higher priority than \#GP (see
962 * Intel Architecture System Developers Manual, Vol 3, 5.5) so we don't
963 * need to worry about e.g. invalid modrm combinations (!)
964 */
965VMM_INT_DECL(VBOXSTRICTRC) EMInterpretInstruction(PVMCPUCC pVCpu)
966{
967 LogFlow(("EMInterpretInstruction %RGv\n", (RTGCPTR)CPUMGetGuestRIP(pVCpu)));
968
969 VBOXSTRICTRC rc = IEMExecOneBypassEx(pVCpu, NULL /*pcbWritten*/);
970 if (RT_UNLIKELY( rc == VERR_IEM_ASPECT_NOT_IMPLEMENTED
971 || rc == VERR_IEM_INSTR_NOT_IMPLEMENTED))
972 rc = VERR_EM_INTERPRETER;
973 if (rc != VINF_SUCCESS)
974 Log(("EMInterpretInstruction: returns %Rrc\n", VBOXSTRICTRC_VAL(rc)));
975
976 return rc;
977}
978
979
980/**
981 * Interprets the current instruction using the supplied DISCPUSTATE structure.
982 *
983 * IP/EIP/RIP *IS* updated!
984 *
985 * @returns VBox strict status code.
986 * @retval VINF_* Scheduling instructions. When these are returned, it
987 * starts to get a bit tricky to know whether code was
988 * executed or not... We'll address this when it becomes a problem.
989 * @retval VERR_EM_INTERPRETER Something we can't cope with.
990 * @retval VERR_* Fatal errors.
991 *
992 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
993 * @param pDis The disassembler cpu state for the instruction to be
994 * interpreted.
995 * @param rip The instruction pointer value.
996 *
997 * @remark Invalid opcode exceptions have a higher priority than GP (see Intel
998 * Architecture System Developers Manual, Vol 3, 5.5) so we don't need
999 * to worry about e.g. invalid modrm combinations (!)
1000 *
1001 * @todo At this time we do NOT check if the instruction overwrites vital information.
1002 * Make sure this can't happen!! (will add some assertions/checks later)
1003 */
1004VMM_INT_DECL(VBOXSTRICTRC) EMInterpretInstructionDisasState(PVMCPUCC pVCpu, PDISCPUSTATE pDis, uint64_t rip)
1005{
1006 LogFlow(("EMInterpretInstructionDisasState %RGv\n", (RTGCPTR)rip));
1007
1008 VBOXSTRICTRC rc = IEMExecOneBypassWithPrefetchedByPC(pVCpu, rip, pDis->abInstr, pDis->cbCachedInstr);
1009 if (RT_UNLIKELY( rc == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1010 || rc == VERR_IEM_INSTR_NOT_IMPLEMENTED))
1011 rc = VERR_EM_INTERPRETER;
1012
1013 if (rc != VINF_SUCCESS)
1014 Log(("EMInterpretInstructionDisasState: returns %Rrc\n", VBOXSTRICTRC_VAL(rc)));
1015
1016 return rc;
1017}
1018
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette