VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllThrdFuncsBltIn.cpp@ 105724

Last change on this file since 105724 was 105698, checked in by vboxsync, 3 months ago

VMM/IEM,TM: Adaptive timer polling and running of the timer queues from the IEM recompiler execution loop. bugref:10656

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 36.2 KB
/* $Id: IEMAllThrdFuncsBltIn.cpp 105698 2024-08-15 23:33:49Z vboxsync $ */
/** @file
 * IEM - Instruction Decoding and Emulation, Built-in Threaded Functions.
 *
 * This is separate from IEMThreadedFunctions.cpp because it doesn't work
 * with IEM_WITH_OPAQUE_DECODER_STATE defined.
 */

/*
 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_IEM_RE_THREADED
#define VMCPU_INCL_CPUM_GST_CTX
#include <VBox/vmm/iem.h>
#include <VBox/vmm/cpum.h>
#include <VBox/vmm/apic.h>
#include <VBox/vmm/pdm.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/iom.h>
#include <VBox/vmm/em.h>
#include <VBox/vmm/hm.h>
#include <VBox/vmm/nem.h>
#include <VBox/vmm/gim.h>
#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
# include <VBox/vmm/em.h>
# include <VBox/vmm/hm_svm.h>
#endif
#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
# include <VBox/vmm/hmvmxinline.h>
#endif
#include <VBox/vmm/tm.h>
#include <VBox/vmm/dbgf.h>
#include <VBox/vmm/dbgftrace.h>
#include "IEMInternal.h"
#include <VBox/vmm/vmcc.h>
#include <VBox/log.h>
#include <VBox/err.h>
#include <VBox/param.h>
#include <VBox/dis.h>
#include <VBox/disopcode-x86-amd64.h>
#include <iprt/asm-math.h>
#include <iprt/assert.h>
#include <iprt/string.h>
#include <iprt/x86.h>

#include "IEMInline.h"


static VBOXSTRICTRC iemThreadeFuncWorkerObsoleteTb(PVMCPUCC pVCpu)
{
    /* We set fSafeToFree to false, as we're being called in the context of
       a TB callback function, which for native TBs means we cannot release
       the executable memory until we've returned all the way back to
       iemTbExec, since that return path goes via the native code generated
       for the TB. */
    iemThreadedTbObsolete(pVCpu, pVCpu->iem.s.pCurTbR3, false /*fSafeToFree*/);
    return VINF_IEM_REEXEC_BREAK;
}


/**
 * Built-in function that does absolutely nothing - for debugging.
 *
 * This can be used for artificially increasing the number of calls generated,
 * or for triggering flushes associated with threaded calls.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_Nop)
{
    RT_NOREF(pVCpu, uParam0, uParam1, uParam2);
    return VINF_SUCCESS;
}


/**
 * This is also called from iemNativeHlpAsmSafeWrapLogCpuState.
 */
DECLASM(void) iemThreadedFunc_BltIn_LogCpuStateWorker(PVMCPU pVCpu)
{
#ifdef LOG_ENABLED
    PCIEMTB const      pTb     = pVCpu->iem.s.pCurTbR3;
    PCX86FXSTATE const pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
    Log2(("**** LG%c fExec=%x pTb=%p cUsed=%u\n"
          " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
          " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
          " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
          " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
          , pTb && (pTb->fFlags & IEMTB_F_TYPE_NATIVE) ? 'n' : 't', pVCpu->iem.s.fExec, pTb, pTb ? pTb->cUsed : 0,
          pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
          pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
          pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
          pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
          pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK ));
#else
    RT_NOREF(pVCpu);
#endif
}


/**
 * Built-in function that logs the current CPU state - for debugging.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_LogCpuState)
{
    iemThreadedFunc_BltIn_LogCpuStateWorker(pVCpu);
    RT_NOREF(uParam0, uParam1, uParam2);
    return VINF_SUCCESS;
}


/**
 * Built-in function that calls a C-implementation function taking zero
 * arguments.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_DeferToCImpl0)
{
    PFNIEMCIMPL0 const pfnCImpl = (PFNIEMCIMPL0)(uintptr_t)uParam0;
    uint8_t const      cbInstr  = (uint8_t)uParam1;
    RT_NOREF(uParam2);
    return pfnCImpl(pVCpu, cbInstr);
}
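
/*
 * A minimal sketch of how the three uParamN slots can be packed for the
 * function above: uParam0 smuggles the C-implementation function pointer and
 * uParam1 the instruction length.  The IEMTHRDCALLSKETCH type and the
 * sketch* names are hypothetical illustrations, not the real IEM call-table
 * structures.
 */
#if 0 /* illustrative sketch, not part of the build */
typedef struct IEMTHRDCALLSKETCH
{
    uint16_t enmFunction;   /* index into the threaded function table */
    uint64_t auParams[3];   /* handed to the built-in as uParam0..uParam2 */
} IEMTHRDCALLSKETCH;

static void sketchEmitDeferToCImpl0(IEMTHRDCALLSKETCH *pCall, PFNIEMCIMPL0 pfnCImpl, uint8_t cbInstr)
{
    pCall->auParams[0] = (uintptr_t)pfnCImpl; /* uParam0: the C implementation to invoke */
    pCall->auParams[1] = cbInstr;             /* uParam1: instruction length for RIP advancing */
    pCall->auParams[2] = 0;                   /* uParam2: unused by DeferToCImpl0 */
}
#endif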


/**
 * Worker for iemThreadedFunc_BltIn_CheckIrq and
 * iemThreadedFunc_BltIn_CheckTimersAndIrq that checks for pending FFs
 * and IRQs, and if only the latter are pending, whether they can be
 * dispatched now.
 */
DECL_FORCE_INLINE(int) iemThreadedFunc_BltIn_CheckIrqCommon(PVMCPUCC pVCpu)
{
    /* Get and mask the per-CPU FFs. */
    uint64_t const fCpuRaw = pVCpu->fLocalForcedActions;
    uint64_t       fFlags  = fCpuRaw & (VMCPU_FF_ALL_MASK & ~(  VMCPU_FF_PGM_SYNC_CR3
                                                              | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
                                                              | VMCPU_FF_TLB_FLUSH
                                                              | VMCPU_FF_UNHALT ));

    /* OR in VM-wide FFs and check them together. */
    uint32_t const fVmRaw = pVCpu->CTX_SUFF(pVM)->fGlobalForcedActions;
    fFlags |= fVmRaw;
    if (RT_LIKELY(!fFlags))
        return VINF_SUCCESS;

    /* Since the VMCPU_FF_INTERRUPT_XXX flags were once upon a time in fVm and
       we haven't reused the bits yet, we can still reliably check whether
       we're only here because of pending interrupts and whether these are
       suppressed by EFLAGS.IF=0 or interrupt shadowing. */
    Assert(!(fVmRaw & (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)));
    AssertCompile((VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC) == 3);
    if (   fFlags <= (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
        && (   !pVCpu->cpum.GstCtx.rflags.Bits.u1IF
            || CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)))
        return VINF_SUCCESS;

    Log(("%04x:%08RX32: Pending IRQ and/or FF: fCpu=%#RX64 fVm=%#RX32 IF=%d\n",
         pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, fCpuRaw, fVmRaw, pVCpu->cpum.GstCtx.rflags.Bits.u1IF));
    STAM_REL_COUNTER_INC(&pVCpu->iem.s.StatCheckIrqBreaks);
    return VINF_IEM_REEXEC_BREAK;
}
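
/*
 * A standalone sketch of why the fFlags <= (APIC | PIC) comparison above is
 * safe: the AssertCompile guarantees the two interrupt bits are the lowest
 * bits (their OR is 3), so any combined FF value <= 3 can only consist of
 * those two bits.  The SKETCH_FF_* values are hypothetical stand-ins.
 */
#if 0 /* illustrative sketch, not part of the build */
# include <assert.h>
# include <stdint.h>

# define SKETCH_FF_INTERRUPT_APIC   UINT64_C(0x01)
# define SKETCH_FF_INTERRUPT_PIC    UINT64_C(0x02)
# define SKETCH_FF_TIMER            UINT64_C(0x10)

static int sketchOnlyInterruptsPending(uint64_t fFlags)
{
    /* Holds precisely because APIC | PIC == 3 occupies the two lowest bits. */
    return fFlags <= (SKETCH_FF_INTERRUPT_APIC | SKETCH_FF_INTERRUPT_PIC);
}

static void sketchSelfTest(void)
{
    assert( sketchOnlyInterruptsPending(SKETCH_FF_INTERRUPT_PIC));
    assert( sketchOnlyInterruptsPending(SKETCH_FF_INTERRUPT_APIC | SKETCH_FF_INTERRUPT_PIC));
    assert(!sketchOnlyInterruptsPending(SKETCH_FF_TIMER | SKETCH_FF_INTERRUPT_PIC));
}
#endif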


/**
 * Built-in function that checks for pending interrupts that can be delivered or
 * forced action flags.
 *
 * This triggers after the completion of an instruction, so EIP is already at
 * the next instruction. If an IRQ or important FF is pending, this will return
 * a non-zero status that stops TB execution.
 */
/** @todo add VMX / SVM variants of this. */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckIrq)
{
    RT_NOREF(uParam0, uParam1, uParam2);
    return iemThreadedFunc_BltIn_CheckIrqCommon(pVCpu);
}


/**
 * Built-in function that decrements the cTbsTillNextTimerPoll counter on
 * direct TB linking, e.g. loop jumps.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckTimers)
{
    if (RT_LIKELY(--pVCpu->iem.s.cTbsTillNextTimerPoll > 0))
        return VINF_SUCCESS;

    Log(("%04x:%08RX32: Check timers\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip));
    STAM_REL_COUNTER_INC(&pVCpu->iem.s.StatCheckTimersBreaks);
    RT_NOREF(uParam0, uParam1, uParam2);
    return VINF_IEM_REEXEC_BREAK;
}
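
/*
 * A sketch of the countdown polling pattern used above: instead of querying
 * the (comparatively expensive) virtual clock on every TB transition, a
 * per-VCPU counter is decremented and the real timer poll only happens once
 * it reaches zero.  The SKETCHVCPU type and re-arm value are hypothetical;
 * in the real code the counter is re-armed by the execution loop.
 */
#if 0 /* illustrative sketch, not part of the build */
# include <stdint.h>

typedef struct SKETCHVCPU
{
    int64_t cTbsTillNextTimerPoll;
} SKETCHVCPU;

static int sketchShouldPollTimers(SKETCHVCPU *pVCpu, int64_t cTbsBetweenPolls)
{
    if (--pVCpu->cTbsTillNextTimerPoll > 0)
        return 0;                                    /* fast path: keep linking TBs */
    pVCpu->cTbsTillNextTimerPoll = cTbsBetweenPolls; /* re-arm ... */
    return 1;                                        /* ... and take the expensive poll */
}
#endif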


/**
 * Combined BltIn_CheckTimers + BltIn_CheckIrq for direct linking.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckTimersAndIrq)
{
    if (RT_LIKELY(--pVCpu->iem.s.cTbsTillNextTimerPoll > 0))
        return iemThreadedFunc_BltIn_CheckIrqCommon(pVCpu);

    Log(("%04x:%08RX32: Check timers\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip));
    STAM_REL_COUNTER_INC(&pVCpu->iem.s.StatCheckTimersBreaks);
    RT_NOREF(uParam0, uParam1, uParam2);
    return VINF_IEM_REEXEC_BREAK;
}


/**
 * Built-in function that compares the fExec mask against uParam0.
 *
 * This is used both for IEM_CIMPL_F_MODE and IEM_CIMPL_F_VMEXIT after executing
 * an instruction.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckMode)
{
    uint32_t const fExpectedExec = (uint32_t)uParam0;
    if ((pVCpu->iem.s.fExec & IEMTB_F_KEY_MASK) == (fExpectedExec & IEMTB_F_KEY_MASK))
        return VINF_SUCCESS;
    LogFlow(("Mode changed at %04x:%08RX64: %#x -> %#x (xor: %#x, xor-key: %#x)\n",
             pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fExpectedExec,
             pVCpu->iem.s.fExec, fExpectedExec ^ pVCpu->iem.s.fExec, (fExpectedExec ^ pVCpu->iem.s.fExec) & IEMTB_F_KEY_MASK));
    RT_NOREF(uParam1, uParam2);
    STAM_REL_COUNTER_INC(&pVCpu->iem.s.StatCheckModeBreaks);
    return VINF_IEM_REEXEC_BREAK;
}
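
/*
 * A sketch of the masked mode comparison above: only the bits covered by the
 * TB key mask participate, so changes to execution state outside the key
 * cannot invalidate the TB.  The xor form below is the same quantity the
 * LogFlow statement prints; SKETCH_KEY_MASK is a hypothetical value.
 */
#if 0 /* illustrative sketch, not part of the build */
# include <stdint.h>

# define SKETCH_KEY_MASK UINT32_C(0x0000ffff) /* bits that select a TB variant */

static int sketchModeStillMatches(uint32_t fExecNow, uint32_t fExecExpected)
{
    /* Equivalent to (fExecNow & MASK) == (fExecExpected & MASK). */
    return ((fExecNow ^ fExecExpected) & SKETCH_KEY_MASK) == 0;
}
#endif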


/**
 * Built-in function that checks for hardware instruction breakpoints.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckHwInstrBps)
{
    VBOXSTRICTRC rcStrict = DBGFBpCheckInstruction(pVCpu->CTX_SUFF(pVM), pVCpu,
                                                   pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base,
                                                      !(pVCpu->cpum.GstCtx.rflags.uBoth & CPUMCTX_INHIBIT_SHADOW_SS)
                                                   || IEM_IS_GUEST_CPU_AMD(pVCpu));
    if (RT_LIKELY(rcStrict == VINF_SUCCESS))
        return VINF_SUCCESS;

    if (rcStrict == VINF_EM_RAW_GUEST_TRAP)
    {
        LogFlow(("Guest HW bp at %04x:%08RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
        rcStrict = iemRaiseDebugException(pVCpu);
        Assert(rcStrict != VINF_SUCCESS);
    }
    else
        LogFlow(("VBoxDbg HW bp at %04x:%08RX64: %Rrc\n",
                 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, VBOXSTRICTRC_VAL(rcStrict) ));
    RT_NOREF(uParam0, uParam1, uParam2);
    return rcStrict;
}


/**
 * Macro that implements the 16/32-bit CS.LIM check, as this is done by a
 * number of functions.
 */
/** @todo consider 32-bit EIP mid-instruction wrap-around... Difficult to
 *        test, since it would require replacing the default firmware. */
#define BODY_CHECK_CS_LIM(a_cbInstr) do { \
        if (RT_LIKELY((uint32_t)(pVCpu->cpum.GstCtx.eip + cbInstr - 1U) <= pVCpu->cpum.GstCtx.cs.u32Limit)) \
        { /* likely */ } \
        else \
        { \
            Log7(("EIP out of bounds at %04x:%08RX32 LB %u - CS.LIM=%#RX32\n", \
                  pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, (a_cbInstr), pVCpu->cpum.GstCtx.cs.u32Limit)); \
            return iemRaiseGeneralProtectionFault0(pVCpu); \
        } \
    } while(0)
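
/*
 * A standalone sketch of the CS.LIM test above: it checks the address of the
 * last byte of the instruction (eip + cbInstr - 1) against the segment limit,
 * with the arithmetic done in uint32_t as in the macro.
 */
#if 0 /* illustrative sketch, not part of the build */
# include <assert.h>
# include <stdint.h>

static int sketchInsnWithinCsLim(uint32_t uEip, uint8_t cbInstr, uint32_t uCsLimit)
{
    return (uint32_t)(uEip + cbInstr - 1U) <= uCsLimit;
}

static void sketchCsLimSelfTest(void)
{
    assert( sketchInsnWithinCsLim(0x0000fffe, 2, 0x0000ffff)); /* last byte exactly at the limit */
    assert(!sketchInsnWithinCsLim(0x0000ffff, 2, 0x0000ffff)); /* last byte one past the limit */
}
#endif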

/**
 * Macro that considers whether we need CS.LIM checking after a branch or
 * crossing over to a new page.
 */
#define BODY_CONSIDER_CS_LIM_CHECKING(a_pTb, a_cbInstr) do { \
        int64_t const offFromLim = (int64_t)pVCpu->cpum.GstCtx.cs.u32Limit - (int64_t)pVCpu->cpum.GstCtx.eip; \
        if (offFromLim >= GUEST_PAGE_SIZE + 16 - (int32_t)(pVCpu->cpum.GstCtx.cs.u64Base & GUEST_PAGE_OFFSET_MASK)) \
        { /* likely */ } \
        else \
        { \
            Log7(("TB need CS.LIM: %p at %04x:%08RX64 LB %u; #%u offFromLim=%#RX64 CS.LIM=%#RX32 CS.BASE=%#RX64\n", \
                  (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), offFromLim, \
                  pVCpu->cpum.GstCtx.cs.u32Limit, pVCpu->cpum.GstCtx.cs.u64Base, __LINE__)); \
            RT_NOREF(a_pTb, a_cbInstr); \
            STAM_REL_COUNTER_INC(&pVCpu->iem.s.StatCheckNeedCsLimChecking); \
            return VINF_IEM_REEXEC_BREAK; \
        } \
    } while(0)
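
/*
 * A sketch of the heuristic above: per-instruction CS.LIM checks can be
 * skipped while at least a guest page plus the maximum x86 instruction
 * length (15 bytes, with a byte to spare here) of headroom remains below the
 * limit, adjusted for the page offset of the segment base.  Constants are
 * spelled out for illustration.
 */
#if 0 /* illustrative sketch, not part of the build */
# include <stdint.h>

# define SKETCH_PAGE_SIZE        4096
# define SKETCH_PAGE_OFFSET_MASK (SKETCH_PAGE_SIZE - 1)

static int sketchNeedsCsLimChecking(uint32_t uEip, uint32_t uCsLimit, uint64_t uCsBase)
{
    int64_t const offFromLim = (int64_t)uCsLimit - (int64_t)uEip;
    return offFromLim < SKETCH_PAGE_SIZE + 16 - (int32_t)(uCsBase & SKETCH_PAGE_OFFSET_MASK);
}
#endif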

/**
 * Macro that implements opcode (re-)checking.
 */
#define BODY_CHECK_OPCODES(a_pTb, a_idxRange, a_offRange, a_cbInstr) do { \
        Assert((a_idxRange) < (a_pTb)->cRanges && (a_pTb)->cRanges <= RT_ELEMENTS((a_pTb)->aRanges)); \
        Assert((a_offRange) < (a_pTb)->aRanges[(a_idxRange)].cbOpcodes); \
        /* We can use pbInstrBuf here as it will be updated when branching (and prior to executing a TB). */ \
        if (RT_LIKELY(memcmp(&pVCpu->iem.s.pbInstrBuf[(a_pTb)->aRanges[(a_idxRange)].offPhysPage + (a_offRange)], \
                             &(a_pTb)->pabOpcodes[    (a_pTb)->aRanges[(a_idxRange)].offOpcodes  + (a_offRange)], \
                             (a_pTb)->aRanges[(a_idxRange)].cbOpcodes - (a_offRange)) == 0)) \
        { /* likely */ } \
        else \
        { \
            Log7(("TB obsolete: %p at %04x:%08RX64 LB %u; range %u, off %#x LB %#x + %#x; #%u\n", (a_pTb), \
                  pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), (a_idxRange), \
                  (a_pTb)->aRanges[(a_idxRange)].offOpcodes, (a_pTb)->aRanges[(a_idxRange)].cbOpcodes, (a_offRange), __LINE__)); \
            RT_NOREF(a_cbInstr); \
            return iemThreadeFuncWorkerObsoleteTb(pVCpu); \
        } \
    } while(0)
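
/*
 * A sketch of the opcode revalidation idea above: the bytes currently mapped
 * at the guest PC are compared against the copy saved when the TB was
 * compiled; any difference (self-modifying code, a remapped page) makes the
 * TB obsolete.  The function below is a hypothetical distillation of the
 * memcmp in the macro.
 */
#if 0 /* illustrative sketch, not part of the build */
# include <stdint.h>
# include <string.h>

static int sketchTbOpcodesStillValid(const uint8_t *pbGuestNow, const uint8_t *pbSavedOpcodes, size_t cbOpcodes)
{
    return memcmp(pbGuestNow, pbSavedOpcodes, cbOpcodes) == 0;
}
#endif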

/**
 * Macro that implements TLB loading and pbInstrBuf updating for an
 * instruction crossing into a new page.
 *
 * This may long jump if we're raising a \#PF, \#GP or similar trouble.
 */
#define BODY_LOAD_TLB_FOR_NEW_PAGE(a_pTb, a_offInstr, a_idxRange, a_cbInstr) do { \
        pVCpu->iem.s.pbInstrBuf       = NULL; \
        pVCpu->iem.s.offCurInstrStart = GUEST_PAGE_SIZE - (a_offInstr); \
        pVCpu->iem.s.offInstrNextByte = GUEST_PAGE_SIZE; \
        iemOpcodeFetchBytesJmp(pVCpu, 0, NULL); \
        \
        RTGCPHYS const GCPhysNewPage = iemTbGetRangePhysPageAddr(a_pTb, a_idxRange); \
        if (RT_LIKELY(   pVCpu->iem.s.GCPhysInstrBuf == GCPhysNewPage \
                      && pVCpu->iem.s.pbInstrBuf)) \
        { /* likely */ } \
        else \
        { \
            Log7(("TB obsolete: %p at %04x:%08RX64 LB %u; crossing at %#x; GCPhys=%RGp expected %RGp, pbInstrBuf=%p - #%u\n", \
                  (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), (a_offInstr), \
                  pVCpu->iem.s.GCPhysInstrBuf, GCPhysNewPage, pVCpu->iem.s.pbInstrBuf, __LINE__)); \
            RT_NOREF(a_cbInstr); \
            return iemThreadeFuncWorkerObsoleteTb(pVCpu); \
        } \
    } while(0)

/**
 * Macro that implements TLB loading and pbInstrBuf updating when branching or
 * when crossing a page on an instruction boundary.
 *
 * This differs from BODY_LOAD_TLB_FOR_NEW_PAGE in that it will first check if
 * it is an inter-page branch and also check the page offset.
 *
 * This may long jump if we're raising a \#PF, \#GP or similar trouble.
 */
#define BODY_LOAD_TLB_AFTER_BRANCH(a_pTb, a_idxRange, a_cbInstr) do { \
        /* Is RIP within the current code page? */ \
        Assert(pVCpu->cpum.GstCtx.cs.u64Base == 0 || !IEM_IS_64BIT_CODE(pVCpu)); \
        uint64_t const uPc = pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base; \
        uint64_t const off = uPc - pVCpu->iem.s.uInstrBufPc; \
        if (off < pVCpu->iem.s.cbInstrBufTotal) \
        { \
            Assert(!(pVCpu->iem.s.GCPhysInstrBuf & GUEST_PAGE_OFFSET_MASK)); \
            Assert(pVCpu->iem.s.pbInstrBuf); \
            RTGCPHYS const GCPhysRangePageWithOffset = iemTbGetRangePhysPageAddr(a_pTb, a_idxRange) \
                                                     | pTb->aRanges[(a_idxRange)].offPhysPage; \
            if (GCPhysRangePageWithOffset == pVCpu->iem.s.GCPhysInstrBuf + off) \
            { /* we're good */ } \
            /** @todo r=bird: Not sure if we need the TB obsolete complication here. \
             * If we're preceded by an indirect jump, there is no reason why the TB \
             * would be 'obsolete' just because this time around the indirect jump ends \
             * up at the same offset in a different page. This would be real bad for \
             * indirect trampolines/validators. */ \
            else if (pTb->aRanges[(a_idxRange)].offPhysPage != off) \
            { \
                Log7(("TB jmp miss: %p at %04x:%08RX64 LB %u; branching/1; GCPhysWithOffset=%RGp expected %RGp, pbInstrBuf=%p - #%u\n", \
                      (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), \
                      pVCpu->iem.s.GCPhysInstrBuf + off, GCPhysRangePageWithOffset, pVCpu->iem.s.pbInstrBuf, __LINE__)); \
                RT_NOREF(a_cbInstr); \
                STAM_REL_COUNTER_INC(&pVCpu->iem.s.StatCheckBranchMisses); \
                return VINF_IEM_REEXEC_BREAK; \
            } \
            else \
            { \
                Log7(("TB obsolete: %p at %04x:%08RX64 LB %u; branching/1; GCPhysWithOffset=%RGp expected %RGp, pbInstrBuf=%p - #%u\n", \
                      (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), \
                      pVCpu->iem.s.GCPhysInstrBuf + off, GCPhysRangePageWithOffset, pVCpu->iem.s.pbInstrBuf, __LINE__)); \
                RT_NOREF(a_cbInstr); \
                return iemThreadeFuncWorkerObsoleteTb(pVCpu); \
            } \
        } \
        else \
        { \
            /* Must translate new RIP. */ \
            pVCpu->iem.s.pbInstrBuf       = NULL; \
            pVCpu->iem.s.offCurInstrStart = 0; \
            pVCpu->iem.s.offInstrNextByte = 0; \
            iemOpcodeFetchBytesJmp(pVCpu, 0, NULL); \
            Assert(!(pVCpu->iem.s.GCPhysInstrBuf & GUEST_PAGE_OFFSET_MASK) || !pVCpu->iem.s.pbInstrBuf); \
            \
            RTGCPHYS const GCPhysRangePageWithOffset = iemTbGetRangePhysPageAddr(a_pTb, a_idxRange) \
                                                     | pTb->aRanges[(a_idxRange)].offPhysPage; \
            uint64_t const offNew = uPc - pVCpu->iem.s.uInstrBufPc; \
            if (   GCPhysRangePageWithOffset == pVCpu->iem.s.GCPhysInstrBuf + offNew \
                && pVCpu->iem.s.pbInstrBuf) \
            { /* likely */ } \
            else if (   pTb->aRanges[(a_idxRange)].offPhysPage != offNew \
                     && pVCpu->iem.s.pbInstrBuf) \
            { \
                Log7(("TB jmp miss: %p at %04x:%08RX64 LB %u; branching/2; GCPhysWithOffset=%RGp expected %RGp, pbInstrBuf=%p - #%u\n", \
                      (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), \
                      pVCpu->iem.s.GCPhysInstrBuf + offNew, GCPhysRangePageWithOffset, pVCpu->iem.s.pbInstrBuf, __LINE__)); \
                RT_NOREF(a_cbInstr); \
                STAM_REL_COUNTER_INC(&pVCpu->iem.s.StatCheckBranchMisses); \
                return VINF_IEM_REEXEC_BREAK; \
            } \
            else \
            { \
                Log7(("TB obsolete: %p at %04x:%08RX64 LB %u; branching/2; GCPhysWithOffset=%RGp expected %RGp, pbInstrBuf=%p - #%u\n", \
                      (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), \
                      pVCpu->iem.s.GCPhysInstrBuf + offNew, GCPhysRangePageWithOffset, pVCpu->iem.s.pbInstrBuf, __LINE__)); \
                RT_NOREF(a_cbInstr); \
                return iemThreadeFuncWorkerObsoleteTb(pVCpu); \
            } \
        } \
    } while(0)

/**
 * Macro that implements PC check after a conditional branch.
 */
#define BODY_CHECK_PC_AFTER_BRANCH(a_pTb, a_idxRange, a_offRange, a_cbInstr) do { \
        /* Is RIP within the current code page? */ \
        Assert(pVCpu->cpum.GstCtx.cs.u64Base == 0 || !IEM_IS_64BIT_CODE(pVCpu)); \
        uint64_t const uPc = pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base; \
        uint64_t const off = uPc - pVCpu->iem.s.uInstrBufPc; \
        Assert(!(pVCpu->iem.s.GCPhysInstrBuf & GUEST_PAGE_OFFSET_MASK)); \
        RTGCPHYS const GCPhysRangePageWithOffset = (  iemTbGetRangePhysPageAddr(a_pTb, a_idxRange) \
                                                    | (a_pTb)->aRanges[(a_idxRange)].offPhysPage) \
                                                 + (a_offRange); \
        if (   GCPhysRangePageWithOffset == pVCpu->iem.s.GCPhysInstrBuf + off \
            && off < /*pVCpu->iem.s.cbInstrBufTotal - ignore flushes; CS.LIM is checked elsewhere*/ X86_PAGE_SIZE) \
        { /* we're good */ } \
        else \
        { \
            Log7(("TB jmp miss: %p at %04x:%08RX64 LB %u; GCPhysWithOffset=%RGp hoped for %RGp, pbInstrBuf=%p - #%u\n", \
                  (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), \
                  pVCpu->iem.s.GCPhysInstrBuf + off, GCPhysRangePageWithOffset, pVCpu->iem.s.pbInstrBuf, __LINE__)); \
            RT_NOREF(a_cbInstr); \
            STAM_REL_COUNTER_INC(&pVCpu->iem.s.StatCheckBranchMisses); \
            return VINF_IEM_REEXEC_BREAK; \
        } \
    } while(0)
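
/*
 * A sketch of the post-branch PC validation above: the new PC is validated by
 * guest-physical address rather than by virtual address, so the check holds
 * even if the guest has remapped its page tables since the TB was compiled.
 * The parameter names are hypothetical stand-ins for the TB range bookkeeping.
 */
#if 0 /* illustrative sketch, not part of the build */
# include <stdint.h>

static int sketchPcMatchesTbRange(uint64_t GCPhysRangePage,  /* physical page of the TB range */
                                  uint32_t offPhysPage,      /* range start offset within that page */
                                  uint32_t offRange,         /* offset of the PC within the range */
                                  uint64_t GCPhysInstrBuf,   /* physical page backing pbInstrBuf */
                                  uint64_t offInBuf)         /* PC offset within the instruction buffer */
{
    uint64_t const GCPhysExpected = (GCPhysRangePage | offPhysPage) + offRange;
    return GCPhysExpected == GCPhysInstrBuf + offInBuf
        && offInBuf < 4096 /* must stay within the current code page */;
}
#endif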



/**
 * Built-in function that checks that EIP/IP plus uParam0 is within CS.LIM,
 * raising a \#GP(0) if this isn't the case.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckCsLim)
{
    uint32_t const cbInstr = (uint8_t)uParam0;
    RT_NOREF(uParam1, uParam2);
    BODY_CHECK_CS_LIM(cbInstr);
    return VINF_SUCCESS;
}


/**
 * Built-in function for re-checking opcodes and CS.LIM after an instruction
 * that may have modified them.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckCsLimAndOpcodes)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint8_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    BODY_CHECK_CS_LIM(cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    return VINF_SUCCESS;
}


/**
 * Built-in function for re-checking opcodes after an instruction that may have
 * modified them.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodes)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint8_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    return VINF_SUCCESS;
}


/**
 * Built-in function for re-checking opcodes and considering the need for CS.LIM
 * checking after an instruction that may have modified them.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesConsiderCsLim)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint8_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    BODY_CONSIDER_CS_LIM_CHECKING(pTb, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    return VINF_SUCCESS;
}


/*
 * Post-branching checkers.
 */

/**
 * Built-in function for checking CS.LIM, checking the PC and checking opcodes
 * after conditional branching within the same page.
 *
 * @see iemThreadedFunc_BltIn_CheckPcAndOpcodes
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckCsLimAndPcAndOpcodes)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint8_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes));
    BODY_CHECK_CS_LIM(cbInstr);
    BODY_CHECK_PC_AFTER_BRANCH(pTb, idxRange, offRange, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    //LogFunc(("okay\n"));
    return VINF_SUCCESS;
}


/**
 * Built-in function for checking the PC and checking opcodes after conditional
 * branching within the same page.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndPcAndOpcodes
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckPcAndOpcodes)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint8_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes));
    BODY_CHECK_PC_AFTER_BRANCH(pTb, idxRange, offRange, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    //LogFunc(("okay\n"));
    return VINF_SUCCESS;
}


/**
 * Built-in function for checking the PC and checking opcodes and considering
 * the need for CS.LIM checking after conditional branching within the same
 * page.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndPcAndOpcodes
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckPcAndOpcodesConsiderCsLim)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint8_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes));
    BODY_CONSIDER_CS_LIM_CHECKING(pTb, cbInstr);
    BODY_CHECK_PC_AFTER_BRANCH(pTb, idxRange, offRange, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    //LogFunc(("okay\n"));
    return VINF_SUCCESS;
}


/**
 * Built-in function for checking CS.LIM, loading TLB and checking opcodes when
 * transitioning to a different code page.
 *
 * The code page transition can either happen naturally over onto the next
 * page (with the instruction starting at page offset zero) or by means of
 * branching.
 *
 * @see iemThreadedFunc_BltIn_CheckOpcodesLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckCsLimAndOpcodesLoadingTlb)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint8_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes));
    BODY_CHECK_CS_LIM(cbInstr);
    BODY_LOAD_TLB_AFTER_BRANCH(pTb, idxRange, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    //LogFunc(("okay\n"));
    return VINF_SUCCESS;
}


/**
 * Built-in function for loading TLB and checking opcodes when transitioning to
 * a different code page.
 *
 * The code page transition can either happen naturally over onto the next
 * page (with the instruction starting at page offset zero) or by means of
 * branching.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesLoadingTlb)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint8_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes));
    BODY_LOAD_TLB_AFTER_BRANCH(pTb, idxRange, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    //LogFunc(("okay\n"));
    return VINF_SUCCESS;
}


/**
 * Built-in function for loading TLB and checking opcodes and considering the
 * need for CS.LIM checking when transitioning to a different code page.
 *
 * The code page transition can either happen naturally over onto the next
 * page (with the instruction starting at page offset zero) or by means of
 * branching.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesLoadingTlbConsiderCsLim)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint8_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes));
    BODY_CONSIDER_CS_LIM_CHECKING(pTb, cbInstr);
    BODY_LOAD_TLB_AFTER_BRANCH(pTb, idxRange, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    //LogFunc(("okay\n"));
    return VINF_SUCCESS;
}



/*
 * Natural page crossing checkers.
 */

/**
 * Built-in function for checking CS.LIM, loading TLB and checking opcodes on
 * both pages when transitioning to a different code page.
 *
 * This is used when the previous instruction requires revalidation of opcode
 * bytes and the current instruction straddles a page boundary with opcode
 * bytes in both the old and new page.
 *
 * @see iemThreadedFunc_BltIn_CheckOpcodesAcrossPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckCsLimAndOpcodesAcrossPageLoadingTlb)
{
    PCIEMTB const  pTb         = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr     = (uint8_t)uParam0;
    uint32_t const cbStartPage = (uint32_t)(uParam0 >> 32);
    uint32_t const idxRange1   = (uint32_t)uParam1;
    uint32_t const offRange1   = (uint32_t)uParam2;
    uint32_t const idxRange2   = idxRange1 + 1;
    BODY_CHECK_CS_LIM(cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange1, offRange1, cbInstr);
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, cbStartPage, idxRange2, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange2, 0, cbInstr);
    return VINF_SUCCESS;
}


/**
 * Built-in function for loading TLB and checking opcodes on both pages when
 * transitioning to a different code page.
 *
 * This is used when the previous instruction requires revalidation of opcode
 * bytes and the current instruction straddles a page boundary with opcode
 * bytes in both the old and new page.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesAcrossPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesAcrossPageLoadingTlb)
{
    PCIEMTB const  pTb         = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr     = (uint8_t)uParam0;
    uint32_t const cbStartPage = (uint32_t)(uParam0 >> 32);
    uint32_t const idxRange1   = (uint32_t)uParam1;
    uint32_t const offRange1   = (uint32_t)uParam2;
    uint32_t const idxRange2   = idxRange1 + 1;
    BODY_CHECK_OPCODES(pTb, idxRange1, offRange1, cbInstr);
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, cbStartPage, idxRange2, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange2, 0, cbInstr);
    return VINF_SUCCESS;
}


/**
 * Built-in function for loading TLB and checking opcodes on both pages and
 * considering the need for CS.LIM checking when transitioning to a different
 * code page.
 *
 * This is used when the previous instruction requires revalidation of opcode
 * bytes and the current instruction straddles a page boundary with opcode
 * bytes in both the old and new page.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesAcrossPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesAcrossPageLoadingTlbConsiderCsLim)
{
    PCIEMTB const  pTb         = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr     = (uint8_t)uParam0;
    uint32_t const cbStartPage = (uint32_t)(uParam0 >> 32);
    uint32_t const idxRange1   = (uint32_t)uParam1;
    uint32_t const offRange1   = (uint32_t)uParam2;
    uint32_t const idxRange2   = idxRange1 + 1;
    BODY_CONSIDER_CS_LIM_CHECKING(pTb, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange1, offRange1, cbInstr);
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, cbStartPage, idxRange2, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange2, 0, cbInstr);
    return VINF_SUCCESS;
}


/**
 * Built-in function for checking CS.LIM, loading TLB and checking opcodes when
 * advancing naturally to a different code page.
 *
 * Only opcodes on the new page are checked.
 *
 * @see iemThreadedFunc_BltIn_CheckOpcodesOnNextPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNextPageLoadingTlb)
{
    PCIEMTB const  pTb         = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr     = (uint8_t)uParam0;
    uint32_t const cbStartPage = (uint32_t)(uParam0 >> 32);
    uint32_t const idxRange1   = (uint32_t)uParam1;
    //uint32_t const offRange1   = (uint32_t)uParam2;
    uint32_t const idxRange2   = idxRange1 + 1;
    BODY_CHECK_CS_LIM(cbInstr);
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, cbStartPage, idxRange2, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange2, 0, cbInstr);
    RT_NOREF(uParam2);
    return VINF_SUCCESS;
}


/**
 * Built-in function for loading TLB and checking opcodes when advancing
 * naturally to a different code page.
 *
 * Only opcodes on the new page are checked.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNextPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesOnNextPageLoadingTlb)
{
    PCIEMTB const  pTb         = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr     = (uint8_t)uParam0;
    uint32_t const cbStartPage = (uint32_t)(uParam0 >> 32);
    uint32_t const idxRange1   = (uint32_t)uParam1;
    //uint32_t const offRange1   = (uint32_t)uParam2;
    uint32_t const idxRange2   = idxRange1 + 1;
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, cbStartPage, idxRange2, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange2, 0, cbInstr);
    RT_NOREF(uParam2);
    return VINF_SUCCESS;
}


/**
 * Built-in function for loading TLB and checking opcodes and considering the
 * need for CS.LIM checking when advancing naturally to a different code page.
 *
 * Only opcodes on the new page are checked.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNextPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesOnNextPageLoadingTlbConsiderCsLim)
{
    PCIEMTB const  pTb         = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr     = (uint8_t)uParam0;
    uint32_t const cbStartPage = (uint32_t)(uParam0 >> 32);
    uint32_t const idxRange1   = (uint32_t)uParam1;
    //uint32_t const offRange1   = (uint32_t)uParam2;
    uint32_t const idxRange2   = idxRange1 + 1;
    BODY_CONSIDER_CS_LIM_CHECKING(pTb, cbInstr);
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, cbStartPage, idxRange2, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange2, 0, cbInstr);
    RT_NOREF(uParam2);
    return VINF_SUCCESS;
}


/**
 * Built-in function for checking CS.LIM, loading TLB and checking opcodes when
 * advancing naturally to a different code page with first instr at byte 0.
 *
 * @see iemThreadedFunc_BltIn_CheckOpcodesOnNewPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNewPageLoadingTlb)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint8_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    RT_NOREF(uParam2); //Assert(uParam2 == 0 /*offRange*/);
    BODY_CHECK_CS_LIM(cbInstr);
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, 0, idxRange, cbInstr);
    Assert(pVCpu->iem.s.offCurInstrStart == 0);
    BODY_CHECK_OPCODES(pTb, idxRange, 0, cbInstr);
    return VINF_SUCCESS;
}


/**
 * Built-in function for loading TLB and checking opcodes when advancing
 * naturally to a different code page with first instr at byte 0.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNewPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesOnNewPageLoadingTlb)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint8_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    RT_NOREF(uParam2); //Assert(uParam2 == 0 /*offRange*/);
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, 0, idxRange, cbInstr);
    Assert(pVCpu->iem.s.offCurInstrStart == 0);
    BODY_CHECK_OPCODES(pTb, idxRange, 0, cbInstr);
    return VINF_SUCCESS;
}


/**
 * Built-in function for loading TLB and checking opcodes and considering the
 * need for CS.LIM checking when advancing naturally to a different code page
 * with first instr at byte 0.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNewPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesOnNewPageLoadingTlbConsiderCsLim)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint8_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    RT_NOREF(uParam2); //Assert(uParam2 == 0 /*offRange*/);
    BODY_CONSIDER_CS_LIM_CHECKING(pTb, cbInstr);
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, 0, idxRange, cbInstr);
    Assert(pVCpu->iem.s.offCurInstrStart == 0);
    BODY_CHECK_OPCODES(pTb, idxRange, 0, cbInstr);
    return VINF_SUCCESS;
}


/**
 * Built-in function for jumping in the call sequence.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_Jump)
{
    Assert(uParam1 == 0 && uParam2 == 0);
    RT_NOREF(pVCpu, uParam0, uParam1, uParam2);
    return VINF_IEM_REEXEC_JUMP;
}