VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllThrdFuncsBltIn.cpp

Last change on this file was 108260, checked in by vboxsync, 5 weeks ago

VMM/IEM: Splitting up IEMInline.h. jiraref:VBP-1531

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 36.3 KB
/* $Id: IEMAllThrdFuncsBltIn.cpp 108260 2025-02-17 15:24:14Z vboxsync $ */
/** @file
 * IEM - Instruction Decoding and Emulation, Built-in Threaded Functions.
 *
 * This is separate from IEMThreadedFunctions.cpp because it doesn't work
 * with IEM_WITH_OPAQUE_DECODER_STATE defined.
 */

/*
 * Copyright (C) 2011-2024 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_IEM_RE_THREADED
#define VMCPU_INCL_CPUM_GST_CTX
#ifdef IN_RING0
# define VBOX_VMM_TARGET_X86
#endif
#include <VBox/vmm/iem.h>
#include <VBox/vmm/cpum.h>
#include <VBox/vmm/pdmapic.h>
#include <VBox/vmm/pdm.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/iom.h>
#include <VBox/vmm/em.h>
#include <VBox/vmm/hm.h>
#include <VBox/vmm/nem.h>
#include <VBox/vmm/gim.h>
#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
# include <VBox/vmm/em.h>
# include <VBox/vmm/hm_svm.h>
#endif
#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
# include <VBox/vmm/hmvmxinline.h>
#endif
#include <VBox/vmm/tm.h>
#include <VBox/vmm/dbgf.h>
#include <VBox/vmm/dbgftrace.h>
#include "IEMInternal.h"
#include <VBox/vmm/vmcc.h>
#include <VBox/log.h>
#include <VBox/err.h>
#include <VBox/param.h>
#include <VBox/dis.h>
#include <VBox/disopcode-x86-amd64.h>
#include <iprt/asm-math.h>
#include <iprt/assert.h>
#include <iprt/string.h>
#include <iprt/x86.h>

#include "IEMInline.h"
#ifdef VBOX_VMM_TARGET_X86
# include "target-x86/IEMInline-x86.h"
#endif



static VBOXSTRICTRC iemThreadedFuncWorkerObsoleteTb(PVMCPUCC pVCpu)
{
    /* We set fSafeToFree to false because we're being called in the context
       of a TB callback function, which for native TBs means we cannot release
       the executable memory until we've returned all the way back to iemTbExec,
       as that return path goes via the native code generated for the TB. */
    iemThreadedTbObsolete(pVCpu, pVCpu->iem.s.pCurTbR3, false /*fSafeToFree*/);
    return VINF_IEM_REEXEC_BREAK;
}


/**
 * Built-in function that does absolutely nothing - for debugging.
 *
 * This can be used for artificially increasing the number of calls generated,
 * or for triggering flushes associated with threaded calls.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_Nop)
{
    RT_NOREF(pVCpu, uParam0, uParam1, uParam2);
    return VINF_SUCCESS;
}



/**
 * This is also called from iemNativeHlpAsmSafeWrapLogCpuState.
 */
DECLASM(void) iemThreadedFunc_BltIn_LogCpuStateWorker(PVMCPU pVCpu)
{
#ifdef LOG_ENABLED
    PCIEMTB const      pTb     = pVCpu->iem.s.pCurTbR3;
    PCX86FXSTATE const pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
    Log2(("**** LG%c fExec=%x pTb=%p cUsed=%u\n"
          " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
          " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
          " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
          " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
          , pTb && (pTb->fFlags & IEMTB_F_TYPE_NATIVE) ? 'n' : 't', pVCpu->iem.s.fExec, pTb, pTb ? pTb->cUsed : 0,
          pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
          pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
          pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
          pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
          pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK ));
#else
    RT_NOREF(pVCpu);
#endif
}


/**
 * Built-in function that logs the current CPU state - for debugging.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_LogCpuState)
{
    iemThreadedFunc_BltIn_LogCpuStateWorker(pVCpu);
    RT_NOREF(uParam0, uParam1, uParam2);
    return VINF_SUCCESS;
}


/**
 * Built-in function that calls a C-implementation function taking zero arguments.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_DeferToCImpl0)
{
    PFNIEMCIMPL0 const pfnCImpl = (PFNIEMCIMPL0)(uintptr_t)uParam0;
    uint8_t const      cbInstr  = (uint8_t)uParam1;
    RT_NOREF(uParam2);
    return pfnCImpl(pVCpu, cbInstr);
}


/**
 * Worker for iemThreadedFunc_BltIn_CheckIrq and
 * iemThreadedFunc_BltIn_CheckTimersAndIrq that checks for pending FFs
 * and IRQs, and, if only the latter are pending, whether we can dispatch
 * them now.
 */
DECL_FORCE_INLINE(int) iemThreadedFunc_BltIn_CheckIrqCommon(PVMCPUCC pVCpu)
{
    /* Get and mask the per-CPU FFs. */
    uint64_t const fCpuRaw = pVCpu->fLocalForcedActions;
    uint64_t       fFlags  = fCpuRaw & (VMCPU_FF_ALL_MASK & ~(  VMCPU_FF_PGM_SYNC_CR3
                                                              | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
                                                              | VMCPU_FF_TLB_FLUSH
                                                              | VMCPU_FF_UNHALT ));

    /* OR in VM-wide FFs and check them together. */
    uint32_t const fVmRaw = pVCpu->CTX_SUFF(pVM)->fGlobalForcedActions;
    fFlags |= fVmRaw;
    if (RT_LIKELY(!fFlags))
        return VINF_SUCCESS;

    /* Since the VMCPU_FF_INTERRUPT_XXX flags were once upon a time in fVm and
       we haven't reused the bits yet, we can still reliably check whether
       we're only here because of pending interrupts and whether these are
       suppressed by EFLAGS.IF=0 or interrupt shadowing. */
    Assert(!(fVmRaw & (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)));
    AssertCompile((VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC) == 3);
    if (   fFlags <= (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
        && (   !pVCpu->cpum.GstCtx.rflags.Bits.u1IF
            || CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)))
        return VINF_SUCCESS;

    Log(("%04x:%08RX32: Pending IRQ and/or FF: fCpu=%#RX64 fVm=%#RX32 IF=%d\n",
         pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, fCpuRaw, fVmRaw, pVCpu->cpum.GstCtx.rflags.Bits.u1IF));
    STAM_REL_COUNTER_INC(&pVCpu->iem.s.StatCheckIrqBreaks);
    return VINF_IEM_REEXEC_BREAK;
}
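
/* A minimal standalone sketch of the decision above (illustrative only; the
   constant and names below are simplified stand-ins for the real VMM
   definitions, not the actual API):

       #include <stdbool.h>
       #include <stdint.h>

       #define SKETCH_FF_IRQ_MASK UINT64_C(3)   // stand-in for the APIC|PIC interrupt FFs

       static bool sketchCanContinueTb(uint64_t fPending, bool fIf, bool fInhibit)
       {
           if (!fPending)
               return true;                         // nothing pending at all
           if (fPending <= SKETCH_FF_IRQ_MASK)      // only interrupt FFs are set...
               return !fIf || fInhibit;             // ...and they cannot be taken right now
           return false;                            // some other FF needs servicing
       }
*/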


/**
 * Built-in function that checks for pending interrupts that can be delivered,
 * as well as forced action flags.
 *
 * This triggers after the completion of an instruction, so EIP is already at
 * the next instruction. If an IRQ or important FF is pending, this will return
 * a non-zero status that stops TB execution.
 */
/** @todo add VMX / SVM variants of this. */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckIrq)
{
    RT_NOREF(uParam0, uParam1, uParam2);
    return iemThreadedFunc_BltIn_CheckIrqCommon(pVCpu);
}


/**
 * Built-in function that counts down the cTbsTillNextTimerPoll counter on
 * direct TB linking, such as loop jumps.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckTimers)
{
    if (RT_LIKELY(--pVCpu->iem.s.cTbsTillNextTimerPoll > 0))
        return VINF_SUCCESS;

    Log(("%04x:%08RX32: Check timers\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip));
    STAM_REL_COUNTER_INC(&pVCpu->iem.s.StatCheckTimersBreaks);
    RT_NOREF(uParam0, uParam1, uParam2);
    return VINF_IEM_REEXEC_BREAK;
}


/**
 * Combined BltIn_CheckTimers + BltIn_CheckIrq for direct linking.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckTimersAndIrq)
{
    if (RT_LIKELY(--pVCpu->iem.s.cTbsTillNextTimerPoll > 0))
        return iemThreadedFunc_BltIn_CheckIrqCommon(pVCpu);

    Log(("%04x:%08RX32: Check timers\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip));
    STAM_REL_COUNTER_INC(&pVCpu->iem.s.StatCheckTimersBreaks);
    RT_NOREF(uParam0, uParam1, uParam2);
    return VINF_IEM_REEXEC_BREAK;
}


/**
 * Built-in function that compares the fExec mask against uParam0.
 *
 * This is used both for IEM_CIMPL_F_MODE and IEM_CIMPL_F_VMEXIT after executing
 * an instruction.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckMode)
{
    uint32_t const fExpectedExec = (uint32_t)uParam0;
    if ((pVCpu->iem.s.fExec & IEMTB_F_KEY_MASK) == (fExpectedExec & IEMTB_F_KEY_MASK))
        return VINF_SUCCESS;
    LogFlow(("Mode changed at %04x:%08RX64: %#x -> %#x (xor: %#x, xor-key: %#x)\n",
             pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fExpectedExec,
             pVCpu->iem.s.fExec, fExpectedExec ^ pVCpu->iem.s.fExec, (fExpectedExec ^ pVCpu->iem.s.fExec) & IEMTB_F_KEY_MASK));
    RT_NOREF(uParam1, uParam2);
    STAM_REL_COUNTER_INC(&pVCpu->iem.s.StatCheckModeBreaks);
    return VINF_IEM_REEXEC_BREAK;
}
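
/* Illustrative sketch of the key-mask comparison above (made-up constant; the
   real IEMTB_F_KEY_MASK is defined elsewhere in IEM):

       #include <stdbool.h>
       #include <stdint.h>

       #define SKETCH_KEY_MASK UINT32_C(0x00ffffff)  // stand-in for IEMTB_F_KEY_MASK

       static bool sketchModeStillMatches(uint32_t fExecNow, uint32_t fExecAtCompile)
       {
           // Only bits covered by the key mask participate in TB lookup, so only
           // those bits must still match for the TB to remain valid.
           return ((fExecNow ^ fExecAtCompile) & SKETCH_KEY_MASK) == 0;
       }
*/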


/**
 * Built-in function that checks for hardware instruction breakpoints.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckHwInstrBps)
{
    VBOXSTRICTRC rcStrict = DBGFBpCheckInstruction(pVCpu->CTX_SUFF(pVM), pVCpu,
                                                   pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base,
                                                      !(pVCpu->cpum.GstCtx.rflags.uBoth & CPUMCTX_INHIBIT_SHADOW_SS)
                                                   || IEM_IS_GUEST_CPU_AMD(pVCpu));
    if (RT_LIKELY(rcStrict == VINF_SUCCESS))
        return VINF_SUCCESS;

    if (rcStrict == VINF_EM_RAW_GUEST_TRAP)
    {
        LogFlow(("Guest HW bp at %04x:%08RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
        rcStrict = iemRaiseDebugException(pVCpu);
        Assert(rcStrict != VINF_SUCCESS);
    }
    else
        LogFlow(("VBoxDbg HW bp at %04x:%08RX64: %Rrc\n",
                 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, VBOXSTRICTRC_VAL(rcStrict) ));
    RT_NOREF(uParam0, uParam1, uParam2);
    return rcStrict;
}


/**
 * Macro that implements the 16/32-bit CS.LIM check, as this is done by a
 * number of functions.
 */
/** @todo consider 32-bit EIP mid-instruction wrap-around... Difficult to
 *        test, since it would require replacing the default firmware. */
#define BODY_CHECK_CS_LIM(a_cbInstr) do { \
        if (RT_LIKELY((uint32_t)(pVCpu->cpum.GstCtx.eip + (a_cbInstr) - 1U) <= pVCpu->cpum.GstCtx.cs.u32Limit)) \
        { /* likely */ } \
        else \
        { \
            Log7(("EIP out of bounds at %04x:%08RX32 LB %u - CS.LIM=%#RX32\n", \
                  pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, (a_cbInstr), pVCpu->cpum.GstCtx.cs.u32Limit)); \
            return iemRaiseGeneralProtectionFault0(pVCpu); \
        } \
    } while(0)
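
/* A minimal sketch of the same bounds check in isolation (illustrative only;
   plain integers instead of the guest CPU context):

       #include <stdbool.h>
       #include <stdint.h>

       static bool sketchInstrWithinCsLim(uint32_t uEip, uint8_t cbInstr, uint32_t uCsLim)
       {
           // The last byte of the instruction must not lie beyond the segment
           // limit; the cast keeps 32-bit wrap-around semantics for EIP.
           return (uint32_t)(uEip + cbInstr - 1U) <= uCsLim;
       }
*/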

/**
 * Macro that considers whether we need CS.LIM checking after a branch or
 * crossing over to a new page.
 */
#define BODY_CONSIDER_CS_LIM_CHECKING(a_pTb, a_cbInstr) do { \
        int64_t const offFromLim = (int64_t)pVCpu->cpum.GstCtx.cs.u32Limit - (int64_t)pVCpu->cpum.GstCtx.eip; \
        if (offFromLim >= GUEST_PAGE_SIZE + 16 - (int32_t)(pVCpu->cpum.GstCtx.cs.u64Base & GUEST_PAGE_OFFSET_MASK)) \
        { /* likely */ } \
        else \
        { \
            Log7(("TB need CS.LIM: %p at %04x:%08RX64 LB %u; offFromLim=%#RX64 CS.LIM=%#RX32 CS.BASE=%#RX64 - #%u\n", \
                  (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), offFromLim, \
                  pVCpu->cpum.GstCtx.cs.u32Limit, pVCpu->cpum.GstCtx.cs.u64Base, __LINE__)); \
            RT_NOREF(a_pTb, a_cbInstr); \
            STAM_REL_COUNTER_INC(&pVCpu->iem.s.StatCheckNeedCsLimChecking); \
            return VINF_IEM_REEXEC_BREAK; \
        } \
    } while(0)

/**
 * Macro that implements opcode (re-)checking.
 */
#define BODY_CHECK_OPCODES(a_pTb, a_idxRange, a_offRange, a_cbInstr) do { \
        Assert((a_idxRange) < (a_pTb)->cRanges && (a_pTb)->cRanges <= RT_ELEMENTS((a_pTb)->aRanges)); \
        Assert((a_offRange) < (a_pTb)->aRanges[(a_idxRange)].cbOpcodes); \
        /* We can use pbInstrBuf here as it will be updated when branching (and prior to executing a TB). */ \
        if (RT_LIKELY(memcmp(&pVCpu->iem.s.pbInstrBuf[(a_pTb)->aRanges[(a_idxRange)].offPhysPage + (a_offRange)], \
                             &(a_pTb)->pabOpcodes[    (a_pTb)->aRanges[(a_idxRange)].offOpcodes + (a_offRange)], \
                             (a_pTb)->aRanges[(a_idxRange)].cbOpcodes - (a_offRange)) == 0)) \
        { /* likely */ } \
        else \
        { \
            Log7(("TB obsolete: %p at %04x:%08RX64 LB %u; range %u, off %#x LB %#x + %#x; #%u\n", (a_pTb), \
                  pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), (a_idxRange), \
                  (a_pTb)->aRanges[(a_idxRange)].offOpcodes, (a_pTb)->aRanges[(a_idxRange)].cbOpcodes, (a_offRange), __LINE__)); \
            RT_NOREF(a_cbInstr); \
            return iemThreadedFuncWorkerObsoleteTb(pVCpu); \
        } \
    } while(0)
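
/* Illustrative sketch of the revalidation idea above (simplified signature;
   the real code compares the live instruction buffer against the opcode bytes
   recorded when the TB was compiled):

       #include <stdbool.h>
       #include <stdint.h>
       #include <string.h>

       static bool sketchOpcodesUnchanged(const uint8_t *pbLive, const uint8_t *pbRecorded, size_t cb)
       {
           // If guest code was patched or remapped since compilation, the recorded
           // bytes no longer match and the TB has to be discarded as obsolete.
           return memcmp(pbLive, pbRecorded, cb) == 0;
       }
*/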

/**
 * Macro that implements TLB loading and pbInstrBuf updating for an
 * instruction crossing into a new page.
 *
 * This may long jump if we're raising a \#PF, \#GP or similar trouble.
 */
#define BODY_LOAD_TLB_FOR_NEW_PAGE(a_pTb, a_offInstr, a_idxRange, a_cbInstr) do { \
        pVCpu->iem.s.pbInstrBuf       = NULL; \
        pVCpu->iem.s.offCurInstrStart = GUEST_PAGE_SIZE - (a_offInstr); \
        pVCpu->iem.s.offInstrNextByte = GUEST_PAGE_SIZE; \
        iemOpcodeFetchBytesJmp(pVCpu, 0, NULL); \
        \
        RTGCPHYS const GCPhysNewPage = iemTbGetRangePhysPageAddr(a_pTb, a_idxRange); \
        if (RT_LIKELY(   pVCpu->iem.s.GCPhysInstrBuf == GCPhysNewPage \
                      && pVCpu->iem.s.pbInstrBuf)) \
        { /* likely */ } \
        else \
        { \
            Log7(("TB obsolete: %p at %04x:%08RX64 LB %u; crossing at %#x; GCPhys=%RGp expected %RGp, pbInstrBuf=%p - #%u\n", \
                  (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), (a_offInstr), \
                  pVCpu->iem.s.GCPhysInstrBuf, GCPhysNewPage, pVCpu->iem.s.pbInstrBuf, __LINE__)); \
            RT_NOREF(a_cbInstr); \
            return iemThreadedFuncWorkerObsoleteTb(pVCpu); \
        } \
    } while(0)

/**
 * Macro that implements TLB loading and pbInstrBuf updating when branching or
 * when crossing a page on an instruction boundary.
 *
 * This differs from BODY_LOAD_TLB_FOR_NEW_PAGE in that it will first check if
 * it is an inter-page branch and also check the page offset.
 *
 * This may long jump if we're raising a \#PF, \#GP or similar trouble.
 */
#define BODY_LOAD_TLB_AFTER_BRANCH(a_pTb, a_idxRange, a_cbInstr) do { \
        /* Is RIP within the current code page? */ \
        Assert(pVCpu->cpum.GstCtx.cs.u64Base == 0 || !IEM_IS_64BIT_CODE(pVCpu)); \
        uint64_t const uPc = pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base; \
        uint64_t const off = uPc - pVCpu->iem.s.uInstrBufPc; \
        if (off < pVCpu->iem.s.cbInstrBufTotal) \
        { \
            Assert(!(pVCpu->iem.s.GCPhysInstrBuf & GUEST_PAGE_OFFSET_MASK)); \
            Assert(pVCpu->iem.s.pbInstrBuf); \
            RTGCPHYS const GCPhysRangePageWithOffset = iemTbGetRangePhysPageAddr(a_pTb, a_idxRange) \
                                                     | (a_pTb)->aRanges[(a_idxRange)].offPhysPage; \
            if (GCPhysRangePageWithOffset == pVCpu->iem.s.GCPhysInstrBuf + off) \
            { /* we're good */ } \
            /** @todo r=bird: Not sure if we need the TB obsolete complication here. \
             *        If we're preceded by an indirect jump, there is no reason why the TB \
             *        would be 'obsolete' just because this time around the indirect jump ends \
             *        up at the same offset in a different page. This would be really bad for \
             *        indirect trampolines/validators. */ \
            else if ((a_pTb)->aRanges[(a_idxRange)].offPhysPage != off) \
            { \
                Log7(("TB jmp miss: %p at %04x:%08RX64 LB %u; branching/1; GCPhysWithOffset=%RGp expected %RGp, pbInstrBuf=%p - #%u\n", \
                      (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), \
                      pVCpu->iem.s.GCPhysInstrBuf + off, GCPhysRangePageWithOffset, pVCpu->iem.s.pbInstrBuf, __LINE__)); \
                RT_NOREF(a_cbInstr); \
                STAM_REL_COUNTER_INC(&pVCpu->iem.s.StatCheckBranchMisses); \
                return VINF_IEM_REEXEC_BREAK; \
            } \
            else \
            { \
                Log7(("TB obsolete: %p at %04x:%08RX64 LB %u; branching/1; GCPhysWithOffset=%RGp expected %RGp, pbInstrBuf=%p - #%u\n", \
                      (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), \
                      pVCpu->iem.s.GCPhysInstrBuf + off, GCPhysRangePageWithOffset, pVCpu->iem.s.pbInstrBuf, __LINE__)); \
                RT_NOREF(a_cbInstr); \
                return iemThreadedFuncWorkerObsoleteTb(pVCpu); \
            } \
        } \
        else \
        { \
            /* Must translate new RIP. */ \
            pVCpu->iem.s.pbInstrBuf       = NULL; \
            pVCpu->iem.s.offCurInstrStart = 0; \
            pVCpu->iem.s.offInstrNextByte = 0; \
            iemOpcodeFetchBytesJmp(pVCpu, 0, NULL); \
            Assert(!(pVCpu->iem.s.GCPhysInstrBuf & GUEST_PAGE_OFFSET_MASK) || !pVCpu->iem.s.pbInstrBuf); \
            \
            RTGCPHYS const GCPhysRangePageWithOffset = iemTbGetRangePhysPageAddr(a_pTb, a_idxRange) \
                                                     | (a_pTb)->aRanges[(a_idxRange)].offPhysPage; \
            uint64_t const offNew = uPc - pVCpu->iem.s.uInstrBufPc; \
            if (   GCPhysRangePageWithOffset == pVCpu->iem.s.GCPhysInstrBuf + offNew \
                && pVCpu->iem.s.pbInstrBuf) \
            { /* likely */ } \
            else if (   (a_pTb)->aRanges[(a_idxRange)].offPhysPage != offNew \
                     && pVCpu->iem.s.pbInstrBuf) \
            { \
                Log7(("TB jmp miss: %p at %04x:%08RX64 LB %u; branching/2; GCPhysWithOffset=%RGp expected %RGp, pbInstrBuf=%p - #%u\n", \
                      (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), \
                      pVCpu->iem.s.GCPhysInstrBuf + offNew, GCPhysRangePageWithOffset, pVCpu->iem.s.pbInstrBuf, __LINE__)); \
                RT_NOREF(a_cbInstr); \
                STAM_REL_COUNTER_INC(&pVCpu->iem.s.StatCheckBranchMisses); \
                return VINF_IEM_REEXEC_BREAK; \
            } \
            else \
            { \
                Log7(("TB obsolete: %p at %04x:%08RX64 LB %u; branching/2; GCPhysWithOffset=%RGp expected %RGp, pbInstrBuf=%p - #%u\n", \
                      (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), \
                      pVCpu->iem.s.GCPhysInstrBuf + offNew, GCPhysRangePageWithOffset, pVCpu->iem.s.pbInstrBuf, __LINE__)); \
                RT_NOREF(a_cbInstr); \
                return iemThreadedFuncWorkerObsoleteTb(pVCpu); \
            } \
        } \
    } while(0)
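
/* Illustrative decision sketch for the branch case above (simplified types,
   not the real IEM structures): same physical address means the TB still
   matches; a differing page offset is merely a branch miss; the same offset on
   a different page suggests the code was remapped and the TB is obsolete.

       #include <stdint.h>

       enum SketchVerdict { SKETCH_OK, SKETCH_JMP_MISS, SKETCH_OBSOLETE };

       static enum SketchVerdict sketchClassifyBranchTarget(uint64_t GCPhysExpected, uint64_t GCPhysActual,
                                                            uint64_t offExpected, uint64_t offActual)
       {
           if (GCPhysExpected == GCPhysActual)
               return SKETCH_OK;
           if (offExpected != offActual)
               return SKETCH_JMP_MISS;
           return SKETCH_OBSOLETE;
       }
*/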

/**
 * Macro that implements PC check after a conditional branch.
 */
#define BODY_CHECK_PC_AFTER_BRANCH(a_pTb, a_idxRange, a_offRange, a_cbInstr) do { \
        /* Is RIP within the current code page? */ \
        Assert(pVCpu->cpum.GstCtx.cs.u64Base == 0 || !IEM_IS_64BIT_CODE(pVCpu)); \
        uint64_t const uPc = pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base; \
        uint64_t const off = uPc - pVCpu->iem.s.uInstrBufPc; \
        Assert(!(pVCpu->iem.s.GCPhysInstrBuf & GUEST_PAGE_OFFSET_MASK)); \
        RTGCPHYS const GCPhysRangePageWithOffset = (  iemTbGetRangePhysPageAddr(a_pTb, a_idxRange) \
                                                    | (a_pTb)->aRanges[(a_idxRange)].offPhysPage) \
                                                 + (a_offRange); \
        if (   GCPhysRangePageWithOffset == pVCpu->iem.s.GCPhysInstrBuf + off \
            && off < /*pVCpu->iem.s.cbInstrBufTotal - ignore flushes; CS.LIM is checked elsewhere*/ X86_PAGE_SIZE) \
        { /* we're good */ } \
        else \
        { \
            Log7(("TB jmp miss: %p at %04x:%08RX64 LB %u; GCPhysWithOffset=%RGp hoped for %RGp, pbInstrBuf=%p - #%u\n", \
                  (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), \
                  pVCpu->iem.s.GCPhysInstrBuf + off, GCPhysRangePageWithOffset, pVCpu->iem.s.pbInstrBuf, __LINE__)); \
            RT_NOREF(a_cbInstr); \
            STAM_REL_COUNTER_INC(&pVCpu->iem.s.StatCheckBranchMisses); \
            return VINF_IEM_REEXEC_BREAK; \
        } \
    } while(0)



/**
 * Built-in function that checks that EIP/IP + uParam0 is within CS.LIM,
 * raising a \#GP(0) if this isn't the case.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckCsLim)
{
    uint32_t const cbInstr = (uint8_t)uParam0;
    RT_NOREF(uParam1, uParam2);
    BODY_CHECK_CS_LIM(cbInstr);
    return VINF_SUCCESS;
}


/**
 * Built-in function for re-checking opcodes and CS.LIM after an instruction
 * that may have modified them.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckCsLimAndOpcodes)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint8_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    BODY_CHECK_CS_LIM(cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    return VINF_SUCCESS;
}


/**
 * Built-in function for re-checking opcodes after an instruction that may have
 * modified them.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodes)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint8_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    return VINF_SUCCESS;
}


/**
 * Built-in function for re-checking opcodes and considering the need for CS.LIM
 * checking after an instruction that may have modified them.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesConsiderCsLim)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint8_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    BODY_CONSIDER_CS_LIM_CHECKING(pTb, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    return VINF_SUCCESS;
}


/*
 * Post-branching checkers.
 */

/**
 * Built-in function for checking CS.LIM, checking the PC and checking opcodes
 * after conditional branching within the same page.
 *
 * @see iemThreadedFunc_BltIn_CheckPcAndOpcodes
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckCsLimAndPcAndOpcodes)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint8_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes));
    BODY_CHECK_CS_LIM(cbInstr);
    BODY_CHECK_PC_AFTER_BRANCH(pTb, idxRange, offRange, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    //LogFunc(("okay\n"));
    return VINF_SUCCESS;
}


/**
 * Built-in function for checking the PC and checking opcodes after conditional
 * branching within the same page.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndPcAndOpcodes
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckPcAndOpcodes)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint8_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes));
    BODY_CHECK_PC_AFTER_BRANCH(pTb, idxRange, offRange, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    //LogFunc(("okay\n"));
    return VINF_SUCCESS;
}


/**
 * Built-in function for checking the PC and checking opcodes and considering
 * the need for CS.LIM checking after conditional branching within the same
 * page.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndPcAndOpcodes
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckPcAndOpcodesConsiderCsLim)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint8_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes));
    BODY_CONSIDER_CS_LIM_CHECKING(pTb, cbInstr);
    BODY_CHECK_PC_AFTER_BRANCH(pTb, idxRange, offRange, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    //LogFunc(("okay\n"));
    return VINF_SUCCESS;
}


/**
 * Built-in function for checking CS.LIM, loading TLB and checking opcodes when
 * transitioning to a different code page.
 *
 * The code page transition can either be natural, over onto the next page
 * (with the instruction starting at page offset zero), or by means of
 * branching.
 *
 * @see iemThreadedFunc_BltIn_CheckOpcodesLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckCsLimAndOpcodesLoadingTlb)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint8_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes));
    BODY_CHECK_CS_LIM(cbInstr);
    BODY_LOAD_TLB_AFTER_BRANCH(pTb, idxRange, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    //LogFunc(("okay\n"));
    return VINF_SUCCESS;
}


/**
 * Built-in function for loading TLB and checking opcodes when transitioning to
 * a different code page.
 *
 * The code page transition can either be natural, over onto the next page
 * (with the instruction starting at page offset zero), or by means of
 * branching.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesLoadingTlb)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint8_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes));
    BODY_LOAD_TLB_AFTER_BRANCH(pTb, idxRange, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    //LogFunc(("okay\n"));
    return VINF_SUCCESS;
}


/**
 * Built-in function for loading TLB and checking opcodes and considering the
 * need for CS.LIM checking when transitioning to a different code page.
 *
 * The code page transition can either be natural, over onto the next page
 * (with the instruction starting at page offset zero), or by means of
 * branching.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesLoadingTlbConsiderCsLim)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint8_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes));
    BODY_CONSIDER_CS_LIM_CHECKING(pTb, cbInstr);
    BODY_LOAD_TLB_AFTER_BRANCH(pTb, idxRange, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    //LogFunc(("okay\n"));
    return VINF_SUCCESS;
}



/*
 * Natural page crossing checkers.
 */

/**
 * Built-in function for checking CS.LIM, loading TLB and checking opcodes on
 * both pages when transitioning to a different code page.
 *
 * This is used when the previous instruction requires revalidation of opcode
 * bytes and the current instruction straddles a page boundary with opcode
 * bytes in both the old and new page.
 *
 * @see iemThreadedFunc_BltIn_CheckOpcodesAcrossPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckCsLimAndOpcodesAcrossPageLoadingTlb)
{
    PCIEMTB const  pTb         = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr     = (uint8_t)uParam0;
    uint32_t const cbStartPage = (uint32_t)(uParam0 >> 32);
    uint32_t const idxRange1   = (uint32_t)uParam1;
    uint32_t const offRange1   = (uint32_t)uParam2;
    uint32_t const idxRange2   = idxRange1 + 1;
    BODY_CHECK_CS_LIM(cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange1, offRange1, cbInstr);
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, cbStartPage, idxRange2, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange2, 0, cbInstr);
    return VINF_SUCCESS;
}
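
/* Sketch of how uParam0 packs two values for the AcrossPage/OnNextPage
   variants (the unpacking lines mirror the functions in this file; the packing
   helper itself is made up for illustration):

       #include <stdint.h>

       static uint64_t sketchPackParam0(uint8_t cbInstr, uint32_t cbStartPage)
       {
           // Low 32 bits: instruction length; high 32 bits: opcode bytes on the
           // first of the two pages.
           return (uint64_t)cbStartPage << 32 | cbInstr;
       }

       // Unpacking, as done by the built-in functions:
       //     cbInstr     = (uint8_t)uParam0;
       //     cbStartPage = (uint32_t)(uParam0 >> 32);
*/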


/**
 * Built-in function for loading TLB and checking opcodes on both pages when
 * transitioning to a different code page.
 *
 * This is used when the previous instruction requires revalidation of opcode
 * bytes and the current instruction straddles a page boundary with opcode
 * bytes in both the old and new page.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesAcrossPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesAcrossPageLoadingTlb)
{
    PCIEMTB const  pTb         = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr     = (uint8_t)uParam0;
    uint32_t const cbStartPage = (uint32_t)(uParam0 >> 32);
    uint32_t const idxRange1   = (uint32_t)uParam1;
    uint32_t const offRange1   = (uint32_t)uParam2;
    uint32_t const idxRange2   = idxRange1 + 1;
    BODY_CHECK_OPCODES(pTb, idxRange1, offRange1, cbInstr);
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, cbStartPage, idxRange2, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange2, 0, cbInstr);
    return VINF_SUCCESS;
}


/**
 * Built-in function for loading TLB and checking opcodes on both pages and
 * considering the need for CS.LIM checking when transitioning to a different
 * code page.
 *
 * This is used when the previous instruction requires revalidation of opcode
 * bytes and the current instruction straddles a page boundary with opcode
 * bytes in both the old and new page.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesAcrossPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesAcrossPageLoadingTlbConsiderCsLim)
{
    PCIEMTB const  pTb         = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr     = (uint8_t)uParam0;
    uint32_t const cbStartPage = (uint32_t)(uParam0 >> 32);
    uint32_t const idxRange1   = (uint32_t)uParam1;
    uint32_t const offRange1   = (uint32_t)uParam2;
    uint32_t const idxRange2   = idxRange1 + 1;
    BODY_CONSIDER_CS_LIM_CHECKING(pTb, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange1, offRange1, cbInstr);
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, cbStartPage, idxRange2, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange2, 0, cbInstr);
    return VINF_SUCCESS;
}


/**
 * Built-in function for checking CS.LIM, loading TLB and checking opcodes when
 * advancing naturally to a different code page.
 *
 * Only opcodes on the new page are checked.
 *
 * @see iemThreadedFunc_BltIn_CheckOpcodesOnNextPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNextPageLoadingTlb)
{
    PCIEMTB const  pTb         = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr     = (uint8_t)uParam0;
    uint32_t const cbStartPage = (uint32_t)(uParam0 >> 32);
    uint32_t const idxRange1   = (uint32_t)uParam1;
    //uint32_t const offRange1   = (uint32_t)uParam2;
    uint32_t const idxRange2   = idxRange1 + 1;
    BODY_CHECK_CS_LIM(cbInstr);
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, cbStartPage, idxRange2, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange2, 0, cbInstr);
    RT_NOREF(uParam2);
    return VINF_SUCCESS;
}


/**
 * Built-in function for loading TLB and checking opcodes when advancing
 * naturally to a different code page.
 *
 * Only opcodes on the new page are checked.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNextPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesOnNextPageLoadingTlb)
{
    PCIEMTB const  pTb         = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr     = (uint8_t)uParam0;
    uint32_t const cbStartPage = (uint32_t)(uParam0 >> 32);
    uint32_t const idxRange1   = (uint32_t)uParam1;
    //uint32_t const offRange1   = (uint32_t)uParam2;
    uint32_t const idxRange2   = idxRange1 + 1;
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, cbStartPage, idxRange2, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange2, 0, cbInstr);
    RT_NOREF(uParam2);
    return VINF_SUCCESS;
}


/**
 * Built-in function for loading TLB and checking opcodes and considering the
 * need for CS.LIM checking when advancing naturally to a different code page.
 *
 * Only opcodes on the new page are checked.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNextPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesOnNextPageLoadingTlbConsiderCsLim)
{
    PCIEMTB const  pTb         = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr     = (uint8_t)uParam0;
    uint32_t const cbStartPage = (uint32_t)(uParam0 >> 32);
    uint32_t const idxRange1   = (uint32_t)uParam1;
    //uint32_t const offRange1   = (uint32_t)uParam2;
    uint32_t const idxRange2   = idxRange1 + 1;
    BODY_CONSIDER_CS_LIM_CHECKING(pTb, cbInstr);
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, cbStartPage, idxRange2, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange2, 0, cbInstr);
    RT_NOREF(uParam2);
    return VINF_SUCCESS;
}


/**
 * Built-in function for checking CS.LIM, loading TLB and checking opcodes when
 * advancing naturally to a different code page with the first instruction at
 * byte 0.
 *
 * @see iemThreadedFunc_BltIn_CheckOpcodesOnNewPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNewPageLoadingTlb)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint8_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    RT_NOREF(uParam2); //Assert(uParam2 == 0 /*offRange*/);
    BODY_CHECK_CS_LIM(cbInstr);
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, 0, idxRange, cbInstr);
    Assert(pVCpu->iem.s.offCurInstrStart == 0);
    BODY_CHECK_OPCODES(pTb, idxRange, 0, cbInstr);
    return VINF_SUCCESS;
}


/**
 * Built-in function for loading TLB and checking opcodes when advancing
 * naturally to a different code page with the first instruction at byte 0.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNewPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesOnNewPageLoadingTlb)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint8_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    RT_NOREF(uParam2); //Assert(uParam2 == 0 /*offRange*/);
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, 0, idxRange, cbInstr);
    Assert(pVCpu->iem.s.offCurInstrStart == 0);
    BODY_CHECK_OPCODES(pTb, idxRange, 0, cbInstr);
    return VINF_SUCCESS;
}


/**
 * Built-in function for loading TLB and checking opcodes and considering the
 * need for CS.LIM checking when advancing naturally to a different code page
 * with the first instruction at byte 0.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNewPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesOnNewPageLoadingTlbConsiderCsLim)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint8_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    RT_NOREF(uParam2); //Assert(uParam2 == 0 /*offRange*/);
    BODY_CONSIDER_CS_LIM_CHECKING(pTb, cbInstr);
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, 0, idxRange, cbInstr);
    Assert(pVCpu->iem.s.offCurInstrStart == 0);
    BODY_CHECK_OPCODES(pTb, idxRange, 0, cbInstr);
    return VINF_SUCCESS;
}


/**
 * Built-in function for jumping in the call sequence.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_Jump)
{
    Assert(uParam1 == 0 && uParam2 == 0);
    RT_NOREF(pVCpu, uParam0, uParam1, uParam2);
    return VINF_IEM_REEXEC_JUMP;
}
