VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllThrdFuncsBltIn.cpp@ 104956

Last change on this file since 104956 was 104468, checked in by vboxsync, 7 months ago

VMM/IEM: Deal with the simplest direct 'linking' of TBs scenario for relative jumps, when staying within the same code page. bugref:10656

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 34.0 KB
/* $Id: IEMAllThrdFuncsBltIn.cpp 104468 2024-05-01 00:43:28Z vboxsync $ */
/** @file
 * IEM - Instruction Decoding and Emulation, Built-in Threaded Functions.
 *
 * This is separate from IEMThreadedFunctions.cpp because it doesn't work
 * with IEM_WITH_OPAQUE_DECODER_STATE defined.
 */

/*
 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_IEM_RE_THREADED
#define VMCPU_INCL_CPUM_GST_CTX
#include <VBox/vmm/iem.h>
#include <VBox/vmm/cpum.h>
#include <VBox/vmm/apic.h>
#include <VBox/vmm/pdm.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/iom.h>
#include <VBox/vmm/em.h>
#include <VBox/vmm/hm.h>
#include <VBox/vmm/nem.h>
#include <VBox/vmm/gim.h>
#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
# include <VBox/vmm/em.h>
# include <VBox/vmm/hm_svm.h>
#endif
#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
# include <VBox/vmm/hmvmxinline.h>
#endif
#include <VBox/vmm/tm.h>
#include <VBox/vmm/dbgf.h>
#include <VBox/vmm/dbgftrace.h>
#include "IEMInternal.h"
#include <VBox/vmm/vmcc.h>
#include <VBox/log.h>
#include <VBox/err.h>
#include <VBox/param.h>
#include <VBox/dis.h>
#include <VBox/disopcode-x86-amd64.h>
#include <iprt/asm-math.h>
#include <iprt/assert.h>
#include <iprt/string.h>
#include <iprt/x86.h>

#include "IEMInline.h"



static VBOXSTRICTRC iemThreadeFuncWorkerObsoleteTb(PVMCPUCC pVCpu)
{
    /* We set fSafeToFree to false because we're being called in the context
       of a TB callback function, which for native TBs means we cannot release
       the executable memory until we've returned all the way back to iemTbExec,
       as that return path goes via the native code generated for the TB. */
    iemThreadedTbObsolete(pVCpu, pVCpu->iem.s.pCurTbR3, false /*fSafeToFree*/);
    return VINF_IEM_REEXEC_BREAK;
}


/**
 * Built-in function that does absolutely nothing - for debugging.
 *
 * This can be used for artificially increasing the number of calls generated,
 * or for triggering flushes associated with threaded calls.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_Nop)
{
    RT_NOREF(pVCpu, uParam0, uParam1, uParam2);
    return VINF_SUCCESS;
}



/**
 * This is also called from iemNativeHlpAsmSafeWrapLogCpuState.
 */
DECLASM(void) iemThreadedFunc_BltIn_LogCpuStateWorker(PVMCPU pVCpu)
{
#ifdef LOG_ENABLED
    PCIEMTB const      pTb     = pVCpu->iem.s.pCurTbR3;
    PCX86FXSTATE const pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
    Log2(("**** LG%c fExec=%x pTb=%p cUsed=%u\n"
          " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
          " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
          " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
          " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
          , pTb && (pTb->fFlags & IEMTB_F_TYPE_NATIVE) ? 'n' : 't', pVCpu->iem.s.fExec, pTb, pTb ? pTb->cUsed : 0,
          pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
          pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
          pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
          pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
          pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK ));
#else
    RT_NOREF(pVCpu);
#endif
}


/**
 * Built-in function that logs the current CPU state - for debugging.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_LogCpuState)
{
    iemThreadedFunc_BltIn_LogCpuStateWorker(pVCpu);
    RT_NOREF(uParam0, uParam1, uParam2);
    return VINF_SUCCESS;
}


/**
 * Built-in function that calls a C-implementation function taking zero
 * arguments.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_DeferToCImpl0)
{
    PFNIEMCIMPL0 const pfnCImpl = (PFNIEMCIMPL0)(uintptr_t)uParam0;
    uint8_t const      cbInstr  = (uint8_t)uParam1;
    RT_NOREF(uParam2);
    return pfnCImpl(pVCpu, cbInstr);
}
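
/* Illustration only - an assumption about the emitter, not code from this
   file: the threaded recompiler would pack a deferred C-impl call into the
   three generic parameters roughly like this (using iemCImpl_hlt and the
   IEMTHRDEDCALLENTRY layout as assumed examples, for a one-byte HLT):

       pCall->enmFunction = kIemThreadedFunc_BltIn_DeferToCImpl0;
       pCall->auParams[0] = (uintptr_t)iemCImpl_hlt;   // pfnCImpl (uParam0)
       pCall->auParams[1] = 1;                         // cbInstr  (uParam1)
       pCall->auParams[2] = 0;                         // unused   (uParam2)

   The function pointer must survive a round-trip through uintptr_t, which
   is what the (PFNIEMCIMPL0)(uintptr_t) cast above relies on. */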


/**
 * Built-in function that checks for pending interrupts that can be delivered
 * and for forced action flags.
 *
 * This triggers after the completion of an instruction, so EIP is already at
 * the next instruction. If an IRQ or important FF is pending, this will return
 * a non-zero status that stops TB execution.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckIrq)
{
    RT_NOREF(uParam0, uParam1, uParam2);

    /*
     * Check for IRQs and other FFs that need servicing.
     */
    uint64_t fCpu = pVCpu->fLocalForcedActions;
    fCpu &= VMCPU_FF_ALL_MASK & ~(  VMCPU_FF_PGM_SYNC_CR3
                                  | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
                                  | VMCPU_FF_TLB_FLUSH
                                  | VMCPU_FF_UNHALT );
    /** @todo this isn't even close to the NMI and interrupt conditions in EM! */
    if (RT_LIKELY(   (   !fCpu
                      || (   !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
                          && (   !pVCpu->cpum.GstCtx.rflags.Bits.u1IF
                              || CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)) ) )
                  && !VM_FF_IS_ANY_SET(pVCpu->CTX_SUFF(pVM), VM_FF_ALL_MASK) ))
        return VINF_SUCCESS;

    Log(("%04x:%08RX32: Pending IRQ and/or FF: fCpu=%#RX64 fVm=%#RX32 IF=%d\n",
         pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, fCpu,
         pVCpu->CTX_SUFF(pVM)->fGlobalForcedActions & VM_FF_ALL_MASK, pVCpu->cpum.GstCtx.rflags.Bits.u1IF));
    STAM_REL_COUNTER_INC(&pVCpu->iem.s.StatCheckIrqBreaks);
    return VINF_IEM_REEXEC_BREAK;
}
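
/* Reading the check above: execution continues (VINF_SUCCESS) when either no
   relevant VMCPU force flag is pending at all, or the only pending ones are
   APIC/PIC interrupts that cannot be delivered right now because IF=0 or the
   previous instruction left us in an interrupt shadow (e.g. after STI or
   MOV SS) - and in both cases no VM-wide force flag is set either.  Anything
   else breaks out of the TB so the outer execution loop can service it. */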


/**
 * Built-in function that compares the fExec mask against uParam0.
 *
 * This is used both for IEM_CIMPL_F_MODE and IEM_CIMPL_F_VMEXIT after executing
 * an instruction.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckMode)
{
    uint32_t const fExpectedExec = (uint32_t)uParam0;
    if ((pVCpu->iem.s.fExec & IEMTB_F_KEY_MASK) == (fExpectedExec & IEMTB_F_KEY_MASK))
        return VINF_SUCCESS;
    LogFlow(("Mode changed at %04x:%08RX64: %#x -> %#x (xor: %#x, xor-key: %#x)\n",
             pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fExpectedExec,
             pVCpu->iem.s.fExec, fExpectedExec ^ pVCpu->iem.s.fExec, (fExpectedExec ^ pVCpu->iem.s.fExec) & IEMTB_F_KEY_MASK));
    RT_NOREF(uParam1, uParam2);
    STAM_REL_COUNTER_INC(&pVCpu->iem.s.StatCheckModeBreaks);
    return VINF_IEM_REEXEC_BREAK;
}
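
/* Illustrative scenario (not from this file): a TB compiled for 32-bit
   protected mode ends with an instruction that switches to 16-bit code.
   IEMTB_F_KEY_MASK extracts the fExec bits that participate in TB lookup, so
   the comparison above fails, the function returns VINF_IEM_REEXEC_BREAK,
   and the executor looks up (or recompiles) a TB keyed on the new mode
   instead of running a stale translation. */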


/**
 * Built-in function that checks for hardware instruction breakpoints.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckHwInstrBps)
{
    VBOXSTRICTRC rcStrict = DBGFBpCheckInstruction(pVCpu->CTX_SUFF(pVM), pVCpu,
                                                   pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
    if (RT_LIKELY(rcStrict == VINF_SUCCESS))
        return VINF_SUCCESS;

    if (rcStrict == VINF_EM_RAW_GUEST_TRAP)
    {
        LogFlow(("Guest HW bp at %04x:%08RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
        rcStrict = iemRaiseDebugException(pVCpu);
        Assert(rcStrict != VINF_SUCCESS);
    }
    else
        LogFlow(("VBoxDbg HW bp at %04x:%08RX64: %Rrc\n",
                 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, VBOXSTRICTRC_VAL(rcStrict) ));
    RT_NOREF(uParam0, uParam1, uParam2);
    return rcStrict;
}


/**
 * Macro that implements the 16/32-bit CS.LIM check, as this is done by a
 * number of functions.
 */
/** @todo consider 32-bit EIP mid-instruction wrap-around... Difficult to
 *        test, since it would require replacing the default firmware. */
#define BODY_CHECK_CS_LIM(a_cbInstr) do { \
        if (RT_LIKELY((uint32_t)(pVCpu->cpum.GstCtx.eip + cbInstr - 1U) <= pVCpu->cpum.GstCtx.cs.u32Limit)) \
        { /* likely */ } \
        else \
        { \
            Log7(("EIP out of bounds at %04x:%08RX32 LB %u - CS.LIM=%#RX32\n", \
                  pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, (a_cbInstr), pVCpu->cpum.GstCtx.cs.u32Limit)); \
            return iemRaiseGeneralProtectionFault0(pVCpu); \
        } \
    } while(0)
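
/* Worked example for BODY_CHECK_CS_LIM (illustration): with a real-mode style
   CS.LIM of 0xffff, EIP=0xfffe and a 3 byte instruction, the last opcode byte
   would sit at 0x10000, and indeed eip + cbInstr - 1 = 0x10000 > 0xffff, so
   #GP(0) is raised.  The '- 1U' makes the comparison cover the final opcode
   byte rather than the byte after the instruction; the (uint32_t) cast keeps
   the arithmetic in 32-bit space (see the @todo above regarding
   mid-instruction EIP wrap-around). */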

/**
 * Macro that considers whether we need CS.LIM checking after a branch or
 * crossing over to a new page.
 */
#define BODY_CONSIDER_CS_LIM_CHECKING(a_pTb, a_cbInstr) do { \
        int64_t const offFromLim = (int64_t)pVCpu->cpum.GstCtx.cs.u32Limit - (int64_t)pVCpu->cpum.GstCtx.eip; \
        if (offFromLim >= GUEST_PAGE_SIZE + 16 - (int32_t)(pVCpu->cpum.GstCtx.cs.u64Base & GUEST_PAGE_OFFSET_MASK)) \
        { /* likely */ } \
        else \
        { \
            Log7(("TB need CS.LIM: %p at %04x:%08RX64 LB %u; #%u offFromLim=%#RX64 CS.LIM=%#RX32 CS.BASE=%#RX64\n", \
                  (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), offFromLim, \
                  pVCpu->cpum.GstCtx.cs.u32Limit, pVCpu->cpum.GstCtx.cs.u64Base, __LINE__)); \
            RT_NOREF(a_pTb, a_cbInstr); \
            STAM_REL_COUNTER_INC(&pVCpu->iem.s.StatCheckNeedCsLimChecking); \
            return VINF_IEM_REEXEC_BREAK; \
        } \
    } while(0)
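
/* Numeric sketch (illustration): with GUEST_PAGE_SIZE = 4096, a flat CS.BASE
   of 0, CS.LIM = 0x1fff and EIP = 0x1000, offFromLim = 0xfff (4095), which is
   below 4096 + 16, so the TB is abandoned and recompiled with explicit CS.LIM
   checks; at EIP = 0 the headroom is 0x1fff (8191) and no checks are needed.
   The 16-byte slack covers the longest x86 instruction, and the
   base-page-offset term accounts for CS.BASE pushing the mapping off page
   alignment. */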

/**
 * Macro that implements opcode (re-)checking.
 */
#define BODY_CHECK_OPCODES(a_pTb, a_idxRange, a_offRange, a_cbInstr) do { \
        Assert((a_idxRange) < (a_pTb)->cRanges && (a_pTb)->cRanges <= RT_ELEMENTS((a_pTb)->aRanges)); \
        Assert((a_offRange) < (a_pTb)->aRanges[(a_idxRange)].cbOpcodes); \
        /* We can use pbInstrBuf here as it will be updated when branching (and prior to executing a TB). */ \
        if (RT_LIKELY(memcmp(&pVCpu->iem.s.pbInstrBuf[(a_pTb)->aRanges[(a_idxRange)].offPhysPage + (a_offRange)], \
                             &(a_pTb)->pabOpcodes[    (a_pTb)->aRanges[(a_idxRange)].offOpcodes  + (a_offRange)], \
                             (a_pTb)->aRanges[(a_idxRange)].cbOpcodes - (a_offRange)) == 0)) \
        { /* likely */ } \
        else \
        { \
            Log7(("TB obsolete: %p at %04x:%08RX64 LB %u; range %u, off %#x LB %#x + %#x; #%u\n", (a_pTb), \
                  pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), (a_idxRange), \
                  (a_pTb)->aRanges[(a_idxRange)].offOpcodes, (a_pTb)->aRanges[(a_idxRange)].cbOpcodes, (a_offRange), __LINE__)); \
            RT_NOREF(a_cbInstr); \
            return iemThreadeFuncWorkerObsoleteTb(pVCpu); \
        } \
    } while(0)
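
/* What the memcmp above catches (summary): pabOpcodes holds the bytes the TB
   was compiled from, while pbInstrBuf maps the guest page as it is now.  If
   the guest code was modified in place - self-modifying code, a debugger
   writing INT3, a page being reused for something else - the two ranges no
   longer match and the TB is retired via iemThreadeFuncWorkerObsoleteTb
   rather than executing a stale translation. */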

/**
 * Macro that implements the TLB load and pbInstrBuf update for an instruction
 * crossing into a new page.
 *
 * This may long jump if we're raising a \#PF, \#GP or similar trouble.
 */
#define BODY_LOAD_TLB_FOR_NEW_PAGE(a_pTb, a_offInstr, a_idxRange, a_cbInstr) do { \
        pVCpu->iem.s.pbInstrBuf       = NULL; \
        pVCpu->iem.s.offCurInstrStart = GUEST_PAGE_SIZE - (a_offInstr); \
        pVCpu->iem.s.offInstrNextByte = GUEST_PAGE_SIZE; \
        iemOpcodeFetchBytesJmp(pVCpu, 0, NULL); \
        \
        RTGCPHYS const GCPhysNewPage = iemTbGetRangePhysPageAddr(a_pTb, a_idxRange); \
        if (RT_LIKELY(   pVCpu->iem.s.GCPhysInstrBuf == GCPhysNewPage \
                      && pVCpu->iem.s.pbInstrBuf)) \
        { /* likely */ } \
        else \
        { \
            Log7(("TB obsolete: %p at %04x:%08RX64 LB %u; crossing at %#x; GCPhys=%RGp expected %RGp, pbInstrBuf=%p - #%u\n", \
                  (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), (a_offInstr), \
                  pVCpu->iem.s.GCPhysInstrBuf, GCPhysNewPage, pVCpu->iem.s.pbInstrBuf, __LINE__)); \
            RT_NOREF(a_cbInstr); \
            return iemThreadeFuncWorkerObsoleteTb(pVCpu); \
        } \
    } while(0)
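
/* How the reload above works (summary): clearing pbInstrBuf and pointing
   offInstrNextByte at the page boundary makes iemOpcodeFetchBytesJmp perform
   the ordinary cross-page fetch, walking the TLB/page tables and possibly
   longjmp'ing with #PF or #GP.  Afterwards the macro only trusts the result
   if the buffer now maps exactly the physical page the TB range was compiled
   against; any remapping means the TB is obsolete. */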

/**
 * Macro that implements the TLB load and pbInstrBuf update when branching or
 * when crossing a page on an instruction boundary.
 *
 * This differs from BODY_LOAD_TLB_FOR_NEW_PAGE in that it will first check if
 * it is an inter-page branch and also check the page offset.
 *
 * This may long jump if we're raising a \#PF, \#GP or similar trouble.
 */
#define BODY_LOAD_TLB_AFTER_BRANCH(a_pTb, a_idxRange, a_cbInstr) do { \
        /* Is RIP within the current code page? */ \
        Assert(pVCpu->cpum.GstCtx.cs.u64Base == 0 || !IEM_IS_64BIT_CODE(pVCpu)); \
        uint64_t const uPc = pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base; \
        uint64_t const off = uPc - pVCpu->iem.s.uInstrBufPc; \
        if (off < pVCpu->iem.s.cbInstrBufTotal) \
        { \
            Assert(!(pVCpu->iem.s.GCPhysInstrBuf & GUEST_PAGE_OFFSET_MASK)); \
            Assert(pVCpu->iem.s.pbInstrBuf); \
            RTGCPHYS const GCPhysRangePageWithOffset = iemTbGetRangePhysPageAddr(a_pTb, a_idxRange) \
                                                     | pTb->aRanges[(a_idxRange)].offPhysPage; \
            if (GCPhysRangePageWithOffset == pVCpu->iem.s.GCPhysInstrBuf + off) \
            { /* we're good */ } \
            /** @todo r=bird: Not sure if we need the TB obsolete complication here. \
             *        If we're preceded by an indirect jump, there is no reason why the TB \
             *        would be 'obsolete' just because this time around the indirect jump ends \
             *        up at the same offset in a different page. This would be real bad for \
             *        indirect trampolines/validators. */ \
            else if (pTb->aRanges[(a_idxRange)].offPhysPage != off) \
            { \
                Log7(("TB jmp miss: %p at %04x:%08RX64 LB %u; branching/1; GCPhysWithOffset=%RGp expected %RGp, pbInstrBuf=%p - #%u\n", \
                      (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), \
                      pVCpu->iem.s.GCPhysInstrBuf + off, GCPhysRangePageWithOffset, pVCpu->iem.s.pbInstrBuf, __LINE__)); \
                RT_NOREF(a_cbInstr); \
                STAM_REL_COUNTER_INC(&pVCpu->iem.s.StatCheckBranchMisses); \
                return VINF_IEM_REEXEC_BREAK; \
            } \
            else \
            { \
                Log7(("TB obsolete: %p at %04x:%08RX64 LB %u; branching/1; GCPhysWithOffset=%RGp expected %RGp, pbInstrBuf=%p - #%u\n", \
                      (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), \
                      pVCpu->iem.s.GCPhysInstrBuf + off, GCPhysRangePageWithOffset, pVCpu->iem.s.pbInstrBuf, __LINE__)); \
                RT_NOREF(a_cbInstr); \
                return iemThreadeFuncWorkerObsoleteTb(pVCpu); \
            } \
        } \
        else \
        { \
            /* Must translate new RIP. */ \
            pVCpu->iem.s.pbInstrBuf       = NULL; \
            pVCpu->iem.s.offCurInstrStart = 0; \
            pVCpu->iem.s.offInstrNextByte = 0; \
            iemOpcodeFetchBytesJmp(pVCpu, 0, NULL); \
            Assert(!(pVCpu->iem.s.GCPhysInstrBuf & GUEST_PAGE_OFFSET_MASK) || !pVCpu->iem.s.pbInstrBuf); \
            \
            RTGCPHYS const GCPhysRangePageWithOffset = iemTbGetRangePhysPageAddr(a_pTb, a_idxRange) \
                                                     | pTb->aRanges[(a_idxRange)].offPhysPage; \
            uint64_t const offNew = uPc - pVCpu->iem.s.uInstrBufPc; \
            if (   GCPhysRangePageWithOffset == pVCpu->iem.s.GCPhysInstrBuf + offNew \
                && pVCpu->iem.s.pbInstrBuf) \
            { /* likely */ } \
            else if (   pTb->aRanges[(a_idxRange)].offPhysPage != offNew \
                     && pVCpu->iem.s.pbInstrBuf) \
            { \
                Log7(("TB jmp miss: %p at %04x:%08RX64 LB %u; branching/2; GCPhysWithOffset=%RGp expected %RGp, pbInstrBuf=%p - #%u\n", \
                      (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), \
                      pVCpu->iem.s.GCPhysInstrBuf + offNew, GCPhysRangePageWithOffset, pVCpu->iem.s.pbInstrBuf, __LINE__)); \
                RT_NOREF(a_cbInstr); \
                STAM_REL_COUNTER_INC(&pVCpu->iem.s.StatCheckBranchMisses); \
                return VINF_IEM_REEXEC_BREAK; \
            } \
            else \
            { \
                Log7(("TB obsolete: %p at %04x:%08RX64 LB %u; branching/2; GCPhysWithOffset=%RGp expected %RGp, pbInstrBuf=%p - #%u\n", \
                      (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), \
                      pVCpu->iem.s.GCPhysInstrBuf + offNew, GCPhysRangePageWithOffset, pVCpu->iem.s.pbInstrBuf, __LINE__)); \
                RT_NOREF(a_cbInstr); \
                return iemThreadeFuncWorkerObsoleteTb(pVCpu); \
            } \
        } \
    } while(0)
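
/* Decision table for the in-buffer branch case above (summary):
     1. Target still inside the current instruction buffer and the physical
        address matches the TB range       -> continue execution.
     2. Page offset differs from the range -> branch miss, break out and look
        up / compile another TB (StatCheckBranchMisses).
     3. Same offset but a different physical page -> the code was remapped,
        so the TB is retired as obsolete (see the r=bird todo about whether
        that is too aggressive for indirect branch trampolines).
   The 'else' half re-translates RIP from scratch and applies the same
   three-way classification to the freshly loaded buffer. */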

/**
 * Macro that implements the PC check after a conditional branch.
 */
#define BODY_CHECK_PC_AFTER_BRANCH(a_pTb, a_idxRange, a_offRange, a_cbInstr) do { \
        /* Is RIP within the current code page? */ \
        Assert(pVCpu->cpum.GstCtx.cs.u64Base == 0 || !IEM_IS_64BIT_CODE(pVCpu)); \
        uint64_t const uPc = pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base; \
        uint64_t const off = uPc - pVCpu->iem.s.uInstrBufPc; \
        Assert(!(pVCpu->iem.s.GCPhysInstrBuf & GUEST_PAGE_OFFSET_MASK)); \
        RTGCPHYS const GCPhysRangePageWithOffset = (  iemTbGetRangePhysPageAddr(a_pTb, a_idxRange) \
                                                    | (a_pTb)->aRanges[(a_idxRange)].offPhysPage) \
                                                 + (a_offRange); \
        if (   GCPhysRangePageWithOffset == pVCpu->iem.s.GCPhysInstrBuf + off \
            && off < /*pVCpu->iem.s.cbInstrBufTotal - ignore flushes and CS.LIM is checked elsewhere*/ X86_PAGE_SIZE) \
        { /* we're good */ } \
        else \
        { \
            Log7(("TB jmp miss: %p at %04x:%08RX64 LB %u; GCPhysWithOffset=%RGp hoped for %RGp, pbInstrBuf=%p - #%u\n", \
                  (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), \
                  pVCpu->iem.s.GCPhysInstrBuf + off, GCPhysRangePageWithOffset, pVCpu->iem.s.pbInstrBuf, __LINE__)); \
            RT_NOREF(a_cbInstr); \
            STAM_REL_COUNTER_INC(&pVCpu->iem.s.StatCheckBranchMisses); \
            return VINF_IEM_REEXEC_BREAK; \
        } \
    } while(0)
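
/* Summary of the check above (illustration): a conditional branch within the
   page is verified by translating the new RIP to a physical address and
   comparing it with the physical address recorded for the TB range plus the
   expected offset.  A mismatch counts as a branch miss and breaks out of the
   TB.  Bounding off by X86_PAGE_SIZE rather than cbInstrBufTotal is
   deliberate: a buffer flush should not be misclassified, and CS.LIM is
   checked elsewhere. */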



/**
 * Built-in function that checks that EIP/IP + uParam0 is within CS.LIM,
 * raising a \#GP(0) if this isn't the case.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckCsLim)
{
    uint32_t const cbInstr = (uint8_t)uParam0;
    RT_NOREF(uParam1, uParam2);
    BODY_CHECK_CS_LIM(cbInstr);
    return VINF_SUCCESS;
}


/**
 * Built-in function for re-checking opcodes and CS.LIM after an instruction
 * that may have modified them.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckCsLimAndOpcodes)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint8_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    BODY_CHECK_CS_LIM(cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    return VINF_SUCCESS;
}


/**
 * Built-in function for re-checking opcodes after an instruction that may have
 * modified them.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodes)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint8_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    return VINF_SUCCESS;
}


/**
 * Built-in function for re-checking opcodes and considering the need for CS.LIM
 * checking after an instruction that may have modified them.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesConsiderCsLim)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint8_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    BODY_CONSIDER_CS_LIM_CHECKING(pTb, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    return VINF_SUCCESS;
}


/*
 * Post-branching checkers.
 */

/**
 * Built-in function for checking CS.LIM, checking the PC and checking opcodes
 * after conditional branching within the same page.
 *
 * @see iemThreadedFunc_BltIn_CheckPcAndOpcodes
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckCsLimAndPcAndOpcodes)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint8_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes));
    BODY_CHECK_CS_LIM(cbInstr);
    BODY_CHECK_PC_AFTER_BRANCH(pTb, idxRange, offRange, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    //LogFunc(("okay\n"));
    return VINF_SUCCESS;
}


/**
 * Built-in function for checking the PC and checking opcodes after conditional
 * branching within the same page.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndPcAndOpcodes
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckPcAndOpcodes)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint8_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes));
    BODY_CHECK_PC_AFTER_BRANCH(pTb, idxRange, offRange, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    //LogFunc(("okay\n"));
    return VINF_SUCCESS;
}


/**
 * Built-in function for checking the PC and checking opcodes and considering
 * the need for CS.LIM checking after conditional branching within the same
 * page.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndPcAndOpcodes
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckPcAndOpcodesConsiderCsLim)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint8_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes));
    BODY_CONSIDER_CS_LIM_CHECKING(pTb, cbInstr);
    BODY_CHECK_PC_AFTER_BRANCH(pTb, idxRange, offRange, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    //LogFunc(("okay\n"));
    return VINF_SUCCESS;
}


/**
 * Built-in function for checking CS.LIM, loading TLB and checking opcodes when
 * transitioning to a different code page.
 *
 * The code page transition can either be natural over onto the next page (with
 * the instruction starting at page offset zero) or by means of branching.
 *
 * @see iemThreadedFunc_BltIn_CheckOpcodesLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckCsLimAndOpcodesLoadingTlb)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint8_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes));
    BODY_CHECK_CS_LIM(cbInstr);
    BODY_LOAD_TLB_AFTER_BRANCH(pTb, idxRange, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    //LogFunc(("okay\n"));
    return VINF_SUCCESS;
}


/**
 * Built-in function for loading TLB and checking opcodes when transitioning to
 * a different code page.
 *
 * The code page transition can either be natural over onto the next page (with
 * the instruction starting at page offset zero) or by means of branching.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesLoadingTlb)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint8_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes));
    BODY_LOAD_TLB_AFTER_BRANCH(pTb, idxRange, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    //LogFunc(("okay\n"));
    return VINF_SUCCESS;
}


/**
 * Built-in function for loading TLB and checking opcodes and considering the
 * need for CS.LIM checking when transitioning to a different code page.
 *
 * The code page transition can either be natural over onto the next page (with
 * the instruction starting at page offset zero) or by means of branching.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesLoadingTlbConsiderCsLim)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint8_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes));
    BODY_CONSIDER_CS_LIM_CHECKING(pTb, cbInstr);
    BODY_LOAD_TLB_AFTER_BRANCH(pTb, idxRange, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    //LogFunc(("okay\n"));
    return VINF_SUCCESS;
}



/*
 * Natural page crossing checkers.
 */

/**
 * Built-in function for checking CS.LIM, loading TLB and checking opcodes on
 * both pages when transitioning to a different code page.
 *
 * This is used when the previous instruction requires revalidation of the
 * opcode bytes and the current instruction straddles a page boundary with
 * opcode bytes in both the old and new page.
 *
 * @see iemThreadedFunc_BltIn_CheckOpcodesAcrossPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckCsLimAndOpcodesAcrossPageLoadingTlb)
{
    PCIEMTB const  pTb         = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr     = (uint8_t)uParam0;
    uint32_t const cbStartPage = (uint32_t)(uParam0 >> 32);
    uint32_t const idxRange1   = (uint32_t)uParam1;
    uint32_t const offRange1   = (uint32_t)uParam2;
    uint32_t const idxRange2   = idxRange1 + 1;
    BODY_CHECK_CS_LIM(cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange1, offRange1, cbInstr);
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, cbStartPage, idxRange2, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange2, 0, cbInstr);
    return VINF_SUCCESS;
}
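
/* Parameter packing used by the three AcrossPage variants (as decoded above):
   uParam0 carries the instruction length in its low bits and cbStartPage -
   the number of opcode bytes that fit on the first page - in its high 32
   bits, leaving uParam1/uParam2 free for the range index and offset.
   cbStartPage doubles as the page-boundary offset within the instruction
   that BODY_LOAD_TLB_FOR_NEW_PAGE needs. */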


/**
 * Built-in function for loading TLB and checking opcodes on both pages when
 * transitioning to a different code page.
 *
 * This is used when the previous instruction requires revalidation of the
 * opcode bytes and the current instruction straddles a page boundary with
 * opcode bytes in both the old and new page.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesAcrossPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesAcrossPageLoadingTlb)
{
    PCIEMTB const  pTb         = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr     = (uint8_t)uParam0;
    uint32_t const cbStartPage = (uint32_t)(uParam0 >> 32);
    uint32_t const idxRange1   = (uint32_t)uParam1;
    uint32_t const offRange1   = (uint32_t)uParam2;
    uint32_t const idxRange2   = idxRange1 + 1;
    BODY_CHECK_OPCODES(pTb, idxRange1, offRange1, cbInstr);
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, cbStartPage, idxRange2, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange2, 0, cbInstr);
    return VINF_SUCCESS;
}


/**
 * Built-in function for loading TLB and checking opcodes on both pages and
 * considering the need for CS.LIM checking when transitioning to a different
 * code page.
 *
 * This is used when the previous instruction requires revalidation of the
 * opcode bytes and the current instruction straddles a page boundary with
 * opcode bytes in both the old and new page.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesAcrossPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesAcrossPageLoadingTlbConsiderCsLim)
{
    PCIEMTB const  pTb         = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr     = (uint8_t)uParam0;
    uint32_t const cbStartPage = (uint32_t)(uParam0 >> 32);
    uint32_t const idxRange1   = (uint32_t)uParam1;
    uint32_t const offRange1   = (uint32_t)uParam2;
    uint32_t const idxRange2   = idxRange1 + 1;
    BODY_CONSIDER_CS_LIM_CHECKING(pTb, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange1, offRange1, cbInstr);
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, cbStartPage, idxRange2, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange2, 0, cbInstr);
    return VINF_SUCCESS;
}


/**
 * Built-in function for checking CS.LIM, loading TLB and checking opcodes when
 * advancing naturally to a different code page.
 *
 * Only opcodes on the new page are checked.
 *
 * @see iemThreadedFunc_BltIn_CheckOpcodesOnNextPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNextPageLoadingTlb)
{
    PCIEMTB const  pTb         = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr     = (uint8_t)uParam0;
    uint32_t const cbStartPage = (uint32_t)(uParam0 >> 32);
    uint32_t const idxRange1   = (uint32_t)uParam1;
    //uint32_t const offRange1   = (uint32_t)uParam2;
    uint32_t const idxRange2   = idxRange1 + 1;
    BODY_CHECK_CS_LIM(cbInstr);
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, cbStartPage, idxRange2, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange2, 0, cbInstr);
    RT_NOREF(uParam2);
    return VINF_SUCCESS;
}


/**
 * Built-in function for loading TLB and checking opcodes when advancing
 * naturally to a different code page.
 *
 * Only opcodes on the new page are checked.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNextPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesOnNextPageLoadingTlb)
{
    PCIEMTB const  pTb         = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr     = (uint8_t)uParam0;
    uint32_t const cbStartPage = (uint32_t)(uParam0 >> 32);
    uint32_t const idxRange1   = (uint32_t)uParam1;
    //uint32_t const offRange1   = (uint32_t)uParam2;
    uint32_t const idxRange2   = idxRange1 + 1;
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, cbStartPage, idxRange2, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange2, 0, cbInstr);
    RT_NOREF(uParam2);
    return VINF_SUCCESS;
}


/**
 * Built-in function for loading TLB and checking opcodes and considering the
 * need for CS.LIM checking when advancing naturally to a different code page.
 *
 * Only opcodes on the new page are checked.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNextPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesOnNextPageLoadingTlbConsiderCsLim)
{
    PCIEMTB const  pTb         = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr     = (uint8_t)uParam0;
    uint32_t const cbStartPage = (uint32_t)(uParam0 >> 32);
    uint32_t const idxRange1   = (uint32_t)uParam1;
    //uint32_t const offRange1   = (uint32_t)uParam2;
    uint32_t const idxRange2   = idxRange1 + 1;
    BODY_CONSIDER_CS_LIM_CHECKING(pTb, cbInstr);
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, cbStartPage, idxRange2, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange2, 0, cbInstr);
    RT_NOREF(uParam2);
    return VINF_SUCCESS;
}


/**
 * Built-in function for checking CS.LIM, loading TLB and checking opcodes when
 * advancing naturally to a different code page with the first instruction
 * starting at byte 0.
 *
 * @see iemThreadedFunc_BltIn_CheckOpcodesOnNewPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNewPageLoadingTlb)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint8_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    RT_NOREF(uParam2); //Assert(uParam2 == 0 /*offRange*/);
    BODY_CHECK_CS_LIM(cbInstr);
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, 0, idxRange, cbInstr);
    Assert(pVCpu->iem.s.offCurInstrStart == 0);
    BODY_CHECK_OPCODES(pTb, idxRange, 0, cbInstr);
    return VINF_SUCCESS;
}


/**
 * Built-in function for loading TLB and checking opcodes when advancing
 * naturally to a different code page with the first instruction starting at
 * byte 0.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNewPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesOnNewPageLoadingTlb)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint8_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    RT_NOREF(uParam2); //Assert(uParam2 == 0 /*offRange*/);
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, 0, idxRange, cbInstr);
    Assert(pVCpu->iem.s.offCurInstrStart == 0);
    BODY_CHECK_OPCODES(pTb, idxRange, 0, cbInstr);
    return VINF_SUCCESS;
}


/**
 * Built-in function for loading TLB and checking opcodes and considering the
 * need for CS.LIM checking when advancing naturally to a different code page
 * with the first instruction starting at byte 0.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNewPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesOnNewPageLoadingTlbConsiderCsLim)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint8_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    RT_NOREF(uParam2); //Assert(uParam2 == 0 /*offRange*/);
    BODY_CONSIDER_CS_LIM_CHECKING(pTb, cbInstr);
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, 0, idxRange, cbInstr);
    Assert(pVCpu->iem.s.offCurInstrStart == 0);
    BODY_CHECK_OPCODES(pTb, idxRange, 0, cbInstr);
    return VINF_SUCCESS;
}