/* $Id: IEMAllThrdFuncsBltIn.cpp 101640 2023-10-28 01:01:28Z vboxsync $ */
/** @file
 * IEM - Instruction Decoding and Emulation, Built-in Threaded Functions.
 *
 * This is separate from IEMThreadedFunctions.cpp because it doesn't work
 * with IEM_WITH_OPAQUE_DECODER_STATE defined.
 */

/*
 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_IEM_RE_THREADED
#define VMCPU_INCL_CPUM_GST_CTX
#include <VBox/vmm/iem.h>
#include <VBox/vmm/cpum.h>
#include <VBox/vmm/apic.h>
#include <VBox/vmm/pdm.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/iom.h>
#include <VBox/vmm/em.h>
#include <VBox/vmm/hm.h>
#include <VBox/vmm/nem.h>
#include <VBox/vmm/gim.h>
#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
# include <VBox/vmm/em.h>
# include <VBox/vmm/hm_svm.h>
#endif
#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
# include <VBox/vmm/hmvmxinline.h>
#endif
#include <VBox/vmm/tm.h>
#include <VBox/vmm/dbgf.h>
#include <VBox/vmm/dbgftrace.h>
#include "IEMInternal.h"
#include <VBox/vmm/vmcc.h>
#include <VBox/log.h>
#include <VBox/err.h>
#include <VBox/param.h>
#include <VBox/dis.h>
#include <VBox/disopcode-x86-amd64.h>
#include <iprt/asm-math.h>
#include <iprt/assert.h>
#include <iprt/string.h>
#include <iprt/x86.h>

#include "IEMInline.h"

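/**
 * Worker that marks the current translation block (TB) as obsolete and tells
 * the caller to break out of TB execution.
 */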
static VBOXSTRICTRC iemThreadeFuncWorkerObsoleteTb(PVMCPUCC pVCpu)
{
    /* We set fSafeToFree to false because we're being called in the context
       of a TB callback function, which for native TBs means we cannot release
       the executable memory until we've returned our way back to iemTbExec,
       as that return path goes via the native code generated for the TB. */
    iemThreadedTbObsolete(pVCpu, pVCpu->iem.s.pCurTbR3, false /*fSafeToFree*/);
    return VINF_IEM_REEXEC_BREAK;
}


/**
 * Built-in function that calls a C-implementation function taking zero arguments.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_DeferToCImpl0)
{
    PFNIEMCIMPL0 const pfnCImpl = (PFNIEMCIMPL0)(uintptr_t)uParam0;
    uint8_t const      cbInstr  = (uint8_t)uParam1;
    RT_NOREF(uParam2);
    return pfnCImpl(pVCpu, cbInstr);
}
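
/* Illustrative note (editor's assumption, not from this file): for a deferred
   'hlt', the TB compiler would stash the address of the corresponding C
   worker in uParam0 and the instruction length (1 byte) in uParam1. */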


/**
 * Built-in function that checks for pending interrupts that can be delivered or
 * forced action flags.
 *
 * This triggers after the completion of an instruction, so EIP is already at
 * the next instruction. If an IRQ or important FF is pending, this will return
 * a non-zero status that stops TB execution.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckIrq)
{
    RT_NOREF(uParam0, uParam1, uParam2);

    /*
     * Check for IRQs and other FFs that need servicing.
     */
    uint64_t fCpu = pVCpu->fLocalForcedActions;
    fCpu &= VMCPU_FF_ALL_MASK & ~(  VMCPU_FF_PGM_SYNC_CR3
                                  | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
                                  | VMCPU_FF_TLB_FLUSH
                                  | VMCPU_FF_UNHALT );
    /** @todo this isn't even close to the NMI and interrupt conditions in EM! */
    if (RT_LIKELY(   (   !fCpu
                      || (   !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
                          && (   !pVCpu->cpum.GstCtx.rflags.Bits.u1IF
                              || CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)) ) )
                  && !VM_FF_IS_ANY_SET(pVCpu->CTX_SUFF(pVM), VM_FF_ALL_MASK) ))
        return VINF_SUCCESS;

    Log(("%04x:%08RX32: Pending IRQ and/or FF: fCpu=%#RX64 fVm=%#RX32 IF=%d\n",
         pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, fCpu,
         pVCpu->CTX_SUFF(pVM)->fGlobalForcedActions & VM_FF_ALL_MASK, pVCpu->cpum.GstCtx.rflags.Bits.u1IF));
    STAM_REL_COUNTER_INC(&pVCpu->iem.s.StatCheckIrqBreaks);
    return VINF_IEM_REEXEC_BREAK;
}
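
/* Execution continues only when no relevant local FF is pending, or when the
   only pending ones are APIC/PIC interrupts that cannot be delivered right
   now (IF clear or interrupt shadow), and no VM-wide FF is set; anything
   else breaks TB execution so the outer loop can service it. */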


/**
 * Built-in function that compares the fExec mask against uParam0.
 *
 * This is used both for IEM_CIMPL_F_MODE and IEM_CIMPL_F_VMEXIT after executing
 * an instruction.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckMode)
{
    uint32_t const fExpectedExec = (uint32_t)uParam0;
    if ((pVCpu->iem.s.fExec & IEMTB_F_KEY_MASK) == (fExpectedExec & IEMTB_F_KEY_MASK))
        return VINF_SUCCESS;
    LogFlow(("Mode changed at %04x:%08RX64: %#x -> %#x (xor: %#x, xor-key: %#x)\n",
             pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fExpectedExec,
             pVCpu->iem.s.fExec, fExpectedExec ^ pVCpu->iem.s.fExec, (fExpectedExec ^ pVCpu->iem.s.fExec) & IEMTB_F_KEY_MASK));
    RT_NOREF(uParam1, uParam2);
    STAM_REL_COUNTER_INC(&pVCpu->iem.s.StatCheckModeBreaks);
    return VINF_IEM_REEXEC_BREAK;
}


/**
 * Built-in function that checks for hardware instruction breakpoints.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckHwInstrBps)
{
    VBOXSTRICTRC rcStrict = DBGFBpCheckInstruction(pVCpu->CTX_SUFF(pVM), pVCpu,
                                                   pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
    if (RT_LIKELY(rcStrict == VINF_SUCCESS))
        return VINF_SUCCESS;

    if (rcStrict == VINF_EM_RAW_GUEST_TRAP)
    {
        LogFlow(("Guest HW bp at %04x:%08RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
        rcStrict = iemRaiseDebugException(pVCpu);
        Assert(rcStrict != VINF_SUCCESS);
    }
    else
        LogFlow(("VBoxDbg HW bp at %04x:%08RX64: %Rrc\n",
                 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, VBOXSTRICTRC_VAL(rcStrict) ));
    RT_NOREF(uParam0, uParam1, uParam2);
    return rcStrict;
}

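/**
 * Gets the guest physical address of the page containing the given TB opcode
 * range.
 *
 * An idxPhysPage of zero denotes the page of the TB's first instruction
 * (GCPhysPc); non-zero values index into the aGCPhysPages table.
 */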
DECL_FORCE_INLINE(RTGCPHYS) iemTbGetRangePhysPageAddr(PCIEMTB pTb, uint8_t idxRange)
{
    Assert(idxRange < RT_MIN(pTb->cRanges, RT_ELEMENTS(pTb->aRanges)));
    uint8_t const idxPage = pTb->aRanges[idxRange].idxPhysPage;
    Assert(idxPage <= RT_ELEMENTS(pTb->aGCPhysPages));
    if (idxPage == 0)
        return pTb->GCPhysPc & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
    Assert(!(pTb->aGCPhysPages[idxPage - 1] & GUEST_PAGE_OFFSET_MASK));
    return pTb->aGCPhysPages[idxPage - 1];
}


/**
 * Macro that implements the 16/32-bit CS.LIM check, as this is done by a
 * number of functions.
 */
/** @todo consider 32-bit EIP mid-instruction wrap-around... Difficult to
 *        test, since it would require replacing the default firmware. */
#define BODY_CHECK_CS_LIM(a_cbInstr) do { \
        if (RT_LIKELY((uint32_t)(pVCpu->cpum.GstCtx.eip + (a_cbInstr) - 1U) <= pVCpu->cpum.GstCtx.cs.u32Limit)) \
        { /* likely */ } \
        else \
        { \
            Log7(("EIP out of bounds at %04x:%08RX32 LB %u - CS.LIM=%#RX32\n", \
                  pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, (a_cbInstr), pVCpu->cpum.GstCtx.cs.u32Limit)); \
            return iemRaiseGeneralProtectionFault0(pVCpu); \
        } \
    } while(0)
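
/* Note: CS.u32Limit is the last valid offset (inclusive), so the last byte of
   the instruction, EIP + cbInstr - 1, must not exceed it; the uint32_t cast
   keeps the sum within 32-bit arithmetic. */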

/**
 * Macro that implements opcode (re-)checking.
 */
#define BODY_CHECK_OPCODES(a_pTb, a_idxRange, a_offRange, a_cbInstr) do { \
        Assert((a_idxRange) < (a_pTb)->cRanges && (a_pTb)->cRanges <= RT_ELEMENTS((a_pTb)->aRanges)); \
        Assert((a_offRange) < (a_pTb)->aRanges[(a_idxRange)].cbOpcodes); \
        /* We can use pbInstrBuf here as it will be updated when branching (and prior to executing a TB). */ \
        if (RT_LIKELY(memcmp(&pVCpu->iem.s.pbInstrBuf[(a_pTb)->aRanges[(a_idxRange)].offPhysPage + (a_offRange)], \
                             &(a_pTb)->pabOpcodes[ (a_pTb)->aRanges[(a_idxRange)].offOpcodes + (a_offRange)], \
                             (a_pTb)->aRanges[(a_idxRange)].cbOpcodes - (a_offRange)) == 0)) \
        { /* likely */ } \
        else \
        { \
            Log7(("TB obsolete: %p at %04x:%08RX64 LB %u; range %u, off %#x LB %#x + %#x; #%u\n", (a_pTb), \
                  pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), (a_idxRange), \
                  (a_pTb)->aRanges[(a_idxRange)].offOpcodes, (a_pTb)->aRanges[(a_idxRange)].cbOpcodes, (a_offRange), __LINE__)); \
            RT_NOREF(a_cbInstr); \
            return iemThreadeFuncWorkerObsoleteTb(pVCpu); \
        } \
    } while(0)
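
/* Only the tail of the range, from a_offRange to its end, is compared; the
   preceding bytes are presumably covered by checks emitted earlier in the TB.
   A mismatch means the guest has modified its own code, so the TB is retired
   as obsolete. */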

/**
 * Macro that implements TLB loading and pbInstrBuf updating for an
 * instruction crossing into a new page.
 *
 * This may long jump if we're raising a \#PF, \#GP or similar trouble.
 */
#define BODY_LOAD_TLB_FOR_NEW_PAGE(a_pTb, a_offInstr, a_idxRange, a_cbInstr) do { \
        pVCpu->iem.s.pbInstrBuf       = NULL; \
        pVCpu->iem.s.offCurInstrStart = GUEST_PAGE_SIZE - (a_offInstr); \
        pVCpu->iem.s.offInstrNextByte = GUEST_PAGE_SIZE; \
        iemOpcodeFetchBytesJmp(pVCpu, 0, NULL); \
        \
        RTGCPHYS const GCPhysNewPage = iemTbGetRangePhysPageAddr(a_pTb, a_idxRange); \
        if (RT_LIKELY(   pVCpu->iem.s.GCPhysInstrBuf == GCPhysNewPage \
                      && pVCpu->iem.s.pbInstrBuf)) \
        { /* likely */ } \
        else \
        { \
            Log7(("TB obsolete: %p at %04x:%08RX64 LB %u; crossing at %#x; GCPhys=%RGp expected %RGp, pbInstrBuf=%p - #%u\n", \
                  (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), (a_offInstr), \
                  pVCpu->iem.s.GCPhysInstrBuf, GCPhysNewPage, pVCpu->iem.s.pbInstrBuf, __LINE__)); \
            RT_NOREF(a_cbInstr); \
            return iemThreadeFuncWorkerObsoleteTb(pVCpu); \
        } \
    } while(0)
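
/* Clearing pbInstrBuf forces iemOpcodeFetchBytesJmp to re-translate the PC
   and map the new code page; the check afterwards verifies that this is still
   the physical page the TB was compiled against, retiring the TB if not. */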

/**
 * Macro that implements TLB loading and pbInstrBuf updating when branching or
 * when crossing a page on an instruction boundary.
 *
 * This differs from BODY_LOAD_TLB_FOR_NEW_PAGE in that it will first check if
 * it is an inter-page branch and also check the page offset.
 *
 * This may long jump if we're raising a \#PF, \#GP or similar trouble.
 */
#define BODY_LOAD_TLB_AFTER_BRANCH(a_pTb, a_idxRange, a_cbInstr) do { \
        /* Is RIP within the current code page? */ \
        Assert(pVCpu->cpum.GstCtx.cs.u64Base == 0 || !IEM_IS_64BIT_CODE(pVCpu)); \
        uint64_t const uPc = pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base; \
        uint64_t const off = uPc - pVCpu->iem.s.uInstrBufPc; \
        if (off < pVCpu->iem.s.cbInstrBufTotal) \
        { \
            Assert(!(pVCpu->iem.s.GCPhysInstrBuf & GUEST_PAGE_OFFSET_MASK)); \
            Assert(pVCpu->iem.s.pbInstrBuf); \
            RTGCPHYS const GCPhysRangePageWithOffset = iemTbGetRangePhysPageAddr(a_pTb, a_idxRange) \
                                                     | (a_pTb)->aRanges[(a_idxRange)].offPhysPage; \
            if (GCPhysRangePageWithOffset == pVCpu->iem.s.GCPhysInstrBuf + off) \
            { /* we're good */ } \
            else if ((a_pTb)->aRanges[(a_idxRange)].offPhysPage != off) \
            { \
                Log7(("TB jmp miss: %p at %04x:%08RX64 LB %u; branching/1; GCPhysWithOffset=%RGp expected %RGp, pbInstrBuf=%p - #%u\n", \
                      (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), \
                      pVCpu->iem.s.GCPhysInstrBuf + off, GCPhysRangePageWithOffset, pVCpu->iem.s.pbInstrBuf, __LINE__)); \
                RT_NOREF(a_cbInstr); \
                STAM_REL_COUNTER_INC(&pVCpu->iem.s.StatCheckBranchMisses); \
                return VINF_IEM_REEXEC_BREAK; \
            } \
            else \
            { \
                Log7(("TB obsolete: %p at %04x:%08RX64 LB %u; branching/1; GCPhysWithOffset=%RGp expected %RGp, pbInstrBuf=%p - #%u\n", \
                      (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), \
                      pVCpu->iem.s.GCPhysInstrBuf + off, GCPhysRangePageWithOffset, pVCpu->iem.s.pbInstrBuf, __LINE__)); \
                RT_NOREF(a_cbInstr); \
                return iemThreadeFuncWorkerObsoleteTb(pVCpu); \
            } \
        } \
        else \
        { \
            /* Must translate new RIP. */ \
            pVCpu->iem.s.pbInstrBuf       = NULL; \
            pVCpu->iem.s.offCurInstrStart = 0; \
            pVCpu->iem.s.offInstrNextByte = 0; \
            iemOpcodeFetchBytesJmp(pVCpu, 0, NULL); \
            Assert(!(pVCpu->iem.s.GCPhysInstrBuf & GUEST_PAGE_OFFSET_MASK) || !pVCpu->iem.s.pbInstrBuf); \
            \
            RTGCPHYS const GCPhysRangePageWithOffset = iemTbGetRangePhysPageAddr(a_pTb, a_idxRange) \
                                                     | (a_pTb)->aRanges[(a_idxRange)].offPhysPage; \
            uint64_t const offNew = uPc - pVCpu->iem.s.uInstrBufPc; \
            if (   GCPhysRangePageWithOffset == pVCpu->iem.s.GCPhysInstrBuf + offNew \
                && pVCpu->iem.s.pbInstrBuf) \
            { /* likely */ } \
            else if (   (a_pTb)->aRanges[(a_idxRange)].offPhysPage != offNew \
                     && pVCpu->iem.s.pbInstrBuf) \
            { \
                Log7(("TB jmp miss: %p at %04x:%08RX64 LB %u; branching/2; GCPhysWithOffset=%RGp expected %RGp, pbInstrBuf=%p - #%u\n", \
                      (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), \
                      pVCpu->iem.s.GCPhysInstrBuf + offNew, GCPhysRangePageWithOffset, pVCpu->iem.s.pbInstrBuf, __LINE__)); \
                RT_NOREF(a_cbInstr); \
                STAM_REL_COUNTER_INC(&pVCpu->iem.s.StatCheckBranchMisses); \
                return VINF_IEM_REEXEC_BREAK; \
            } \
            else \
            { \
                Log7(("TB obsolete: %p at %04x:%08RX64 LB %u; branching/2; GCPhysWithOffset=%RGp expected %RGp, pbInstrBuf=%p - #%u\n", \
                      (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), \
                      pVCpu->iem.s.GCPhysInstrBuf + offNew, GCPhysRangePageWithOffset, pVCpu->iem.s.pbInstrBuf, __LINE__)); \
                RT_NOREF(a_cbInstr); \
                return iemThreadeFuncWorkerObsoleteTb(pVCpu); \
            } \
        } \
    } while(0)
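
/* Two distinct failure modes above: a "jmp miss" (the branch landed at a page
   offset the TB range does not expect) merely breaks out so a matching TB can
   be looked up or compiled, while the "obsolete" outcome (the offset matches
   but the physical page differs) retires the current TB. */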

/**
 * Macro that implements PC check after a conditional branch.
 */
#define BODY_CHECK_PC_AFTER_BRANCH(a_pTb, a_idxRange, a_cbInstr) do { \
        /* Is RIP within the current code page? */ \
        Assert(pVCpu->cpum.GstCtx.cs.u64Base == 0 || !IEM_IS_64BIT_CODE(pVCpu)); \
        uint64_t const uPc = pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base; \
        uint64_t const off = uPc - pVCpu->iem.s.uInstrBufPc; \
        Assert(!(pVCpu->iem.s.GCPhysInstrBuf & GUEST_PAGE_OFFSET_MASK)); \
        RTGCPHYS const GCPhysRangePageWithOffset = iemTbGetRangePhysPageAddr(a_pTb, a_idxRange) \
                                                 | (a_pTb)->aRanges[(a_idxRange)].offPhysPage; \
        if (   GCPhysRangePageWithOffset == pVCpu->iem.s.GCPhysInstrBuf + off \
            && off < pVCpu->iem.s.cbInstrBufTotal) \
        { /* we're good */ } \
        else \
        { \
            Log7(("TB jmp miss: %p at %04x:%08RX64 LB %u; GCPhysWithOffset=%RGp hoped for %RGp, pbInstrBuf=%p - #%u\n", \
                  (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), \
                  pVCpu->iem.s.GCPhysInstrBuf + off, GCPhysRangePageWithOffset, pVCpu->iem.s.pbInstrBuf, __LINE__)); \
            RT_NOREF(a_cbInstr); \
            STAM_REL_COUNTER_INC(&pVCpu->iem.s.StatCheckBranchMisses); \
            return VINF_IEM_REEXEC_BREAK; \
        } \
    } while(0)
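
/* Unlike BODY_LOAD_TLB_AFTER_BRANCH, this is used when the branch target is
   expected to stay within the already-mapped code page, so no TLB reload is
   attempted; any mismatch simply breaks TB execution as a branch miss. */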

/**
 * Macro that considers whether we need CS.LIM checking after a branch or
 * crossing over to a new page.
 *
 * This may long jump if we're raising a \#PF, \#GP or similar trouble.
 */
#define BODY_CONSIDER_CS_LIM_CHECKING(a_pTb, a_cbInstr) do { \
        int64_t const offFromLim = (int64_t)pVCpu->cpum.GstCtx.cs.u32Limit - (int64_t)pVCpu->cpum.GstCtx.eip; \
        if (offFromLim >= GUEST_PAGE_SIZE + 16 - (int32_t)(pVCpu->cpum.GstCtx.cs.u64Base & GUEST_PAGE_OFFSET_MASK)) \
        { /* likely */ } \
        else \
        { \
            Log7(("TB need CS.LIM: %p at %04x:%08RX64 LB %u; #%u offFromLim=%#RX64 CS.LIM=%#RX32 CS.BASE=%#RX64\n", \
                  (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), __LINE__, offFromLim, \
                  pVCpu->cpum.GstCtx.cs.u32Limit, pVCpu->cpum.GstCtx.cs.u64Base)); \
            RT_NOREF(a_pTb, a_cbInstr); \
            STAM_REL_COUNTER_INC(&pVCpu->iem.s.StatCheckNeedCsLimChecking); \
            return VINF_IEM_REEXEC_BREAK; \
        } \
    } while(0)
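
/* The GUEST_PAGE_SIZE + 16 margin means the limit lies at least a full page
   plus a maximum-length x86 instruction (15 bytes) beyond EIP, so nothing
   reachable from this page can cross CS.LIM and the per-instruction limit
   checks can be skipped. */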


/**
 * Built-in function that checks that EIP/IP plus uParam0 (the instruction
 * length) is within CS.LIM, raising a \#GP(0) if this isn't the case.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckCsLim)
{
    uint32_t const cbInstr = (uint32_t)uParam0;
    RT_NOREF(uParam1, uParam2);
    BODY_CHECK_CS_LIM(cbInstr);
    return VINF_SUCCESS;
}


/**
 * Built-in function for re-checking opcodes and CS.LIM after an instruction
 * that may have modified them.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckCsLimAndOpcodes)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint32_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    BODY_CHECK_CS_LIM(cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    return VINF_SUCCESS;
}


/**
 * Built-in function for re-checking opcodes after an instruction that may have
 * modified them.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodes)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint32_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    return VINF_SUCCESS;
}


/**
 * Built-in function for re-checking opcodes and considering the need for CS.LIM
 * checking after an instruction that may have modified them.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesConsiderCsLim)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint32_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    BODY_CONSIDER_CS_LIM_CHECKING(pTb, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    return VINF_SUCCESS;
}


/*
 * Post-branching checkers.
 */

/**
 * Built-in function for checking CS.LIM, checking the PC and checking opcodes
 * after conditional branching within the same page.
 *
 * @see iemThreadedFunc_BltIn_CheckPcAndOpcodes
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckCsLimAndPcAndOpcodes)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint32_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes));
    BODY_CHECK_CS_LIM(cbInstr);
    BODY_CHECK_PC_AFTER_BRANCH(pTb, idxRange, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    //LogFunc(("okay\n"));
    return VINF_SUCCESS;
}


/**
 * Built-in function for checking the PC and checking opcodes after conditional
 * branching within the same page.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndPcAndOpcodes
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckPcAndOpcodes)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint32_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes));
    BODY_CHECK_PC_AFTER_BRANCH(pTb, idxRange, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    //LogFunc(("okay\n"));
    return VINF_SUCCESS;
}


/**
 * Built-in function for checking the PC and checking opcodes and considering
 * the need for CS.LIM checking after conditional branching within the same
 * page.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndPcAndOpcodes
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckPcAndOpcodesConsiderCsLim)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint32_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes));
    BODY_CONSIDER_CS_LIM_CHECKING(pTb, cbInstr);
    BODY_CHECK_PC_AFTER_BRANCH(pTb, idxRange, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    //LogFunc(("okay\n"));
    return VINF_SUCCESS;
}


/**
 * Built-in function for checking CS.LIM, loading TLB and checking opcodes when
 * transitioning to a different code page.
 *
 * The code page transition can either be natural, flowing over onto the next
 * page (with the instruction starting at page offset zero), or by means of
 * branching.
 *
 * @see iemThreadedFunc_BltIn_CheckOpcodesLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckCsLimAndOpcodesLoadingTlb)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint32_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes));
    BODY_CHECK_CS_LIM(cbInstr);
    BODY_LOAD_TLB_AFTER_BRANCH(pTb, idxRange, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    //LogFunc(("okay\n"));
    return VINF_SUCCESS;
}


/**
 * Built-in function for loading TLB and checking opcodes when transitioning to
 * a different code page.
 *
 * The code page transition can either be natural, flowing over onto the next
 * page (with the instruction starting at page offset zero), or by means of
 * branching.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesLoadingTlb)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint32_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes));
    BODY_LOAD_TLB_AFTER_BRANCH(pTb, idxRange, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    //LogFunc(("okay\n"));
    return VINF_SUCCESS;
}


/**
 * Built-in function for loading TLB and checking opcodes and considering the
 * need for CS.LIM checking when transitioning to a different code page.
 *
 * The code page transition can either be natural, flowing over onto the next
 * page (with the instruction starting at page offset zero), or by means of
 * branching.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesLoadingTlbConsiderCsLim)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint32_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes));
    BODY_CONSIDER_CS_LIM_CHECKING(pTb, cbInstr);
    BODY_LOAD_TLB_AFTER_BRANCH(pTb, idxRange, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    //LogFunc(("okay\n"));
    return VINF_SUCCESS;
}



/*
 * Natural page crossing checkers.
 */

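/*
 * Note on parameter packing for most of the checkers below: uParam0 carries
 * the instruction length in its low dword and the number of instruction bytes
 * on the first page in its high dword, while uParam1 and uParam2 carry the
 * first range index and offset. A sketch of the producing side (editor's
 * illustration; variable names are hypothetical, not from this file):
 *
 *     uint64_t const uParam0 = RT_MAKE_U64(cbInstr, cbStartPage);
 *     uint64_t const uParam1 = idxRange1;
 *     uint64_t const uParam2 = offRange1;
 */
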
/**
 * Built-in function for checking CS.LIM, loading TLB and checking opcodes on
 * both pages when transitioning to a different code page.
 *
 * This is used when the previous instruction requires revalidation of opcode
 * bytes and the current instruction straddles a page boundary with opcode
 * bytes in both the old and new page.
 *
 * @see iemThreadedFunc_BltIn_CheckOpcodesAcrossPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckCsLimAndOpcodesAcrossPageLoadingTlb)
{
    PCIEMTB const  pTb         = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr     = (uint32_t)uParam0;
    uint32_t const cbStartPage = (uint32_t)(uParam0 >> 32);
    uint32_t const idxRange1   = (uint32_t)uParam1;
    uint32_t const offRange1   = (uint32_t)uParam2;
    uint32_t const idxRange2   = idxRange1 + 1;
    BODY_CHECK_CS_LIM(cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange1, offRange1, cbInstr);
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, cbStartPage, idxRange2, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange2, 0, cbInstr);
    return VINF_SUCCESS;
}


/**
 * Built-in function for loading TLB and checking opcodes on both pages when
 * transitioning to a different code page.
 *
 * This is used when the previous instruction requires revalidation of opcode
 * bytes and the current instruction straddles a page boundary with opcode
 * bytes in both the old and new page.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesAcrossPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesAcrossPageLoadingTlb)
{
    PCIEMTB const  pTb         = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr     = (uint32_t)uParam0;
    uint32_t const cbStartPage = (uint32_t)(uParam0 >> 32);
    uint32_t const idxRange1   = (uint32_t)uParam1;
    uint32_t const offRange1   = (uint32_t)uParam2;
    uint32_t const idxRange2   = idxRange1 + 1;
    BODY_CHECK_OPCODES(pTb, idxRange1, offRange1, cbInstr);
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, cbStartPage, idxRange2, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange2, 0, cbInstr);
    return VINF_SUCCESS;
}


/**
 * Built-in function for loading TLB and checking opcodes on both pages and
 * considering the need for CS.LIM checking when transitioning to a different
 * code page.
 *
 * This is used when the previous instruction requires revalidation of opcode
 * bytes and the current instruction straddles a page boundary with opcode
 * bytes in both the old and new page.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesAcrossPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesAcrossPageLoadingTlbConsiderCsLim)
{
    PCIEMTB const  pTb         = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr     = (uint32_t)uParam0;
    uint32_t const cbStartPage = (uint32_t)(uParam0 >> 32);
    uint32_t const idxRange1   = (uint32_t)uParam1;
    uint32_t const offRange1   = (uint32_t)uParam2;
    uint32_t const idxRange2   = idxRange1 + 1;
    BODY_CONSIDER_CS_LIM_CHECKING(pTb, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange1, offRange1, cbInstr);
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, cbStartPage, idxRange2, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange2, 0, cbInstr);
    return VINF_SUCCESS;
}


/**
 * Built-in function for checking CS.LIM, loading TLB and checking opcodes when
 * advancing naturally to a different code page.
 *
 * Only opcodes on the new page are checked.
 *
 * @see iemThreadedFunc_BltIn_CheckOpcodesOnNextPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNextPageLoadingTlb)
{
    PCIEMTB const  pTb         = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr     = (uint32_t)uParam0;
    uint32_t const cbStartPage = (uint32_t)(uParam0 >> 32);
    uint32_t const idxRange1   = (uint32_t)uParam1;
    //uint32_t const offRange1   = (uint32_t)uParam2;
    uint32_t const idxRange2   = idxRange1 + 1;
    BODY_CHECK_CS_LIM(cbInstr);
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, cbStartPage, idxRange2, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange2, 0, cbInstr);
    RT_NOREF(uParam2);
    return VINF_SUCCESS;
}


/**
 * Built-in function for loading TLB and checking opcodes when advancing
 * naturally to a different code page.
 *
 * Only opcodes on the new page are checked.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNextPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesOnNextPageLoadingTlb)
{
    PCIEMTB const  pTb         = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr     = (uint32_t)uParam0;
    uint32_t const cbStartPage = (uint32_t)(uParam0 >> 32);
    uint32_t const idxRange1   = (uint32_t)uParam1;
    //uint32_t const offRange1   = (uint32_t)uParam2;
    uint32_t const idxRange2   = idxRange1 + 1;
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, cbStartPage, idxRange2, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange2, 0, cbInstr);
    RT_NOREF(uParam2);
    return VINF_SUCCESS;
}


/**
 * Built-in function for loading TLB and checking opcodes and considering the
 * need for CS.LIM checking when advancing naturally to a different code page.
 *
 * Only opcodes on the new page are checked.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNextPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesOnNextPageLoadingTlbConsiderCsLim)
{
    PCIEMTB const  pTb         = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr     = (uint32_t)uParam0;
    uint32_t const cbStartPage = (uint32_t)(uParam0 >> 32);
    uint32_t const idxRange1   = (uint32_t)uParam1;
    //uint32_t const offRange1   = (uint32_t)uParam2;
    uint32_t const idxRange2   = idxRange1 + 1;
    BODY_CONSIDER_CS_LIM_CHECKING(pTb, cbInstr);
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, cbStartPage, idxRange2, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange2, 0, cbInstr);
    RT_NOREF(uParam2);
    return VINF_SUCCESS;
}


/**
 * Built-in function for checking CS.LIM, loading TLB and checking opcodes when
 * advancing naturally to a different code page with the first instruction
 * starting at byte 0.
 *
 * @see iemThreadedFunc_BltIn_CheckOpcodesOnNewPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNewPageLoadingTlb)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint32_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    RT_NOREF(uParam2); //Assert(uParam2 == 0 /*offRange*/);
    BODY_CHECK_CS_LIM(cbInstr);
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, 0, idxRange, cbInstr);
    Assert(pVCpu->iem.s.offCurInstrStart == 0);
    BODY_CHECK_OPCODES(pTb, idxRange, 0, cbInstr);
    return VINF_SUCCESS;
}


/**
 * Built-in function for loading TLB and checking opcodes when advancing
 * naturally to a different code page with the first instruction starting at
 * byte 0.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNewPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesOnNewPageLoadingTlb)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint32_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    RT_NOREF(uParam2); //Assert(uParam2 == 0 /*offRange*/);
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, 0, idxRange, cbInstr);
    Assert(pVCpu->iem.s.offCurInstrStart == 0);
    BODY_CHECK_OPCODES(pTb, idxRange, 0, cbInstr);
    return VINF_SUCCESS;
}


/**
 * Built-in function for loading TLB and checking opcodes and considering the
 * need for CS.LIM checking when advancing naturally to a different code page
 * with the first instruction starting at byte 0.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNewPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesOnNewPageLoadingTlbConsiderCsLim)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint32_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    RT_NOREF(uParam2); //Assert(uParam2 == 0 /*offRange*/);
    BODY_CONSIDER_CS_LIM_CHECKING(pTb, cbInstr);
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, 0, idxRange, cbInstr);
    Assert(pVCpu->iem.s.offCurInstrStart == 0);
    BODY_CHECK_OPCODES(pTb, idxRange, 0, cbInstr);
    return VINF_SUCCESS;
}