VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllN8veRecompBltIn.cpp@ 104626

Last change on this file since 104626 was 104506, checked in by vboxsync, 7 months ago

VMM/IEM: Deal with direct 'linking' of TBs when requiring a TLB load and all that. bugref:10656

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 95.9 KB
1/* $Id: IEMAllN8veRecompBltIn.cpp 104506 2024-05-03 13:08:49Z vboxsync $ */
2/** @file
3 * IEM - Native Recompiler, Emitters for Built-In Threaded Functions.
4 */
5
6/*
7 * Copyright (C) 2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_IEM_RE_NATIVE
33#define IEM_WITH_OPAQUE_DECODER_STATE
34#define VMCPU_INCL_CPUM_GST_CTX
35#define VMM_INCLUDED_SRC_include_IEMMc_h /* block IEMMc.h inclusion. */
36#include <VBox/vmm/iem.h>
37#include <VBox/vmm/cpum.h>
38#include <VBox/vmm/dbgf.h>
39#include "IEMInternal.h"
40#include <VBox/vmm/vmcc.h>
41#include <VBox/log.h>
42#include <VBox/err.h>
43#include <VBox/param.h>
44#include <iprt/assert.h>
45#include <iprt/string.h>
46#if defined(RT_ARCH_AMD64)
47# include <iprt/x86.h>
48#elif defined(RT_ARCH_ARM64)
49# include <iprt/armv8.h>
50#endif
51
52
53#include "IEMInline.h"
54#include "IEMThreadedFunctions.h"
55#include "IEMN8veRecompiler.h"
56#include "IEMN8veRecompilerEmit.h"
57#include "IEMN8veRecompilerTlbLookup.h"
58
59
60
61/*********************************************************************************************************************************
62* TB Helper Functions *
63*********************************************************************************************************************************/
64#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_ARM64)
65DECLASM(void) iemNativeHlpAsmSafeWrapLogCpuState(void);
66#endif
67
68
69/**
70 * Used by TB code to deal with a TLB miss for a new page.
71 */
72IEM_DECL_NATIVE_HLP_DEF(void, iemNativeHlpMemCodeNewPageTlbMiss,(PVMCPUCC pVCpu))
73{
74 STAM_COUNTER_INC(&pVCpu->iem.s.StatNativeCodeTlbMissesNewPage);
75 pVCpu->iem.s.pbInstrBuf = NULL;
76 pVCpu->iem.s.offCurInstrStart = GUEST_PAGE_SIZE;
77 pVCpu->iem.s.offInstrNextByte = GUEST_PAGE_SIZE;
78 iemOpcodeFetchBytesJmp(pVCpu, 0, NULL);
79 if (pVCpu->iem.s.pbInstrBuf)
80 { /* likely */ }
81 else
82 {
83 IEM_DO_LONGJMP(pVCpu, VINF_IEM_REEXEC_BREAK);
84 }
85}
86
87
88/**
89 * Used by TB code to deal with a TLB miss for a new page, returning the new page's physical address (or NIL_RTGCPHYS on failure).
90 */
91IEM_DECL_NATIVE_HLP_DEF(RTGCPHYS, iemNativeHlpMemCodeNewPageTlbMissWithOff,(PVMCPUCC pVCpu, uint8_t offInstr))
92{
93 STAM_COUNTER_INC(&pVCpu->iem.s.StatNativeCodeTlbMissesNewPageWithOffset);
94 pVCpu->iem.s.pbInstrBuf = NULL;
95 pVCpu->iem.s.offCurInstrStart = GUEST_PAGE_SIZE - offInstr;
96 pVCpu->iem.s.offInstrNextByte = GUEST_PAGE_SIZE;
97 iemOpcodeFetchBytesJmp(pVCpu, 0, NULL);
98 return pVCpu->iem.s.pbInstrBuf ? pVCpu->iem.s.GCPhysInstrBuf : NIL_RTGCPHYS;
99}
100
101
102/*********************************************************************************************************************************
103* Builtin functions *
104*********************************************************************************************************************************/
105
106/**
107 * Built-in function that does nothing.
108 *
109 * Whether this is called or not can be controlled by the entry in the
110 * IEMThreadedGenerator.katBltIns table. This can be useful for determining
111 * why behaviour changes when enabling the LogCpuState builtins, i.e. whether
112 * it's the reduced call count in the TBs or the threaded calls flushing
113 * register state.
114 */
115IEM_DECL_IEMNATIVERECOMPFUNC_DEF(iemNativeRecompFunc_BltIn_Nop)
116{
117 RT_NOREF(pReNative, pCallEntry);
118 return off;
119}
120
121IEM_DECL_IEMNATIVELIVENESSFUNC_DEF(iemNativeLivenessFunc_BltIn_Nop)
122{
123 *pOutgoing = *pIncoming;
124 RT_NOREF(pCallEntry);
125}
126
127
128/**
129 * Emits code for LogCpuState.
130 *
131 * This shouldn't have any relevant impact on the recompiler state.
132 */
133IEM_DECL_IEMNATIVERECOMPFUNC_DEF(iemNativeRecompFunc_BltIn_LogCpuState)
134{
135#ifdef RT_ARCH_AMD64
136 uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 20);
137 /* push rax */
138 pbCodeBuf[off++] = 0x50 + X86_GREG_xAX;
139 /* push imm32 */
140 pbCodeBuf[off++] = 0x68;
141 pbCodeBuf[off++] = RT_BYTE1(pCallEntry->auParams[0]);
142 pbCodeBuf[off++] = RT_BYTE2(pCallEntry->auParams[0]);
143 pbCodeBuf[off++] = RT_BYTE3(pCallEntry->auParams[0]);
144 pbCodeBuf[off++] = RT_BYTE4(pCallEntry->auParams[0]);
145 /* mov rax, iemNativeHlpAsmSafeWrapLogCpuState */
146 pbCodeBuf[off++] = X86_OP_REX_W;
147 pbCodeBuf[off++] = 0xb8 + X86_GREG_xAX;
148 *(uint64_t *)&pbCodeBuf[off] = (uintptr_t)iemNativeHlpAsmSafeWrapLogCpuState;
149 off += sizeof(uint64_t);
150 /* call rax */
151 pbCodeBuf[off++] = 0xff;
152 pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 2, X86_GREG_xAX);
153 /* pop rax */
154 pbCodeBuf[off++] = 0x58 + X86_GREG_xAX;
155 /* pop rax */
156 pbCodeBuf[off++] = 0x58 + X86_GREG_xAX;
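 /* Reading of the sequence above (an illustrative note, not authoritative): the
    first push preserves RAX, the second pushes auParams[0] for the assembly-safe
    wrapper, which presumably picks it up from the stack, and the two pops
    afterwards undo both again. */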
157#else
158 off = iemNativeEmitCallImm(pReNative, off, (uintptr_t)iemNativeHlpAsmSafeWrapLogCpuState);
159 RT_NOREF(pCallEntry);
160#endif
161
162 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
163 return off;
164}
165
166IEM_DECL_IEMNATIVELIVENESSFUNC_DEF(iemNativeLivenessFunc_BltIn_LogCpuState)
167{
168 IEM_LIVENESS_RAW_INIT_WITH_XCPT_OR_CALL(pOutgoing, pIncoming);
169 RT_NOREF(pCallEntry);
170}
171
172
173/**
174 * Built-in function that calls a C-implementation function taking zero arguments.
175 */
176IEM_DECL_IEMNATIVERECOMPFUNC_DEF(iemNativeRecompFunc_BltIn_DeferToCImpl0)
177{
178 PFNIEMCIMPL0 const pfnCImpl = (PFNIEMCIMPL0)(uintptr_t)pCallEntry->auParams[0];
179 uint8_t const cbInstr = (uint8_t)pCallEntry->auParams[1];
180 uint64_t const fGstShwFlush = pCallEntry->auParams[2];
181 return iemNativeEmitCImplCall(pReNative, off, pCallEntry->idxInstr, fGstShwFlush, (uintptr_t)pfnCImpl, cbInstr, 0, 0, 0, 0);
182}
183
184IEM_DECL_IEMNATIVELIVENESSFUNC_DEF(iemNativeLivenessFunc_BltIn_DeferToCImpl0)
185{
186 IEM_LIVENESS_RAW_INIT_WITH_XCPT_OR_CALL(pOutgoing, pIncoming);
187 RT_NOREF(pCallEntry);
188}
189
190
191/**
192 * Flushes pending writes in preparation for raising an exception or aborting the TB.
193 */
194#define BODY_FLUSH_PENDING_WRITES() \
195 off = iemNativeRegFlushPendingWrites(pReNative, off);
196
197
198/**
199 * Built-in function that checks for pending interrupts that can be delivered or
200 * forced action flags.
201 *
202 * This triggers after the completion of an instruction, so EIP is already at
203 * the next instruction. If an IRQ or important FF is pending, this will return
204 * a non-zero status that stops TB execution.
205 */
206IEM_DECL_IEMNATIVERECOMPFUNC_DEF(iemNativeRecompFunc_BltIn_CheckIrq)
207{
208 RT_NOREF(pCallEntry);
209
210 BODY_FLUSH_PENDING_WRITES();
211
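 /* Rough C sketch of the checks emitted below (illustrative only; 'eflagsExt'
    stands for the extended EFLAGS value loaded into idxEflReg):

        uint64_t fFlags = pVCpu->fLocalForcedActions
                        & VMCPU_FF_ALL_MASK & ~(VMCPU_FF_PGM_SYNC_CR3 | ...);
        if (fFlags)
        {
            if (fFlags & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
                goto ReturnBreakFF;
            if (eflagsExt & X86_EFL_IF)
            {
                if (!(eflagsExt & CPUMCTX_INHIBIT_SHADOW))
                    goto ReturnBreakFF;
                if (pVCpu->cpum.GstCtx.uRipInhibitInt != rip)
                    goto ReturnBreakFF;
            }
        }
      VmCheck:
        if (pVM->fGlobalForcedActions & VM_FF_ALL_MASK)
            goto ReturnBreakFF;
 */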
212 /* It's too convenient to use iemNativeEmitTestBitInGprAndJmpToLabelIfNotSet below
213 and I'm too lazy to create a 'Fixed' version of that one. */
214 uint32_t const idxLabelVmCheck = iemNativeLabelCreate(pReNative, kIemNativeLabelType_CheckIrq,
215 UINT32_MAX, pReNative->uCheckIrqSeqNo++);
216
217 uint32_t const idxLabelReturnBreakFF = iemNativeLabelCreate(pReNative, kIemNativeLabelType_ReturnBreakFF);
218
219 /* Again, we need to load the extended EFLAGS before we actually need them
220 in case we jump. We couldn't use iemNativeRegAllocTmpForGuestReg if we
221 loaded them inside the check, as the shadow state would not be correct
222 when the code branches before the load. Ditto PC. */
223 uint8_t const idxEflReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, kIemNativeGstReg_EFlags,
224 kIemNativeGstRegUse_ReadOnly);
225
226 uint8_t const idxPcReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, kIemNativeGstReg_Pc, kIemNativeGstRegUse_ReadOnly);
227
228 uint8_t idxTmpReg = iemNativeRegAllocTmp(pReNative, &off);
229
230 /*
231 * Start by checking the local forced actions of the EMT we're on for IRQs
232 * and other FFs that need servicing.
233 */
234 /** @todo this isn't even close to the NMI and interrupt conditions in EM! */
235 /* Load FFs in to idxTmpReg and AND with all relevant flags. */
236 off = iemNativeEmitLoadGprFromVCpuU64(pReNative, off, idxTmpReg, RT_UOFFSETOF(VMCPUCC, fLocalForcedActions));
237 off = iemNativeEmitAndGprByImm(pReNative, off, idxTmpReg,
238 VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
239 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
240 | VMCPU_FF_TLB_FLUSH
241 | VMCPU_FF_UNHALT ),
242 true /*fSetFlags*/);
243 /* If we end up with ZERO in idxTmpReg there is nothing to do. */
244 uint32_t const offFixupJumpToVmCheck1 = off;
245 off = iemNativeEmitJzToFixed(pReNative, off, off /* ASSUME jz rel8 suffices */);
246
247 /* Some relevant FFs are set, but if it's only APIC and/or PIC being set,
248 these may be suppressed by EFLAGS.IF or CPUMIsInInterruptShadow. */
249 off = iemNativeEmitAndGprByImm(pReNative, off, idxTmpReg,
250 ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC), true /*fSetFlags*/);
251 /* Return VINF_IEM_REEXEC_BREAK if other FFs are set. */
252 off = iemNativeEmitJnzToLabel(pReNative, off, idxLabelReturnBreakFF);
253
254 /* So, it's only interrupt-related FFs and we need to see if IRQs are being
255 suppressed by the CPU or not. */
256 off = iemNativeEmitTestBitInGprAndJmpToLabelIfNotSet(pReNative, off, idxEflReg, X86_EFL_IF_BIT, idxLabelVmCheck);
257 off = iemNativeEmitTestAnyBitsInGprAndJmpToLabelIfNoneSet(pReNative, off, idxEflReg, CPUMCTX_INHIBIT_SHADOW,
258 idxLabelReturnBreakFF);
259
260 /* We've got shadow flags set, so we must check that the PC they are valid
261 for matches our current PC value. */
262 /** @todo AMD64 can do this more efficiently w/o loading uRipInhibitInt into
263 * a register. */
264 off = iemNativeEmitLoadGprFromVCpuU64(pReNative, off, idxTmpReg, RT_UOFFSETOF(VMCPUCC, cpum.GstCtx.uRipInhibitInt));
265 off = iemNativeEmitTestIfGprNotEqualGprAndJmpToLabel(pReNative, off, idxTmpReg, idxPcReg, idxLabelReturnBreakFF);
266
267 /*
268 * Now check the force flags of the VM.
269 */
270 iemNativeLabelDefine(pReNative, idxLabelVmCheck, off);
271 iemNativeFixupFixedJump(pReNative, offFixupJumpToVmCheck1, off);
272 off = iemNativeEmitLoadGprFromVCpuU64(pReNative, off, idxTmpReg, RT_UOFFSETOF(VMCPUCC, CTX_SUFF(pVM))); /* idxTmpReg = pVM */
273 off = iemNativeEmitLoadGprByGprU32(pReNative, off, idxTmpReg, idxTmpReg, RT_UOFFSETOF(VMCC, fGlobalForcedActions));
274 off = iemNativeEmitAndGpr32ByImm(pReNative, off, idxTmpReg, VM_FF_ALL_MASK, true /*fSetFlags*/);
275 off = iemNativeEmitJnzToLabel(pReNative, off, idxLabelReturnBreakFF);
276
277 /** @todo STAM_REL_COUNTER_INC(&pVCpu->iem.s.StatCheckIrqBreaks); */
278
279 /*
280 * We're good, no IRQs or FFs pending.
281 */
282 iemNativeRegFreeTmp(pReNative, idxTmpReg);
283 iemNativeRegFreeTmp(pReNative, idxEflReg);
284 iemNativeRegFreeTmp(pReNative, idxPcReg);
285
286 /*
287 * Note down that we've been here, so we can skip FFs + IRQ checks when
288 * doing direct linking.
289 */
290#ifdef IEMNATIVE_WITH_LIVENESS_ANALYSIS
291 pReNative->idxLastCheckIrqCallNo = pReNative->idxCurCall;
292#else
293 pReNative->idxLastCheckIrqCallNo = pCallEntry - pReNative->pTbOrg->Thrd.paCalls;
294#endif
295
296 return off;
297}
298
299IEM_DECL_IEMNATIVELIVENESSFUNC_DEF(iemNativeLivenessFunc_BltIn_CheckIrq)
300{
301 IEM_LIVENESS_RAW_INIT_WITH_XCPT_OR_CALL(pOutgoing, pIncoming);
302 IEM_LIVENESS_RAW_EFLAGS_ONE_INPUT(pOutgoing, fEflOther);
303 RT_NOREF(pCallEntry);
304}
305
306
307/**
308 * Built-in function that checks if IEMCPU::fExec has the expected value.
309 */
310IEM_DECL_IEMNATIVERECOMPFUNC_DEF(iemNativeRecompFunc_BltIn_CheckMode)
311{
312 uint32_t const fExpectedExec = (uint32_t)pCallEntry->auParams[0];
313 uint8_t const idxTmpReg = iemNativeRegAllocTmp(pReNative, &off);
314
315 off = iemNativeEmitLoadGprFromVCpuU32(pReNative, off, idxTmpReg, RT_UOFFSETOF(VMCPUCC, iem.s.fExec));
316 off = iemNativeEmitAndGpr32ByImm(pReNative, off, idxTmpReg, IEMTB_F_KEY_MASK);
317 off = iemNativeEmitTestIfGpr32NotEqualImmAndJmpToNewLabel(pReNative, off, idxTmpReg, fExpectedExec & IEMTB_F_KEY_MASK,
318 kIemNativeLabelType_ReturnBreak);
319 iemNativeRegFreeTmp(pReNative, idxTmpReg);
320
321 /* Maintain the recompiler fExec state. */
322 pReNative->fExec = fExpectedExec & IEMTB_F_IEM_F_MASK;
323 return off;
324}
325
326IEM_DECL_IEMNATIVELIVENESSFUNC_DEF(iemNativeLivenessFunc_BltIn_CheckMode)
327{
328 IEM_LIVENESS_RAW_INIT_WITH_XCPT_OR_CALL(pOutgoing, pIncoming);
329 RT_NOREF(pCallEntry);
330}
331
332
333/**
334 * Sets idxTbCurInstr in preparation for raising an exception or aborting the TB.
335 */
336/** @todo Optimize this, so we don't set the same value more than once. Just
337 * needs some tracking. */
338#ifdef IEMNATIVE_WITH_INSTRUCTION_COUNTING
339# define BODY_SET_CUR_INSTR() \
340 off = iemNativeEmitStoreImmToVCpuU8(pReNative, off, pCallEntry->idxInstr, RT_UOFFSETOF(VMCPUCC, iem.s.idxTbCurInstr))
341#else
342# define BODY_SET_CUR_INSTR() ((void)0)
343#endif
344
345
346/**
347 * Macro that emits the 16/32-bit CS.LIM check.
348 */
349#define BODY_CHECK_CS_LIM(a_cbInstr) \
350 off = iemNativeEmitBltInCheckCsLim(pReNative, off, (a_cbInstr))
351
352#define LIVENESS_CHECK_CS_LIM(a_pOutgoing) \
353 IEM_LIVENESS_RAW_SEG_LIMIT_INPUT(a_pOutgoing, X86_SREG_CS)
354
355DECL_FORCE_INLINE(uint32_t)
356iemNativeEmitBltInCheckCsLim(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t cbInstr)
357{
358 Assert(cbInstr > 0);
359 Assert(cbInstr < 16);
360#ifdef VBOX_STRICT
361 off = iemNativeEmitMarker(pReNative, off, 0x80000001);
362#endif
363
364#ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING
365 Assert(pReNative->Core.offPc == 0);
366#endif
367
368 /*
369 * We need CS.LIM and RIP here. When cbInstr is larger than 1, we also need
370 * a temporary register for calculating the last address of the instruction.
371 *
372 * The calculation and comparisons are 32-bit. We ASSUME that the incoming
373 * RIP isn't totally invalid, i.e. that any jump/call/ret/iret instruction
374 * that last updated EIP here checked it already, and that we're therefore
375 * safe in the 32-bit wrap-around scenario to only check that the last byte
376 * is within CS.LIM. In the case of instruction-by-instruction advancing
377 * up to an EIP wrap-around, we know that CS.LIM is 4G-1 because the limit
378 * must be using 4KB granularity and the previous instruction was fine.
379 */
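 /* In plain C the check being emitted is roughly (an illustrative sketch only):

        if ((uint32_t)(eip + cbInstr - 1) > cs.u32Limit)
            goto RaiseGp0;

    using a temporary for the last-byte address when cbInstr > 1 and comparing
    EIP directly against CS.LIM when cbInstr == 1. */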
380 uint8_t const idxRegPc = iemNativeRegAllocTmpForGuestReg(pReNative, &off, kIemNativeGstReg_Pc,
381 kIemNativeGstRegUse_ReadOnly);
382 uint8_t const idxRegCsLim = iemNativeRegAllocTmpForGuestReg(pReNative, &off, IEMNATIVEGSTREG_SEG_LIMIT(X86_SREG_CS),
383 kIemNativeGstRegUse_ReadOnly);
384#ifdef RT_ARCH_AMD64
385 uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 8);
386#elif defined(RT_ARCH_ARM64)
387 uint32_t * const pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 2);
388#else
389# error "Port me"
390#endif
391
392 if (cbInstr != 1)
393 {
394 uint8_t const idxRegTmp = iemNativeRegAllocTmp(pReNative, &off);
395
396 /*
397 * 1. idxRegTmp = idxRegPc + cbInstr;
398 * 2. if idxRegTmp > idxRegCsLim then raise #GP(0).
399 */
400#ifdef RT_ARCH_AMD64
401 /* 1. lea tmp32, [Pc + cbInstr - 1] */
402 if (idxRegTmp >= 8 || idxRegPc >= 8)
403 pbCodeBuf[off++] = (idxRegTmp < 8 ? 0 : X86_OP_REX_R) | (idxRegPc < 8 ? 0 : X86_OP_REX_B);
404 pbCodeBuf[off++] = 0x8d;
405 pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_MEM1, idxRegTmp & 7, idxRegPc & 7);
406 if ((idxRegPc & 7) == X86_GREG_xSP)
407 pbCodeBuf[off++] = X86_SIB_MAKE(idxRegPc & 7, 4 /*no index*/, 0);
408 pbCodeBuf[off++] = cbInstr - 1;
409
410 /* 2. cmp tmp32(r), CsLim(r/m). */
411 if (idxRegTmp >= 8 || idxRegCsLim >= 8)
412 pbCodeBuf[off++] = (idxRegTmp < 8 ? 0 : X86_OP_REX_R) | (idxRegCsLim < 8 ? 0 : X86_OP_REX_B);
413 pbCodeBuf[off++] = 0x3b;
414 pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, idxRegTmp & 7, idxRegCsLim & 7);
415
416#elif defined(RT_ARCH_ARM64)
417 /* 1. add tmp32, Pc, #cbInstr-1 */
418 pu32CodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(false /*fSub*/, idxRegTmp, idxRegPc, cbInstr - 1, false /*f64Bit*/);
419 /* 2. cmp tmp32, CsLim */
420 pu32CodeBuf[off++] = Armv8A64MkInstrAddSubReg(true /*fSub*/, ARMV8_A64_REG_XZR, idxRegTmp, idxRegCsLim,
421 false /*f64Bit*/, true /*fSetFlags*/);
422
423#endif
424 iemNativeRegFreeTmp(pReNative, idxRegTmp);
425 }
426 else
427 {
428 /*
429 * Here we can skip step 1 and compare PC and CS.LIM directly.
430 */
431#ifdef RT_ARCH_AMD64
432 /* 2. cmp eip(r), CsLim(r/m). */
433 if (idxRegPc >= 8 || idxRegCsLim >= 8)
434 pbCodeBuf[off++] = (idxRegPc < 8 ? 0 : X86_OP_REX_R) | (idxRegCsLim < 8 ? 0 : X86_OP_REX_B);
435 pbCodeBuf[off++] = 0x3b;
436 pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, idxRegPc & 7, idxRegCsLim & 7);
437
438#elif defined(RT_ARCH_ARM64)
439 /* 2. cmp Pc, CsLim */
440 pu32CodeBuf[off++] = Armv8A64MkInstrAddSubReg(true /*fSub*/, ARMV8_A64_REG_XZR, idxRegPc, idxRegCsLim,
441 false /*f64Bit*/, true /*fSetFlags*/);
442
443#endif
444 }
445
446 /* 3. Jump if greater. */
447 off = iemNativeEmitJaToNewLabel(pReNative, off, kIemNativeLabelType_RaiseGp0);
448
449 iemNativeRegFreeTmp(pReNative, idxRegCsLim);
450 iemNativeRegFreeTmp(pReNative, idxRegPc);
451 return off;
452}
453
454
455/**
456 * Macro that considers whether we need CS.LIM checking after a branch or
457 * crossing over to a new page.
458 */
459#define BODY_CONSIDER_CS_LIM_CHECKING(a_pTb, a_cbInstr) \
460 RT_NOREF(a_cbInstr); \
461 off = iemNativeEmitBltInConsiderLimChecking(pReNative, off)
462
463#define LIVENESS_CONSIDER_CS_LIM_CHECKING(a_pOutgoing) \
464 IEM_LIVENESS_RAW_SEG_LIMIT_INPUT(a_pOutgoing, X86_SREG_CS); \
465 IEM_LIVENESS_RAW_SEG_BASE_INPUT(a_pOutgoing, X86_SREG_CS)
466
467DECL_FORCE_INLINE(uint32_t)
468iemNativeEmitBltInConsiderLimChecking(PIEMRECOMPILERSTATE pReNative, uint32_t off)
469{
470#ifdef VBOX_STRICT
471 off = iemNativeEmitMarker(pReNative, off, 0x80000002);
472#endif
473
474#ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING
475 Assert(pReNative->Core.offPc == 0);
476#endif
477
478 /*
479 * This check must match the one in iemGetTbFlagsForCurrentPc
480 * exactly:
481 *
482 * int64_t const offFromLim = (int64_t)pVCpu->cpum.GstCtx.cs.u32Limit - (int64_t)pVCpu->cpum.GstCtx.eip;
483 * if (offFromLim >= X86_PAGE_SIZE + 16 - (int32_t)(pVCpu->cpum.GstCtx.cs.u64Base & GUEST_PAGE_OFFSET_MASK))
484 * return fRet;
485 * return fRet | IEMTB_F_CS_LIM_CHECKS;
486 *
487 *
488 * We need EIP, CS.LIM and CS.BASE here.
489 */
490
491 /* Calculate the offFromLim first: */
492 uint8_t const idxRegPc = iemNativeRegAllocTmpForGuestReg(pReNative, &off, kIemNativeGstReg_Pc,
493 kIemNativeGstRegUse_ReadOnly);
494 uint8_t const idxRegCsLim = iemNativeRegAllocTmpForGuestReg(pReNative, &off, IEMNATIVEGSTREG_SEG_LIMIT(X86_SREG_CS),
495 kIemNativeGstRegUse_ReadOnly);
496 uint8_t const idxRegLeft = iemNativeRegAllocTmp(pReNative, &off);
497
498#ifdef RT_ARCH_ARM64
499 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
500 pu32CodeBuf[off++] = Armv8A64MkInstrSubReg(idxRegLeft, idxRegCsLim, idxRegPc);
501 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
502#else
503 off = iemNativeEmitLoadGprFromGpr(pReNative, off, idxRegLeft, idxRegCsLim);
504 off = iemNativeEmitSubTwoGprs(pReNative, off, idxRegLeft, idxRegPc);
505#endif
506
507 iemNativeRegFreeTmp(pReNative, idxRegCsLim);
508 iemNativeRegFreeTmp(pReNative, idxRegPc);
509
510 /* Calculate the threshold level (right side). */
511 uint8_t const idxRegCsBase = iemNativeRegAllocTmpForGuestReg(pReNative, &off, IEMNATIVEGSTREG_SEG_BASE(X86_SREG_CS),
512 kIemNativeGstRegUse_ReadOnly);
513 uint8_t const idxRegRight = iemNativeRegAllocTmp(pReNative, &off);
514
515#ifdef RT_ARCH_ARM64
516 pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 4);
517 Assert(Armv8A64ConvertImmRImmS2Mask32(11, 0) == GUEST_PAGE_OFFSET_MASK);
518 pu32CodeBuf[off++] = Armv8A64MkInstrAndImm(idxRegRight, idxRegCsBase, 11, 0, false /*f64Bit*/);
519 pu32CodeBuf[off++] = Armv8A64MkInstrNeg(idxRegRight);
520 pu32CodeBuf[off++] = Armv8A64MkInstrAddUImm12(idxRegRight, idxRegRight, (X86_PAGE_SIZE + 16) / 2);
521 pu32CodeBuf[off++] = Armv8A64MkInstrAddUImm12(idxRegRight, idxRegRight, (X86_PAGE_SIZE + 16) / 2);
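 /* Note: X86_PAGE_SIZE + 16 = 4112 does not fit the unsigned 12-bit add
    immediate (max 4095), which is presumably why it is added in two halves
    above. */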
522 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
523
524#else
525 off = iemNativeEmitLoadGprImm32(pReNative, off, idxRegRight, GUEST_PAGE_OFFSET_MASK);
526 off = iemNativeEmitAndGpr32ByGpr32(pReNative, off, idxRegRight, idxRegCsBase);
527 off = iemNativeEmitNegGpr(pReNative, off, idxRegRight);
528 off = iemNativeEmitAddGprImm(pReNative, off, idxRegRight, X86_PAGE_SIZE + 16);
529#endif
530
531 iemNativeRegFreeTmp(pReNative, idxRegCsBase);
532
533 /* Compare the two and jump out if we're too close to the limit. */
534 off = iemNativeEmitCmpGprWithGpr(pReNative, off, idxRegLeft, idxRegRight);
535 off = iemNativeEmitJlToNewLabel(pReNative, off, kIemNativeLabelType_NeedCsLimChecking);
536
537 iemNativeRegFreeTmp(pReNative, idxRegRight);
538 iemNativeRegFreeTmp(pReNative, idxRegLeft);
539 return off;
540}
541
542
543
544/**
545 * Macro that implements opcode (re-)checking.
546 */
547#define BODY_CHECK_OPCODES(a_pTb, a_idxRange, a_offRange, a_cbInstr) \
548 RT_NOREF(a_cbInstr); \
549 off = iemNativeEmitBltInCheckOpcodes(pReNative, off, (a_pTb), (a_idxRange), (a_offRange))
550
551#define LIVENESS_CHECK_OPCODES(a_pOutgoing) ((void)0)
552
553#if 0 /* debugging aid */
554bool g_fBpOnObsoletion = false;
555# define BP_ON_OBSOLETION g_fBpOnObsoletion
556#else
557# define BP_ON_OBSOLETION 0
558#endif
559
560DECL_FORCE_INLINE(uint32_t)
561iemNativeEmitBltInCheckOpcodes(PIEMRECOMPILERSTATE pReNative, uint32_t off, PCIEMTB pTb, uint8_t idxRange, uint16_t offRange)
562{
563 Assert(idxRange < pTb->cRanges && pTb->cRanges <= RT_ELEMENTS(pTb->aRanges));
564 Assert(offRange < pTb->aRanges[idxRange].cbOpcodes);
565#ifdef VBOX_STRICT
566 off = iemNativeEmitMarker(pReNative, off, 0x80000003);
567#endif
568
569 uint32_t const idxLabelObsoleteTb = iemNativeLabelCreate(pReNative, kIemNativeLabelType_ObsoleteTb);
570
571 /*
572 * Where to start and how much to compare.
573 *
574 * Looking at the ranges produced when r160746 was running a DOS VM with TB
575 * logging, the ranges can be anything from 1 byte to at least 0x197 bytes,
576 * with the 6, 5, 4, 7, 8, 40, 3, 2, 9 and 10 being the top 10 in the sample.
577 *
578 * The top 10 for the early boot phase of a 64-bit debian 9.4 VM: 5, 9, 8,
579 * 12, 10, 11, 6, 13, 15 and 16. Max 0x359 bytes. Same revision as above.
580 */
581 uint16_t offPage = pTb->aRanges[idxRange].offPhysPage + offRange;
582 uint16_t cbLeft = pTb->aRanges[idxRange].cbOpcodes - offRange;
583 Assert(cbLeft > 0);
584 uint8_t const *pbOpcodes = &pTb->pabOpcodes[pTb->aRanges[idxRange].offOpcodes + offRange];
585 uint32_t offConsolidatedJump = UINT32_MAX;
586
587#ifdef RT_ARCH_AMD64
588 /* AMD64/x86 offers a bunch of options. Smaller stuff can be completely
589 inlined, while for larger blocks we use REPE CMPS. */
590# define CHECK_OPCODES_CMP_IMMXX(a_idxReg, a_bOpcode) /* cost: 3 bytes */ do { \
591 pbCodeBuf[off++] = a_bOpcode; \
592 Assert(offPage < 127); \
593 pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_MEM1, 7, a_idxReg); \
594 pbCodeBuf[off++] = RT_BYTE1(offPage); \
595 } while (0)
596
597# define CHECK_OPCODES_CMP_JMP() /* cost: 7 bytes first time, then 2 bytes */ do { \
598 if (offConsolidatedJump != UINT32_MAX) \
599 { \
600 int32_t const offDisp = (int32_t)offConsolidatedJump - (int32_t)(off + 2); \
601 Assert(offDisp >= -128); \
602 pbCodeBuf[off++] = 0x75; /* jnz near */ \
603 pbCodeBuf[off++] = (uint8_t)offDisp; \
604 } \
605 else \
606 { \
607 pbCodeBuf[off++] = 0x74; /* jz near +5 */ \
608 pbCodeBuf[off++] = 0x05 + BP_ON_OBSOLETION; \
609 offConsolidatedJump = off; \
610 if (BP_ON_OBSOLETION) pbCodeBuf[off++] = 0xcc; \
611 pbCodeBuf[off++] = 0xe9; /* jmp rel32 */ \
612 iemNativeAddFixup(pReNative, off, idxLabelObsoleteTb, kIemNativeFixupType_Rel32, -4); \
613 pbCodeBuf[off++] = 0x00; \
614 pbCodeBuf[off++] = 0x00; \
615 pbCodeBuf[off++] = 0x00; \
616 pbCodeBuf[off++] = 0x00; \
617 } \
618 } while (0)
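/* Note on the macro above: the first mismatch check emits "jz +5" over a
   "jmp rel32" to the ObsoleteTb label; subsequent checks only need a short
   "jnz" back to that shared jmp, which is what offConsolidatedJump tracks. */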
619
620# define CHECK_OPCODES_CMP_IMM32(a_idxReg) /* cost: 3+4+2 = 9 */ do { \
621 CHECK_OPCODES_CMP_IMMXX(a_idxReg, 0x81); \
622 pbCodeBuf[off++] = *pbOpcodes++; \
623 pbCodeBuf[off++] = *pbOpcodes++; \
624 pbCodeBuf[off++] = *pbOpcodes++; \
625 pbCodeBuf[off++] = *pbOpcodes++; \
626 cbLeft -= 4; \
627 offPage += 4; \
628 CHECK_OPCODES_CMP_JMP(); \
629 } while (0)
630
631# define CHECK_OPCODES_CMP_IMM16(a_idxReg) /* cost: 1+3+2+2 = 8 */ do { \
632 pbCodeBuf[off++] = X86_OP_PRF_SIZE_OP; \
633 CHECK_OPCODES_CMP_IMMXX(a_idxReg, 0x81); \
634 pbCodeBuf[off++] = *pbOpcodes++; \
635 pbCodeBuf[off++] = *pbOpcodes++; \
636 cbLeft -= 2; \
637 offPage += 2; \
638 CHECK_OPCODES_CMP_JMP(); \
639 } while (0)
640
641# define CHECK_OPCODES_CMP_IMM8(a_idxReg) /* cost: 3+1+2 = 6 */ do { \
642 CHECK_OPCODES_CMP_IMMXX(a_idxReg, 0x80); \
643 pbCodeBuf[off++] = *pbOpcodes++; \
644 cbLeft -= 1; \
645 offPage += 1; \
646 CHECK_OPCODES_CMP_JMP(); \
647 } while (0)
648
649# define CHECK_OPCODES_CMPSX(a_bOpcode, a_cbToSubtract, a_bPrefix) /* cost: 2+2 = 4 */ do { \
650 if (a_bPrefix) \
651 pbCodeBuf[off++] = (a_bPrefix); \
652 pbCodeBuf[off++] = (a_bOpcode); \
653 CHECK_OPCODES_CMP_JMP(); \
654 cbLeft -= (a_cbToSubtract); \
655 } while (0)
656
657# define CHECK_OPCODES_ECX_IMM(a_uValue) /* cost: 5 */ do { \
658 pbCodeBuf[off++] = 0xb8 + X86_GREG_xCX; \
659 pbCodeBuf[off++] = RT_BYTE1(a_uValue); \
660 pbCodeBuf[off++] = RT_BYTE2(a_uValue); \
661 pbCodeBuf[off++] = RT_BYTE3(a_uValue); \
662 pbCodeBuf[off++] = RT_BYTE4(a_uValue); \
663 } while (0)
664
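 /* Strategy: for 24 bytes or less everything is inlined as CMP-with-immediate
    instructions against pbInstrBuf; for longer stretches we set up RDI/RSI/RCX
    and use REPE CMPS, with small fix-ups for the unaligned head and tail. */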
665 if (cbLeft <= 24)
666 {
667 uint8_t const idxRegTmp = iemNativeRegAllocTmpEx(pReNative, &off,
668 ( RT_BIT_32(X86_GREG_xAX)
669 | RT_BIT_32(X86_GREG_xCX)
670 | RT_BIT_32(X86_GREG_xDX)
671 | RT_BIT_32(X86_GREG_xBX)
672 | RT_BIT_32(X86_GREG_xSI)
673 | RT_BIT_32(X86_GREG_xDI))
674 & ~IEMNATIVE_REG_FIXED_MASK); /* pick reg not requiring rex prefix */
675 off = iemNativeEmitLoadGprFromVCpuU64(pReNative, off, idxRegTmp, RT_UOFFSETOF(VMCPUCC, iem.s.pbInstrBuf));
676 if (offPage >= 128 - cbLeft)
677 {
678 off = iemNativeEmitAddGprImm(pReNative, off, idxRegTmp, offPage & ~(uint16_t)3);
679 offPage &= 3;
680 }
681
682 uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 5 + 14 + 54 + 8 + 6 + BP_ON_OBSOLETION /* = 87 */);
683
684 if (cbLeft > 8)
685 switch (offPage & 3)
686 {
687 case 0:
688 break;
689 case 1: /* cost: 6 + 8 = 14 */
690 CHECK_OPCODES_CMP_IMM8(idxRegTmp);
691 RT_FALL_THRU();
692 case 2: /* cost: 8 */
693 CHECK_OPCODES_CMP_IMM16(idxRegTmp);
694 break;
695 case 3: /* cost: 6 */
696 CHECK_OPCODES_CMP_IMM8(idxRegTmp);
697 break;
698 }
699
700 while (cbLeft >= 4)
701 CHECK_OPCODES_CMP_IMM32(idxRegTmp); /* max iteration: 24/4 = 6; --> cost: 6 * 9 = 54 */
702
703 if (cbLeft >= 2)
704 CHECK_OPCODES_CMP_IMM16(idxRegTmp); /* cost: 8 */
705 if (cbLeft)
706 CHECK_OPCODES_CMP_IMM8(idxRegTmp); /* cost: 6 */
707
708 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
709 iemNativeRegFreeTmp(pReNative, idxRegTmp);
710 }
711 else
712 {
713 /* RDI = &pbInstrBuf[offPage] */
714 uint8_t const idxRegDi = iemNativeRegAllocTmpEx(pReNative, &off, RT_BIT_32(X86_GREG_xDI));
715 off = iemNativeEmitLoadGprFromVCpuU64(pReNative, off, idxRegDi, RT_UOFFSETOF(VMCPU, iem.s.pbInstrBuf));
716 if (offPage != 0)
717 off = iemNativeEmitAddGprImm(pReNative, off, idxRegDi, offPage);
718
719 /* RSI = pbOpcodes */
720 uint8_t const idxRegSi = iemNativeRegAllocTmpEx(pReNative, &off, RT_BIT_32(X86_GREG_xSI));
721 off = iemNativeEmitLoadGprImm64(pReNative, off, idxRegSi, (uintptr_t)pbOpcodes);
722
723 /* RCX = counts. */
724 uint8_t const idxRegCx = iemNativeRegAllocTmpEx(pReNative, &off, RT_BIT_32(X86_GREG_xCX));
725
726 uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 5 + 10 + 5 + 5 + 3 + 4 + 3 + BP_ON_OBSOLETION /*= 35*/);
727
728 /** @todo profile and optimize this further. Maybe an idea to align by
729 * offPage if the two cannot be reconciled. */
730 /* Align by the page offset, so that at least one of the accesses are naturally aligned. */
731 switch (offPage & 7) /* max cost: 10 */
732 {
733 case 0:
734 break;
735 case 1: /* cost: 3+4+3 = 10 */
736 CHECK_OPCODES_CMPSX(0xa6, 1, 0);
737 RT_FALL_THRU();
738 case 2: /* cost: 4+3 = 7 */
739 CHECK_OPCODES_CMPSX(0xa7, 2, X86_OP_PRF_SIZE_OP);
740 CHECK_OPCODES_CMPSX(0xa7, 4, 0);
741 break;
742 case 3: /* cost: 3+3 = 6 */
743 CHECK_OPCODES_CMPSX(0xa6, 1, 0);
744 RT_FALL_THRU();
745 case 4: /* cost: 3 */
746 CHECK_OPCODES_CMPSX(0xa7, 4, 0);
747 break;
748 case 5: /* cost: 3+4 = 7 */
749 CHECK_OPCODES_CMPSX(0xa6, 1, 0);
750 RT_FALL_THRU();
751 case 6: /* cost: 4 */
752 CHECK_OPCODES_CMPSX(0xa7, 2, X86_OP_PRF_SIZE_OP);
753 break;
754 case 7: /* cost: 3 */
755 CHECK_OPCODES_CMPSX(0xa6, 1, 0);
756 break;
757 }
758
759 /* Compare qwords: */
760 uint32_t const cQWords = cbLeft >> 3;
761 CHECK_OPCODES_ECX_IMM(cQWords); /* cost: 5 */
762
763 pbCodeBuf[off++] = X86_OP_PRF_REPZ; /* cost: 5 */
764 CHECK_OPCODES_CMPSX(0xa7, 0, X86_OP_REX_W);
765 cbLeft &= 7;
766
767 if (cbLeft & 4)
768 CHECK_OPCODES_CMPSX(0xa7, 4, 0); /* cost: 3 */
769 if (cbLeft & 2)
770 CHECK_OPCODES_CMPSX(0xa7, 2, X86_OP_PRF_SIZE_OP); /* cost: 4 */
771 if (cbLeft & 1)
772 CHECK_OPCODES_CMPSX(0xa6, 1, 0); /* cost: 3 */
773
774 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
775 iemNativeRegFreeTmp(pReNative, idxRegCx);
776 iemNativeRegFreeTmp(pReNative, idxRegSi);
777 iemNativeRegFreeTmp(pReNative, idxRegDi);
778 }
779
780#elif defined(RT_ARCH_ARM64)
781 /* We need pbInstrBuf in a register, whatever we do. */
782 uint8_t const idxRegSrc1Ptr = iemNativeRegAllocTmp(pReNative, &off);
783 off = iemNativeEmitLoadGprFromVCpuU64(pReNative, off, idxRegSrc1Ptr, RT_UOFFSETOF(VMCPU, iem.s.pbInstrBuf));
784
785 /* We also need at least one more register for holding bytes & words we
786 load via pbInstrBuf. */
787 uint8_t const idxRegSrc1Val = iemNativeRegAllocTmp(pReNative, &off);
788
789 uint32_t * const pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 64);
790
791 /* One byte compare can be done with the opcode byte as an immediate. We'll
792 do this to uint16_t align src1. */
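 /* fPendingJmp notes that a compare result is still outstanding; subsequent
    compares are chained with CCMP so a single conditional branch to the
    ObsoleteTb label at the end covers all of them. */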
793 bool fPendingJmp = RT_BOOL(offPage & 1);
794 if (fPendingJmp)
795 {
796 pu32CodeBuf[off++] = Armv8A64MkInstrStLdRUOff(kArmv8A64InstrLdStType_Ld_Byte, idxRegSrc1Val, idxRegSrc1Ptr, offPage);
797 pu32CodeBuf[off++] = Armv8A64MkInstrCmpUImm12(idxRegSrc1Val, *pbOpcodes++, false /*f64Bit*/);
798 offPage += 1;
799 cbLeft -= 1;
800 }
801
802 if (cbLeft > 0)
803 {
804 /* We need a register for holding the opcode bytes we're comparing with,
805 as CCMP only has a 5-bit immediate form and thus cannot hold bytes. */
806 uint8_t const idxRegSrc2Val = iemNativeRegAllocTmp(pReNative, &off);
807
808 /* Word (uint32_t) aligning the src1 pointer is best done using a 16-bit constant load. */
809 if ((offPage & 3) && cbLeft >= 2)
810 {
811 pu32CodeBuf[off++] = Armv8A64MkInstrStLdRUOff(kArmv8A64InstrLdStType_Ld_Half, idxRegSrc1Val, idxRegSrc1Ptr, offPage / 2);
812 pu32CodeBuf[off++] = Armv8A64MkInstrMovZ(idxRegSrc2Val, RT_MAKE_U16(pbOpcodes[0], pbOpcodes[1]));
813 if (fPendingJmp)
814 pu32CodeBuf[off++] = Armv8A64MkInstrCCmpReg(idxRegSrc1Val, idxRegSrc2Val,
815 ARMA64_NZCV_F_N0_Z0_C0_V0, kArmv8InstrCond_Eq, false /*f64Bit*/);
816 else
817 {
818 pu32CodeBuf[off++] = Armv8A64MkInstrCmpReg(idxRegSrc1Val, idxRegSrc2Val, false /*f64Bit*/);
819 fPendingJmp = true;
820 }
821 pbOpcodes += 2;
822 offPage += 2;
823 cbLeft -= 2;
824 }
825
826 /* DWord (uint64_t) aligning the src2 pointer. We use a 32-bit constant here for simplicity. */
827 if ((offPage & 7) && cbLeft >= 4)
828 {
829 pu32CodeBuf[off++] = Armv8A64MkInstrStLdRUOff(kArmv8A64InstrLdStType_Ld_Word, idxRegSrc1Val, idxRegSrc1Ptr, offPage / 4);
830 off = iemNativeEmitLoadGpr32ImmEx(pu32CodeBuf, off, idxRegSrc2Val,
831 RT_MAKE_U32_FROM_MSB_U8(pbOpcodes[3], pbOpcodes[2], pbOpcodes[1], pbOpcodes[0]));
832 if (fPendingJmp)
833 pu32CodeBuf[off++] = Armv8A64MkInstrCCmpReg(idxRegSrc1Val, idxRegSrc2Val,
834 ARMA64_NZCV_F_N0_Z0_C0_V0, kArmv8InstrCond_Eq, false /*f64Bit*/);
835 else
836 {
837 pu32CodeBuf[off++] = Armv8A64MkInstrCmpReg(idxRegSrc1Val, idxRegSrc2Val, false /*f64Bit*/);
838 fPendingJmp = true;
839 }
840 pbOpcodes += 4;
841 offPage += 4;
842 cbLeft -= 4;
843 }
844
845 /*
846 * If we've got 16 bytes or more left, switch to memcmp-style.
847 */
848 if (cbLeft >= 16)
849 {
850 /* We need a pointer to the copy of the original opcode bytes. */
851 uint8_t const idxRegSrc2Ptr = iemNativeRegAllocTmp(pReNative, &off);
852 off = iemNativeEmitLoadGprImmEx(pu32CodeBuf, off, idxRegSrc2Ptr, (uintptr_t)pbOpcodes);
853
854 /* If there are 64 bytes or more to compare we create a loop that handles
855 32 bytes per iteration, for which we'll need a loop register. */
856 if (cbLeft >= 64)
857 {
858 if (fPendingJmp)
859 {
860 iemNativeAddFixup(pReNative, off, idxLabelObsoleteTb, kIemNativeFixupType_RelImm19At5);
861 pu32CodeBuf[off++] = Armv8A64MkInstrBCond(kArmv8InstrCond_Ne, 0);
862 fPendingJmp = false;
863 }
864
865 uint8_t const idxRegLoop = iemNativeRegAllocTmp(pReNative, &off);
866 uint16_t const cLoops = cbLeft / 32;
867 cbLeft = cbLeft % 32;
868 pbOpcodes += cLoops * 32;
869 pu32CodeBuf[off++] = Armv8A64MkInstrMovZ(idxRegLoop, cLoops);
870
871 if (offPage != 0) /** @todo optimize out this instruction. */
872 {
873 pu32CodeBuf[off++] = Armv8A64MkInstrAddUImm12(idxRegSrc1Ptr, idxRegSrc1Ptr, offPage);
874 offPage = 0;
875 }
876
877 uint32_t const offLoopStart = off;
878 pu32CodeBuf[off++] = Armv8A64MkInstrStLdRUOff(kArmv8A64InstrLdStType_Ld_Dword, idxRegSrc1Val, idxRegSrc1Ptr, 0);
879 pu32CodeBuf[off++] = Armv8A64MkInstrStLdRUOff(kArmv8A64InstrLdStType_Ld_Dword, idxRegSrc2Val, idxRegSrc2Ptr, 0);
880 pu32CodeBuf[off++] = Armv8A64MkInstrCmpReg(idxRegSrc1Val, idxRegSrc2Val);
881
882 pu32CodeBuf[off++] = Armv8A64MkInstrStLdRUOff(kArmv8A64InstrLdStType_Ld_Dword, idxRegSrc1Val, idxRegSrc1Ptr, 1);
883 pu32CodeBuf[off++] = Armv8A64MkInstrStLdRUOff(kArmv8A64InstrLdStType_Ld_Dword, idxRegSrc2Val, idxRegSrc2Ptr, 1);
884 pu32CodeBuf[off++] = Armv8A64MkInstrCCmpReg(idxRegSrc1Val, idxRegSrc2Val,
885 ARMA64_NZCV_F_N0_Z0_C0_V0, kArmv8InstrCond_Eq);
886
887 pu32CodeBuf[off++] = Armv8A64MkInstrStLdRUOff(kArmv8A64InstrLdStType_Ld_Dword, idxRegSrc1Val, idxRegSrc1Ptr, 2);
888 pu32CodeBuf[off++] = Armv8A64MkInstrStLdRUOff(kArmv8A64InstrLdStType_Ld_Dword, idxRegSrc2Val, idxRegSrc2Ptr, 2);
889 pu32CodeBuf[off++] = Armv8A64MkInstrCCmpReg(idxRegSrc1Val, idxRegSrc2Val,
890 ARMA64_NZCV_F_N0_Z0_C0_V0, kArmv8InstrCond_Eq);
891
892 pu32CodeBuf[off++] = Armv8A64MkInstrStLdRUOff(kArmv8A64InstrLdStType_Ld_Dword, idxRegSrc1Val, idxRegSrc1Ptr, 3);
893 pu32CodeBuf[off++] = Armv8A64MkInstrStLdRUOff(kArmv8A64InstrLdStType_Ld_Dword, idxRegSrc2Val, idxRegSrc2Ptr, 3);
894 pu32CodeBuf[off++] = Armv8A64MkInstrCCmpReg(idxRegSrc1Val, idxRegSrc2Val,
895 ARMA64_NZCV_F_N0_Z0_C0_V0, kArmv8InstrCond_Eq);
896
897 iemNativeAddFixup(pReNative, off, idxLabelObsoleteTb, kIemNativeFixupType_RelImm19At5);
898 pu32CodeBuf[off++] = Armv8A64MkInstrBCond(kArmv8InstrCond_Ne, 0);
899
900 /* Advance and loop. */
901 pu32CodeBuf[off++] = Armv8A64MkInstrAddUImm12(idxRegSrc1Ptr, idxRegSrc1Ptr, 0x20);
902 pu32CodeBuf[off++] = Armv8A64MkInstrAddUImm12(idxRegSrc2Ptr, idxRegSrc2Ptr, 0x20);
903 pu32CodeBuf[off++] = Armv8A64MkInstrSubUImm12(idxRegLoop, idxRegLoop, 1, false /*f64Bit*/, true /*fSetFlags*/);
904 pu32CodeBuf[off] = Armv8A64MkInstrBCond(kArmv8InstrCond_Ne, (int32_t)offLoopStart - (int32_t)off);
905 off++;
906
907 iemNativeRegFreeTmp(pReNative, idxRegLoop);
908 }
909
910 /* Deal with any remaining dwords (uint64_t). There can be up to
911 three if we looped and four if we didn't. */
912 uint32_t offSrc2 = 0;
913 while (cbLeft >= 8)
914 {
915 pu32CodeBuf[off++] = Armv8A64MkInstrStLdRUOff(kArmv8A64InstrLdStType_Ld_Dword, idxRegSrc1Val,
916 idxRegSrc1Ptr, offPage / 8);
917 pu32CodeBuf[off++] = Armv8A64MkInstrStLdRUOff(kArmv8A64InstrLdStType_Ld_Dword, idxRegSrc2Val,
918 idxRegSrc2Ptr, offSrc2 / 8);
919 if (fPendingJmp)
920 pu32CodeBuf[off++] = Armv8A64MkInstrCCmpReg(idxRegSrc1Val, idxRegSrc2Val,
921 ARMA64_NZCV_F_N0_Z0_C0_V0, kArmv8InstrCond_Eq);
922 else
923 {
924 pu32CodeBuf[off++] = Armv8A64MkInstrCmpReg(idxRegSrc1Val, idxRegSrc2Val);
925 fPendingJmp = true;
926 }
927 pbOpcodes += 8;
928 offPage += 8;
929 offSrc2 += 8;
930 cbLeft -= 8;
931 }
932
933 iemNativeRegFreeTmp(pReNative, idxRegSrc2Ptr);
934 /* max cost thus far: memcmp-loop=43 vs memcmp-no-loop=30 */
935 }
936 /*
937 * Otherwise, we compare with constants and merge with the general mop-up.
938 */
939 else
940 {
941 while (cbLeft >= 8)
942 {
943 pu32CodeBuf[off++] = Armv8A64MkInstrStLdRUOff(kArmv8A64InstrLdStType_Ld_Dword, idxRegSrc1Val, idxRegSrc1Ptr,
944 offPage / 8);
945 off = iemNativeEmitLoadGprImmEx(pu32CodeBuf, off, idxRegSrc2Val,
946 RT_MAKE_U64_FROM_MSB_U8(pbOpcodes[7], pbOpcodes[6], pbOpcodes[5], pbOpcodes[4],
947 pbOpcodes[3], pbOpcodes[2], pbOpcodes[1], pbOpcodes[0]));
948 if (fPendingJmp)
949 pu32CodeBuf[off++] = Armv8A64MkInstrCCmpReg(idxRegSrc1Val, idxRegSrc2Val,
950 ARMA64_NZCV_F_N0_Z0_C0_V0, kArmv8InstrCond_Eq, true /*f64Bit*/);
951 else
952 {
953 pu32CodeBuf[off++] = Armv8A64MkInstrCmpReg(idxRegSrc1Val, idxRegSrc2Val, true /*f64Bit*/);
954 fPendingJmp = true;
955 }
956 pbOpcodes += 8;
957 offPage += 8;
958 cbLeft -= 8;
959 }
960 /* max cost thus far: 21 */
961 }
962
963 /* Deal with any remaining bytes (7 or less). */
964 Assert(cbLeft < 8);
965 if (cbLeft >= 4)
966 {
967 pu32CodeBuf[off++] = Armv8A64MkInstrStLdRUOff(kArmv8A64InstrLdStType_Ld_Word, idxRegSrc1Val, idxRegSrc1Ptr,
968 offPage / 4);
969 off = iemNativeEmitLoadGpr32ImmEx(pu32CodeBuf, off, idxRegSrc2Val,
970 RT_MAKE_U32_FROM_MSB_U8(pbOpcodes[3], pbOpcodes[2], pbOpcodes[1], pbOpcodes[0]));
971 if (fPendingJmp)
972 pu32CodeBuf[off++] = Armv8A64MkInstrCCmpReg(idxRegSrc1Val, idxRegSrc2Val,
973 ARMA64_NZCV_F_N0_Z0_C0_V0, kArmv8InstrCond_Eq, false /*f64Bit*/);
974 else
975 {
976 pu32CodeBuf[off++] = Armv8A64MkInstrCmpReg(idxRegSrc1Val, idxRegSrc2Val, false /*f64Bit*/);
977 fPendingJmp = true;
978 }
979 pbOpcodes += 4;
980 offPage += 4;
981 cbLeft -= 4;
982
983 }
984
985 if (cbLeft >= 2)
986 {
987 pu32CodeBuf[off++] = Armv8A64MkInstrStLdRUOff(kArmv8A64InstrLdStType_Ld_Half, idxRegSrc1Val, idxRegSrc1Ptr,
988 offPage / 2);
989 pu32CodeBuf[off++] = Armv8A64MkInstrMovZ(idxRegSrc2Val, RT_MAKE_U16(pbOpcodes[0], pbOpcodes[1]));
990 if (fPendingJmp)
991 pu32CodeBuf[off++] = Armv8A64MkInstrCCmpReg(idxRegSrc1Val, idxRegSrc2Val,
992 ARMA64_NZCV_F_N0_Z0_C0_V0, kArmv8InstrCond_Eq, false /*f64Bit*/);
993 else
994 {
995 pu32CodeBuf[off++] = Armv8A64MkInstrCmpReg(idxRegSrc1Val, idxRegSrc2Val, false /*f64Bit*/);
996 fPendingJmp = true;
997 }
998 pbOpcodes += 2;
999 offPage += 2;
1000 cbLeft -= 2;
1001 }
1002
1003 if (cbLeft > 0)
1004 {
1005 Assert(cbLeft == 1);
1006 pu32CodeBuf[off++] = Armv8A64MkInstrStLdRUOff(kArmv8A64InstrLdStType_Ld_Byte, idxRegSrc1Val, idxRegSrc1Ptr, offPage);
1007 if (fPendingJmp)
1008 {
1009 pu32CodeBuf[off++] = Armv8A64MkInstrMovZ(idxRegSrc2Val, pbOpcodes[0]);
1010 pu32CodeBuf[off++] = Armv8A64MkInstrCCmpReg(idxRegSrc1Val, idxRegSrc2Val,
1011 ARMA64_NZCV_F_N0_Z0_C0_V0, kArmv8InstrCond_Eq, false /*f64Bit*/);
1012 }
1013 else
1014 {
1015 pu32CodeBuf[off++] = Armv8A64MkInstrCmpUImm12(idxRegSrc1Val, pbOpcodes[0], false /*f64Bit*/);
1016 fPendingJmp = true;
1017 }
1018 pbOpcodes += 1;
1019 offPage += 1;
1020 cbLeft -= 1;
1021 }
1022
1023 iemNativeRegFreeTmp(pReNative, idxRegSrc2Val);
1024 }
1025 Assert(cbLeft == 0);
1026
1027 /*
1028 * Finally, the branch on difference.
1029 */
1030 if (fPendingJmp)
1031 {
1032 iemNativeAddFixup(pReNative, off, idxLabelObsoleteTb, kIemNativeFixupType_RelImm19At5);
1033 pu32CodeBuf[off++] = Armv8A64MkInstrBCond(kArmv8InstrCond_Ne, 0);
1034 }
1035 RT_NOREF(pu32CodeBuf, cbLeft, offPage, pbOpcodes, offConsolidatedJump, idxLabelObsoleteTb);
1036
1037 /* max costs: memcmp-loop=54; memcmp-no-loop=41; only-src1-ptr=32 */
1038 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
1039 iemNativeRegFreeTmp(pReNative, idxRegSrc1Val);
1040 iemNativeRegFreeTmp(pReNative, idxRegSrc1Ptr);
1041
1042#else
1043# error "Port me"
1044#endif
1045 return off;
1046}
1047
1048
1049
1050/**
1051 * Macro that implements PC check after a conditional branch.
1052 */
1053#define BODY_CHECK_PC_AFTER_BRANCH(a_pTb, a_idxRange, a_offRange, a_cbInstr) \
1054 RT_NOREF(a_cbInstr); \
1055 off = iemNativeEmitBltInCheckPcAfterBranch(pReNative, off, a_pTb, a_idxRange, a_offRange)
1056
1057#define LIVENESS_CHECK_PC_AFTER_BRANCH(a_pOutgoing, a_pCallEntry) \
1058 if (!IEM_F_MODE_X86_IS_FLAT((uint32_t)(a_pCallEntry)->auParams[0] >> 8)) \
1059 IEM_LIVENESS_RAW_SEG_BASE_INPUT(a_pOutgoing, X86_SREG_CS); \
1060 else do { } while (0)
1061
1062DECL_FORCE_INLINE(uint32_t)
1063iemNativeEmitBltInCheckPcAfterBranch(PIEMRECOMPILERSTATE pReNative, uint32_t off, PCIEMTB pTb,
1064 uint8_t idxRange, uint16_t offRange)
1065{
1066#ifdef VBOX_STRICT
1067 off = iemNativeEmitMarker(pReNative, off, 0x80000004);
1068#endif
1069
1070#ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING
1071 Assert(pReNative->Core.offPc == 0);
1072#endif
1073
1074 /*
1075 * The GCPhysRangePageWithOffset value in the threaded function is a fixed
1076 * constant for us here.
1077 *
1078 * We can pretend that iem.s.cbInstrBufTotal is X86_PAGE_SIZE here, because
1079 * it serves no purpose as a CS.LIM; if that's needed we've just performed
1080 * it, and as long as we don't implement code TLB reloading here there is
1081 * no point in checking that the TLB data we're using is still valid.
1082 *
1083 * What we do is:
1084 * 1. Calculate the FLAT PC (RIP + CS.BASE).
1085 * 2. Subtract iem.s.uInstrBufPc from it, getting 'off'.
1086 * 3. The 'off' must be less than X86_PAGE_SIZE/cbInstrBufTotal or
1087 * we're in the wrong spot and need to find a new TB.
1088 * 4. Add 'off' to iem.s.GCPhysInstrBuf and compare with the
1089 * GCPhysRangePageWithOffset constant mentioned above.
1090 *
1091 * The adding of CS.BASE to RIP can be skipped in the first step if we're
1092 * in 64-bit code or flat 32-bit.
1093 */
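 /* Roughly, in C, the emitted checks amount to (an illustrative sketch only):

        uint64_t const uFlatPc = rip + (fFlat ? 0 : cs.u64Base);
        uint64_t const offBuf  = uFlatPc - pVCpu->iem.s.uInstrBufPc;
        if (offBuf > X86_PAGE_SIZE - 1)
            goto CheckBranchMiss;
        if (pVCpu->iem.s.GCPhysInstrBuf + offBuf != GCPhysRangePageWithOffset)
            goto CheckBranchMiss;
 */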
1094
1095 /* Allocate registers for step 1. Get the shadowed stuff before allocating
1096 the temp register, so we don't accidentally clobber something we'll be
1097 needing again immediately. This is why we get idxRegCsBase here. */
1098 uint8_t const idxRegPc = iemNativeRegAllocTmpForGuestReg(pReNative, &off, kIemNativeGstReg_Pc,
1099 kIemNativeGstRegUse_ReadOnly);
1100 uint8_t const idxRegCsBase = IEM_F_MODE_X86_IS_FLAT(pReNative->fExec) ? UINT8_MAX
1101 : iemNativeRegAllocTmpForGuestReg(pReNative, &off, IEMNATIVEGSTREG_SEG_BASE(X86_SREG_CS),
1102 kIemNativeGstRegUse_ReadOnly);
1103
1104 uint8_t const idxRegTmp = iemNativeRegAllocTmp(pReNative, &off);
1105
1106#ifdef VBOX_STRICT
1107 /* Do assertions before idxRegTmp contains anything. */
1108 Assert(RT_SIZEOFMEMB(VMCPUCC, iem.s.cbInstrBufTotal) == sizeof(uint16_t));
1109# ifdef RT_ARCH_AMD64
1110 {
1111 uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 8+2+1 + 11+2+1);
1112 /* Assert(pVCpu->cpum.GstCtx.cs.u64Base == 0 || !IEM_F_MODE_X86_IS_FLAT(pReNative->fExec)); */
1113 if (IEM_F_MODE_X86_IS_FLAT(pReNative->fExec))
1114 {
1115 /* cmp r/m64, imm8 */
1116 pbCodeBuf[off++] = X86_OP_REX_W;
1117 pbCodeBuf[off++] = 0x83;
1118 off = iemNativeEmitGprByVCpuDisp(pbCodeBuf, off, 7, RT_UOFFSETOF(VMCPUCC, cpum.GstCtx.cs.u64Base));
1119 pbCodeBuf[off++] = 0;
1120 /* je rel8 */
1121 pbCodeBuf[off++] = 0x74;
1122 pbCodeBuf[off++] = 1;
1123 /* int3 */
1124 pbCodeBuf[off++] = 0xcc;
1125
1126 }
1127
1128 /* Assert(!(pVCpu->iem.s.GCPhysInstrBuf & X86_PAGE_OFFSET_MASK)); - done later by the non-x86 code */
1129 /* test r/m64, imm32 */
1130 pbCodeBuf[off++] = X86_OP_REX_W;
1131 pbCodeBuf[off++] = 0xf7;
1132 off = iemNativeEmitGprByVCpuDisp(pbCodeBuf, off, 0, RT_UOFFSETOF(VMCPUCC, iem.s.GCPhysInstrBuf));
1133 pbCodeBuf[off++] = RT_BYTE1(X86_PAGE_OFFSET_MASK);
1134 pbCodeBuf[off++] = RT_BYTE2(X86_PAGE_OFFSET_MASK);
1135 pbCodeBuf[off++] = RT_BYTE3(X86_PAGE_OFFSET_MASK);
1136 pbCodeBuf[off++] = RT_BYTE4(X86_PAGE_OFFSET_MASK);
1137 /* jz rel8 */
1138 pbCodeBuf[off++] = 0x74;
1139 pbCodeBuf[off++] = 1;
1140 /* int3 */
1141 pbCodeBuf[off++] = 0xcc;
1142 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
1143 }
1144# else
1145
1146 /* Assert(pVCpu->cpum.GstCtx.cs.u64Base == 0 || !IEM_F_MODE_X86_IS_FLAT(pReNative->fExec)); */
1147 if (IEM_F_MODE_X86_IS_FLAT(pReNative->fExec))
1148 {
1149 off = iemNativeEmitLoadGprFromVCpuU64(pReNative, off, idxRegTmp, RT_UOFFSETOF(VMCPUCC, cpum.GstCtx.cs.u64Base));
1150# ifdef RT_ARCH_ARM64
1151 uint32_t * const pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 2);
1152 pu32CodeBuf[off++] = Armv8A64MkInstrCbzCbnz(false /*fJmpIfNotZero*/, 2, idxRegTmp);
1153 pu32CodeBuf[off++] = Armv8A64MkInstrBrk(0x2004);
1154 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
1155# else
1156# error "Port me!"
1157# endif
1158 }
1159# endif
1160
1161#endif /* VBOX_STRICT */
1162
1163 /* 1+2. Calculate 'off' first (into idxRegTmp). */
1164 off = iemNativeEmitLoadGprFromVCpuU64(pReNative, off, idxRegTmp, RT_UOFFSETOF(VMCPUCC, iem.s.uInstrBufPc));
1165 if (IEM_F_MODE_X86_IS_FLAT(pReNative->fExec))
1166 {
1167#ifdef RT_ARCH_ARM64
1168 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
1169 pu32CodeBuf[off++] = Armv8A64MkInstrSubReg(idxRegTmp, idxRegPc, idxRegTmp);
1170 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
1171#else
1172 off = iemNativeEmitNegGpr(pReNative, off, idxRegTmp);
1173 off = iemNativeEmitAddTwoGprs(pReNative, off, idxRegTmp, idxRegPc);
1174#endif
1175 }
1176 else
1177 {
1178#ifdef RT_ARCH_ARM64
1179 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 2);
1180 pu32CodeBuf[off++] = Armv8A64MkInstrSubReg(idxRegTmp, idxRegCsBase, idxRegTmp);
1181 pu32CodeBuf[off++] = Armv8A64MkInstrAddReg(idxRegTmp, idxRegTmp, idxRegPc);
1182 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
1183#else
1184 off = iemNativeEmitNegGpr(pReNative, off, idxRegTmp);
1185 off = iemNativeEmitAddTwoGprs(pReNative, off, idxRegTmp, idxRegCsBase);
1186 off = iemNativeEmitAddTwoGprs(pReNative, off, idxRegTmp, idxRegPc);
1187#endif
1188 iemNativeRegFreeTmp(pReNative, idxRegCsBase);
1189 }
1190 iemNativeRegFreeTmp(pReNative, idxRegPc);
1191
1192 /* 3. Check that off is less than X86_PAGE_SIZE/cbInstrBufTotal. */
1193 off = iemNativeEmitCmpGprWithImm(pReNative, off, idxRegTmp, X86_PAGE_SIZE - 1);
1194 off = iemNativeEmitJaToNewLabel(pReNative, off, kIemNativeLabelType_CheckBranchMiss);
1195
1196 /* 4. Add iem.s.GCPhysInstrBuf and compare with GCPhysRangePageWithOffset. */
1197#ifdef RT_ARCH_AMD64
1198 uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 7);
1199 pbCodeBuf[off++] = idxRegTmp < 8 ? X86_OP_REX_W : X86_OP_REX_W | X86_OP_REX_R;
1200 pbCodeBuf[off++] = 0x03; /* add r64, r/m64 */
1201 off = iemNativeEmitGprByVCpuDisp(pbCodeBuf, off, idxRegTmp, RT_UOFFSETOF(VMCPUCC, iem.s.GCPhysInstrBuf));
1202 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
1203
1204#elif defined(RT_ARCH_ARM64)
1205 uint8_t const idxRegTmp2 = iemNativeRegAllocTmp(pReNative, &off);
1206
1207 off = iemNativeEmitLoadGprFromVCpuU64(pReNative, off, idxRegTmp2, RT_UOFFSETOF(VMCPUCC, iem.s.GCPhysInstrBuf));
1208 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
1209 pu32CodeBuf[off++] = Armv8A64MkInstrAddReg(idxRegTmp, idxRegTmp, idxRegTmp2);
1210 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
1211
1212# ifdef VBOX_STRICT /* Assert(!(pVCpu->iem.s.GCPhysInstrBuf & X86_PAGE_OFFSET_MASK)); */
1213 off = iemNativeEmitAndGpr32ByImm(pReNative, off, idxRegTmp2, X86_PAGE_OFFSET_MASK, true /*fSetFlags*/);
1214 off = iemNativeEmitJzToFixed(pReNative, off, off + 2 /* correct for ARM64 */);
1215 off = iemNativeEmitBrk(pReNative, off, 0x2005);
1216# endif
1217 iemNativeRegFreeTmp(pReNative, idxRegTmp2);
1218#else
1219# error "Port me"
1220#endif
1221
1222 RTGCPHYS const GCPhysRangePageWithOffset = ( iemTbGetRangePhysPageAddr(pTb, idxRange)
1223 | pTb->aRanges[idxRange].offPhysPage)
1224 + offRange;
1225 off = iemNativeEmitTestIfGprNotEqualImmAndJmpToNewLabel(pReNative, off, idxRegTmp, GCPhysRangePageWithOffset,
1226 kIemNativeLabelType_CheckBranchMiss);
1227
1228 iemNativeRegFreeTmp(pReNative, idxRegTmp);
1229 return off;
1230}
1231
1232
1233/**
1234 * Macro that implements TLB loading and pbInstrBuf updating for an
1235 * instruction crossing into a new page.
1236 *
1237 * This may long jump if we're raising a \#PF, \#GP or similar trouble.
1238 */
1239#define BODY_LOAD_TLB_FOR_NEW_PAGE(a_pTb, a_offInstr, a_idxRange, a_cbInstr) \
1240 RT_NOREF(a_cbInstr); \
1241 off = iemNativeEmitBltLoadTlbForNewPage(pReNative, off, pTb, a_idxRange, a_offInstr)
1242
1243#define LIVENESS_LOAD_TLB_FOR_NEW_PAGE(a_pOutgoing, a_pCallEntry) \
1244 if (!IEM_F_MODE_X86_IS_FLAT((uint32_t)(a_pCallEntry)->auParams[0] >> 8)) \
1245 IEM_LIVENESS_RAW_SEG_BASE_INPUT(a_pOutgoing, X86_SREG_CS); \
1246 else do { } while (0)
1247
1248DECL_FORCE_INLINE(uint32_t)
1249iemNativeEmitBltLoadTlbForNewPage(PIEMRECOMPILERSTATE pReNative, uint32_t off, PCIEMTB pTb, uint8_t idxRange, uint8_t offInstr)
1250{
1251#ifdef VBOX_STRICT
1252 off = iemNativeEmitMarker(pReNative, off, 0x80000005);
1253#endif
1254
1255 /*
1256 * Define labels and allocate the register for holding the GCPhys of the new page.
1257 */
1258 uint16_t const uTlbSeqNo = pReNative->uTlbSeqNo++;
1259 uint32_t const idxRegGCPhys = iemNativeRegAllocTmp(pReNative, &off);
1260 IEMNATIVEEMITTLBSTATE const TlbState(pReNative, IEM_F_MODE_X86_IS_FLAT(pReNative->fExec), &off);
1261 uint32_t const idxLabelTlbLookup = !TlbState.fSkip
1262 ? iemNativeLabelCreate(pReNative, kIemNativeLabelType_TlbLookup, UINT32_MAX, uTlbSeqNo)
1263 : UINT32_MAX;
1264
1265 //off = iemNativeEmitBrk(pReNative, off, 0x1111);
1266
1267 /*
1268 * Jump to the TLB lookup code.
1269 */
1270 if (!TlbState.fSkip)
1271 off = iemNativeEmitJmpToLabel(pReNative, off, idxLabelTlbLookup); /** @todo short jump */
1272
1273 /*
1274 * TlbMiss:
1275 *
1276 * Call iemNativeHlpMemCodeNewPageTlbMissWithOff to do the work.
1277 */
1278 uint32_t const idxLabelTlbMiss = iemNativeLabelCreate(pReNative, kIemNativeLabelType_TlbMiss, off, uTlbSeqNo);
1279
1280 /* Save variables in volatile registers. */
1281 uint32_t const fHstRegsNotToSave = TlbState.getRegsNotToSave() | RT_BIT_32(idxRegGCPhys);
1282 off = iemNativeVarSaveVolatileRegsPreHlpCall(pReNative, off, fHstRegsNotToSave);
1283
1284 /* IEMNATIVE_CALL_ARG1_GREG = offInstr */
1285 off = iemNativeEmitLoadGpr8Imm(pReNative, off, IEMNATIVE_CALL_ARG1_GREG, offInstr);
1286
1287 /* IEMNATIVE_CALL_ARG0_GREG = pVCpu */
1288 off = iemNativeEmitLoadGprFromGpr(pReNative, off, IEMNATIVE_CALL_ARG0_GREG, IEMNATIVE_REG_FIXED_PVMCPU);
1289
1290 /* Done setting up parameters, make the call. */
1291 off = iemNativeEmitCallImm(pReNative, off, (uintptr_t)iemNativeHlpMemCodeNewPageTlbMissWithOff);
1292
1293 /* Move the result to the right register. */
1294 if (idxRegGCPhys != IEMNATIVE_CALL_RET_GREG)
1295 off = iemNativeEmitLoadGprFromGpr(pReNative, off, idxRegGCPhys, IEMNATIVE_CALL_RET_GREG);
1296
1297 /* Restore variables and guest shadow registers to volatile registers. */
1298 off = iemNativeVarRestoreVolatileRegsPostHlpCall(pReNative, off, fHstRegsNotToSave);
1299 off = iemNativeRegRestoreGuestShadowsInVolatileRegs(pReNative, off, TlbState.getActiveRegsWithShadows(true /*fCode*/));
1300
1301#ifdef IEMNATIVE_WITH_TLB_LOOKUP
1302 if (!TlbState.fSkip)
1303 {
1304 /* end of TlbMiss - Jump to the done label. */
1305 uint32_t const idxLabelTlbDone = iemNativeLabelCreate(pReNative, kIemNativeLabelType_TlbDone, UINT32_MAX, uTlbSeqNo);
1306 off = iemNativeEmitJmpToLabel(pReNative, off, idxLabelTlbDone);
1307
1308 /*
1309 * TlbLookup:
1310 */
1311 off = iemNativeEmitTlbLookup<false>(pReNative, off, &TlbState,
1312 IEM_F_MODE_X86_IS_FLAT(pReNative->fExec) ? UINT8_MAX : X86_SREG_CS,
1313 1 /*cbMem*/, 0 /*fAlignMask*/, IEM_ACCESS_TYPE_EXEC,
1314 idxLabelTlbLookup, idxLabelTlbMiss, idxRegGCPhys, offInstr);
1315
1316# ifdef VBOX_WITH_STATISTICS
1317 off = iemNativeEmitIncStamCounterInVCpu(pReNative, off, TlbState.idxReg1, TlbState.idxReg2,
1318 RT_UOFFSETOF(VMCPUCC, iem.s.StatNativeCodeTlbHitsForNewPageWithOffset));
1319# endif
1320
1321 /*
1322 * TlbDone:
1323 */
1324 iemNativeLabelDefine(pReNative, idxLabelTlbDone, off);
1325 TlbState.freeRegsAndReleaseVars(pReNative, UINT8_MAX /*idxVarGCPtrMem*/, true /*fIsCode*/);
1326 }
1327#else
1328 RT_NOREF(idxLabelTlbMiss);
1329#endif
1330
1331 /*
1332 * Now check the physical address of the page matches the expected one.
1333 */
1334 RTGCPHYS const GCPhysNewPage = iemTbGetRangePhysPageAddr(pTb, idxRange);
1335 off = iemNativeEmitTestIfGprNotEqualImmAndJmpToNewLabel(pReNative, off, idxRegGCPhys, GCPhysNewPage,
1336 kIemNativeLabelType_ObsoleteTb);
1337
1338 iemNativeRegFreeTmp(pReNative, idxRegGCPhys);
1339 return off;
1340}
1341
1342
1343/**
1344 * Macro that implements TLB loading and pbInstrBuf updating when branching
1345 * or when crossing a page on an instruction boundary.
1346 *
1347 * This differs from BODY_LOAD_TLB_FOR_NEW_PAGE in that it will first check if
1348 * it is an inter-page branch and also check the page offset.
1349 *
1350 * This may long jump if we're raising a \#PF, \#GP or similar trouble.
1351 */
1352#define BODY_LOAD_TLB_AFTER_BRANCH(a_pTb, a_idxRange, a_cbInstr) \
1353 RT_NOREF(a_cbInstr); \
1354 off = iemNativeEmitBltLoadTlbAfterBranch(pReNative, off, pTb, a_idxRange)
1355
1356#define LIVENESS_LOAD_TLB_AFTER_BRANCH(a_pOutgoing, a_pCallEntry) \
1357 if (!IEM_F_MODE_X86_IS_FLAT((uint32_t)(a_pCallEntry)->auParams[0] >> 8)) \
1358 IEM_LIVENESS_RAW_SEG_BASE_INPUT(a_pOutgoing, X86_SREG_CS); \
1359 else do { } while (0)
1360
1361DECL_FORCE_INLINE(uint32_t)
1362iemNativeEmitBltLoadTlbAfterBranch(PIEMRECOMPILERSTATE pReNative, uint32_t off, PCIEMTB pTb, uint8_t idxRange)
1363{
1364#ifdef VBOX_STRICT
1365 off = iemNativeEmitMarker(pReNative, off, 0x80000006);
1366#endif
1367
1368 BODY_FLUSH_PENDING_WRITES();
1369
1370 /*
1371 * Define labels and allocate the register for holding the GCPhys of the new page.
1372 */
1373 uint32_t const idxLabelCheckBranchMiss = iemNativeLabelCreate(pReNative, kIemNativeLabelType_CheckBranchMiss);
1374 uint16_t const uTlbSeqNo = pReNative->uTlbSeqNo++;
1375 RTGCPHYS const GCPhysRangePageWithOffset = iemTbGetRangePhysPageAddr(pTb, idxRange)
1376 | pTb->aRanges[idxRange].offPhysPage;
1377
1378 /*
1379 *
1380 * First check if RIP is within the current code.
1381 *
1382 * This is very similar to iemNativeEmitBltInCheckPcAfterBranch, the only
1383 * difference is what we do when stuff doesn't match up.
1384 *
1385 * What we do is:
1386 * 1. Calculate the FLAT PC (RIP + CS.BASE).
1387 * 2. Subtract iem.s.uInstrBufPc from it, getting 'off'.
1388 * 3. The 'off' must be less than X86_PAGE_SIZE/cbInstrBufTotal or
1389 * we need to retranslate RIP via the TLB.
1390 * 4. Add 'off' to iem.s.GCPhysInstrBuf and compare with the
1391 * GCPhysRangePageWithOffset constant mentioned above.
1392 *
1393 * The adding of CS.BASE to RIP can be skipped in the first step if we're
1394 * in 64-bit code or flat 32-bit.
1395 *
1396 */
1397
1398 /* Allocate registers for step 1. Get the shadowed stuff before allocating
1399 the temp register, so we don't accidentally clobber something we'll be
1400 needing again immediately. This is why we get idxRegCsBase here.
1401 Update: We share registers with the TlbState, as the TLB code path has
1402 little in common with the rest of the code. */
1403 bool const fIsFlat = IEM_F_MODE_X86_IS_FLAT(pReNative->fExec);
1404 IEMNATIVEEMITTLBSTATE const TlbState(pReNative, fIsFlat, &off);
1405 uint8_t const idxRegPc = !TlbState.fSkip ? TlbState.idxRegPtr
1406 : iemNativeRegAllocTmpForGuestReg(pReNative, &off, kIemNativeGstReg_Pc,
1407 kIemNativeGstRegUse_ReadOnly, true /*fNoVolatileRegs*/);
1408 uint8_t const idxRegCsBase = !TlbState.fSkip || fIsFlat ? TlbState.idxRegSegBase
1409 : iemNativeRegAllocTmpForGuestReg(pReNative, &off, IEMNATIVEGSTREG_SEG_BASE(X86_SREG_CS),
1410 kIemNativeGstRegUse_ReadOnly, true /*fNoVolatileRegs*/);
1411
1412 uint8_t const idxRegTmp = !TlbState.fSkip ? TlbState.idxReg1 : iemNativeRegAllocTmp(pReNative, &off);
1413 uint8_t const idxRegTmp2 = !TlbState.fSkip ? TlbState.idxReg2 : iemNativeRegAllocTmp(pReNative, &off);
1414 uint8_t const idxRegDummy = !TlbState.fSkip ? iemNativeRegAllocTmp(pReNative, &off) : UINT8_MAX;
1415
1416#ifdef VBOX_STRICT
1417 /* Do assertions before idxRegTmp contains anything. */
1418 Assert(RT_SIZEOFMEMB(VMCPUCC, iem.s.cbInstrBufTotal) == sizeof(uint16_t));
1419# ifdef RT_ARCH_AMD64
1420 {
1421 uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 8+2+1 + 11+2+1);
1422 /* Assert(pVCpu->cpum.GstCtx.cs.u64Base == 0 || !IEM_F_MODE_X86_IS_FLAT(pReNative->fExec)); */
1423 if (IEM_F_MODE_X86_IS_FLAT(pReNative->fExec))
1424 {
1425 /* cmp r/m64, imm8 */
1426 pbCodeBuf[off++] = X86_OP_REX_W;
1427 pbCodeBuf[off++] = 0x83;
1428 off = iemNativeEmitGprByVCpuDisp(pbCodeBuf, off, 7, RT_UOFFSETOF(VMCPUCC, cpum.GstCtx.cs.u64Base));
1429 pbCodeBuf[off++] = 0;
1430 /* je rel8 */
1431 pbCodeBuf[off++] = 0x74;
1432 pbCodeBuf[off++] = 1;
1433 /* int3 */
1434 pbCodeBuf[off++] = 0xcc;
1435
1436 }
1437
1438 /* Assert(!(pVCpu->iem.s.GCPhysInstrBuf & X86_PAGE_OFFSET_MASK)); - done later by the non-x86 code */
1439 /* test r/m64, imm32 */
1440 pbCodeBuf[off++] = X86_OP_REX_W;
1441 pbCodeBuf[off++] = 0xf7;
1442 off = iemNativeEmitGprByVCpuDisp(pbCodeBuf, off, 0, RT_UOFFSETOF(VMCPUCC, iem.s.GCPhysInstrBuf));
1443 pbCodeBuf[off++] = RT_BYTE1(X86_PAGE_OFFSET_MASK);
1444 pbCodeBuf[off++] = RT_BYTE2(X86_PAGE_OFFSET_MASK);
1445 pbCodeBuf[off++] = RT_BYTE3(X86_PAGE_OFFSET_MASK);
1446 pbCodeBuf[off++] = RT_BYTE4(X86_PAGE_OFFSET_MASK);
1447 /* jz rel8 */
1448 pbCodeBuf[off++] = 0x74;
1449 pbCodeBuf[off++] = 1;
1450 /* int3 */
1451 pbCodeBuf[off++] = 0xcc;
1452 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
1453 }
1454# else
1455
1456 /* Assert(pVCpu->cpum.GstCtx.cs.u64Base == 0 || !IEM_F_MODE_X86_IS_FLAT(pReNative->fExec)); */
1457 if (IEM_F_MODE_X86_IS_FLAT(pReNative->fExec))
1458 {
1459 off = iemNativeEmitLoadGprFromVCpuU64(pReNative, off, idxRegTmp, RT_UOFFSETOF(VMCPUCC, cpum.GstCtx.cs.u64Base));
1460# ifdef RT_ARCH_ARM64
1461 uint32_t * const pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 2);
1462 pu32CodeBuf[off++] = Armv8A64MkInstrCbzCbnz(false /*fJmpIfNotZero*/, 2, idxRegTmp);
1463 pu32CodeBuf[off++] = Armv8A64MkInstrBrk(0x2006);
1464 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
1465# else
1466# error "Port me!"
1467# endif
1468 }
1469# endif
1470
1471#endif /* VBOX_STRICT */
1472
1473 /* Because we're lazy, we'll jump back here to recalc 'off' and share the
1474 GCPhysRangePageWithOffset check. This is a little risky, so we use the
1475 2nd register to check if we've looped more than once already. */
1476 off = iemNativeEmitGprZero(pReNative, off, idxRegTmp2);
1477
1478 uint32_t const offLabelRedoChecks = off;
1479
1480 /* 1+2. Calculate 'off' first (into idxRegTmp). */
1481 off = iemNativeEmitLoadGprFromVCpuU64(pReNative, off, idxRegTmp, RT_UOFFSETOF(VMCPUCC, iem.s.uInstrBufPc));
1482 if (IEM_F_MODE_X86_IS_FLAT(pReNative->fExec))
1483 {
1484#ifdef RT_ARCH_ARM64
1485 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
1486 pu32CodeBuf[off++] = Armv8A64MkInstrSubReg(idxRegTmp, idxRegPc, idxRegTmp);
1487 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
1488#else
1489 off = iemNativeEmitNegGpr(pReNative, off, idxRegTmp);
1490 off = iemNativeEmitAddTwoGprs(pReNative, off, idxRegTmp, idxRegPc);
1491#endif
1492 }
1493 else
1494 {
1495#ifdef RT_ARCH_ARM64
1496 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 2);
1497 pu32CodeBuf[off++] = Armv8A64MkInstrSubReg(idxRegTmp, idxRegCsBase, idxRegTmp);
1498 pu32CodeBuf[off++] = Armv8A64MkInstrAddReg(idxRegTmp, idxRegTmp, idxRegPc);
1499 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
1500#else
1501 off = iemNativeEmitNegGpr(pReNative, off, idxRegTmp);
1502 off = iemNativeEmitAddTwoGprs(pReNative, off, idxRegTmp, idxRegCsBase);
1503 off = iemNativeEmitAddTwoGprs(pReNative, off, idxRegTmp, idxRegPc);
1504#endif
1505 }
1506
1507 /* 3. Check that off is less than X86_PAGE_SIZE/cbInstrBufTotal.
1508 Unlike iemNativeEmitBltInCheckPcAfterBranch we'll jump to the TLB loading if this fails. */
1509 off = iemNativeEmitCmpGprWithImm(pReNative, off, idxRegTmp, X86_PAGE_SIZE - 1);
1510 uint32_t const offFixedJumpToTlbLoad = off;
1511 off = iemNativeEmitJaToFixed(pReNative, off, off /* (ASSUME ja rel8 suffices) */);
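    /* Note: the ja target above is only a placeholder; it is patched to point at the
       TlbLoad block below via iemNativeFixupFixedJump(pReNative, offFixedJumpToTlbLoad, off). */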
1512
1513 /* 4a. Add iem.s.GCPhysInstrBuf to off ... */
1514#ifdef RT_ARCH_AMD64
1515 uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 7);
1516 pbCodeBuf[off++] = idxRegTmp < 8 ? X86_OP_REX_W : X86_OP_REX_W | X86_OP_REX_R;
1517 pbCodeBuf[off++] = 0x03; /* add r64, r/m64 */
1518 off = iemNativeEmitGprByVCpuDisp(pbCodeBuf, off, idxRegTmp, RT_UOFFSETOF(VMCPUCC, iem.s.GCPhysInstrBuf));
1519 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
1520
1521#elif defined(RT_ARCH_ARM64)
1522
1523 off = iemNativeEmitLoadGprFromVCpuU64(pReNative, off, idxRegTmp2, RT_UOFFSETOF(VMCPUCC, iem.s.GCPhysInstrBuf));
1524 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
1525 pu32CodeBuf[off++] = Armv8A64MkInstrAddReg(idxRegTmp, idxRegTmp, idxRegTmp2);
1526 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
1527
1528# ifdef VBOX_STRICT /* Assert(!(pVCpu->iem.s.GCPhysInstrBuf & X86_PAGE_OFFSET_MASK)); */
1529 off = iemNativeEmitAndGpr32ByImm(pReNative, off, idxRegTmp2, X86_PAGE_OFFSET_MASK, true /*fSetFlags*/);
1530 off = iemNativeEmitJzToFixed(pReNative, off, off + 2 /* correct for ARM64 */);
1531 off = iemNativeEmitBrk(pReNative, off, 0x2005);
1532# endif
1533#else
1534# error "Port me"
1535#endif
1536
1537 /* 4b. ... and compare with GCPhysRangePageWithOffset.
1538
1539 Unlike iemNativeEmitBltInCheckPcAfterBranch we'll have to be more
1540 careful and avoid implicit temporary register usage here.
1541
1542 Unlike the threaded version of this code, we do not obsolete TBs here to
1543 reduce the code size and because indirect calls may legally end at the
1544 same offset in two different pages depending on the program state. */
1545 /** @todo synch the threaded BODY_LOAD_TLB_AFTER_BRANCH version with this. */
1546 off = iemNativeEmitLoadGprImm64(pReNative, off, idxRegTmp2, GCPhysRangePageWithOffset);
1547 off = iemNativeEmitCmpGprWithGpr(pReNative, off, idxRegTmp, idxRegTmp2);
1548 off = iemNativeEmitJnzToLabel(pReNative, off, idxLabelCheckBranchMiss);
1549 uint32_t const offFixedJumpToEnd = off;
1550 off = iemNativeEmitJmpToFixed(pReNative, off, off + 512 /* force rel32 */);
1551
1552 /*
1553 * TlbLoad:
1554 *
1555 * First we try to go via the TLB.
1556 */
1557 iemNativeFixupFixedJump(pReNative, offFixedJumpToTlbLoad, off);
1558
1559 /* Check that we haven't been here before. */
1560 off = iemNativeEmitTestIfGprIsNotZeroAndJmpToLabel(pReNative, off, idxRegTmp2, false /*f64Bit*/, idxLabelCheckBranchMiss);
1561
1562 /* Jump to the TLB lookup code. */
1563 uint32_t const idxLabelTlbLookup = !TlbState.fSkip
1564 ? iemNativeLabelCreate(pReNative, kIemNativeLabelType_TlbLookup, UINT32_MAX, uTlbSeqNo)
1565 : UINT32_MAX;
1566//off = iemNativeEmitBrk(pReNative, off, 0x1234);
1567 if (!TlbState.fSkip)
1568 off = iemNativeEmitJmpToLabel(pReNative, off, idxLabelTlbLookup); /** @todo short jump */
1569
1570 /*
1571 * TlbMiss:
1572 *
1573 * Call iemNativeHlpMemCodeNewPageTlbMiss to do the work.
1574 */
1575 uint32_t const idxLabelTlbMiss = iemNativeLabelCreate(pReNative, kIemNativeLabelType_TlbMiss, off, uTlbSeqNo);
1576 RT_NOREF(idxLabelTlbMiss);
1577
1578 /* Save variables in volatile registers. */
1579 uint32_t const fHstRegsNotToSave = TlbState.getRegsNotToSave() | RT_BIT_32(idxRegTmp) | RT_BIT_32(idxRegTmp2)
1580 | (idxRegDummy != UINT8_MAX ? RT_BIT_32(idxRegDummy) : 0);
1581 off = iemNativeVarSaveVolatileRegsPreHlpCall(pReNative, off, fHstRegsNotToSave);
1582
1583 /* IEMNATIVE_CALL_ARG0_GREG = pVCpu */
1584 off = iemNativeEmitLoadGprFromGpr(pReNative, off, IEMNATIVE_CALL_ARG0_GREG, IEMNATIVE_REG_FIXED_PVMCPU);
1585
1586 /* Done setting up parameters, make the call. */
1587 off = iemNativeEmitCallImm(pReNative, off, (uintptr_t)iemNativeHlpMemCodeNewPageTlbMiss);
1588
1589 /* Restore variables and guest shadow registers to volatile registers. */
1590 off = iemNativeVarRestoreVolatileRegsPostHlpCall(pReNative, off, fHstRegsNotToSave);
1591 off = iemNativeRegRestoreGuestShadowsInVolatileRegs(pReNative, off,
1592 TlbState.getActiveRegsWithShadows()
1593 | RT_BIT_32(idxRegPc)
1594 | (idxRegCsBase != UINT8_MAX ? RT_BIT_32(idxRegCsBase) : 0));
1595
1596#ifdef IEMNATIVE_WITH_TLB_LOOKUP
1597 if (!TlbState.fSkip)
1598 {
1599 /* end of TlbMiss - Jump to the done label. */
1600 uint32_t const idxLabelTlbDone = iemNativeLabelCreate(pReNative, kIemNativeLabelType_TlbDone, UINT32_MAX, uTlbSeqNo);
1601 off = iemNativeEmitJmpToLabel(pReNative, off, idxLabelTlbDone);
1602
1603 /*
1604 * TlbLookup:
1605 */
1606 off = iemNativeEmitTlbLookup<false, true>(pReNative, off, &TlbState, fIsFlat ? UINT8_MAX : X86_SREG_CS,
1607 1 /*cbMem*/, 0 /*fAlignMask*/, IEM_ACCESS_TYPE_EXEC,
1608 idxLabelTlbLookup, idxLabelTlbMiss, idxRegDummy);
1609
1610# ifdef VBOX_WITH_STATISTICS
1611 off = iemNativeEmitIncStamCounterInVCpu(pReNative, off, TlbState.idxReg1, TlbState.idxReg2,
1612 RT_UOFFSETOF(VMCPUCC, iem.s.StatNativeCodeTlbHitsForNewPage));
1613# endif
1614
1615 /*
1616 * TlbDone:
1617 */
1618 iemNativeLabelDefine(pReNative, idxLabelTlbDone, off);
1619 TlbState.freeRegsAndReleaseVars(pReNative, UINT8_MAX /*idxVarGCPtrMem*/, true /*fIsCode*/);
1620 }
1621#else
1622 RT_NOREF(idxLabelTlbMiss);
1623#endif
1624
1625 /* Jmp back to the start and redo the checks. */
1626 off = iemNativeEmitLoadGpr8Imm(pReNative, off, idxRegTmp2, 1); /* indicate that we've looped once already */
1627 off = iemNativeEmitJmpToFixed(pReNative, off, offLabelRedoChecks);
1628
1629 /*
1630 * End:
1631 *
1632 * The end.
1633 */
1634 iemNativeFixupFixedJump(pReNative, offFixedJumpToEnd, off);
1635
1636 if (!TlbState.fSkip)
1637 iemNativeRegFreeTmp(pReNative, idxRegDummy);
1638 else
1639 {
1640 iemNativeRegFreeTmp(pReNative, idxRegTmp2);
1641 iemNativeRegFreeTmp(pReNative, idxRegTmp);
1642 iemNativeRegFreeTmp(pReNative, idxRegPc);
1643 if (idxRegCsBase != UINT8_MAX)
1644 iemNativeRegFreeTmp(pReNative, idxRegCsBase);
1645 }
1646 return off;
1647}
1648
1649
1650#ifdef BODY_CHECK_CS_LIM
1651/**
1652 * Built-in function that checks that EIP/IP + uParam0 is within CS.LIM,
1653 * raising a \#GP(0) if this isn't the case.
1654 */
1655IEM_DECL_IEMNATIVERECOMPFUNC_DEF(iemNativeRecompFunc_BltIn_CheckCsLim)
1656{
1657 uint32_t const cbInstr = (uint8_t)pCallEntry->auParams[0];
1658 BODY_SET_CUR_INSTR();
1659 BODY_FLUSH_PENDING_WRITES();
1660 BODY_CHECK_CS_LIM(cbInstr);
1661 return off;
1662}
1663
1664IEM_DECL_IEMNATIVELIVENESSFUNC_DEF(iemNativeLivenessFunc_BltIn_CheckCsLim)
1665{
1666 IEM_LIVENESS_RAW_INIT_WITH_XCPT_OR_CALL(pOutgoing, pIncoming);
1667 LIVENESS_CHECK_CS_LIM(pOutgoing);
1668 RT_NOREF(pCallEntry);
1669}
1670#endif
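/* Roughly, the guest-level condition the CS.LIM checks above and below enforce is
   (a simplified sketch; the actual emitted code comes from the BODY_CHECK_CS_LIM
   definition earlier in this file):

       if ((uint64_t)pVCpu->cpum.GstCtx.eip + cbInstr > (uint64_t)pVCpu->cpum.GstCtx.cs.u32Limit + 1)
           -> raise #GP(0);
*/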
1671
1672
1673#if defined(BODY_CHECK_OPCODES) && defined(BODY_CHECK_CS_LIM)
1674/**
1675 * Built-in function for re-checking opcodes and CS.LIM after an instruction
1676 * that may have modified them.
1677 */
1678IEM_DECL_IEMNATIVERECOMPFUNC_DEF(iemNativeRecompFunc_BltIn_CheckCsLimAndOpcodes)
1679{
1680 PCIEMTB const pTb = pReNative->pTbOrg;
1681 uint32_t const cbInstr = (uint8_t)pCallEntry->auParams[0];
1682 uint32_t const idxRange = (uint32_t)pCallEntry->auParams[1];
1683 uint32_t const offRange = (uint32_t)pCallEntry->auParams[2];
1684 BODY_SET_CUR_INSTR();
1685 BODY_FLUSH_PENDING_WRITES();
1686 BODY_CHECK_CS_LIM(cbInstr);
1687 BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
1688 return off;
1689}
1690
1691IEM_DECL_IEMNATIVELIVENESSFUNC_DEF(iemNativeLivenessFunc_BltIn_CheckCsLimAndOpcodes)
1692{
1693 IEM_LIVENESS_RAW_INIT_WITH_XCPT_OR_CALL(pOutgoing, pIncoming);
1694 LIVENESS_CHECK_CS_LIM(pOutgoing);
1695 LIVENESS_CHECK_OPCODES(pOutgoing);
1696 RT_NOREF(pCallEntry);
1697}
1698#endif
1699
1700
1701#if defined(BODY_CHECK_OPCODES)
1702/**
1703 * Built-in function for re-checking opcodes after an instruction that may have
1704 * modified them.
1705 */
1706IEM_DECL_IEMNATIVERECOMPFUNC_DEF(iemNativeRecompFunc_BltIn_CheckOpcodes)
1707{
1708 PCIEMTB const pTb = pReNative->pTbOrg;
1709 uint32_t const cbInstr = (uint8_t)pCallEntry->auParams[0];
1710 uint32_t const idxRange = (uint32_t)pCallEntry->auParams[1];
1711 uint32_t const offRange = (uint32_t)pCallEntry->auParams[2];
1712 BODY_SET_CUR_INSTR();
1713 BODY_FLUSH_PENDING_WRITES();
1714 BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
1715 return off;
1716}
1717
1718IEM_DECL_IEMNATIVELIVENESSFUNC_DEF(iemNativeLivenessFunc_BltIn_CheckOpcodes)
1719{
1720 IEM_LIVENESS_RAW_INIT_WITH_XCPT_OR_CALL(pOutgoing, pIncoming);
1721 LIVENESS_CHECK_OPCODES(pOutgoing);
1722 RT_NOREF(pCallEntry);
1723}
1724#endif
1725
1726
1727#if defined(BODY_CHECK_OPCODES) && defined(BODY_CONSIDER_CS_LIM_CHECKING)
1728/**
1729 * Built-in function for re-checking opcodes and considering the need for CS.LIM
1730 * checking after an instruction that may have modified them.
1731 */
1732IEM_DECL_IEMNATIVERECOMPFUNC_DEF(iemNativeRecompFunc_BltIn_CheckOpcodesConsiderCsLim)
1733{
1734 PCIEMTB const pTb = pReNative->pTbOrg;
1735 uint32_t const cbInstr = (uint8_t)pCallEntry->auParams[0];
1736 uint32_t const idxRange = (uint32_t)pCallEntry->auParams[1];
1737 uint32_t const offRange = (uint32_t)pCallEntry->auParams[2];
1738 BODY_SET_CUR_INSTR();
1739 BODY_FLUSH_PENDING_WRITES();
1740 BODY_CONSIDER_CS_LIM_CHECKING(pTb, cbInstr);
1741 BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
1742 return off;
1743}
1744
1745IEM_DECL_IEMNATIVELIVENESSFUNC_DEF(iemNativeLivenessFunc_BltIn_CheckOpcodesConsiderCsLim)
1746{
1747 IEM_LIVENESS_RAW_INIT_WITH_XCPT_OR_CALL(pOutgoing, pIncoming);
1748 LIVENESS_CONSIDER_CS_LIM_CHECKING(pOutgoing);
1749 LIVENESS_CHECK_OPCODES(pOutgoing);
1750 RT_NOREF(pCallEntry);
1751}
1752#endif
1753
1754
1755/*
1756 * Post-branching checkers.
1757 */
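/* Parameter layout shared by the post-branch built-ins below (summarising how the
   function bodies decode their call entry):
       auParams[0] (low 8 bits)  -> cbInstr
       auParams[1]               -> idxRange  (opcode range index within the TB)
       auParams[2]               -> offRange  (offset into that range)
*/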
1758
1759#if defined(BODY_CHECK_OPCODES) && defined(BODY_CHECK_PC_AFTER_BRANCH) && defined(BODY_CHECK_CS_LIM)
1760/**
1761 * Built-in function for checking CS.LIM, checking the PC and checking opcodes
1762 * after conditional branching within the same page.
1763 *
1764 * @see iemThreadedFunc_BltIn_CheckPcAndOpcodes
1765 */
1766IEM_DECL_IEMNATIVERECOMPFUNC_DEF(iemNativeRecompFunc_BltIn_CheckCsLimAndPcAndOpcodes)
1767{
1768 PCIEMTB const pTb = pReNative->pTbOrg;
1769 uint32_t const cbInstr = (uint8_t)pCallEntry->auParams[0];
1770 uint32_t const idxRange = (uint32_t)pCallEntry->auParams[1];
1771 uint32_t const offRange = (uint32_t)pCallEntry->auParams[2];
1772 //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes));
1773 BODY_SET_CUR_INSTR();
1774 BODY_FLUSH_PENDING_WRITES();
1775 BODY_CHECK_CS_LIM(cbInstr);
1776 BODY_CHECK_PC_AFTER_BRANCH(pTb, idxRange, offRange, cbInstr);
1777 BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
1778 //LogFunc(("okay\n"));
1779 return off;
1780}
1781
1782IEM_DECL_IEMNATIVELIVENESSFUNC_DEF(iemNativeLivenessFunc_BltIn_CheckCsLimAndPcAndOpcodes)
1783{
1784 IEM_LIVENESS_RAW_INIT_WITH_XCPT_OR_CALL(pOutgoing, pIncoming);
1785 LIVENESS_CHECK_CS_LIM(pOutgoing);
1786 LIVENESS_CHECK_PC_AFTER_BRANCH(pOutgoing, pCallEntry);
1787 LIVENESS_CHECK_OPCODES(pOutgoing);
1788 RT_NOREF(pCallEntry);
1789}
1790#endif
1791
1792
1793#if defined(BODY_CHECK_OPCODES) && defined(BODY_CHECK_PC_AFTER_BRANCH)
1794/**
1795 * Built-in function for checking the PC and checking opcodes after conditional
1796 * branching within the same page.
1797 *
1798 * @see iemThreadedFunc_BltIn_CheckCsLimAndPcAndOpcodes
1799 */
1800IEM_DECL_IEMNATIVERECOMPFUNC_DEF(iemNativeRecompFunc_BltIn_CheckPcAndOpcodes)
1801{
1802 PCIEMTB const pTb = pReNative->pTbOrg;
1803 uint32_t const cbInstr = (uint8_t)pCallEntry->auParams[0];
1804 uint32_t const idxRange = (uint32_t)pCallEntry->auParams[1];
1805 uint32_t const offRange = (uint32_t)pCallEntry->auParams[2];
1806 //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes));
1807 BODY_SET_CUR_INSTR();
1808 BODY_FLUSH_PENDING_WRITES();
1809 BODY_CHECK_PC_AFTER_BRANCH(pTb, idxRange, offRange, cbInstr);
1810 BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
1811 //LogFunc(("okay\n"));
1812 return off;
1813}
1814
1815IEM_DECL_IEMNATIVELIVENESSFUNC_DEF(iemNativeLivenessFunc_BltIn_CheckPcAndOpcodes)
1816{
1817 IEM_LIVENESS_RAW_INIT_WITH_XCPT_OR_CALL(pOutgoing, pIncoming);
1818 LIVENESS_CHECK_PC_AFTER_BRANCH(pOutgoing, pCallEntry);
1819 LIVENESS_CHECK_OPCODES(pOutgoing);
1820 RT_NOREF(pCallEntry);
1821}
1822#endif
1823
1824
1825#if defined(BODY_CHECK_OPCODES) && defined(BODY_CHECK_PC_AFTER_BRANCH) && defined(BODY_CONSIDER_CS_LIM_CHECKING)
1826/**
1827 * Built-in function for checking the PC and checking opcodes and considering
1828 * the need for CS.LIM checking after conditional branching within the same
1829 * page.
1830 *
1831 * @see iemThreadedFunc_BltIn_CheckCsLimAndPcAndOpcodes
1832 */
1833IEM_DECL_IEMNATIVERECOMPFUNC_DEF(iemNativeRecompFunc_BltIn_CheckPcAndOpcodesConsiderCsLim)
1834{
1835 PCIEMTB const pTb = pReNative->pTbOrg;
1836 uint32_t const cbInstr = (uint8_t)pCallEntry->auParams[0];
1837 uint32_t const idxRange = (uint32_t)pCallEntry->auParams[1];
1838 uint32_t const offRange = (uint32_t)pCallEntry->auParams[2];
1839 //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes));
1840 BODY_SET_CUR_INSTR();
1841 BODY_FLUSH_PENDING_WRITES();
1842 BODY_CONSIDER_CS_LIM_CHECKING(pTb, cbInstr);
1843 BODY_CHECK_PC_AFTER_BRANCH(pTb, idxRange, offRange, cbInstr);
1844 BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
1845 //LogFunc(("okay\n"));
1846 return off;
1847}
1848
1849IEM_DECL_IEMNATIVELIVENESSFUNC_DEF(iemNativeLivenessFunc_BltIn_CheckPcAndOpcodesConsiderCsLim)
1850{
1851 IEM_LIVENESS_RAW_INIT_WITH_XCPT_OR_CALL(pOutgoing, pIncoming);
1852 LIVENESS_CONSIDER_CS_LIM_CHECKING(pOutgoing);
1853 LIVENESS_CHECK_PC_AFTER_BRANCH(pOutgoing, pCallEntry);
1854 LIVENESS_CHECK_OPCODES(pOutgoing);
1855 RT_NOREF(pCallEntry);
1856}
1857#endif
1858
1859
1860#if defined(BODY_CHECK_OPCODES) && defined(BODY_LOAD_TLB_AFTER_BRANCH) && defined(BODY_CHECK_CS_LIM)
1861/**
1862 * Built-in function for checking CS.LIM, loading TLB and checking opcodes when
1863 * transitioning to a different code page.
1864 *
1865 * The code page transition can either be a natural crossing onto the next page
1866 * (with the instruction starting at page offset zero) or happen by means of branching.
1867 *
1868 * @see iemThreadedFunc_BltIn_CheckOpcodesLoadingTlb
1869 */
1870IEM_DECL_IEMNATIVERECOMPFUNC_DEF(iemNativeRecompFunc_BltIn_CheckCsLimAndOpcodesLoadingTlb)
1871{
1872 PCIEMTB const pTb = pReNative->pTbOrg;
1873 uint32_t const cbInstr = (uint8_t)pCallEntry->auParams[0];
1874 uint32_t const idxRange = (uint32_t)pCallEntry->auParams[1];
1875 uint32_t const offRange = (uint32_t)pCallEntry->auParams[2];
1876 //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes));
1877 BODY_SET_CUR_INSTR();
1878 BODY_FLUSH_PENDING_WRITES();
1879 BODY_CHECK_CS_LIM(cbInstr);
1880 Assert(offRange == 0);
1881 BODY_LOAD_TLB_AFTER_BRANCH(pTb, idxRange, cbInstr);
1882 BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
1883 //LogFunc(("okay\n"));
1884 return off;
1885}
1886
1887IEM_DECL_IEMNATIVELIVENESSFUNC_DEF(iemNativeLivenessFunc_BltIn_CheckCsLimAndOpcodesLoadingTlb)
1888{
1889 IEM_LIVENESS_RAW_INIT_WITH_XCPT_OR_CALL(pOutgoing, pIncoming);
1890 LIVENESS_CHECK_CS_LIM(pOutgoing);
1891 LIVENESS_LOAD_TLB_AFTER_BRANCH(pOutgoing, pCallEntry);
1892 LIVENESS_CHECK_OPCODES(pOutgoing);
1893 RT_NOREF(pCallEntry);
1894}
1895#endif
1896
1897
1898#if defined(BODY_CHECK_OPCODES) && defined(BODY_LOAD_TLB_AFTER_BRANCH)
1899/**
1900 * Built-in function for loading TLB and checking opcodes when transitioning to
1901 * a different code page.
1902 *
1903 * The code page transition can either be a natural crossing onto the next page
1904 * (with the instruction starting at page offset zero) or happen by means of branching.
1905 *
1906 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesLoadingTlb
1907 */
1908IEM_DECL_IEMNATIVERECOMPFUNC_DEF(iemNativeRecompFunc_BltIn_CheckOpcodesLoadingTlb)
1909{
1910 PCIEMTB const pTb = pReNative->pTbOrg;
1911 uint32_t const cbInstr = (uint8_t)pCallEntry->auParams[0];
1912 uint32_t const idxRange = (uint32_t)pCallEntry->auParams[1];
1913 uint32_t const offRange = (uint32_t)pCallEntry->auParams[2];
1914 //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes));
1915 BODY_SET_CUR_INSTR();
1916 BODY_FLUSH_PENDING_WRITES();
1917 Assert(offRange == 0);
1918 BODY_LOAD_TLB_AFTER_BRANCH(pTb, idxRange, cbInstr);
1919 BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
1920 //LogFunc(("okay\n"));
1921 return off;
1922}
1923
1924IEM_DECL_IEMNATIVELIVENESSFUNC_DEF(iemNativeLivenessFunc_BltIn_CheckOpcodesLoadingTlb)
1925{
1926 IEM_LIVENESS_RAW_INIT_WITH_XCPT_OR_CALL(pOutgoing, pIncoming);
1927 LIVENESS_LOAD_TLB_AFTER_BRANCH(pOutgoing, pCallEntry);
1928 LIVENESS_CHECK_OPCODES(pOutgoing);
1929 RT_NOREF(pCallEntry);
1930}
1931#endif
1932
1933
1934#if defined(BODY_CHECK_OPCODES) && defined(BODY_LOAD_TLB_AFTER_BRANCH) && defined(BODY_CONSIDER_CS_LIM_CHECKING)
1935/**
1936 * Built-in function for loading TLB and checking opcodes and considering the
1937 * need for CS.LIM checking when transitioning to a different code page.
1938 *
1939 * The code page transition can either be a natural crossing onto the next page
1940 * (with the instruction starting at page offset zero) or happen by means of branching.
1941 *
1942 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesLoadingTlb
1943 */
1944IEM_DECL_IEMNATIVERECOMPFUNC_DEF(iemNativeRecompFunc_BltIn_CheckOpcodesLoadingTlbConsiderCsLim)
1945{
1946 PCIEMTB const pTb = pReNative->pTbOrg;
1947 uint32_t const cbInstr = (uint8_t)pCallEntry->auParams[0];
1948 uint32_t const idxRange = (uint32_t)pCallEntry->auParams[1];
1949 uint32_t const offRange = (uint32_t)pCallEntry->auParams[2];
1950 //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes));
1951 BODY_SET_CUR_INSTR();
1952 BODY_FLUSH_PENDING_WRITES();
1953 BODY_CONSIDER_CS_LIM_CHECKING(pTb, cbInstr);
1954 Assert(offRange == 0);
1955 BODY_LOAD_TLB_AFTER_BRANCH(pTb, idxRange, cbInstr);
1956 BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
1957 //LogFunc(("okay\n"));
1958 return off;
1959}
1960
1961IEM_DECL_IEMNATIVELIVENESSFUNC_DEF(iemNativeLivenessFunc_BltIn_CheckOpcodesLoadingTlbConsiderCsLim)
1962{
1963 IEM_LIVENESS_RAW_INIT_WITH_XCPT_OR_CALL(pOutgoing, pIncoming);
1964 LIVENESS_CONSIDER_CS_LIM_CHECKING(pOutgoing);
1965 LIVENESS_LOAD_TLB_AFTER_BRANCH(pOutgoing, pCallEntry);
1966 LIVENESS_CHECK_OPCODES(pOutgoing);
1967 RT_NOREF(pCallEntry);
1968}
1969#endif
1970
1971
1972
1973/*
1974 * Natural page crossing checkers.
1975 */
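/* The page-crossing built-ins below use the same call entry layout, with the upper
   half of auParams[0] additionally carrying the byte count for the first page:
       auParams[0] (low 8 bits)   -> cbInstr
       auParams[0] (bits 32..63)  -> cbStartPage (passed on to BODY_LOAD_TLB_FOR_NEW_PAGE)
       auParams[1]                -> idxRange1   (the *AcrossPage* and *OnNextPage* variants
                                                  check the new page as idxRange1 + 1)
       auParams[2]                -> offRange1
*/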
1976
1977#if defined(BODY_CHECK_OPCODES) && defined(BODY_LOAD_TLB_FOR_NEW_PAGE) && defined(BODY_CHECK_CS_LIM)
1978/**
1979 * Built-in function for checking CS.LIM, loading TLB and checking opcodes on
1980 * both pages when transitioning to a different code page.
1981 *
1982 * This is used when the previous instruction requires revalidation of opcode
1983 * bytes and the current instruction straddles a page boundary with opcode bytes
1984 * in both the old and new page.
1985 *
1986 * @see iemThreadedFunc_BltIn_CheckOpcodesAcrossPageLoadingTlb
1987 */
1988IEM_DECL_IEMNATIVERECOMPFUNC_DEF(iemNativeRecompFunc_BltIn_CheckCsLimAndOpcodesAcrossPageLoadingTlb)
1989{
1990 PCIEMTB const pTb = pReNative->pTbOrg;
1991 uint32_t const cbInstr = (uint8_t)pCallEntry->auParams[0];
1992 uint32_t const cbStartPage = (uint32_t)(pCallEntry->auParams[0] >> 32);
1993 uint32_t const idxRange1 = (uint32_t)pCallEntry->auParams[1];
1994 uint32_t const offRange1 = (uint32_t)pCallEntry->auParams[2];
1995 uint32_t const idxRange2 = idxRange1 + 1;
1996 BODY_SET_CUR_INSTR();
1997 BODY_FLUSH_PENDING_WRITES();
1998 BODY_CHECK_CS_LIM(cbInstr);
1999 BODY_CHECK_OPCODES(pTb, idxRange1, offRange1, cbInstr);
2000 BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, cbStartPage, idxRange2, cbInstr);
2001 BODY_CHECK_OPCODES(pTb, idxRange2, 0, cbInstr);
2002 return off;
2003}
2004
2005IEM_DECL_IEMNATIVELIVENESSFUNC_DEF(iemNativeLivenessFunc_BltIn_CheckCsLimAndOpcodesAcrossPageLoadingTlb)
2006{
2007 IEM_LIVENESS_RAW_INIT_WITH_XCPT_OR_CALL(pOutgoing, pIncoming);
2008 LIVENESS_CHECK_CS_LIM(pOutgoing);
2009 LIVENESS_CHECK_OPCODES(pOutgoing);
2010 LIVENESS_LOAD_TLB_FOR_NEW_PAGE(pOutgoing, pCallEntry);
2011 RT_NOREF(pCallEntry);
2012}
2013#endif
2014
2015
2016#if defined(BODY_CHECK_OPCODES) && defined(BODY_LOAD_TLB_FOR_NEW_PAGE)
2017/**
2018 * Built-in function for loading TLB and checking opcodes on both pages when
2019 * transitioning to a different code page.
2020 *
2021 * This is used when the previous instruction requires revalidation of opcode
2022 * bytes and the current instruction straddles a page boundary with opcode bytes
2023 * in both the old and new page.
2024 *
2025 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesAcrossPageLoadingTlb
2026 */
2027IEM_DECL_IEMNATIVERECOMPFUNC_DEF(iemNativeRecompFunc_BltIn_CheckOpcodesAcrossPageLoadingTlb)
2028{
2029 PCIEMTB const pTb = pReNative->pTbOrg;
2030 uint32_t const cbInstr = (uint8_t)pCallEntry->auParams[0];
2031 uint32_t const cbStartPage = (uint32_t)(pCallEntry->auParams[0] >> 32);
2032 uint32_t const idxRange1 = (uint32_t)pCallEntry->auParams[1];
2033 uint32_t const offRange1 = (uint32_t)pCallEntry->auParams[2];
2034 uint32_t const idxRange2 = idxRange1 + 1;
2035 BODY_SET_CUR_INSTR();
2036 BODY_FLUSH_PENDING_WRITES();
2037 BODY_CHECK_OPCODES(pTb, idxRange1, offRange1, cbInstr);
2038 BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, cbStartPage, idxRange2, cbInstr);
2039 BODY_CHECK_OPCODES(pTb, idxRange2, 0, cbInstr);
2040 return off;
2041}
2042
2043IEM_DECL_IEMNATIVELIVENESSFUNC_DEF(iemNativeLivenessFunc_BltIn_CheckOpcodesAcrossPageLoadingTlb)
2044{
2045 IEM_LIVENESS_RAW_INIT_WITH_XCPT_OR_CALL(pOutgoing, pIncoming);
2046 LIVENESS_CHECK_OPCODES(pOutgoing);
2047 LIVENESS_LOAD_TLB_FOR_NEW_PAGE(pOutgoing, pCallEntry);
2048 RT_NOREF(pCallEntry);
2049}
2050#endif
2051
2052
2053#if defined(BODY_CHECK_OPCODES) && defined(BODY_LOAD_TLB_FOR_NEW_PAGE) && defined(BODY_CONSIDER_CS_LIM_CHECKING)
2054/**
2055 * Built-in function for loading TLB and checking opcodes on both pages and
2056 * considering the need for CS.LIM checking when transitioning to a different
2057 * code page.
2058 *
2059 * This is used when the previous instruction requires revalidation of opcode
2060 * bytes and the current instruction straddles a page boundary with opcode bytes
2061 * in both the old and new page.
2062 *
2063 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesAcrossPageLoadingTlb
2064 */
2065IEM_DECL_IEMNATIVERECOMPFUNC_DEF(iemNativeRecompFunc_BltIn_CheckOpcodesAcrossPageLoadingTlbConsiderCsLim)
2066{
2067 PCIEMTB const pTb = pReNative->pTbOrg;
2068 uint32_t const cbInstr = (uint8_t)pCallEntry->auParams[0];
2069 uint32_t const cbStartPage = (uint32_t)(pCallEntry->auParams[0] >> 32);
2070 uint32_t const idxRange1 = (uint32_t)pCallEntry->auParams[1];
2071 uint32_t const offRange1 = (uint32_t)pCallEntry->auParams[2];
2072 uint32_t const idxRange2 = idxRange1 + 1;
2073 BODY_SET_CUR_INSTR();
2074 BODY_FLUSH_PENDING_WRITES();
2075 BODY_CONSIDER_CS_LIM_CHECKING(pTb, cbInstr);
2076 BODY_CHECK_OPCODES(pTb, idxRange1, offRange1, cbInstr);
2077 BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, cbStartPage, idxRange2, cbInstr);
2078 BODY_CHECK_OPCODES(pTb, idxRange2, 0, cbInstr);
2079 return off;
2080}
2081
2082IEM_DECL_IEMNATIVELIVENESSFUNC_DEF(iemNativeLivenessFunc_BltIn_CheckOpcodesAcrossPageLoadingTlbConsiderCsLim)
2083{
2084 IEM_LIVENESS_RAW_INIT_WITH_XCPT_OR_CALL(pOutgoing, pIncoming);
2085 LIVENESS_CONSIDER_CS_LIM_CHECKING(pOutgoing);
2086 LIVENESS_CHECK_OPCODES(pOutgoing);
2087 LIVENESS_LOAD_TLB_FOR_NEW_PAGE(pOutgoing, pCallEntry);
2088 RT_NOREF(pCallEntry);
2089}
2090#endif
2091
2092
2093#if defined(BODY_CHECK_OPCODES) && defined(BODY_LOAD_TLB_FOR_NEW_PAGE) && defined(BODY_CHECK_CS_LIM)
2094/**
2095 * Built-in function for checking CS.LIM, loading TLB and checking opcodes when
2096 * advancing naturally to a different code page.
2097 *
2098 * Only opcodes on the new page are checked.
2099 *
2100 * @see iemThreadedFunc_BltIn_CheckOpcodesOnNextPageLoadingTlb
2101 */
2102IEM_DECL_IEMNATIVERECOMPFUNC_DEF(iemNativeRecompFunc_BltIn_CheckCsLimAndOpcodesOnNextPageLoadingTlb)
2103{
2104 PCIEMTB const pTb = pReNative->pTbOrg;
2105 uint32_t const cbInstr = (uint8_t)pCallEntry->auParams[0];
2106 uint32_t const cbStartPage = (uint32_t)(pCallEntry->auParams[0] >> 32);
2107 uint32_t const idxRange1 = (uint32_t)pCallEntry->auParams[1];
2108 //uint32_t const offRange1 = (uint32_t)pCallEntry->auParams[2];
2109 uint32_t const idxRange2 = idxRange1 + 1;
2110 BODY_SET_CUR_INSTR();
2111 BODY_FLUSH_PENDING_WRITES();
2112 BODY_CHECK_CS_LIM(cbInstr);
2113 BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, cbStartPage, idxRange2, cbInstr);
2114 BODY_CHECK_OPCODES(pTb, idxRange2, 0, cbInstr);
2115 return off;
2116}
2117
2118IEM_DECL_IEMNATIVELIVENESSFUNC_DEF(iemNativeLivenessFunc_BltIn_CheckCsLimAndOpcodesOnNextPageLoadingTlb)
2119{
2120 IEM_LIVENESS_RAW_INIT_WITH_XCPT_OR_CALL(pOutgoing, pIncoming);
2121 LIVENESS_CHECK_CS_LIM(pOutgoing);
2122 LIVENESS_LOAD_TLB_FOR_NEW_PAGE(pOutgoing, pCallEntry);
2123 LIVENESS_CHECK_OPCODES(pOutgoing);
2124 RT_NOREF(pCallEntry);
2125}
2126#endif
2127
2128
2129#if defined(BODY_CHECK_OPCODES) && defined(BODY_LOAD_TLB_FOR_NEW_PAGE)
2130/**
2131 * Built-in function for loading TLB and checking opcodes when advancing
2132 * naturally to a different code page.
2133 *
2134 * Only opcodes on the new page are checked.
2135 *
2136 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNextPageLoadingTlb
2137 */
2138IEM_DECL_IEMNATIVERECOMPFUNC_DEF(iemNativeRecompFunc_BltIn_CheckOpcodesOnNextPageLoadingTlb)
2139{
2140 PCIEMTB const pTb = pReNative->pTbOrg;
2141 uint32_t const cbInstr = (uint8_t)pCallEntry->auParams[0];
2142 uint32_t const cbStartPage = (uint32_t)(pCallEntry->auParams[0] >> 32);
2143 uint32_t const idxRange1 = (uint32_t)pCallEntry->auParams[1];
2144 //uint32_t const offRange1 = (uint32_t)pCallEntry->auParams[2];
2145 uint32_t const idxRange2 = idxRange1 + 1;
2146 BODY_SET_CUR_INSTR();
2147 BODY_FLUSH_PENDING_WRITES();
2148 BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, cbStartPage, idxRange2, cbInstr);
2149 BODY_CHECK_OPCODES(pTb, idxRange2, 0, cbInstr);
2150 return off;
2151}
2152
2153IEM_DECL_IEMNATIVELIVENESSFUNC_DEF(iemNativeLivenessFunc_BltIn_CheckOpcodesOnNextPageLoadingTlb)
2154{
2155 IEM_LIVENESS_RAW_INIT_WITH_XCPT_OR_CALL(pOutgoing, pIncoming);
2156 LIVENESS_LOAD_TLB_FOR_NEW_PAGE(pOutgoing, pCallEntry);
2157 LIVENESS_CHECK_OPCODES(pOutgoing);
2158 RT_NOREF(pCallEntry);
2159}
2160#endif
2161
2162
2163#if defined(BODY_CHECK_OPCODES) && defined(BODY_LOAD_TLB_FOR_NEW_PAGE) && defined(BODY_CONSIDER_CS_LIM_CHECKING)
2164/**
2165 * Built-in function for loading TLB and checking opcodes and considering the
2166 * need for CS.LIM checking when advancing naturally to a different code page.
2167 *
2168 * Only opcodes on the new page are checked.
2169 *
2170 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNextPageLoadingTlb
2171 */
2172IEM_DECL_IEMNATIVERECOMPFUNC_DEF(iemNativeRecompFunc_BltIn_CheckOpcodesOnNextPageLoadingTlbConsiderCsLim)
2173{
2174 PCIEMTB const pTb = pReNative->pTbOrg;
2175 uint32_t const cbInstr = (uint8_t)pCallEntry->auParams[0];
2176 uint32_t const cbStartPage = (uint32_t)(pCallEntry->auParams[0] >> 32);
2177 uint32_t const idxRange1 = (uint32_t)pCallEntry->auParams[1];
2178 //uint32_t const offRange1 = (uint32_t)pCallEntry->auParams[2];
2179 uint32_t const idxRange2 = idxRange1 + 1;
2180 BODY_SET_CUR_INSTR();
2181 BODY_FLUSH_PENDING_WRITES();
2182 BODY_CONSIDER_CS_LIM_CHECKING(pTb, cbInstr);
2183 BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, cbStartPage, idxRange2, cbInstr);
2184 BODY_CHECK_OPCODES(pTb, idxRange2, 0, cbInstr);
2185 return off;
2186}
2187
2188IEM_DECL_IEMNATIVELIVENESSFUNC_DEF(iemNativeLivenessFunc_BltIn_CheckOpcodesOnNextPageLoadingTlbConsiderCsLim)
2189{
2190 IEM_LIVENESS_RAW_INIT_WITH_XCPT_OR_CALL(pOutgoing, pIncoming);
2191 LIVENESS_CONSIDER_CS_LIM_CHECKING(pOutgoing);
2192 LIVENESS_LOAD_TLB_FOR_NEW_PAGE(pOutgoing, pCallEntry);
2193 LIVENESS_CHECK_OPCODES(pOutgoing);
2194 RT_NOREF(pCallEntry);
2195}
2196#endif
2197
2198
2199#if defined(BODY_CHECK_OPCODES) && defined(BODY_LOAD_TLB_FOR_NEW_PAGE) && defined(BODY_CHECK_CS_LIM)
2200/**
2201 * Built-in function for checking CS.LIM, loading TLB and checking opcodes when
2202 * advancing naturally to a different code page with first instr at byte 0.
2203 *
2204 * @see iemThreadedFunc_BltIn_CheckOpcodesOnNewPageLoadingTlb
2205 */
2206IEM_DECL_IEMNATIVERECOMPFUNC_DEF(iemNativeRecompFunc_BltIn_CheckCsLimAndOpcodesOnNewPageLoadingTlb)
2207{
2208 PCIEMTB const pTb = pReNative->pTbOrg;
2209 uint32_t const cbInstr = (uint8_t)pCallEntry->auParams[0];
2210 uint32_t const idxRange = (uint32_t)pCallEntry->auParams[1];
2211 BODY_SET_CUR_INSTR();
2212 BODY_FLUSH_PENDING_WRITES();
2213 BODY_CHECK_CS_LIM(cbInstr);
2214 BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, 0, idxRange, cbInstr);
2215 //Assert(pVCpu->iem.s.offCurInstrStart == 0);
2216 BODY_CHECK_OPCODES(pTb, idxRange, 0, cbInstr);
2217 return off;
2218}
2219
2220IEM_DECL_IEMNATIVELIVENESSFUNC_DEF(iemNativeLivenessFunc_BltIn_CheckCsLimAndOpcodesOnNewPageLoadingTlb)
2221{
2222 IEM_LIVENESS_RAW_INIT_WITH_XCPT_OR_CALL(pOutgoing, pIncoming);
2223 LIVENESS_CHECK_CS_LIM(pOutgoing);
2224 LIVENESS_LOAD_TLB_FOR_NEW_PAGE(pOutgoing, pCallEntry);
2225 LIVENESS_CHECK_OPCODES(pOutgoing);
2226 RT_NOREF(pCallEntry);
2227}
2228#endif
2229
2230
2231#if defined(BODY_CHECK_OPCODES) && defined(BODY_LOAD_TLB_FOR_NEW_PAGE)
2232/**
2233 * Built-in function for loading TLB and checking opcodes when advancing
2234 * naturally to a different code page with first instr at byte 0.
2235 *
2236 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNewPageLoadingTlb
2237 */
2238IEM_DECL_IEMNATIVERECOMPFUNC_DEF(iemNativeRecompFunc_BltIn_CheckOpcodesOnNewPageLoadingTlb)
2239{
2240 PCIEMTB const pTb = pReNative->pTbOrg;
2241 uint32_t const cbInstr = (uint8_t)pCallEntry->auParams[0];
2242 uint32_t const idxRange = (uint32_t)pCallEntry->auParams[1];
2243 BODY_SET_CUR_INSTR();
2244 BODY_FLUSH_PENDING_WRITES();
2245 BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, 0, idxRange, cbInstr);
2246 //Assert(pVCpu->iem.s.offCurInstrStart == 0);
2247 BODY_CHECK_OPCODES(pTb, idxRange, 0, cbInstr);
2248 return off;
2249}
2250
2251IEM_DECL_IEMNATIVELIVENESSFUNC_DEF(iemNativeLivenessFunc_BltIn_CheckOpcodesOnNewPageLoadingTlb)
2252{
2253 IEM_LIVENESS_RAW_INIT_WITH_XCPT_OR_CALL(pOutgoing, pIncoming);
2254 LIVENESS_LOAD_TLB_FOR_NEW_PAGE(pOutgoing, pCallEntry);
2255 LIVENESS_CHECK_OPCODES(pOutgoing);
2256 RT_NOREF(pCallEntry);
2257}
2258#endif
2259
2260
2261#if defined(BODY_CHECK_OPCODES) && defined(BODY_LOAD_TLB_FOR_NEW_PAGE) && defined(BODY_CONSIDER_CS_LIM_CHECKING)
2262/**
2263 * Built-in function for loading TLB and checking opcodes and considering the
2264 * need for CS.LIM checking when advancing naturally to a different code page
2265 * with first instr at byte 0.
2266 *
2267 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNewPageLoadingTlb
2268 */
2269IEM_DECL_IEMNATIVERECOMPFUNC_DEF(iemNativeRecompFunc_BltIn_CheckOpcodesOnNewPageLoadingTlbConsiderCsLim)
2270{
2271 PCIEMTB const pTb = pReNative->pTbOrg;
2272 uint32_t const cbInstr = (uint8_t)pCallEntry->auParams[0];
2273 uint32_t const idxRange = (uint32_t)pCallEntry->auParams[1];
2274 BODY_SET_CUR_INSTR();
2275 BODY_FLUSH_PENDING_WRITES();
2276 BODY_CONSIDER_CS_LIM_CHECKING(pTb, cbInstr);
2277 BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, 0, idxRange, cbInstr);
2278 //Assert(pVCpu->iem.s.offCurInstrStart == 0);
2279 BODY_CHECK_OPCODES(pTb, idxRange, 0, cbInstr);
2280 return off;
2281}
2282
2283IEM_DECL_IEMNATIVELIVENESSFUNC_DEF(iemNativeLivenessFunc_BltIn_CheckOpcodesOnNewPageLoadingTlbConsiderCsLim)
2284{
2285 IEM_LIVENESS_RAW_INIT_WITH_XCPT_OR_CALL(pOutgoing, pIncoming);
2286 LIVENESS_CONSIDER_CS_LIM_CHECKING(pOutgoing);
2287 LIVENESS_LOAD_TLB_FOR_NEW_PAGE(pOutgoing, pCallEntry);
2288 LIVENESS_CHECK_OPCODES(pOutgoing);
2289 RT_NOREF(pCallEntry);
2290}
2291#endif
2292