VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllN8veRecompBltIn.cpp@ 107044

Last change on this file since 107044 was 106465, checked in by vboxsync, 6 weeks ago

VMM/IEM: Added iemNativeEmitLoadGprWithGstReg[Ex]T and iemNativeEmitStoreGprToGstReg[Ex]T as a better way of explicitly loading & storing standard guest registers. bugref:10720

1/* $Id: IEMAllN8veRecompBltIn.cpp 106465 2024-10-18 00:27:52Z vboxsync $ */
2/** @file
3 * IEM - Native Recompiler, Emitters for Built-In Threaded Functions.
4 */
5
6/*
7 * Copyright (C) 2023-2024 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_IEM_RE_NATIVE
33#define IEM_WITH_OPAQUE_DECODER_STATE
34#define VMCPU_INCL_CPUM_GST_CTX
35#define VMM_INCLUDED_SRC_include_IEMMc_h /* block IEMMc.h inclusion. */
36#include <VBox/vmm/iem.h>
37#include <VBox/vmm/cpum.h>
38#include <VBox/vmm/dbgf.h>
39#include "IEMInternal.h"
40#include <VBox/vmm/vmcc.h>
41#include <VBox/log.h>
42#include <VBox/err.h>
43#include <VBox/param.h>
44#include <iprt/assert.h>
45#include <iprt/string.h>
46#if defined(RT_ARCH_AMD64)
47# include <iprt/x86.h>
48#elif defined(RT_ARCH_ARM64)
49# include <iprt/armv8.h>
50#endif
51
52
53#include "IEMInline.h"
54#include "IEMThreadedFunctions.h"
55#include "IEMN8veRecompiler.h"
56#include "IEMN8veRecompilerEmit.h"
57#include "IEMN8veRecompilerTlbLookup.h"
58#include "target-x86/IEMAllN8veEmit-x86.h"
59
60
61
62/*********************************************************************************************************************************
63* TB Helper Functions *
64*********************************************************************************************************************************/
65#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_ARM64)
66DECLASM(void) iemNativeHlpAsmSafeWrapLogCpuState(void);
67#endif
68
69
70/**
71 * Used by TB code to deal with a TLB miss for a new page.
72 */
73IEM_DECL_NATIVE_HLP_DEF(void, iemNativeHlpMemCodeNewPageTlbMiss,(PVMCPUCC pVCpu))
74{
75#ifdef IEM_WITH_TLB_STATISTICS
76 STAM_REL_COUNTER_INC(&pVCpu->iem.s.StatNativeCodeTlbMissesNewPage);
77#endif
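    /* Invalidate the current instruction buffer and point the fetch offsets at
       the page boundary, so the fetch below (re)loads the next code page. */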
78 pVCpu->iem.s.pbInstrBuf = NULL;
79 pVCpu->iem.s.offCurInstrStart = GUEST_PAGE_SIZE;
80 pVCpu->iem.s.offInstrNextByte = GUEST_PAGE_SIZE;
81 iemOpcodeFetchBytesJmp(pVCpu, 0, NULL);
82 if (pVCpu->iem.s.pbInstrBuf)
83 { /* likely */ }
84 else
85 {
86 AssertMsgFailed(("cs:rip=%04x:%08RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
87 IEM_DO_LONGJMP(pVCpu, VINF_SUCCESS);
88 }
89}
90
91
92/**
93 * Used by TB code to deal with a TLB miss for a new page when the instruction starts offInstr bytes before the page boundary; returns the physical address of the new page, or NIL_RTGCPHYS if the fetch failed.
94 */
95IEM_DECL_NATIVE_HLP_DEF(RTGCPHYS, iemNativeHlpMemCodeNewPageTlbMissWithOff,(PVMCPUCC pVCpu, uint8_t offInstr))
96{
97#ifdef IEM_WITH_TLB_STATISTICS
98 STAM_REL_COUNTER_INC(&pVCpu->iem.s.StatNativeCodeTlbMissesNewPageWithOffset);
99#endif
100 pVCpu->iem.s.pbInstrBuf = NULL;
101 pVCpu->iem.s.offCurInstrStart = GUEST_PAGE_SIZE - offInstr;
102 pVCpu->iem.s.offInstrNextByte = GUEST_PAGE_SIZE;
103 iemOpcodeFetchBytesJmp(pVCpu, 0, NULL);
104 AssertMsg(pVCpu->iem.s.pbInstrBuf, ("cs:rip=%04x:%08RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
105 return pVCpu->iem.s.pbInstrBuf ? pVCpu->iem.s.GCPhysInstrBuf : NIL_RTGCPHYS;
106}
107
108
109/*********************************************************************************************************************************
110* Builtin functions *
111*********************************************************************************************************************************/
112
113/**
114 * Built-in function that does nothing.
115 *
116 * Whether this is called or not can be controlled by the entry in the
117 * IEMThreadedGenerator.katBltIns table. This can be useful for determining
118 * why behaviour changes when enabling the LogCpuState builtins, i.e.
119 * whether it's the reduced call count in the TBs or the threaded calls flushing
120 * register state.
121 */
122IEM_DECL_IEMNATIVERECOMPFUNC_DEF(iemNativeRecompFunc_BltIn_Nop)
123{
124 RT_NOREF(pReNative, pCallEntry);
125 return off;
126}
127
128IEM_DECL_IEMNATIVELIVENESSFUNC_DEF(iemNativeLivenessFunc_BltIn_Nop)
129{
130 *pOutgoing = *pIncoming;
131 RT_NOREF(pCallEntry);
132}
133
134
135/**
136 * Emits code for LogCpuState.
137 *
138 * This shouldn't have any relevant impact on the recompiler state.
139 */
140IEM_DECL_IEMNATIVERECOMPFUNC_DEF(iemNativeRecompFunc_BltIn_LogCpuState)
141{
142#ifdef RT_ARCH_AMD64
143 uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 20);
144 /* push rax */
145 pbCodeBuf[off++] = 0x50 + X86_GREG_xAX;
146 /* push imm32 */
147 pbCodeBuf[off++] = 0x68;
148 pbCodeBuf[off++] = RT_BYTE1(pCallEntry->auParams[0]);
149 pbCodeBuf[off++] = RT_BYTE2(pCallEntry->auParams[0]);
150 pbCodeBuf[off++] = RT_BYTE3(pCallEntry->auParams[0]);
151 pbCodeBuf[off++] = RT_BYTE4(pCallEntry->auParams[0]);
152 /* mov rax, iemNativeHlpAsmSafeWrapLogCpuState */
153 pbCodeBuf[off++] = X86_OP_REX_W;
154 pbCodeBuf[off++] = 0xb8 + X86_GREG_xAX;
155 *(uint64_t *)&pbCodeBuf[off] = (uintptr_t)iemNativeHlpAsmSafeWrapLogCpuState;
156 off += sizeof(uint64_t);
157 /* call rax */
158 pbCodeBuf[off++] = 0xff;
159 pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 2, X86_GREG_xAX);
160 /* pop rax - drop the pushed parameter */
161 pbCodeBuf[off++] = 0x58 + X86_GREG_xAX;
162 /* pop rax - restore the caller's value */
163 pbCodeBuf[off++] = 0x58 + X86_GREG_xAX;
164#else
165 off = iemNativeEmitCallImm(pReNative, off, (uintptr_t)iemNativeHlpAsmSafeWrapLogCpuState);
166 RT_NOREF(pCallEntry);
167#endif
168
169 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
170 return off;
171}
172
173IEM_DECL_IEMNATIVELIVENESSFUNC_DEF(iemNativeLivenessFunc_BltIn_LogCpuState)
174{
175 IEM_LIVENESS_RAW_INIT_WITH_CALL(pOutgoing, pIncoming);
176 RT_NOREF(pCallEntry);
177}
178
179
180/**
181 * Built-in function that calls a C-implementation function taking zero arguments.
182 */
183IEM_DECL_IEMNATIVERECOMPFUNC_DEF(iemNativeRecompFunc_BltIn_DeferToCImpl0)
184{
185 PFNIEMCIMPL0 const pfnCImpl = (PFNIEMCIMPL0)(uintptr_t)pCallEntry->auParams[0];
186 uint8_t const cbInstr = (uint8_t)pCallEntry->auParams[1];
187 uint64_t const fGstShwFlush = pCallEntry->auParams[2];
188 return iemNativeEmitCImplCall(pReNative, off, pCallEntry->idxInstr, fGstShwFlush, (uintptr_t)pfnCImpl, cbInstr, 0, 0, 0, 0);
189}
190
191IEM_DECL_IEMNATIVELIVENESSFUNC_DEF(iemNativeLivenessFunc_BltIn_DeferToCImpl0)
192{
193 IEM_LIVENESS_RAW_INIT_WITH_CALL(pOutgoing, pIncoming);
194 RT_NOREF(pCallEntry);
195}
196
197
198/**
199 * Flushes pending writes in preparation of raising an exception or aborting the TB.
200 */
201#define BODY_FLUSH_PENDING_WRITES() \
202 off = iemNativeRegFlushPendingWrites(pReNative, off);
203
204
205/**
206 * Worker for the CheckIrq, CheckTimers and CheckTimersAndIrq builtins below.
207 */
208template<bool const a_fCheckTimers, bool const a_fCheckIrqs>
209DECL_FORCE_INLINE_THROW(uint32_t) iemNativeRecompFunc_BltIn_CheckTimersAndIrqsCommon(PIEMRECOMPILERSTATE pReNative, uint32_t off)
210{
211 uint8_t const idxEflReg = !a_fCheckIrqs ? UINT8_MAX
212 : iemNativeRegAllocTmpForGuestEFlagsReadOnly(pReNative, &off,
213 RT_BIT_64(IEMLIVENESSBIT_IDX_EFL_OTHER));
214 uint8_t const idxTmpReg1 = iemNativeRegAllocTmp(pReNative, &off);
215 uint8_t const idxTmpReg2 = a_fCheckIrqs ? iemNativeRegAllocTmp(pReNative, &off) : UINT8_MAX;
216 PIEMNATIVEINSTR const pCodeBuf = iemNativeInstrBufEnsure(pReNative, off,
217 (RT_ARCH_VAL == RT_ARCH_VAL_AMD64 ? 72 : 32)
218 + IEMNATIVE_MAX_POSTPONED_EFLAGS_INSTRUCTIONS * 3);
219
220 /*
221 * First we decrement the timer poll counter, if so desired.
222 */
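    /* Sketch of what is emitted here (illustrative only):
           if (--pVCpu->iem.s.cTbsTillNextTimerPoll == 0)
               goto ReturnBreakFF;
       i.e. dec + jz on AMD64, ldr/sub/str + cbz on ARM64. */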
223 if (a_fCheckTimers)
224 {
225# ifdef RT_ARCH_AMD64
226 /* dec [rbx + cTbsTillNextTimerPoll] */
227 pCodeBuf[off++] = 0xff;
228 off = iemNativeEmitGprByVCpuDisp(pCodeBuf, off, 1, RT_UOFFSETOF(VMCPU, iem.s.cTbsTillNextTimerPoll));
229
230 /* jz ReturnBreakFF */
231 off = iemNativeEmitTbExitJccEx<kIemNativeLabelType_ReturnBreakFF>(pReNative, pCodeBuf, off, kIemNativeInstrCond_e);
232
233# elif defined(RT_ARCH_ARM64)
234 AssertCompile(RTASSERT_OFFSET_OF(VMCPU, iem.s.cTbsTillNextTimerPoll) < _4K * sizeof(uint32_t));
235 off = iemNativeEmitLoadGprFromVCpuU32Ex(pCodeBuf, off, idxTmpReg1, RT_UOFFSETOF(VMCPU, iem.s.cTbsTillNextTimerPoll));
236 pCodeBuf[off++] = Armv8A64MkInstrSubUImm12(idxTmpReg1, idxTmpReg1, 1, false /*f64Bit*/);
237 off = iemNativeEmitStoreGprToVCpuU32Ex(pCodeBuf, off, idxTmpReg1, RT_UOFFSETOF(VMCPU, iem.s.cTbsTillNextTimerPoll));
238
239 /* cbz reg1, ReturnBreakFF */
240 off = iemNativeEmitTbExitIfGprIsZeroEx<kIemNativeLabelType_ReturnBreakFF>(pReNative, pCodeBuf, off,
241 idxTmpReg1, false /*f64Bit*/);
242
243# else
244# error "port me"
245# endif
246 }
247
248 /*
249 * Second, check forced flags, if so desired.
250 *
251 * We OR them together to save a conditional. A trick here is that the
252 * two IRQ flags are unused in the global flags, so we can still use the
253 * resulting value to check for suppressed interrupts.
254 */
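    /* Sketch of the logic emitted in the rest of this block (illustrative C,
       not literal output):
           uint64_t fFlags = pVCpu->fLocalForcedActions & ~fUnwantedCpuFFs;
           fFlags |= pVM->fGlobalForcedActions;
           if (fFlags)
           {
               if (fFlags & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
                   goto ReturnBreakFF;          // more than just IRQ FFs pending
               // otherwise check EFLAGS.IF / inhibit-shadow, see further down
           }
    */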
255 if (a_fCheckIrqs)
256 {
257 /* Load VMCPU::fLocalForcedActions first and mask it. We can simplify the
258 masking by ASSUMING none of the unwanted flags are located above bit 30. */
259 uint64_t const fUnwantedCpuFFs = VMCPU_FF_PGM_SYNC_CR3
260 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
261 | VMCPU_FF_TLB_FLUSH
262 | VMCPU_FF_UNHALT;
263 AssertCompile(fUnwantedCpuFFs < RT_BIT_64(31));
264 off = iemNativeEmitLoadGprFromVCpuU64Ex(pCodeBuf, off, idxTmpReg1, RT_UOFFSETOF(VMCPUCC, fLocalForcedActions));
265# if defined(RT_ARCH_AMD64)
266 /* and reg1, ~fUnwantedCpuFFs */
267 pCodeBuf[off++] = idxTmpReg1 >= 8 ? X86_OP_REX_B | X86_OP_REX_W : X86_OP_REX_W;
268 pCodeBuf[off++] = 0x81;
269 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 4, idxTmpReg1 & 7);
270 *(uint32_t *)&pCodeBuf[off] = ~(uint32_t)fUnwantedCpuFFs;
271 off += 4;
272
273# else
274 off = iemNativeEmitLoadGprImmEx(pCodeBuf, off, idxTmpReg2, ~fUnwantedCpuFFs);
275 off = iemNativeEmitAndGprByGprEx(pCodeBuf, off, idxTmpReg1, idxTmpReg2);
276# endif
277
278 /* OR in VM::fGlobalForcedActions. We access the member via pVCpu.
279 No need to mask anything here. Unfortunately, it's a 32-bit
280 variable, so we can't OR it directly on x86.
281
282 Note! We take a tiny liberty here and ASSUME that the VM and associated
283 VMCPU mappings are less than 2 GiB away from one another, so we
284 can access VM::fGlobalForcedActions via a 32-bit signed displacement.
285
286 This is _only_ a potential issue with VMs using the _support_ _driver_
287 for managing the structure, as it maps the individual bits separately
288 and the mapping order differs between host platforms. Linux may
289 map the VM structure higher than the VMCPU ones, whereas Windows may
290 put the VM structure at the lowest address. On all hosts there
291 is a chance that virtual memory fragmentation could cause the bits to
292 end up at a greater distance from one another, but it is rather
293 doubtful and we just ASSUME it won't happen for now...
294
295 When the VM structure is allocated in userland, there is one
296 allocation for it and all the associated VMCPU components, thus no
297 problems. */
298 AssertCompile(VM_FF_ALL_MASK == UINT32_MAX);
299 intptr_t const offGlobalForcedActions = (intptr_t)&pReNative->pVCpu->CTX_SUFF(pVM)->fGlobalForcedActions
300 - (intptr_t)pReNative->pVCpu;
301 if (RT_LIKELY((int32_t)offGlobalForcedActions == offGlobalForcedActions))
302 { /* likely */ }
303 else
304 {
305 LogRelMax(16, ("!!WARNING!! offGlobalForcedActions=%#zx pVM=%p pVCpu=%p - CheckTimersAndIrqsCommon\n",
306 offGlobalForcedActions, pReNative->pVCpu->CTX_SUFF(pVM), pReNative->pVCpu));
307# ifdef IEM_WITH_THROW_CATCH
308 AssertFailedStmt(IEMNATIVE_DO_LONGJMP(NULL, VERR_IEM_IPE_9));
309# else
310 AssertReleaseFailed();
311# endif
312 }
313
314# ifdef RT_ARCH_AMD64
315 if (idxTmpReg2 >= 8)
316 pCodeBuf[off++] = X86_OP_REX_R;
317 pCodeBuf[off++] = 0x8b; /* mov */
318 off = iemNativeEmitGprByVCpuSignedDisp(pCodeBuf, off, idxTmpReg2, (int32_t)offGlobalForcedActions);
319
320 /* or reg1, reg2 */
321 off = iemNativeEmitOrGprByGprEx(pCodeBuf, off, idxTmpReg1, idxTmpReg2);
322
323 /* jz nothing_pending */
324 uint32_t const offFixup1 = off;
325 off = iemNativeEmitJccToFixedEx(pCodeBuf, off, IEMNATIVE_HAS_POSTPONED_EFLAGS_CALCS(pReNative) ? off + 512 : off + 64,
326 kIemNativeInstrCond_e);
327
328# elif defined(RT_ARCH_ARM64)
329 Assert(offGlobalForcedActions < 0);
330 off = iemNativeEmitGprBySignedVCpuLdStEx(pCodeBuf, off, idxTmpReg2, (int32_t)offGlobalForcedActions,
331 kArmv8A64InstrLdStType_Ld_Word, sizeof(uint32_t));
332 off = iemNativeEmitOrGprByGprEx(pCodeBuf, off, idxTmpReg1, idxTmpReg2);
333
334 /* cbz nothing_pending */
335 uint32_t const offFixup1 = off;
336 off = iemNativeEmitTestIfGprIsZeroOrNotZeroAndJmpToFixedEx(pCodeBuf, off, idxTmpReg1, true /*f64Bit*/,
337 false /*fJmpIfNotZero*/, off);
338# else
339# error "port me"
340# endif
341
342 /* More than just IRQ FFs pending? */
343 AssertCompile((VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC) == 3);
344 /* cmp reg1, 3 */
345 off = iemNativeEmitCmpGprWithImmEx(pCodeBuf, off, idxTmpReg1, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC);
346 /* ja ReturnBreakFF */
347 off = iemNativeEmitTbExitJccEx<kIemNativeLabelType_ReturnBreakFF>(pReNative, pCodeBuf, off, kIemNativeInstrCond_nbe);
348
349 /*
350 * Okay, we've only got pending IRQ related FFs: Can we dispatch IRQs?
351 *
352 * ASSUME that the shadow flags are cleared when they ought to be cleared,
353 * so we can skip the RIP check.
354 */
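    /* Worked example of the IF/inhibit trick below: with IF=1 and no inhibit
       bits set, reg1 = efl & (IF | INHIBIT_SHADOW) = IF and reg1 ^ IF = 0, so
       the jz/cbz to ReturnBreakFF is taken and the IRQ gets dispatched. Any
       other combination yields a non-zero value and we fall through to
       nothing_pending. */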
355 AssertCompile(CPUMCTX_INHIBIT_SHADOW < RT_BIT_32(31));
356 /* reg1 = efl & (IF | INHIBIT_SHADOW) */
357 off = iemNativeEmitGpr32EqGprAndImmEx(pCodeBuf, off, idxTmpReg1, idxEflReg, X86_EFL_IF | CPUMCTX_INHIBIT_SHADOW);
358 /* reg1 ^= IF */
359 off = iemNativeEmitXorGpr32ByImmEx(pCodeBuf, off, idxTmpReg1, X86_EFL_IF);
360
361# ifdef RT_ARCH_AMD64
362 /* jz ReturnBreakFF */
363 off = iemNativeEmitTbExitJccEx<kIemNativeLabelType_ReturnBreakFF>(pReNative, pCodeBuf, off, kIemNativeInstrCond_e);
364
365# elif defined(RT_ARCH_ARM64)
366 /* cbz reg1, ReturnBreakFF */
367 off = iemNativeEmitTbExitIfGprIsZeroEx<kIemNativeLabelType_ReturnBreakFF>(pReNative, pCodeBuf, off,
368 idxTmpReg1, false /*f64Bit*/);
369# else
370# error "port me"
371# endif
372 /*
373 * nothing_pending:
374 */
375 iemNativeFixupFixedJump(pReNative, offFixup1, off);
376 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
377 }
378
379 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
380
381 /*
382 * Cleanup.
383 */
384 iemNativeRegFreeTmp(pReNative, idxTmpReg1);
385 if (a_fCheckIrqs)
386 {
387 iemNativeRegFreeTmp(pReNative, idxTmpReg2);
388 iemNativeRegFreeTmp(pReNative, idxEflReg);
389 }
390 else
391 {
392 Assert(idxTmpReg2 == UINT8_MAX);
393 Assert(idxEflReg == UINT8_MAX);
394 }
395
396 return off;
397}
398
399
400/**
401 * Built-in function that checks for pending interrupts that can be delivered or
402 * forced action flags.
403 *
404 * This triggers after the completion of an instruction, so EIP is already at
405 * the next instruction. If an IRQ or important FF is pending, this will return
406 * a non-zero status that stops TB execution.
407 */
408IEM_DECL_IEMNATIVERECOMPFUNC_DEF(iemNativeRecompFunc_BltIn_CheckIrq)
409{
410 BODY_FLUSH_PENDING_WRITES();
411 off = iemNativeRecompFunc_BltIn_CheckTimersAndIrqsCommon<false, true>(pReNative, off);
412
413 /* Note down that we've been here, so we can skip FFs + IRQ checks when
414 doing direct linking. */
415#ifdef IEMNATIVE_WITH_LIVENESS_ANALYSIS
416 pReNative->idxLastCheckIrqCallNo = pReNative->idxCurCall;
417 RT_NOREF(pCallEntry);
418#else
419 pReNative->idxLastCheckIrqCallNo = pCallEntry - pReNative->pTbOrg->Thrd.paCalls;
420#endif
421
422 return off;
423}
424
425IEM_DECL_IEMNATIVELIVENESSFUNC_DEF(iemNativeLivenessFunc_BltIn_CheckIrq)
426{
427 IEM_LIVENESS_RAW_INIT_WITH_POTENTIAL_CALL(pOutgoing, pIncoming);
428 IEM_LIVENESS_RAW_EFLAGS_ONE_INPUT(pOutgoing, fEflOther);
429 IEM_LIVENESS_RAW_FINISH_WITH_POTENTIAL_CALL(pOutgoing, pIncoming);
430 RT_NOREF(pCallEntry);
431}
432
433
434/**
435 * Built-in function that works the cTbsTillNextTimerPoll counter on direct TB
436 * linking, like loop-jumps.
437 */
438IEM_DECL_IEMNATIVERECOMPFUNC_DEF(iemNativeRecompFunc_BltIn_CheckTimers)
439{
440 BODY_FLUSH_PENDING_WRITES();
441 RT_NOREF(pCallEntry);
442 return iemNativeRecompFunc_BltIn_CheckTimersAndIrqsCommon<true, false>(pReNative, off);
443}
444
445IEM_DECL_IEMNATIVELIVENESSFUNC_DEF(iemNativeLivenessFunc_BltIn_CheckTimers)
446{
447 IEM_LIVENESS_RAW_INIT_WITH_POTENTIAL_CALL(pOutgoing, pIncoming);
448 IEM_LIVENESS_RAW_FINISH_WITH_POTENTIAL_CALL(pOutgoing, pIncoming);
449 RT_NOREF(pCallEntry);
450}
451
452
453/**
454 * Combined BltIn_CheckTimers + BltIn_CheckIrq for direct linking.
455 */
456IEM_DECL_IEMNATIVERECOMPFUNC_DEF(iemNativeRecompFunc_BltIn_CheckTimersAndIrq)
457{
458 BODY_FLUSH_PENDING_WRITES();
459 RT_NOREF(pCallEntry);
460 return iemNativeRecompFunc_BltIn_CheckTimersAndIrqsCommon<true, true>(pReNative, off);
461}
462
463IEM_DECL_IEMNATIVELIVENESSFUNC_DEF(iemNativeLivenessFunc_BltIn_CheckTimersAndIrq)
464{
465 IEM_LIVENESS_RAW_INIT_WITH_POTENTIAL_CALL(pOutgoing, pIncoming);
466 IEM_LIVENESS_RAW_EFLAGS_ONE_INPUT(pOutgoing, fEflOther);
467 IEM_LIVENESS_RAW_FINISH_WITH_POTENTIAL_CALL(pOutgoing, pIncoming);
468 RT_NOREF(pCallEntry);
469}
470
471
472/**
473 * Built-in function that checks if IEMCPU::fExec has the expected value.
474 */
475IEM_DECL_IEMNATIVERECOMPFUNC_DEF(iemNativeRecompFunc_BltIn_CheckMode)
476{
477 uint32_t const fExpectedExec = (uint32_t)pCallEntry->auParams[0];
478 uint8_t const idxTmpReg = iemNativeRegAllocTmp(pReNative, &off);
479
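    /* What the emitted code does, roughly (sketch only):
           if ((pVCpu->iem.s.fExec & IEMTB_F_KEY_MASK) != (fExpectedExec & IEMTB_F_KEY_MASK))
               goto ReturnBreak; */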
480 off = iemNativeEmitLoadGprFromVCpuU32(pReNative, off, idxTmpReg, RT_UOFFSETOF(VMCPUCC, iem.s.fExec));
481 off = iemNativeEmitAndGpr32ByImm(pReNative, off, idxTmpReg, IEMTB_F_KEY_MASK);
482 off = iemNativeEmitTbExitIfGpr32NotEqualImm<kIemNativeLabelType_ReturnBreak>(pReNative, off, idxTmpReg,
483 fExpectedExec & IEMTB_F_KEY_MASK);
484 iemNativeRegFreeTmp(pReNative, idxTmpReg);
485
486 /* Maintain the recompiler fExec state. */
487 pReNative->fExec = fExpectedExec & IEMTB_F_IEM_F_MASK;
488 return off;
489}
490
491IEM_DECL_IEMNATIVELIVENESSFUNC_DEF(iemNativeLivenessFunc_BltIn_CheckMode)
492{
493 IEM_LIVENESS_RAW_INIT_WITH_POTENTIAL_CALL(pOutgoing, pIncoming);
494 IEM_LIVENESS_RAW_FINISH_WITH_POTENTIAL_CALL(pOutgoing, pIncoming);
495 RT_NOREF(pCallEntry);
496}
497
498
499/**
500 * Sets idxTbCurInstr in preparation of raising an exception or aborting the TB.
501 */
502/** @todo Optimize this, so we don't set the same value more than once. Just
503 * needs some tracking. */
504#ifdef IEMNATIVE_WITH_INSTRUCTION_COUNTING
505# define BODY_SET_CUR_INSTR() \
506 off = iemNativeEmitStoreImmToVCpuU8(pReNative, off, pCallEntry->idxInstr, RT_UOFFSETOF(VMCPUCC, iem.s.idxTbCurInstr))
507#else
508# define BODY_SET_CUR_INSTR() ((void)0)
509#endif
510
511
512/**
513 * Macro that emits the 16/32-bit CS.LIM check.
514 */
515#define BODY_CHECK_CS_LIM(a_cbInstr) \
516 off = iemNativeEmitBltInCheckCsLim(pReNative, off, (a_cbInstr))
517
518#define LIVENESS_CHECK_CS_LIM(a_pOutgoing) \
519 IEM_LIVENESS_RAW_SEG_LIMIT_INPUT(a_pOutgoing, X86_SREG_CS)
520
521DECL_FORCE_INLINE(uint32_t)
522iemNativeEmitBltInCheckCsLim(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t cbInstr)
523{
524 Assert(cbInstr > 0);
525 Assert(cbInstr < 16);
526#ifdef VBOX_STRICT
527 off = iemNativeEmitMarker(pReNative, off, 0x80000001);
528#endif
529
530#ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING
531 Assert(pReNative->Core.offPc == 0);
532#endif
533
534 /*
535 * We need CS.LIM and RIP here. When cbInstr is larger than 1, we also need
536 * a temporary register for calculating the last address of the instruction.
537 *
538 * The calculation and comparisons are 32-bit. We ASSUME that the incoming
539 * RIP isn't totally invalid, i.e. that any jump/call/ret/iret instruction
540 * that last updated EIP here checked it already, and that we're therefore
541 * safe in the 32-bit wrap-around scenario to only check that the last byte
542 * is within CS.LIM. In the case of instruction-by-instruction advancing
543 * up to an EIP wrap-around, we know that CS.LIM is 4G-1 because the limit
544 * must be using 4KB granularity and the previous instruction was fine.
545 */
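    /* Sketch of the emitted check (AMD64 flavour, register names illustrative):
           lea     tmp32, [eip + cbInstr - 1]
           cmp     tmp32, cs.limit
           ja      RaiseGp0
       For cbInstr == 1 the lea is dropped and EIP is compared directly; the
       ARM64 variant uses add + subs(wzr) with the same structure. */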
546 uint8_t const idxRegPc = iemNativeRegAllocTmpForGuestReg(pReNative, &off, kIemNativeGstReg_Pc,
547 kIemNativeGstRegUse_ReadOnly);
548 uint8_t const idxRegCsLim = iemNativeRegAllocTmpForGuestReg(pReNative, &off, IEMNATIVEGSTREG_SEG_LIMIT(X86_SREG_CS),
549 kIemNativeGstRegUse_ReadOnly);
550#ifdef RT_ARCH_AMD64
551 uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 8);
552#elif defined(RT_ARCH_ARM64)
553 uint32_t * const pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 2);
554#else
555# error "Port me"
556#endif
557
558 if (cbInstr != 1)
559 {
560 uint8_t const idxRegTmp = iemNativeRegAllocTmp(pReNative, &off);
561
562 /*
563 * 1. idxRegTmp = idxRegPc + cbInstr;
564 * 2. if idxRegTmp > idxRegCsLim then raise #GP(0).
565 */
566#ifdef RT_ARCH_AMD64
567 /* 1. lea tmp32, [Pc + cbInstr - 1] */
568 if (idxRegTmp >= 8 || idxRegPc >= 8)
569 pbCodeBuf[off++] = (idxRegTmp < 8 ? 0 : X86_OP_REX_R) | (idxRegPc < 8 ? 0 : X86_OP_REX_B);
570 pbCodeBuf[off++] = 0x8d;
571 pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_MEM1, idxRegTmp & 7, idxRegPc & 7);
572 if ((idxRegPc & 7) == X86_GREG_xSP)
573 pbCodeBuf[off++] = X86_SIB_MAKE(idxRegPc & 7, 4 /*no index*/, 0);
574 pbCodeBuf[off++] = cbInstr - 1;
575
576 /* 2. cmp tmp32(r), CsLim(r/m). */
577 if (idxRegTmp >= 8 || idxRegCsLim >= 8)
578 pbCodeBuf[off++] = (idxRegTmp < 8 ? 0 : X86_OP_REX_R) | (idxRegCsLim < 8 ? 0 : X86_OP_REX_B);
579 pbCodeBuf[off++] = 0x3b;
580 pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, idxRegTmp & 7, idxRegCsLim & 7);
581
582#elif defined(RT_ARCH_ARM64)
583 /* 1. add tmp32, Pc, #cbInstr-1 */
584 pu32CodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(false /*fSub*/, idxRegTmp, idxRegPc, cbInstr - 1, false /*f64Bit*/);
585 /* 2. cmp tmp32, CsLim */
586 pu32CodeBuf[off++] = Armv8A64MkInstrAddSubReg(true /*fSub*/, ARMV8_A64_REG_XZR, idxRegTmp, idxRegCsLim,
587 false /*f64Bit*/, true /*fSetFlags*/);
588
589#endif
590 iemNativeRegFreeTmp(pReNative, idxRegTmp);
591 }
592 else
593 {
594 /*
595 * Here we can skip step 1 and compare PC and CS.LIM directly.
596 */
597#ifdef RT_ARCH_AMD64
598 /* 2. cmp eip(r), CsLim(r/m). */
599 if (idxRegPc >= 8 || idxRegCsLim >= 8)
600 pbCodeBuf[off++] = (idxRegPc < 8 ? 0 : X86_OP_REX_R) | (idxRegCsLim < 8 ? 0 : X86_OP_REX_B);
601 pbCodeBuf[off++] = 0x3b;
602 pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, idxRegPc & 7, idxRegCsLim & 7);
603
604#elif defined(RT_ARCH_ARM64)
605 /* 2. cmp Pc, CsLim */
606 pu32CodeBuf[off++] = Armv8A64MkInstrAddSubReg(true /*fSub*/, ARMV8_A64_REG_XZR, idxRegPc, idxRegCsLim,
607 false /*f64Bit*/, true /*fSetFlags*/);
608
609#endif
610 }
611
612 /* 3. Jump if greater. */
613 off = iemNativeEmitTbExitJa<kIemNativeLabelType_RaiseGp0>(pReNative, off);
614
615 iemNativeRegFreeTmp(pReNative, idxRegCsLim);
616 iemNativeRegFreeTmp(pReNative, idxRegPc);
617 return off;
618}
619
620
621/**
622 * Macro that considers whether we need CS.LIM checking after a branch or
623 * crossing over to a new page.
624 */
625#define BODY_CONSIDER_CS_LIM_CHECKING(a_pTb, a_cbInstr) \
626 RT_NOREF(a_cbInstr); \
627 off = iemNativeEmitBltInConsiderLimChecking(pReNative, off)
628
629#define LIVENESS_CONSIDER_CS_LIM_CHECKING(a_pOutgoing) \
630 IEM_LIVENESS_RAW_SEG_LIMIT_INPUT(a_pOutgoing, X86_SREG_CS); \
631 IEM_LIVENESS_RAW_SEG_BASE_INPUT(a_pOutgoing, X86_SREG_CS)
632
633DECL_FORCE_INLINE(uint32_t)
634iemNativeEmitBltInConsiderLimChecking(PIEMRECOMPILERSTATE pReNative, uint32_t off)
635{
636#ifdef VBOX_STRICT
637 off = iemNativeEmitMarker(pReNative, off, 0x80000002);
638#endif
639
640#ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING
641 Assert(pReNative->Core.offPc == 0);
642#endif
643
644 /*
645 * This check must match the one in iemGetTbFlagsForCurrentPc
646 * exactly:
647 *
648 * int64_t const offFromLim = (int64_t)pVCpu->cpum.GstCtx.cs.u32Limit - (int64_t)pVCpu->cpum.GstCtx.eip;
649 * if (offFromLim >= X86_PAGE_SIZE + 16 - (int32_t)(pVCpu->cpum.GstCtx.cs.u64Base & GUEST_PAGE_OFFSET_MASK))
650 * return fRet;
651 * return fRet | IEMTB_F_CS_LIM_CHECKS;
652 *
653 *
654 * We need EIP, CS.LIM and CS.BASE here.
655 */
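    /* What gets emitted below, roughly (illustrative names, signed compare):
           left  = cs.limit - eip
           right = (X86_PAGE_SIZE + 16) - (cs.base & GUEST_PAGE_OFFSET_MASK)
           if (left < right)
               goto NeedCsLimChecking
    */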
656
657 /* Calculate the offFromLim first: */
658 uint8_t const idxRegPc = iemNativeRegAllocTmpForGuestReg(pReNative, &off, kIemNativeGstReg_Pc,
659 kIemNativeGstRegUse_ReadOnly);
660 uint8_t const idxRegCsLim = iemNativeRegAllocTmpForGuestReg(pReNative, &off, IEMNATIVEGSTREG_SEG_LIMIT(X86_SREG_CS),
661 kIemNativeGstRegUse_ReadOnly);
662 uint8_t const idxRegLeft = iemNativeRegAllocTmp(pReNative, &off);
663
664#ifdef RT_ARCH_ARM64
665 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
666 pu32CodeBuf[off++] = Armv8A64MkInstrSubReg(idxRegLeft, idxRegCsLim, idxRegPc);
667 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
668#else
669 off = iemNativeEmitLoadGprFromGpr(pReNative, off, idxRegLeft, idxRegCsLim);
670 off = iemNativeEmitSubTwoGprs(pReNative, off, idxRegLeft, idxRegPc);
671#endif
672
673 iemNativeRegFreeTmp(pReNative, idxRegCsLim);
674 iemNativeRegFreeTmp(pReNative, idxRegPc);
675
676 /* Calculate the threshold level (right side). */
677 uint8_t const idxRegCsBase = iemNativeRegAllocTmpForGuestReg(pReNative, &off, IEMNATIVEGSTREG_SEG_BASE(X86_SREG_CS),
678 kIemNativeGstRegUse_ReadOnly);
679 uint8_t const idxRegRight = iemNativeRegAllocTmp(pReNative, &off);
680
681#ifdef RT_ARCH_ARM64
682 pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 4);
683 Assert(Armv8A64ConvertImmRImmS2Mask32(11, 0) == GUEST_PAGE_OFFSET_MASK);
684 pu32CodeBuf[off++] = Armv8A64MkInstrAndImm(idxRegRight, idxRegCsBase, 11, 0, false /*f64Bit*/);
685 pu32CodeBuf[off++] = Armv8A64MkInstrNeg(idxRegRight);
686 pu32CodeBuf[off++] = Armv8A64MkInstrAddUImm12(idxRegRight, idxRegRight, (X86_PAGE_SIZE + 16) / 2);
687 pu32CodeBuf[off++] = Armv8A64MkInstrAddUImm12(idxRegRight, idxRegRight, (X86_PAGE_SIZE + 16) / 2);
688 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
689
690#else
691 off = iemNativeEmitLoadGprImm32(pReNative, off, idxRegRight, GUEST_PAGE_OFFSET_MASK);
692 off = iemNativeEmitAndGpr32ByGpr32(pReNative, off, idxRegRight, idxRegCsBase);
693 off = iemNativeEmitNegGpr(pReNative, off, idxRegRight);
694 off = iemNativeEmitAddGprImm(pReNative, off, idxRegRight, X86_PAGE_SIZE + 16);
695#endif
696
697 iemNativeRegFreeTmp(pReNative, idxRegCsBase);
698
699 /* Compare the two and jump out if we're too close to the limit. */
700 off = iemNativeEmitCmpGprWithGpr(pReNative, off, idxRegLeft, idxRegRight);
701 off = iemNativeEmitTbExitJl<kIemNativeLabelType_NeedCsLimChecking>(pReNative, off);
702
703 iemNativeRegFreeTmp(pReNative, idxRegRight);
704 iemNativeRegFreeTmp(pReNative, idxRegLeft);
705 return off;
706}
707
708
709
710/**
711 * Macro that implements opcode (re-)checking.
712 */
713#define BODY_CHECK_OPCODES(a_pTb, a_idxRange, a_offRange, a_cbInstr) \
714 RT_NOREF(a_cbInstr); \
715 off = iemNativeEmitBltInCheckOpcodes(pReNative, off, (a_pTb), (a_idxRange), (a_offRange))
716
717#define LIVENESS_CHECK_OPCODES(a_pOutgoing) ((void)0)
718
719#if 0 /* debugging aid */
720bool g_fBpOnObsoletion = false;
721# define BP_ON_OBSOLETION g_fBpOnObsoletion
722#else
723# define BP_ON_OBSOLETION 0
724#endif
725
726DECL_FORCE_INLINE(uint32_t)
727iemNativeEmitBltInCheckOpcodes(PIEMRECOMPILERSTATE pReNative, uint32_t off, PCIEMTB pTb, uint8_t idxRange, uint16_t offRange)
728{
729 Assert(idxRange < pTb->cRanges && pTb->cRanges <= RT_ELEMENTS(pTb->aRanges));
730 Assert(offRange < pTb->aRanges[idxRange].cbOpcodes);
731#ifdef VBOX_STRICT
732 off = iemNativeEmitMarker(pReNative, off, 0x80000003);
733#endif
734
735 /*
736 * Where to start and how much to compare.
737 *
738 * Looking at the ranges produced when r160746 was running a DOS VM with TB
739 * logging, the ranges can be anything from 1 byte to at least 0x197 bytes,
740 * with the 6, 5, 4, 7, 8, 40, 3, 2, 9 and 10 being the top 10 in the sample.
741 *
742 * The top 10 for the early boot phase of a 64-bit debian 9.4 VM: 5, 9, 8,
743 * 12, 10, 11, 6, 13, 15 and 16. Max 0x359 bytes. Same revision as above.
744 */
745 uint16_t offPage = pTb->aRanges[idxRange].offPhysPage + offRange;
746 uint16_t cbLeft = pTb->aRanges[idxRange].cbOpcodes - offRange;
747 Assert(cbLeft > 0);
748 uint8_t const *pbOpcodes = &pTb->pabOpcodes[pTb->aRanges[idxRange].offOpcodes + offRange];
749 uint32_t offConsolidatedJump = UINT32_MAX;
750
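    /* Overview of the strategies used below (rough sketch, not literal output):
         - AMD64, cbLeft <= 24: inline cmp byte/word/dword [pbInstrBuf+offPage], imm
                                against the saved opcode bytes, each followed by a
                                short jump to the ObsoleteTb exit on mismatch.
         - AMD64, cbLeft >  24: load RDI/RSI/RCX and use REPE CMPS on qwords, with
                                alignment and byte/word/dword mop-up steps.
         - ARM64:               chains of LDR + CMP/CCMP against immediates or
                                constants; 16+ bytes switch to pointer-based
                                compares and 64+ bytes use a loop doing 32 bytes
                                per iteration. */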
751#ifdef RT_ARCH_AMD64
752 /* AMD64/x86 offers a bunch of options. Smaller stuff can be
753 completely inlined; for larger amounts we use REPE CMPS. */
754# define CHECK_OPCODES_CMP_IMMXX(a_idxReg, a_bOpcode) /* cost: 3 bytes */ do { \
755 pbCodeBuf[off++] = a_bOpcode; \
756 Assert(offPage < 127); \
757 pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_MEM1, 7, a_idxReg); \
758 pbCodeBuf[off++] = RT_BYTE1(offPage); \
759 } while (0)
760
761# define CHECK_OPCODES_CMP_JMP() /* cost: 7 bytes first time, then 2 bytes */ do { \
762 if (offConsolidatedJump != UINT32_MAX) \
763 { \
764 int32_t const offDisp = (int32_t)offConsolidatedJump - (int32_t)(off + 2); \
765 Assert(offDisp >= -128); \
766 pbCodeBuf[off++] = 0x75; /* jnz near */ \
767 pbCodeBuf[off++] = (uint8_t)offDisp; \
768 } \
769 else \
770 { \
771 pbCodeBuf[off++] = 0x74; /* jz near +5 */ \
772 offConsolidatedJump = ++off; \
773 if (BP_ON_OBSOLETION) pbCodeBuf[off++] = 0xcc; \
774 off = iemNativeEmitTbExitEx<kIemNativeLabelType_ObsoleteTb, false /*a_fActuallyExitingTb*/>(pReNative, \
775 pbCodeBuf, off); \
776 pbCodeBuf[offConsolidatedJump - 1] = off - offConsolidatedJump; \
777 } \
778 } while (0)
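    /* How the consolidated jump above works (sketch): the first compare emits a
       "jz" that skips over an inline jump to the ObsoleteTb exit (taken on a
       match) and remembers where that exit code starts; every later compare
       then only needs a 2-byte "jnz" back to the remembered exit. */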
779
780# define CHECK_OPCODES_CMP_IMM32(a_idxReg) /* cost: 3+4+2 = 9 */ do { \
781 CHECK_OPCODES_CMP_IMMXX(a_idxReg, 0x81); \
782 pbCodeBuf[off++] = *pbOpcodes++; \
783 pbCodeBuf[off++] = *pbOpcodes++; \
784 pbCodeBuf[off++] = *pbOpcodes++; \
785 pbCodeBuf[off++] = *pbOpcodes++; \
786 cbLeft -= 4; \
787 offPage += 4; \
788 CHECK_OPCODES_CMP_JMP(); \
789 } while (0)
790
791# define CHECK_OPCODES_CMP_IMM16(a_idxReg) /* cost: 1+3+2+2 = 8 */ do { \
792 pbCodeBuf[off++] = X86_OP_PRF_SIZE_OP; \
793 CHECK_OPCODES_CMP_IMMXX(a_idxReg, 0x81); \
794 pbCodeBuf[off++] = *pbOpcodes++; \
795 pbCodeBuf[off++] = *pbOpcodes++; \
796 cbLeft -= 2; \
797 offPage += 2; \
798 CHECK_OPCODES_CMP_JMP(); \
799 } while (0)
800
801# define CHECK_OPCODES_CMP_IMM8(a_idxReg) /* cost: 3+1+2 = 6 */ do { \
802 CHECK_OPCODES_CMP_IMMXX(a_idxReg, 0x80); \
803 pbCodeBuf[off++] = *pbOpcodes++; \
804 cbLeft -= 1; \
805 offPage += 1; \
806 CHECK_OPCODES_CMP_JMP(); \
807 } while (0)
808
809# define CHECK_OPCODES_CMPSX(a_bOpcode, a_cbToSubtract, a_bPrefix) /* cost: 2+2 = 4 */ do { \
810 if (a_bPrefix) \
811 pbCodeBuf[off++] = (a_bPrefix); \
812 pbCodeBuf[off++] = (a_bOpcode); \
813 CHECK_OPCODES_CMP_JMP(); \
814 cbLeft -= (a_cbToSubtract); \
815 } while (0)
816
817# define CHECK_OPCODES_ECX_IMM(a_uValue) /* cost: 5 */ do { \
818 pbCodeBuf[off++] = 0xb8 + X86_GREG_xCX; \
819 pbCodeBuf[off++] = RT_BYTE1(a_uValue); \
820 pbCodeBuf[off++] = RT_BYTE2(a_uValue); \
821 pbCodeBuf[off++] = RT_BYTE3(a_uValue); \
822 pbCodeBuf[off++] = RT_BYTE4(a_uValue); \
823 } while (0)
824
825 if (cbLeft <= 24)
826 {
827 uint8_t const idxRegTmp = iemNativeRegAllocTmpEx(pReNative, &off,
828 ( RT_BIT_32(X86_GREG_xAX)
829 | RT_BIT_32(X86_GREG_xCX)
830 | RT_BIT_32(X86_GREG_xDX)
831 | RT_BIT_32(X86_GREG_xBX)
832 | RT_BIT_32(X86_GREG_xSI)
833 | RT_BIT_32(X86_GREG_xDI))
834 & ~IEMNATIVE_REG_FIXED_MASK); /* pick reg not requiring rex prefix */
835 off = iemNativeEmitLoadGprFromVCpuU64(pReNative, off, idxRegTmp, RT_UOFFSETOF(VMCPUCC, iem.s.pbInstrBuf));
836 if (offPage >= 128 - cbLeft)
837 {
838 off = iemNativeEmitAddGprImm(pReNative, off, idxRegTmp, offPage & ~(uint16_t)3);
839 offPage &= 3;
840 }
841
842 uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 6 + 14 + 54 + 8 + 6 + BP_ON_OBSOLETION /* = 88 */
843 + IEMNATIVE_MAX_POSTPONED_EFLAGS_INSTRUCTIONS);
844
845 if (cbLeft > 8)
846 switch (offPage & 3)
847 {
848 case 0:
849 break;
850 case 1: /* cost: 6 + 8 = 14 */
851 CHECK_OPCODES_CMP_IMM8(idxRegTmp);
852 RT_FALL_THRU();
853 case 2: /* cost: 8 */
854 CHECK_OPCODES_CMP_IMM16(idxRegTmp);
855 break;
856 case 3: /* cost: 6 */
857 CHECK_OPCODES_CMP_IMM8(idxRegTmp);
858 break;
859 }
860
861 while (cbLeft >= 4)
862 CHECK_OPCODES_CMP_IMM32(idxRegTmp); /* max iteration: 24/4 = 6; --> cost: 6 * 9 = 54 */
863
864 if (cbLeft >= 2)
865 CHECK_OPCODES_CMP_IMM16(idxRegTmp); /* cost: 8 */
866 if (cbLeft)
867 CHECK_OPCODES_CMP_IMM8(idxRegTmp); /* cost: 6 */
868
869 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
870 iemNativeRegFreeTmp(pReNative, idxRegTmp);
871 }
872 else
873 {
874 /* RDI = &pbInstrBuf[offPage] */
875 uint8_t const idxRegDi = iemNativeRegAllocTmpEx(pReNative, &off, RT_BIT_32(X86_GREG_xDI));
876 off = iemNativeEmitLoadGprFromVCpuU64(pReNative, off, idxRegDi, RT_UOFFSETOF(VMCPU, iem.s.pbInstrBuf));
877 if (offPage != 0)
878 off = iemNativeEmitAddGprImm(pReNative, off, idxRegDi, offPage);
879
880 /* RSI = pbOpcodes */
881 uint8_t const idxRegSi = iemNativeRegAllocTmpEx(pReNative, &off, RT_BIT_32(X86_GREG_xSI));
882 off = iemNativeEmitLoadGprImm64(pReNative, off, idxRegSi, (uintptr_t)pbOpcodes);
883
884 /* RCX = counts. */
885 uint8_t const idxRegCx = iemNativeRegAllocTmpEx(pReNative, &off, RT_BIT_32(X86_GREG_xCX));
886
887 uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 6 + 10 + 5 + 5 + 3 + 4 + 3 + BP_ON_OBSOLETION /*= 36*/
888 + IEMNATIVE_MAX_POSTPONED_EFLAGS_INSTRUCTIONS);
889
890 /** @todo profile and optimize this further. Maybe an idea to align by
891 * offPage if the two cannot be reconciled. */
892 /* Align by the page offset, so that at least one of the accesses are naturally aligned. */
893 switch (offPage & 7) /* max cost: 10 */
894 {
895 case 0:
896 break;
897 case 1: /* cost: 3+4+3 = 10 */
898 CHECK_OPCODES_CMPSX(0xa6, 1, 0);
899 RT_FALL_THRU();
900 case 2: /* cost: 4+3 = 7 */
901 CHECK_OPCODES_CMPSX(0xa7, 2, X86_OP_PRF_SIZE_OP);
902 CHECK_OPCODES_CMPSX(0xa7, 4, 0);
903 break;
904 case 3: /* cost: 3+3 = 6 */
905 CHECK_OPCODES_CMPSX(0xa6, 1, 0);
906 RT_FALL_THRU();
907 case 4: /* cost: 3 */
908 CHECK_OPCODES_CMPSX(0xa7, 4, 0);
909 break;
910 case 5: /* cost: 3+4 = 7 */
911 CHECK_OPCODES_CMPSX(0xa6, 1, 0);
912 RT_FALL_THRU();
913 case 6: /* cost: 4 */
914 CHECK_OPCODES_CMPSX(0xa7, 2, X86_OP_PRF_SIZE_OP);
915 break;
916 case 7: /* cost: 3 */
917 CHECK_OPCODES_CMPSX(0xa6, 1, 0);
918 break;
919 }
920
921 /* Compare qwords: */
922 uint32_t const cQWords = cbLeft >> 3;
923 CHECK_OPCODES_ECX_IMM(cQWords); /* cost: 5 */
924
925 pbCodeBuf[off++] = X86_OP_PRF_REPZ; /* cost: 5 */
926 CHECK_OPCODES_CMPSX(0xa7, 0, X86_OP_REX_W);
927 cbLeft &= 7;
928
929 if (cbLeft & 4)
930 CHECK_OPCODES_CMPSX(0xa7, 4, 0); /* cost: 3 */
931 if (cbLeft & 2)
932 CHECK_OPCODES_CMPSX(0xa7, 2, X86_OP_PRF_SIZE_OP); /* cost: 4 */
933 if (cbLeft & 1)
934 CHECK_OPCODES_CMPSX(0xa6, 1, 0); /* cost: 3 */
935
936 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
937 iemNativeRegFreeTmp(pReNative, idxRegCx);
938 iemNativeRegFreeTmp(pReNative, idxRegSi);
939 iemNativeRegFreeTmp(pReNative, idxRegDi);
940 }
941
942#elif defined(RT_ARCH_ARM64)
943 /* We need pbInstrBuf in a register, whatever we do. */
944 uint8_t const idxRegSrc1Ptr = iemNativeRegAllocTmp(pReNative, &off);
945 off = iemNativeEmitLoadGprFromVCpuU64(pReNative, off, idxRegSrc1Ptr, RT_UOFFSETOF(VMCPU, iem.s.pbInstrBuf));
946
947 /* We also need at least one more register for holding bytes & words we
948 load via pbInstrBuf. */
949 uint8_t const idxRegSrc1Val = iemNativeRegAllocTmp(pReNative, &off);
950
951 uint32_t * const pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 64 + IEMNATIVE_MAX_POSTPONED_EFLAGS_INSTRUCTIONS * 2);
952
953 /* One byte compare can be done with the opcode byte as an immediate. We'll
954 do this to uint16_t align src1. */
955 bool fPendingJmp = RT_BOOL(offPage & 1);
956 if (fPendingJmp)
957 {
958 pu32CodeBuf[off++] = Armv8A64MkInstrStLdRUOff(kArmv8A64InstrLdStType_Ld_Byte, idxRegSrc1Val, idxRegSrc1Ptr, offPage);
959 pu32CodeBuf[off++] = Armv8A64MkInstrCmpUImm12(idxRegSrc1Val, *pbOpcodes++, false /*f64Bit*/);
960 offPage += 1;
961 cbLeft -= 1;
962 }
963
964 if (cbLeft > 0)
965 {
966 /* We need a register for holding the opcode bytes we're comparing with,
967 as CCMP only has a 5-bit immediate form and thus cannot hold bytes. */
968 uint8_t const idxRegSrc2Val = iemNativeRegAllocTmp(pReNative, &off);
969
970 /* Word (uint32_t) aligning the src1 pointer is best done using a 16-bit constant load. */
971 if ((offPage & 3) && cbLeft >= 2)
972 {
973 pu32CodeBuf[off++] = Armv8A64MkInstrStLdRUOff(kArmv8A64InstrLdStType_Ld_Half, idxRegSrc1Val, idxRegSrc1Ptr, offPage / 2);
974 pu32CodeBuf[off++] = Armv8A64MkInstrMovZ(idxRegSrc2Val, RT_MAKE_U16(pbOpcodes[0], pbOpcodes[1]));
975 if (fPendingJmp)
976 pu32CodeBuf[off++] = Armv8A64MkInstrCCmpReg(idxRegSrc1Val, idxRegSrc2Val,
977 ARMA64_NZCV_F_N0_Z0_C0_V0, kArmv8InstrCond_Eq, false /*f64Bit*/);
978 else
979 {
980 pu32CodeBuf[off++] = Armv8A64MkInstrCmpReg(idxRegSrc1Val, idxRegSrc2Val, false /*f64Bit*/);
981 fPendingJmp = true;
982 }
983 pbOpcodes += 2;
984 offPage += 2;
985 cbLeft -= 2;
986 }
987
988 /* DWord (uint64_t) aligning the src1 pointer. We use a 32-bit constant here for simplicity. */
989 if ((offPage & 7) && cbLeft >= 4)
990 {
991 pu32CodeBuf[off++] = Armv8A64MkInstrStLdRUOff(kArmv8A64InstrLdStType_Ld_Word, idxRegSrc1Val, idxRegSrc1Ptr, offPage / 4);
992 off = iemNativeEmitLoadGpr32ImmEx(pu32CodeBuf, off, idxRegSrc2Val,
993 RT_MAKE_U32_FROM_MSB_U8(pbOpcodes[3], pbOpcodes[2], pbOpcodes[1], pbOpcodes[0]));
994 if (fPendingJmp)
995 pu32CodeBuf[off++] = Armv8A64MkInstrCCmpReg(idxRegSrc1Val, idxRegSrc2Val,
996 ARMA64_NZCV_F_N0_Z0_C0_V0, kArmv8InstrCond_Eq, false /*f64Bit*/);
997 else
998 {
999 pu32CodeBuf[off++] = Armv8A64MkInstrCmpReg(idxRegSrc1Val, idxRegSrc2Val, false /*f64Bit*/);
1000 fPendingJmp = true;
1001 }
1002 pbOpcodes += 4;
1003 offPage += 4;
1004 cbLeft -= 4;
1005 }
1006
1007 /*
1008 * If we've got 16 bytes or more left, switch to memcmp-style.
1009 */
1010 if (cbLeft >= 16)
1011 {
1012 /* We need a pointer to the copy of the original opcode bytes. */
1013 uint8_t const idxRegSrc2Ptr = iemNativeRegAllocTmp(pReNative, &off);
1014 off = iemNativeEmitLoadGprImmEx(pu32CodeBuf, off, idxRegSrc2Ptr, (uintptr_t)pbOpcodes);
1015
1016 /* If there are 64 bytes or more to compare we create a loop handling
1017 32 bytes per iteration, for which we'll need a loop register. */
1018 if (cbLeft >= 64)
1019 {
1020 if (fPendingJmp)
1021 {
1022 off = iemNativeEmitTbExitJccEx<kIemNativeLabelType_ObsoleteTb>(pReNative, pu32CodeBuf, off,
1023 kArmv8InstrCond_Ne);
1024 fPendingJmp = false;
1025 }
1026
1027 uint8_t const idxRegLoop = iemNativeRegAllocTmp(pReNative, &off);
1028 uint16_t const cLoops = cbLeft / 32;
1029 cbLeft = cbLeft % 32;
1030 pbOpcodes += cLoops * 32;
1031 pu32CodeBuf[off++] = Armv8A64MkInstrMovZ(idxRegLoop, cLoops);
1032
1033 if (offPage != 0) /** @todo optimize out this instruction. */
1034 {
1035 pu32CodeBuf[off++] = Armv8A64MkInstrAddUImm12(idxRegSrc1Ptr, idxRegSrc1Ptr, offPage);
1036 offPage = 0;
1037 }
1038
1039 uint32_t const offLoopStart = off;
1040 pu32CodeBuf[off++] = Armv8A64MkInstrStLdRUOff(kArmv8A64InstrLdStType_Ld_Dword, idxRegSrc1Val, idxRegSrc1Ptr, 0);
1041 pu32CodeBuf[off++] = Armv8A64MkInstrStLdRUOff(kArmv8A64InstrLdStType_Ld_Dword, idxRegSrc2Val, idxRegSrc2Ptr, 0);
1042 pu32CodeBuf[off++] = Armv8A64MkInstrCmpReg(idxRegSrc1Val, idxRegSrc2Val);
1043
1044 pu32CodeBuf[off++] = Armv8A64MkInstrStLdRUOff(kArmv8A64InstrLdStType_Ld_Dword, idxRegSrc1Val, idxRegSrc1Ptr, 1);
1045 pu32CodeBuf[off++] = Armv8A64MkInstrStLdRUOff(kArmv8A64InstrLdStType_Ld_Dword, idxRegSrc2Val, idxRegSrc2Ptr, 1);
1046 pu32CodeBuf[off++] = Armv8A64MkInstrCCmpReg(idxRegSrc1Val, idxRegSrc2Val,
1047 ARMA64_NZCV_F_N0_Z0_C0_V0, kArmv8InstrCond_Eq);
1048
1049 pu32CodeBuf[off++] = Armv8A64MkInstrStLdRUOff(kArmv8A64InstrLdStType_Ld_Dword, idxRegSrc1Val, idxRegSrc1Ptr, 2);
1050 pu32CodeBuf[off++] = Armv8A64MkInstrStLdRUOff(kArmv8A64InstrLdStType_Ld_Dword, idxRegSrc2Val, idxRegSrc2Ptr, 2);
1051 pu32CodeBuf[off++] = Armv8A64MkInstrCCmpReg(idxRegSrc1Val, idxRegSrc2Val,
1052 ARMA64_NZCV_F_N0_Z0_C0_V0, kArmv8InstrCond_Eq);
1053
1054 pu32CodeBuf[off++] = Armv8A64MkInstrStLdRUOff(kArmv8A64InstrLdStType_Ld_Dword, idxRegSrc1Val, idxRegSrc1Ptr, 3);
1055 pu32CodeBuf[off++] = Armv8A64MkInstrStLdRUOff(kArmv8A64InstrLdStType_Ld_Dword, idxRegSrc2Val, idxRegSrc2Ptr, 3);
1056 pu32CodeBuf[off++] = Armv8A64MkInstrCCmpReg(idxRegSrc1Val, idxRegSrc2Val,
1057 ARMA64_NZCV_F_N0_Z0_C0_V0, kArmv8InstrCond_Eq);
1058
1059 off = iemNativeEmitTbExitJccEx<kIemNativeLabelType_ObsoleteTb>(pReNative, pu32CodeBuf, off, kArmv8InstrCond_Ne);
1060
1061 /* Advance and loop. */
1062 pu32CodeBuf[off++] = Armv8A64MkInstrAddUImm12(idxRegSrc1Ptr, idxRegSrc1Ptr, 0x20);
1063 pu32CodeBuf[off++] = Armv8A64MkInstrAddUImm12(idxRegSrc2Ptr, idxRegSrc2Ptr, 0x20);
1064 pu32CodeBuf[off++] = Armv8A64MkInstrSubUImm12(idxRegLoop, idxRegLoop, 1, false /*f64Bit*/, true /*fSetFlags*/);
1065 pu32CodeBuf[off] = Armv8A64MkInstrBCond(kArmv8InstrCond_Ne, (int32_t)offLoopStart - (int32_t)off);
1066 off++;
1067
1068 iemNativeRegFreeTmp(pReNative, idxRegLoop);
1069 }
1070
1071 /* Deal with any remaining dwords (uint64_t). There can be up to
1072 three if we looped and up to seven if we didn't. */
1073 uint32_t offSrc2 = 0;
1074 while (cbLeft >= 8)
1075 {
1076 pu32CodeBuf[off++] = Armv8A64MkInstrStLdRUOff(kArmv8A64InstrLdStType_Ld_Dword, idxRegSrc1Val,
1077 idxRegSrc1Ptr, offPage / 8);
1078 pu32CodeBuf[off++] = Armv8A64MkInstrStLdRUOff(kArmv8A64InstrLdStType_Ld_Dword, idxRegSrc2Val,
1079 idxRegSrc2Ptr, offSrc2 / 8);
1080 if (fPendingJmp)
1081 pu32CodeBuf[off++] = Armv8A64MkInstrCCmpReg(idxRegSrc1Val, idxRegSrc2Val,
1082 ARMA64_NZCV_F_N0_Z0_C0_V0, kArmv8InstrCond_Eq);
1083 else
1084 {
1085 pu32CodeBuf[off++] = Armv8A64MkInstrCmpReg(idxRegSrc1Val, idxRegSrc2Val);
1086 fPendingJmp = true;
1087 }
1088 pbOpcodes += 8;
1089 offPage += 8;
1090 offSrc2 += 8;
1091 cbLeft -= 8;
1092 }
1093
1094 iemNativeRegFreeTmp(pReNative, idxRegSrc2Ptr);
1095 /* max cost thus far: memcmp-loop=43 vs memcmp-no-loop=30 */
1096 }
1097 /*
1098 * Otherwise, we compare with constants and merge with the general mop-up.
1099 */
1100 else
1101 {
1102 while (cbLeft >= 8)
1103 {
1104 pu32CodeBuf[off++] = Armv8A64MkInstrStLdRUOff(kArmv8A64InstrLdStType_Ld_Dword, idxRegSrc1Val, idxRegSrc1Ptr,
1105 offPage / 8);
1106 off = iemNativeEmitLoadGprImmEx(pu32CodeBuf, off, idxRegSrc2Val,
1107 RT_MAKE_U64_FROM_MSB_U8(pbOpcodes[7], pbOpcodes[6], pbOpcodes[5], pbOpcodes[4],
1108 pbOpcodes[3], pbOpcodes[2], pbOpcodes[1], pbOpcodes[0]));
1109 if (fPendingJmp)
1110 pu32CodeBuf[off++] = Armv8A64MkInstrCCmpReg(idxRegSrc1Val, idxRegSrc2Val,
1111 ARMA64_NZCV_F_N0_Z0_C0_V0, kArmv8InstrCond_Eq, true /*f64Bit*/);
1112 else
1113 {
1114 pu32CodeBuf[off++] = Armv8A64MkInstrCmpReg(idxRegSrc1Val, idxRegSrc2Val, true /*f64Bit*/);
1115 fPendingJmp = true;
1116 }
1117 pbOpcodes += 8;
1118 offPage += 8;
1119 cbLeft -= 8;
1120 }
1121 /* max cost thus far: 21 */
1122 }
1123
1124 /* Deal with any remaining bytes (7 or less). */
1125 Assert(cbLeft < 8);
1126 if (cbLeft >= 4)
1127 {
1128 pu32CodeBuf[off++] = Armv8A64MkInstrStLdRUOff(kArmv8A64InstrLdStType_Ld_Word, idxRegSrc1Val, idxRegSrc1Ptr,
1129 offPage / 4);
1130 off = iemNativeEmitLoadGpr32ImmEx(pu32CodeBuf, off, idxRegSrc2Val,
1131 RT_MAKE_U32_FROM_MSB_U8(pbOpcodes[3], pbOpcodes[2], pbOpcodes[1], pbOpcodes[0]));
1132 if (fPendingJmp)
1133 pu32CodeBuf[off++] = Armv8A64MkInstrCCmpReg(idxRegSrc1Val, idxRegSrc2Val,
1134 ARMA64_NZCV_F_N0_Z0_C0_V0, kArmv8InstrCond_Eq, false /*f64Bit*/);
1135 else
1136 {
1137 pu32CodeBuf[off++] = Armv8A64MkInstrCmpReg(idxRegSrc1Val, idxRegSrc2Val, false /*f64Bit*/);
1138 fPendingJmp = true;
1139 }
1140 pbOpcodes += 4;
1141 offPage += 4;
1142 cbLeft -= 4;
1143
1144 }
1145
1146 if (cbLeft >= 2)
1147 {
1148 pu32CodeBuf[off++] = Armv8A64MkInstrStLdRUOff(kArmv8A64InstrLdStType_Ld_Half, idxRegSrc1Val, idxRegSrc1Ptr,
1149 offPage / 2);
1150 pu32CodeBuf[off++] = Armv8A64MkInstrMovZ(idxRegSrc2Val, RT_MAKE_U16(pbOpcodes[0], pbOpcodes[1]));
1151 if (fPendingJmp)
1152 pu32CodeBuf[off++] = Armv8A64MkInstrCCmpReg(idxRegSrc1Val, idxRegSrc2Val,
1153 ARMA64_NZCV_F_N0_Z0_C0_V0, kArmv8InstrCond_Eq, false /*f64Bit*/);
1154 else
1155 {
1156 pu32CodeBuf[off++] = Armv8A64MkInstrCmpReg(idxRegSrc1Val, idxRegSrc2Val, false /*f64Bit*/);
1157 fPendingJmp = true;
1158 }
1159 pbOpcodes += 2;
1160 offPage += 2;
1161 cbLeft -= 2;
1162 }
1163
1164 if (cbLeft > 0)
1165 {
1166 Assert(cbLeft == 1);
1167 pu32CodeBuf[off++] = Armv8A64MkInstrStLdRUOff(kArmv8A64InstrLdStType_Ld_Byte, idxRegSrc1Val, idxRegSrc1Ptr, offPage);
1168 if (fPendingJmp)
1169 {
1170 pu32CodeBuf[off++] = Armv8A64MkInstrMovZ(idxRegSrc2Val, pbOpcodes[0]);
1171 pu32CodeBuf[off++] = Armv8A64MkInstrCCmpReg(idxRegSrc1Val, idxRegSrc2Val,
1172 ARMA64_NZCV_F_N0_Z0_C0_V0, kArmv8InstrCond_Eq, false /*f64Bit*/);
1173 }
1174 else
1175 {
1176 pu32CodeBuf[off++] = Armv8A64MkInstrCmpUImm12(idxRegSrc1Val, pbOpcodes[0], false /*f64Bit*/);
1177 fPendingJmp = true;
1178 }
1179 pbOpcodes += 1;
1180 offPage += 1;
1181 cbLeft -= 1;
1182 }
1183
1184 iemNativeRegFreeTmp(pReNative, idxRegSrc2Val);
1185 }
1186 Assert(cbLeft == 0);
1187
1188 /*
1189 * Finally, the branch on difference.
1190 */
1191 if (fPendingJmp)
1192 off = iemNativeEmitTbExitJnz<kIemNativeLabelType_ObsoleteTb>(pReNative, off);
1193
1194 RT_NOREF(pu32CodeBuf, cbLeft, offPage, pbOpcodes, offConsolidatedJump);
1195
1196 /* max costs: memcmp-loop=54; memcmp-no-loop=41; only-src1-ptr=32 */
1197 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
1198 iemNativeRegFreeTmp(pReNative, idxRegSrc1Val);
1199 iemNativeRegFreeTmp(pReNative, idxRegSrc1Ptr);
1200
1201#else
1202# error "Port me"
1203#endif
1204 return off;
1205}
1206
1207
1208
1209/**
1210 * Macro that implements PC check after a conditional branch.
1211 */
1212#define BODY_CHECK_PC_AFTER_BRANCH(a_pTb, a_idxRange, a_offRange, a_cbInstr) \
1213 RT_NOREF(a_cbInstr); \
1214 off = iemNativeEmitBltInCheckPcAfterBranch(pReNative, off, a_pTb, a_idxRange, a_offRange)
1215
1216#define LIVENESS_CHECK_PC_AFTER_BRANCH(a_pOutgoing, a_pCallEntry) \
1217 if (!IEM_F_MODE_X86_IS_FLAT((uint32_t)(a_pCallEntry)->auParams[0] >> 8)) \
1218 IEM_LIVENESS_RAW_SEG_BASE_INPUT(a_pOutgoing, X86_SREG_CS); \
1219 else do { } while (0)
1220
1221DECL_FORCE_INLINE(uint32_t)
1222iemNativeEmitBltInCheckPcAfterBranch(PIEMRECOMPILERSTATE pReNative, uint32_t off, PCIEMTB pTb,
1223 uint8_t idxRange, uint16_t offRange)
1224{
1225#ifdef VBOX_STRICT
1226 off = iemNativeEmitMarker(pReNative, off, 0x80000004);
1227#endif
1228
1229#ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING
1230 Assert(pReNative->Core.offPc == 0);
1231#endif
1232
1233 /*
1234 * The GCPhysRangePageWithOffset value in the threaded function is a fixed
1235 * constant for us here.
1236 *
1237 * We can pretend that iem.s.cbInstrBufTotal is X86_PAGE_SIZE here, because
1238 * it serves no purpose as a CS.LIM, if that's needed we've just performed
1239 * it, and as long as we don't implement code TLB reload code here there is
1240 * no point in checking that the TLB data we're using is still valid.
1241 *
1242 * What we do is:
1243 * 1. Calculate the FLAT PC (RIP + CS.BASE).
1244 * 2. Subtract iem.s.uInstrBufPc from it, getting 'off'.
1245 * 3. The 'off' must be less than X86_PAGE_SIZE/cbInstrBufTotal or
1246 * we're in the wrong spot and need to find a new TB.
1247 * 4. Add 'off' to iem.s.GCPhysInstrBuf and compare with the
1248 * GCPhysRangePageWithOffset constant mentioned above.
1249 *
1250 * The adding of CS.BASE to RIP can be skipped in the first step if we're
1251 * in 64-bit code or flat 32-bit.
1252 */
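    /* Sketch of the above steps as emitted (illustrative pseudo-code):
           off = (RIP + CS.BASE) - iem.s.uInstrBufPc;   // CS.BASE skipped when flat
           if (off > X86_PAGE_SIZE - 1)
               goto CheckBranchMiss;
           if (iem.s.GCPhysInstrBuf + off != GCPhysRangePageWithOffset)
               goto CheckBranchMiss;
    */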
1253
1254 /* Allocate registers for step 1. Get the shadowed stuff before allocating
1255 the temp register, so we don't accidentally clobber something we'll be
1256 needing again immediately. This is why we get idxRegCsBase here. */
1257 uint8_t const idxRegPc = iemNativeRegAllocTmpForGuestReg(pReNative, &off, kIemNativeGstReg_Pc,
1258 kIemNativeGstRegUse_ReadOnly);
1259 uint8_t const idxRegCsBase = IEM_F_MODE_X86_IS_FLAT(pReNative->fExec) ? UINT8_MAX
1260 : iemNativeRegAllocTmpForGuestReg(pReNative, &off, IEMNATIVEGSTREG_SEG_BASE(X86_SREG_CS),
1261 kIemNativeGstRegUse_ReadOnly);
1262
1263 uint8_t const idxRegTmp = iemNativeRegAllocTmp(pReNative, &off);
1264
1265#ifdef VBOX_STRICT
1266 /* Do assertions before idxRegTmp contains anything. */
1267 Assert(RT_SIZEOFMEMB(VMCPUCC, iem.s.cbInstrBufTotal) == sizeof(uint16_t));
1268# ifdef RT_ARCH_AMD64
1269 {
1270 uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 8+2+1 + 11+2+1);
1271 /* Assert(pVCpu->cpum.GstCtx.cs.u64Base == 0 || !IEM_F_MODE_X86_IS_FLAT(pReNative->fExec)); */
1272 if (IEM_F_MODE_X86_IS_FLAT(pReNative->fExec))
1273 {
1274 /* cmp r/m64, imm8 */
1275 pbCodeBuf[off++] = X86_OP_REX_W;
1276 pbCodeBuf[off++] = 0x83;
1277 off = iemNativeEmitGprByVCpuDisp(pbCodeBuf, off, 7, RT_UOFFSETOF(VMCPUCC, cpum.GstCtx.cs.u64Base));
1278 pbCodeBuf[off++] = 0;
1279 /* je rel8 */
1280 pbCodeBuf[off++] = 0x74;
1281 pbCodeBuf[off++] = 1;
1282 /* int3 */
1283 pbCodeBuf[off++] = 0xcc;
1284
1285 }
1286
1287 /* Assert(!(pVCpu->iem.s.GCPhysInstrBuf & X86_PAGE_OFFSET_MASK)); - done later by the non-x86 code */
1288 /* test r/m64, imm32 */
1289 pbCodeBuf[off++] = X86_OP_REX_W;
1290 pbCodeBuf[off++] = 0xf7;
1291 off = iemNativeEmitGprByVCpuDisp(pbCodeBuf, off, 0, RT_UOFFSETOF(VMCPUCC, iem.s.GCPhysInstrBuf));
1292 pbCodeBuf[off++] = RT_BYTE1(X86_PAGE_OFFSET_MASK);
1293 pbCodeBuf[off++] = RT_BYTE2(X86_PAGE_OFFSET_MASK);
1294 pbCodeBuf[off++] = RT_BYTE3(X86_PAGE_OFFSET_MASK);
1295 pbCodeBuf[off++] = RT_BYTE4(X86_PAGE_OFFSET_MASK);
1296 /* jz rel8 */
1297 pbCodeBuf[off++] = 0x74;
1298 pbCodeBuf[off++] = 1;
1299 /* int3 */
1300 pbCodeBuf[off++] = 0xcc;
1301 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
1302 }
1303# else
1304
1305 /* Assert(pVCpu->cpum.GstCtx.cs.u64Base == 0 || !IEM_F_MODE_X86_IS_FLAT(pReNative->fExec)); */
1306 if (IEM_F_MODE_X86_IS_FLAT(pReNative->fExec))
1307 {
1308 off = iemNativeEmitLoadGprWithGstRegT<kIemNativeGstReg_CsBase>(pReNative, off, idxRegTmp);
1309# ifdef RT_ARCH_ARM64
1310 uint32_t * const pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 2);
1311 pu32CodeBuf[off++] = Armv8A64MkInstrCbzCbnz(false /*fJmpIfNotZero*/, 2, idxRegTmp);
1312 pu32CodeBuf[off++] = Armv8A64MkInstrBrk(0x2004);
1313 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
1314# else
1315# error "Port me!"
1316# endif
1317 }
1318# endif
1319
1320#endif /* VBOX_STRICT */
1321
1322 /* 1+2. Calculate 'off' first (into idxRegTmp). */
1323 off = iemNativeEmitLoadGprFromVCpuU64(pReNative, off, idxRegTmp, RT_UOFFSETOF(VMCPUCC, iem.s.uInstrBufPc));
1324 if (IEM_F_MODE_X86_IS_FLAT(pReNative->fExec))
1325 {
1326#ifdef RT_ARCH_ARM64
1327 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
1328 pu32CodeBuf[off++] = Armv8A64MkInstrSubReg(idxRegTmp, idxRegPc, idxRegTmp);
1329 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
1330#else
1331 off = iemNativeEmitNegGpr(pReNative, off, idxRegTmp);
1332 off = iemNativeEmitAddTwoGprs(pReNative, off, idxRegTmp, idxRegPc);
1333#endif
1334 }
1335 else
1336 {
1337#ifdef RT_ARCH_ARM64
1338 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 2);
1339 pu32CodeBuf[off++] = Armv8A64MkInstrSubReg(idxRegTmp, idxRegCsBase, idxRegTmp);
1340 pu32CodeBuf[off++] = Armv8A64MkInstrAddReg(idxRegTmp, idxRegTmp, idxRegPc);
1341 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
1342#else
1343 off = iemNativeEmitNegGpr(pReNative, off, idxRegTmp);
1344 off = iemNativeEmitAddTwoGprs(pReNative, off, idxRegTmp, idxRegCsBase);
1345 off = iemNativeEmitAddTwoGprs(pReNative, off, idxRegTmp, idxRegPc);
1346#endif
1347 iemNativeRegFreeTmp(pReNative, idxRegCsBase);
1348 }
1349 iemNativeRegFreeTmp(pReNative, idxRegPc);
1350
1351 /* 3. Check that off is less than X86_PAGE_SIZE/cbInstrBufTotal. */
1352 off = iemNativeEmitCmpGprWithImm(pReNative, off, idxRegTmp, X86_PAGE_SIZE - 1);
1353 off = iemNativeEmitTbExitJa<kIemNativeLabelType_CheckBranchMiss>(pReNative, off);
1354
1355 /* 4. Add iem.s.GCPhysInstrBuf and compare with GCPhysRangePageWithOffset. */
1356#ifdef RT_ARCH_AMD64
1357 uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 7);
1358 pbCodeBuf[off++] = idxRegTmp < 8 ? X86_OP_REX_W : X86_OP_REX_W | X86_OP_REX_R;
1359 pbCodeBuf[off++] = 0x03; /* add r64, r/m64 */
1360 off = iemNativeEmitGprByVCpuDisp(pbCodeBuf, off, idxRegTmp, RT_UOFFSETOF(VMCPUCC, iem.s.GCPhysInstrBuf));
1361 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
1362
1363#elif defined(RT_ARCH_ARM64)
1364 uint8_t const idxRegTmp2 = iemNativeRegAllocTmp(pReNative, &off);
1365
1366 off = iemNativeEmitLoadGprFromVCpuU64(pReNative, off, idxRegTmp2, RT_UOFFSETOF(VMCPUCC, iem.s.GCPhysInstrBuf));
1367 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
1368 pu32CodeBuf[off++] = Armv8A64MkInstrAddReg(idxRegTmp, idxRegTmp, idxRegTmp2);
1369 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
1370
1371# ifdef VBOX_STRICT /* Assert(!(pVCpu->iem.s.GCPhysInstrBuf & X86_PAGE_OFFSET_MASK)); */
1372 off = iemNativeEmitAndGpr32ByImm(pReNative, off, idxRegTmp2, X86_PAGE_OFFSET_MASK, true /*fSetFlags*/);
1373 off = iemNativeEmitJzToFixed(pReNative, off, off + 2 /* correct for ARM64 */);
1374 off = iemNativeEmitBrk(pReNative, off, 0x2005);
1375# endif
1376 iemNativeRegFreeTmp(pReNative, idxRegTmp2);
1377#else
1378# error "Port me"
1379#endif
1380
1381 RTGCPHYS const GCPhysRangePageWithOffset = ( iemTbGetRangePhysPageAddr(pTb, idxRange)
1382 | pTb->aRanges[idxRange].offPhysPage)
1383 + offRange;
1384 off = iemNativeEmitTbExitIfGprNotEqualImm<kIemNativeLabelType_CheckBranchMiss>(pReNative, off, idxRegTmp,
1385 GCPhysRangePageWithOffset);
1386
1387 iemNativeRegFreeTmp(pReNative, idxRegTmp);
1388 return off;
1389}
1390
1391
1392/**
1393 * Macro that implements TLB loading and pbInstrBuf updating for an
1394 * instruction crossing into a new page.
1395 *
1396 * This may long jump if we're raising a \#PF, \#GP or similar trouble.
1397 */
1398#define BODY_LOAD_TLB_FOR_NEW_PAGE(a_pTb, a_offInstr, a_idxRange, a_cbInstr) \
1399 RT_NOREF(a_cbInstr); \
1400 off = iemNativeEmitBltLoadTlbForNewPage(pReNative, off, pTb, a_idxRange, a_offInstr)
1401
1402#define LIVENESS_LOAD_TLB_FOR_NEW_PAGE(a_pOutgoing, a_pCallEntry) \
1403 if (!IEM_F_MODE_X86_IS_FLAT((uint32_t)(a_pCallEntry)->auParams[0] >> 8)) \
1404 IEM_LIVENESS_RAW_SEG_BASE_INPUT(a_pOutgoing, X86_SREG_CS); \
1405 else do { } while (0)
1406
1407DECL_FORCE_INLINE(uint32_t)
1408iemNativeEmitBltLoadTlbForNewPage(PIEMRECOMPILERSTATE pReNative, uint32_t off, PCIEMTB pTb, uint8_t idxRange, uint8_t offInstr)
1409{
1410#ifdef VBOX_STRICT
1411 off = iemNativeEmitMarker(pReNative, off, 0x80000005);
1412#endif
1413
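    /* Rough flow of the code emitted by this function (sketch only):
           jmp  TlbLookup                 ; unless TlbState.fSkip
       TlbMiss:
           save volatile variables (and do any postponed EFLAGS work)
           arg1 = offInstr, arg0 = pVCpu
           call iemNativeHlpMemCodeNewPageTlbMissWithOff
           idxRegGCPhys = return value; restore variables
           jmp  TlbDone
       TlbLookup:                         ; inlined code-TLB lookup
       TlbDone:
           if (idxRegGCPhys != GCPhysNewPage) goto ObsoleteTb
    */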
1414 /*
1415 * Define labels and allocate the register for holding the GCPhys of the new page.
1416 */
1417 uint16_t const uTlbSeqNo = pReNative->uTlbSeqNo++;
1418 uint32_t const idxRegGCPhys = iemNativeRegAllocTmp(pReNative, &off);
1419 IEMNATIVEEMITTLBSTATE const TlbState(pReNative, IEM_F_MODE_X86_IS_FLAT(pReNative->fExec), &off);
1420 uint32_t const idxLabelTlbLookup = !TlbState.fSkip
1421 ? iemNativeLabelCreate(pReNative, kIemNativeLabelType_TlbLookup, UINT32_MAX, uTlbSeqNo)
1422 : UINT32_MAX;
1423
1424 //off = iemNativeEmitBrk(pReNative, off, 0x1111);
1425
1426 /*
1427 * Jump to the TLB lookup code.
1428 */
1429 if (!TlbState.fSkip)
1430 off = iemNativeEmitJmpToLabel(pReNative, off, idxLabelTlbLookup); /** @todo short jump */
1431
1432 /*
1433 * TlbMiss:
1434 *
1435 * Call iemNativeHlpMemCodeNewPageTlbMissWithOff to do the work.
1436 */
1437 uint32_t const idxLabelTlbMiss = iemNativeLabelCreate(pReNative, kIemNativeLabelType_TlbMiss, off, uTlbSeqNo);
1438
1439 /* Save variables in volatile registers. */
1440 uint32_t const fHstRegsNotToSave = TlbState.getRegsNotToSave() | RT_BIT_32(idxRegGCPhys);
1441 off = iemNativeVarSaveVolatileRegsPreHlpCall(pReNative, off, fHstRegsNotToSave);
1442
1443#ifdef IEMNATIVE_WITH_EFLAGS_POSTPONING
1444 /* Do delayed EFLAGS calculations. There are no restrictions on volatile registers here. */
1445 off = iemNativeDoPostponedEFlagsAtTlbMiss<0>(pReNative, off, &TlbState, fHstRegsNotToSave);
1446#endif
1447
1448 /* IEMNATIVE_CALL_ARG1_GREG = offInstr */
1449 off = iemNativeEmitLoadGpr8Imm(pReNative, off, IEMNATIVE_CALL_ARG1_GREG, offInstr);
1450
1451 /* IEMNATIVE_CALL_ARG0_GREG = pVCpu */
1452 off = iemNativeEmitLoadGprFromGpr(pReNative, off, IEMNATIVE_CALL_ARG0_GREG, IEMNATIVE_REG_FIXED_PVMCPU);
1453
1454 /* Done setting up parameters, make the call. */
1455 off = iemNativeEmitCallImm<true /*a_fSkipEflChecks*/>(pReNative, off, (uintptr_t)iemNativeHlpMemCodeNewPageTlbMissWithOff);
1456
1457 /* Move the result to the right register. */
1458 if (idxRegGCPhys != IEMNATIVE_CALL_RET_GREG)
1459 off = iemNativeEmitLoadGprFromGpr(pReNative, off, idxRegGCPhys, IEMNATIVE_CALL_RET_GREG);
1460
1461 /* Restore variables and guest shadow registers to volatile registers. */
1462 off = iemNativeVarRestoreVolatileRegsPostHlpCall(pReNative, off, fHstRegsNotToSave);
1463 off = iemNativeRegRestoreGuestShadowsInVolatileRegs(pReNative, off, TlbState.getActiveRegsWithShadows(true /*fCode*/));
1464
1465#ifdef IEMNATIVE_WITH_TLB_LOOKUP
1466 if (!TlbState.fSkip)
1467 {
1468 /* end of TlbMiss - Jump to the done label. */
1469 uint32_t const idxLabelTlbDone = iemNativeLabelCreate(pReNative, kIemNativeLabelType_TlbDone, UINT32_MAX, uTlbSeqNo);
1470 off = iemNativeEmitJmpToLabel(pReNative, off, idxLabelTlbDone);
1471
1472 /*
1473 * TlbLookup:
1474 */
1475 off = iemNativeEmitTlbLookup<false, 1 /*cbMem*/, 0 /*fAlignMask*/,
1476 IEM_ACCESS_TYPE_EXEC>(pReNative, off, &TlbState,
1477 IEM_F_MODE_X86_IS_FLAT(pReNative->fExec) ? UINT8_MAX : X86_SREG_CS,
1478 idxLabelTlbLookup, idxLabelTlbMiss, idxRegGCPhys, offInstr);
1479
1480# ifdef IEM_WITH_TLB_STATISTICS
1481 off = iemNativeEmitIncStamCounterInVCpu(pReNative, off, TlbState.idxReg1, TlbState.idxReg2,
1482 RT_UOFFSETOF(VMCPUCC, iem.s.StatNativeCodeTlbHitsForNewPageWithOffset));
1483# endif
1484
1485 /*
1486 * TlbDone:
1487 */
1488 iemNativeLabelDefine(pReNative, idxLabelTlbDone, off);
1489 TlbState.freeRegsAndReleaseVars(pReNative, UINT8_MAX /*idxVarGCPtrMem*/, true /*fIsCode*/);
1490 }
1491#else
1492 RT_NOREF(idxLabelTlbMiss);
1493#endif
1494
1495 /*
1496 * Now check the physical address of the page matches the expected one.
1497 */
1498 RTGCPHYS const GCPhysNewPage = iemTbGetRangePhysPageAddr(pTb, idxRange);
1499 off = iemNativeEmitTbExitIfGprNotEqualImm<kIemNativeLabelType_ObsoleteTb>(pReNative, off, idxRegGCPhys, GCPhysNewPage);
1500
1501 iemNativeRegFreeTmp(pReNative, idxRegGCPhys);
1502 return off;
1503}
1504
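/* Rough shape of the code emitted by iemNativeEmitBltLoadTlbForNewPage above.
   This is a sketch only, not actual emitter output; the labels correspond to
   the ones created in the function:

        jmp     TlbLookup                   ; only if the inline lookup isn't skipped
    TlbMiss:
        call    iemNativeHlpMemCodeNewPageTlbMissWithOff   ; returns the GCPhys of the new page
        jmp     TlbDone
    TlbLookup:
        ; inline code TLB lookup, result ends up in idxRegGCPhys
    TlbDone:
        cmp     idxRegGCPhys, GCPhysNewPage
        jne     ObsoleteTb exit
*/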
1505
1506/**
1507 * Macro that implements TLB loading and pbInstrBuf updating when
1508 * branching or when crossing a page on an instruction boundary.
1509 *
1510 * This differs from BODY_LOAD_TLB_FOR_NEW_PAGE in that it will first check if
1511 * it is an inter-page branch and also check the page offset.
1512 *
1513 * This may long jump if we're raising a \#PF, \#GP or similar trouble.
1514 */
1515#define BODY_LOAD_TLB_AFTER_BRANCH(a_pTb, a_idxRange, a_cbInstr) \
1516 RT_NOREF(a_cbInstr); \
1517 off = iemNativeEmitBltLoadTlbAfterBranch(pReNative, off, pTb, a_idxRange)
1518
1519#define LIVENESS_LOAD_TLB_AFTER_BRANCH(a_pOutgoing, a_pCallEntry) \
1520 if (!IEM_F_MODE_X86_IS_FLAT((uint32_t)(a_pCallEntry)->auParams[0] >> 8)) \
1521 IEM_LIVENESS_RAW_SEG_BASE_INPUT(a_pOutgoing, X86_SREG_CS); \
1522 else do { } while (0)
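/* Illustrative use only: this mirrors how the post-branch built-in functions
   further down in this file (e.g. iemNativeRecompFunc_BltIn_CheckOpcodesLoadingTlb)
   unpack the threaded call parameters before invoking the macro.

        uint32_t const cbInstr  = (uint8_t)pCallEntry->auParams[0];
        uint32_t const idxRange = (uint32_t)pCallEntry->auParams[1];
        BODY_LOAD_TLB_AFTER_BRANCH(pTb, idxRange, cbInstr);
*/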
1523
1524DECL_FORCE_INLINE(uint32_t)
1525iemNativeEmitBltLoadTlbAfterBranch(PIEMRECOMPILERSTATE pReNative, uint32_t off, PCIEMTB pTb, uint8_t idxRange)
1526{
1527#ifdef VBOX_STRICT
1528 off = iemNativeEmitMarker(pReNative, off, 0x80000006);
1529#endif
1530
1531 BODY_FLUSH_PENDING_WRITES();
1532
1533 /*
1534 * Define labels and allocate the register for holding the GCPhys of the new page.
1535 */
1536 uint16_t const uTlbSeqNo = pReNative->uTlbSeqNo++;
1537 RTGCPHYS const GCPhysRangePageWithOffset = iemTbGetRangePhysPageAddr(pTb, idxRange)
1538 | pTb->aRanges[idxRange].offPhysPage;
1539
1540 /*
1541 *
1542 * First check if RIP is within the current code.
1543 *
1544 * This is very similar to iemNativeEmitBltInCheckPcAfterBranch; the only
1545 * difference is what we do when stuff doesn't match up.
1546 *
1547 * What we do is:
1548 * 1. Calculate the FLAT PC (RIP + CS.BASE).
1549 * 2. Subtract iem.s.uInstrBufPc from it to get 'off'.
1550 * 3. The 'off' must be less than X86_PAGE_SIZE/cbInstrBufTotal or
1551 * we need to retranslate RIP via the TLB.
1552 * 4. Add 'off' to iem.s.GCPhysInstrBuf and compare with the
1553 * GCPhysRangePageWithOffset constant mentioned above.
1554 *
1555 * The adding of CS.BASE to RIP can be skipped in the first step if we're
1556 * in 64-bit code or flat 32-bit.
1557 *
1558 */
1559
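/* C-level sketch of the four steps above. Illustration only, nothing here is
   executed; the field names are the ones used by the emitters below:

        uint64_t const uFlatPc = fIsFlat
                               ? pVCpu->cpum.GstCtx.rip
                               : pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base; // 1
        uint64_t const offBuf  = uFlatPc - pVCpu->iem.s.uInstrBufPc;                     // 2
        if (offBuf >= X86_PAGE_SIZE)                                                     // 3
            ; // retranslate via the TLB (TlbLoad below)
        else if (pVCpu->iem.s.GCPhysInstrBuf + offBuf != GCPhysRangePageWithOffset)      // 4
            ; // exit the TB via CheckBranchMiss
*/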
1560 /* Allocate registers for step 1. Get the shadowed stuff before allocating
1561 the temp register, so we don't accidentally clobber something we'll be
1562 needing again immediately. This is why we get idxRegCsBase here.
1563 Update: We share registers with the TlbState, as the TLB code path has
1564 little in common with the rest of the code. */
1565 bool const fIsFlat = IEM_F_MODE_X86_IS_FLAT(pReNative->fExec);
1566 IEMNATIVEEMITTLBSTATE const TlbState(pReNative, fIsFlat, &off);
1567 uint8_t const idxRegPc = !TlbState.fSkip ? TlbState.idxRegPtr
1568 : iemNativeRegAllocTmpForGuestReg(pReNative, &off, kIemNativeGstReg_Pc,
1569 kIemNativeGstRegUse_ReadOnly, true /*fNoVolatileRegs*/);
1570 uint8_t const idxRegCsBase = !TlbState.fSkip || fIsFlat ? TlbState.idxRegSegBase
1571 : iemNativeRegAllocTmpForGuestReg(pReNative, &off, IEMNATIVEGSTREG_SEG_BASE(X86_SREG_CS),
1572 kIemNativeGstRegUse_ReadOnly, true /*fNoVolatileRegs*/);
1573
1574 uint8_t const idxRegTmp = !TlbState.fSkip ? TlbState.idxReg1 : iemNativeRegAllocTmp(pReNative, &off);
1575 uint8_t const idxRegTmp2 = !TlbState.fSkip ? TlbState.idxReg2 : iemNativeRegAllocTmp(pReNative, &off);
1576 uint8_t const idxRegDummy = !TlbState.fSkip ? iemNativeRegAllocTmp(pReNative, &off) : UINT8_MAX;
1577
1578#ifdef VBOX_STRICT
1579 /* Do assertions before idxRegTmp contains anything. */
1580 Assert(RT_SIZEOFMEMB(VMCPUCC, iem.s.cbInstrBufTotal) == sizeof(uint16_t));
1581# ifdef RT_ARCH_AMD64
1582 {
1583 uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 8+2+1 + 11+2+1);
1584 /* Assert(pVCpu->cpum.GstCtx.cs.u64Base == 0 || !IEM_F_MODE_X86_IS_FLAT(pReNative->fExec)); */
1585 if (IEM_F_MODE_X86_IS_FLAT(pReNative->fExec))
1586 {
1587 /* cmp r/m64, imm8 */
1588 pbCodeBuf[off++] = X86_OP_REX_W;
1589 pbCodeBuf[off++] = 0x83;
1590 off = iemNativeEmitGprByVCpuDisp(pbCodeBuf, off, 7, RT_UOFFSETOF(VMCPUCC, cpum.GstCtx.cs.u64Base));
1591 pbCodeBuf[off++] = 0;
1592 /* je rel8 */
1593 pbCodeBuf[off++] = 0x74;
1594 pbCodeBuf[off++] = 1;
1595 /* int3 */
1596 pbCodeBuf[off++] = 0xcc;
1597
1598 }
1599
1600 /* Assert(!(pVCpu->iem.s.GCPhysInstrBuf & X86_PAGE_OFFSET_MASK)); - done later by the non-x86 code */
1601 /* test r/m64, imm32 */
1602 pbCodeBuf[off++] = X86_OP_REX_W;
1603 pbCodeBuf[off++] = 0xf7;
1604 off = iemNativeEmitGprByVCpuDisp(pbCodeBuf, off, 0, RT_UOFFSETOF(VMCPUCC, iem.s.GCPhysInstrBuf));
1605 pbCodeBuf[off++] = RT_BYTE1(X86_PAGE_OFFSET_MASK);
1606 pbCodeBuf[off++] = RT_BYTE2(X86_PAGE_OFFSET_MASK);
1607 pbCodeBuf[off++] = RT_BYTE3(X86_PAGE_OFFSET_MASK);
1608 pbCodeBuf[off++] = RT_BYTE4(X86_PAGE_OFFSET_MASK);
1609 /* jz rel8 */
1610 pbCodeBuf[off++] = 0x74;
1611 pbCodeBuf[off++] = 1;
1612 /* int3 */
1613 pbCodeBuf[off++] = 0xcc;
1614 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
1615 }
1616# else
1617
1618 /* Assert(pVCpu->cpum.GstCtx.cs.u64Base == 0 || !IEM_F_MODE_X86_IS_FLAT(pReNative->fExec)); */
1619 if (IEM_F_MODE_X86_IS_FLAT(pReNative->fExec))
1620 {
1621 off = iemNativeEmitLoadGprWithGstRegT<kIemNativeGstReg_CsBase>(pReNative, off, idxRegTmp);
1622# ifdef RT_ARCH_ARM64
1623 uint32_t * const pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 2);
1624 pu32CodeBuf[off++] = Armv8A64MkInstrCbzCbnz(false /*fJmpIfNotZero*/, 2, idxRegTmp);
1625 pu32CodeBuf[off++] = Armv8A64MkInstrBrk(0x2006);
1626 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
1627# else
1628# error "Port me!"
1629# endif
1630 }
1631# endif
1632
1633#endif /* VBOX_STRICT */
1634
1635 /* Because we're lazy, we'll jump back here to recalc 'off' and share the
1636 GCPhysRangePageWithOffset check. This is a little risky, so we use the
1637 2nd register to check if we've looped more than once already. */
1638 off = iemNativeEmitGprZero(pReNative, off, idxRegTmp2);
1639
1640 uint32_t const offLabelRedoChecks = off;
1641
1642 /* 1+2. Calculate 'off' first (into idxRegTmp). */
1643 off = iemNativeEmitLoadGprFromVCpuU64(pReNative, off, idxRegTmp, RT_UOFFSETOF(VMCPUCC, iem.s.uInstrBufPc));
1644 if (IEM_F_MODE_X86_IS_FLAT(pReNative->fExec))
1645 {
1646#ifdef RT_ARCH_ARM64
1647 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
1648 pu32CodeBuf[off++] = Armv8A64MkInstrSubReg(idxRegTmp, idxRegPc, idxRegTmp);
1649 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
1650#else
1651 off = iemNativeEmitNegGpr(pReNative, off, idxRegTmp);
1652 off = iemNativeEmitAddTwoGprs(pReNative, off, idxRegTmp, idxRegPc);
1653#endif
1654 }
1655 else
1656 {
1657#ifdef RT_ARCH_ARM64
1658 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 2);
1659 pu32CodeBuf[off++] = Armv8A64MkInstrSubReg(idxRegTmp, idxRegCsBase, idxRegTmp);
1660 pu32CodeBuf[off++] = Armv8A64MkInstrAddReg(idxRegTmp, idxRegTmp, idxRegPc);
1661 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
1662#else
1663 off = iemNativeEmitNegGpr(pReNative, off, idxRegTmp);
1664 off = iemNativeEmitAddTwoGprs(pReNative, off, idxRegTmp, idxRegCsBase);
1665 off = iemNativeEmitAddTwoGprs(pReNative, off, idxRegTmp, idxRegPc);
1666#endif
1667 }
1668
1669 /* 3. Check that off is less than X86_PAGE_SIZE/cbInstrBufTotal.
1670 Unlike iemNativeEmitBltInCheckPcAfterBranch we'll jump to the TLB loading if this fails. */
1671 off = iemNativeEmitCmpGprWithImm(pReNative, off, idxRegTmp, X86_PAGE_SIZE - 1);
1672 uint32_t const offFixedJumpToTlbLoad = off;
1673 off = iemNativeEmitJaToFixed(pReNative, off, off /* (ASSUME ja rel8 suffices) */);
1674
1675 /* 4a. Add iem.s.GCPhysInstrBuf to off ... */
1676#ifdef RT_ARCH_AMD64
1677 uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 7);
1678 pbCodeBuf[off++] = idxRegTmp < 8 ? X86_OP_REX_W : X86_OP_REX_W | X86_OP_REX_R;
1679 pbCodeBuf[off++] = 0x03; /* add r64, r/m64 */
1680 off = iemNativeEmitGprByVCpuDisp(pbCodeBuf, off, idxRegTmp, RT_UOFFSETOF(VMCPUCC, iem.s.GCPhysInstrBuf));
1681 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
1682
1683#elif defined(RT_ARCH_ARM64)
1684
1685 off = iemNativeEmitLoadGprFromVCpuU64(pReNative, off, idxRegTmp2, RT_UOFFSETOF(VMCPUCC, iem.s.GCPhysInstrBuf));
1686 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
1687 pu32CodeBuf[off++] = Armv8A64MkInstrAddReg(idxRegTmp, idxRegTmp, idxRegTmp2);
1688 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
1689
1690# ifdef VBOX_STRICT /* Assert(!(pVCpu->iem.s.GCPhysInstrBuf & X86_PAGE_OFFSET_MASK)); */
1691 off = iemNativeEmitAndGpr32ByImm(pReNative, off, idxRegTmp2, X86_PAGE_OFFSET_MASK, true /*fSetFlags*/);
1692 off = iemNativeEmitJzToFixed(pReNative, off, off + 2 /* correct for ARM64 */);
1693 off = iemNativeEmitBrk(pReNative, off, 0x2005);
1694# endif
1695#else
1696# error "Port me"
1697#endif
1698
1699 /* 4b. ... and compare with GCPhysRangePageWithOffset.
1700
1701 Unlike iemNativeEmitBltInCheckPcAfterBranch we'll have to be more
1702 careful and avoid implicit temporary register usage here.
1703
1704 Unlike the threaded version of this code, we do not obsolete TBs here to
1705 reduce the code size and because indirect calls may legally end at the
1706 same offset in two different pages depending on the program state. */
1707 /** @todo synch the threaded BODY_LOAD_TLB_AFTER_BRANCH version with this. */
1708 off = iemNativeEmitLoadGprImm64(pReNative, off, idxRegTmp2, GCPhysRangePageWithOffset);
1709 off = iemNativeEmitCmpGprWithGpr(pReNative, off, idxRegTmp, idxRegTmp2);
1710 off = iemNativeEmitTbExitJnz<kIemNativeLabelType_CheckBranchMiss>(pReNative, off);
1711 uint32_t const offFixedJumpToEnd = off;
1712 off = iemNativeEmitJmpToFixed(pReNative, off, off + 512 /* force rel32 */);
1713
1714 /*
1715 * TlbLoad:
1716 *
1717 * First we try to go via the TLB.
1718 */
1719 iemNativeFixupFixedJump(pReNative, offFixedJumpToTlbLoad, off);
1720
1721 /* Check that we haven't been here before. */
1722 off = iemNativeEmitTbExitIfGprIsNotZero<kIemNativeLabelType_CheckBranchMiss>(pReNative, off, idxRegTmp2, false /*f64Bit*/);
1723
1724 /* Jump to the TLB lookup code. */
1725 uint32_t const idxLabelTlbLookup = !TlbState.fSkip
1726 ? iemNativeLabelCreate(pReNative, kIemNativeLabelType_TlbLookup, UINT32_MAX, uTlbSeqNo)
1727 : UINT32_MAX;
1728//off = iemNativeEmitBrk(pReNative, off, 0x1234);
1729 if (!TlbState.fSkip)
1730 off = iemNativeEmitJmpToLabel(pReNative, off, idxLabelTlbLookup); /** @todo short jump */
1731
1732 /*
1733 * TlbMiss:
1734 *
1735 * Call iemNativeHlpMemCodeNewPageTlbMiss to do the work.
1736 */
1737 uint32_t const idxLabelTlbMiss = iemNativeLabelCreate(pReNative, kIemNativeLabelType_TlbMiss, off, uTlbSeqNo);
1738 RT_NOREF(idxLabelTlbMiss);
1739
1740 /* Save variables in volatile registers. */
1741 uint32_t const fHstRegsNotToSave = TlbState.getRegsNotToSave() | RT_BIT_32(idxRegTmp) | RT_BIT_32(idxRegTmp2)
1742 | (idxRegDummy != UINT8_MAX ? RT_BIT_32(idxRegDummy) : 0);
1743 off = iemNativeVarSaveVolatileRegsPreHlpCall(pReNative, off, fHstRegsNotToSave);
1744
1745#ifdef IEMNATIVE_WITH_EFLAGS_POSTPONING
1746 /* Do delayed EFLAGS calculations. There are no restrictions on volatile registers here. */
1747 off = iemNativeDoPostponedEFlagsAtTlbMiss<0>(pReNative, off, &TlbState, fHstRegsNotToSave);
1748#endif
1749
1750 /* IEMNATIVE_CALL_ARG0_GREG = pVCpu */
1751 off = iemNativeEmitLoadGprFromGpr(pReNative, off, IEMNATIVE_CALL_ARG0_GREG, IEMNATIVE_REG_FIXED_PVMCPU);
1752
1753 /* Done setting up parameters, make the call. */
1754 off = iemNativeEmitCallImm<true /*a_fSkipEflChecks*/>(pReNative, off, (uintptr_t)iemNativeHlpMemCodeNewPageTlbMiss);
1755
1756 /* Restore variables and guest shadow registers to volatile registers. */
1757 off = iemNativeVarRestoreVolatileRegsPostHlpCall(pReNative, off, fHstRegsNotToSave);
1758 off = iemNativeRegRestoreGuestShadowsInVolatileRegs(pReNative, off,
1759 TlbState.getActiveRegsWithShadows()
1760 | RT_BIT_32(idxRegPc)
1761 | (idxRegCsBase != UINT8_MAX ? RT_BIT_32(idxRegCsBase) : 0));
1762
1763#ifdef IEMNATIVE_WITH_TLB_LOOKUP
1764 if (!TlbState.fSkip)
1765 {
1766 /* end of TlbMiss - Jump to the done label. */
1767 uint32_t const idxLabelTlbDone = iemNativeLabelCreate(pReNative, kIemNativeLabelType_TlbDone, UINT32_MAX, uTlbSeqNo);
1768 off = iemNativeEmitJmpToLabel(pReNative, off, idxLabelTlbDone);
1769
1770 /*
1771 * TlbLookup:
1772 */
1773 off = iemNativeEmitTlbLookup<false, 1 /*cbMem*/, 0 /*fAlignMask*/,
1774 IEM_ACCESS_TYPE_EXEC, true>(pReNative, off, &TlbState, fIsFlat ? UINT8_MAX : X86_SREG_CS,
1775 idxLabelTlbLookup, idxLabelTlbMiss, idxRegDummy);
1776
1777# ifdef IEM_WITH_TLB_STATISTICS
1778 off = iemNativeEmitIncStamCounterInVCpu(pReNative, off, TlbState.idxReg1, TlbState.idxReg2,
1779 RT_UOFFSETOF(VMCPUCC, iem.s.StatNativeCodeTlbHitsForNewPage));
1780# endif
1781
1782 /*
1783 * TlbDone:
1784 */
1785 iemNativeLabelDefine(pReNative, idxLabelTlbDone, off);
1786 TlbState.freeRegsAndReleaseVars(pReNative, UINT8_MAX /*idxVarGCPtrMem*/, true /*fIsCode*/);
1787 }
1788#else
1789 RT_NOREF(idxLabelTlbMiss);
1790#endif
1791
1792 /* Jmp back to the start and redo the checks. */
1793 off = iemNativeEmitLoadGpr8Imm(pReNative, off, idxRegTmp2, 1); /* indicate that we've looped once already */
1794 off = iemNativeEmitJmpToFixed(pReNative, off, offLabelRedoChecks);
1795
1796 /*
1797 * End:
1798 *
1799 * The end.
1800 */
1801 iemNativeFixupFixedJump(pReNative, offFixedJumpToEnd, off);
1802
1803 if (!TlbState.fSkip)
1804 iemNativeRegFreeTmp(pReNative, idxRegDummy);
1805 else
1806 {
1807 iemNativeRegFreeTmp(pReNative, idxRegTmp2);
1808 iemNativeRegFreeTmp(pReNative, idxRegTmp);
1809 iemNativeRegFreeTmp(pReNative, idxRegPc);
1810 if (idxRegCsBase != UINT8_MAX)
1811 iemNativeRegFreeTmp(pReNative, idxRegCsBase);
1812 }
1813 return off;
1814}
1815
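/* Rough control flow of the code emitted by iemNativeEmitBltLoadTlbAfterBranch
   above (sketch only, not actual emitter output):

        idxRegTmp2 = 0                              ; loop guard
    RedoChecks:
        idxRegTmp  = FlatPC - iem.s.uInstrBufPc     ; steps 1 + 2
        ja      TlbLoad                             ; step 3 failed, need a new translation
        cmp     iem.s.GCPhysInstrBuf + idxRegTmp, GCPhysRangePageWithOffset
        jne     CheckBranchMiss exit                ; step 4 failed
        jmp     End
    TlbLoad:
        jnz     CheckBranchMiss exit                ; idxRegTmp2 != 0, i.e. we've looped once already
        ; TLB lookup / iemNativeHlpMemCodeNewPageTlbMiss call
        idxRegTmp2 = 1
        jmp     RedoChecks
    End:
*/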
1816
1817#ifdef BODY_CHECK_CS_LIM
1818/**
1819 * Built-in function that checks the EIP/IP + uParam0 is within CS.LIM,
1820 * raising a \#GP(0) if this isn't the case.
1821 */
1822IEM_DECL_IEMNATIVERECOMPFUNC_DEF(iemNativeRecompFunc_BltIn_CheckCsLim)
1823{
1824 uint32_t const cbInstr = (uint8_t)pCallEntry->auParams[0];
1825 BODY_SET_CUR_INSTR();
1826 BODY_FLUSH_PENDING_WRITES();
1827 BODY_CHECK_CS_LIM(cbInstr);
1828 return off;
1829}
1830
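/* Guest-level semantics of BODY_CHECK_CS_LIM, sketched for orientation only.
   The real check is produced by the emitter macro defined earlier in this file;
   the exact boundary handling below is an approximation:

        if ((uint32_t)pVCpu->cpum.GstCtx.rip + cbInstr - 1 > pVCpu->cpum.GstCtx.cs.u32Limit)
            ; // raise #GP(0)
*/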
1831IEM_DECL_IEMNATIVELIVENESSFUNC_DEF(iemNativeLivenessFunc_BltIn_CheckCsLim)
1832{
1833 IEM_LIVENESS_RAW_INIT_WITH_POTENTIAL_CALL(pOutgoing, pIncoming);
1834 LIVENESS_CHECK_CS_LIM(pOutgoing);
1835 IEM_LIVENESS_RAW_FINISH_WITH_POTENTIAL_CALL(pOutgoing, pIncoming);
1836 RT_NOREF(pCallEntry);
1837}
1838#endif
1839
1840
1841#if defined(BODY_CHECK_OPCODES) && defined(BODY_CHECK_CS_LIM)
1842/**
1843 * Built-in function for re-checking opcodes and CS.LIM after an instruction
1844 * that may have modified them.
1845 */
1846IEM_DECL_IEMNATIVERECOMPFUNC_DEF(iemNativeRecompFunc_BltIn_CheckCsLimAndOpcodes)
1847{
1848 PCIEMTB const pTb = pReNative->pTbOrg;
1849 uint32_t const cbInstr = (uint8_t)pCallEntry->auParams[0];
1850 uint32_t const idxRange = (uint32_t)pCallEntry->auParams[1];
1851 uint32_t const offRange = (uint32_t)pCallEntry->auParams[2];
1852 BODY_SET_CUR_INSTR();
1853 BODY_FLUSH_PENDING_WRITES();
1854 BODY_CHECK_CS_LIM(cbInstr);
1855 BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
1856 return off;
1857}
1858
1859IEM_DECL_IEMNATIVELIVENESSFUNC_DEF(iemNativeLivenessFunc_BltIn_CheckCsLimAndOpcodes)
1860{
1861 IEM_LIVENESS_RAW_INIT_WITH_POTENTIAL_CALL(pOutgoing, pIncoming);
1862 LIVENESS_CHECK_CS_LIM(pOutgoing);
1863 LIVENESS_CHECK_OPCODES(pOutgoing);
1864 IEM_LIVENESS_RAW_FINISH_WITH_POTENTIAL_CALL(pOutgoing, pIncoming);
1865 RT_NOREF(pCallEntry);
1866}
1867#endif
1868
1869
1870#if defined(BODY_CHECK_OPCODES)
1871/**
1872 * Built-in function for re-checking opcodes after an instruction that may have
1873 * modified them.
1874 */
1875IEM_DECL_IEMNATIVERECOMPFUNC_DEF(iemNativeRecompFunc_BltIn_CheckOpcodes)
1876{
1877 PCIEMTB const pTb = pReNative->pTbOrg;
1878 uint32_t const cbInstr = (uint8_t)pCallEntry->auParams[0];
1879 uint32_t const idxRange = (uint32_t)pCallEntry->auParams[1];
1880 uint32_t const offRange = (uint32_t)pCallEntry->auParams[2];
1881 BODY_SET_CUR_INSTR();
1882 BODY_FLUSH_PENDING_WRITES();
1883 BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
1884 return off;
1885}
1886
1887IEM_DECL_IEMNATIVELIVENESSFUNC_DEF(iemNativeLivenessFunc_BltIn_CheckOpcodes)
1888{
1889 IEM_LIVENESS_RAW_INIT_WITH_POTENTIAL_CALL(pOutgoing, pIncoming);
1890 LIVENESS_CHECK_OPCODES(pOutgoing);
1891 IEM_LIVENESS_RAW_FINISH_WITH_POTENTIAL_CALL(pOutgoing, pIncoming);
1892 RT_NOREF(pCallEntry);
1893}
1894#endif
1895
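/* Conceptually, BODY_CHECK_OPCODES (emitter defined earlier in this file) boils
   down to something like the following. Sketch only: pbCurrentCodeBytes,
   pbRecordedCodeBytes and cbToCheck are hypothetical stand-ins, the real emitter
   works on the range data referenced via pTb->aRanges[idxRange]:

        // Compare the opcode bytes recorded when the TB was compiled against
        // what the guest currently has in that code range; if they no longer
        // match, the TB is stale and must be abandoned.
        if (memcmp(pbCurrentCodeBytes + offRange, pbRecordedCodeBytes + offRange, cbToCheck) != 0)
            ; // TB exit (obsolete opcodes)
*/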
1896
1897#if defined(BODY_CHECK_OPCODES) && defined(BODY_CONSIDER_CS_LIM_CHECKING)
1898/**
1899 * Built-in function for re-checking opcodes and considering the need for CS.LIM
1900 * checking after an instruction that may have modified them.
1901 */
1902IEM_DECL_IEMNATIVERECOMPFUNC_DEF(iemNativeRecompFunc_BltIn_CheckOpcodesConsiderCsLim)
1903{
1904 PCIEMTB const pTb = pReNative->pTbOrg;
1905 uint32_t const cbInstr = (uint8_t)pCallEntry->auParams[0];
1906 uint32_t const idxRange = (uint32_t)pCallEntry->auParams[1];
1907 uint32_t const offRange = (uint32_t)pCallEntry->auParams[2];
1908 BODY_SET_CUR_INSTR();
1909 BODY_FLUSH_PENDING_WRITES();
1910 BODY_CONSIDER_CS_LIM_CHECKING(pTb, cbInstr);
1911 BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
1912 return off;
1913}
1914
1915IEM_DECL_IEMNATIVELIVENESSFUNC_DEF(iemNativeLivenessFunc_BltIn_CheckOpcodesConsiderCsLim)
1916{
1917 IEM_LIVENESS_RAW_INIT_WITH_POTENTIAL_CALL(pOutgoing, pIncoming);
1918 LIVENESS_CONSIDER_CS_LIM_CHECKING(pOutgoing);
1919 LIVENESS_CHECK_OPCODES(pOutgoing);
1920 IEM_LIVENESS_RAW_FINISH_WITH_POTENTIAL_CALL(pOutgoing, pIncoming);
1921 RT_NOREF(pCallEntry);
1922}
1923#endif
1924
1925
1926/*
1927 * Post-branching checkers.
1928 */
1929
1930#if defined(BODY_CHECK_OPCODES) && defined(BODY_CHECK_PC_AFTER_BRANCH) && defined(BODY_CHECK_CS_LIM)
1931/**
1932 * Built-in function for checking CS.LIM, checking the PC and checking opcodes
1933 * after conditional branching within the same page.
1934 *
1935 * @see iemThreadedFunc_BltIn_CheckPcAndOpcodes
1936 */
1937IEM_DECL_IEMNATIVERECOMPFUNC_DEF(iemNativeRecompFunc_BltIn_CheckCsLimAndPcAndOpcodes)
1938{
1939 PCIEMTB const pTb = pReNative->pTbOrg;
1940 uint32_t const cbInstr = (uint8_t)pCallEntry->auParams[0];
1941 uint32_t const idxRange = (uint32_t)pCallEntry->auParams[1];
1942 uint32_t const offRange = (uint32_t)pCallEntry->auParams[2];
1943 //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes));
1944 BODY_SET_CUR_INSTR();
1945 BODY_FLUSH_PENDING_WRITES();
1946 BODY_CHECK_CS_LIM(cbInstr);
1947 BODY_CHECK_PC_AFTER_BRANCH(pTb, idxRange, offRange, cbInstr);
1948 BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
1949 //LogFunc(("okay\n"));
1950 return off;
1951}
1952
1953IEM_DECL_IEMNATIVELIVENESSFUNC_DEF(iemNativeLivenessFunc_BltIn_CheckCsLimAndPcAndOpcodes)
1954{
1955 IEM_LIVENESS_RAW_INIT_WITH_POTENTIAL_CALL(pOutgoing, pIncoming);
1956 LIVENESS_CHECK_CS_LIM(pOutgoing);
1957 LIVENESS_CHECK_PC_AFTER_BRANCH(pOutgoing, pCallEntry);
1958 LIVENESS_CHECK_OPCODES(pOutgoing);
1959 IEM_LIVENESS_RAW_FINISH_WITH_POTENTIAL_CALL(pOutgoing, pIncoming);
1960 RT_NOREF(pCallEntry);
1961}
1962#endif
1963
1964
1965#if defined(BODY_CHECK_OPCODES) && defined(BODY_CHECK_PC_AFTER_BRANCH)
1966/**
1967 * Built-in function for checking the PC and checking opcodes after conditional
1968 * branching within the same page.
1969 *
1970 * @see iemThreadedFunc_BltIn_CheckCsLimAndPcAndOpcodes
1971 */
1972IEM_DECL_IEMNATIVERECOMPFUNC_DEF(iemNativeRecompFunc_BltIn_CheckPcAndOpcodes)
1973{
1974 PCIEMTB const pTb = pReNative->pTbOrg;
1975 uint32_t const cbInstr = (uint8_t)pCallEntry->auParams[0];
1976 uint32_t const idxRange = (uint32_t)pCallEntry->auParams[1];
1977 uint32_t const offRange = (uint32_t)pCallEntry->auParams[2];
1978 //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes));
1979 BODY_SET_CUR_INSTR();
1980 BODY_FLUSH_PENDING_WRITES();
1981 BODY_CHECK_PC_AFTER_BRANCH(pTb, idxRange, offRange, cbInstr);
1982 BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
1983 //LogFunc(("okay\n"));
1984 return off;
1985}
1986
1987IEM_DECL_IEMNATIVELIVENESSFUNC_DEF(iemNativeLivenessFunc_BltIn_CheckPcAndOpcodes)
1988{
1989 IEM_LIVENESS_RAW_INIT_WITH_POTENTIAL_CALL(pOutgoing, pIncoming);
1990 LIVENESS_CHECK_PC_AFTER_BRANCH(pOutgoing, pCallEntry);
1991 LIVENESS_CHECK_OPCODES(pOutgoing);
1992 IEM_LIVENESS_RAW_FINISH_WITH_POTENTIAL_CALL(pOutgoing, pIncoming);
1993 RT_NOREF(pCallEntry);
1994}
1995#endif
1996
1997
1998#if defined(BODY_CHECK_OPCODES) && defined(BODY_CHECK_PC_AFTER_BRANCH) && defined(BODY_CONSIDER_CS_LIM_CHECKING)
1999/**
2000 * Built-in function for checking the PC and checking opcodes and considering
2001 * the need for CS.LIM checking after conditional branching within the same
2002 * page.
2003 *
2004 * @see iemThreadedFunc_BltIn_CheckCsLimAndPcAndOpcodes
2005 */
2006IEM_DECL_IEMNATIVERECOMPFUNC_DEF(iemNativeRecompFunc_BltIn_CheckPcAndOpcodesConsiderCsLim)
2007{
2008 PCIEMTB const pTb = pReNative->pTbOrg;
2009 uint32_t const cbInstr = (uint8_t)pCallEntry->auParams[0];
2010 uint32_t const idxRange = (uint32_t)pCallEntry->auParams[1];
2011 uint32_t const offRange = (uint32_t)pCallEntry->auParams[2];
2012 //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes));
2013 BODY_SET_CUR_INSTR();
2014 BODY_FLUSH_PENDING_WRITES();
2015 BODY_CONSIDER_CS_LIM_CHECKING(pTb, cbInstr);
2016 BODY_CHECK_PC_AFTER_BRANCH(pTb, idxRange, offRange, cbInstr);
2017 BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
2018 //LogFunc(("okay\n"));
2019 return off;
2020}
2021
2022IEM_DECL_IEMNATIVELIVENESSFUNC_DEF(iemNativeLivenessFunc_BltIn_CheckPcAndOpcodesConsiderCsLim)
2023{
2024 IEM_LIVENESS_RAW_INIT_WITH_POTENTIAL_CALL(pOutgoing, pIncoming);
2025 LIVENESS_CONSIDER_CS_LIM_CHECKING(pOutgoing);
2026 LIVENESS_CHECK_PC_AFTER_BRANCH(pOutgoing, pCallEntry);
2027 LIVENESS_CHECK_OPCODES(pOutgoing);
2028 IEM_LIVENESS_RAW_FINISH_WITH_POTENTIAL_CALL(pOutgoing, pIncoming);
2029 RT_NOREF(pCallEntry);
2030}
2031#endif
2032
2033
2034#if defined(BODY_CHECK_OPCODES) && defined(BODY_LOAD_TLB_AFTER_BRANCH) && defined(BODY_CHECK_CS_LIM)
2035/**
2036 * Built-in function for checking CS.LIM, loading TLB and checking opcodes when
2037 * transitioning to a different code page.
2038 *
2039 * The code page transition can either happen naturally, crossing onto the next
2040 * page (with the instruction starting at page offset zero), or by means of branching.
2041 *
2042 * @see iemThreadedFunc_BltIn_CheckOpcodesLoadingTlb
2043 */
2044IEM_DECL_IEMNATIVERECOMPFUNC_DEF(iemNativeRecompFunc_BltIn_CheckCsLimAndOpcodesLoadingTlb)
2045{
2046 PCIEMTB const pTb = pReNative->pTbOrg;
2047 uint32_t const cbInstr = (uint8_t)pCallEntry->auParams[0];
2048 uint32_t const idxRange = (uint32_t)pCallEntry->auParams[1];
2049 uint32_t const offRange = (uint32_t)pCallEntry->auParams[2];
2050 //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes));
2051 BODY_SET_CUR_INSTR();
2052 BODY_FLUSH_PENDING_WRITES();
2053 BODY_CHECK_CS_LIM(cbInstr);
2054 Assert(offRange == 0);
2055 BODY_LOAD_TLB_AFTER_BRANCH(pTb, idxRange, cbInstr);
2056 BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
2057 //LogFunc(("okay\n"));
2058 return off;
2059}
2060
2061IEM_DECL_IEMNATIVELIVENESSFUNC_DEF(iemNativeLivenessFunc_BltIn_CheckCsLimAndOpcodesLoadingTlb)
2062{
2063 IEM_LIVENESS_RAW_INIT_WITH_POTENTIAL_CALL(pOutgoing, pIncoming);
2064 LIVENESS_CHECK_CS_LIM(pOutgoing);
2065 LIVENESS_LOAD_TLB_AFTER_BRANCH(pOutgoing, pCallEntry);
2066 LIVENESS_CHECK_OPCODES(pOutgoing);
2067 IEM_LIVENESS_RAW_FINISH_WITH_POTENTIAL_CALL(pOutgoing, pIncoming);
2068 RT_NOREF(pCallEntry);
2069}
2070#endif
2071
2072
2073#if defined(BODY_CHECK_OPCODES) && defined(BODY_LOAD_TLB_AFTER_BRANCH)
2074/**
2075 * Built-in function for loading TLB and checking opcodes when transitioning to
2076 * a different code page.
2077 *
2078 * The code page transition can either happen naturally, crossing onto the next
2079 * page (with the instruction starting at page offset zero), or by means of branching.
2080 *
2081 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesLoadingTlb
2082 */
2083IEM_DECL_IEMNATIVERECOMPFUNC_DEF(iemNativeRecompFunc_BltIn_CheckOpcodesLoadingTlb)
2084{
2085 PCIEMTB const pTb = pReNative->pTbOrg;
2086 uint32_t const cbInstr = (uint8_t)pCallEntry->auParams[0];
2087 uint32_t const idxRange = (uint32_t)pCallEntry->auParams[1];
2088 uint32_t const offRange = (uint32_t)pCallEntry->auParams[2];
2089 //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes));
2090 BODY_SET_CUR_INSTR();
2091 BODY_FLUSH_PENDING_WRITES();
2092 Assert(offRange == 0);
2093 BODY_LOAD_TLB_AFTER_BRANCH(pTb, idxRange, cbInstr);
2094 BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
2095 //LogFunc(("okay\n"));
2096 return off;
2097}
2098
2099IEM_DECL_IEMNATIVELIVENESSFUNC_DEF(iemNativeLivenessFunc_BltIn_CheckOpcodesLoadingTlb)
2100{
2101 IEM_LIVENESS_RAW_INIT_WITH_POTENTIAL_CALL(pOutgoing, pIncoming);
2102 LIVENESS_LOAD_TLB_AFTER_BRANCH(pOutgoing, pCallEntry);
2103 LIVENESS_CHECK_OPCODES(pOutgoing);
2104 IEM_LIVENESS_RAW_FINISH_WITH_POTENTIAL_CALL(pOutgoing, pIncoming);
2105 RT_NOREF(pCallEntry);
2106}
2107#endif
2108
2109
2110#if defined(BODY_CHECK_OPCODES) && defined(BODY_LOAD_TLB_AFTER_BRANCH) && defined(BODY_CONSIDER_CS_LIM_CHECKING)
2111/**
2112 * Built-in function for loading TLB and checking opcodes and considering the
2113 * need for CS.LIM checking when transitioning to a different code page.
2114 *
2115 * The code page transition can either happen naturally, crossing onto the next
2116 * page (with the instruction starting at page offset zero), or by means of branching.
2117 *
2118 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesLoadingTlb
2119 */
2120IEM_DECL_IEMNATIVERECOMPFUNC_DEF(iemNativeRecompFunc_BltIn_CheckOpcodesLoadingTlbConsiderCsLim)
2121{
2122 PCIEMTB const pTb = pReNative->pTbOrg;
2123 uint32_t const cbInstr = (uint8_t)pCallEntry->auParams[0];
2124 uint32_t const idxRange = (uint32_t)pCallEntry->auParams[1];
2125 uint32_t const offRange = (uint32_t)pCallEntry->auParams[2];
2126 //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes));
2127 BODY_SET_CUR_INSTR();
2128 BODY_FLUSH_PENDING_WRITES();
2129 BODY_CONSIDER_CS_LIM_CHECKING(pTb, cbInstr);
2130 Assert(offRange == 0);
2131 BODY_LOAD_TLB_AFTER_BRANCH(pTb, idxRange, cbInstr);
2132 BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
2133 //LogFunc(("okay\n"));
2134 return off;
2135}
2136
2137IEM_DECL_IEMNATIVELIVENESSFUNC_DEF(iemNativeLivenessFunc_BltIn_CheckOpcodesLoadingTlbConsiderCsLim)
2138{
2139 IEM_LIVENESS_RAW_INIT_WITH_POTENTIAL_CALL(pOutgoing, pIncoming);
2140 LIVENESS_CONSIDER_CS_LIM_CHECKING(pOutgoing);
2141 LIVENESS_LOAD_TLB_AFTER_BRANCH(pOutgoing, pCallEntry);
2142 LIVENESS_CHECK_OPCODES(pOutgoing);
2143 IEM_LIVENESS_RAW_FINISH_WITH_POTENTIAL_CALL(pOutgoing, pIncoming);
2144 RT_NOREF(pCallEntry);
2145}
2146#endif
2147
2148
2149
2150/*
2151 * Natural page crossing checkers.
2152 */
2153
2154#if defined(BODY_CHECK_OPCODES) && defined(BODY_LOAD_TLB_FOR_NEW_PAGE) && defined(BODY_CHECK_CS_LIM)
2155/**
2156 * Built-in function for checking CS.LIM, loading TLB and checking opcodes on
2157 * both pages when transitioning to a different code page.
2158 *
2159 * This is used when the previous instruction requires revalidation of opcode
2160 * bytes and the current instruction straddles a page boundary with opcode bytes
2161 * in both the old and new page.
2162 *
2163 * @see iemThreadedFunc_BltIn_CheckOpcodesAcrossPageLoadingTlb
2164 */
2165IEM_DECL_IEMNATIVERECOMPFUNC_DEF(iemNativeRecompFunc_BltIn_CheckCsLimAndOpcodesAcrossPageLoadingTlb)
2166{
2167 PCIEMTB const pTb = pReNative->pTbOrg;
2168 uint32_t const cbInstr = (uint8_t)pCallEntry->auParams[0];
2169 uint32_t const cbStartPage = (uint32_t)(pCallEntry->auParams[0] >> 32);
2170 uint32_t const idxRange1 = (uint32_t)pCallEntry->auParams[1];
2171 uint32_t const offRange1 = (uint32_t)pCallEntry->auParams[2];
2172 uint32_t const idxRange2 = idxRange1 + 1;
2173 BODY_SET_CUR_INSTR();
2174 BODY_FLUSH_PENDING_WRITES();
2175 BODY_CHECK_CS_LIM(cbInstr);
2176 BODY_CHECK_OPCODES(pTb, idxRange1, offRange1, cbInstr);
2177 BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, cbStartPage, idxRange2, cbInstr);
2178 BODY_CHECK_OPCODES(pTb, idxRange2, 0, cbInstr);
2179 return off;
2180}
2181
2182IEM_DECL_IEMNATIVELIVENESSFUNC_DEF(iemNativeLivenessFunc_BltIn_CheckCsLimAndOpcodesAcrossPageLoadingTlb)
2183{
2184 IEM_LIVENESS_RAW_INIT_WITH_POTENTIAL_CALL(pOutgoing, pIncoming);
2185 LIVENESS_CHECK_CS_LIM(pOutgoing);
2186 LIVENESS_CHECK_OPCODES(pOutgoing);
2187 LIVENESS_LOAD_TLB_FOR_NEW_PAGE(pOutgoing, pCallEntry);
2188 IEM_LIVENESS_RAW_FINISH_WITH_POTENTIAL_CALL(pOutgoing, pIncoming);
2189 RT_NOREF(pCallEntry);
2190}
2191#endif
2192
2193
2194#if defined(BODY_CHECK_OPCODES) && defined(BODY_LOAD_TLB_FOR_NEW_PAGE)
2195/**
2196 * Built-in function for loading TLB and checking opcodes on both pages when
2197 * transitioning to a different code page.
2198 *
2199 * This is used when the previous instruction requires revalidation of opcode
2200 * bytes and the current instruction straddles a page boundary with opcode bytes
2201 * in both the old and new page.
2202 *
2203 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesAcrossPageLoadingTlb
2204 */
2205IEM_DECL_IEMNATIVERECOMPFUNC_DEF(iemNativeRecompFunc_BltIn_CheckOpcodesAcrossPageLoadingTlb)
2206{
2207 PCIEMTB const pTb = pReNative->pTbOrg;
2208 uint32_t const cbInstr = (uint8_t)pCallEntry->auParams[0];
2209 uint32_t const cbStartPage = (uint32_t)(pCallEntry->auParams[0] >> 32);
2210 uint32_t const idxRange1 = (uint32_t)pCallEntry->auParams[1];
2211 uint32_t const offRange1 = (uint32_t)pCallEntry->auParams[2];
2212 uint32_t const idxRange2 = idxRange1 + 1;
2213 BODY_SET_CUR_INSTR();
2214 BODY_FLUSH_PENDING_WRITES();
2215 BODY_CHECK_OPCODES(pTb, idxRange1, offRange1, cbInstr);
2216 BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, cbStartPage, idxRange2, cbInstr);
2217 BODY_CHECK_OPCODES(pTb, idxRange2, 0, cbInstr);
2218 return off;
2219}
2220
2221IEM_DECL_IEMNATIVELIVENESSFUNC_DEF(iemNativeLivenessFunc_BltIn_CheckOpcodesAcrossPageLoadingTlb)
2222{
2223 IEM_LIVENESS_RAW_INIT_WITH_POTENTIAL_CALL(pOutgoing, pIncoming);
2224 LIVENESS_CHECK_OPCODES(pOutgoing);
2225 LIVENESS_LOAD_TLB_FOR_NEW_PAGE(pOutgoing, pCallEntry);
2226 IEM_LIVENESS_RAW_FINISH_WITH_POTENTIAL_CALL(pOutgoing, pIncoming);
2227 RT_NOREF(pCallEntry);
2228}
2229#endif
2230
2231
2232#if defined(BODY_CHECK_OPCODES) && defined(BODY_LOAD_TLB_FOR_NEW_PAGE) && defined(BODY_CONSIDER_CS_LIM_CHECKING)
2233/**
2234 * Built-in function for loading TLB and checking opcodes on both pages and
2235 * considering the need for CS.LIM checking when transitioning to a different
2236 * code page.
2237 *
2238 * This is used when the previous instruction requires revalidation of opcode
2239 * bytes and the current instruction straddles a page boundary with opcode bytes
2240 * in both the old and new page.
2241 *
2242 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesAcrossPageLoadingTlb
2243 */
2244IEM_DECL_IEMNATIVERECOMPFUNC_DEF(iemNativeRecompFunc_BltIn_CheckOpcodesAcrossPageLoadingTlbConsiderCsLim)
2245{
2246 PCIEMTB const pTb = pReNative->pTbOrg;
2247 uint32_t const cbInstr = (uint8_t)pCallEntry->auParams[0];
2248 uint32_t const cbStartPage = (uint32_t)(pCallEntry->auParams[0] >> 32);
2249 uint32_t const idxRange1 = (uint32_t)pCallEntry->auParams[1];
2250 uint32_t const offRange1 = (uint32_t)pCallEntry->auParams[2];
2251 uint32_t const idxRange2 = idxRange1 + 1;
2252 BODY_SET_CUR_INSTR();
2253 BODY_FLUSH_PENDING_WRITES();
2254 BODY_CONSIDER_CS_LIM_CHECKING(pTb, cbInstr);
2255 BODY_CHECK_OPCODES(pTb, idxRange1, offRange1, cbInstr);
2256 BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, cbStartPage, idxRange2, cbInstr);
2257 BODY_CHECK_OPCODES(pTb, idxRange2, 0, cbInstr);
2258 return off;
2259}
2260
2261IEM_DECL_IEMNATIVELIVENESSFUNC_DEF(iemNativeLivenessFunc_BltIn_CheckOpcodesAcrossPageLoadingTlbConsiderCsLim)
2262{
2263 IEM_LIVENESS_RAW_INIT_WITH_POTENTIAL_CALL(pOutgoing, pIncoming);
2264 LIVENESS_CONSIDER_CS_LIM_CHECKING(pOutgoing);
2265 LIVENESS_CHECK_OPCODES(pOutgoing);
2266 LIVENESS_LOAD_TLB_FOR_NEW_PAGE(pOutgoing, pCallEntry);
2267 IEM_LIVENESS_RAW_FINISH_WITH_POTENTIAL_CALL(pOutgoing, pIncoming);
2268 RT_NOREF(pCallEntry);
2269}
2270#endif
2271
2272
2273#if defined(BODY_CHECK_OPCODES) && defined(BODY_LOAD_TLB_FOR_NEW_PAGE) && defined(BODY_CHECK_CS_LIM)
2274/**
2275 * Built-in function for checking CS.LIM, loading TLB and checking opcodes when
2276 * advancing naturally to a different code page.
2277 *
2278 * Only opcodes on the new page are checked.
2279 *
2280 * @see iemThreadedFunc_BltIn_CheckOpcodesOnNextPageLoadingTlb
2281 */
2282IEM_DECL_IEMNATIVERECOMPFUNC_DEF(iemNativeRecompFunc_BltIn_CheckCsLimAndOpcodesOnNextPageLoadingTlb)
2283{
2284 PCIEMTB const pTb = pReNative->pTbOrg;
2285 uint32_t const cbInstr = (uint8_t)pCallEntry->auParams[0];
2286 uint32_t const cbStartPage = (uint32_t)(pCallEntry->auParams[0] >> 32);
2287 uint32_t const idxRange1 = (uint32_t)pCallEntry->auParams[1];
2288 //uint32_t const offRange1 = (uint32_t)pCallEntry->auParams[2];
2289 uint32_t const idxRange2 = idxRange1 + 1;
2290 BODY_SET_CUR_INSTR();
2291 BODY_FLUSH_PENDING_WRITES();
2292 BODY_CHECK_CS_LIM(cbInstr);
2293 BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, cbStartPage, idxRange2, cbInstr);
2294 BODY_CHECK_OPCODES(pTb, idxRange2, 0, cbInstr);
2295 return off;
2296}
2297
2298IEM_DECL_IEMNATIVELIVENESSFUNC_DEF(iemNativeLivenessFunc_BltIn_CheckCsLimAndOpcodesOnNextPageLoadingTlb)
2299{
2300 IEM_LIVENESS_RAW_INIT_WITH_POTENTIAL_CALL(pOutgoing, pIncoming);
2301 LIVENESS_CHECK_CS_LIM(pOutgoing);
2302 LIVENESS_LOAD_TLB_FOR_NEW_PAGE(pOutgoing, pCallEntry);
2303 LIVENESS_CHECK_OPCODES(pOutgoing);
2304 IEM_LIVENESS_RAW_FINISH_WITH_POTENTIAL_CALL(pOutgoing, pIncoming);
2305 RT_NOREF(pCallEntry);
2306}
2307#endif
2308
2309
2310#if defined(BODY_CHECK_OPCODES) && defined(BODY_LOAD_TLB_FOR_NEW_PAGE)
2311/**
2312 * Built-in function for loading TLB and checking opcodes when advancing
2313 * naturally to a different code page.
2314 *
2315 * Only opcodes on the new page are checked.
2316 *
2317 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNextPageLoadingTlb
2318 */
2319IEM_DECL_IEMNATIVERECOMPFUNC_DEF(iemNativeRecompFunc_BltIn_CheckOpcodesOnNextPageLoadingTlb)
2320{
2321 PCIEMTB const pTb = pReNative->pTbOrg;
2322 uint32_t const cbInstr = (uint8_t)pCallEntry->auParams[0];
2323 uint32_t const cbStartPage = (uint32_t)(pCallEntry->auParams[0] >> 32);
2324 uint32_t const idxRange1 = (uint32_t)pCallEntry->auParams[1];
2325 //uint32_t const offRange1 = (uint32_t)pCallEntry->auParams[2];
2326 uint32_t const idxRange2 = idxRange1 + 1;
2327 BODY_SET_CUR_INSTR();
2328 BODY_FLUSH_PENDING_WRITES();
2329 BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, cbStartPage, idxRange2, cbInstr);
2330 BODY_CHECK_OPCODES(pTb, idxRange2, 0, cbInstr);
2331 return off;
2332}
2333
2334IEM_DECL_IEMNATIVELIVENESSFUNC_DEF(iemNativeLivenessFunc_BltIn_CheckOpcodesOnNextPageLoadingTlb)
2335{
2336 IEM_LIVENESS_RAW_INIT_WITH_POTENTIAL_CALL(pOutgoing, pIncoming);
2337 LIVENESS_LOAD_TLB_FOR_NEW_PAGE(pOutgoing, pCallEntry);
2338 LIVENESS_CHECK_OPCODES(pOutgoing);
2339 IEM_LIVENESS_RAW_FINISH_WITH_POTENTIAL_CALL(pOutgoing, pIncoming);
2340 RT_NOREF(pCallEntry);
2341}
2342#endif
2343
2344
2345#if defined(BODY_CHECK_OPCODES) && defined(BODY_LOAD_TLB_FOR_NEW_PAGE) && defined(BODY_CONSIDER_CS_LIM_CHECKING)
2346/**
2347 * Built-in function for loading TLB and checking opcodes and considering the
2348 * need for CS.LIM checking when advancing naturally to a different code page.
2349 *
2350 * Only opcodes on the new page are checked.
2351 *
2352 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNextPageLoadingTlb
2353 */
2354IEM_DECL_IEMNATIVERECOMPFUNC_DEF(iemNativeRecompFunc_BltIn_CheckOpcodesOnNextPageLoadingTlbConsiderCsLim)
2355{
2356 PCIEMTB const pTb = pReNative->pTbOrg;
2357 uint32_t const cbInstr = (uint8_t)pCallEntry->auParams[0];
2358 uint32_t const cbStartPage = (uint32_t)(pCallEntry->auParams[0] >> 32);
2359 uint32_t const idxRange1 = (uint32_t)pCallEntry->auParams[1];
2360 //uint32_t const offRange1 = (uint32_t)pCallEntry->auParams[2];
2361 uint32_t const idxRange2 = idxRange1 + 1;
2362 BODY_SET_CUR_INSTR();
2363 BODY_FLUSH_PENDING_WRITES();
2364 BODY_CONSIDER_CS_LIM_CHECKING(pTb, cbInstr);
2365 BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, cbStartPage, idxRange2, cbInstr);
2366 BODY_CHECK_OPCODES(pTb, idxRange2, 0, cbInstr);
2367 return off;
2368}
2369
2370IEM_DECL_IEMNATIVELIVENESSFUNC_DEF(iemNativeLivenessFunc_BltIn_CheckOpcodesOnNextPageLoadingTlbConsiderCsLim)
2371{
2372 IEM_LIVENESS_RAW_INIT_WITH_POTENTIAL_CALL(pOutgoing, pIncoming);
2373 LIVENESS_CONSIDER_CS_LIM_CHECKING(pOutgoing);
2374 LIVENESS_LOAD_TLB_FOR_NEW_PAGE(pOutgoing, pCallEntry);
2375 LIVENESS_CHECK_OPCODES(pOutgoing);
2376 IEM_LIVENESS_RAW_FINISH_WITH_POTENTIAL_CALL(pOutgoing, pIncoming);
2377 RT_NOREF(pCallEntry);
2378}
2379#endif
2380
2381
2382#if defined(BODY_CHECK_OPCODES) && defined(BODY_LOAD_TLB_FOR_NEW_PAGE) && defined(BODY_CHECK_CS_LIM)
2383/**
2384 * Built-in function for checking CS.LIM, loading TLB and checking opcodes when
2385 * advancing naturally to a different code page with first instr at byte 0.
2386 *
2387 * @see iemThreadedFunc_BltIn_CheckOpcodesOnNewPageLoadingTlb
2388 */
2389IEM_DECL_IEMNATIVERECOMPFUNC_DEF(iemNativeRecompFunc_BltIn_CheckCsLimAndOpcodesOnNewPageLoadingTlb)
2390{
2391 PCIEMTB const pTb = pReNative->pTbOrg;
2392 uint32_t const cbInstr = (uint8_t)pCallEntry->auParams[0];
2393 uint32_t const idxRange = (uint32_t)pCallEntry->auParams[1];
2394 BODY_SET_CUR_INSTR();
2395 BODY_FLUSH_PENDING_WRITES();
2396 BODY_CHECK_CS_LIM(cbInstr);
2397 BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, 0, idxRange, cbInstr);
2398 //Assert(pVCpu->iem.s.offCurInstrStart == 0);
2399 BODY_CHECK_OPCODES(pTb, idxRange, 0, cbInstr);
2400 return off;
2401}
2402
2403IEM_DECL_IEMNATIVELIVENESSFUNC_DEF(iemNativeLivenessFunc_BltIn_CheckCsLimAndOpcodesOnNewPageLoadingTlb)
2404{
2405 IEM_LIVENESS_RAW_INIT_WITH_POTENTIAL_CALL(pOutgoing, pIncoming);
2406 LIVENESS_CHECK_CS_LIM(pOutgoing);
2407 LIVENESS_LOAD_TLB_FOR_NEW_PAGE(pOutgoing, pCallEntry);
2408 LIVENESS_CHECK_OPCODES(pOutgoing);
2409 IEM_LIVENESS_RAW_FINISH_WITH_POTENTIAL_CALL(pOutgoing, pIncoming);
2410 RT_NOREF(pCallEntry);
2411}
2412#endif
2413
2414
2415#if defined(BODY_CHECK_OPCODES) && defined(BODY_LOAD_TLB_FOR_NEW_PAGE)
2416/**
2417 * Built-in function for loading TLB and checking opcodes when advancing
2418 * naturally to a different code page with first instr at byte 0.
2419 *
2420 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNewPageLoadingTlb
2421 */
2422IEM_DECL_IEMNATIVERECOMPFUNC_DEF(iemNativeRecompFunc_BltIn_CheckOpcodesOnNewPageLoadingTlb)
2423{
2424 PCIEMTB const pTb = pReNative->pTbOrg;
2425 uint32_t const cbInstr = (uint8_t)pCallEntry->auParams[0];
2426 uint32_t const idxRange = (uint32_t)pCallEntry->auParams[1];
2427 BODY_SET_CUR_INSTR();
2428 BODY_FLUSH_PENDING_WRITES();
2429 BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, 0, idxRange, cbInstr);
2430 //Assert(pVCpu->iem.s.offCurInstrStart == 0);
2431 BODY_CHECK_OPCODES(pTb, idxRange, 0, cbInstr);
2432 return off;
2433}
2434
2435IEM_DECL_IEMNATIVELIVENESSFUNC_DEF(iemNativeLivenessFunc_BltIn_CheckOpcodesOnNewPageLoadingTlb)
2436{
2437 IEM_LIVENESS_RAW_INIT_WITH_POTENTIAL_CALL(pOutgoing, pIncoming);
2438 LIVENESS_LOAD_TLB_FOR_NEW_PAGE(pOutgoing, pCallEntry);
2439 LIVENESS_CHECK_OPCODES(pOutgoing);
2440 IEM_LIVENESS_RAW_FINISH_WITH_POTENTIAL_CALL(pOutgoing, pIncoming);
2441 RT_NOREF(pCallEntry);
2442}
2443#endif
2444
2445
2446#if defined(BODY_CHECK_OPCODES) && defined(BODY_LOAD_TLB_FOR_NEW_PAGE) && defined(BODY_CONSIDER_CS_LIM_CHECKING)
2447/**
2448 * Built-in function for loading TLB and checking opcodes and considering the
2449 * need for CS.LIM checking when advancing naturally to a different code page
2450 * with first instr at byte 0.
2451 *
2452 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNewPageLoadingTlb
2453 */
2454IEM_DECL_IEMNATIVERECOMPFUNC_DEF(iemNativeRecompFunc_BltIn_CheckOpcodesOnNewPageLoadingTlbConsiderCsLim)
2455{
2456 PCIEMTB const pTb = pReNative->pTbOrg;
2457 uint32_t const cbInstr = (uint8_t)pCallEntry->auParams[0];
2458 uint32_t const idxRange = (uint32_t)pCallEntry->auParams[1];
2459 BODY_SET_CUR_INSTR();
2460 BODY_FLUSH_PENDING_WRITES();
2461 BODY_CONSIDER_CS_LIM_CHECKING(pTb, cbInstr);
2462 BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, 0, idxRange, cbInstr);
2463 //Assert(pVCpu->iem.s.offCurInstrStart == 0);
2464 BODY_CHECK_OPCODES(pTb, idxRange, 0, cbInstr);
2465 return off;
2466}
2467
2468IEM_DECL_IEMNATIVELIVENESSFUNC_DEF(iemNativeLivenessFunc_BltIn_CheckOpcodesOnNewPageLoadingTlbConsiderCsLim)
2469{
2470 IEM_LIVENESS_RAW_INIT_WITH_POTENTIAL_CALL(pOutgoing, pIncoming);
2471 LIVENESS_CONSIDER_CS_LIM_CHECKING(pOutgoing);
2472 LIVENESS_LOAD_TLB_FOR_NEW_PAGE(pOutgoing, pCallEntry);
2473 LIVENESS_CHECK_OPCODES(pOutgoing);
2474 IEM_LIVENESS_RAW_FINISH_WITH_POTENTIAL_CALL(pOutgoing, pIncoming);
2475 RT_NOREF(pCallEntry);
2476}
2477#endif
2478
2479
2480/**
2481 * Built-in function for jumping in the call sequence.
2482 */
2483IEM_DECL_IEMNATIVERECOMPFUNC_DEF(iemNativeRecompFunc_BltIn_Jump)
2484{
2485 PCIEMTB const pTb = pReNative->pTbOrg;
2486 Assert(pCallEntry->auParams[1] == 0 && pCallEntry->auParams[2] == 0);
2487 Assert(pCallEntry->auParams[0] < pTb->Thrd.cCalls);
2488#if 1
2489 RT_NOREF(pCallEntry, pTb);
2490
2491# ifdef VBOX_WITH_STATISTICS
2492 /* Increment StatNativeTbExitLoopFullTb. */
2493 uint32_t const offStat = RT_UOFFSETOF(VMCPU, iem.s.StatNativeTbExitLoopFullTb);
2494# ifdef RT_ARCH_AMD64
2495 off = iemNativeEmitIncStamCounterInVCpu(pReNative, off, UINT8_MAX, UINT8_MAX, offStat);
2496# else
2497 uint8_t const idxStatsTmp1 = iemNativeRegAllocTmp(pReNative, &off);
2498 uint8_t const idxStatsTmp2 = iemNativeRegAllocTmp(pReNative, &off);
2499 off = iemNativeEmitIncStamCounterInVCpu(pReNative, off, idxStatsTmp1, idxStatsTmp2, offStat);
2500 iemNativeRegFreeTmp(pReNative, idxStatsTmp1);
2501 iemNativeRegFreeTmp(pReNative, idxStatsTmp2);
2502# endif
2503# endif
2504# ifdef IEMNATIVE_WITH_INSTRUCTION_COUNTING
2505 /** @todo
2506 off = iemNativeEmitAddU32CounterInVCpuEx(pReNative, off, pTb->cInstructions, RT_UOFFSETOF(VMCPUCC, iem.s.cInstructions));
2507 */
2508# endif
2509
2510 /* Jump to the start of the TB. */
2511 uint32_t idxLabel = iemNativeLabelFind(pReNative, kIemNativeLabelType_LoopJumpTarget);
2512 AssertStmt(idxLabel < pReNative->cLabels, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_LABEL_IPE_6)); /** @todo better status */
2513 return iemNativeEmitJmpToLabel(pReNative, off, idxLabel);
2514#else
2515 RT_NOREF(pReNative, pCallEntry, pTb);
2516 return off;
2517#endif
2518}
2519
2520IEM_DECL_IEMNATIVELIVENESSFUNC_DEF(iemNativeLivenessFunc_BltIn_Jump)
2521{
2522 /* We could also use UNUSED here, but this is equivalent (at the moment). */
2523 IEM_LIVENESS_RAW_INIT_WITH_CALL(pOutgoing, pIncoming);
2524 RT_NOREF(pCallEntry);
2525}
2526