1/* $Id: IEMAllOpcodeFetch-x86.cpp 108260 2025-02-17 15:24:14Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2024 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_IEM
33#define VMCPU_INCL_CPUM_GST_CTX
34#ifdef IN_RING0
35# define VBOX_VMM_TARGET_X86
36#endif
37#include <VBox/vmm/iem.h>
38#include <VBox/vmm/cpum.h>
39#include <VBox/vmm/pgm.h>
40#include <VBox/vmm/dbgf.h>
41#include "IEMInternal.h"
42#include <VBox/vmm/vmcc.h>
43#include <VBox/log.h>
44#include <VBox/param.h>
45#include <iprt/assert.h>
46#include <iprt/errcore.h>
47#include <iprt/string.h>
48#include <iprt/x86.h>
49
50#include "IEMInline.h"
51#include "IEMInline-x86.h"
52#include "IEMAllTlbInline-x86.h"
53
54
55#ifndef IEM_WITH_CODE_TLB
56/**
57 * Prefetches opcodes when first starting execution.
58 *
59 * @returns Strict VBox status code.
60 * @param pVCpu The cross context virtual CPU structure of the calling
61 * thread.
62 */
63VBOXSTRICTRC iemOpcodeFetchPrefetch(PVMCPUCC pVCpu) RT_NOEXCEPT
64{
65 /*
66 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
67 *
68 * First translate CS:rIP to a physical address.
69 *
70 * Note! The iemOpcodeFetchMoreBytes code depends on this here code to fetch
71 * all relevant bytes from the first page, as it ASSUMES it's only ever
72 * called for dealing with CS.LIM, page crossing and instructions that
73 * are too long.
74 */
75 uint32_t cbToTryRead;
76 RTGCPTR GCPtrPC;
77 if (IEM_IS_64BIT_CODE(pVCpu))
78 {
79 cbToTryRead = GUEST_PAGE_SIZE;
80 GCPtrPC = pVCpu->cpum.GstCtx.rip;
81 if (IEM_IS_CANONICAL(GCPtrPC))
82 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
83 else
84 return iemRaiseGeneralProtectionFault0(pVCpu);
85 }
86 else
87 {
88 uint32_t GCPtrPC32 = pVCpu->cpum.GstCtx.eip;
89 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu), ("%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
90 if (GCPtrPC32 <= pVCpu->cpum.GstCtx.cs.u32Limit)
91 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrPC32 + 1;
92 else
93 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
94 if (cbToTryRead) { /* likely */ }
95 else /* overflowed */
96 {
97 Assert(GCPtrPC32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
98 cbToTryRead = UINT32_MAX;
99 }
100 GCPtrPC = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrPC32;
101 Assert(GCPtrPC <= UINT32_MAX);
102 }
103
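 /* GCPtrPC now holds the flat (linear) address of the instruction and cbToTryRead
    is capped by the segment limit; next, translate it to a guest physical address
    by walking the guest page tables. */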
104 PGMPTWALKFAST WalkFast;
105 int rc = PGMGstQueryPageFast(pVCpu, GCPtrPC,
106 IEM_GET_CPL(pVCpu) == 3 ? PGMQPAGE_F_EXECUTE | PGMQPAGE_F_USER_MODE : PGMQPAGE_F_EXECUTE,
107 &WalkFast);
108 if (RT_SUCCESS(rc))
109 Assert(WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED);
110 else
111 {
112 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
113# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
114/** @todo This isn't quite right yet, as PGM_GST_SLAT_NAME_EPT(Walk) doesn't
115 * know about what kind of access we're making! See PGM_GST_NAME(WalkFast). */
116 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
117 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
118# endif
119 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, rc);
120 }
121#if 0
122 if ((WalkFast.fEffective & X86_PTE_US) || IEM_GET_CPL(pVCpu) != 3) { /* likely */ }
123 else
124 {
125 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
126# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
127/** @todo this is completely wrong for EPT. WalkFast.fFailed is always zero here!*/
128# error completely wrong
129 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
130 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
131# endif
132 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
133 }
134 if (!(WalkFast.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
135 else
136 {
137 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
138# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
139/** @todo this is completely wrong for EPT. WalkFast.fFailed is always zero here!*/
140# error completely wrong.
141 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
142 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
143# endif
144 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
145 }
146#else
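 /* The user/no-execute permission checks are performed by PGMGstQueryPageFast
    itself (via the PGMQPAGE_F_* flags above), so only sanity assertions remain. */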
147 Assert((WalkFast.fEffective & X86_PTE_US) || IEM_GET_CPL(pVCpu) != 3);
148 Assert(!(WalkFast.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE));
149#endif
150 RTGCPHYS const GCPhys = WalkFast.GCPhys;
151
152 /*
153 * Read the bytes at this address.
154 */
155 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
156 if (cbToTryRead > cbLeftOnPage)
157 cbToTryRead = cbLeftOnPage;
158 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
159 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
160
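 /* Go through PGMPhysRead so that physical access handlers (e.g. MMIO) are
    respected, unless the caller asked for handlers to be bypassed. */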
161 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
162 {
163 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
164 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
165 { /* likely */ }
166 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
167 {
168 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
169 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
170 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
171 }
172 else
173 {
174 Log((RT_SUCCESS(rcStrict)
175 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
176 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
177 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
178 return rcStrict;
179 }
180 }
181 else
182 {
183 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
184 if (RT_SUCCESS(rc))
185 { /* likely */ }
186 else
187 {
188 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
189 GCPtrPC, GCPhys, cbToTryRead, rc));
190 return rc;
191 }
192 }
193 pVCpu->iem.s.cbOpcode = cbToTryRead;
194 return VINF_SUCCESS;
195}
196#endif /* !IEM_WITH_CODE_TLB */
197
198
199/**
200 * Flushes the prefetch buffer, light version.
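 *
 * Without IEM_WITH_CODE_TLB this merely records the given instruction length
 * in cbOpcode; in the code TLB build it is a no-op.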
201 */
202void iemOpcodeFlushLight(PVMCPUCC pVCpu, uint8_t cbInstr)
203{
204#ifndef IEM_WITH_CODE_TLB
205 pVCpu->iem.s.cbOpcode = cbInstr;
206#else
207 RT_NOREF(pVCpu, cbInstr);
208#endif
209}
210
211
212/**
213 * Flushes the prefetch buffer, heavy version.
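 *
 * In the code TLB build this zeroes cbInstrBufTotal so the instruction buffer
 * is re-established on the next opcode fetch.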
214 */
215void iemOpcodeFlushHeavy(PVMCPUCC pVCpu, uint8_t cbInstr)
216{
217#ifndef IEM_WITH_CODE_TLB
218 pVCpu->iem.s.cbOpcode = cbInstr; /* Note! SVM and VT-x may set this to zero on exit, rather than the instruction length. */
219#elif 1
220 pVCpu->iem.s.cbInstrBufTotal = 0;
221 RT_NOREF(cbInstr);
222#else
223 RT_NOREF(pVCpu, cbInstr);
224#endif
225}
226
227
228
229#ifdef IEM_WITH_CODE_TLB
230
231/**
232 * Tries to fetch @a cbDst opcode bytes, raising the appropriate exception and
233 * longjmp'ing on failure.
234 *
235 * We end up here for a number of reasons:
236 * - pbInstrBuf isn't yet initialized.
237 * - Advancing beyond the buffer boundary (e.g. crossing a page).
238 * - Advancing beyond the CS segment limit.
239 * - Fetching from non-mappable page (e.g. MMIO).
240 * - TLB loading in the recompiler (@a pvDst = NULL, @a cbDst = 0).
241 *
242 * @param pVCpu The cross context virtual CPU structure of the
243 * calling thread.
244 * @param pvDst Where to return the bytes.
245 * @param cbDst Number of bytes to read. A value of zero is
246 * allowed for initializing pbInstrBuf (the
247 * recompiler does this). In this case it is best
248 * to set pbInstrBuf to NULL prior to the call.
249 */
250void iemOpcodeFetchBytesJmp(PVMCPUCC pVCpu, size_t cbDst, void *pvDst) IEM_NOEXCEPT_MAY_LONGJMP
251{
252# ifdef IN_RING3
253 for (;;)
254 {
255 Assert(cbDst <= 8);
256 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
257
258 /*
259 * We might have a partial buffer match, deal with that first to make the
260 * rest simpler. This is the first part of the cross page/buffer case.
261 */
262 uint8_t const * const pbInstrBuf = pVCpu->iem.s.pbInstrBuf;
263 if (pbInstrBuf != NULL)
264 {
265 Assert(cbDst != 0); /* pbInstrBuf shall be NULL in case of a TLB load */
266 uint32_t const cbInstrBuf = pVCpu->iem.s.cbInstrBuf;
267 if (offBuf < cbInstrBuf)
268 {
269 Assert(offBuf + cbDst > cbInstrBuf);
270 uint32_t const cbCopy = cbInstrBuf - offBuf;
271 memcpy(pvDst, &pbInstrBuf[offBuf], cbCopy);
272
273 cbDst -= cbCopy;
274 pvDst = (uint8_t *)pvDst + cbCopy;
275 offBuf += cbCopy;
276 }
277 }
278
279 /*
280 * Check segment limit, figuring how much we're allowed to access at this point.
281 *
282 * We will fault immediately if RIP is past the segment limit / in non-canonical
283 * territory. If we do continue, there are one or more bytes to read before we
284 * end up in trouble and we need to do that first before faulting.
285 */
286 RTGCPTR GCPtrFirst;
287 uint32_t cbMaxRead;
288 if (IEM_IS_64BIT_CODE(pVCpu))
289 {
290 GCPtrFirst = pVCpu->cpum.GstCtx.rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
291 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
292 { /* likely */ }
293 else
294 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
295 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
296 }
297 else
298 {
299 GCPtrFirst = pVCpu->cpum.GstCtx.eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
300 /* Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu)); - this is allowed */
301 if (RT_LIKELY((uint32_t)GCPtrFirst <= pVCpu->cpum.GstCtx.cs.u32Limit))
302 { /* likely */ }
303 else /** @todo For CPUs older than the 386, we should not necessarily generate \#GP here but wrap around! */
304 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
305 cbMaxRead = pVCpu->cpum.GstCtx.cs.u32Limit - (uint32_t)GCPtrFirst + 1;
306 if (cbMaxRead != 0)
307 { /* likely */ }
308 else
309 {
310 /* Overflowed because address is 0 and limit is max. */
311 Assert(GCPtrFirst == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
312 cbMaxRead = X86_PAGE_SIZE;
313 }
314 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base;
315 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
316 if (cbMaxRead2 < cbMaxRead)
317 cbMaxRead = cbMaxRead2;
318 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
319 }
320
321 /*
322 * Get the TLB entry for this piece of code.
323 */
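 /* The code TLB keeps an even/odd entry pair per tag: the even entry holds
    non-global translations (tag | uTlbRevision) and the odd one holds global
    PTE.G=1 translations (tag | uTlbRevisionGlobal). */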
324 uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV(GCPtrFirst);
325 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.CodeTlb, uTagNoRev);
326 if ( pTlbe->uTag == (uTagNoRev | pVCpu->iem.s.CodeTlb.uTlbRevision)
327 || (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.CodeTlb.uTlbRevisionGlobal))
328 {
329 /* likely when executing lots of code, otherwise unlikely */
330# ifdef IEM_WITH_TLB_STATISTICS
331 pVCpu->iem.s.CodeTlb.cTlbCoreHits++;
332# endif
333 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED));
334
335 /* Check TLB page table level access flags. */
336 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
337 {
338 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && IEM_GET_CPL(pVCpu) == 3)
339 {
340 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
341 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
342 }
343 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
344 {
345 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
346 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
347 }
348 }
349
350 /* Look up the physical page info if necessary. */
351 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
352 { /* not necessary */ }
353 else
354 {
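 /* The physical revision embedded in fFlagsAndPhysRev is outdated, so the
    cached mapping and physical flags are stale and must be re-resolved. */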
355 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
356 { /* likely */ }
357 else
358 iemTlbInvalidateAllPhysicalSlow(pVCpu);
359 pTlbe->fFlagsAndPhysRev &= ~IEMTLBE_GCPHYS2PTR_MASK;
360 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
361 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
362 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
363 }
364 }
365 else
366 {
367 pVCpu->iem.s.CodeTlb.cTlbCoreMisses++;
368
369 /* This page table walking will set A bits as required by the access while performing the walk.
370 ASSUMES these are set when the address is translated rather than on commit... */
371 /** @todo testcase: check when A bits are actually set by the CPU for code. */
372 PGMPTWALKFAST WalkFast;
373 int rc = PGMGstQueryPageFast(pVCpu, GCPtrFirst,
374 IEM_GET_CPL(pVCpu) == 3 ? PGMQPAGE_F_EXECUTE | PGMQPAGE_F_USER_MODE : PGMQPAGE_F_EXECUTE,
375 &WalkFast);
376 if (RT_SUCCESS(rc))
377 Assert((WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED) && WalkFast.fFailed == PGM_WALKFAIL_SUCCESS);
378 else
379 {
380# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
381 /** @todo Nested VMX: Need to handle EPT violation/misconfig here? OF COURSE! */
382 Assert(!(WalkFast.fFailed & PGM_WALKFAIL_EPT));
383# endif
384 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
385 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, rc);
386 }
387
388 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
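 /* Select which entry of the pair to (re)load: the even one for non-global
    pages (or whenever the guest isn't at CPL 0), the odd one for global pages. */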
389 if ( !(WalkFast.fEffective & PGM_PTATTRS_G_MASK)
390 || IEM_GET_CPL(pVCpu) != 0) /* optimization: Only use the PTE.G=1 entries in ring-0. */
391 {
392 pTlbe--;
393 pTlbe->uTag = uTagNoRev | pVCpu->iem.s.CodeTlb.uTlbRevision;
394 if (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE)
395 iemTlbLoadedLargePage<false>(pVCpu, &pVCpu->iem.s.CodeTlb, uTagNoRev, RT_BOOL(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE));
396# ifdef IEMTLB_WITH_LARGE_PAGE_BITMAP
397 else
398 ASMBitClear(pVCpu->iem.s.CodeTlb.bmLargePage, IEMTLB_TAG_TO_EVEN_INDEX(uTagNoRev));
399# endif
400 }
401 else
402 {
403 pVCpu->iem.s.CodeTlb.cTlbCoreGlobalLoads++;
404 pTlbe->uTag = uTagNoRev | pVCpu->iem.s.CodeTlb.uTlbRevisionGlobal;
405 if (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE)
406 iemTlbLoadedLargePage<true>(pVCpu, &pVCpu->iem.s.CodeTlb, uTagNoRev, RT_BOOL(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE));
407# ifdef IEMTLB_WITH_LARGE_PAGE_BITMAP
408 else
409 ASMBitClear(pVCpu->iem.s.CodeTlb.bmLargePage, IEMTLB_TAG_TO_EVEN_INDEX(uTagNoRev) + 1);
410# endif
411 }
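 /* Derive the entry flags: the PTE US/RW/D/A bits are inverted so a set flag
    means "not allowed"/"not set", the NX bit is shifted down into
    IEMTLBE_F_PT_NO_EXEC (bit 0), and the large page info bit is carried over. */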
412 pTlbe->fFlagsAndPhysRev = (~WalkFast.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A))
413 | (WalkFast.fEffective >> X86_PTE_PAE_BIT_NX) /*IEMTLBE_F_PT_NO_EXEC*/
414 | (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE);
415 RTGCPHYS const GCPhysPg = WalkFast.GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
416 pTlbe->GCPhys = GCPhysPg;
417 pTlbe->pbMappingR3 = NULL;
418 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE));
419 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) || IEM_GET_CPL(pVCpu) != 3);
420 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED));
421
422 if (!((uintptr_t)pTlbe & (sizeof(*pTlbe) * 2 - 1)))
423 IEMTLBTRACE_LOAD( pVCpu, GCPtrFirst, pTlbe->GCPhys, (uint32_t)pTlbe->fFlagsAndPhysRev, false);
424 else
425 IEMTLBTRACE_LOAD_GLOBAL(pVCpu, GCPtrFirst, pTlbe->GCPhys, (uint32_t)pTlbe->fFlagsAndPhysRev, false);
426
427 /* Resolve the physical address. */
428 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
429 { /* likely */ }
430 else
431 iemTlbInvalidateAllPhysicalSlow(pVCpu);
432 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_GCPHYS2PTR_MASK));
433 rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, GCPhysPg, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
434 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
435 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
436 }
437
438# if defined(IN_RING3) || defined(IN_RING0) /** @todo fixme */
439 /*
440 * Try to do a direct read using the pbMappingR3 pointer.
441 * Note! Do not recheck the physical TLB revision number here as we have the
442 * wrong response to changes in the else case. If someone is updating
443 * pVCpu->iem.s.CodeTlb.uTlbPhysRev in parallel to us, we should be fine
444 * pretending we always won the race.
445 */
446 if ( (pTlbe->fFlagsAndPhysRev & (/*IEMTLBE_F_PHYS_REV |*/ IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
447 == /*pVCpu->iem.s.CodeTlb.uTlbPhysRev*/ 0U)
448 {
449 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
450 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
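 /* An x86 instruction is at most 15 bytes long, so the buffer is capped at 15
    bytes from the instruction start; anything longer triggers #GP(0) below. */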
451 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
452 {
453 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
454 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
455 }
456 else
457 {
458 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
459 if (cbInstr + (uint32_t)cbDst <= 15)
460 {
461 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
462 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
463 }
464 else
465 {
466 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0)\n",
467 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, cbDst));
468 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
469 }
470 }
471 if (cbDst <= cbMaxRead)
472 {
473 pVCpu->iem.s.fTbCrossedPage |= offPg == 0 || pVCpu->iem.s.fTbBranched != 0; /** @todo Spurious load effect on branch handling? */
474# if 0 /* unused */
475 pVCpu->iem.s.GCPhysInstrBufPrev = pVCpu->iem.s.GCPhysInstrBuf;
476# endif
477 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
478 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
479 pVCpu->iem.s.GCPhysInstrBuf = pTlbe->GCPhys;
480 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
481 if (cbDst > 0) /* To make ASAN happy in the TLB load case. */
482 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
483 else
484 Assert(!pvDst);
485 return;
486 }
487 pVCpu->iem.s.pbInstrBuf = NULL;
488
489 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
490 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
491 }
492# else
493# error "refactor as needed"
494 /*
495 * There is no special read handling, so we can read a bit more and
496 * put it in the prefetch buffer.
497 */
498 if ( cbDst < cbMaxRead
499 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
500 {
501 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
502 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
503 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
504 { /* likely */ }
505 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
506 {
507 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
508 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
509 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
510 AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)));
511 }
512 else
513 {
514 Log((RT_SUCCESS(rcStrict)
515 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
516 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
517 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
518 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
519 }
520 }
521# endif
522 /*
523 * Special read handling, so only read exactly what's needed.
524 * This is a highly unlikely scenario.
525 */
526 else
527 {
528 pVCpu->iem.s.CodeTlb.cTlbSlowCodeReadPath++;
529
530 /* Check instruction length. */
531 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
532 if (RT_LIKELY(cbInstr + cbDst <= 15))
533 { /* likely */ }
534 else
535 {
536 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0) [slow]\n",
537 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, cbDst));
538 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
539 }
540
541 /* Do the reading. */
542 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
543 if (cbToRead > 0)
544 {
545 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
546 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
547 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
548 { /* likely */ }
549 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
550 {
551 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
552 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
553 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
554 AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)));
555 }
556 else
557 {
558 Log((RT_SUCCESS(rcStrict)
559 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
560 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
561 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
562 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
563 }
564 }
565
566 /* Update the state and probably return. */
567 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
568 pVCpu->iem.s.fTbCrossedPage |= offPg == 0 || pVCpu->iem.s.fTbBranched != 0;
569# if 0 /* unused */
570 pVCpu->iem.s.GCPhysInstrBufPrev = pVCpu->iem.s.GCPhysInstrBuf;
571# endif
572 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
573 pVCpu->iem.s.offInstrNextByte = offPg + cbInstr + cbToRead;
574 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead + cbInstr) - cbToRead - cbInstr;
575 pVCpu->iem.s.cbInstrBufTotal = X86_PAGE_SIZE; /** @todo ??? */
576 pVCpu->iem.s.GCPhysInstrBuf = pTlbe->GCPhys;
577 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
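 /* pbInstrBuf is left NULL so the next fetch from this page comes back through
    this special-read path instead of attempting a direct buffer read. */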
578 pVCpu->iem.s.pbInstrBuf = NULL;
579 if (cbToRead == cbDst)
580 return;
581 Assert(cbToRead == cbMaxRead);
582 }
583
584 /*
585 * More to read, loop.
586 */
587 cbDst -= cbMaxRead;
588 pvDst = (uint8_t *)pvDst + cbMaxRead;
589 }
590# else /* !IN_RING3 */
591 RT_NOREF(pvDst, cbDst);
592 if (pvDst || cbDst)
593 IEM_DO_LONGJMP(pVCpu, VERR_INTERNAL_ERROR);
594# endif /* !IN_RING3 */
595}
596
597#else /* !IEM_WITH_CODE_TLB */
598
599/**
600 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
601 * exception if it fails.
602 *
603 * @returns Strict VBox status code.
604 * @param pVCpu The cross context virtual CPU structure of the
605 * calling thread.
606 * @param cbMin The minimum number of bytes relative to offOpcode
607 * that must be read.
608 */
609VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPUCC pVCpu, size_t cbMin) RT_NOEXCEPT
610{
611 /*
612 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
613 *
614 * First translate CS:rIP to a physical address.
615 */
616 uint8_t const cbOpcode = pVCpu->iem.s.cbOpcode;
617 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
618 uint8_t const cbLeft = cbOpcode - offOpcode;
619 Assert(cbLeft < cbMin);
620 Assert(cbOpcode <= sizeof(pVCpu->iem.s.abOpcode));
621
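 /* Figure the linear address of the next opcode byte (RIP plus the bytes already
    buffered) and how much we may read before hitting the segment limit or the
    end of the page. */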
622 uint32_t cbToTryRead;
623 RTGCPTR GCPtrNext;
624 if (IEM_IS_64BIT_CODE(pVCpu))
625 {
626 GCPtrNext = pVCpu->cpum.GstCtx.rip + cbOpcode;
627 if (!IEM_IS_CANONICAL(GCPtrNext))
628 return iemRaiseGeneralProtectionFault0(pVCpu);
629 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
630 }
631 else
632 {
633 uint32_t GCPtrNext32 = pVCpu->cpum.GstCtx.eip;
634 /* Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu)); - this is allowed */
635 GCPtrNext32 += cbOpcode;
636 if (GCPtrNext32 > pVCpu->cpum.GstCtx.cs.u32Limit)
637 /** @todo For CPUs older than the 386, we should not generate \#GP here but wrap around! */
638 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
639 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrNext32 + 1;
640 if (!cbToTryRead) /* overflowed */
641 {
642 Assert(GCPtrNext32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
643 cbToTryRead = UINT32_MAX;
644 /** @todo check out wrapping around the code segment. */
645 }
646 if (cbToTryRead < cbMin - cbLeft)
647 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
648 GCPtrNext = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrNext32;
649
650 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
651 if (cbToTryRead > cbLeftOnPage)
652 cbToTryRead = cbLeftOnPage;
653 }
654
655 /* Restrict to opcode buffer space.
656
657 We're making ASSUMPTIONS here based on work done previously in
658 iemInitDecoderAndPrefetchOpcodes, where bytes from the first page will
659 be fetched in case of an instruction crossing two pages. */
660 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - cbOpcode)
661 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - cbOpcode;
662 if (RT_LIKELY(cbToTryRead + cbLeft >= cbMin))
663 { /* likely */ }
664 else
665 {
666 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0)\n",
667 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, offOpcode, cbMin));
668 return iemRaiseGeneralProtectionFault0(pVCpu);
669 }
670
671 PGMPTWALKFAST WalkFast;
672 int rc = PGMGstQueryPageFast(pVCpu, GCPtrNext,
673 IEM_GET_CPL(pVCpu) == 3 ? PGMQPAGE_F_EXECUTE | PGMQPAGE_F_USER_MODE : PGMQPAGE_F_EXECUTE,
674 &WalkFast);
675 if (RT_SUCCESS(rc))
676 Assert((WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED) && WalkFast.fFailed == PGM_WALKFAIL_SUCCESS);
677 else
678 {
679 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
680#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
681 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
682 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
683#endif
684 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, rc);
685 }
686 Assert((WalkFast.fEffective & X86_PTE_US) || IEM_GET_CPL(pVCpu) != 3);
687 Assert(!(WalkFast.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE));
688
689 RTGCPHYS const GCPhys = WalkFast.GCPhys;
690 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, cbOpcode));
691
692 /*
693 * Read the bytes at this address.
694 *
695 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
696 * and since PATM should only patch the start of an instruction there
697 * should be no need to check again here.
698 */
699 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
700 {
701 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[cbOpcode],
702 cbToTryRead, PGMACCESSORIGIN_IEM);
703 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
704 { /* likely */ }
705 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
706 {
707 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
708 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
709 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
710 }
711 else
712 {
713 Log((RT_SUCCESS(rcStrict)
714 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
715 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
716 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
717 return rcStrict;
718 }
719 }
720 else
721 {
722 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[cbOpcode], GCPhys, cbToTryRead);
723 if (RT_SUCCESS(rc))
724 { /* likely */ }
725 else
726 {
727 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
728 return rc;
729 }
730 }
731 pVCpu->iem.s.cbOpcode = cbOpcode + cbToTryRead;
732 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
733
734 return VINF_SUCCESS;
735}
736
737#endif /* !IEM_WITH_CODE_TLB */
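/* The *Slow and *SlowJmp helpers below are the out-of-line fallbacks used by the
   inline opcode getters when the buffered opcode bytes run out: they fetch more
   bytes (raising or propagating faults as needed) and then return the value. */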
738#ifndef IEM_WITH_SETJMP
739
740/**
741 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
742 *
743 * @returns Strict VBox status code.
744 * @param pVCpu The cross context virtual CPU structure of the
745 * calling thread.
746 * @param pb Where to return the opcode byte.
747 */
748VBOXSTRICTRC iemOpcodeGetNextU8Slow(PVMCPUCC pVCpu, uint8_t *pb) RT_NOEXCEPT
749{
750 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
751 if (rcStrict == VINF_SUCCESS)
752 {
753 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
754 *pb = pVCpu->iem.s.abOpcode[offOpcode];
755 pVCpu->iem.s.offOpcode = offOpcode + 1;
756 }
757 else
758 *pb = 0;
759 return rcStrict;
760}
761
762#else /* IEM_WITH_SETJMP */
763
764/**
765 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
766 *
767 * @returns The opcode byte.
768 * @param pVCpu The cross context virtual CPU structure of the calling thread.
769 */
770uint8_t iemOpcodeGetNextU8SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
771{
772# ifdef IEM_WITH_CODE_TLB
773 uint8_t u8;
774 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
775 return u8;
776# else
777 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
778 if (rcStrict == VINF_SUCCESS)
779 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
780 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
781# endif
782}
783
784#endif /* IEM_WITH_SETJMP */
785
786#ifndef IEM_WITH_SETJMP
787
788/**
789 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
790 *
791 * @returns Strict VBox status code.
792 * @param pVCpu The cross context virtual CPU structure of the calling thread.
793 * @param pu16 Where to return the opcode word (the sign-extended byte).
794 */
795VBOXSTRICTRC iemOpcodeGetNextS8SxU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
796{
797 uint8_t u8;
798 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
799 if (rcStrict == VINF_SUCCESS)
800 *pu16 = (int8_t)u8;
801 return rcStrict;
802}
803
804
805/**
806 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
807 *
808 * @returns Strict VBox status code.
809 * @param pVCpu The cross context virtual CPU structure of the calling thread.
810 * @param pu32 Where to return the opcode dword.
811 */
812VBOXSTRICTRC iemOpcodeGetNextS8SxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
813{
814 uint8_t u8;
815 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
816 if (rcStrict == VINF_SUCCESS)
817 *pu32 = (int8_t)u8;
818 return rcStrict;
819}
820
821
822/**
823 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
824 *
825 * @returns Strict VBox status code.
826 * @param pVCpu The cross context virtual CPU structure of the calling thread.
827 * @param pu64 Where to return the opcode qword.
828 */
829VBOXSTRICTRC iemOpcodeGetNextS8SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
830{
831 uint8_t u8;
832 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
833 if (rcStrict == VINF_SUCCESS)
834 *pu64 = (int8_t)u8;
835 return rcStrict;
836}
837
838#endif /* !IEM_WITH_SETJMP */
839
840
841#ifndef IEM_WITH_SETJMP
842
843/**
844 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
845 *
846 * @returns Strict VBox status code.
847 * @param pVCpu The cross context virtual CPU structure of the calling thread.
848 * @param pu16 Where to return the opcode word.
849 */
850VBOXSTRICTRC iemOpcodeGetNextU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
851{
852 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
853 if (rcStrict == VINF_SUCCESS)
854 {
855 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
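 /* Either load the word directly (when unaligned accesses are permitted) or
    assemble it little-endian from the individual opcode bytes. */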
856# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
857 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
858# else
859 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
860# endif
861 pVCpu->iem.s.offOpcode = offOpcode + 2;
862 }
863 else
864 *pu16 = 0;
865 return rcStrict;
866}
867
868#else /* IEM_WITH_SETJMP */
869
870/**
871 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error.
872 *
873 * @returns The opcode word.
874 * @param pVCpu The cross context virtual CPU structure of the calling thread.
875 */
876uint16_t iemOpcodeGetNextU16SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
877{
878# ifdef IEM_WITH_CODE_TLB
879 uint16_t u16;
880 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
881 return u16;
882# else
883 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
884 if (rcStrict == VINF_SUCCESS)
885 {
886 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
887 pVCpu->iem.s.offOpcode += 2;
888# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
889 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
890# else
891 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
892# endif
893 }
894 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
895# endif
896}
897
898#endif /* IEM_WITH_SETJMP */
899
900#ifndef IEM_WITH_SETJMP
901
902/**
903 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
904 *
905 * @returns Strict VBox status code.
906 * @param pVCpu The cross context virtual CPU structure of the calling thread.
907 * @param pu32 Where to return the opcode double word.
908 */
909VBOXSTRICTRC iemOpcodeGetNextU16ZxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
910{
911 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
912 if (rcStrict == VINF_SUCCESS)
913 {
914 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
915 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
916 pVCpu->iem.s.offOpcode = offOpcode + 2;
917 }
918 else
919 *pu32 = 0;
920 return rcStrict;
921}
922
923
924/**
925 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
926 *
927 * @returns Strict VBox status code.
928 * @param pVCpu The cross context virtual CPU structure of the calling thread.
929 * @param pu64 Where to return the opcode quad word.
930 */
931VBOXSTRICTRC iemOpcodeGetNextU16ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
932{
933 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
934 if (rcStrict == VINF_SUCCESS)
935 {
936 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
937 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
938 pVCpu->iem.s.offOpcode = offOpcode + 2;
939 }
940 else
941 *pu64 = 0;
942 return rcStrict;
943}
944
945#endif /* !IEM_WITH_SETJMP */
946
947#ifndef IEM_WITH_SETJMP
948
949/**
950 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
951 *
952 * @returns Strict VBox status code.
953 * @param pVCpu The cross context virtual CPU structure of the calling thread.
954 * @param pu32 Where to return the opcode dword.
955 */
956VBOXSTRICTRC iemOpcodeGetNextU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
957{
958 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
959 if (rcStrict == VINF_SUCCESS)
960 {
961 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
962# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
963 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
964# else
965 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
966 pVCpu->iem.s.abOpcode[offOpcode + 1],
967 pVCpu->iem.s.abOpcode[offOpcode + 2],
968 pVCpu->iem.s.abOpcode[offOpcode + 3]);
969# endif
970 pVCpu->iem.s.offOpcode = offOpcode + 4;
971 }
972 else
973 *pu32 = 0;
974 return rcStrict;
975}
976
977#else /* IEM_WITH_SETJMP */
978
979/**
980 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
981 *
982 * @returns The opcode dword.
983 * @param pVCpu The cross context virtual CPU structure of the calling thread.
984 */
985uint32_t iemOpcodeGetNextU32SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
986{
987# ifdef IEM_WITH_CODE_TLB
988 uint32_t u32;
989 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
990 return u32;
991# else
992 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
993 if (rcStrict == VINF_SUCCESS)
994 {
995 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
996 pVCpu->iem.s.offOpcode = offOpcode + 4;
997# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
998 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
999# else
1000 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1001 pVCpu->iem.s.abOpcode[offOpcode + 1],
1002 pVCpu->iem.s.abOpcode[offOpcode + 2],
1003 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1004# endif
1005 }
1006 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1007# endif
1008}
1009
1010#endif /* IEM_WITH_SETJMP */
1011
1012#ifndef IEM_WITH_SETJMP
1013
1014/**
1015 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
1016 *
1017 * @returns Strict VBox status code.
1018 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1019 * @param pu64 Where to return the opcode dword, zero extended to a qword.
1020 */
1021VBOXSTRICTRC iemOpcodeGetNextU32ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1022{
1023 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1024 if (rcStrict == VINF_SUCCESS)
1025 {
1026 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1027 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1028 pVCpu->iem.s.abOpcode[offOpcode + 1],
1029 pVCpu->iem.s.abOpcode[offOpcode + 2],
1030 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1031 pVCpu->iem.s.offOpcode = offOpcode + 4;
1032 }
1033 else
1034 *pu64 = 0;
1035 return rcStrict;
1036}
1037
1038
1039/**
1040 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
1041 *
1042 * @returns Strict VBox status code.
1043 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1044 * @param pu64 Where to return the opcode qword.
1045 */
1046VBOXSTRICTRC iemOpcodeGetNextS32SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1047{
1048 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1049 if (rcStrict == VINF_SUCCESS)
1050 {
1051 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1052 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1053 pVCpu->iem.s.abOpcode[offOpcode + 1],
1054 pVCpu->iem.s.abOpcode[offOpcode + 2],
1055 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1056 pVCpu->iem.s.offOpcode = offOpcode + 4;
1057 }
1058 else
1059 *pu64 = 0;
1060 return rcStrict;
1061}
1062
1063#endif /* !IEM_WITH_SETJMP */
1064
1065#ifndef IEM_WITH_SETJMP
1066
1067/**
1068 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
1069 *
1070 * @returns Strict VBox status code.
1071 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1072 * @param pu64 Where to return the opcode qword.
1073 */
1074VBOXSTRICTRC iemOpcodeGetNextU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1075{
1076 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1077 if (rcStrict == VINF_SUCCESS)
1078 {
1079 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1080# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1081 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1082# else
1083 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1084 pVCpu->iem.s.abOpcode[offOpcode + 1],
1085 pVCpu->iem.s.abOpcode[offOpcode + 2],
1086 pVCpu->iem.s.abOpcode[offOpcode + 3],
1087 pVCpu->iem.s.abOpcode[offOpcode + 4],
1088 pVCpu->iem.s.abOpcode[offOpcode + 5],
1089 pVCpu->iem.s.abOpcode[offOpcode + 6],
1090 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1091# endif
1092 pVCpu->iem.s.offOpcode = offOpcode + 8;
1093 }
1094 else
1095 *pu64 = 0;
1096 return rcStrict;
1097}
1098
1099#else /* IEM_WITH_SETJMP */
1100
1101/**
1102 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
1103 *
1104 * @returns The opcode qword.
1105 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1106 */
1107uint64_t iemOpcodeGetNextU64SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1108{
1109# ifdef IEM_WITH_CODE_TLB
1110 uint64_t u64;
1111 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
1112 return u64;
1113# else
1114 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1115 if (rcStrict == VINF_SUCCESS)
1116 {
1117 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1118 pVCpu->iem.s.offOpcode = offOpcode + 8;
1119# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1120 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1121# else
1122 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1123 pVCpu->iem.s.abOpcode[offOpcode + 1],
1124 pVCpu->iem.s.abOpcode[offOpcode + 2],
1125 pVCpu->iem.s.abOpcode[offOpcode + 3],
1126 pVCpu->iem.s.abOpcode[offOpcode + 4],
1127 pVCpu->iem.s.abOpcode[offOpcode + 5],
1128 pVCpu->iem.s.abOpcode[offOpcode + 6],
1129 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1130# endif
1131 }
1132 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1133# endif
1134}
1135
1136#endif /* IEM_WITH_SETJMP */
1137