/* $Id: IEMInlineDecode-x86.h 108260 2025-02-17 15:24:14Z vboxsync $ */
/** @file
 * IEM - Interpreted Execution Manager - Inlined Decoding related Functions, x86 target.
 */

/*
 * Copyright (C) 2011-2024 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */

#ifndef VMM_INCLUDED_SRC_VMMAll_target_x86_IEMInlineDecode_x86_h
#define VMM_INCLUDED_SRC_VMMAll_target_x86_IEMInlineDecode_x86_h
#ifndef RT_WITHOUT_PRAGMA_ONCE
# pragma once
#endif

#include <VBox/err.h>


#ifndef IEM_WITH_OPAQUE_DECODER_STATE

# ifndef IEM_WITH_SETJMP

/**
 * Fetches the first opcode byte.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure of the
 *                  calling thread.
 * @param   pu8     Where to return the opcode byte.
 */
DECLINLINE(VBOXSTRICTRC) iemOpcodeGetFirstU8(PVMCPUCC pVCpu, uint8_t *pu8) RT_NOEXCEPT
{
    /*
     * Check for hardware instruction breakpoints.
     * Note! Guest breakpoints are only checked after POP SS or MOV SS on AMD CPUs.
     */
    if (RT_LIKELY(!(pVCpu->iem.s.fExec & IEM_F_PENDING_BRK_INSTR)))
    { /* likely */ }
    else
    {
        VBOXSTRICTRC rcStrict = DBGFBpCheckInstruction(pVCpu->CTX_SUFF(pVM), pVCpu,
                                                       pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base,
                                                          !(pVCpu->cpum.GstCtx.rflags.uBoth & CPUMCTX_INHIBIT_SHADOW_SS)
                                                       || IEM_IS_GUEST_CPU_AMD(pVCpu));
        if (RT_LIKELY(rcStrict == VINF_SUCCESS))
        { /* likely */ }
        else
        {
            *pu8 = 0xff; /* shut up gcc. sigh */
            if (rcStrict == VINF_EM_RAW_GUEST_TRAP)
                return iemRaiseDebugException(pVCpu);
            return rcStrict;
        }
    }

    /*
     * Fetch the first opcode byte.
     */
    uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
    if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
    {
        pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
        *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
        return VINF_SUCCESS;
    }
    return iemOpcodeGetNextU8Slow(pVCpu, pu8);
}

# else /* IEM_WITH_SETJMP */

/**
 * Fetches the first opcode byte, longjmp on error.
 *
 * @returns The opcode byte.
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 */
DECL_INLINE_THROW(uint8_t) iemOpcodeGetFirstU8Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
{
    /*
     * Check for hardware instruction breakpoints.
     * Note! Guest breakpoints are only checked after POP SS or MOV SS on AMD CPUs.
     */
    if (RT_LIKELY(!(pVCpu->iem.s.fExec & IEM_F_PENDING_BRK_INSTR)))
    { /* likely */ }
    else
    {
        VBOXSTRICTRC rcStrict = DBGFBpCheckInstruction(pVCpu->CTX_SUFF(pVM), pVCpu,
                                                       pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base,
                                                          !(pVCpu->cpum.GstCtx.rflags.uBoth & CPUMCTX_INHIBIT_SHADOW_SS)
                                                       || IEM_IS_GUEST_CPU_AMD(pVCpu));
        if (RT_LIKELY(rcStrict == VINF_SUCCESS))
        { /* likely */ }
        else
        {
            if (rcStrict == VINF_EM_RAW_GUEST_TRAP)
                rcStrict = iemRaiseDebugException(pVCpu);
            IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
        }
    }

    /*
     * Fetch the first opcode byte.
     */
#  ifdef IEM_WITH_CODE_TLB
    uint8_t         bRet;
    uintptr_t       offBuf = pVCpu->iem.s.offInstrNextByte;
    uint8_t const  *pbBuf  = pVCpu->iem.s.pbInstrBuf;
    if (RT_LIKELY(   pbBuf != NULL
                  && offBuf < pVCpu->iem.s.cbInstrBuf))
    {
        pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
        bRet = pbBuf[offBuf];
    }
    else
        bRet = iemOpcodeGetNextU8SlowJmp(pVCpu);
#   ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
    Assert(pVCpu->iem.s.offOpcode == 0);
    pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++] = bRet;
#   endif
    return bRet;

#  else /* !IEM_WITH_CODE_TLB */
    uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
    if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
    {
        pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
        return pVCpu->iem.s.abOpcode[offOpcode];
    }
    return iemOpcodeGetNextU8SlowJmp(pVCpu);
#  endif
}

# endif /* IEM_WITH_SETJMP */

/**
 * Fetches the first opcode byte, returns/throws automatically on failure.
 *
 * @param   a_pu8   Where to return the opcode byte.
 * @remark Implicitly references pVCpu.
 */
# ifndef IEM_WITH_SETJMP
#  define IEM_OPCODE_GET_FIRST_U8(a_pu8) \
    do \
    { \
        VBOXSTRICTRC rcStrict2 = iemOpcodeGetFirstU8(pVCpu, (a_pu8)); \
        if (rcStrict2 == VINF_SUCCESS) \
        { /* likely */ } \
        else \
            return rcStrict2; \
    } while (0)
# else
#  define IEM_OPCODE_GET_FIRST_U8(a_pu8) (*(a_pu8) = iemOpcodeGetFirstU8Jmp(pVCpu))
# endif /* IEM_WITH_SETJMP */
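
/*
 * Usage sketch (illustrative, not part of the original file): an instruction
 * dispatcher would fetch the leading opcode byte via the macro above, which
 * either returns the strict status code or longjmps depending on the build:
 *
 *      uint8_t b;
 *      IEM_OPCODE_GET_FIRST_U8(&b);
 *      // ... dispatch on b ...
 */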


# ifndef IEM_WITH_SETJMP

/**
 * Fetches the next opcode byte.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure of the
 *                  calling thread.
 * @param   pu8     Where to return the opcode byte.
 */
DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PVMCPUCC pVCpu, uint8_t *pu8) RT_NOEXCEPT
{
    uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
    if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
    {
        pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
        *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
        return VINF_SUCCESS;
    }
    return iemOpcodeGetNextU8Slow(pVCpu, pu8);
}

# else /* IEM_WITH_SETJMP */

/**
 * Fetches the next opcode byte, longjmp on error.
 *
 * @returns The opcode byte.
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 */
DECL_INLINE_THROW(uint8_t) iemOpcodeGetNextU8Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
{
#  ifdef IEM_WITH_CODE_TLB
    uint8_t         bRet;
    uintptr_t       offBuf = pVCpu->iem.s.offInstrNextByte;
    uint8_t const  *pbBuf  = pVCpu->iem.s.pbInstrBuf;
    if (RT_LIKELY(   pbBuf != NULL
                  && offBuf < pVCpu->iem.s.cbInstrBuf))
    {
        pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
        bRet = pbBuf[offBuf];
    }
    else
        bRet = iemOpcodeGetNextU8SlowJmp(pVCpu);
#   ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
    Assert(pVCpu->iem.s.offOpcode < sizeof(pVCpu->iem.s.abOpcode));
    pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++] = bRet;
#   endif
    return bRet;

#  else /* !IEM_WITH_CODE_TLB */
    uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
    if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
    {
        pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
        return pVCpu->iem.s.abOpcode[offOpcode];
    }
    return iemOpcodeGetNextU8SlowJmp(pVCpu);
#  endif
}

# endif /* IEM_WITH_SETJMP */

/**
 * Fetches the next opcode byte, returns automatically on failure.
 *
 * @param   a_pu8   Where to return the opcode byte.
 * @remark Implicitly references pVCpu.
 */
# ifndef IEM_WITH_SETJMP
#  define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
    do \
    { \
        VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pVCpu, (a_pu8)); \
        if (rcStrict2 == VINF_SUCCESS) \
        { /* likely */ } \
        else \
            return rcStrict2; \
    } while (0)
# else
#  define IEM_OPCODE_GET_NEXT_U8(a_pu8) (*(a_pu8) = iemOpcodeGetNextU8Jmp(pVCpu))
# endif /* IEM_WITH_SETJMP */
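
/*
 * Usage sketch (illustrative): instruction implementations pull trailing
 * bytes, e.g. the ModR/M byte, through the macro above; pVCpu is referenced
 * implicitly and the failure path returns/longjmps out of the caller:
 *
 *      uint8_t bRm;
 *      IEM_OPCODE_GET_NEXT_U8(&bRm);
 */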


# ifndef IEM_WITH_SETJMP
/**
 * Fetches the next signed byte from the opcode stream.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 * @param   pi8     Where to return the signed byte.
 */
DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PVMCPUCC pVCpu, int8_t *pi8) RT_NOEXCEPT
{
    return iemOpcodeGetNextU8(pVCpu, (uint8_t *)pi8);
}
# endif /* !IEM_WITH_SETJMP */


/**
 * Fetches the next signed byte from the opcode stream, returning automatically
 * on failure.
 *
 * @param   a_pi8   Where to return the signed byte.
 * @remark Implicitly references pVCpu.
 */
# ifndef IEM_WITH_SETJMP
#  define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
    do \
    { \
        VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pVCpu, (a_pi8)); \
        if (rcStrict2 != VINF_SUCCESS) \
            return rcStrict2; \
    } while (0)
# else /* IEM_WITH_SETJMP */
#  define IEM_OPCODE_GET_NEXT_S8(a_pi8) (*(a_pi8) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))

# endif /* IEM_WITH_SETJMP */
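
/*
 * Usage sketch (illustrative): 8-bit branch displacements are fetched signed,
 * so a hypothetical JMP rel8 decoder would do:
 *
 *      int8_t i8Imm;
 *      IEM_OPCODE_GET_NEXT_S8(&i8Imm);
 *      // ... the MC block then adds i8Imm to the instruction pointer ...
 */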


# ifndef IEM_WITH_SETJMP
/**
 * Fetches the next signed byte from the opcode stream, sign-extending it to
 * an unsigned 16-bit value.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 * @param   pu16    Where to return the unsigned word.
 */
DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
{
    uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
    if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
        return iemOpcodeGetNextS8SxU16Slow(pVCpu, pu16);

    *pu16 = (uint16_t)(int16_t)(int8_t)pVCpu->iem.s.abOpcode[offOpcode];
    pVCpu->iem.s.offOpcode = offOpcode + 1;
    return VINF_SUCCESS;
}
# endif /* !IEM_WITH_SETJMP */

/**
 * Fetches the next signed byte from the opcode stream, sign-extending it to a
 * word and returning automatically on failure.
 *
 * @param   a_pu16  Where to return the word.
 * @remark Implicitly references pVCpu.
 */
# ifndef IEM_WITH_SETJMP
#  define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
    do \
    { \
        VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pVCpu, (a_pu16)); \
        if (rcStrict2 != VINF_SUCCESS) \
            return rcStrict2; \
    } while (0)
# else
#  define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) (*(a_pu16) = (uint16_t)(int16_t)(int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
# endif

# ifndef IEM_WITH_SETJMP
/**
 * Fetches the next signed byte from the opcode stream, sign-extending it to
 * an unsigned 32-bit value.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 * @param   pu32    Where to return the unsigned dword.
 */
DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
{
    uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
    if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
        return iemOpcodeGetNextS8SxU32Slow(pVCpu, pu32);

    *pu32 = (uint32_t)(int32_t)(int8_t)pVCpu->iem.s.abOpcode[offOpcode];
    pVCpu->iem.s.offOpcode = offOpcode + 1;
    return VINF_SUCCESS;
}
# endif /* !IEM_WITH_SETJMP */

/**
 * Fetches the next signed byte from the opcode stream, sign-extending it to a
 * double word and returning automatically on failure.
 *
 * @param   a_pu32  Where to return the double word.
 * @remark Implicitly references pVCpu.
 */
# ifndef IEM_WITH_SETJMP
#  define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
    do \
    { \
        VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pVCpu, (a_pu32)); \
        if (rcStrict2 != VINF_SUCCESS) \
            return rcStrict2; \
    } while (0)
# else
#  define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) (*(a_pu32) = (uint32_t)(int32_t)(int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
# endif


# ifndef IEM_WITH_SETJMP
/**
 * Fetches the next signed byte from the opcode stream, sign-extending it to
 * an unsigned 64-bit value.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 * @param   pu64    Where to return the unsigned qword.
 */
DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
{
    uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
    if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
        return iemOpcodeGetNextS8SxU64Slow(pVCpu, pu64);

    *pu64 = (uint64_t)(int64_t)(int8_t)pVCpu->iem.s.abOpcode[offOpcode];
    pVCpu->iem.s.offOpcode = offOpcode + 1;
    return VINF_SUCCESS;
}
# endif /* !IEM_WITH_SETJMP */

/**
 * Fetches the next signed byte from the opcode stream, sign-extending it to a
 * quad word and returning automatically on failure.
 *
 * @param   a_pu64  Where to return the quad word.
 * @remark Implicitly references pVCpu.
 */
# ifndef IEM_WITH_SETJMP
#  define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
    do \
    { \
        VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pVCpu, (a_pu64)); \
        if (rcStrict2 != VINF_SUCCESS) \
            return rcStrict2; \
    } while (0)
# else
#  define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) (*(a_pu64) = (uint64_t)(int64_t)(int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
# endif
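
/*
 * Usage sketch (illustrative): the three S8_SX variants above let a decoder
 * widen an imm8 to whatever the effective operand size demands, e.g. for the
 * sign-extended imm8 instruction forms (hypothetical skeleton):
 *
 *      switch (pVCpu->iem.s.enmEffOpSize)
 *      {
 *          case IEMMODE_16BIT: { uint16_t u16Imm; IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16Imm); break; }
 *          case IEMMODE_32BIT: { uint32_t u32Imm; IEM_OPCODE_GET_NEXT_S8_SX_U32(&u32Imm); break; }
 *          case IEMMODE_64BIT: { uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S8_SX_U64(&u64Imm); break; }
 *          default: break;
 *      }
 */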


# ifndef IEM_WITH_SETJMP

/**
 * Fetches the next opcode word.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 * @param   pu16    Where to return the opcode word.
 */
DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
{
    uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
    if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
    {
        pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
#  ifdef IEM_USE_UNALIGNED_DATA_ACCESS
        *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
#  else
        *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
#  endif
        return VINF_SUCCESS;
    }
    return iemOpcodeGetNextU16Slow(pVCpu, pu16);
}

# else /* IEM_WITH_SETJMP */

/**
 * Fetches the next opcode word, longjmp on error.
 *
 * @returns The opcode word.
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 */
DECL_INLINE_THROW(uint16_t) iemOpcodeGetNextU16Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
{
#  ifdef IEM_WITH_CODE_TLB
    uint16_t        u16Ret;
    uintptr_t       offBuf = pVCpu->iem.s.offInstrNextByte;
    uint8_t const  *pbBuf  = pVCpu->iem.s.pbInstrBuf;
    if (RT_LIKELY(   pbBuf != NULL
                  && offBuf + 2 <= pVCpu->iem.s.cbInstrBuf))
    {
        pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 2;
#   ifdef IEM_USE_UNALIGNED_DATA_ACCESS
        u16Ret = *(uint16_t const *)&pbBuf[offBuf];
#   else
        u16Ret = RT_MAKE_U16(pbBuf[offBuf], pbBuf[offBuf + 1]);
#   endif
    }
    else
        u16Ret = iemOpcodeGetNextU16SlowJmp(pVCpu);

#   ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
    uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
    Assert(offOpcode + 1 < sizeof(pVCpu->iem.s.abOpcode));
#    ifdef IEM_USE_UNALIGNED_DATA_ACCESS
    *(uint16_t *)&pVCpu->iem.s.abOpcode[offOpcode] = u16Ret;
#    else
    pVCpu->iem.s.abOpcode[offOpcode]     = RT_LO_U8(u16Ret);
    pVCpu->iem.s.abOpcode[offOpcode + 1] = RT_HI_U8(u16Ret);
#    endif
    pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + (uint8_t)2;
#   endif

    return u16Ret;

#  else /* !IEM_WITH_CODE_TLB */
    uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
    if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
    {
        pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
#   ifdef IEM_USE_UNALIGNED_DATA_ACCESS
        return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
#   else
        return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
#   endif
    }
    return iemOpcodeGetNextU16SlowJmp(pVCpu);
#  endif /* !IEM_WITH_CODE_TLB */
}

# endif /* IEM_WITH_SETJMP */

/**
 * Fetches the next opcode word, returns automatically on failure.
 *
 * @param   a_pu16  Where to return the opcode word.
 * @remark Implicitly references pVCpu.
 */
# ifndef IEM_WITH_SETJMP
#  define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
    do \
    { \
        VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pVCpu, (a_pu16)); \
        if (rcStrict2 != VINF_SUCCESS) \
            return rcStrict2; \
    } while (0)
# else
#  define IEM_OPCODE_GET_NEXT_U16(a_pu16) (*(a_pu16) = iemOpcodeGetNextU16Jmp(pVCpu))
# endif
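
/*
 * Usage sketch (illustrative): a 16-bit immediate (e.g. RET imm16) is fetched
 * in one go rather than byte by byte:
 *
 *      uint16_t u16Imm;
 *      IEM_OPCODE_GET_NEXT_U16(&u16Imm);
 */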

# ifndef IEM_WITH_SETJMP
/**
 * Fetches the next opcode word, zero extending it to a double word.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 * @param   pu32    Where to return the opcode double word.
 */
DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
{
    uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
    if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
        return iemOpcodeGetNextU16ZxU32Slow(pVCpu, pu32);

    *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
    pVCpu->iem.s.offOpcode = offOpcode + 2;
    return VINF_SUCCESS;
}
# endif /* !IEM_WITH_SETJMP */

/**
 * Fetches the next opcode word and zero extends it to a double word, returns
 * automatically on failure.
 *
 * @param   a_pu32  Where to return the opcode double word.
 * @remark Implicitly references pVCpu.
 */
# ifndef IEM_WITH_SETJMP
#  define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
    do \
    { \
        VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pVCpu, (a_pu32)); \
        if (rcStrict2 != VINF_SUCCESS) \
            return rcStrict2; \
    } while (0)
# else
#  define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU16Jmp(pVCpu))
# endif

# ifndef IEM_WITH_SETJMP
/**
 * Fetches the next opcode word, zero extending it to a quad word.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 * @param   pu64    Where to return the opcode quad word.
 */
DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
{
    uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
    if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
        return iemOpcodeGetNextU16ZxU64Slow(pVCpu, pu64);

    *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
    pVCpu->iem.s.offOpcode = offOpcode + 2;
    return VINF_SUCCESS;
}
# endif /* !IEM_WITH_SETJMP */

/**
 * Fetches the next opcode word and zero extends it to a quad word, returns
 * automatically on failure.
 *
 * @param   a_pu64  Where to return the opcode quad word.
 * @remark Implicitly references pVCpu.
 */
# ifndef IEM_WITH_SETJMP
#  define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
    do \
    { \
        VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pVCpu, (a_pu64)); \
        if (rcStrict2 != VINF_SUCCESS) \
            return rcStrict2; \
    } while (0)
# else
#  define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU16Jmp(pVCpu))
# endif
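
/*
 * Usage sketch (illustrative): the ZX variants let a 16-bit operand feed an
 * MC block that operates on a wider register without a separate extension
 * step:
 *
 *      uint64_t u64Imm;
 *      IEM_OPCODE_GET_NEXT_U16_ZX_U64(&u64Imm);
 */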


# ifndef IEM_WITH_SETJMP
/**
 * Fetches the next signed word from the opcode stream.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 * @param   pi16    Where to return the signed word.
 */
DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PVMCPUCC pVCpu, int16_t *pi16) RT_NOEXCEPT
{
    return iemOpcodeGetNextU16(pVCpu, (uint16_t *)pi16);
}
# endif /* !IEM_WITH_SETJMP */


/**
 * Fetches the next signed word from the opcode stream, returning automatically
 * on failure.
 *
 * @param   a_pi16  Where to return the signed word.
 * @remark Implicitly references pVCpu.
 */
# ifndef IEM_WITH_SETJMP
#  define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
    do \
    { \
        VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pVCpu, (a_pi16)); \
        if (rcStrict2 != VINF_SUCCESS) \
            return rcStrict2; \
    } while (0)
# else
#  define IEM_OPCODE_GET_NEXT_S16(a_pi16) (*(a_pi16) = (int16_t)iemOpcodeGetNextU16Jmp(pVCpu))
# endif
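
/*
 * Usage sketch (illustrative): 16-bit relative branch targets are fetched
 * signed so the displacement arithmetic stays natural:
 *
 *      int16_t i16Imm;
 *      IEM_OPCODE_GET_NEXT_S16(&i16Imm);
 */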

# ifndef IEM_WITH_SETJMP

/**
 * Fetches the next opcode dword.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 * @param   pu32    Where to return the opcode double word.
 */
DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
{
    uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
    if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
    {
        pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
#  ifdef IEM_USE_UNALIGNED_DATA_ACCESS
        *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
#  else
        *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
                                    pVCpu->iem.s.abOpcode[offOpcode + 1],
                                    pVCpu->iem.s.abOpcode[offOpcode + 2],
                                    pVCpu->iem.s.abOpcode[offOpcode + 3]);
#  endif
        return VINF_SUCCESS;
    }
    return iemOpcodeGetNextU32Slow(pVCpu, pu32);
}

# else /* IEM_WITH_SETJMP */

/**
 * Fetches the next opcode dword, longjmp on error.
 *
 * @returns The opcode dword.
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 */
DECL_INLINE_THROW(uint32_t) iemOpcodeGetNextU32Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
{
#  ifdef IEM_WITH_CODE_TLB
    uint32_t        u32Ret;
    uintptr_t       offBuf = pVCpu->iem.s.offInstrNextByte;
    uint8_t const  *pbBuf  = pVCpu->iem.s.pbInstrBuf;
    if (RT_LIKELY(   pbBuf != NULL
                  && offBuf + 4 <= pVCpu->iem.s.cbInstrBuf))
    {
        pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 4;
#   ifdef IEM_USE_UNALIGNED_DATA_ACCESS
        u32Ret = *(uint32_t const *)&pbBuf[offBuf];
#   else
        u32Ret = RT_MAKE_U32_FROM_U8(pbBuf[offBuf],
                                     pbBuf[offBuf + 1],
                                     pbBuf[offBuf + 2],
                                     pbBuf[offBuf + 3]);
#   endif
    }
    else
        u32Ret = iemOpcodeGetNextU32SlowJmp(pVCpu);

#   ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
    uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
    Assert(offOpcode + 3 < sizeof(pVCpu->iem.s.abOpcode));
#    ifdef IEM_USE_UNALIGNED_DATA_ACCESS
    *(uint32_t *)&pVCpu->iem.s.abOpcode[offOpcode] = u32Ret;
#    else
    pVCpu->iem.s.abOpcode[offOpcode]     = RT_BYTE1(u32Ret);
    pVCpu->iem.s.abOpcode[offOpcode + 1] = RT_BYTE2(u32Ret);
    pVCpu->iem.s.abOpcode[offOpcode + 2] = RT_BYTE3(u32Ret);
    pVCpu->iem.s.abOpcode[offOpcode + 3] = RT_BYTE4(u32Ret);
#    endif
    pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + (uint8_t)4;
#   endif /* IEM_WITH_CODE_TLB_AND_OPCODE_BUF */

    return u32Ret;

#  else /* !IEM_WITH_CODE_TLB */
    uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
    if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
    {
        pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
#   ifdef IEM_USE_UNALIGNED_DATA_ACCESS
        return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
#   else
        return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
                                   pVCpu->iem.s.abOpcode[offOpcode + 1],
                                   pVCpu->iem.s.abOpcode[offOpcode + 2],
                                   pVCpu->iem.s.abOpcode[offOpcode + 3]);
#   endif
    }
    return iemOpcodeGetNextU32SlowJmp(pVCpu);
#  endif
}

# endif /* IEM_WITH_SETJMP */

/**
 * Fetches the next opcode dword, returns automatically on failure.
 *
 * @param   a_pu32  Where to return the opcode dword.
 * @remark Implicitly references pVCpu.
 */
# ifndef IEM_WITH_SETJMP
#  define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
    do \
    { \
        VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pVCpu, (a_pu32)); \
        if (rcStrict2 != VINF_SUCCESS) \
            return rcStrict2; \
    } while (0)
# else
#  define IEM_OPCODE_GET_NEXT_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU32Jmp(pVCpu))
# endif
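
/*
 * Usage sketch (illustrative): a plain 32-bit immediate operand:
 *
 *      uint32_t u32Imm;
 *      IEM_OPCODE_GET_NEXT_U32(&u32Imm);
 */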

# ifndef IEM_WITH_SETJMP
/**
 * Fetches the next opcode dword, zero extending it to a quad word.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 * @param   pu64    Where to return the opcode quad word.
 */
DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
{
    uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
    if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
        return iemOpcodeGetNextU32ZxU64Slow(pVCpu, pu64);

    *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
                                pVCpu->iem.s.abOpcode[offOpcode + 1],
                                pVCpu->iem.s.abOpcode[offOpcode + 2],
                                pVCpu->iem.s.abOpcode[offOpcode + 3]);
    pVCpu->iem.s.offOpcode = offOpcode + 4;
    return VINF_SUCCESS;
}
# endif /* !IEM_WITH_SETJMP */

/**
 * Fetches the next opcode dword and zero extends it to a quad word, returns
 * automatically on failure.
 *
 * @param   a_pu64  Where to return the opcode quad word.
 * @remark Implicitly references pVCpu.
 */
# ifndef IEM_WITH_SETJMP
#  define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
    do \
    { \
        VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pVCpu, (a_pu64)); \
        if (rcStrict2 != VINF_SUCCESS) \
            return rcStrict2; \
    } while (0)
# else
#  define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU32Jmp(pVCpu))
# endif


# ifndef IEM_WITH_SETJMP
/**
 * Fetches the next signed double word from the opcode stream.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 * @param   pi32    Where to return the signed double word.
 */
DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PVMCPUCC pVCpu, int32_t *pi32) RT_NOEXCEPT
{
    return iemOpcodeGetNextU32(pVCpu, (uint32_t *)pi32);
}
# endif

/**
 * Fetches the next signed double word from the opcode stream, returning
 * automatically on failure.
 *
 * @param   a_pi32  Where to return the signed double word.
 * @remark Implicitly references pVCpu.
 */
# ifndef IEM_WITH_SETJMP
#  define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
    do \
    { \
        VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pVCpu, (a_pi32)); \
        if (rcStrict2 != VINF_SUCCESS) \
            return rcStrict2; \
    } while (0)
# else
#  define IEM_OPCODE_GET_NEXT_S32(a_pi32) (*(a_pi32) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
# endif

# ifndef IEM_WITH_SETJMP
/**
 * Fetches the next opcode dword, sign extending it into a quad word.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 * @param   pu64    Where to return the opcode quad word.
 */
DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
{
    uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
    if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
        return iemOpcodeGetNextS32SxU64Slow(pVCpu, pu64);

    int32_t i32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
                                      pVCpu->iem.s.abOpcode[offOpcode + 1],
                                      pVCpu->iem.s.abOpcode[offOpcode + 2],
                                      pVCpu->iem.s.abOpcode[offOpcode + 3]);
    *pu64 = (uint64_t)(int64_t)i32;
    pVCpu->iem.s.offOpcode = offOpcode + 4;
    return VINF_SUCCESS;
}
# endif /* !IEM_WITH_SETJMP */

/**
 * Fetches the next opcode double word and sign extends it to a quad word,
 * returns automatically on failure.
 *
 * @param   a_pu64  Where to return the opcode quad word.
 * @remark Implicitly references pVCpu.
 */
# ifndef IEM_WITH_SETJMP
#  define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
    do \
    { \
        VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pVCpu, (a_pu64)); \
        if (rcStrict2 != VINF_SUCCESS) \
            return rcStrict2; \
    } while (0)
# else
#  define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) (*(a_pu64) = (uint64_t)(int64_t)(int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
# endif
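
/*
 * Usage sketch (illustrative): in 64-bit mode most instructions encode a
 * 32-bit immediate that the CPU sign-extends to 64 bits (e.g. ADD rax, imm32),
 * which is exactly what the macro above provides:
 *
 *      uint64_t u64Imm;
 *      IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm);
 */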

# ifndef IEM_WITH_SETJMP

/**
 * Fetches the next opcode qword.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 * @param   pu64    Where to return the opcode qword.
 */
DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
{
    uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
    if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
    {
#  ifdef IEM_USE_UNALIGNED_DATA_ACCESS
        *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
#  else
        *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
                                    pVCpu->iem.s.abOpcode[offOpcode + 1],
                                    pVCpu->iem.s.abOpcode[offOpcode + 2],
                                    pVCpu->iem.s.abOpcode[offOpcode + 3],
                                    pVCpu->iem.s.abOpcode[offOpcode + 4],
                                    pVCpu->iem.s.abOpcode[offOpcode + 5],
                                    pVCpu->iem.s.abOpcode[offOpcode + 6],
                                    pVCpu->iem.s.abOpcode[offOpcode + 7]);
#  endif
        pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
        return VINF_SUCCESS;
    }
    return iemOpcodeGetNextU64Slow(pVCpu, pu64);
}

# else /* IEM_WITH_SETJMP */

/**
 * Fetches the next opcode qword, longjmp on error.
 *
 * @returns The opcode qword.
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 */
DECL_INLINE_THROW(uint64_t) iemOpcodeGetNextU64Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
{
#  ifdef IEM_WITH_CODE_TLB
    uint64_t        u64Ret;
    uintptr_t       offBuf = pVCpu->iem.s.offInstrNextByte;
    uint8_t const  *pbBuf  = pVCpu->iem.s.pbInstrBuf;
    if (RT_LIKELY(   pbBuf != NULL
                  && offBuf + 8 <= pVCpu->iem.s.cbInstrBuf))
    {
        pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 8;
#   ifdef IEM_USE_UNALIGNED_DATA_ACCESS
        u64Ret = *(uint64_t const *)&pbBuf[offBuf];
#   else
        u64Ret = RT_MAKE_U64_FROM_U8(pbBuf[offBuf],
                                     pbBuf[offBuf + 1],
                                     pbBuf[offBuf + 2],
                                     pbBuf[offBuf + 3],
                                     pbBuf[offBuf + 4],
                                     pbBuf[offBuf + 5],
                                     pbBuf[offBuf + 6],
                                     pbBuf[offBuf + 7]);
#   endif
    }
    else
        u64Ret = iemOpcodeGetNextU64SlowJmp(pVCpu);

#   ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
    uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
    Assert(offOpcode + 7 < sizeof(pVCpu->iem.s.abOpcode));
#    ifdef IEM_USE_UNALIGNED_DATA_ACCESS
    *(uint64_t *)&pVCpu->iem.s.abOpcode[offOpcode] = u64Ret;
#    else
    pVCpu->iem.s.abOpcode[offOpcode]     = RT_BYTE1(u64Ret);
    pVCpu->iem.s.abOpcode[offOpcode + 1] = RT_BYTE2(u64Ret);
    pVCpu->iem.s.abOpcode[offOpcode + 2] = RT_BYTE3(u64Ret);
    pVCpu->iem.s.abOpcode[offOpcode + 3] = RT_BYTE4(u64Ret);
    pVCpu->iem.s.abOpcode[offOpcode + 4] = RT_BYTE5(u64Ret);
    pVCpu->iem.s.abOpcode[offOpcode + 5] = RT_BYTE6(u64Ret);
    pVCpu->iem.s.abOpcode[offOpcode + 6] = RT_BYTE7(u64Ret);
    pVCpu->iem.s.abOpcode[offOpcode + 7] = RT_BYTE8(u64Ret);
#    endif
    pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + (uint8_t)8;
#   endif /* IEM_WITH_CODE_TLB_AND_OPCODE_BUF */

    return u64Ret;

#  else /* !IEM_WITH_CODE_TLB */
    uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
    if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
    {
        pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
#   ifdef IEM_USE_UNALIGNED_DATA_ACCESS
        return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
#   else
        return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
                                   pVCpu->iem.s.abOpcode[offOpcode + 1],
                                   pVCpu->iem.s.abOpcode[offOpcode + 2],
                                   pVCpu->iem.s.abOpcode[offOpcode + 3],
                                   pVCpu->iem.s.abOpcode[offOpcode + 4],
                                   pVCpu->iem.s.abOpcode[offOpcode + 5],
                                   pVCpu->iem.s.abOpcode[offOpcode + 6],
                                   pVCpu->iem.s.abOpcode[offOpcode + 7]);
#   endif
    }
    return iemOpcodeGetNextU64SlowJmp(pVCpu);
#  endif /* !IEM_WITH_CODE_TLB */
}

# endif /* IEM_WITH_SETJMP */

/**
 * Fetches the next opcode quad word, returns automatically on failure.
 *
 * @param   a_pu64  Where to return the opcode quad word.
 * @remark Implicitly references pVCpu.
 */
# ifndef IEM_WITH_SETJMP
#  define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
    do \
    { \
        VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pVCpu, (a_pu64)); \
        if (rcStrict2 != VINF_SUCCESS) \
            return rcStrict2; \
    } while (0)
# else
#  define IEM_OPCODE_GET_NEXT_U64(a_pu64) ( *(a_pu64) = iemOpcodeGetNextU64Jmp(pVCpu) )
# endif
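
/*
 * Usage sketch (illustrative): MOV r64, imm64 is essentially the only x86
 * instruction carrying a full 64-bit immediate, making it the natural user
 * of the macro above:
 *
 *      uint64_t u64Imm;
 *      IEM_OPCODE_GET_NEXT_U64(&u64Imm);
 */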

/**
 * Fetches the opcode bytes for a ModR/M effective address, but throws away the
 * result.
 *
 * This is used when decoding undefined opcodes and such where we want to avoid
 * unnecessary MC blocks.
 *
 * @note The recompiler code overrides this one so iemOpHlpCalcRmEffAddrJmpEx is
 *       used instead. At least for now...
 */
# ifndef IEM_WITH_SETJMP
#  define IEM_OPCODE_SKIP_RM_EFF_ADDR_BYTES(a_bRm) do { \
        RTGCPTR      GCPtrEff; \
        VBOXSTRICTRC rcStrict = iemOpHlpCalcRmEffAddr(pVCpu, (a_bRm), 0, &GCPtrEff); \
        if (rcStrict != VINF_SUCCESS) \
            return rcStrict; \
    } while (0)
# else
#  define IEM_OPCODE_SKIP_RM_EFF_ADDR_BYTES(a_bRm) do { \
        (void)iemOpHlpCalcRmEffAddrJmp(pVCpu, (a_bRm), 0); \
    } while (0)
# endif
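
/*
 * Usage sketch (illustrative): an undefined-opcode stub that still has to
 * consume the bytes encoded by the ModR/M effective address before raising
 * #UD could use:
 *
 *      uint8_t bRm;
 *      IEM_OPCODE_GET_NEXT_U8(&bRm);
 *      IEM_OPCODE_SKIP_RM_EFF_ADDR_BYTES(bRm);
 */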

#endif /* !IEM_WITH_OPAQUE_DECODER_STATE */


#ifndef IEM_WITH_OPAQUE_DECODER_STATE

/**
 * Recalculates the effective operand size.
 *
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 */
DECLINLINE(void) iemRecalEffOpSize(PVMCPUCC pVCpu) RT_NOEXCEPT
{
    switch (IEM_GET_CPU_MODE(pVCpu))
    {
        case IEMMODE_16BIT:
            pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
            break;
        case IEMMODE_32BIT:
            pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
            break;
        case IEMMODE_64BIT:
            switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
            {
                case 0:
                    pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize;
                    break;
                case IEM_OP_PRF_SIZE_OP:
                    pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
                    break;
                case IEM_OP_PRF_SIZE_REX_W:
                case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
                    pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
                    break;
            }
            break;
        default:
            AssertFailed();
    }
}
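
/*
 * Worked example (illustrative): after recording a 66h prefix in 32-bit code,
 * a prefix-decoding loop would do
 *
 *      pVCpu->iem.s.fPrefixes |= IEM_OP_PRF_SIZE_OP;
 *      iemRecalEffOpSize(pVCpu);   // enmEffOpSize becomes IEMMODE_16BIT
 *
 * whereas in 64-bit mode REX.W dominates: with both REX.W and 66h recorded,
 * the inner switch above still selects IEMMODE_64BIT.
 */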


/**
 * Sets the default operand size to 64-bit and recalculates the effective
 * operand size.
 *
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 */
DECLINLINE(void) iemRecalEffOpSize64Default(PVMCPUCC pVCpu) RT_NOEXCEPT
{
    Assert(IEM_IS_64BIT_CODE(pVCpu));
    pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT;
    if ((pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
        pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
    else
        pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
}


/**
 * Sets the default operand size to 64-bit and recalculates the effective
 * operand size, with Intel ignoring any operand size prefix (AMD respects it).
 *
 * This is for the relative jumps.
 *
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 */
DECLINLINE(void) iemRecalEffOpSize64DefaultAndIntelIgnoresOpSizePrefix(PVMCPUCC pVCpu) RT_NOEXCEPT
{
    Assert(IEM_IS_64BIT_CODE(pVCpu));
    pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT;
    if (   (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP
        || pVCpu->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL)
        pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
    else
        pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
}
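
/*
 * Usage sketch (illustrative): a near relative JMP/CALL decoder in 64-bit mode
 * would call the helper above so that a 66h prefix only shrinks the operand
 * size on AMD-style CPUs:
 *
 *      iemRecalEffOpSize64DefaultAndIntelIgnoresOpSizePrefix(pVCpu);
 *      // enmEffOpSize stays IEMMODE_64BIT on Intel even with a 66h prefix.
 */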

#endif /* !IEM_WITH_OPAQUE_DECODER_STATE */


#endif /* !VMM_INCLUDED_SRC_VMMAll_target_x86_IEMInlineDecode_x86_h */