VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/target-x86/IEMInlineMem-x86.h@ 108260

Last change on this file since 108260 was 108260, checked in by vboxsync, 3 months ago

VMM/IEM: Splitting up IEMInline.h. jiraref:VBP-1531

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 17.1 KB
Line 
1/* $Id: IEMInlineMem-x86.h 108260 2025-02-17 15:24:14Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - Inlined Memory Functions, x86 target.
4 */
5
6/*
7 * Copyright (C) 2011-2024 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28#ifndef VMM_INCLUDED_SRC_VMMAll_target_x86_IEMInlineMem_x86_h
29#define VMM_INCLUDED_SRC_VMMAll_target_x86_IEMInlineMem_x86_h
30#ifndef RT_WITHOUT_PRAGMA_ONCE
31# pragma once
32#endif
33
34#include <iprt/errcore.h>
35
36
37
38
39/** @name Memory access.
40 *
41 * @{
42 */
43
44/**
45 * Checks whether alignment checks are enabled or not.
46 *
47 * @returns true if enabled, false if not.
48 * @param pVCpu The cross context virtual CPU structure of the calling thread.
49 */
50DECLINLINE(bool) iemMemAreAlignmentChecksEnabled(PVMCPUCC pVCpu) RT_NOEXCEPT
51{
52#if 0
53 AssertCompile(X86_CR0_AM == X86_EFL_AC);
54 return IEM_GET_CPL(pVCpu) == 3
55 && (((uint32_t)pVCpu->cpum.GstCtx.cr0 & pVCpu->cpum.GstCtx.eflags.u) & X86_CR0_AM);
56#else
57 return RT_BOOL(pVCpu->iem.s.fExec & IEM_F_X86_AC);
58#endif
59}
60
61/**
62 * Checks if the given segment can be written to, raise the appropriate
63 * exception if not.
64 *
65 * @returns VBox strict status code.
66 *
67 * @param pVCpu The cross context virtual CPU structure of the calling thread.
68 * @param pHid Pointer to the hidden register.
69 * @param iSegReg The register number.
70 * @param pu64BaseAddr Where to return the base address to use for the
71 * segment. (In 64-bit code it may differ from the
72 * base in the hidden segment.)
73 */
74DECLINLINE(VBOXSTRICTRC) iemMemSegCheckWriteAccessEx(PVMCPUCC pVCpu, PCCPUMSELREGHID pHid,
75 uint8_t iSegReg, uint64_t *pu64BaseAddr) RT_NOEXCEPT
76{
77 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
78
79 if (IEM_IS_64BIT_CODE(pVCpu))
80 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
81 else
82 {
83 if (!pHid->Attr.n.u1Present)
84 {
85 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
86 AssertRelease(uSel == 0);
87 LogEx(LOG_GROUP_IEM,("iemMemSegCheckWriteAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
88 return iemRaiseGeneralProtectionFault0(pVCpu);
89 }
90
91 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
92 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
93 && !IEM_IS_64BIT_CODE(pVCpu) )
94 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
95 *pu64BaseAddr = pHid->u64Base;
96 }
97 return VINF_SUCCESS;
98}
99
100
101/**
102 * Checks if the given segment can be read from, raise the appropriate
103 * exception if not.
104 *
105 * @returns VBox strict status code.
106 *
107 * @param pVCpu The cross context virtual CPU structure of the calling thread.
108 * @param pHid Pointer to the hidden register.
109 * @param iSegReg The register number.
110 * @param pu64BaseAddr Where to return the base address to use for the
111 * segment. (In 64-bit code it may differ from the
112 * base in the hidden segment.)
113 */
114DECLINLINE(VBOXSTRICTRC) iemMemSegCheckReadAccessEx(PVMCPUCC pVCpu, PCCPUMSELREGHID pHid,
115 uint8_t iSegReg, uint64_t *pu64BaseAddr) RT_NOEXCEPT
116{
117 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
118
119 if (IEM_IS_64BIT_CODE(pVCpu))
120 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
121 else
122 {
123 if (!pHid->Attr.n.u1Present)
124 {
125 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
126 AssertRelease(uSel == 0);
127 LogEx(LOG_GROUP_IEM,("iemMemSegCheckReadAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
128 return iemRaiseGeneralProtectionFault0(pVCpu);
129 }
130
131 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
132 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
133 *pu64BaseAddr = pHid->u64Base;
134 }
135 return VINF_SUCCESS;
136}
137
138
139#ifdef IEM_WITH_SETJMP
140
/** @todo slim this down */
/**
 * Applies segmentation to a guest-virtual address for a read access,
 * longjmp-raising the appropriate exception on failure.
 *
 * @returns The flat (linear) address to use for the access.
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   iSegReg     The segment register index, or UINT8_MAX for a flat
 *                      address.  NOTE(review): the Assert below requires
 *                      iSegReg < X86_SREG_COUNT, yet both branches test for
 *                      UINT8_MAX - in strict builds a UINT8_MAX caller would
 *                      trip the assertion; confirm intended callers.
 * @param   cbMem       Number of bytes accessed (>= 1).
 * @param   GCPtrMem    The segment-relative address of the access.
 */
DECL_INLINE_THROW(RTGCPTR) iemMemApplySegmentToReadJmp(PVMCPUCC pVCpu, uint8_t iSegReg,
                                                       size_t cbMem, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
{
    Assert(cbMem >= 1);
    Assert(iSegReg < X86_SREG_COUNT);

    /*
     * 64-bit mode is simpler.
     */
    if (IEM_IS_64BIT_CODE(pVCpu))
    {
        /* Only FS and GS have a non-zero base in 64-bit mode. */
        if (iSegReg >= X86_SREG_FS && iSegReg != UINT8_MAX)
        {
            IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
            PCPUMSELREGHID const pSel = iemSRegGetHid(pVCpu, iSegReg);
            GCPtrMem += pSel->u64Base;
        }

        /* Both the first and the last byte of the access must be canonical. */
        if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
            return GCPtrMem;
        iemRaiseGeneralProtectionFault0Jmp(pVCpu);
    }
    /*
     * 16-bit and 32-bit segmentation.
     */
    else if (iSegReg != UINT8_MAX)
    {
        /** @todo Does this apply to segments with 4G-1 limit? */
        uint32_t const GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem - 1;
        /* Reject accesses that wrap around the 32-bit address space. */
        if (RT_LIKELY(GCPtrLast32 >= (uint32_t)GCPtrMem))
        {
            IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
            PCPUMSELREGHID const pSel = iemSRegGetHid(pVCpu, iSegReg);
            /* Dispatch on the descriptor bits that matter for a read:
               present/unusable, type (code/data), readability, and
               expansion direction. */
            switch (pSel->Attr.u & (  X86DESCATTR_P     | X86DESCATTR_UNUSABLE
                                    | X86_SEL_TYPE_READ | X86_SEL_TYPE_WRITE /* same as read */
                                    | X86_SEL_TYPE_DOWN | X86_SEL_TYPE_CONF  /* same as down */
                                    | X86_SEL_TYPE_CODE))
            {
                case X86DESCATTR_P:                                         /* readonly data, expand up */
                case X86DESCATTR_P | X86_SEL_TYPE_WRITE:                    /* writable data, expand up */
                case X86DESCATTR_P | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ: /* code, read-only */
                case X86DESCATTR_P | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ | X86_SEL_TYPE_CONF: /* conforming code, read-only */
                    /* expand up: last byte must be within the limit. */
                    if (RT_LIKELY(GCPtrLast32 <= pSel->u32Limit))
                        return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
                    Log10(("iemMemApplySegmentToReadJmp: out of bounds %#x..%#x vs %#x\n",
                           (uint32_t)GCPtrMem, GCPtrLast32, pSel->u32Limit));
                    break;

                case X86DESCATTR_P | X86_SEL_TYPE_DOWN:                      /* readonly data, expand down */
                case X86DESCATTR_P | X86_SEL_TYPE_DOWN | X86_SEL_TYPE_WRITE: /* writable data, expand down */
                    /* expand down: valid range is (limit, upper bound], where
                       the upper bound is 4G-1 if B is set, else 64K-1. */
                    if (RT_LIKELY(   (uint32_t)GCPtrMem > pSel->u32Limit
                                  && (   pSel->Attr.n.u1DefBig
                                      || GCPtrLast32 <= UINT32_C(0xffff)) ))
                        return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
                    Log10(("iemMemApplySegmentToReadJmp: expand down out of bounds %#x..%#x vs %#x..%#x\n",
                           (uint32_t)GCPtrMem, GCPtrLast32, pSel->u32Limit, pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT16_MAX));
                    break;

                default:
                    /* Not present, unusable, or execute-only -> #GP/#SS via raise helper. */
                    Log10(("iemMemApplySegmentToReadJmp: bad selector %#x\n", pSel->Attr.u));
                    iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
                    break;
            }
        }
        /* Wrapped around, or fell out of the limit checks above. */
        Log10(("iemMemApplySegmentToReadJmp: out of bounds %#x..%#x\n",(uint32_t)GCPtrMem, GCPtrLast32));
        iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
    }
    /*
     * 32-bit flat address.
     */
    else
        return GCPtrMem;
}
217
218
/** @todo slim this down */
/**
 * Applies segmentation to a guest-virtual address for a write access,
 * longjmp-raising the appropriate exception on failure.
 *
 * @returns The flat (linear) address to use for the access.
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   iSegReg     The segment register index.
 *                      NOTE(review): unlike iemMemApplySegmentToReadJmp this
 *                      variant has no UINT8_MAX flat-address path; the 64-bit
 *                      branch only tests iSegReg >= X86_SREG_FS.  Confirm
 *                      callers never pass UINT8_MAX here.
 * @param   cbMem       Number of bytes accessed (>= 1).
 * @param   GCPtrMem    The segment-relative address of the access.
 */
DECL_INLINE_THROW(RTGCPTR) iemMemApplySegmentToWriteJmp(PVMCPUCC pVCpu, uint8_t iSegReg, size_t cbMem,
                                                        RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
{
    Assert(cbMem >= 1);
    Assert(iSegReg < X86_SREG_COUNT);

    /*
     * 64-bit mode is simpler.
     */
    if (IEM_IS_64BIT_CODE(pVCpu))
    {
        /* Only FS and GS have a non-zero base in 64-bit mode. */
        if (iSegReg >= X86_SREG_FS)
        {
            IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
            PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
            GCPtrMem += pSel->u64Base;
        }

        /* Both the first and the last byte of the access must be canonical;
           otherwise fall through to the #GP(0) raise at the bottom. */
        if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
            return GCPtrMem;
    }
    /*
     * 16-bit and 32-bit segmentation.
     */
    else
    {
        Assert(GCPtrMem <= UINT32_MAX);
        IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
        PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
        /* Keep only the bits that decide writability and expansion direction. */
        uint32_t const fRelevantAttrs = pSel->Attr.u & (  X86DESCATTR_P | X86DESCATTR_UNUSABLE
                                                        | X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN);
        if (   fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE) /* data, expand up */
            /** @todo explore exactly how the CS stuff works in real mode.  See also
             *        http://www.rcollins.org/Productivity/DescriptorCache.html and
             *        http://www.rcollins.org/ddj/Aug98/Aug98.html for some insight. */
            || (iSegReg == X86_SREG_CS && IEM_IS_REAL_OR_V86_MODE(pVCpu)) ) /* Ignored for CS. */ /** @todo testcase! */
        {
            /* expand up: last byte must be within the limit and must not wrap. */
            uint32_t const GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem - 1;
            if (RT_LIKELY(   GCPtrLast32 <= pSel->u32Limit
                          && GCPtrLast32 >= (uint32_t)GCPtrMem))
                return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
            iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
        }
        else if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN)) /* data, expand down */
        {
            /* expand down - the upper boundary is defined by the B bit, not G. */
            uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem - 1;
            if (RT_LIKELY(   (uint32_t)GCPtrMem >= pSel->u32Limit
                          && (pSel->Attr.n.u1DefBig || GCPtrLast32 <= UINT32_C(0xffff))
                          && GCPtrLast32 >= (uint32_t)GCPtrMem))
                return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
            iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
        }
        else
            /* Not present, unusable, code, or read-only data segment. */
            iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
    }
    /* Non-canonical 64-bit address ends up here. */
    iemRaiseGeneralProtectionFault0Jmp(pVCpu);
}
279
280#endif /* IEM_WITH_SETJMP */
281
282/**
283 * Fakes a long mode stack selector for SS = 0.
284 *
285 * @param pDescSs Where to return the fake stack descriptor.
286 * @param uDpl The DPL we want.
287 */
288DECLINLINE(void) iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl) RT_NOEXCEPT
289{
290 pDescSs->Long.au64[0] = 0;
291 pDescSs->Long.au64[1] = 0;
292 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
293 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
294 pDescSs->Long.Gen.u2Dpl = uDpl;
295 pDescSs->Long.Gen.u1Present = 1;
296 pDescSs->Long.Gen.u1Long = 1;
297}
298
299
/*
 * Instantiate R/W inline templates.
 *
 * Each group below sets the TMPL_MEM_* configuration macros and then includes
 * IEMAllMemRWTmplInline.cpp.h to generate the inline accessors for that type.
 * NOTE(review): the TMPL_MEM_TYPE/ALIGN/SIZE/FN_SUFF/FMT_* macros (and
 * TMPL_MEM_NO_STORE) are re-#defined for each group without an #undef here -
 * presumably the included template #undefs them; confirm in
 * IEMAllMemRWTmplInline.cpp.h.
 */

/** @def TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK
 * Used to check if an unaligned access is within the page and won't
 * trigger an \#AC.
 *
 * This can also be used to deal with misaligned accesses on platforms that are
 * sensitive to such, if desired.
 */
#if 1
# define TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(a_pVCpu, a_GCPtrEff, a_TmplMemType) \
    (   ((a_GCPtrEff) & GUEST_PAGE_OFFSET_MASK) <= GUEST_PAGE_SIZE - sizeof(a_TmplMemType) \
     && !((a_pVCpu)->iem.s.fExec & IEM_F_X86_AC) )
#else
# define TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(a_pVCpu, a_GCPtrEff, a_TmplMemType) 0
#endif

#define TMPL_MEM_WITH_ATOMIC_MAPPING

/* uint8_t accessors. */
#define TMPL_MEM_TYPE uint8_t
#define TMPL_MEM_TYPE_ALIGN 0
#define TMPL_MEM_TYPE_SIZE 1
#define TMPL_MEM_FN_SUFF U8
#define TMPL_MEM_FMT_TYPE "%#04x"
#define TMPL_MEM_FMT_DESC "byte"
#include "../VMMAll/IEMAllMemRWTmplInline.cpp.h"

/* Stack accessors are generated for the 16/32/64-bit integer types below. */
#define TMPL_MEM_WITH_STACK

/* uint16_t accessors. */
#define TMPL_MEM_TYPE uint16_t
#define TMPL_MEM_TYPE_ALIGN 1
#define TMPL_MEM_TYPE_SIZE 2
#define TMPL_MEM_FN_SUFF U16
#define TMPL_MEM_FMT_TYPE "%#06x"
#define TMPL_MEM_FMT_DESC "word"
#include "../VMMAll/IEMAllMemRWTmplInline.cpp.h"

/* uint32_t accessors, incl. segment-register push variants. */
#define TMPL_WITH_PUSH_SREG
#define TMPL_MEM_TYPE uint32_t
#define TMPL_MEM_TYPE_ALIGN 3
#define TMPL_MEM_TYPE_SIZE 4
#define TMPL_MEM_FN_SUFF U32
#define TMPL_MEM_FMT_TYPE "%#010x"
#define TMPL_MEM_FMT_DESC "dword"
#include "../VMMAll/IEMAllMemRWTmplInline.cpp.h"
#undef TMPL_WITH_PUSH_SREG

/* uint64_t accessors. */
#define TMPL_MEM_TYPE uint64_t
#define TMPL_MEM_TYPE_ALIGN 7
#define TMPL_MEM_TYPE_SIZE 8
#define TMPL_MEM_FN_SUFF U64
#define TMPL_MEM_FMT_TYPE "%#018RX64"
#define TMPL_MEM_FMT_DESC "qword"
#include "../VMMAll/IEMAllMemRWTmplInline.cpp.h"

#undef TMPL_MEM_WITH_STACK
#undef TMPL_MEM_WITH_ATOMIC_MAPPING

#define TMPL_MEM_NO_MAPPING /* currently sticky */

/* Fetch-only uint32_t accessors that skip alignment (#AC) checking. */
#define TMPL_MEM_NO_STORE
#define TMPL_MEM_TYPE uint32_t
#define TMPL_MEM_TYPE_ALIGN 0
#define TMPL_MEM_TYPE_SIZE 4
#define TMPL_MEM_FN_SUFF U32NoAc
#define TMPL_MEM_FMT_TYPE "%#010x"
#define TMPL_MEM_FMT_DESC "dword"
#include "../VMMAll/IEMAllMemRWTmplInline.cpp.h"

/* Fetch-only uint64_t accessors that skip alignment (#AC) checking. */
#define TMPL_MEM_NO_STORE
#define TMPL_MEM_TYPE uint64_t
#define TMPL_MEM_TYPE_ALIGN 0
#define TMPL_MEM_TYPE_SIZE 8
#define TMPL_MEM_FN_SUFF U64NoAc
#define TMPL_MEM_FMT_TYPE "%#018RX64"
#define TMPL_MEM_FMT_DESC "qword"
#include "../VMMAll/IEMAllMemRWTmplInline.cpp.h"

/* Fetch-only uint64_t accessors requiring 16-byte (U128) alignment. */
#define TMPL_MEM_NO_STORE
#define TMPL_MEM_TYPE uint64_t
#define TMPL_MEM_TYPE_ALIGN 15
#define TMPL_MEM_TYPE_SIZE 8
#define TMPL_MEM_FN_SUFF U64AlignedU128
#define TMPL_MEM_FMT_TYPE "%#018RX64"
#define TMPL_MEM_FMT_DESC "qword"
#include "../VMMAll/IEMAllMemRWTmplInline.cpp.h"

#undef TMPL_MEM_NO_MAPPING

/* 80-bit floating point (FPU tword) accessors. */
#define TMPL_MEM_TYPE RTFLOAT80U
#define TMPL_MEM_TYPE_ALIGN 7
#define TMPL_MEM_TYPE_SIZE 10
#define TMPL_MEM_FN_SUFF R80
#define TMPL_MEM_FMT_TYPE "%.10Rhxs"
#define TMPL_MEM_FMT_DESC "tword"
#include "../VMMAll/IEMAllMemRWTmplInline.cpp.h"

/* 80-bit packed BCD accessors. */
#define TMPL_MEM_TYPE RTPBCD80U
#define TMPL_MEM_TYPE_ALIGN 7 /** @todo RTPBCD80U alignment testcase */
#define TMPL_MEM_TYPE_SIZE 10
#define TMPL_MEM_FN_SUFF D80
#define TMPL_MEM_FMT_TYPE "%.10Rhxs"
#define TMPL_MEM_FMT_DESC "tword"
#include "../VMMAll/IEMAllMemRWTmplInline.cpp.h"

/* 128-bit accessors with atomic mapping support. */
#define TMPL_MEM_WITH_ATOMIC_MAPPING
#define TMPL_MEM_TYPE RTUINT128U
#define TMPL_MEM_TYPE_ALIGN 15
#define TMPL_MEM_TYPE_SIZE 16
#define TMPL_MEM_FN_SUFF U128
#define TMPL_MEM_FMT_TYPE "%.16Rhxs"
#define TMPL_MEM_FMT_DESC "dqword"
#include "../VMMAll/IEMAllMemRWTmplInline.cpp.h"
#undef TMPL_MEM_WITH_ATOMIC_MAPPING

/* 128-bit accessors that skip alignment (#AC) checking. */
#define TMPL_MEM_NO_MAPPING
#define TMPL_MEM_TYPE RTUINT128U
#define TMPL_MEM_TYPE_ALIGN 0
#define TMPL_MEM_TYPE_SIZE 16
#define TMPL_MEM_FN_SUFF U128NoAc
#define TMPL_MEM_FMT_TYPE "%.16Rhxs"
#define TMPL_MEM_FMT_DESC "dqword"
#include "../VMMAll/IEMAllMemRWTmplInline.cpp.h"
#undef TMPL_MEM_NO_MAPPING


/* Every template relying on unaligned accesses inside a page not being okay should go below. */
#undef TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK
#define TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(a_pVCpu, a_GCPtrEff, a_TmplMemType) 0

/* 128-bit accessors with strict SSE 16-byte alignment. */
#define TMPL_MEM_NO_MAPPING
#define TMPL_MEM_TYPE RTUINT128U
#define TMPL_MEM_TYPE_ALIGN 15
#define TMPL_MEM_TYPE_SIZE 16
#define TMPL_MEM_FN_SUFF U128AlignedSse
#define TMPL_MEM_FMT_TYPE "%.16Rhxs"
#define TMPL_MEM_FMT_DESC "dqword"
#include "../VMMAll/IEMAllMemRWTmplInline.cpp.h"
#undef TMPL_MEM_NO_MAPPING

/* 256-bit accessors that skip alignment (#AC) checking. */
#define TMPL_MEM_NO_MAPPING
#define TMPL_MEM_TYPE RTUINT256U
#define TMPL_MEM_TYPE_ALIGN 0
#define TMPL_MEM_TYPE_SIZE 32
#define TMPL_MEM_FN_SUFF U256NoAc
#define TMPL_MEM_FMT_TYPE "%.32Rhxs"
#define TMPL_MEM_FMT_DESC "qqword"
#include "../VMMAll/IEMAllMemRWTmplInline.cpp.h"
#undef TMPL_MEM_NO_MAPPING

/* 256-bit accessors with strict AVX 32-byte alignment. */
#define TMPL_MEM_NO_MAPPING
#define TMPL_MEM_TYPE RTUINT256U
#define TMPL_MEM_TYPE_ALIGN 31
#define TMPL_MEM_TYPE_SIZE 32
#define TMPL_MEM_FN_SUFF U256AlignedAvx
#define TMPL_MEM_FMT_TYPE "%.32Rhxs"
#define TMPL_MEM_FMT_DESC "qqword"
#include "../VMMAll/IEMAllMemRWTmplInline.cpp.h"
#undef TMPL_MEM_NO_MAPPING

#undef TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK
463
464/** @} */
465
466#endif /* !VMM_INCLUDED_SRC_VMMAll_target_x86_IEMInlineMem_x86_h */
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette