VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllMemRWTmplInline.cpp.h@107044

Last change on this file since 107044 was 106061, checked in by vboxsync, 2 months ago

Copyright year updates by scm.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 86.3 KB
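This header is a code template: it is meant to be #included with the TMPL_MEM_* parameters checked below already defined, once per access size, and it then expands into the size-specific inline fetch/store/map/stack helpers. As a rough illustration only (the concrete values and the including file are assumptions, not copied from the actual instantiation sites), an instantiation for a 32-bit access type would look something like this:

    #define TMPL_MEM_TYPE       uint32_t    /* C type being accessed */
    #define TMPL_MEM_TYPE_SIZE  4           /* sizeof(TMPL_MEM_TYPE) */
    #define TMPL_MEM_TYPE_ALIGN 3           /* natural alignment mask (size - 1) */
    #define TMPL_MEM_FN_SUFF    U32         /* suffix: iemMemFetchDataU32Jmp, ... */
    #define TMPL_MEM_FMT_TYPE   "%#010x"    /* log format for values */
    #define TMPL_MEM_FMT_DESC   "dword"     /* log description */
    #include "IEMAllMemRWTmplInline.cpp.h"
    /* ...followed by an #undef of each TMPL_MEM_* parameter before the next size. */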
1/* $Id: IEMAllMemRWTmplInline.cpp.h 106061 2024-09-16 14:03:52Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - Inlined R/W Memory Functions Template.
4 */
5
6/*
7 * Copyright (C) 2011-2024 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/* Check template parameters. */
30#ifndef TMPL_MEM_TYPE
31# error "TMPL_MEM_TYPE is undefined"
32#endif
33#ifndef TMPL_MEM_TYPE_SIZE
34# error "TMPL_MEM_TYPE_SIZE is undefined"
35#endif
36#ifndef TMPL_MEM_TYPE_ALIGN
37# error "TMPL_MEM_TYPE_ALIGN is undefined"
38#endif
39#ifndef TMPL_MEM_FN_SUFF
40# error "TMPL_MEM_FN_SUFF is undefined"
41#endif
42#ifndef TMPL_MEM_FMT_TYPE
43# error "TMPL_MEM_FMT_TYPE is undefined"
44#endif
45#ifndef TMPL_MEM_FMT_DESC
46# error "TMPL_MEM_FMT_DESC is undefined"
47#endif
48
49
50/** Helper for checking if @a a_GCPtr is acceptably aligned and fully within
51 * the page for a TMPL_MEM_TYPE. */
52#if TMPL_MEM_TYPE_ALIGN + 1 < TMPL_MEM_TYPE_SIZE
53# define TMPL_MEM_ALIGN_CHECK(a_GCPtr) ( ( !((a_GCPtr) & TMPL_MEM_TYPE_ALIGN) \
54 && ((a_GCPtr) & GUEST_PAGE_OFFSET_MASK) <= GUEST_PAGE_SIZE - sizeof(TMPL_MEM_TYPE)) \
55 || TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, (a_GCPtr), TMPL_MEM_TYPE))
56#else
57# define TMPL_MEM_ALIGN_CHECK(a_GCPtr) ( !((a_GCPtr) & TMPL_MEM_TYPE_ALIGN) /* If aligned, it will be within the page. */ \
58 || TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, (a_GCPtr), TMPL_MEM_TYPE))
59#endif
60
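/* Worked example for TMPL_MEM_ALIGN_CHECK (numbers assume GUEST_PAGE_SIZE 0x1000 and a
   naturally aligned 4 byte type with TMPL_MEM_TYPE_ALIGN 3, i.e. the second form above;
   concrete instantiations may differ):
   - page offset 0xffc: (0xffc & 3) == 0, and a naturally aligned item cannot cross the
     page, so the cheap test passes and the inline TLB path below can be used.
   - page offset 0xffd: misaligned, so the macro defers to
     TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(); since 0xffd + 4 would also spill into the
     next page that check is expected to say no and the caller takes the SafeJmp fallback.
   The first form of the macro covers types whose required alignment is smaller than
   their size, where even an aligned access needs the extra within-the-page offset test. */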
61/**
62 * Values have to be passed by reference if larger than uint64_t.
63 *
64 * This is a restriction of the Visual C++ AMD64 calling convention,
65 * the gcc AMD64 and ARM64 ABIs can easily pass and return values up to 128 bits
66 * in registers. For larger values like RTUINT256U, Visual C++ on AMD64 and ARM64
67 * passes them by hidden reference, whereas the gcc AMD64 ABI will use the stack.
68 *
69 * So, to avoid passing anything on the stack, we just explicitly pass values by
70 * reference (pointer) if they are larger than uint64_t. This ASSUMES 64-bit
71 * host.
72 */
73#if TMPL_MEM_TYPE_SIZE > 8
74# define TMPL_MEM_BY_REF
75#else
76# undef TMPL_MEM_BY_REF
77#endif
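/* Note: given the size test above, TMPL_MEM_BY_REF is expected to end up defined for the
   128-bit and 256-bit SIMD types (RTUINT128U, RTUINT256U) and to stay undefined for the
   integer types up to uint64_t -- which is why the fetch and store workers below come in
   both a by-value and a by-pointer flavour. (Inference from the size check, not from the
   actual instantiation sites.) */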
78
79
80#ifdef IEM_WITH_SETJMP
81
82
83/*********************************************************************************************************************************
84* Fetches *
85*********************************************************************************************************************************/
86
87/**
88 * Inlined fetch function that longjumps on error.
89 *
90 * @note The @a iSegReg is not allowed to be UINT8_MAX!
91 */
92#ifdef TMPL_MEM_BY_REF
93DECL_INLINE_THROW(void)
94RT_CONCAT3(iemMemFetchData,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, TMPL_MEM_TYPE *pValue, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
95#else
96DECL_INLINE_THROW(TMPL_MEM_TYPE)
97RT_CONCAT3(iemMemFetchData,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
98#endif
99{
100 AssertCompile(sizeof(TMPL_MEM_TYPE) == TMPL_MEM_TYPE_SIZE);
101# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
102 /*
103 * Convert from segmented to flat address and check that it doesn't cross a page boundrary.
104 */
105 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(TMPL_MEM_TYPE), GCPtrMem);
106# if TMPL_MEM_TYPE_SIZE > 1
107 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(GCPtrEff)))
108# endif
109 {
110 /*
111 * TLB lookup.
112 */
113 uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV(GCPtrEff);
114 PCIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.DataTlb, uTagNoRev);
115 if (RT_LIKELY( pTlbe->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)
116 || (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)))
117 {
118 /*
119 * Check TLB page table level access flags.
120 */
121 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
122 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
123 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
124 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_NO_MAPPINGR3 | fNoUser))
125 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
126 {
127 /*
128 * Fetch and return the data.
129 */
130# ifdef IEM_WITH_TLB_STATISTICS
131 pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
132# endif
133 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
134 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
135# ifdef TMPL_MEM_BY_REF
136 *pValue = *(TMPL_MEM_TYPE const *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK];
137 LogEx(LOG_GROUP_IEM_MEM,("IEM RD " TMPL_MEM_FMT_DESC " %d|%RGv=%RGv: %." RT_XSTR(TMPL_MEM_TYPE_SIZE) "Rhxs\n",
138 iSegReg, GCPtrMem, GCPtrEff, pValue));
139 return;
140# else
141 TMPL_MEM_TYPE const uRet = *(TMPL_MEM_TYPE const *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK];
142 LogEx(LOG_GROUP_IEM_MEM,("IEM RD " TMPL_MEM_FMT_DESC " %d|%RGv=%RGv: " TMPL_MEM_FMT_TYPE "\n",
143 iSegReg, GCPtrMem, GCPtrEff, uRet));
144 return uRet;
145# endif
146 }
147 }
148 }
149
150 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
151 outdated page pointer, or other troubles. (This will do a TLB load.) */
152 LogEx(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %u:%RGv falling back\n", LOG_FN_NAME, iSegReg, GCPtrMem));
153# endif
154# ifdef TMPL_MEM_BY_REF
155 RT_CONCAT3(iemMemFetchData,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, pValue, iSegReg, GCPtrMem);
156# else
157 return RT_CONCAT3(iemMemFetchData,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, iSegReg, GCPtrMem);
158# endif
159}
160
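/* Note on the inline fast path used above and repeated in all the workers below (a
   summary of the code as written, not an alternative implementation):
     1. IEMTLB_CALC_TAG_NO_REV() reduces the effective address to a page tag.
     2. The tag is first compared against the even TLB entry using uTlbRevision
        (non-global mappings), then against the odd entry using uTlbRevisionGlobal.
     3. On a tag hit, fFlagsAndPhysRev must equal uTlbPhysRev with all of the relevant
        IEMTLBE_F_* "no" bits clear (no read/write, accessed/dirty updates pending, no
        ring-3 mapping, plus the CPL-derived user restriction fNoUser).
     4. Only then is pbMappingR3 dereferenced directly; every other outcome (TLB miss,
        stale physical revision, MMIO, A/D bit updates needed, etc.) goes to the
        corresponding *SafeJmp worker, which does a full TLB load and may longjmp to
        raise a guest exception. */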
161
162/**
163 * Inlined flat addressing fetch function that longjumps on error.
164 */
165# ifdef TMPL_MEM_BY_REF
166DECL_INLINE_THROW(void)
167RT_CONCAT3(iemMemFlatFetchData,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, TMPL_MEM_TYPE *pValue, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
168# else
169DECL_INLINE_THROW(TMPL_MEM_TYPE)
170RT_CONCAT3(iemMemFlatFetchData,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
171# endif
172{
173 AssertMsg( (pVCpu->iem.s.fExec & IEM_F_MODE_MASK) == IEM_F_MODE_X86_64BIT
174 || (pVCpu->iem.s.fExec & IEM_F_MODE_MASK) == IEM_F_MODE_X86_32BIT_PROT_FLAT
175 || (pVCpu->iem.s.fExec & IEM_F_MODE_MASK) == IEM_F_MODE_X86_32BIT_FLAT, ("%#x\n", pVCpu->iem.s.fExec));
176# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
177 /*
178 * Check that it doesn't cross a page boundary.
179 */
180# if TMPL_MEM_TYPE_SIZE > 1
181 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(GCPtrMem)))
182# endif
183 {
184 /*
185 * TLB lookup.
186 */
187 uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV(GCPtrMem);
188 PCIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.DataTlb, uTagNoRev);
189 if (RT_LIKELY( pTlbe->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)
190 || (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)))
191 {
192 /*
193 * Check TLB page table level access flags.
194 */
195 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
196 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
197 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
198 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_NO_MAPPINGR3 | fNoUser))
199 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
200 {
201 /*
202 * Fetch and return the data.
203 */
204# ifdef IEM_WITH_TLB_STATISTICS
205 pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
206# endif
207 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
208 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
209# ifdef TMPL_MEM_BY_REF
210 *pValue = *(TMPL_MEM_TYPE const *)&pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK];
211 LogEx(LOG_GROUP_IEM_MEM,("IEM RD " TMPL_MEM_FMT_DESC " %RGv: %." RT_XSTR(TMPL_MEM_TYPE_SIZE) "Rhxs\n",
212 GCPtrMem, pValue));
213 return;
214# else
215 TMPL_MEM_TYPE const uRet = *(TMPL_MEM_TYPE const *)&pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK];
216 LogEx(LOG_GROUP_IEM_MEM,("IEM RD " TMPL_MEM_FMT_DESC " %RGv: " TMPL_MEM_FMT_TYPE "\n", GCPtrMem, uRet));
217 return uRet;
218# endif
219 }
220 }
221 }
222
223 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
224 outdated page pointer, or other troubles. (This will do a TLB load.) */
225 LogEx(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrMem));
226# endif
227# ifdef TMPL_MEM_BY_REF
228 RT_CONCAT3(iemMemFetchData,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, pValue, UINT8_MAX, GCPtrMem);
229# else
230 return RT_CONCAT3(iemMemFetchData,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, UINT8_MAX, GCPtrMem);
231# endif
232}
233
234
235/*********************************************************************************************************************************
236* Stores *
237*********************************************************************************************************************************/
238# ifndef TMPL_MEM_NO_STORE
239
240/**
241 * Inlined store function that longjumps on error.
242 *
243 * @note The @a iSegReg is not allowed to be UINT8_MAX!
244 */
245DECL_INLINE_THROW(void)
246RT_CONCAT3(iemMemStoreData,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
247# ifdef TMPL_MEM_BY_REF
248 TMPL_MEM_TYPE const *pValue) IEM_NOEXCEPT_MAY_LONGJMP
249# else
250 TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
251# endif
252{
253# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
254 /*
255 * Convert from segmented to flat address and check that it doesn't cross a page boundary.
256 */
257 RTGCPTR GCPtrEff = iemMemApplySegmentToWriteJmp(pVCpu, iSegReg, sizeof(TMPL_MEM_TYPE), GCPtrMem);
258# if TMPL_MEM_TYPE_SIZE > 1
259 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(GCPtrEff)))
260# endif
261 {
262 /*
263 * TLB lookup.
264 */
265 uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV(GCPtrEff);
266 PCIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.DataTlb, uTagNoRev);
267 if (RT_LIKELY( pTlbe->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)
268 || (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)))
269 {
270 /*
271 * Check TLB page table level access flags.
272 */
273 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
274 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
275 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE
276 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_PT_NO_WRITE
277 | IEMTLBE_F_NO_MAPPINGR3 | fNoUser))
278 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
279 {
280 /*
281 * Store the value and return.
282 */
283# ifdef IEM_WITH_TLB_STATISTICS
284 pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
285# endif
286 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
287 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
288# ifdef TMPL_MEM_BY_REF
289 *(TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK] = *pValue;
290 Log5Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " %d|%RGv=%RGv: %." RT_XSTR(TMPL_MEM_TYPE_SIZE) "Rhxs (%04x:%RX64)\n",
291 iSegReg, GCPtrMem, GCPtrEff, pValue, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
292# else
293 *(TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK] = uValue;
294 Log5Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " %d|%RGv=%RGv: " TMPL_MEM_FMT_TYPE " (%04x:%RX64)\n",
295 iSegReg, GCPtrMem, GCPtrEff, uValue, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
296# endif
297 return;
298 }
299 }
300 }
301
302 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
303 outdated page pointer, or other troubles. (This will do a TLB load.) */
304 Log6Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %u:%RGv falling back\n", LOG_FN_NAME, iSegReg, GCPtrMem));
305# endif
306# ifdef TMPL_MEM_BY_REF
307 RT_CONCAT3(iemMemStoreData,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, iSegReg, GCPtrMem, pValue);
308# else
309 RT_CONCAT3(iemMemStoreData,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, iSegReg, GCPtrMem, uValue);
310# endif
311}
312
313
314/**
315 * Inlined flat addressing store function that longjumps on error.
316 */
317DECL_INLINE_THROW(void)
318RT_CONCAT3(iemMemFlatStoreData,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, RTGCPTR GCPtrMem,
319# ifdef TMPL_MEM_BY_REF
320 TMPL_MEM_TYPE const *pValue) IEM_NOEXCEPT_MAY_LONGJMP
321# else
322 TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
323# endif
324{
325 AssertMsg( (pVCpu->iem.s.fExec & IEM_F_MODE_MASK) == IEM_F_MODE_X86_64BIT
326 || (pVCpu->iem.s.fExec & IEM_F_MODE_MASK) == IEM_F_MODE_X86_32BIT_PROT_FLAT
327 || (pVCpu->iem.s.fExec & IEM_F_MODE_MASK) == IEM_F_MODE_X86_32BIT_FLAT, ("%#x\n", pVCpu->iem.s.fExec));
328# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
329 /*
330 * Check that it doesn't cross a page boundary.
331 */
332# if TMPL_MEM_TYPE_SIZE > 1
333 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(GCPtrMem)))
334# endif
335 {
336 /*
337 * TLB lookup.
338 */
339 uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV(GCPtrMem);
340 PCIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.DataTlb, uTagNoRev);
341 if (RT_LIKELY( pTlbe->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)
342 || (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)))
343 {
344 /*
345 * Check TLB page table level access flags.
346 */
347 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
348 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
349 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE
350 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_PT_NO_WRITE
351 | IEMTLBE_F_NO_MAPPINGR3 | fNoUser))
352 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
353 {
354 /*
355 * Store the value and return.
356 */
357# ifdef IEM_WITH_TLB_STATISTICS
358 pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
359# endif
360 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
361 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
362# ifdef TMPL_MEM_BY_REF
363 *(TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK] = *pValue;
364 Log5Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " %RGv: %." RT_XSTR(TMPL_MEM_TYPE_SIZE) "Rhxs\n",
365 GCPtrMem, pValue));
366# else
367 *(TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK] = uValue;
368 Log5Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " %RGv: " TMPL_MEM_FMT_TYPE "\n", GCPtrMem, uValue));
369# endif
370 return;
371 }
372 }
373 }
374
375 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
376 outdated page pointer, or other troubles. (This will do a TLB load.) */
377 Log6Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrMem));
378# endif
379# ifdef TMPL_MEM_BY_REF
380 RT_CONCAT3(iemMemStoreData,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, UINT8_MAX, GCPtrMem, pValue);
381# else
382 RT_CONCAT3(iemMemStoreData,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, UINT8_MAX, GCPtrMem, uValue);
383# endif
384}
385
386# endif /* !TMPL_MEM_NO_STORE */
387
388
389/*********************************************************************************************************************************
390* Mapping / Direct Memory Access *
391*********************************************************************************************************************************/
392# ifndef TMPL_MEM_NO_MAPPING
393
394/**
395 * Inlined read-write memory mapping function that longjumps on error.
396 *
397 * Almost identical to RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,AtJmp).
398 */
399DECL_INLINE_THROW(TMPL_MEM_TYPE *)
400RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,RwJmp)(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
401 uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
402{
403# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
404 /*
405 * Convert from segmented to flat address and check that it doesn't cross a page boundary.
406 */
407 RTGCPTR const GCPtrEff = iemMemApplySegmentToWriteJmp(pVCpu, iSegReg, sizeof(TMPL_MEM_TYPE), GCPtrMem);
408# if TMPL_MEM_TYPE_SIZE > 1
409 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(GCPtrEff)))
410# endif
411 {
412 /*
413 * TLB lookup.
414 */
415 uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV(GCPtrEff);
416 PCIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.DataTlb, uTagNoRev);
417 if (RT_LIKELY( pTlbe->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)
418 || (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)))
419 {
420 /*
421 * Check TLB page table level access flags.
422 */
423 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
424 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
425 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
426 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
427 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_PT_NO_WRITE
428 | fNoUser))
429 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
430 {
431 /*
432 * Return the address.
433 */
434# ifdef IEM_WITH_TLB_STATISTICS
435 pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
436# endif
437 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
438 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
439 *pbUnmapInfo = 0;
440 Log7Ex(LOG_GROUP_IEM_MEM,("IEM RW/map " TMPL_MEM_FMT_DESC " %d|%RGv=%RGv: %p\n",
441 iSegReg, GCPtrMem, GCPtrEff, &pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK]));
442 return (TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK];
443 }
444 }
445 }
446
447 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
448 outdated page pointer, or other troubles. (This will do a TLB load.) */
449 Log8Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %u:%RGv falling back\n", LOG_FN_NAME, iSegReg, GCPtrMem));
450# endif
451 return RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,RwSafeJmp)(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
452}
453
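/* Note on usage: the mapping workers return a pointer straight into the guest page plus
   an opaque cookie via pbUnmapInfo; on the inline hit above the cookie is set to zero,
   presumably indicating that no bounce buffering took place and the unmap is trivial.
   The expected call-site pattern (assumed from the wider IEM code; the commit/unmap
   helper is not part of this file and its exact name is an assumption) is roughly:
       map    -> puDst = iemMemMapData<Suff>RwJmp(pVCpu, &bUnmapInfo, iSegReg, GCPtrMem)
       modify -> *puDst = uNewValue
       commit -> iemMemCommitAndUnmapRwJmp(pVCpu, bUnmapInfo)  */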
454
455/**
456 * Inlined flat read-write memory mapping function that longjumps on error.
457 *
458 * Almost identical to RT_CONCAT3(iemMemFlatMapData,TMPL_MEM_FN_SUFF,AtJmp).
459 */
460DECL_INLINE_THROW(TMPL_MEM_TYPE *)
461RT_CONCAT3(iemMemFlatMapData,TMPL_MEM_FN_SUFF,RwJmp)(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
462 RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
463{
464# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
465 /*
466 * Check that the address doesn't cross a page boundary.
467 */
468# if TMPL_MEM_TYPE_SIZE > 1
469 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(GCPtrMem)))
470# endif
471 {
472 /*
473 * TLB lookup.
474 */
475 uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV(GCPtrMem);
476 PCIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.DataTlb, uTagNoRev);
477 if (RT_LIKELY( pTlbe->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)
478 || (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)))
479 {
480 /*
481 * Check TLB page table level access flags.
482 */
483 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
484 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
485 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
486 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
487 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_PT_NO_WRITE
488 | fNoUser))
489 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
490 {
491 /*
492 * Return the address.
493 */
494# ifdef IEM_WITH_TLB_STATISTICS
495 pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
496# endif
497 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
498 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
499 *pbUnmapInfo = 0;
500 Log7Ex(LOG_GROUP_IEM_MEM,("IEM RW/map " TMPL_MEM_FMT_DESC " %RGv: %p\n",
501 GCPtrMem, &pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK]));
502 return (TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK];
503 }
504 }
505 }
506
507 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
508 outdated page pointer, or other troubles. (This will do a TLB load.) */
509 Log8Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrMem));
510# endif
511 return RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,RwSafeJmp)(pVCpu, pbUnmapInfo, UINT8_MAX, GCPtrMem);
512}
513
514# ifdef TMPL_MEM_WITH_ATOMIC_MAPPING
515
516/**
517 * Inlined atomic read-write memory mapping function that longjumps on error.
518 *
519 * Almost identical to RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,RwJmp).
520 */
521DECL_INLINE_THROW(TMPL_MEM_TYPE *)
522RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,AtJmp)(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
523 uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
524{
525# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
526 /*
527 * Convert from segmented to flat address and check that it doesn't cross a page boundary.
528 */
529 RTGCPTR const GCPtrEff = iemMemApplySegmentToWriteJmp(pVCpu, iSegReg, sizeof(TMPL_MEM_TYPE), GCPtrMem);
530# if TMPL_MEM_TYPE_SIZE > 1
531 if (RT_LIKELY(!(GCPtrEff & TMPL_MEM_TYPE_ALIGN))) /* strictly aligned, otherwise do the fallback which knows the details. */
532# endif
533 {
534 /*
535 * TLB lookup.
536 */
537 uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV(GCPtrEff);
538 PCIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.DataTlb, uTagNoRev);
539 if (RT_LIKELY( pTlbe->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)
540 || (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)))
541 {
542 /*
543 * Check TLB page table level access flags.
544 */
545 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
546 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
547 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
548 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
549 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_PT_NO_WRITE
550 | fNoUser))
551 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
552 {
553 /*
554 * Return the address.
555 */
556# ifdef IEM_WITH_TLB_STATISTICS
557 pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
558# endif
559 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
560 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
561 *pbUnmapInfo = 0;
562 Log7Ex(LOG_GROUP_IEM_MEM,("IEM AT/map " TMPL_MEM_FMT_DESC " %d|%RGv=%RGv: %p\n",
563 iSegReg, GCPtrMem, GCPtrEff, &pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK]));
564 return (TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK];
565 }
566 }
567 }
568
569 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
570 outdated page pointer, or other troubles. (This will do a TLB load.) */
571 Log8Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %u:%RGv falling back\n", LOG_FN_NAME, iSegReg, GCPtrMem));
572# endif
573 return RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,AtSafeJmp)(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
574}
575
576
577/**
578 * Inlined flat atomic read-write memory mapping function that longjumps on error.
579 *
580 * Almost identical to RT_CONCAT3(iemMemFlatMapData,TMPL_MEM_FN_SUFF,RwJmp).
581 */
582DECL_INLINE_THROW(TMPL_MEM_TYPE *)
583RT_CONCAT3(iemMemFlatMapData,TMPL_MEM_FN_SUFF,AtJmp)(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
584 RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
585{
586# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
587 /*
588 * Check that the address doesn't cross a page boundary.
589 */
590# if TMPL_MEM_TYPE_SIZE > 1
591 if (RT_LIKELY(!(GCPtrMem & TMPL_MEM_TYPE_ALIGN))) /* strictly aligned, otherwise do the fallback which knows the details. */
592# endif
593 {
594 /*
595 * TLB lookup.
596 */
597 uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV(GCPtrMem);
598 PCIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.DataTlb, uTagNoRev);
599 if (RT_LIKELY( pTlbe->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)
600 || (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)))
601 {
602 /*
603 * Check TLB page table level access flags.
604 */
605 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
606 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
607 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
608 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
609 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_PT_NO_WRITE
610 | fNoUser))
611 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
612 {
613 /*
614 * Return the address.
615 */
616# ifdef IEM_WITH_TLB_STATISTICS
617 pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
618# endif
619 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
620 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
621 *pbUnmapInfo = 0;
622 Log7Ex(LOG_GROUP_IEM_MEM,("IEM AT/map " TMPL_MEM_FMT_DESC " %RGv: %p\n",
623 GCPtrMem, &pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK]));
624 return (TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK];
625 }
626 }
627 }
628
629 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
630 outdated page pointer, or other troubles. (This will do a TLB load.) */
631 Log8Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrMem));
632# endif
633 return RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,AtSafeJmp)(pVCpu, pbUnmapInfo, UINT8_MAX, GCPtrMem);
634}
635
636# endif /* TMPL_MEM_WITH_ATOMIC_MAPPING */
637
638/**
639 * Inlined write-only memory mapping function that longjumps on error.
640 */
641DECL_INLINE_THROW(TMPL_MEM_TYPE *)
642RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,WoJmp)(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
643 uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
644{
645# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
646 /*
647 * Convert from segmented to flat address and check that it doesn't cross a page boundary.
648 */
649 RTGCPTR const GCPtrEff = iemMemApplySegmentToWriteJmp(pVCpu, iSegReg, sizeof(TMPL_MEM_TYPE), GCPtrMem);
650# if TMPL_MEM_TYPE_SIZE > 1
651 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(GCPtrEff)))
652# endif
653 {
654 /*
655 * TLB lookup.
656 */
657 uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV(GCPtrEff);
658 PCIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.DataTlb, uTagNoRev);
659 if (RT_LIKELY( pTlbe->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)
660 || (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)))
661 {
662 /*
663 * Check TLB page table level access flags.
664 */
665 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
666 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
667 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
668 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE
669 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_PT_NO_WRITE
670 | fNoUser))
671 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
672 {
673 /*
674 * Return the address.
675 */
676# ifdef IEM_WITH_TLB_STATISTICS
677 pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
678# endif
679 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
680 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
681 *pbUnmapInfo = 0;
682 Log7Ex(LOG_GROUP_IEM_MEM,("IEM WO/map " TMPL_MEM_FMT_DESC " %d|%RGv=%RGv: %p\n",
683 iSegReg, GCPtrMem, GCPtrEff, &pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK]));
684 return (TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK];
685 }
686 }
687 }
688
689 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
690 outdated page pointer, or other troubles. (This will do a TLB load.) */
691 Log8Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %u:%RGv falling back\n", LOG_FN_NAME, iSegReg, GCPtrMem));
692# endif
693 return RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,WoSafeJmp)(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
694}
695
696
697/**
698 * Inlined flat write-only memory mapping function that longjumps on error.
699 */
700DECL_INLINE_THROW(TMPL_MEM_TYPE *)
701RT_CONCAT3(iemMemFlatMapData,TMPL_MEM_FN_SUFF,WoJmp)(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
702 RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
703{
704# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
705 /*
706 * Check that the address doesn't cross a page boundary.
707 */
708# if TMPL_MEM_TYPE_SIZE > 1
709 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(GCPtrMem)))
710# endif
711 {
712 /*
713 * TLB lookup.
714 */
715 uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV(GCPtrMem);
716 PCIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.DataTlb, uTagNoRev);
717 if (RT_LIKELY( pTlbe->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)
718 || (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)))
719 {
720 /*
721 * Check TLB page table level access flags.
722 */
723 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
724 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
725 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
726 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE
727 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY
728 | IEMTLBE_F_PT_NO_WRITE | fNoUser))
729 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
730 {
731 /*
732 * Return the address.
733 */
734# ifdef IEM_WITH_TLB_STATISTICS
735 pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
736# endif
737 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
738 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
739 *pbUnmapInfo = 0;
740 Log7Ex(LOG_GROUP_IEM_MEM,("IEM WO/map " TMPL_MEM_FMT_DESC " %RGv: %p\n",
741 GCPtrMem, &pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK]));
742 return (TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK];
743 }
744 }
745 }
746
747 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
748 outdated page pointer, or other troubles. (This will do a TLB load.) */
749 Log8Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrMem));
750# endif
751 return RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,WoSafeJmp)(pVCpu, pbUnmapInfo, UINT8_MAX, GCPtrMem);
752}
753
754
755/**
756 * Inlined read-only memory mapping function that longjumps on error.
757 */
758DECL_INLINE_THROW(TMPL_MEM_TYPE const *)
759RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,RoJmp)(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
760 uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
761{
762# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
763 /*
764 * Convert from segmented to flat address and check that it doesn't cross a page boundary.
765 */
766 RTGCPTR const GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(TMPL_MEM_TYPE), GCPtrMem);
767# if TMPL_MEM_TYPE_SIZE > 1
768 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(GCPtrEff)))
769# endif
770 {
771 /*
772 * TLB lookup.
773 */
774 uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV(GCPtrEff);
775 PCIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.DataTlb, uTagNoRev);
776 if (RT_LIKELY( pTlbe->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)
777 || (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)))
778 {
779 /*
780 * Check TLB page table level access flags.
781 */
782 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
783 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
784 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
785 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
786 | IEMTLBE_F_PT_NO_ACCESSED | fNoUser))
787 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
788 {
789 /*
790 * Return the address.
791 */
792# ifdef IEM_WITH_TLB_STATISTICS
793 pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
794# endif
795 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
796 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
797 *pbUnmapInfo = 0;
798 Log3Ex(LOG_GROUP_IEM_MEM,("IEM RO/map " TMPL_MEM_FMT_DESC " %d|%RGv=%RGv: %p\n",
799 iSegReg, GCPtrMem, GCPtrEff, &pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK]));
800 return (TMPL_MEM_TYPE const *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK];
801 }
802 }
803 }
804
805 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
806 outdated page pointer, or other troubles. (This will do a TLB load.) */
807 Log4Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %u:%RGv falling back\n", LOG_FN_NAME, iSegReg, GCPtrMem));
808# endif
809 return RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,RoSafeJmp)(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
810}
811
812
813/**
814 * Inlined flat read-only memory mapping function that longjumps on error.
815 */
816DECL_INLINE_THROW(TMPL_MEM_TYPE const *)
817RT_CONCAT3(iemMemFlatMapData,TMPL_MEM_FN_SUFF,RoJmp)(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
818 RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
819{
820# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
821 /*
822 * Check that the address doesn't cross a page boundary.
823 */
824# if TMPL_MEM_TYPE_SIZE > 1
825 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(GCPtrMem)))
826# endif
827 {
828 /*
829 * TLB lookup.
830 */
831 uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV(GCPtrMem);
832 PCIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.DataTlb, uTagNoRev);
833 if (RT_LIKELY( pTlbe->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)
834 || (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)))
835 {
836 /*
837 * Check TLB page table level access flags.
838 */
839 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
840 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
841 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
842 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
843 | IEMTLBE_F_PT_NO_ACCESSED | fNoUser))
844 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
845 {
846 /*
847 * Return the address.
848 */
849# ifdef IEM_WITH_TLB_STATISTICS
850 pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
851# endif
852 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
853 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
854 *pbUnmapInfo = 0;
855 Log3Ex(LOG_GROUP_IEM_MEM,("IEM RO/map " TMPL_MEM_FMT_DESC " %RGv: %p\n",
856 GCPtrMem, &pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK]));
857 return (TMPL_MEM_TYPE const *)&pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK];
858 }
859 }
860 }
861
862 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
863 outdated page pointer, or other troubles. (This will do a TLB load.) */
864 Log4Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrMem));
865# endif
866 return RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,RoSafeJmp)(pVCpu, pbUnmapInfo, UINT8_MAX, GCPtrMem);
867}
868
869# endif /* !TMPL_MEM_NO_MAPPING */
870
871
872/*********************************************************************************************************************************
873* Stack Access *
874*********************************************************************************************************************************/
875# ifdef TMPL_MEM_WITH_STACK
876# if TMPL_MEM_TYPE_SIZE > 8
877# error "Stack not supported for this type size - please #undef TMPL_MEM_WITH_STACK"
878# endif
879# if TMPL_MEM_TYPE_SIZE > 1 && TMPL_MEM_TYPE_ALIGN + 1 < TMPL_MEM_TYPE_SIZE
880# error "Stack not supported for this alignment size - please #undef TMPL_MEM_WITH_STACK"
881# endif
882# ifdef IEM_WITH_SETJMP
883
884/**
885 * Stack store function that longjmps on error.
886 */
887DECL_INLINE_THROW(void)
888RT_CONCAT3(iemMemStoreStack,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
889{
890# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
891 /*
892 * Apply segmentation and check that the item doesn't cross a page boundary.
893 */
894 RTGCPTR const GCPtrEff = iemMemApplySegmentToWriteJmp(pVCpu, X86_SREG_SS, sizeof(TMPL_MEM_TYPE), GCPtrMem);
895# if TMPL_MEM_TYPE_SIZE > 1
896 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(GCPtrEff)))
897# endif
898 {
899 /*
900 * TLB lookup.
901 */
902 uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV(GCPtrEff);
903 PCIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.DataTlb, uTagNoRev);
904 if (RT_LIKELY( pTlbe->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)
905 || (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)))
906 {
907 /*
908 * Check TLB page table level access flags.
909 */
910 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
911 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
912 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
913 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE
914 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY
915 | IEMTLBE_F_PT_NO_WRITE | fNoUser))
916 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
917 {
918 /*
919 * Do the store and return.
920 */
921# ifdef IEM_WITH_TLB_STATISTICS
922 pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
923# endif
924 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
925 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
926 Log11Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " SS|%RGv: " TMPL_MEM_FMT_TYPE "\n", GCPtrEff, uValue));
927 *(TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK] = uValue;
928 return;
929 }
930 }
931 }
932
933 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
934 outdated page pointer, or other troubles. (This will do a TLB load.) */
935 Log12Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrEff));
936# endif
937 RT_CONCAT3(iemMemStoreStack,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, GCPtrMem, uValue);
938}
939
940
941# ifdef TMPL_WITH_PUSH_SREG
942/**
943 * Stack segment store function that longjmps on error.
944 *
945 * For a detailed discussion of the behaviour see the fallback functions
946 * iemMemStoreStackUxxSRegSafeJmp and iemMemStackPushUxxSRegSafeJmp.
947 */
948DECL_INLINE_THROW(void)
949RT_CONCAT3(iemMemStoreStack,TMPL_MEM_FN_SUFF,SRegJmp)(PVMCPUCC pVCpu, RTGCPTR GCPtrMem,
950 TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
951{
952# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
953 /*
954 * Apply segmentation to the address and check that the item doesn't cross
955 * a page boundary.
956 */
957 RTGCPTR const GCPtrEff = iemMemApplySegmentToWriteJmp(pVCpu, X86_SREG_SS, sizeof(TMPL_MEM_TYPE), GCPtrMem);
958# if TMPL_MEM_TYPE_SIZE > 1
959 if (RT_LIKELY( !(GCPtrEff & (sizeof(uint16_t) - 1U))
960 || TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, GCPtrEff, uint16_t) ))
961# endif
962 {
963 /*
964 * TLB lookup.
965 */
966 uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV(GCPtrEff);
967 PCIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.DataTlb, uTagNoRev);
968 if (RT_LIKELY( pTlbe->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)
969 || (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)))
970 {
971 /*
972 * Check TLB page table level access flags.
973 */
974 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
975 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
976 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
977 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE
978 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY
979 | IEMTLBE_F_PT_NO_WRITE | fNoUser))
980 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
981 {
982 /*
983 * Do the push and return.
984 */
985# ifdef IEM_WITH_TLB_STATISTICS
986 pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
987# endif
988 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
989 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
990 Log11Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " SS|%RGv: " TMPL_MEM_FMT_TYPE " [sreg]\n", GCPtrEff, uValue));
991 *(uint16_t *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK] = (uint16_t)uValue;
992 return;
993 }
994 }
995 }
996
997 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
998 outdated page pointer, or other troubles. (This will do a TLB load.) */
999 Log12Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrEff));
1000# endif
1001 RT_CONCAT3(iemMemStoreStack,TMPL_MEM_FN_SUFF,SRegSafeJmp)(pVCpu, GCPtrMem, uValue);
1002}
1003# endif /* TMPL_WITH_PUSH_SREG */
1004
1005
1006/**
1007 * Flat stack store function that longjmps on error.
1008 */
1009DECL_INLINE_THROW(void)
1010RT_CONCAT3(iemMemFlatStoreStack,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, RTGCPTR GCPtrMem,
1011 TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
1012{
1013 Assert( IEM_IS_64BIT_CODE(pVCpu)
1014 || ( pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig
1015 && pVCpu->cpum.GstCtx.ss.Attr.n.u4Type == X86_SEL_TYPE_RW_ACC
1016 && pVCpu->cpum.GstCtx.ss.u32Limit == UINT32_MAX
1017 && pVCpu->cpum.GstCtx.ss.u64Base == 0));
1018
1019# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
1020 /*
1021 * Check that the item doesn't cross a page boundary.
1022 */
1023# if TMPL_MEM_TYPE_SIZE > 1
1024 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(GCPtrMem)))
1025# endif
1026 {
1027 /*
1028 * TLB lookup.
1029 */
1030 uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV(GCPtrMem);
1031 PCIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.DataTlb, uTagNoRev);
1032 if (RT_LIKELY( pTlbe->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)
1033 || (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)))
1034 {
1035 /*
1036 * Check TLB page table level access flags.
1037 */
1038 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
1039 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
1040 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
1041 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE
1042 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY
1043 | IEMTLBE_F_PT_NO_WRITE | fNoUser))
1044 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
1045 {
1046 /*
1047 * Do the push and return.
1048 */
1049# ifdef IEM_WITH_TLB_STATISTICS
1050 pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
1051# endif
1052 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
1053 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
1054 Log11Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " SS|%RGv (<-%RX64): " TMPL_MEM_FMT_TYPE "\n",
1055 GCPtrMem, pVCpu->cpum.GstCtx.rsp, uValue));
1056 *(TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK] = uValue;
1057 return;
1058 }
1059 }
1060 }
1061
1062 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
1063 outdated page pointer, or other troubles. (This will do a TLB load.) */
1064 Log12Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrMem));
1065# endif
1066 RT_CONCAT3(iemMemStoreStack,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, GCPtrMem, uValue);
1067}
1068
1069# ifdef TMPL_WITH_PUSH_SREG
1070/**
1071 * Flat stack segment store function that longjmps on error.
1072 *
1073 * For a detailed discussion of the behaviour see the fallback functions
1074 * iemMemStoreStackUxxSRegSafeJmp and iemMemStackPushUxxSRegSafeJmp.
1075 */
1076DECL_INLINE_THROW(void)
1077RT_CONCAT3(iemMemFlatStoreStack,TMPL_MEM_FN_SUFF,SRegJmp)(PVMCPUCC pVCpu, RTGCPTR GCPtrMem,
1078 TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
1079{
1080# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
1081 /*
1082 * Check that the item doesn't cross a page boundary.
1083 */
1084 if (RT_LIKELY( !(GCPtrMem & (sizeof(uint16_t) - 1))
1085 || TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, GCPtrMem, uint16_t) ))
1086 {
1087 /*
1088 * TLB lookup.
1089 */
1090 uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV(GCPtrMem);
1091 PCIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.DataTlb, uTagNoRev);
1092 if (RT_LIKELY( pTlbe->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)
1093 || (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)))
1094 {
1095 /*
1096 * Check TLB page table level access flags.
1097 */
1098 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
1099 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
1100 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
1101 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE
1102 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY
1103 | IEMTLBE_F_PT_NO_WRITE | fNoUser))
1104 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
1105 {
1106 /*
1107 * Do the push and return.
1108 */
1109# ifdef IEM_WITH_TLB_STATISTICS
1110 pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
1111# endif
1112 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
1113 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
1114 Log11Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " SS|%RGv (<-%RX64): " TMPL_MEM_FMT_TYPE " [sreg]\n",
1115 GCPtrMem, pVCpu->cpum.GstCtx.rsp, uValue));
1116 *(uint16_t *)&pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK] = (uint16_t)uValue;
1117 return;
1118 }
1119 }
1120 }
1121
1122 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
1123 outdated page pointer, or other troubles. (This will do a TLB load.) */
1124 Log12Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrMem));
1125# endif
1126 RT_CONCAT3(iemMemStoreStack,TMPL_MEM_FN_SUFF,SRegSafeJmp)(pVCpu, GCPtrMem, uValue);
1127}
1128# endif /* TMPL_WITH_PUSH_SREG */
1129
1130
1131/**
1132 * Stack fetch function that longjmps on error.
1133 */
1134DECL_INLINE_THROW(TMPL_MEM_TYPE)
1135RT_CONCAT3(iemMemFetchStack,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
1136{
1137# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
1138 /*
1139 * Apply segmentation to the address and check that the item doesn't cross
1140 * a page boundary.
1141 */
1142 RTGCPTR const GCPtrEff = iemMemApplySegmentToWriteJmp(pVCpu, X86_SREG_SS, sizeof(TMPL_MEM_TYPE), GCPtrMem);
1143# if TMPL_MEM_TYPE_SIZE > 1
1144 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(GCPtrEff)))
1145# endif
1146 {
1147 /*
1148 * TLB lookup.
1149 */
1150 uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV(GCPtrEff);
1151 PCIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.DataTlb, uTagNoRev);
1152 if (RT_LIKELY( pTlbe->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)
1153 || (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)))
1154 {
1155 /*
1156 * Check TLB page table level access flags.
1157 */
1158 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
1159 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
1160 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
1161 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
1162 | IEMTLBE_F_PT_NO_ACCESSED | fNoUser))
1163 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
1164 {
1165 /*
1166 * Do the pop.
1167 */
1168# ifdef IEM_WITH_TLB_STATISTICS
1169 pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
1170# endif
1171 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
1172 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
1173 TMPL_MEM_TYPE const uValue = *(TMPL_MEM_TYPE const *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK];
1174 Log9Ex(LOG_GROUP_IEM_MEM,("IEM RD " TMPL_MEM_FMT_DESC " SS|%RGv: " TMPL_MEM_FMT_TYPE "\n", GCPtrEff, uValue));
1175 return uValue;
1176 }
1177 }
1178 }
1179
1180 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
1181 outdated page pointer, or other troubles. (This will do a TLB load.) */
1182 Log10Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrEff));
1183# endif
1184 return RT_CONCAT3(iemMemFetchStack,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, GCPtrMem);
1185}
1186
1187
1188/**
1189 * Flat stack fetch function that longjmps on error.
1190 */
1191DECL_INLINE_THROW(TMPL_MEM_TYPE)
1192RT_CONCAT3(iemMemFlatFetchStack,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
1193{
1194# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
1195 /*
1196 * Check that the item doesn't cross a page boundary.
1197 */
1198# if TMPL_MEM_TYPE_SIZE > 1
1199 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(GCPtrMem)))
1200# endif
1201 {
1202 /*
1203 * TLB lookup.
1204 */
1205 uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV(GCPtrMem);
1206 PCIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.DataTlb, uTagNoRev);
1207 if (RT_LIKELY( pTlbe->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)
1208 || (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)))
1209 {
1210 /*
1211 * Check TLB page table level access flags.
1212 */
1213 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
1214 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
1215 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
1216 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
1217 | IEMTLBE_F_PT_NO_ACCESSED | fNoUser))
1218 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
1219 {
1220 /*
1221 * Do the pop.
1222 */
1223# ifdef IEM_WITH_TLB_STATISTICS
1224 pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
1225# endif
1226 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
1227 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
1228 TMPL_MEM_TYPE const uValue = *(TMPL_MEM_TYPE const *)&pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK];
1229 Log9Ex(LOG_GROUP_IEM_MEM,("IEM RD " TMPL_MEM_FMT_DESC " SS|%RGv: " TMPL_MEM_FMT_TYPE "\n", GCPtrMem, uValue));
1230 return uValue;
1231 }
1232 }
1233 }
1234
1235 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
1236 outdated page pointer, or other troubles. (This will do a TLB load.) */
1237 Log10Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrMem));
1238# endif
1239 return RT_CONCAT3(iemMemFetchStack,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, GCPtrMem);
1240}
1241
1242
1243/**
1244 * Stack push function that longjmps on error.
1245 */
1246DECL_INLINE_THROW(void)
1247RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
1248{
1249# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
1250 /*
1251 * Decrement the stack pointer (prep), apply segmentation and check that
1252 * the item doesn't cross a page boundary.
1253 */
1254 uint64_t uNewRsp;
1255 RTGCPTR const GCPtrTop = iemRegGetRspForPush(pVCpu, sizeof(TMPL_MEM_TYPE), &uNewRsp);
1256 RTGCPTR const GCPtrEff = iemMemApplySegmentToWriteJmp(pVCpu, X86_SREG_SS, sizeof(TMPL_MEM_TYPE), GCPtrTop);
1257# if TMPL_MEM_TYPE_SIZE > 1
1258 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(GCPtrEff)))
1259# endif
1260 {
1261 /*
1262 * TLB lookup.
1263 */
1264 uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV(GCPtrEff);
1265 PCIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.DataTlb, uTagNoRev);
1266 if (RT_LIKELY( pTlbe->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)
1267 || (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)))
1268 {
1269 /*
1270 * Check TLB page table level access flags.
1271 */
1272 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
1273 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
1274 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
1275 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE
1276 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY
1277 | IEMTLBE_F_PT_NO_WRITE | fNoUser))
1278 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
1279 {
1280 /*
1281 * Do the push and return.
1282 */
1283# ifdef IEM_WITH_TLB_STATISTICS
1284 pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
1285# endif
1286 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
1287 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
1288 Log11Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " SS|%RGv (%RX64->%RX64): " TMPL_MEM_FMT_TYPE "\n",
1289 GCPtrEff, pVCpu->cpum.GstCtx.rsp, uNewRsp, uValue));
1290 *(TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK] = uValue;
1291 pVCpu->cpum.GstCtx.rsp = uNewRsp;
1292 return;
1293 }
1294 }
1295 }
1296
1297 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
1298 outdated page pointer, or other troubles. (This will do a TLB load.) */
1299 Log12Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrEff));
1300# endif
1301 RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, uValue);
1302}
1303
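/* Note: the push worker above deliberately splits the operation into three steps:
   iemRegGetRspForPush() computes the decremented stack pointer without committing it,
   the value is written through the TLB fast path (or handed to the SafeJmp fallback),
   and only afterwards is pVCpu->cpum.GstCtx.rsp set to uNewRsp. Keeping the RSP commit
   last means a faulting store that longjmps leaves the guest stack pointer untouched.
   (The SafeJmp fallback only receives uValue, so it is assumed to redo the RSP
   calculation itself.) */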
1304
1305/**
1306 * Stack pop greg function that longjmps on error.
1307 */
1308DECL_INLINE_THROW(void)
1309RT_CONCAT3(iemMemStackPopGReg,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, uint8_t iGReg) IEM_NOEXCEPT_MAY_LONGJMP
1310{
1311 Assert(iGReg < 16);
1312
1313# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
1314 /*
1315 * Increment the stack pointer (prep), apply segmentation and check that
1316 * the item doesn't cross a page boundary.
1317 */
1318 uint64_t uNewRsp;
1319 RTGCPTR const GCPtrTop = iemRegGetRspForPop(pVCpu, sizeof(TMPL_MEM_TYPE), &uNewRsp);
1320 RTGCPTR const GCPtrEff = iemMemApplySegmentToWriteJmp(pVCpu, X86_SREG_SS, sizeof(TMPL_MEM_TYPE), GCPtrTop);
1321# if TMPL_MEM_TYPE_SIZE > 1
1322 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(GCPtrEff)))
1323# endif
1324 {
1325 /*
1326 * TLB lookup.
1327 */
1328 uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV(GCPtrEff);
1329 PCIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.DataTlb, uTagNoRev);
1330 if (RT_LIKELY( pTlbe->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)
1331 || (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)))
1332 {
1333 /*
1334 * Check TLB page table level access flags.
1335 */
1336 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
1337 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
1338 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
1339 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
1340 | IEMTLBE_F_PT_NO_ACCESSED | fNoUser))
1341 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
1342 {
1343 /*
1344 * Do the pop.
1345 */
1346# ifdef IEM_WITH_TLB_STATISTICS
1347 pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
1348# endif
1349 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
1350 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
1351 TMPL_MEM_TYPE const uValue = *(TMPL_MEM_TYPE const *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK];
1352 Log9Ex(LOG_GROUP_IEM_MEM,("IEM RD " TMPL_MEM_FMT_DESC " SS|%RGv (%RX64->%RX64): " TMPL_MEM_FMT_TYPE " (r%u)\n",
1353 GCPtrEff, pVCpu->cpum.GstCtx.rsp, uNewRsp, uValue, iGReg));
1354 pVCpu->cpum.GstCtx.rsp = uNewRsp; /* must be first for 16-bit */
1355# if TMPL_MEM_TYPE_SIZE == 2
1356 pVCpu->cpum.GstCtx.aGRegs[iGReg].u16 = uValue;
1357# elif TMPL_MEM_TYPE_SIZE == 4 || TMPL_MEM_TYPE_SIZE == 8
1358 pVCpu->cpum.GstCtx.aGRegs[iGReg].u = uValue;
1359# else
1360# error "TMPL_MEM_TYPE_SIZE"
1361# endif
1362 return;
1363 }
1364 }
1365 }
1366
1367 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
1368 outdated page pointer, or other troubles. (This will do a TLB load.) */
1369 Log10Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrEff));
1370# endif
1371 RT_CONCAT3(iemMemStackPopGReg,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, iGReg);
1372}
1373
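/*
 * Simplified sketch of the TLB hit test used throughout this file (a reading
 * of the code above; the invalidation rationale is an assumption): each tag
 * maps to a pair of entries, the even one matched against the current
 * uTlbRevision and the odd one (pTlbe + 1) against uTlbRevisionGlobal.
 * Because the revision is OR'ed into the stored tag, bumping a revision value
 * effectively retires every entry tagged with the old revision without
 * touching the TLB array itself:
 *
 *      uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV(GCPtrEff);
 *      PCIEMTLBENTRY  pTlbe     = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.DataTlb, uTagNoRev);
 *      bool const     fHit      =    pTlbe->uTag       == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)
 *                                 || (pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal);
 */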
1374# ifdef TMPL_WITH_PUSH_SREG
1375/**
1376 * Stack segment push function that longjmps on error.
1377 *
1378 * For a detailed discussion of the behaviour see the fallback functions
1379 * iemMemStackPushUxxSRegSafeJmp.
1380 */
1381DECL_INLINE_THROW(void)
1382RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,SRegJmp)(PVMCPUCC pVCpu, TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
1383{
1384# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
1385 /* See fallback for details on this weirdness: */
1386 bool const fIsIntel = IEM_IS_GUEST_CPU_INTEL(pVCpu);
1387 uint8_t const cbAccess = fIsIntel && !IEM_IS_REAL_MODE(pVCpu) ? sizeof(uint16_t) : sizeof(TMPL_MEM_TYPE);
1388
1389 /*
1390 * Decrement the stack pointer (prep), apply segmentation and check that
1391 * the item doesn't cross a page boundary.
1392 */
1393 uint64_t uNewRsp;
1394 RTGCPTR const GCPtrTop = iemRegGetRspForPush(pVCpu, sizeof(TMPL_MEM_TYPE), &uNewRsp);
1395 RTGCPTR const GCPtrEff = iemMemApplySegmentToWriteJmp(pVCpu, X86_SREG_SS, cbAccess, GCPtrTop);
1396# if TMPL_MEM_TYPE_SIZE > 1
1397 if (RT_LIKELY( !(GCPtrEff & (cbAccess - 1U))
1398 || ( cbAccess == sizeof(uint16_t)
1399 ? TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, GCPtrEff, uint16_t)
1400 : TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, GCPtrEff, TMPL_MEM_TYPE) ) ))
1401# endif
1402 {
1403 /*
1404 * TLB lookup.
1405 */
1406 uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV(GCPtrEff);
1407 PCIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.DataTlb, uTagNoRev);
1408 if (RT_LIKELY( pTlbe->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)
1409 || (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)))
1410 {
1411 /*
1412 * Check TLB page table level access flags.
1413 */
1414 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
1415 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
1416 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
1417 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE
1418 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY
1419 | IEMTLBE_F_PT_NO_WRITE | fNoUser))
1420 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
1421 {
1422 /*
1423 * Do the push and return.
1424 */
1425# ifdef IEM_WITH_TLB_STATISTICS
1426 pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
1427# endif
1428 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
1429 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
1430 if (cbAccess == sizeof(uint16_t))
1431 {
1432 Log11Ex(LOG_GROUP_IEM_MEM,("IEM WR 'word' SS|%RGv (%RX64->%RX64): %#06x [sreg/i]\n",
1433 GCPtrEff, pVCpu->cpum.GstCtx.rsp, uNewRsp, (uint16_t)uValue));
1434 *(uint16_t *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK] = (uint16_t)uValue;
1435 }
1436 else
1437 {
1438 TMPL_MEM_TYPE * const puSlot = (TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK];
1439 if (fIsIntel)
1440 {
1441 Assert(IEM_IS_REAL_MODE(pVCpu));
1442 uValue = (uint16_t)uValue | (pVCpu->cpum.GstCtx.eflags.u & (UINT32_C(0xffff0000) & ~X86_EFL_RAZ_MASK));
1443 Log11Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " SS|%RGv (%RX64->%RX64): " TMPL_MEM_FMT_TYPE " [sreg/ir]\n",
1444 GCPtrEff, pVCpu->cpum.GstCtx.rsp, uNewRsp, uValue));
1445 }
1446 else
1447 Log11Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " SS|%RGv (%RX64->%RX64): " TMPL_MEM_FMT_TYPE " [sreg]\n",
1448 GCPtrEff, pVCpu->cpum.GstCtx.rsp, uNewRsp, uValue));
1449 *puSlot = uValue;
1450 }
1451 pVCpu->cpum.GstCtx.rsp = uNewRsp;
1452 return;
1453 }
1454 }
1455 }
1456
1457 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
1458 outdated page pointer, or other troubles. (This will do a TLB load.) */
1459 Log12Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrEff));
1460# endif
1461 RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,SRegSafeJmp)(pVCpu, uValue);
1462}
1463# endif /* TMPL_WITH_PUSH_SREG */
1464
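/*
 * Hedged summary of the write widths handled above (the authoritative
 * discussion lives in the iemMemStackPushUxxSRegSafeJmp fallbacks); selector
 * value 0x0023 used purely for illustration:
 *
 *      Intel guest, protected mode:  cbAccess is 2, so only the low word of
 *          the stack slot is written (0x0023); the rest of the slot keeps
 *          whatever it previously held.
 *      Intel guest, real mode:       the full TMPL_MEM_TYPE is written, with
 *          the bits above the selector filled from the live EFLAGS value
 *          (reserved-as-zero bits masked off).
 *      Otherwise (e.g. AMD guests):  the full TMPL_MEM_TYPE value is stored
 *          exactly as passed in by the caller.
 */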
1465# if TMPL_MEM_TYPE_SIZE != 8
1466
1467/**
1468 * 32-bit flat stack push function that longjmps on error.
1469 */
1470DECL_INLINE_THROW(void)
1471RT_CONCAT3(iemMemFlat32StackPush,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
1472{
1473 Assert( pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig
1474 && pVCpu->cpum.GstCtx.ss.Attr.n.u4Type == X86_SEL_TYPE_RW_ACC
1475 && pVCpu->cpum.GstCtx.ss.u32Limit == UINT32_MAX
1476 && pVCpu->cpum.GstCtx.ss.u64Base == 0);
1477# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
1478 /*
1479 * Calculate the new stack pointer and check that the item doesn't cross a page boundary.
1480 */
1481 uint32_t const uNewEsp = pVCpu->cpum.GstCtx.esp - sizeof(TMPL_MEM_TYPE);
1482# if TMPL_MEM_TYPE_SIZE > 1
1483 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(uNewEsp)))
1484# endif
1485 {
1486 /*
1487 * TLB lookup.
1488 */
1489 uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV((RTGCPTR)uNewEsp); /* Doesn't work w/o casting to RTGCPTR (win /3 hangs). */
1490 PCIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.DataTlb, uTagNoRev);
1491 if (RT_LIKELY( pTlbe->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)
1492 || (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)))
1493 {
1494 /*
1495 * Check TLB page table level access flags.
1496 */
1497 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
1498 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
1499 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
1500 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE
1501 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY
1502 | IEMTLBE_F_PT_NO_WRITE | fNoUser))
1503 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
1504 {
1505 /*
1506 * Do the push and return.
1507 */
1508# ifdef IEM_WITH_TLB_STATISTICS
1509 pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
1510# endif
1511 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
1512 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
1513 Log11Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " SS|%RX32 (<-%RX32): " TMPL_MEM_FMT_TYPE "\n",
1514 uNewEsp, pVCpu->cpum.GstCtx.esp, uValue));
1515 *(TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[uNewEsp & GUEST_PAGE_OFFSET_MASK] = uValue;
1516 pVCpu->cpum.GstCtx.rsp = uNewEsp;
1517 return;
1518 }
1519 }
1520 }
1521
1522 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
1523 outdated page pointer, or other troubles. (This will do a TLB load.) */
1524 Log12Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RX32 falling back\n", LOG_FN_NAME, uNewEsp));
1525# endif
1526 RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, uValue);
1527}
1528
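/*
 * Worked example for the fNoUser trick used in all these TLB permission
 * checks: IEMTLBE_F_PT_NO_USER is compile-time asserted to be 4, so
 *
 *      CPL 0:  (0 + 1) & 4 = 0   - NO_USER bit ignored
 *      CPL 1:  (1 + 1) & 4 = 0   - ignored
 *      CPL 2:  (2 + 1) & 4 = 0   - ignored
 *      CPL 3:  (3 + 1) & 4 = 4   - a supervisor-only page makes the masked
 *                                  flags differ from uTlbPhysRev, so the code
 *                                  drops to the safe fallback.
 *
 * The user/supervisor test is thus folded into the single flag comparison
 * against uTlbPhysRev without branching on the CPL.
 */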
1529
1530/**
1531 * 32-bit flat stack greg pop function that longjmps on error.
1532 */
1533DECL_INLINE_THROW(void)
1534RT_CONCAT3(iemMemFlat32StackPopGReg,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, uint8_t iGReg) IEM_NOEXCEPT_MAY_LONGJMP
1535{
1536 Assert(iGReg < 16);
1537# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
1538 /*
1539 * Calculate the new stack pointer and check that the item doesn't cross a page boundary.
1540 */
1541 uint32_t const uOldEsp = pVCpu->cpum.GstCtx.esp;
1542# if TMPL_MEM_TYPE_SIZE > 1
1543 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(uOldEsp)))
1544# endif
1545 {
1546 /*
1547 * TLB lookup.
1548 */
1549 uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV((RTGCPTR)uOldEsp); /* Cast is required! 2023-08-11 */
1550 PCIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.DataTlb, uTagNoRev);
1551 if (RT_LIKELY( pTlbe->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)
1552 || (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)))
1553 {
1554 /*
1555 * Check TLB page table level access flags.
1556 */
1557 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
1558 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
1559 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
1560 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
1561 | IEMTLBE_F_PT_NO_ACCESSED | fNoUser))
1562 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
1563 {
1564 /*
1565 * Do the pop and update the register values.
1566 */
1567# ifdef IEM_WITH_TLB_STATISTICS
1568 pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
1569# endif
1570 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
1571 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
1572 TMPL_MEM_TYPE const uValue = *(TMPL_MEM_TYPE const *)&pTlbe->pbMappingR3[uOldEsp & GUEST_PAGE_OFFSET_MASK];
1573 pVCpu->cpum.GstCtx.rsp = uOldEsp + sizeof(TMPL_MEM_TYPE); /* must be first for 16-bit */
1574# if TMPL_MEM_TYPE_SIZE == 2
1575 pVCpu->cpum.GstCtx.aGRegs[iGReg].u16 = uValue;
1576# elif TMPL_MEM_TYPE_SIZE == 4
1577 pVCpu->cpum.GstCtx.aGRegs[iGReg].u = uValue;
1578# else
1579# error "TMPL_MEM_TYPE_SIZE"
1580# endif
1581 Log9Ex(LOG_GROUP_IEM_MEM,("IEM RD " TMPL_MEM_FMT_DESC " SS|%RX32 (->%RX32): " TMPL_MEM_FMT_TYPE " (r%u)\n",
1582 uOldEsp, uOldEsp + sizeof(TMPL_MEM_TYPE), uValue, iGReg));
1583 return;
1584 }
1585 }
1586 }
1587
1588 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
1589 outdated page pointer, or other troubles. (This will do a TLB load.) */
1590 Log10Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RX32 falling back\n", LOG_FN_NAME, uOldEsp));
1591# endif
1592 RT_CONCAT3(iemMemStackPopGReg,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, iGReg);
1593}
1594
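/*
 * Note on the "must be first" ordering used by the pop helpers (a reading of
 * the code, offered as an assumption): for a 16-bit "pop sp" the destination
 * register is xSP itself, so the RSP update has to happen before the general
 * register store; otherwise the freshly popped value would immediately be
 * overwritten by the incremented stack pointer.  With the current order, the
 * sequence for iGReg == X86_GREG_xSP ends with SP holding the popped word.
 */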
1595 # endif /* TMPL_MEM_TYPE_SIZE != 8 */
1596
1597# ifdef TMPL_WITH_PUSH_SREG
1598/**
1599 * 32-bit flat stack segment push function that longjmps on error.
1600 *
1601 * For a detailed discussion of the behaviour see the fallback functions
1602 * iemMemStackPushUxxSRegSafeJmp.
1603 */
1604DECL_INLINE_THROW(void)
1605RT_CONCAT3(iemMemFlat32StackPush,TMPL_MEM_FN_SUFF,SRegJmp)(PVMCPUCC pVCpu, TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
1606{
1607# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
1608 /* See fallback for details on this weirdness: */
1609 bool const fIsIntel = IEM_IS_GUEST_CPU_INTEL(pVCpu);
1610 uint8_t const cbAccess = fIsIntel && !IEM_IS_REAL_MODE(pVCpu) ? sizeof(uint16_t) : sizeof(TMPL_MEM_TYPE);
1611
1612 /*
1613 * Calculate the new stack pointer and check that the item doesn't cross a page boundary.
1614 */
1615 uint32_t const uNewEsp = pVCpu->cpum.GstCtx.esp - sizeof(TMPL_MEM_TYPE);
1616 if (RT_LIKELY( !(uNewEsp & (cbAccess - 1))
1617 || (cbAccess == sizeof(uint16_t)
1618 ? TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, uNewEsp, uint16_t)
1619 : TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, uNewEsp, TMPL_MEM_TYPE)) ))
1620 {
1621 /*
1622 * TLB lookup.
1623 */
1624 uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV((RTGCPTR)uNewEsp); /* Doesn't work w/o casting to RTGCPTR (win /3 hangs). */
1625 PCIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.DataTlb, uTagNoRev);
1626 if (RT_LIKELY( pTlbe->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)
1627 || (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)))
1628 {
1629 /*
1630 * Check TLB page table level access flags.
1631 */
1632 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
1633 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
1634 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
1635 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE
1636 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY
1637 | IEMTLBE_F_PT_NO_WRITE | fNoUser))
1638 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
1639 {
1640 /*
1641 * Do the push and return.
1642 */
1643# ifdef IEM_WITH_TLB_STATISTICS
1644 pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
1645# endif
1646 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
1647 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
1648 if (cbAccess == sizeof(uint16_t))
1649 {
1650 Log11Ex(LOG_GROUP_IEM_MEM,("IEM WR 'word' SS|%RX32 (<-%RX32): %#06x [sreg/i]\n",
1651 uNewEsp, pVCpu->cpum.GstCtx.esp, (uint16_t)uValue));
1652 *(uint16_t *)&pTlbe->pbMappingR3[uNewEsp & GUEST_PAGE_OFFSET_MASK] = (uint16_t)uValue;
1653 }
1654 else
1655 {
1656 TMPL_MEM_TYPE * const puSlot = (TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[uNewEsp & GUEST_PAGE_OFFSET_MASK];
1657 if (fIsIntel)
1658 {
1659 Assert(IEM_IS_REAL_MODE(pVCpu));
1660 uValue = (uint16_t)uValue | (pVCpu->cpum.GstCtx.eflags.u & (UINT32_C(0xffff0000) & ~X86_EFL_RAZ_MASK));
1661 Log11Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " SS|%RX32 (<-%RX32): " TMPL_MEM_FMT_TYPE " [sreg/ir]\n",
1662 uNewEsp, pVCpu->cpum.GstCtx.esp, uValue));
1663 }
1664 else
1665 Log11Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " SS|%RX32 (<-%RX32): " TMPL_MEM_FMT_TYPE " [sreg]\n",
1666 uNewEsp, pVCpu->cpum.GstCtx.esp, uValue));
1667 *puSlot = uValue;
1668 }
1669 pVCpu->cpum.GstCtx.rsp = uNewEsp;
1670 return;
1671 }
1672 }
1673 }
1674
1675 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
1676 outdated page pointer, or other troubles. (This will do a TLB load.) */
1677 Log12Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RX32 falling back\n", LOG_FN_NAME, uNewEsp));
1678# endif
1679 RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,SRegSafeJmp)(pVCpu, uValue);
1680}
1681# endif /* TMPL_WITH_PUSH_SREG */
1682
1683# if TMPL_MEM_TYPE_SIZE != 4
1684
1685/**
1686 * 64-bit flat stack push function that longjmps on error.
1687 */
1688DECL_INLINE_THROW(void)
1689RT_CONCAT3(iemMemFlat64StackPush,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
1690{
1691# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
1692 /*
1693 * Calculate the new stack pointer and check that the item doesn't cross a page boundary.
1694 */
1695 uint64_t const uNewRsp = pVCpu->cpum.GstCtx.rsp - sizeof(TMPL_MEM_TYPE);
1696# if TMPL_MEM_TYPE_SIZE > 1
1697 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(uNewRsp)))
1698# endif
1699 {
1700 /*
1701 * TLB lookup.
1702 */
1703 uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV(uNewRsp);
1704 PCIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.DataTlb, uTagNoRev);
1705 if (RT_LIKELY( pTlbe->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)
1706 || (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)))
1707 {
1708 /*
1709 * Check TLB page table level access flags.
1710 */
1711 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
1712 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
1713 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
1714 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE
1715 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY
1716 | IEMTLBE_F_PT_NO_WRITE | fNoUser))
1717 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
1718 {
1719 /*
1720 * Do the push and return.
1721 */
1722# ifdef IEM_WITH_TLB_STATISTICS
1723 pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
1724# endif
1725 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
1726 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
1727 Log11Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " SS|%RX64 (<-%RX64): " TMPL_MEM_FMT_TYPE "\n",
1728 uNewRsp, pVCpu->cpum.GstCtx.rsp, uValue));
1729 *(TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[uNewRsp & GUEST_PAGE_OFFSET_MASK] = uValue;
1730 pVCpu->cpum.GstCtx.rsp = uNewRsp;
1731 return;
1732 }
1733 }
1734 }
1735
1736 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
1737 outdated page pointer, or other troubles. (This will do a TLB load.) */
1738 Log12Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RX64 falling back\n", LOG_FN_NAME, uNewRsp));
1739# endif
1740 RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, uValue);
1741}
1742
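/*
 * Worked example for the page-crossing concern flagged in the comments above
 * (assuming 4 KiB guest pages): for an 8-byte push with
 * (uNewRsp & GUEST_PAGE_OFFSET_MASK) == 0xffc, the write would cover offsets
 * 0xffc through 0x1003, i.e. two guest pages and potentially two distinct
 * mappings, so the alignment/page check rejects the inline path and the Safe
 * fallback handles the split access.  Only accesses contained within a single
 * page are serviced through the one pbMappingR3 pointer of the TLB entry.
 */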
1743
1744/**
1745 * 64-bit flat stack greg pop function that longjmps on error.
1746 */
1747DECL_INLINE_THROW(void)
1748RT_CONCAT3(iemMemFlat64StackPopGReg,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, uint8_t iGReg) IEM_NOEXCEPT_MAY_LONGJMP
1749{
1750 Assert(iGReg < 16);
1751# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
1752 /*
1753 * Calculate the new stack pointer and check that the item doesn't cross a page boundary.
1754 */
1755 uint64_t const uOldRsp = pVCpu->cpum.GstCtx.rsp;
1756# if TMPL_MEM_TYPE_SIZE > 1
1757 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(uOldRsp)))
1758# endif
1759 {
1760 /*
1761 * TLB lookup.
1762 */
1763 uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV(uOldRsp);
1764 PCIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.DataTlb, uTagNoRev);
1765 if (RT_LIKELY( pTlbe->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)
1766 || (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)))
1767 {
1768 /*
1769 * Check TLB page table level access flags.
1770 */
1771 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
1772 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
1773 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
1774 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
1775 | IEMTLBE_F_PT_NO_ACCESSED | fNoUser))
1776 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
1777 {
1778 /*
1779 * Do the pop and update the register values.
1780 */
1781# ifdef IEM_WITH_TLB_STATISTICS
1782 pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
1783# endif
1784 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
1785 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
1786 TMPL_MEM_TYPE const uValue = *(TMPL_MEM_TYPE const *)&pTlbe->pbMappingR3[uOldRsp & GUEST_PAGE_OFFSET_MASK];
1787 pVCpu->cpum.GstCtx.rsp = uOldRsp + sizeof(TMPL_MEM_TYPE); /* must be first for 16-bit */
1788# if TMPL_MEM_TYPE_SIZE == 2
1789 pVCpu->cpum.GstCtx.aGRegs[iGReg].u16 = uValue;
1790# elif TMPL_MEM_TYPE_SIZE == 8
1791 pVCpu->cpum.GstCtx.aGRegs[iGReg].u = uValue;
1792# else
1793# error "TMPL_MEM_TYPE_SIZE"
1794# endif
1795 Log9Ex(LOG_GROUP_IEM_MEM,("IEM RD " TMPL_MEM_FMT_DESC " SS|%RX64 (->%RX64): " TMPL_MEM_FMT_TYPE " (r%u)\n",
1796 uOldRsp, uOldRsp + sizeof(TMPL_MEM_TYPE), uValue, iGReg));
1797 return;
1798 }
1799 }
1800 }
1801
1802 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
1803 outdated page pointer, or other troubles. (This will do a TLB load.) */
1804 Log10Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RX64 falling back\n", LOG_FN_NAME, uOldRsp));
1805# endif
1806 RT_CONCAT3(iemMemStackPopGReg,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, iGReg);
1807}
1808
1809# endif /* TMPL_MEM_TYPE_SIZE != 4 */
1810
1811# endif /* IEM_WITH_SETJMP */
1812# endif /* TMPL_MEM_WITH_STACK */
1813
1814
1815#endif /* IEM_WITH_SETJMP */
1816
1817#undef TMPL_MEM_TYPE
1818#undef TMPL_MEM_TYPE_ALIGN
1819#undef TMPL_MEM_TYPE_SIZE
1820#undef TMPL_MEM_FN_SUFF
1821#undef TMPL_MEM_FMT_TYPE
1822#undef TMPL_MEM_FMT_DESC
1823#undef TMPL_MEM_NO_STORE
1824#undef TMPL_MEM_ALIGN_CHECK
1825#undef TMPL_MEM_BY_REF
1826
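/*
 * Hypothetical usage sketch (parameter values assumed, not taken from this
 * file): the includer defines the TMPL_MEM_* parameters that the #undef block
 * above clears, then includes the template once per access size, e.g.:
 *
 *      #define TMPL_MEM_TYPE       uint32_t
 *      #define TMPL_MEM_TYPE_SIZE  4
 *      #define TMPL_MEM_TYPE_ALIGN 3
 *      #define TMPL_MEM_FN_SUFF    U32
 *      #define TMPL_MEM_FMT_TYPE   "%#010x"
 *      #define TMPL_MEM_FMT_DESC   "dword"
 *      #define TMPL_MEM_WITH_STACK
 *      #include "IEMAllMemRWTmplInline.cpp.h"
 *
 * The #undef block above resets the parameters so the next instantiation
 * (say U64/uint64_t) starts from a clean slate.
 */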