VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllMemRWTmplInline.cpp.h@ 104932

Last change on this file since 104932 was 102977, checked in by vboxsync, 10 months ago

VMM/IEM: Implemented generic fallback for misaligned x86 locking that is not compatible with the host. Using the existing split-lock solution with VINF_EM_EMULATE_SPLIT_LOCK from bugref:10052. We keep ignoring the 'lock' prefix in the recompiler for single CPU VMs (now also on amd64 hosts). bugref:10547

1/* $Id: IEMAllMemRWTmplInline.cpp.h 102977 2024-01-19 23:11:30Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - Inlined R/W Memory Functions Template.
4 */
5
6/*
7 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/* Check template parameters. */
30#ifndef TMPL_MEM_TYPE
31# error "TMPL_MEM_TYPE is undefined"
32#endif
33#ifndef TMPL_MEM_TYPE_SIZE
34# error "TMPL_MEM_TYPE_SIZE is undefined"
35#endif
36#ifndef TMPL_MEM_TYPE_ALIGN
37# error "TMPL_MEM_TYPE_ALIGN is undefined"
38#endif
39#ifndef TMPL_MEM_FN_SUFF
40# error "TMPL_MEM_FN_SUFF is undefined"
41#endif
42#ifndef TMPL_MEM_FMT_TYPE
43# error "TMPL_MEM_FMT_TYPE is undefined"
44#endif
45#ifndef TMPL_MEM_FMT_DESC
46# error "TMPL_MEM_FMT_DESC is undefined"
47#endif
48
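/*
 * Illustrative sketch (not part of the template): a 32-bit instantiation of
 * this file would define the parameters roughly as below before including it.
 * The exact values and include sites live elsewhere in IEM and may differ;
 * this only shows the shape of the contract checked above.
 *
 *      #define TMPL_MEM_TYPE       uint32_t
 *      #define TMPL_MEM_TYPE_SIZE  4
 *      #define TMPL_MEM_TYPE_ALIGN 3
 *      #define TMPL_MEM_FN_SUFF    U32
 *      #define TMPL_MEM_FMT_TYPE   "%#010x"
 *      #define TMPL_MEM_FMT_DESC   "dword"
 *      #include "IEMAllMemRWTmplInline.cpp.h"
 */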
49
50/** Helper for checking if @a a_GCPtr is acceptably aligned and fully within
51 * the page for a TMPL_MEM_TYPE. */
52#if TMPL_MEM_TYPE_ALIGN + 1 < TMPL_MEM_TYPE_SIZE
53# define TMPL_MEM_ALIGN_CHECK(a_GCPtr) ( ( !((a_GCPtr) & TMPL_MEM_TYPE_ALIGN) \
54 && ((a_GCPtr) & GUEST_PAGE_OFFSET_MASK) <= GUEST_PAGE_SIZE - sizeof(TMPL_MEM_TYPE)) \
55 || TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, (a_GCPtr), TMPL_MEM_TYPE))
56#else
57# define TMPL_MEM_ALIGN_CHECK(a_GCPtr) ( !((a_GCPtr) & TMPL_MEM_TYPE_ALIGN) /* If aligned, it will be within the page. */ \
58 || TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, (a_GCPtr), TMPL_MEM_TYPE))
59#endif
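
/*
 * Worked example (illustrative): with TMPL_MEM_TYPE_SIZE=4 and
 * TMPL_MEM_TYPE_ALIGN=3, a page offset of 0xff8 takes the fast path
 * (0xff8 & 3 == 0 and 0xff8 <= 0x1000 - 4).  A misaligned offset such as
 * 0xff9 still fits within the page and is only fast-pathed if the
 * includer-defined TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK predicate accepts
 * it; anything that would spill over the page boundary ends up in the
 * careful fallback code.
 */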
60
61/**
62 * Values have to be passed by reference if larger than uint64_t.
63 *
64 * This is a restriction of the Visual C++ AMD64 calling convention; the
65 * gcc AMD64 and ARM64 ABIs can easily pass and return 128-bit values via
66 * registers. For larger values like RTUINT256U, Visual C++ AMD64 and ARM64
67 * pass them by hidden reference, whereas the gcc AMD64 ABI will use the stack.
68 *
69 * So, to avoid passing anything on the stack, we just explicitly pass values by
70 * reference (pointer) if they are larger than uint64_t. This ASSUMES 64-bit
71 * host.
72 */
73#if TMPL_MEM_TYPE_SIZE > 8
74# define TMPL_MEM_BY_REF
75#else
76# undef TMPL_MEM_BY_REF
77#endif
78
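/*
 * Illustrative sketch: with a U128 instantiation (TMPL_MEM_TYPE_SIZE > 8) the
 * fetcher below expands to something like
 *      void iemMemFetchDataU128Jmp(PVMCPUCC pVCpu, RTUINT128U *pValue, uint8_t iSegReg, RTGCPTR GCPtrMem);
 * while a U32 instantiation returns by value:
 *      uint32_t iemMemFetchDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem);
 * (The suffixes are assumptions based on the RT_CONCAT3 usage below.)
 */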
79
80#ifdef IEM_WITH_SETJMP
81
82
83/*********************************************************************************************************************************
84* Fetches *
85*********************************************************************************************************************************/
86
87/**
88 * Inlined fetch function that longjumps on error.
89 *
90 * @note The @a iSegReg is not allowed to be UINT8_MAX!
91 */
92#ifdef TMPL_MEM_BY_REF
93DECL_INLINE_THROW(void)
94RT_CONCAT3(iemMemFetchData,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, TMPL_MEM_TYPE *pValue, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
95#else
96DECL_INLINE_THROW(TMPL_MEM_TYPE)
97RT_CONCAT3(iemMemFetchData,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
98#endif
99{
100 AssertCompile(sizeof(TMPL_MEM_TYPE) == TMPL_MEM_TYPE_SIZE);
101# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
102 /*
103 * Convert from segmented to flat address and check that it doesn't cross a page boundrary.
104 */
105 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(TMPL_MEM_TYPE), GCPtrMem);
106# if TMPL_MEM_TYPE_SIZE > 1
107 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(GCPtrEff)))
108# endif
109 {
110 /*
111 * TLB lookup.
112 */
113 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrEff);
114 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
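        /* The tag calculated here folds the guest page number together with the
           data TLB's current revision (see the IEMTLB_CALC_TAG definition), so a
           whole-TLB flush only needs to bump the revision to make every existing
           tag mismatch instead of clearing the array. */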
115 if (RT_LIKELY(pTlbe->uTag == uTag))
116 {
117 /*
118 * Check TLB page table level access flags.
119 */
120 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
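            /* CPL 3 gives (3 + 1) & 4 == IEMTLBE_F_PT_NO_USER, so only user-mode
               accesses check the no-user bit; CPL 0 thru 2 give 0 and the bit is
               ignored.  That is what the AssertCompile above pins down. */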
121 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
122 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
123 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_NO_MAPPINGR3 | fNoUser))
124 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
125 {
126 /*
127 * Fetch and return the data.
128 */
129 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
130 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
131 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
132# ifdef TMPL_MEM_BY_REF
133 *pValue = *(TMPL_MEM_TYPE const *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK];
134 LogEx(LOG_GROUP_IEM_MEM,("IEM RD " TMPL_MEM_FMT_DESC " %d|%RGv=%RGv: %." RT_XSTR(TMPL_MEM_TYPE_SIZE) "Rhxs\n",
135 iSegReg, GCPtrMem, GCPtrEff, pValue));
136 return;
137# else
138 TMPL_MEM_TYPE const uRet = *(TMPL_MEM_TYPE const *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK];
139 LogEx(LOG_GROUP_IEM_MEM,("IEM RD " TMPL_MEM_FMT_DESC " %d|%RGv=%RGv: " TMPL_MEM_FMT_TYPE "\n",
140 iSegReg, GCPtrMem, GCPtrEff, uRet));
141 return uRet;
142# endif
143 }
144 }
145 }
146
147 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
148 outdated page pointer, or other troubles. (This will do a TLB load.) */
149 LogEx(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %u:%RGv falling back\n", LOG_FN_NAME, iSegReg, GCPtrMem));
150# endif
151# ifdef TMPL_MEM_BY_REF
152 RT_CONCAT3(iemMemFetchData,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, pValue, iSegReg, GCPtrMem);
153# else
154 return RT_CONCAT3(iemMemFetchData,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, iSegReg, GCPtrMem);
155# endif
156}
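
/*
 * Usage sketch (illustrative, not from this file): with a U32 instantiation
 * the instruction emulation code can fetch a dword operand like this, any
 * #GP/#PF being delivered via longjmp instead of a status code:
 *
 *      uint32_t const uSrc = iemMemFetchDataU32Jmp(pVCpu, X86_SREG_DS, GCPtrEffSrc);
 */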
157
158
159/**
160 * Inlined flat addressing fetch function that longjumps on error.
161 */
162# ifdef TMPL_MEM_BY_REF
163DECL_INLINE_THROW(void)
164RT_CONCAT3(iemMemFlatFetchData,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, TMPL_MEM_TYPE *pValue, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
165# else
166DECL_INLINE_THROW(TMPL_MEM_TYPE)
167RT_CONCAT3(iemMemFlatFetchData,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
168# endif
169{
170 AssertMsg( (pVCpu->iem.s.fExec & IEM_F_MODE_MASK) == IEM_F_MODE_X86_64BIT
171 || (pVCpu->iem.s.fExec & IEM_F_MODE_MASK) == IEM_F_MODE_X86_32BIT_PROT_FLAT
172 || (pVCpu->iem.s.fExec & IEM_F_MODE_MASK) == IEM_F_MODE_X86_32BIT_FLAT, ("%#x\n", pVCpu->iem.s.fExec));
173# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
174 /*
175 * Check that it doesn't cross a page boundrary.
176 */
177# if TMPL_MEM_TYPE_SIZE > 1
178 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(GCPtrMem)))
179# endif
180 {
181 /*
182 * TLB lookup.
183 */
184 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
185 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
186 if (RT_LIKELY(pTlbe->uTag == uTag))
187 {
188 /*
189 * Check TLB page table level access flags.
190 */
191 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
192 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
193 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
194 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_NO_MAPPINGR3 | fNoUser))
195 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
196 {
197 /*
198 * Fetch and return the data.
199 */
200 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
201 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
202 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
203# ifdef TMPL_MEM_BY_REF
204 *pValue = *(TMPL_MEM_TYPE const *)&pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK];
205 LogEx(LOG_GROUP_IEM_MEM,("IEM RD " TMPL_MEM_FMT_DESC " %RGv: %." RT_XSTR(TMPL_MEM_TYPE_SIZE) "Rhxs\n",
206 GCPtrMem, pValue));
207 return;
208# else
209 TMPL_MEM_TYPE const uRet = *(TMPL_MEM_TYPE const *)&pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK];
210 LogEx(LOG_GROUP_IEM_MEM,("IEM RD " TMPL_MEM_FMT_DESC " %RGv: " TMPL_MEM_FMT_TYPE "\n", GCPtrMem, uRet));
211 return uRet;
212# endif
213 }
214 }
215 }
216
217 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
218 outdated page pointer, or other troubles. (This will do a TLB load.) */
219 LogEx(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrMem));
220# endif
221# ifdef TMPL_MEM_BY_REF
222 RT_CONCAT3(iemMemFetchData,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, pValue, UINT8_MAX, GCPtrMem);
223# else
224 return RT_CONCAT3(iemMemFetchData,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, UINT8_MAX, GCPtrMem);
225# endif
226}
227
228
229/*********************************************************************************************************************************
230* Stores *
231*********************************************************************************************************************************/
232# ifndef TMPL_MEM_NO_STORE
233
234/**
235 * Inlined store function that longjumps on error.
236 *
237 * @note The @a iSegReg is not allowed to be UINT8_MAX!
238 */
239DECL_INLINE_THROW(void)
240RT_CONCAT3(iemMemStoreData,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
241# ifdef TMPL_MEM_BY_REF
242 TMPL_MEM_TYPE const *pValue) IEM_NOEXCEPT_MAY_LONGJMP
243# else
244 TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
245# endif
246{
247# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
248 /*
249 * Convert from segmented to flat address and check that it doesn't cross a page boundrary.
250 */
251 RTGCPTR GCPtrEff = iemMemApplySegmentToWriteJmp(pVCpu, iSegReg, sizeof(TMPL_MEM_TYPE), GCPtrMem);
252# if TMPL_MEM_TYPE_SIZE > 1
253 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(GCPtrEff)))
254# endif
255 {
256 /*
257 * TLB lookup.
258 */
259 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrEff);
260 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
261 if (RT_LIKELY(pTlbe->uTag == uTag))
262 {
263 /*
264 * Check TLB page table level access flags.
265 */
266 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
267 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
268 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE
269 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_PT_NO_WRITE
270 | IEMTLBE_F_NO_MAPPINGR3 | fNoUser))
271 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
272 {
273 /*
274 * Store the value and return.
275 */
276 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
277 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
278 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
279# ifdef TMPL_MEM_BY_REF
280 *(TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK] = *pValue;
281 Log5Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " %d|%RGv=%RGv: %." RT_XSTR(TMPL_MEM_TYPE_SIZE) "Rhxs (%04x:%RX64)\n",
282 iSegReg, GCPtrMem, GCPtrEff, pValue, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
283# else
284 *(TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK] = uValue;
285 Log5Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " %d|%RGv=%RGv: " TMPL_MEM_FMT_TYPE " (%04x:%RX64)\n",
286 iSegReg, GCPtrMem, GCPtrEff, uValue, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
287# endif
288 return;
289 }
290 }
291 }
292
293 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
294 outdated page pointer, or other troubles. (This will do a TLB load.) */
295 Log6Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %u:%RGv falling back\n", LOG_FN_NAME, iSegReg, GCPtrMem));
296# endif
297# ifdef TMPL_MEM_BY_REF
298 RT_CONCAT3(iemMemStoreData,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, iSegReg, GCPtrMem, pValue);
299# else
300 RT_CONCAT3(iemMemStoreData,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, iSegReg, GCPtrMem, uValue);
301# endif
302}
303
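/*
 * Usage sketch (illustrative, not from this file): the store counterpart for
 * a U32 instantiation takes the value last, mirroring the parameter order of
 * the template function above:
 *
 *      iemMemStoreDataU32Jmp(pVCpu, X86_SREG_ES, GCPtrEffDst, uValue);
 */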
304
305/**
306 * Inlined flat addressing store function that longjumps on error.
307 */
308DECL_INLINE_THROW(void)
309RT_CONCAT3(iemMemFlatStoreData,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, RTGCPTR GCPtrMem,
310# ifdef TMPL_MEM_BY_REF
311 TMPL_MEM_TYPE const *pValue) IEM_NOEXCEPT_MAY_LONGJMP
312# else
313 TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
314# endif
315{
316 AssertMsg( (pVCpu->iem.s.fExec & IEM_F_MODE_MASK) == IEM_F_MODE_X86_64BIT
317 || (pVCpu->iem.s.fExec & IEM_F_MODE_MASK) == IEM_F_MODE_X86_32BIT_PROT_FLAT
318 || (pVCpu->iem.s.fExec & IEM_F_MODE_MASK) == IEM_F_MODE_X86_32BIT_FLAT, ("%#x\n", pVCpu->iem.s.fExec));
319# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
320 /*
321 * Check that it doesn't cross a page boundrary.
322 */
323# if TMPL_MEM_TYPE_SIZE > 1
324 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(GCPtrMem)))
325# endif
326 {
327 /*
328 * TLB lookup.
329 */
330 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
331 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
332 if (RT_LIKELY(pTlbe->uTag == uTag))
333 {
334 /*
335 * Check TLB page table level access flags.
336 */
337 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
338 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
339 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE
340 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_PT_NO_WRITE
341 | IEMTLBE_F_NO_MAPPINGR3 | fNoUser))
342 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
343 {
344 /*
345 * Store the value and return.
346 */
347 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
348 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
349 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
350# ifdef TMPL_MEM_BY_REF
351 *(TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK] = *pValue;
352 Log5Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " %RGv: %." RT_XSTR(TMPL_MEM_TYPE_SIZE) "Rhxs\n",
353 GCPtrMem, pValue));
354# else
355 *(TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK] = uValue;
356 Log5Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " %RGv: " TMPL_MEM_FMT_TYPE "\n", GCPtrMem, uValue));
357# endif
358 return;
359 }
360 }
361 }
362
363 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
364 outdated page pointer, or other troubles. (This will do a TLB load.) */
365 Log6Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrMem));
366# endif
367# ifdef TMPL_MEM_BY_REF
368 RT_CONCAT3(iemMemStoreData,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, UINT8_MAX, GCPtrMem, pValue);
369# else
370 RT_CONCAT3(iemMemStoreData,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, UINT8_MAX, GCPtrMem, uValue);
371# endif
372}
373
374# endif /* !TMPL_MEM_NO_STORE */
375
376
377/*********************************************************************************************************************************
378* Mapping / Direct Memory Access *
379*********************************************************************************************************************************/
380# ifndef TMPL_MEM_NO_MAPPING
381
382/**
383 * Inlined read-write memory mapping function that longjumps on error.
384 *
385 * Almost identical to RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,AtJmp).
386 */
387DECL_INLINE_THROW(TMPL_MEM_TYPE *)
388RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,RwJmp)(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
389 uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
390{
391# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
392 /*
393 * Convert from segmented to flat address and check that it doesn't cross a page boundrary.
394 */
395 RTGCPTR const GCPtrEff = iemMemApplySegmentToWriteJmp(pVCpu, iSegReg, sizeof(TMPL_MEM_TYPE), GCPtrMem);
396# if TMPL_MEM_TYPE_SIZE > 1
397 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(GCPtrEff)))
398# endif
399 {
400 /*
401 * TLB lookup.
402 */
403 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrEff);
404 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
405 if (RT_LIKELY(pTlbe->uTag == uTag))
406 {
407 /*
408 * Check TLB page table level access flags.
409 */
410 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
411 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
412 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
413 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
414 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_PT_NO_WRITE
415 | fNoUser))
416 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
417 {
418 /*
419 * Return the address.
420 */
421 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
422 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
423 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
424 *pbUnmapInfo = 0;
425 Log7Ex(LOG_GROUP_IEM_MEM,("IEM RW/map " TMPL_MEM_FMT_DESC " %d|%RGv=%RGv: %p\n",
426 iSegReg, GCPtrMem, GCPtrEff, &pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK]));
427 return (TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK];
428 }
429 }
430 }
431
432 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
433 outdated page pointer, or other troubles. (This will do a TLB load.) */
434 Log8Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %u:%RGv falling back\n", LOG_FN_NAME, iSegReg, GCPtrMem));
435# endif
436 return RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,RwSafeJmp)(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
437}
438
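/*
 * Usage sketch (illustrative, not from this file): the mapping helpers pair
 * with a commit/unmap call once the read-modify-write is done, with
 * bUnmapInfo carrying whatever state the unmap side needs.  The commit
 * helper name below is an assumption suggested by the pbUnmapInfo parameter:
 *
 *      uint8_t   bUnmapInfo;
 *      uint32_t *pu32Dst = iemMemMapDataU32RwJmp(pVCpu, &bUnmapInfo, iSegReg, GCPtrEff);
 *      *pu32Dst |= fBitsToSet;
 *      iemMemCommitAndUnmapRwJmp(pVCpu, bUnmapInfo);
 */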
439
440/**
441 * Inlined flat read-write memory mapping function that longjumps on error.
442 *
443 * Almost identical to RT_CONCAT3(iemMemFlatMapData,TMPL_MEM_FN_SUFF,AtJmp).
444 */
445DECL_INLINE_THROW(TMPL_MEM_TYPE *)
446RT_CONCAT3(iemMemFlatMapData,TMPL_MEM_FN_SUFF,RwJmp)(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
447 RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
448{
449# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
450 /*
451 * Check that the address doesn't cross a page boundrary.
452 */
453# if TMPL_MEM_TYPE_SIZE > 1
454 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(GCPtrMem)))
455# endif
456 {
457 /*
458 * TLB lookup.
459 */
460 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
461 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
462 if (RT_LIKELY(pTlbe->uTag == uTag))
463 {
464 /*
465 * Check TLB page table level access flags.
466 */
467 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
468 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
469 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
470 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
471 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_PT_NO_WRITE
472 | fNoUser))
473 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
474 {
475 /*
476 * Return the address.
477 */
478 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
479 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
480 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
481 *pbUnmapInfo = 0;
482 Log7Ex(LOG_GROUP_IEM_MEM,("IEM RW/map " TMPL_MEM_FMT_DESC " %RGv: %p\n",
483 GCPtrMem, &pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK]));
484 return (TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK];
485 }
486 }
487 }
488
489 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
490 outdated page pointer, or other troubles. (This will do a TLB load.) */
491 Log8Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrMem));
492# endif
493 return RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,RwSafeJmp)(pVCpu, pbUnmapInfo, UINT8_MAX, GCPtrMem);
494}
495
496# ifdef TMPL_MEM_WITH_ATOMIC_MAPPING
497
498/**
499 * Inlined atomic read-write memory mapping function that longjumps on error.
500 *
501 * Almost identical to RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,RwJmp).
502 */
503DECL_INLINE_THROW(TMPL_MEM_TYPE *)
504RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,AtJmp)(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
505 uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
506{
507# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
508 /*
509 * Convert from segmented to flat address and check that it doesn't cross a page boundrary.
510 */
511 RTGCPTR const GCPtrEff = iemMemApplySegmentToWriteJmp(pVCpu, iSegReg, sizeof(TMPL_MEM_TYPE), GCPtrMem);
512# if TMPL_MEM_TYPE_SIZE > 1
513 if (RT_LIKELY(!(GCPtrEff & TMPL_MEM_TYPE_ALIGN))) /* strictly aligned, otherwise fall back to the slow path which knows the details. */
514# endif
515 {
516 /*
517 * TLB lookup.
518 */
519 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrEff);
520 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
521 if (RT_LIKELY(pTlbe->uTag == uTag))
522 {
523 /*
524 * Check TLB page table level access flags.
525 */
526 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
527 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
528 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
529 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
530 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_PT_NO_WRITE
531 | fNoUser))
532 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
533 {
534 /*
535 * Return the address.
536 */
537 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
538 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
539 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
540 *pbUnmapInfo = 0;
541 Log7Ex(LOG_GROUP_IEM_MEM,("IEM AT/map " TMPL_MEM_FMT_DESC " %d|%RGv=%RGv: %p\n",
542 iSegReg, GCPtrMem, GCPtrEff, &pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK]));
543 return (TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK];
544 }
545 }
546 }
547
548 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
549 outdated page pointer, or other troubles. (This will do a TLB load.) */
550 Log8Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %u:%RGv falling back\n", LOG_FN_NAME, iSegReg, GCPtrMem));
551# endif
552 return RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,AtSafeJmp)(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
553}
554
555
556/**
557 * Inlined flat atomic read-write memory mapping function that longjumps on error.
558 *
559 * Almost identical to RT_CONCAT3(iemMemFlatMapData,TMPL_MEM_FN_SUFF,RwJmp).
560 */
561DECL_INLINE_THROW(TMPL_MEM_TYPE *)
562RT_CONCAT3(iemMemFlatMapData,TMPL_MEM_FN_SUFF,AtJmp)(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
563 RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
564{
565# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
566 /*
567 * Check that the address doesn't cross a page boundrary.
568 */
569# if TMPL_MEM_TYPE_SIZE > 1
570 if (RT_LIKELY(!(GCPtrMem & TMPL_MEM_TYPE_ALIGN))) /* strictly aligned, otherwise fall back to the slow path which knows the details. */
571# endif
572 {
573 /*
574 * TLB lookup.
575 */
576 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
577 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
578 if (RT_LIKELY(pTlbe->uTag == uTag))
579 {
580 /*
581 * Check TLB page table level access flags.
582 */
583 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
584 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
585 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
586 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
587 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_PT_NO_WRITE
588 | fNoUser))
589 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
590 {
591 /*
592 * Return the address.
593 */
594 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
595 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
596 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
597 *pbUnmapInfo = 0;
598 Log7Ex(LOG_GROUP_IEM_MEM,("IEM AT/map " TMPL_MEM_FMT_DESC " %RGv: %p\n",
599 GCPtrMem, &pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK]));
600 return (TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK];
601 }
602 }
603 }
604
605 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
606 outdated page pointer, or other troubles. (This will do a TLB load.) */
607 Log8Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrMem));
608# endif
609 return RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,AtSafeJmp)(pVCpu, pbUnmapInfo, UINT8_MAX, GCPtrMem);
610}
611
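/*
 * Note on the At variants above: they back LOCK prefixed read-modify-write
 * accesses, which is presumably why their inline paths insist on strict
 * alignment rather than using TMPL_MEM_ALIGN_CHECK; a misaligned locked
 * update may not be doable atomically on the host, in which case the safe
 * fallback gets to pick a strategy (cf. the split-lock / misaligned-lock
 * handling mentioned in the change description at the top of this page).
 */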
612# endif /* TMPL_MEM_WITH_ATOMIC_MAPPING */
613
614/**
615 * Inlined write-only memory mapping function that longjumps on error.
616 */
617DECL_INLINE_THROW(TMPL_MEM_TYPE *)
618RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,WoJmp)(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
619 uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
620{
621# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
622 /*
623 * Convert from segmented to flat address and check that it doesn't cross a page boundrary.
624 */
625 RTGCPTR const GCPtrEff = iemMemApplySegmentToWriteJmp(pVCpu, iSegReg, sizeof(TMPL_MEM_TYPE), GCPtrMem);
626# if TMPL_MEM_TYPE_SIZE > 1
627 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(GCPtrEff)))
628# endif
629 {
630 /*
631 * TLB lookup.
632 */
633 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrEff);
634 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
635 if (RT_LIKELY(pTlbe->uTag == uTag))
636 {
637 /*
638 * Check TLB page table level access flags.
639 */
640 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
641 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
642 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
643 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE
644 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_PT_NO_WRITE
645 | fNoUser))
646 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
647 {
648 /*
649 * Return the address.
650 */
651 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
652 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
653 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
654 *pbUnmapInfo = 0;
655 Log7Ex(LOG_GROUP_IEM_MEM,("IEM WO/map " TMPL_MEM_FMT_DESC " %d|%RGv=%RGv: %p\n",
656 iSegReg, GCPtrMem, GCPtrEff, &pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK]));
657 return (TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK];
658 }
659 }
660 }
661
662 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
663 outdated page pointer, or other troubles. (This will do a TLB load.) */
664 Log8Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %u:%RGv falling back\n", LOG_FN_NAME, iSegReg, GCPtrMem));
665# endif
666 return RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,WoSafeJmp)(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
667}
668
669
670/**
671 * Inlined flat write-only memory mapping function that longjumps on error.
672 */
673DECL_INLINE_THROW(TMPL_MEM_TYPE *)
674RT_CONCAT3(iemMemFlatMapData,TMPL_MEM_FN_SUFF,WoJmp)(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
675 RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
676{
677# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
678 /*
679 * Check that the address doesn't cross a page boundrary.
680 */
681# if TMPL_MEM_TYPE_SIZE > 1
682 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(GCPtrMem)))
683# endif
684 {
685 /*
686 * TLB lookup.
687 */
688 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
689 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
690 if (RT_LIKELY(pTlbe->uTag == uTag))
691 {
692 /*
693 * Check TLB page table level access flags.
694 */
695 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
696 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
697 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
698 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE
699 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY
700 | IEMTLBE_F_PT_NO_WRITE | fNoUser))
701 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
702 {
703 /*
704 * Return the address.
705 */
706 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
707 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
708 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
709 *pbUnmapInfo = 0;
710 Log7Ex(LOG_GROUP_IEM_MEM,("IEM WO/map " TMPL_MEM_FMT_DESC " %RGv: %p\n",
711 GCPtrMem, &pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK]));
712 return (TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK];
713 }
714 }
715 }
716
717 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
718 outdated page pointer, or other troubles. (This will do a TLB load.) */
719 Log8Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrMem));
720# endif
721 return RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,WoSafeJmp)(pVCpu, pbUnmapInfo, UINT8_MAX, GCPtrMem);
722}
723
724
725/**
726 * Inlined read-only memory mapping function that longjumps on error.
727 */
728DECL_INLINE_THROW(TMPL_MEM_TYPE const *)
729RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,RoJmp)(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
730 uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
731{
732# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
733 /*
734 * Convert from segmented to flat address and check that it doesn't cross a page boundrary.
735 */
736 RTGCPTR const GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(TMPL_MEM_TYPE), GCPtrMem);
737# if TMPL_MEM_TYPE_SIZE > 1
738 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(GCPtrEff)))
739# endif
740 {
741 /*
742 * TLB lookup.
743 */
744 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrEff);
745 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
746 if (RT_LIKELY(pTlbe->uTag == uTag))
747 {
748 /*
749 * Check TLB page table level access flags.
750 */
751 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
752 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
753 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
754 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
755 | IEMTLBE_F_PT_NO_ACCESSED | fNoUser))
756 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
757 {
758 /*
759 * Return the address.
760 */
761 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
762 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
763 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
764 *pbUnmapInfo = 0;
765 Log3Ex(LOG_GROUP_IEM_MEM,("IEM RO/map " TMPL_MEM_FMT_DESC " %d|%RGv=%RGv: %p\n",
766 iSegReg, GCPtrMem, GCPtrEff, &pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK]));
767 return (TMPL_MEM_TYPE const *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK];
768 }
769 }
770 }
771
772 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
773 outdated page pointer, or other troubles. (This will do a TLB load.) */
774 Log4Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %u:%RGv falling back\n", LOG_FN_NAME, iSegReg, GCPtrMem));
775# endif
776 return RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,RoSafeJmp)(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
777}
778
779
780/**
781 * Inlined flat read-only memory mapping function that longjumps on error.
782 */
783DECL_INLINE_THROW(TMPL_MEM_TYPE const *)
784RT_CONCAT3(iemMemFlatMapData,TMPL_MEM_FN_SUFF,RoJmp)(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
785 RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
786{
787# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
788 /*
789 * Check that the address doesn't cross a page boundrary.
790 */
791# if TMPL_MEM_TYPE_SIZE > 1
792 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(GCPtrMem)))
793# endif
794 {
795 /*
796 * TLB lookup.
797 */
798 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
799 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
800 if (RT_LIKELY(pTlbe->uTag == uTag))
801 {
802 /*
803 * Check TLB page table level access flags.
804 */
805 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
806 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
807 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
808 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
809 | IEMTLBE_F_PT_NO_ACCESSED | fNoUser))
810 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
811 {
812 /*
813 * Return the address.
814 */
815 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
816 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
817 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
818 *pbUnmapInfo = 0;
819 Log3Ex(LOG_GROUP_IEM_MEM,("IEM RO/map " TMPL_MEM_FMT_DESC " %RGv: %p\n",
820 GCPtrMem, &pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK]));
821 return (TMPL_MEM_TYPE const *)&pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK];
822 }
823 }
824 }
825
826 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
827 outdated page pointer, or other troubles. (This will do a TLB load.) */
828 Log4Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrMem));
829# endif
830 return RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,RoSafeJmp)(pVCpu, pbUnmapInfo, UINT8_MAX, GCPtrMem);
831}
832
833# endif /* !TMPL_MEM_NO_MAPPING */
834
835
836/*********************************************************************************************************************************
837* Stack Access *
838*********************************************************************************************************************************/
839# ifdef TMPL_MEM_WITH_STACK
840# if TMPL_MEM_TYPE_SIZE > 8
841# error "Stack not supported for this type size - please #undef TMPL_MEM_WITH_STACK"
842# endif
843# if TMPL_MEM_TYPE_SIZE > 1 && TMPL_MEM_TYPE_ALIGN + 1 < TMPL_MEM_TYPE_SIZE
844# error "Stack not supported for this alignment size - please #undef TMPL_MEM_WITH_STACK"
845# endif
846# ifdef IEM_WITH_SETJMP
847
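/*
 * The stack helpers below come in two flavours: iemMemStoreStackXxxJmp and
 * iemMemFetchStackXxxJmp operate on a caller-supplied SS-relative address,
 * whereas iemMemStackPushXxxJmp and iemMemStackPopGRegXxxJmp also compute the
 * new RSP and only commit it on success.  The Flat and SReg variants
 * specialise these for flat stack segments and segment-register pushes.
 */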
848/**
849 * Stack store function that longjmps on error.
850 */
851DECL_INLINE_THROW(void)
852RT_CONCAT3(iemMemStoreStack,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
853{
854# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
855 /*
856 * Apply segmentation and check that the item doesn't cross a page boundrary.
857 */
858 RTGCPTR const GCPtrEff = iemMemApplySegmentToWriteJmp(pVCpu, X86_SREG_SS, sizeof(TMPL_MEM_TYPE), GCPtrMem);
859# if TMPL_MEM_TYPE_SIZE > 1
860 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(GCPtrEff)))
861# endif
862 {
863 /*
864 * TLB lookup.
865 */
866 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrEff);
867 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
868 if (RT_LIKELY(pTlbe->uTag == uTag))
869 {
870 /*
871 * Check TLB page table level access flags.
872 */
873 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
874 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
875 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
876 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE
877 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY
878 | IEMTLBE_F_PT_NO_WRITE | fNoUser))
879 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
880 {
881 /*
882 * Do the store and return.
883 */
884 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
885 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
886 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
887 Log11Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " SS|%RGv: " TMPL_MEM_FMT_TYPE "\n", GCPtrEff, uValue));
888 *(TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK] = uValue;
889 return;
890 }
891 }
892 }
893
894 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
895 outdated page pointer, or other troubles. (This will do a TLB load.) */
896 Log12Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrEff));
897# endif
898 RT_CONCAT3(iemMemStoreStack,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, GCPtrMem, uValue);
899}
900
901
902# ifdef TMPL_WITH_PUSH_SREG
903/**
904 * Stack segment store function that longjmps on error.
905 *
906 * For a detailed discussion of the behaviour see the fallback functions
907 * iemMemStoreStackUxxSRegSafeJmp and iemMemStackPushUxxSRegSafeJmp.
908 */
909DECL_INLINE_THROW(void)
910RT_CONCAT3(iemMemStoreStack,TMPL_MEM_FN_SUFF,SRegJmp)(PVMCPUCC pVCpu, RTGCPTR GCPtrMem,
911 TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
912{
913# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
914 /*
915 * Apply segmentation to the address and check that the item doesn't cross
916 * a page boundrary.
917 */
918 RTGCPTR const GCPtrEff = iemMemApplySegmentToWriteJmp(pVCpu, X86_SREG_SS, sizeof(TMPL_MEM_TYPE), GCPtrMem);
919# if TMPL_MEM_TYPE_SIZE > 1
920 if (RT_LIKELY( !(GCPtrEff & (sizeof(uint16_t) - 1U))
921 || TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, GCPtrEff, uint16_t) ))
922# endif
923 {
924 /*
925 * TLB lookup.
926 */
927 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrEff);
928 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
929 if (RT_LIKELY(pTlbe->uTag == uTag))
930 {
931 /*
932 * Check TLB page table level access flags.
933 */
934 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
935 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
936 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
937 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE
938 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY
939 | IEMTLBE_F_PT_NO_WRITE | fNoUser))
940 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
941 {
942 /*
943 * Do the push and return.
944 */
945 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
946 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
947 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
948 Log11Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " SS|%RGv: " TMPL_MEM_FMT_TYPE " [sreg]\n", GCPtrEff, uValue));
949 *(uint16_t *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK] = (uint16_t)uValue;
950 return;
951 }
952 }
953 }
954
955 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
956 outdated page pointer, or other troubles. (This will do a TLB load.) */
957 Log12Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrEff));
958# endif
959 RT_CONCAT3(iemMemStoreStack,TMPL_MEM_FN_SUFF,SRegSafeJmp)(pVCpu, GCPtrMem, uValue);
960}
961# endif /* TMPL_WITH_PUSH_SREG */
962
963
964/**
965 * Flat stack store function that longjmps on error.
966 */
967DECL_INLINE_THROW(void)
968RT_CONCAT3(iemMemFlatStoreStack,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, RTGCPTR GCPtrMem,
969 TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
970{
971 Assert( IEM_IS_64BIT_CODE(pVCpu)
972 || ( pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig
973 && pVCpu->cpum.GstCtx.ss.Attr.n.u4Type == X86_SEL_TYPE_RW_ACC
974 && pVCpu->cpum.GstCtx.ss.u32Limit == UINT32_MAX
975 && pVCpu->cpum.GstCtx.ss.u64Base == 0));
976
977# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
978 /*
979 * Check that the item doesn't cross a page boundrary.
980 */
981# if TMPL_MEM_TYPE_SIZE > 1
982 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(GCPtrMem)))
983# endif
984 {
985 /*
986 * TLB lookup.
987 */
988 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
989 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
990 if (RT_LIKELY(pTlbe->uTag == uTag))
991 {
992 /*
993 * Check TLB page table level access flags.
994 */
995 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
996 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
997 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
998 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE
999 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY
1000 | IEMTLBE_F_PT_NO_WRITE | fNoUser))
1001 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
1002 {
1003 /*
1004 * Do the push and return.
1005 */
1006 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
1007 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
1008 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
1009 Log11Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " SS|%RGv (<-%RX64): " TMPL_MEM_FMT_TYPE "\n",
1010 GCPtrMem, pVCpu->cpum.GstCtx.rsp, uValue));
1011 *(TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK] = uValue;
1012 return;
1013 }
1014 }
1015 }
1016
1017 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
1018 outdated page pointer, or other troubles. (This will do a TLB load.) */
1019 Log12Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrMem));
1020# endif
1021 RT_CONCAT3(iemMemStoreStack,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, GCPtrMem, uValue);
1022}
1023
1024# ifdef TMPL_WITH_PUSH_SREG
1025/**
1026 * Flat stack segment store function that longjmps on error.
1027 *
1028 * For a detailed discussion of the behaviour see the fallback functions
1029 * iemMemStoreStackUxxSRegSafeJmp and iemMemStackPushUxxSRegSafeJmp.
1030 */
1031DECL_INLINE_THROW(void)
1032RT_CONCAT3(iemMemFlatStoreStack,TMPL_MEM_FN_SUFF,SRegJmp)(PVMCPUCC pVCpu, RTGCPTR GCPtrMem,
1033 TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
1034{
1035# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
1036 /*
1037 * Check that the item doesn't cross a page boundrary.
1038 */
1039 if (RT_LIKELY( !(GCPtrMem & (sizeof(uint16_t) - 1))
1040 || TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, GCPtrMem, uint16_t) ))
1041 {
1042 /*
1043 * TLB lookup.
1044 */
1045 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
1046 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
1047 if (RT_LIKELY(pTlbe->uTag == uTag))
1048 {
1049 /*
1050 * Check TLB page table level access flags.
1051 */
1052 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
1053 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
1054 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
1055 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE
1056 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY
1057 | IEMTLBE_F_PT_NO_WRITE | fNoUser))
1058 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
1059 {
1060 /*
1061 * Do the push and return.
1062 */
1063 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
1064 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
1065 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
1066 Log11Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " SS|%RGv (<-%RX64): " TMPL_MEM_FMT_TYPE " [sreg]\n",
1067 GCPtrMem, pVCpu->cpum.GstCtx.rsp, uValue));
1068 *(uint16_t *)&pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK] = (uint16_t)uValue;
1069 return;
1070 }
1071 }
1072 }
1073
1074 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
1075 outdated page pointer, or other troubles. (This will do a TLB load.) */
1076 Log12Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrMem));
1077# endif
1078 RT_CONCAT3(iemMemStoreStack,TMPL_MEM_FN_SUFF,SRegSafeJmp)(pVCpu, GCPtrMem, uValue);
1079}
1080# endif /* TMPL_WITH_PUSH_SREG */
1081
1082
1083/**
1084 * Stack fetch function that longjmps on error.
1085 */
1086DECL_INLINE_THROW(TMPL_MEM_TYPE)
1087RT_CONCAT3(iemMemFetchStack,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
1088{
1089# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
1090 /*
1091 * Apply segmentation to the address and check that the item doesn't cross
1092 * a page boundrary.
1093 */
1094 RTGCPTR const GCPtrEff = iemMemApplySegmentToWriteJmp(pVCpu, X86_SREG_SS, sizeof(TMPL_MEM_TYPE), GCPtrMem);
1095# if TMPL_MEM_TYPE_SIZE > 1
1096 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(GCPtrEff)))
1097# endif
1098 {
1099 /*
1100 * TLB lookup.
1101 */
1102 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrEff);
1103 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
1104 if (RT_LIKELY(pTlbe->uTag == uTag))
1105 {
1106 /*
1107 * Check TLB page table level access flags.
1108 */
1109 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
1110 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
1111 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
1112 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
1113 | IEMTLBE_F_PT_NO_ACCESSED | fNoUser))
1114 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
1115 {
1116 /*
1117 * Do the pop.
1118 */
1119 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
1120 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
1121 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
1122 TMPL_MEM_TYPE const uValue = *(TMPL_MEM_TYPE const *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK];
1123 Log9Ex(LOG_GROUP_IEM_MEM,("IEM RD " TMPL_MEM_FMT_DESC " SS|%RGv: " TMPL_MEM_FMT_TYPE "\n", GCPtrEff, uValue));
1124 return uValue;
1125 }
1126 }
1127 }
1128
1129 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
1130 outdated page pointer, or other troubles. (This will do a TLB load.) */
1131 Log10Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrEff));
1132# endif
1133 return RT_CONCAT3(iemMemFetchStack,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, GCPtrMem);
1134}
1135
1136
1137/**
1138 * Flat stack fetch function that longjmps on error.
1139 */
1140DECL_INLINE_THROW(TMPL_MEM_TYPE)
1141RT_CONCAT3(iemMemFlatFetchStack,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
1142{
1143# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
1144 /*
1145 * Check that the item doesn't cross a page boundrary.
1146 */
1147# if TMPL_MEM_TYPE_SIZE > 1
1148 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(GCPtrMem)))
1149# endif
1150 {
1151 /*
1152 * TLB lookup.
1153 */
1154 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
1155 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
1156 if (RT_LIKELY(pTlbe->uTag == uTag))
1157 {
1158 /*
1159 * Check TLB page table level access flags.
1160 */
1161 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
1162 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
1163 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
1164 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
1165 | IEMTLBE_F_PT_NO_ACCESSED | fNoUser))
1166 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
1167 {
1168 /*
1169 * Do the pop.
1170 */
1171 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
1172 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
1173 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
1174 TMPL_MEM_TYPE const uValue = *(TMPL_MEM_TYPE const *)&pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK];
1175 Log9Ex(LOG_GROUP_IEM_MEM,("IEM RD " TMPL_MEM_FMT_DESC " SS|%RGv: " TMPL_MEM_FMT_TYPE "\n", GCPtrMem, uValue));
1176 return uValue;
1177 }
1178 }
1179 }
1180
1181 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
1182 outdated page pointer, or other troubles. (This will do a TLB load.) */
1183 Log10Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrMem));
1184# endif
1185 return RT_CONCAT3(iemMemFetchStack,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, GCPtrMem);
1186}
1187
1188
1189/**
1190 * Stack push function that longjmps on error.
1191 */
1192DECL_INLINE_THROW(void)
1193RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
1194{
1195# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
1196 /*
1197 * Decrement the stack pointer (prep), apply segmentation and check that
1198 * the item doesn't cross a page boundrary.
1199 */
1200 uint64_t uNewRsp;
1201 RTGCPTR const GCPtrTop = iemRegGetRspForPush(pVCpu, sizeof(TMPL_MEM_TYPE), &uNewRsp);
1202 RTGCPTR const GCPtrEff = iemMemApplySegmentToWriteJmp(pVCpu, X86_SREG_SS, sizeof(TMPL_MEM_TYPE), GCPtrTop);
1203# if TMPL_MEM_TYPE_SIZE > 1
1204 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(GCPtrEff)))
1205# endif
1206 {
1207 /*
1208 * TLB lookup.
1209 */
1210 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrEff);
1211 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
1212 if (RT_LIKELY(pTlbe->uTag == uTag))
1213 {
1214 /*
1215 * Check TLB page table level access flags.
1216 */
1217 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
1218 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
1219 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
1220 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE
1221 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY
1222 | IEMTLBE_F_PT_NO_WRITE | fNoUser))
1223 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
1224 {
1225 /*
1226 * Do the push and return.
1227 */
1228 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
1229 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
1230 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
1231 Log11Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " SS|%RGv (%RX64->%RX64): " TMPL_MEM_FMT_TYPE "\n",
1232 GCPtrEff, pVCpu->cpum.GstCtx.rsp, uNewRsp, uValue));
1233 *(TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK] = uValue;
1234 pVCpu->cpum.GstCtx.rsp = uNewRsp;
1235 return;
1236 }
1237 }
1238 }
1239
1240 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
1241 outdated page pointer, or other troubles. (This will do a TLB load.) */
1242 Log12Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrEff));
1243# endif
1244 RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, uValue);
1245}
1246
1247
1248/**
1249 * Stack pop greg function that longjmps on error.
1250 */
1251DECL_INLINE_THROW(void)
1252RT_CONCAT3(iemMemStackPopGReg,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, uint8_t iGReg) IEM_NOEXCEPT_MAY_LONGJMP
1253{
1254 Assert(iGReg < 16);
1255
1256# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
1257 /*
1258 * Increment the stack pointer (prep), apply segmentation and check that
1259 * the item doesn't cross a page boundrary.
1260 */
1261 uint64_t uNewRsp;
1262 RTGCPTR const GCPtrTop = iemRegGetRspForPop(pVCpu, sizeof(TMPL_MEM_TYPE), &uNewRsp);
1263 RTGCPTR const GCPtrEff = iemMemApplySegmentToWriteJmp(pVCpu, X86_SREG_SS, sizeof(TMPL_MEM_TYPE), GCPtrTop);
1264# if TMPL_MEM_TYPE_SIZE > 1
1265 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(GCPtrEff)))
1266# endif
1267 {
1268 /*
1269 * TLB lookup.
1270 */
1271 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrEff);
1272 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
1273 if (RT_LIKELY(pTlbe->uTag == uTag))
1274 {
1275 /*
1276 * Check TLB page table level access flags.
1277 */
1278 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
1279 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
1280 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
1281 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
1282 | IEMTLBE_F_PT_NO_ACCESSED | fNoUser))
1283 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
1284 {
1285 /*
1286 * Do the pop.
1287 */
1288 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
1289 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
1290 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
1291 TMPL_MEM_TYPE const uValue = *(TMPL_MEM_TYPE const *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK];
1292 Log9Ex(LOG_GROUP_IEM_MEM,("IEM RD " TMPL_MEM_FMT_DESC " SS|%RGv (%RX64->%RX64): " TMPL_MEM_FMT_TYPE " (r%u)\n",
1293 GCPtrEff, pVCpu->cpum.GstCtx.rsp, uNewRsp, uValue, iGReg));
1294 pVCpu->cpum.GstCtx.rsp = uNewRsp; /* must be first for 16-bit */
1295# if TMPL_MEM_TYPE_SIZE == 2
1296 pVCpu->cpum.GstCtx.aGRegs[iGReg].u16 = uValue;
1297# elif TMPL_MEM_TYPE_SIZE == 4 || TMPL_MEM_TYPE_SIZE == 8
1298 pVCpu->cpum.GstCtx.aGRegs[iGReg].u = uValue;
1299# else
1300# error "TMPL_MEM_TYPE_SIZE"
1301# endif
1302 return;
1303 }
1304 }
1305 }
1306
1307 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
1308 outdated page pointer, or other troubles. (This will do a TLB load.) */
1309 Log10Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrEff));
1310# endif
1311 RT_CONCAT3(iemMemStackPopGReg,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, iGReg);
1312}
1313
1314# ifdef TMPL_WITH_PUSH_SREG
1315/**
1316 * Stack segment push function that longjmps on error.
1317 *
1318 * For a detailed discussion of the behaviour see the fallback functions
1319 * iemMemStackPushUxxSRegSafeJmp.
1320 */
1321DECL_INLINE_THROW(void)
1322RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,SRegJmp)(PVMCPUCC pVCpu, TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
1323{
1324# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
1325 /* See fallback for details on this weirdness: */
1326 bool const fIsIntel = IEM_IS_GUEST_CPU_INTEL(pVCpu);
1327 uint8_t const cbAccess = fIsIntel && !IEM_IS_REAL_MODE(pVCpu) ? sizeof(uint16_t) : sizeof(TMPL_MEM_TYPE);
1328
1329 /*
1330 * Decrement the stack pointer (prep), apply segmentation and check that
1331 * the item doesn't cross a page boundrary.
1332 */
1333 uint64_t uNewRsp;
1334 RTGCPTR const GCPtrTop = iemRegGetRspForPush(pVCpu, sizeof(TMPL_MEM_TYPE), &uNewRsp);
1335 RTGCPTR const GCPtrEff = iemMemApplySegmentToWriteJmp(pVCpu, X86_SREG_SS, cbAccess, GCPtrTop);
1336# if TMPL_MEM_TYPE_SIZE > 1
1337 if (RT_LIKELY( !(GCPtrEff & (cbAccess - 1U))
1338 || ( cbAccess == sizeof(uint16_t)
1339 ? TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, GCPtrEff, uint16_t)
1340 : TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, GCPtrEff, TMPL_MEM_TYPE) ) ))
1341# endif
1342 {
1343 /*
1344 * TLB lookup.
1345 */
1346 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrEff);
1347 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
1348 if (RT_LIKELY(pTlbe->uTag == uTag))
1349 {
1350 /*
1351 * Check TLB page table level access flags.
1352 */
1353 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
1354 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
1355 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
1356 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE
1357 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY
1358 | IEMTLBE_F_PT_NO_WRITE | fNoUser))
1359 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
1360 {
1361 /*
1362 * Do the push and return.
1363 */
1364 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
1365 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
1366 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
1367 if (cbAccess == sizeof(uint16_t))
1368 {
1369 Log11Ex(LOG_GROUP_IEM_MEM,("IEM WR 'word' SS|%RGv (%RX64->%RX64): %#06x [sreg/i]\n",
1370 GCPtrEff, pVCpu->cpum.GstCtx.rsp, uNewRsp, (uint16_t)uValue));
1371 *(uint16_t *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK] = (uint16_t)uValue;
1372 }
1373 else
1374 {
1375 TMPL_MEM_TYPE * const puSlot = (TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK];
1376 if (fIsIntel)
1377 {
1378 Assert(IEM_IS_REAL_MODE(pVCpu));
1379 uValue = (uint16_t)uValue | (pVCpu->cpum.GstCtx.eflags.u & (UINT32_C(0xffff0000) & ~X86_EFL_RAZ_MASK));
1380 Log11Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " SS|%RGv (%RX64->%RX64): " TMPL_MEM_FMT_TYPE " [sreg/ir]\n",
1381 GCPtrEff, pVCpu->cpum.GstCtx.rsp, uNewRsp, uValue));
1382 }
1383 else
1384 Log11Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " SS|%RGv (%RX64->%RX64): " TMPL_MEM_FMT_TYPE " [sreg]\n",
1385 GCPtrEff, pVCpu->cpum.GstCtx.rsp, uNewRsp, uValue));
1386 *puSlot = uValue;
1387 }
1388 pVCpu->cpum.GstCtx.rsp = uNewRsp;
1389 return;
1390 }
1391 }
1392 }
1393
1394 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
1395 outdated page pointer, or other troubles. (This will do a TLB load.) */
1396 Log12Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrEff));
1397# endif
1398 RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,SRegSafeJmp)(pVCpu, uValue);
1399}
1400# endif /* TMPL_WITH_PUSH_SREG */
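/* Illustrative note (not part of the template): RT_CONCAT3 is plain token pasting, so with
   e.g. TMPL_MEM_FN_SUFF defined as U32 and TMPL_MEM_TYPE as uint32_t (a hypothetical
   instantiation), the function above would expand to
       void iemMemStackPushU32SRegJmp(PVMCPUCC pVCpu, uint32_t uValue);
   with the SafeJmp fallbacks named accordingly. */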
1401
1402# if TMPL_MEM_TYPE_SIZE != 8
1403
1404/**
1405 * 32-bit flat stack push function that longjmps on error.
1406 */
1407DECL_INLINE_THROW(void)
1408RT_CONCAT3(iemMemFlat32StackPush,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
1409{
1410 Assert( pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig
1411 && pVCpu->cpum.GstCtx.ss.Attr.n.u4Type == X86_SEL_TYPE_RW_ACC
1412 && pVCpu->cpum.GstCtx.ss.u32Limit == UINT32_MAX
1413 && pVCpu->cpum.GstCtx.ss.u64Base == 0);
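    /* These assertions document the caller's contract: this variant is only used when SS is
       a flat, writable, expand-up data segment with a zero base and a 4GiB limit, so ESP can
       be used directly as the effective address below. */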
1414# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
1415 /*
1416 * Calculate the new stack pointer and check that the item doesn't cross a page boundary.
1417 */
1418 uint32_t const uNewEsp = pVCpu->cpum.GstCtx.esp - sizeof(TMPL_MEM_TYPE);
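    /* Note: the uint32_t subtraction wraps modulo 2^32, matching ESP semantics for the flat
       4GiB stack segment asserted above. */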
1419# if TMPL_MEM_TYPE_SIZE > 1
1420 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(uNewEsp)))
1421# endif
1422 {
1423 /*
1424 * TLB lookup.
1425 */
1426 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, (RTGCPTR)uNewEsp); /* Doesn't work w/o casting to RTGCPTR (win /3 hangs). */
1427 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
1428 if (RT_LIKELY(pTlbe->uTag == uTag))
1429 {
1430 /*
1431 * Check TLB page table level access flags.
1432 */
1433 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
1434 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
1435 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
1436 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE
1437 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY
1438 | IEMTLBE_F_PT_NO_WRITE | fNoUser))
1439 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
1440 {
1441 /*
1442 * Do the push and return.
1443 */
1444 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
1445 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
1446 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
1447 Log11Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " SS|%RX32 (<-%RX32): " TMPL_MEM_FMT_TYPE "\n",
1448 uNewEsp, pVCpu->cpum.GstCtx.esp, uValue));
1449 *(TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[uNewEsp & GUEST_PAGE_OFFSET_MASK] = uValue;
1450 pVCpu->cpum.GstCtx.rsp = uNewEsp;
1451 return;
1452 }
1453 }
1454 }
1455
1456 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
1457 outdated page pointer, or other troubles. (This will do a TLB load.) */
1458 Log12Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RX32 falling back\n", LOG_FN_NAME, uNewEsp));
1459# endif
1460 RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, uValue);
1461}
1462
1463
1464/**
1465 * 32-bit flat stack greg pop function that longjmps on error.
1466 */
1467DECL_INLINE_THROW(void)
1468RT_CONCAT3(iemMemFlat32StackPopGReg,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, uint8_t iGReg) IEM_NOEXCEPT_MAY_LONGJMP
1469{
1470 Assert(iGReg < 16);
1471# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
1472 /*
1473 * Calculate the new stack pointer and check that the item doesn't cross a page boundary.
1474 */
1475 uint32_t const uOldEsp = pVCpu->cpum.GstCtx.esp;
1476# if TMPL_MEM_TYPE_SIZE > 1
1477 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(uOldEsp)))
1478# endif
1479 {
1480 /*
1481 * TLB lookup.
1482 */
1483 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, (RTGCPTR)uOldEsp); /* Cast is required! 2023-08-11 */
1484 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
1485 if (RT_LIKELY(pTlbe->uTag == uTag))
1486 {
1487 /*
1488 * Check TLB page table level access flags.
1489 */
1490 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
1491 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
1492 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
1493 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
1494 | IEMTLBE_F_PT_NO_ACCESSED | fNoUser))
1495 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
1496 {
1497 /*
1498 * Do the pop and update the register values.
1499 */
1500 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
1501 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
1502 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
1503 TMPL_MEM_TYPE const uValue = *(TMPL_MEM_TYPE const *)&pTlbe->pbMappingR3[uOldEsp & GUEST_PAGE_OFFSET_MASK];
1504 pVCpu->cpum.GstCtx.rsp = uOldEsp + sizeof(TMPL_MEM_TYPE); /* must be first for 16-bit */
1505# if TMPL_MEM_TYPE_SIZE == 2
1506 pVCpu->cpum.GstCtx.aGRegs[iGReg].u16 = uValue;
1507# elif TMPL_MEM_TYPE_SIZE == 4
1508 pVCpu->cpum.GstCtx.aGRegs[iGReg].u = uValue;
1509# else
1510# error "TMPL_MEM_TYPE_SIZE"
1511# endif
1512 Log9Ex(LOG_GROUP_IEM_MEM,("IEM RD " TMPL_MEM_FMT_DESC " SS|%RX32 (->%RX32): " TMPL_MEM_FMT_TYPE " (r%u)\n",
1513 uOldEsp, uOldEsp + sizeof(TMPL_MEM_TYPE), uValue, iGReg));
1514 return;
1515 }
1516 }
1517 }
1518
1519 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
1520 outdated page pointer, or other troubles. (This will do a TLB load.) */
1521 Log10Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RX32 falling back\n", LOG_FN_NAME, uOldEsp));
1522# endif
1523 RT_CONCAT3(iemMemStackPopGReg,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, iGReg);
1524}
1525
1526 # endif /* TMPL_MEM_TYPE_SIZE != 8 */
1527
1528# ifdef TMPL_WITH_PUSH_SREG
1529/**
1530 * 32-bit flat stack segment push function that longjmps on error.
1531 *
1532 * For a detailed discussion of the behaviour see the fallback functions
1533 * iemMemStackPushUxxSRegSafeJmp.
1534 */
1535DECL_INLINE_THROW(void)
1536RT_CONCAT3(iemMemFlat32StackPush,TMPL_MEM_FN_SUFF,SRegJmp)(PVMCPUCC pVCpu, TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
1537{
1538# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
1539 /* See fallback for details on this weirdness: */
1540 bool const fIsIntel = IEM_IS_GUEST_CPU_INTEL(pVCpu);
1541 uint8_t const cbAccess = fIsIntel && !IEM_IS_REAL_MODE(pVCpu) ? sizeof(uint16_t) : sizeof(TMPL_MEM_TYPE);
1542
1543 /*
1544 * Calculate the new stack pointer and check that the item doesn't cross a page boundary.
1545 */
1546 uint32_t const uNewEsp = pVCpu->cpum.GstCtx.esp - sizeof(TMPL_MEM_TYPE);
1547 if (RT_LIKELY( !(uNewEsp & (cbAccess - 1))
1548 || (cbAccess == sizeof(uint16_t)
1549 ? TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, uNewEsp, uint16_t)
1550 : TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, uNewEsp, TMPL_MEM_TYPE)) ))
1551 {
1552 /*
1553 * TLB lookup.
1554 */
1555 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, (RTGCPTR)uNewEsp); /* Doesn't work w/o casting to RTGCPTR (win /3 hangs). */
1556 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
1557 if (RT_LIKELY(pTlbe->uTag == uTag))
1558 {
1559 /*
1560 * Check TLB page table level access flags.
1561 */
1562 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
1563 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
1564 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
1565 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE
1566 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY
1567 | IEMTLBE_F_PT_NO_WRITE | fNoUser))
1568 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
1569 {
1570 /*
1571 * Do the push and return.
1572 */
1573 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
1574 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
1575 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
1576 if (cbAccess == sizeof(uint16_t))
1577 {
1578 Log11Ex(LOG_GROUP_IEM_MEM,("IEM WR 'word' SS|%RX32 (<-%RX32): %#06x [sreg/i]\n",
1579 uNewEsp, pVCpu->cpum.GstCtx.esp, (uint16_t)uValue));
1580 *(uint16_t *)&pTlbe->pbMappingR3[uNewEsp & GUEST_PAGE_OFFSET_MASK] = (uint16_t)uValue;
1581 }
1582 else
1583 {
1584 TMPL_MEM_TYPE * const puSlot = (TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[uNewEsp & GUEST_PAGE_OFFSET_MASK];
1585 if (fIsIntel)
1586 {
1587 Assert(IEM_IS_REAL_MODE(pVCpu));
1588 uValue = (uint16_t)uValue | (pVCpu->cpum.GstCtx.eflags.u & (UINT32_C(0xffff0000) & ~X86_EFL_RAZ_MASK));
1589 Log11Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " SS|%RX32 (<-RX32): " TMPL_MEM_FMT_TYPE " [sreg/ir]\n",
1590 uNewEsp, pVCpu->cpum.GstCtx.esp, uValue));
1591 }
1592 else
1593 Log11Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " SS|%RX32 (<-RX32): " TMPL_MEM_FMT_TYPE " [sreg]\n",
1594 uNewEsp, pVCpu->cpum.GstCtx.esp, uValue));
1595 *puSlot = uValue;
1596 }
1597 pVCpu->cpum.GstCtx.rsp = uNewEsp;
1598 return;
1599 }
1600 }
1601 }
1602
1603 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
1604 outdated page pointer, or other troubles. (This will do a TLB load.) */
1605 Log12Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RX32 falling back\n", LOG_FN_NAME, uNewEsp));
1606# endif
1607 RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,SRegSafeJmp)(pVCpu, uValue);
1608}
1609# endif /* TMPL_WITH_PUSH_SREG */
1610
1611# if TMPL_MEM_TYPE_SIZE != 4
1612
1613/**
1614 * 64-bit flat stack push function that longjmps on error.
1615 */
1616DECL_INLINE_THROW(void)
1617RT_CONCAT3(iemMemFlat64StackPush,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
1618{
1619# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
1620 /*
1621 * Calculate the new stack pointer and check that the item doesn't cross a page boundary.
1622 */
1623 uint64_t const uNewRsp = pVCpu->cpum.GstCtx.rsp - sizeof(TMPL_MEM_TYPE);
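    /* Note: no canonical check is made here; a non-canonical RSP can presumably never have
       been loaded into the TLB, so such a push misses the fast path and the safe fallback
       below raises the appropriate exception. */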
1624# if TMPL_MEM_TYPE_SIZE > 1
1625 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(uNewRsp)))
1626# endif
1627 {
1628 /*
1629 * TLB lookup.
1630 */
1631 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, uNewRsp);
1632 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
1633 if (RT_LIKELY(pTlbe->uTag == uTag))
1634 {
1635 /*
1636 * Check TLB page table level access flags.
1637 */
1638 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
1639 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
1640 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
1641 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE
1642 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY
1643 | IEMTLBE_F_PT_NO_WRITE | fNoUser))
1644 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
1645 {
1646 /*
1647 * Do the push and return.
1648 */
1649 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
1650 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
1651 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
1652 Log11Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " SS|%RX64 (<-%RX64): " TMPL_MEM_FMT_TYPE "\n",
1653 uNewRsp, pVCpu->cpum.GstCtx.rsp, uValue));
1654 *(TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[uNewRsp & GUEST_PAGE_OFFSET_MASK] = uValue;
1655 pVCpu->cpum.GstCtx.rsp = uNewRsp;
1656 return;
1657 }
1658 }
1659 }
1660
1661 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
1662 outdated page pointer, or other troubles. (This will do a TLB load.) */
1663 Log12Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RX64 falling back\n", LOG_FN_NAME, uNewRsp));
1664# endif
1665 RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, uValue);
1666}
1667
1668
1669/**
1670 * 64-bit flat stack greg pop function that longjmps on error.
1671 */
1672DECL_INLINE_THROW(void)
1673RT_CONCAT3(iemMemFlat64StackPopGReg,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, uint8_t iGReg) IEM_NOEXCEPT_MAY_LONGJMP
1674{
1675 Assert(iGReg < 16);
1676# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
1677 /*
1678 * Calculate the new stack pointer and check that the item doesn't cross a page boundary.
1679 */
1680 uint64_t const uOldRsp = pVCpu->cpum.GstCtx.rsp;
1681# if TMPL_MEM_TYPE_SIZE > 1
1682 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(uOldRsp)))
1683# endif
1684 {
1685 /*
1686 * TLB lookup.
1687 */
1688 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, uOldRsp);
1689 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
1690 if (RT_LIKELY(pTlbe->uTag == uTag))
1691 {
1692 /*
1693 * Check TLB page table level access flags.
1694 */
1695 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
1696 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
1697 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
1698 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
1699 | IEMTLBE_F_PT_NO_ACCESSED | fNoUser))
1700 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
1701 {
1702 /*
1703 * Do the pop and update the register values.
1704 */
1705 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
1706 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
1707 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
1708 TMPL_MEM_TYPE const uValue = *(TMPL_MEM_TYPE const *)&pTlbe->pbMappingR3[uOldRsp & GUEST_PAGE_OFFSET_MASK];
1709 pVCpu->cpum.GstCtx.rsp = uOldRsp + sizeof(TMPL_MEM_TYPE); /* must be first for 16-bit */
1710# if TMPL_MEM_TYPE_SIZE == 2
1711 pVCpu->cpum.GstCtx.aGRegs[iGReg].u16 = uValue;
1712# elif TMPL_MEM_TYPE_SIZE == 8
1713 pVCpu->cpum.GstCtx.aGRegs[iGReg].u = uValue;
1714# else
1715# error "TMPL_MEM_TYPE_SIZE"
1716# endif
1717 Log9Ex(LOG_GROUP_IEM_MEM,("IEM RD " TMPL_MEM_FMT_DESC " SS|%RX64 (->%RX64): " TMPL_MEM_FMT_TYPE " (r%u)\n",
1718 uOldRsp, uOldRsp + sizeof(TMPL_MEM_TYPE), uValue, iGReg));
1719 return;
1720 }
1721 }
1722 }
1723
1724 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
1725 outdated page pointer, or other troubles. (This will do a TLB load.) */
1726 Log10Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RX64 falling back\n", LOG_FN_NAME, uOldRsp));
1727# endif
1728 RT_CONCAT3(iemMemStackPopGReg,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, iGReg);
1729}
1730
1731# endif /* TMPL_MEM_TYPE_SIZE != 4 */
1732
1733# endif /* IEM_WITH_SETJMP */
1734# endif /* TMPL_MEM_WITH_STACK */
1735
1736
1737#endif /* IEM_WITH_SETJMP */
1738
1739#undef TMPL_MEM_TYPE
1740#undef TMPL_MEM_TYPE_ALIGN
1741#undef TMPL_MEM_TYPE_SIZE
1742#undef TMPL_MEM_FN_SUFF
1743#undef TMPL_MEM_FMT_TYPE
1744#undef TMPL_MEM_FMT_DESC
1745#undef TMPL_MEM_NO_STORE
1746#undef TMPL_MEM_ALIGN_CHECK
1747#undef TMPL_MEM_BY_REF
1748
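/* Illustrative note: this template header is instantiated by the IEM source that includes
   it, by defining the TMPL_MEM_* parameters first.  A purely hypothetical dword
   instantiation might look like:

       #define TMPL_MEM_TYPE       uint32_t
       #define TMPL_MEM_TYPE_SIZE  4
       #define TMPL_MEM_TYPE_ALIGN 3   // assumed here to be an alignment mask
       #define TMPL_MEM_FN_SUFF    U32
       #define TMPL_MEM_FMT_TYPE   "%#010x"
       #define TMPL_MEM_FMT_DESC   "dword"
       #define TMPL_MEM_WITH_STACK
       #include "IEMAllMemRWTmplInline.cpp.h"

   The exact parameter values used by VirtualBox are defined in the including file(s),
   not here. */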