VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllMemRWTmplInline.cpp.h@100866

Last change on this file since 100866 was 100866, checked in by vboxsync, 17 months ago

VMM/IEM: Implemented the two flat64 stack function variants. bugref:10369

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 51.2 KB
1/* $Id: IEMAllMemRWTmplInline.cpp.h 100866 2023-08-13 15:00:44Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - Inlined R/W Memory Functions Template.
4 */
5
6/*
7 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/* Check template parameters. */
30#ifndef TMPL_MEM_TYPE
31# error "TMPL_MEM_TYPE is undefined"
32#endif
33#ifndef TMPL_MEM_TYPE_SIZE
34# error "TMPL_MEM_TYPE_SIZE is undefined"
35#endif
36#ifndef TMPL_MEM_TYPE_ALIGN
37# error "TMPL_MEM_TYPE_ALIGN is undefined"
38#endif
39#ifndef TMPL_MEM_FN_SUFF
40# error "TMPL_MEM_FN_SUFF is undefined"
41#endif
42#ifndef TMPL_MEM_FMT_TYPE
43# error "TMPL_MEM_FMT_TYPE is undefined"
44#endif
45#ifndef TMPL_MEM_FMT_DESC
46# error "TMPL_MEM_FMT_DESC is undefined"
47#endif
48
49#if TMPL_MEM_TYPE_ALIGN + 1 < TMPL_MEM_TYPE_SIZE
50# error Have not implemented TMPL_MEM_TYPE_ALIGN smaller than TMPL_MEM_TYPE_SIZE - 1.
51#endif
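/* Illustrative instantiation sketch (example values only, not part of this
   template): the including source file defines the parameters checked above
   and then pulls in this header, e.g. for a dword-sized variant:

       #define TMPL_MEM_TYPE       uint32_t
       #define TMPL_MEM_TYPE_SIZE  4
       #define TMPL_MEM_TYPE_ALIGN 3
       #define TMPL_MEM_FN_SUFF    U32
       #define TMPL_MEM_FMT_TYPE   "%#010x"
       #define TMPL_MEM_FMT_DESC   "dword"
       #include "IEMAllMemRWTmplInline.cpp.h"

   The RT_CONCAT3 expansions below then emit iemMemFetchDataU32Jmp,
   iemMemStoreDataU32Jmp, iemMemMapDataU32RwJmp and friends.  The actual
   instantiation sites and values live in the including compilation units. */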
52
53/** @todo fix logging */
54
55#ifdef IEM_WITH_SETJMP
56
57
58/*********************************************************************************************************************************
59* Fetches *
60*********************************************************************************************************************************/
61
62/**
63 * Inlined fetch function that longjumps on error.
64 *
65 * @note The @a iSegReg is not allowed to be UINT8_MAX!
66 */
67DECL_INLINE_THROW(TMPL_MEM_TYPE)
68RT_CONCAT3(iemMemFetchData,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
69{
70 AssertCompile(sizeof(TMPL_MEM_TYPE) == TMPL_MEM_TYPE_SIZE);
71# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
72 /*
73 * Convert from segmented to flat address and check that it doesn't cross a page boundary.
74 */
75 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(TMPL_MEM_TYPE), GCPtrMem);
76# if TMPL_MEM_TYPE_SIZE > 1
77 if (RT_LIKELY( !(GCPtrEff & TMPL_MEM_TYPE_ALIGN) /* If aligned, it will be within the page. */
78 || TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, GCPtrEff, TMPL_MEM_TYPE) ))
79# endif
80 {
81 /*
82 * TLB lookup.
83 */
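/* IEMTLB_CALC_TAG folds the guest page number and the current TLB revision
   into a tag, and IEMTLB_TAG_TO_ENTRY picks the entry that tag maps to, so
   the single tag compare below decides hit vs. miss. */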
84 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrEff);
85 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
86 if (RT_LIKELY(pTlbe->uTag == uTag))
87 {
88 /*
89 * Check TLB page table level access flags.
90 */
91 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
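/* With IEMTLBE_F_PT_NO_USER == 4, (CPL + 1) & 4 is non-zero only for CPL 3,
   so fNoUser equals IEMTLBE_F_PT_NO_USER in user mode and zero in supervisor
   mode without branching.  Masking the entry with these "negative" flags plus
   IEMTLBE_F_PHYS_REV and comparing against uTlbPhysRev below thus means: the
   physical revision matches and none of the restricting flags are set. */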
92 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
93 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
94 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_NO_MAPPINGR3 | fNoUser))
95 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
96 {
97 /*
98 * Fetch and return the data.
99 */
100 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
101 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
102 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
103 TMPL_MEM_TYPE const uRet = *(TMPL_MEM_TYPE const *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK];
104 Log9(("IEM RD " TMPL_MEM_FMT_DESC " %d|%RGv: " TMPL_MEM_FMT_TYPE "\n", iSegReg, GCPtrMem, uRet));
105 return uRet;
106 }
107 }
108 }
109
110 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
111 outdated page pointer, or other troubles. (This will do a TLB load.) */
112 Log10Func(("%u:%RGv falling back\n", iSegReg, GCPtrMem));
113# endif
114 return RT_CONCAT3(iemMemFetchData,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, iSegReg, GCPtrMem);
115}
116
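/* Usage sketch, assuming a U32 instantiation (variable names are made up):
       uint32_t const uSrc = iemMemFetchDataU32Jmp(pVCpu, X86_SREG_DS, GCPtrEffSrc);
   Any fault longjmps back to the instruction dispatcher instead of returning
   a status code, which is what keeps this fast path so lean. */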
117
118/**
119 * Inlined flat addressing fetch function that longjumps on error.
120 */
121DECL_INLINE_THROW(TMPL_MEM_TYPE)
122RT_CONCAT3(iemMemFlatFetchData,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
123{
124# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
125 /*
126 * Check that it doesn't cross a page boundary.
127 */
128# if TMPL_MEM_TYPE_SIZE > 1
129 AssertCompile(X86_CR0_AM == X86_EFL_AC);
130 AssertCompile(((3U + 1U) << 16) == X86_CR0_AM);
131 if (RT_LIKELY( !(GCPtrMem & TMPL_MEM_TYPE_ALIGN) /* If aligned, it will be within the page. */
132 || TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, GCPtrMem, TMPL_MEM_TYPE) ))
133# endif
134 {
135 /*
136 * TLB lookup.
137 */
138 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
139 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
140 if (RT_LIKELY(pTlbe->uTag == uTag))
141 {
142 /*
143 * Check TLB page table level access flags.
144 */
145 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
146 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
147 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
148 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_NO_MAPPINGR3 | fNoUser))
149 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
150 {
151 /*
152 * Fetch and return the data.
153 */
154 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
155 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
156 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
157 TMPL_MEM_TYPE const uRet = *(TMPL_MEM_TYPE const *)&pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK];
158 Log9(("IEM RD " TMPL_MEM_FMT_DESC " %RGv: " TMPL_MEM_FMT_TYPE "\n", GCPtrMem, uRet));
159 return uRet;
160 }
161 }
162 }
163
164 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
165 outdated page pointer, or other troubles. (This will do a TLB load.) */
166 Log10Func(("%RGv falling back\n", GCPtrMem));
167# endif
168 return RT_CONCAT3(iemMemFetchData,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, UINT8_MAX, GCPtrMem);
169}
170
171
172/*********************************************************************************************************************************
173* Stores *
174*********************************************************************************************************************************/
175# ifndef TMPL_MEM_NO_STORE
176
177/**
178 * Inlined store function that longjumps on error.
179 *
180 * @note The @a iSegReg is not allowed to be UINT8_MAX!
181 */
182DECL_INLINE_THROW(void)
183RT_CONCAT3(iemMemStoreData,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
184 TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
185{
186# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
187 /*
188 * Convert from segmented to flat address and check that it doesn't cross a page boundary.
189 */
190 RTGCPTR GCPtrEff = iemMemApplySegmentToWriteJmp(pVCpu, iSegReg, sizeof(TMPL_MEM_TYPE), GCPtrMem);
191# if TMPL_MEM_TYPE_SIZE > 1
192 AssertCompile(X86_CR0_AM == X86_EFL_AC);
193 AssertCompile(((3U + 1U) << 16) == X86_CR0_AM);
194 if (RT_LIKELY( !(GCPtrEff & TMPL_MEM_TYPE_ALIGN) /* If aligned, it will be within the page. */
195 || TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, GCPtrEff, TMPL_MEM_TYPE) ))
196# endif
197 {
198 /*
199 * TLB lookup.
200 */
201 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrEff);
202 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
203 if (RT_LIKELY(pTlbe->uTag == uTag))
204 {
205 /*
206 * Check TLB page table level access flags.
207 */
208 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
209 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
210 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE
211 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_PT_NO_WRITE
212 | IEMTLBE_F_NO_MAPPINGR3 | fNoUser))
213 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
214 {
215 /*
216 * Store the value and return.
217 */
218 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
219 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
220 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
221 *(TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK] = uValue;
222 Log9(("IEM WR " TMPL_MEM_FMT_DESC " %d|%RGv: " TMPL_MEM_FMT_TYPE "\n", iSegReg, GCPtrMem, uValue));
223 return;
224 }
225 }
226 }
227
228 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
229 outdated page pointer, or other troubles. (This will do a TLB load.) */
230 Log10Func(("%u:%RGv falling back\n", iSegReg, GCPtrMem));
231# endif
232 RT_CONCAT3(iemMemStoreData,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, iSegReg, GCPtrMem, uValue);
233}
234
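/* Usage sketch, assuming a U32 instantiation (variable names are made up):
       iemMemStoreDataU32Jmp(pVCpu, X86_SREG_ES, GCPtrEffDst, uValue);
   As with the fetch above, faults longjmp out rather than returning a status
   code to the caller. */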
235
236/**
237 * Inlined flat addressing store function that longjumps on error.
238 */
239DECL_INLINE_THROW(void)
240RT_CONCAT3(iemMemFlatStoreData,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, RTGCPTR GCPtrMem,
241 TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
242{
243# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
244 /*
245 * Check that it doesn't cross a page boundary.
246 */
247# if TMPL_MEM_TYPE_SIZE > 1
248 if (RT_LIKELY( !(GCPtrMem & TMPL_MEM_TYPE_ALIGN)
249 || TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, GCPtrMem, TMPL_MEM_TYPE) ))
250# endif
251 {
252 /*
253 * TLB lookup.
254 */
255 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
256 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
257 if (RT_LIKELY(pTlbe->uTag == uTag))
258 {
259 /*
260 * Check TLB page table level access flags.
261 */
262 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
263 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
264 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE
265 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_PT_NO_WRITE
266 | IEMTLBE_F_NO_MAPPINGR3 | fNoUser))
267 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
268 {
269 /*
270 * Store the value and return.
271 */
272 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
273 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
274 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
275 *(TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK] = uValue;
276 Log9(("IEM WR " TMPL_MEM_FMT_DESC " %RGv: " TMPL_MEM_FMT_TYPE "\n", GCPtrMem, uValue));
277 return;
278 }
279 }
280 }
281
282 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
283 outdated page pointer, or other troubles. (This will do a TLB load.) */
284 Log10Func(("%RGv falling back\n", GCPtrMem));
285# endif
286 RT_CONCAT3(iemMemStoreData,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, UINT8_MAX, GCPtrMem, uValue);
287}
288
289# endif /* !TMPL_MEM_NO_STORE */
290
291
292/*********************************************************************************************************************************
293* Mapping / Direct Memory Access *
294*********************************************************************************************************************************/
295# ifndef TMPL_MEM_NO_MAPPING
296
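/* These helpers hand back a direct pointer into the guest page plus an unmap
   token via *pbUnmapInfo.  On the inlined TLB-hit path the token is set to
   zero, i.e. the caller's later commit/unmap step has no mapping bookkeeping
   to undo; the Safe fallbacks fill in whatever their mapping actually needs. */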
297/**
298 * Inlined read-write memory mapping function that longjumps on error.
299 */
300DECL_INLINE_THROW(TMPL_MEM_TYPE *)
301RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,RwJmp)(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
302 uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
303{
304# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
305 /*
306 * Convert from segmented to flat address and check that it doesn't cross a page boundary.
307 */
308 RTGCPTR GCPtrEff = iemMemApplySegmentToWriteJmp(pVCpu, iSegReg, sizeof(TMPL_MEM_TYPE), GCPtrMem);
309# if TMPL_MEM_TYPE_SIZE > 1
310 if (RT_LIKELY( !(GCPtrEff & TMPL_MEM_TYPE_ALIGN)
311 || TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, GCPtrEff, TMPL_MEM_TYPE) ))
312# endif
313 {
314 /*
315 * TLB lookup.
316 */
317 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrEff);
318 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
319 if (RT_LIKELY(pTlbe->uTag == uTag))
320 {
321 /*
322 * Check TLB page table level access flags.
323 */
324 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
325 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
326 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
327 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
328 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_PT_NO_WRITE
329 | fNoUser))
330 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
331 {
332 /*
333 * Return the address.
334 */
335 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
336 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
337 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
338 *pbUnmapInfo = 0;
339 Log8(("IEM RW/map " TMPL_MEM_FMT_DESC " %d|%RGv: %p\n",
340 iSegReg, GCPtrMem, &pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK]));
341 return (TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK];
342 }
343 }
344 }
345
346 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
347 outdated page pointer, or other troubles. (This will do a TLB load.) */
348 Log10Func(("%u:%RGv falling back\n", iSegReg, GCPtrMem));
349# endif
350 return RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,RwSafeJmp)(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
351}
352
353
354/**
355 * Inlined flat read-write memory mapping function that longjumps on error.
356 */
357DECL_INLINE_THROW(TMPL_MEM_TYPE *)
358RT_CONCAT3(iemMemFlatMapData,TMPL_MEM_FN_SUFF,RwJmp)(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
359 RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
360{
361# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
362 /*
363 * Check that the address doesn't cross a page boundary.
364 */
365# if TMPL_MEM_TYPE_SIZE > 1
366 if (RT_LIKELY( !(GCPtrMem & TMPL_MEM_TYPE_ALIGN)
367 || TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, GCPtrMem, TMPL_MEM_TYPE) ))
368# endif
369 {
370 /*
371 * TLB lookup.
372 */
373 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
374 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
375 if (RT_LIKELY(pTlbe->uTag == uTag))
376 {
377 /*
378 * Check TLB page table level access flags.
379 */
380 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
381 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
382 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
383 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
384 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_PT_NO_WRITE
385 | fNoUser))
386 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
387 {
388 /*
389 * Return the address.
390 */
391 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
392 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
393 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
394 *pbUnmapInfo = 0;
395 Log8(("IEM RW/map " TMPL_MEM_FMT_DESC " %RGv: %p\n",
396 GCPtrMem, &pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK]));
397 return (TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK];
398 }
399 }
400 }
401
402 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
403 outdated page pointer, or other troubles. (This will do a TLB load.) */
404 Log10Func(("%RGv falling back\n", GCPtrMem));
405# endif
406 return RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,RwSafeJmp)(pVCpu, pbUnmapInfo, UINT8_MAX, GCPtrMem);
407}
408
409
410/**
411 * Inlined write-only memory mapping function that longjumps on error.
412 */
413DECL_INLINE_THROW(TMPL_MEM_TYPE *)
414RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,WoJmp)(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
415 uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
416{
417# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
418 /*
419 * Convert from segmented to flat address and check that it doesn't cross a page boundary.
420 */
421 RTGCPTR GCPtrEff = iemMemApplySegmentToWriteJmp(pVCpu, iSegReg, sizeof(TMPL_MEM_TYPE), GCPtrMem);
422# if TMPL_MEM_TYPE_SIZE > 1
423 if (RT_LIKELY( !(GCPtrEff & TMPL_MEM_TYPE_ALIGN)
424 || TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, GCPtrEff, TMPL_MEM_TYPE) ))
425# endif
426 {
427 /*
428 * TLB lookup.
429 */
430 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrEff);
431 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
432 if (RT_LIKELY(pTlbe->uTag == uTag))
433 {
434 /*
435 * Check TLB page table level access flags.
436 */
437 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
438 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
439 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
440 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE
441 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_PT_NO_WRITE
442 | fNoUser))
443 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
444 {
445 /*
446 * Return the address.
447 */
448 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
449 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
450 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
451 *pbUnmapInfo = 0;
452 Log8(("IEM WO/map " TMPL_MEM_FMT_DESC " %d|%RGv: %p\n",
453 iSegReg, GCPtrMem, &pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK]));
454 return (TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK];
455 }
456 }
457 }
458
459 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
460 outdated page pointer, or other troubles. (This will do a TLB load.) */
461 Log10Func(("%u:%RGv falling back\n", iSegReg, GCPtrMem));
462# endif
463 return RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,WoSafeJmp)(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
464}
465
466
467/**
468 * Inlined flat write-only memory mapping function that longjumps on error.
469 */
470DECL_INLINE_THROW(TMPL_MEM_TYPE *)
471RT_CONCAT3(iemMemFlatMapData,TMPL_MEM_FN_SUFF,WoJmp)(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
472 RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
473{
474# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
475 /*
476 * Check that the address doesn't cross a page boundary.
477 */
478# if TMPL_MEM_TYPE_SIZE > 1
479 if (RT_LIKELY( !(GCPtrMem & TMPL_MEM_TYPE_ALIGN)
480 || TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, GCPtrMem, TMPL_MEM_TYPE) ))
481# endif
482 {
483 /*
484 * TLB lookup.
485 */
486 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
487 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
488 if (RT_LIKELY(pTlbe->uTag == uTag))
489 {
490 /*
491 * Check TLB page table level access flags.
492 */
493 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
494 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
495 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
496 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE
497 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY
498 | IEMTLBE_F_PT_NO_WRITE | fNoUser))
499 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
500 {
501 /*
502 * Return the address.
503 */
504 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
505 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
506 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
507 *pbUnmapInfo = 0;
508 Log8(("IEM WO/map " TMPL_MEM_FMT_DESC " %RGv: %p\n",
509 GCPtrMem, &pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK]));
510 return (TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK];
511 }
512 }
513 }
514
515 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
516 outdated page pointer, or other troubles. (This will do a TLB load.) */
517 Log10Func(("%RGv falling back\n", GCPtrMem));
518# endif
519 return RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,WoSafeJmp)(pVCpu, pbUnmapInfo, UINT8_MAX, GCPtrMem);
520}
521
522
523/**
524 * Inlined read-only memory mapping function that longjumps on error.
525 */
526DECL_INLINE_THROW(TMPL_MEM_TYPE const *)
527RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,RoJmp)(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
528 uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
529{
530# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
531 /*
532 * Convert from segmented to flat address and check that it doesn't cross a page boundary.
533 */
534 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(TMPL_MEM_TYPE), GCPtrMem);
535# if TMPL_MEM_TYPE_SIZE > 1
536 if (RT_LIKELY( !(GCPtrEff & TMPL_MEM_TYPE_ALIGN)
537 || TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, GCPtrEff, TMPL_MEM_TYPE) ))
538# endif
539 {
540 /*
541 * TLB lookup.
542 */
543 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrEff);
544 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
545 if (RT_LIKELY(pTlbe->uTag == uTag))
546 {
547 /*
548 * Check TLB page table level access flags.
549 */
550 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
551 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
552 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
553 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
554 | IEMTLBE_F_PT_NO_ACCESSED | fNoUser))
555 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
556 {
557 /*
558 * Return the address.
559 */
560 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
561 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
562 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
563 *pbUnmapInfo = 0;
564 Log9(("IEM RO/map " TMPL_MEM_FMT_DESC " %d|%RGv: %p\n",
565 iSegReg, GCPtrMem, &pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK]));
566 return (TMPL_MEM_TYPE const *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK];
567 }
568 }
569 }
570
571 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
572 outdated page pointer, or other troubles. (This will do a TLB load.) */
573 Log10Func(("%u:%RGv falling back\n", iSegReg, GCPtrMem));
574# endif
575 return RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,RoSafeJmp)(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
576}
577
578
579/**
580 * Inlined flat read-only memory mapping function that longjumps on error.
581 */
582DECL_INLINE_THROW(TMPL_MEM_TYPE const *)
583RT_CONCAT3(iemMemFlatMapData,TMPL_MEM_FN_SUFF,RoJmp)(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
584 RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
585{
586# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
587 /*
588 * Check that the address doesn't cross a page boundary.
589 */
590# if TMPL_MEM_TYPE_SIZE > 1
591 if (RT_LIKELY( !(GCPtrMem & TMPL_MEM_TYPE_ALIGN)
592 || TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, GCPtrMem, TMPL_MEM_TYPE) ))
593# endif
594 {
595 /*
596 * TLB lookup.
597 */
598 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
599 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
600 if (RT_LIKELY(pTlbe->uTag == uTag))
601 {
602 /*
603 * Check TLB page table level access flags.
604 */
605 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
606 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
607 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
608 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
609 | IEMTLBE_F_PT_NO_ACCESSED | fNoUser))
610 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
611 {
612 /*
613 * Return the address.
614 */
615 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
616 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
617 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
618 *pbUnmapInfo = 0;
619 Log9(("IEM RO/map " TMPL_MEM_FMT_DESC " %RGv: %p\n",
620 GCPtrMem, &pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK]));
621 return (TMPL_MEM_TYPE const *)&pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK];
622 }
623 }
624 }
625
626 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
627 outdated page pointer, or other troubles. (This will do a TLB load.) */
628 Log10Func(("%RGv falling back\n", GCPtrMem));
629# endif
630 return RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,RoSafeJmp)(pVCpu, pbUnmapInfo, UINT8_MAX, GCPtrMem);
631}
632
633# endif /* !TMPL_MEM_NO_MAPPING */
634
635
636/*********************************************************************************************************************************
637* Stack Access *
638*********************************************************************************************************************************/
639# ifdef TMPL_MEM_WITH_STACK
640# ifdef IEM_WITH_SETJMP
641
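/* Variants emitted below (subject to TMPL_MEM_TYPE_SIZE and TMPL_WITH_PUSH_SREG):
   - iemMemStackPush<Suff>Jmp / iemMemStackPop<Suff>Jmp: generic SS-relative push/pop.
   - iemMemFlat32StackPush<Suff>Jmp / iemMemFlat32StackPop<Suff>Jmp: flat 32-bit
     stack (zero base, 4 GiB limit, asserted in the push variant).
   - iemMemFlat64StackPush<Suff>Jmp / iemMemFlat64StackPop<Suff>Jmp: 64-bit mode.
   - The SReg push variants store only the low 16 bits of the value, matching
     segment register pushes; see iemMemStackPushUxxSRegSafeJmp for the details. */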
642/**
643 * Stack push function that longjmps on error.
644 */
645DECL_INLINE_THROW(void)
646RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
647{
648# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
649 /*
650 * Decrement the stack pointer (prep), apply segmentation and check that
651 * the item doesn't cross a page boundary.
652 */
653 uint64_t uNewRsp;
654 RTGCPTR const GCPtrTop = iemRegGetRspForPush(pVCpu, sizeof(TMPL_MEM_TYPE), &uNewRsp);
655 RTGCPTR const GCPtrEff = iemMemApplySegmentToWriteJmp(pVCpu, X86_SREG_SS, sizeof(TMPL_MEM_TYPE), GCPtrTop);
656# if TMPL_MEM_TYPE_SIZE > 1
657 if (RT_LIKELY( !(GCPtrEff & TMPL_MEM_TYPE_ALIGN)
658 || TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, GCPtrEff, TMPL_MEM_TYPE) ))
659# endif
660 {
661 /*
662 * TLB lookup.
663 */
664 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrEff);
665 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
666 if (RT_LIKELY(pTlbe->uTag == uTag))
667 {
668 /*
669 * Check TLB page table level access flags.
670 */
671 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
672 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
673 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
674 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE
675 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY
676 | IEMTLBE_F_PT_NO_WRITE | fNoUser))
677 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
678 {
679 /*
680 * Do the push and return.
681 */
682 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
683 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
684 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
685 Log8(("IEM WR " TMPL_MEM_FMT_DESC " SS|%RGv (%RX64->%RX64): " TMPL_MEM_FMT_TYPE "\n",
686 GCPtrEff, pVCpu->cpum.GstCtx.rsp, uNewRsp, uValue));
687 *(TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK] = uValue;
688 pVCpu->cpum.GstCtx.rsp = uNewRsp;
689 return;
690 }
691 }
692 }
693
694 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
695 outdated page pointer, or other troubles. (This will do a TLB load.) */
696 Log10Func(("%RGv falling back\n", GCPtrEff));
697# endif
698 RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, uValue);
699}
700
701
702/**
703 * Stack pop function that longjmps on error.
704 */
705DECL_INLINE_THROW(TMPL_MEM_TYPE)
706RT_CONCAT3(iemMemStackPop,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
707{
708# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
709 /*
710 * Increment the stack pointer (prep), apply segmentation and check that
711 * the item doesn't cross a page boundary.
712 */
713 uint64_t uNewRsp;
714 RTGCPTR const GCPtrTop = iemRegGetRspForPop(pVCpu, sizeof(TMPL_MEM_TYPE), &uNewRsp);
715 RTGCPTR const GCPtrEff = iemMemApplySegmentToWriteJmp(pVCpu, X86_SREG_SS, sizeof(TMPL_MEM_TYPE), GCPtrTop);
716# if TMPL_MEM_TYPE_SIZE > 1
717 if (RT_LIKELY( !(GCPtrEff & TMPL_MEM_TYPE_ALIGN)
718 || TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, GCPtrEff, TMPL_MEM_TYPE) ))
719# endif
720 {
721 /*
722 * TLB lookup.
723 */
724 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrEff);
725 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
726 if (RT_LIKELY(pTlbe->uTag == uTag))
727 {
728 /*
729 * Check TLB page table level access flags.
730 */
731 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
732 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
733 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
734 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
735 | IEMTLBE_F_PT_NO_ACCESSED | fNoUser))
736 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
737 {
738 /*
739 * Do the pop and return.
740 */
741 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
742 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
743 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
744 TMPL_MEM_TYPE const uRet = *(TMPL_MEM_TYPE const *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK];
745 Log9(("IEM RD " TMPL_MEM_FMT_DESC " SS|%RGv (%RX64->%RX64): " TMPL_MEM_FMT_TYPE "\n",
746 GCPtrEff, pVCpu->cpum.GstCtx.rsp, uNewRsp, uRet));
747 pVCpu->cpum.GstCtx.rsp = uNewRsp;
748 return uRet;
749 }
750 }
751 }
752
753 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
754 outdated page pointer, or other troubles. (This will do a TLB load.) */
755 Log10Func(("%RGv falling back\n", GCPtrEff));
756# endif
757 return RT_CONCAT3(iemMemStackPop,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu);
758}
759
760# ifdef TMPL_WITH_PUSH_SREG
761/**
762 * Stack segment push function that longjmps on error.
763 *
764 * For a detailed discussion of the behaviour see the fallback functions
765 * iemMemStackPushUxxSRegSafeJmp.
766 */
767DECL_INLINE_THROW(void)
768RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,SRegJmp)(PVMCPUCC pVCpu, TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
769{
770# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
771 /*
772 * Decrement the stack pointer (prep), apply segmentation and check that
773 * the item doesn't cross a page boundary.
774 */
775 uint64_t uNewRsp;
776 RTGCPTR const GCPtrTop = iemRegGetRspForPush(pVCpu, sizeof(TMPL_MEM_TYPE), &uNewRsp);
777 RTGCPTR const GCPtrEff = iemMemApplySegmentToWriteJmp(pVCpu, X86_SREG_SS, sizeof(TMPL_MEM_TYPE), GCPtrTop);
778# if TMPL_MEM_TYPE_SIZE > 1
779 if (RT_LIKELY( !(GCPtrEff & (sizeof(uint16_t) - 1U))
780 || TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, GCPtrEff, uint16_t) ))
781# endif
782 {
783 /*
784 * TLB lookup.
785 */
786 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrEff);
787 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
788 if (RT_LIKELY(pTlbe->uTag == uTag))
789 {
790 /*
791 * Check TLB page table level access flags.
792 */
793 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
794 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
795 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
796 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE
797 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY
798 | IEMTLBE_F_PT_NO_WRITE | fNoUser))
799 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
800 {
801 /*
802 * Do the push and return.
803 */
804 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
805 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
806 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
807 Log8(("IEM WR " TMPL_MEM_FMT_DESC " SS|%RGv (%RX64->%RX64): " TMPL_MEM_FMT_TYPE " [sreg]\n",
808 GCPtrEff, pVCpu->cpum.GstCtx.rsp, uNewRsp, uValue));
809 *(uint16_t *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK] = (uint16_t)uValue;
810 pVCpu->cpum.GstCtx.rsp = uNewRsp;
811 return;
812 }
813 }
814 }
815
816 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
817 outdated page pointer, or other troubles. (This will do a TLB load.) */
818 Log10Func(("%RGv falling back\n", GCPtrEff));
819# endif
820 RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,SRegSafeJmp)(pVCpu, uValue);
821}
822
823# endif
824# if TMPL_MEM_TYPE_SIZE != 8
825
826/**
827 * 32-bit flat stack push function that longjmps on error.
828 */
829DECL_INLINE_THROW(void)
830RT_CONCAT3(iemMemFlat32StackPush,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
831{
832 Assert( pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig
833 && pVCpu->cpum.GstCtx.ss.Attr.n.u4Type == X86_SEL_TYPE_RW_ACC
834 && pVCpu->cpum.GstCtx.ss.u32Limit == UINT32_MAX
835 && pVCpu->cpum.GstCtx.ss.u64Base == 0);
836# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
837 /*
838 * Calculate the new stack pointer and check that the item doesn't cross a page boundary.
839 */
840 uint32_t const uNewEsp = pVCpu->cpum.GstCtx.esp - sizeof(TMPL_MEM_TYPE);
841# if TMPL_MEM_TYPE_SIZE > 1
842 if (RT_LIKELY( !(uNewEsp & TMPL_MEM_TYPE_ALIGN)
843 || TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, uNewEsp, TMPL_MEM_TYPE) ))
844# endif
845 {
846 /*
847 * TLB lookup.
848 */
849 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, (RTGCPTR)uNewEsp); /* Doesn't work w/o casting to RTGCPTR (win /3 hangs). */
850 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
851 if (RT_LIKELY(pTlbe->uTag == uTag))
852 {
853 /*
854 * Check TLB page table level access flags.
855 */
856 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
857 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
858 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
859 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE
860 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY
861 | IEMTLBE_F_PT_NO_WRITE | fNoUser))
862 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
863 {
864 /*
865 * Do the push and return.
866 */
867 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
868 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
869 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
870 Log8(("IEM WR " TMPL_MEM_FMT_DESC " SS|%RX32 (<-%RX32): " TMPL_MEM_FMT_TYPE "\n",
871 uNewEsp, pVCpu->cpum.GstCtx.esp, uValue));
872 *(TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[uNewEsp & GUEST_PAGE_OFFSET_MASK] = uValue;
873 pVCpu->cpum.GstCtx.rsp = uNewEsp;
874 return;
875 }
876 }
877 }
878
879 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
880 outdated page pointer, or other troubles. (This will do a TLB load.) */
881 Log10Func(("%RX32 falling back\n", uNewEsp));
882# endif
883 RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, uValue);
884}
885
886
887/**
888 * 32-bit flat stack pop function that longjmps on error.
889 */
890DECL_INLINE_THROW(TMPL_MEM_TYPE)
891RT_CONCAT3(iemMemFlat32StackPop,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
892{
893# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
894 /*
895 * Calculate the new stack pointer and check that the item doesn't cross a page boundary.
896 */
897 uint32_t const uOldEsp = pVCpu->cpum.GstCtx.esp;
898# if TMPL_MEM_TYPE_SIZE > 1
899 if (RT_LIKELY( !(uOldEsp & TMPL_MEM_TYPE_ALIGN)
900 || TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, uOldEsp, TMPL_MEM_TYPE) ))
901# endif
902 {
903 /*
904 * TLB lookup.
905 */
906 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, (RTGCPTR)uOldEsp); /* Cast is required! 2023-08-11 */
907 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
908 if (RT_LIKELY(pTlbe->uTag == uTag))
909 {
910 /*
911 * Check TLB page table level access flags.
912 */
913 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
914 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
915 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
916 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
917 | IEMTLBE_F_PT_NO_ACCESSED | fNoUser))
918 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
919 {
920 /*
921 * Do the pop and return.
922 */
923 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
924 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
925 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
926 TMPL_MEM_TYPE const uRet = *(TMPL_MEM_TYPE const *)&pTlbe->pbMappingR3[uOldEsp & GUEST_PAGE_OFFSET_MASK];
927 pVCpu->cpum.GstCtx.rsp = uOldEsp + sizeof(TMPL_MEM_TYPE);
928 Log9(("IEM RD " TMPL_MEM_FMT_DESC " SS|%RX32 (->%RX32): " TMPL_MEM_FMT_TYPE "\n",
929 uOldEsp, uOldEsp + sizeof(TMPL_MEM_TYPE), uRet));
930 return uRet;
931 }
932 }
933 }
934
935 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
936 outdated page pointer, or other troubles. (This will do a TLB load.) */
937 Log10Func(("%RX32 falling back\n", uOldEsp));
938# endif
939 return RT_CONCAT3(iemMemStackPop,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu);
940}
941
942# endif /* TMPL_MEM_TYPE_SIZE != 8 */
943# ifdef TMPL_WITH_PUSH_SREG
944/**
945 * 32-bit flat stack segment push function that longjmps on error.
946 *
947 * For a detailed discussion of the behaviour see the fallback functions
948 * iemMemStackPushUxxSRegSafeJmp.
949 */
950DECL_INLINE_THROW(void)
951RT_CONCAT3(iemMemFlat32StackPush,TMPL_MEM_FN_SUFF,SRegJmp)(PVMCPUCC pVCpu, TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
952{
953# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
954 /*
955 * Calculate the new stack pointer and check that the item doesn't cross a page boundary.
956 */
957 uint32_t const uNewEsp = pVCpu->cpum.GstCtx.esp - sizeof(TMPL_MEM_TYPE);
958 if (RT_LIKELY( !(uNewEsp & (sizeof(uint16_t) - 1))
959 || TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, uNewEsp, uint16_t) ))
960 {
961 /*
962 * TLB lookup.
963 */
964 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, (RTGCPTR)uNewEsp); /* Doesn't work w/o casting to RTGCPTR (win /3 hangs). */
965 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
966 if (RT_LIKELY(pTlbe->uTag == uTag))
967 {
968 /*
969 * Check TLB page table level access flags.
970 */
971 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
972 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
973 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
974 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE
975 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY
976 | IEMTLBE_F_PT_NO_WRITE | fNoUser))
977 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
978 {
979 /*
980 * Do the push and return.
981 */
982 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
983 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
984 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
985 Log8(("IEM WR " TMPL_MEM_FMT_DESC " SS|%RX32 (<-%RX32): " TMPL_MEM_FMT_TYPE " [sreg]\n",
986 uNewEsp, pVCpu->cpum.GstCtx.esp, uValue));
987 *(uint16_t *)&pTlbe->pbMappingR3[uNewEsp & GUEST_PAGE_OFFSET_MASK] = (uint16_t)uValue;
988 pVCpu->cpum.GstCtx.rsp = uNewEsp;
989 return;
990 }
991 }
992 }
993
994 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
995 outdated page pointer, or other troubles. (This will do a TLB load.) */
996 Log10Func(("%RX32 falling back\n", uNewEsp));
997# endif
998 RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,SRegSafeJmp)(pVCpu, uValue);
999}
1000
1001# endif
1002# if TMPL_MEM_TYPE_SIZE != 4
1003
1004/**
1005 * 64-bit flat stack push function that longjmps on error.
1006 */
1007DECL_INLINE_THROW(void)
1008RT_CONCAT3(iemMemFlat64StackPush,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
1009{
1010# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
1011 /*
1012 * Calculate the new stack pointer and check that the item doesn't cross a page boundary.
1013 */
1014 uint64_t const uNewRsp = pVCpu->cpum.GstCtx.rsp - sizeof(TMPL_MEM_TYPE);
1015# if TMPL_MEM_TYPE_SIZE > 1
1016 if (RT_LIKELY( !(uNewRsp & TMPL_MEM_TYPE_ALIGN)
1017 || TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, uNewRsp, TMPL_MEM_TYPE) ))
1018# endif
1019 {
1020 /*
1021 * TLB lookup.
1022 */
1023 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, uNewRsp);
1024 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
1025 if (RT_LIKELY(pTlbe->uTag == uTag))
1026 {
1027 /*
1028 * Check TLB page table level access flags.
1029 */
1030 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
1031 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
1032 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
1033 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE
1034 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY
1035 | IEMTLBE_F_PT_NO_WRITE | fNoUser))
1036 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
1037 {
1038 /*
1039 * Do the push and return.
1040 */
1041 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
1042 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
1043 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
1044 Log8(("IEM WR " TMPL_MEM_FMT_DESC " SS|%RX64 (<-%RX64): " TMPL_MEM_FMT_TYPE "\n",
1045 uNewRsp, pVCpu->cpum.GstCtx.rsp, uValue));
1046 *(TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[uNewRsp & GUEST_PAGE_OFFSET_MASK] = uValue;
1047 pVCpu->cpum.GstCtx.rsp = uNewRsp;
1048 return;
1049 }
1050 }
1051 }
1052
1053 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
1054 outdated page pointer, or other troubles. (This will do a TLB load.) */
1055 Log10Func(("%RX64 falling back\n", uNewRsp));
1056# endif
1057 RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, uValue);
1058}
1059
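/* Usage sketch for the flat64 variants, assuming a U64 instantiation:
       iemMemFlat64StackPushU64Jmp(pVCpu, pVCpu->cpum.GstCtx.rax);
   Note that RSP is only committed after the store has succeeded, so a faulting
   push leaves the guest stack pointer untouched on this path. */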
1060
1061/**
1062 * 64-bit flat stack pop function that longjmps on error.
1063 */
1064DECL_INLINE_THROW(TMPL_MEM_TYPE)
1065RT_CONCAT3(iemMemFlat64StackPop,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1066{
1067# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
1068 /*
1069 * Calculate the new stack pointer and check that the item doesn't cross a page boundrary.
1070 */
1071 uint64_t const uOldRsp = pVCpu->cpum.GstCtx.rsp;
1072# if TMPL_MEM_TYPE_SIZE > 1
1073 if (RT_LIKELY( !(uOldRsp & TMPL_MEM_TYPE_ALIGN)
1074 || TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, uOldRsp, TMPL_MEM_TYPE) ))
1075# endif
1076 {
1077 /*
1078 * TLB lookup.
1079 */
1080 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, uOldRsp);
1081 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
1082 if (RT_LIKELY(pTlbe->uTag == uTag))
1083 {
1084 /*
1085 * Check TLB page table level access flags.
1086 */
1087 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
1088 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
1089 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
1090 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
1091 | IEMTLBE_F_PT_NO_ACCESSED | fNoUser))
1092 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
1093 {
1094 /*
1095 * Do the pop and return.
1096 */
1097 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
1098 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
1099 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
1100 TMPL_MEM_TYPE const uRet = *(TMPL_MEM_TYPE const *)&pTlbe->pbMappingR3[uOldRsp & GUEST_PAGE_OFFSET_MASK];
1101 pVCpu->cpum.GstCtx.rsp = uOldRsp + sizeof(TMPL_MEM_TYPE);
1102 Log9(("IEM RD " TMPL_MEM_FMT_DESC " SS|%RX64 (->%RX64): " TMPL_MEM_FMT_TYPE "\n",
1103 uOldRsp, uOldRsp + sizeof(TMPL_MEM_TYPE), uRet));
1104 return uRet;
1105 }
1106 }
1107 }
1108
1109 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
1110 outdated page pointer, or other troubles. (This will do a TLB load.) */
1111 Log10Func(("%RX64 falling back\n", uOldRsp));
1112# endif
1113 return RT_CONCAT3(iemMemStackPop,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu);
1114}
1115
1116# endif /* TMPL_MEM_TYPE_SIZE != 4 */
1117
1118# endif /* IEM_WITH_SETJMP */
1119# endif /* TMPL_MEM_WITH_STACK */
1120
1121
1122#endif /* IEM_WITH_SETJMP */
1123
1124#undef TMPL_MEM_TYPE
1125#undef TMPL_MEM_TYPE_ALIGN
1126#undef TMPL_MEM_TYPE_SIZE
1127#undef TMPL_MEM_FN_SUFF
1128#undef TMPL_MEM_FMT_TYPE
1129#undef TMPL_MEM_FMT_DESC
1130#undef TMPL_MEM_NO_STORE
1131