VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllMemRWTmpl.cpp.h@106061

Last change on this file: 106061, checked in by vboxsync, 2 months ago

Copyright year updates by scm.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 27.6 KB
/* $Id: IEMAllMemRWTmpl.cpp.h 106061 2024-09-16 14:03:52Z vboxsync $ */
/** @file
 * IEM - Interpreted Execution Manager - R/W Memory Functions Template.
 */

/*
 * Copyright (C) 2011-2024 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */


/* Check template parameters. */
#ifndef TMPL_MEM_TYPE
# error "TMPL_MEM_TYPE is undefined"
#endif
#ifndef TMPL_MEM_TYPE_ALIGN
# define TMPL_MEM_TYPE_ALIGN    (sizeof(TMPL_MEM_TYPE) - 1)
#endif
#ifndef TMPL_MEM_FN_SUFF
# error "TMPL_MEM_FN_SUFF is undefined"
#endif
#ifndef TMPL_MEM_FMT_TYPE
# error "TMPL_MEM_FMT_TYPE is undefined"
#endif
#ifndef TMPL_MEM_FMT_DESC
# error "TMPL_MEM_FMT_DESC is undefined"
#endif
#ifndef TMPL_MEM_MAP_FLAGS_ADD
# define TMPL_MEM_MAP_FLAGS_ADD (0)
#endif

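/*
 * For illustration only: this template is meant to be #included with the
 * TMPL_MEM_* parameters above already defined.  A hypothetical dword
 * instantiation (the values the real includer uses may differ) would be:
 *
 *      #define TMPL_MEM_TYPE       uint32_t
 *      #define TMPL_MEM_FN_SUFF    U32
 *      #define TMPL_MEM_FMT_TYPE   "%#010x"
 *      #define TMPL_MEM_FMT_DESC   "dword"
 *      #define TMPL_MEM_WITH_STACK
 *      #include "IEMAllMemRWTmpl.cpp.h"
 *
 * TMPL_MEM_TYPE_ALIGN then defaults to sizeof(uint32_t) - 1 (natural
 * alignment) and RT_CONCAT expands the function names below to
 * iemMemFetchDataU32, iemMemStoreDataU32, and so on.
 */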

/**
 * Standard fetch function.
 *
 * This is used by CImpl code, so it needs to be kept even when IEM_WITH_SETJMP
 * is defined.
 */
VBOXSTRICTRC RT_CONCAT(iemMemFetchData,TMPL_MEM_FN_SUFF)(PVMCPUCC pVCpu, TMPL_MEM_TYPE *puDst,
                                                         uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
{
    /* The lazy approach for now... */
    uint8_t              bUnmapInfo;
    TMPL_MEM_TYPE const *puSrc;
    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&puSrc, &bUnmapInfo, sizeof(*puSrc), iSegReg, GCPtrMem,
                                IEM_ACCESS_DATA_R, TMPL_MEM_TYPE_ALIGN | TMPL_MEM_MAP_FLAGS_ADD);
    if (rc == VINF_SUCCESS)
    {
        *puDst = *puSrc;
        rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
        Log2(("IEM RD " TMPL_MEM_FMT_DESC " %d|%RGv: " TMPL_MEM_FMT_TYPE "\n", iSegReg, GCPtrMem, *puDst));
    }
    return rc;
}


#ifdef IEM_WITH_SETJMP
/**
 * Safe/fallback fetch function that longjmps on error.
 */
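/* Note: TMPL_MEM_BY_REF is presumably defined for types too wide to pass or
   return by value (e.g. 128/256-bit operands); that variant fills a
   caller-provided buffer instead of returning the value. */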
# ifdef TMPL_MEM_BY_REF
void
RT_CONCAT3(iemMemFetchData,TMPL_MEM_FN_SUFF,SafeJmp)(PVMCPUCC pVCpu, TMPL_MEM_TYPE *pDst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
{
# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
    pVCpu->iem.s.DataTlb.cTlbSafeReadPath++;
# endif
    uint8_t              bUnmapInfo;
    TMPL_MEM_TYPE const *pSrc = (TMPL_MEM_TYPE const *)iemMemMapSafeJmp(pVCpu, &bUnmapInfo, sizeof(*pSrc), iSegReg, GCPtrMem,
                                                                        IEM_ACCESS_DATA_R, TMPL_MEM_TYPE_ALIGN | TMPL_MEM_MAP_FLAGS_ADD);
    *pDst = *pSrc;
    iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
    Log2(("IEM RD " TMPL_MEM_FMT_DESC " %d|%RGv: " TMPL_MEM_FMT_TYPE "\n", iSegReg, GCPtrMem, pDst));
}
# else  /* !TMPL_MEM_BY_REF */
TMPL_MEM_TYPE
RT_CONCAT3(iemMemFetchData,TMPL_MEM_FN_SUFF,SafeJmp)(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
{
# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
    pVCpu->iem.s.DataTlb.cTlbSafeReadPath++;
# endif
    uint8_t              bUnmapInfo;
    TMPL_MEM_TYPE const *puSrc = (TMPL_MEM_TYPE const *)iemMemMapSafeJmp(pVCpu, &bUnmapInfo, sizeof(*puSrc), iSegReg, GCPtrMem,
                                                                         IEM_ACCESS_DATA_R, TMPL_MEM_TYPE_ALIGN | TMPL_MEM_MAP_FLAGS_ADD);
    TMPL_MEM_TYPE const  uRet = *puSrc;
    iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
    Log2(("IEM RD " TMPL_MEM_FMT_DESC " %d|%RGv: " TMPL_MEM_FMT_TYPE "\n", iSegReg, GCPtrMem, uRet));
    return uRet;
}
# endif /* !TMPL_MEM_BY_REF */
#endif /* IEM_WITH_SETJMP */

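/*
 * Note: judging by the cTlbSafeReadPath/cTlbSafeWritePath statistics, these
 * SafeJmp functions are presumably the out-of-line fallbacks taken when an
 * inlined, TLB-based fast path cannot service the access directly (TLB miss,
 * page crossing, special memory and the like).
 */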


/**
 * Standard store function.
 *
 * This is used by CImpl code, so it needs to be kept even when IEM_WITH_SETJMP
 * is defined.
 */
VBOXSTRICTRC RT_CONCAT(iemMemStoreData,TMPL_MEM_FN_SUFF)(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
#ifdef TMPL_MEM_BY_REF
                                                         TMPL_MEM_TYPE const *pValue) RT_NOEXCEPT
#else
                                                         TMPL_MEM_TYPE uValue) RT_NOEXCEPT
#endif
{
    /* The lazy approach for now... */
    uint8_t        bUnmapInfo;
    TMPL_MEM_TYPE *puDst;
    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&puDst, &bUnmapInfo, sizeof(*puDst),
                                iSegReg, GCPtrMem, IEM_ACCESS_DATA_W, TMPL_MEM_TYPE_ALIGN | TMPL_MEM_MAP_FLAGS_ADD);
    if (rc == VINF_SUCCESS)
    {
#ifdef TMPL_MEM_BY_REF
        *puDst = *pValue;
#else
        *puDst = uValue;
#endif
        rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
#ifdef TMPL_MEM_BY_REF
        Log6(("IEM WR " TMPL_MEM_FMT_DESC " %d|%RGv: " TMPL_MEM_FMT_TYPE "\n", iSegReg, GCPtrMem, pValue));
#else
        Log6(("IEM WR " TMPL_MEM_FMT_DESC " %d|%RGv: " TMPL_MEM_FMT_TYPE "\n", iSegReg, GCPtrMem, uValue));
#endif
    }
    return rc;
}


#ifdef IEM_WITH_SETJMP
/**
 * Stores a data value of the template type, longjmp on error.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   iSegReg     The index of the segment register to use for
 *                      this access.  The base and limits are checked.
 * @param   GCPtrMem    The address of the guest memory.
 * @param   uValue      The value to store.
 */
void RT_CONCAT3(iemMemStoreData,TMPL_MEM_FN_SUFF,SafeJmp)(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
#ifdef TMPL_MEM_BY_REF
                                                          TMPL_MEM_TYPE const *pValue) IEM_NOEXCEPT_MAY_LONGJMP
#else
                                                          TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
#endif
{
# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
    pVCpu->iem.s.DataTlb.cTlbSafeWritePath++;
# endif
#ifdef TMPL_MEM_BY_REF
    Log6(("IEM WR " TMPL_MEM_FMT_DESC " %d|%RGv: " TMPL_MEM_FMT_TYPE "\n", iSegReg, GCPtrMem, pValue));
#else
    Log6(("IEM WR " TMPL_MEM_FMT_DESC " %d|%RGv: " TMPL_MEM_FMT_TYPE "\n", iSegReg, GCPtrMem, uValue));
#endif
    uint8_t        bUnmapInfo;
    TMPL_MEM_TYPE *puDst = (TMPL_MEM_TYPE *)iemMemMapSafeJmp(pVCpu, &bUnmapInfo, sizeof(*puDst), iSegReg, GCPtrMem,
                                                             IEM_ACCESS_DATA_W, TMPL_MEM_TYPE_ALIGN | TMPL_MEM_MAP_FLAGS_ADD);
#ifdef TMPL_MEM_BY_REF
    *puDst = *pValue;
#else
    *puDst = uValue;
#endif
    iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
}
#endif /* IEM_WITH_SETJMP */


#ifdef IEM_WITH_SETJMP

/**
 * Maps a data buffer for atomic read+write direct access (or via a bounce
 * buffer), longjmp on error.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   pbUnmapInfo Pointer to unmap info variable.
 * @param   iSegReg     The index of the segment register to use for
 *                      this access.  The base and limits are checked.
 * @param   GCPtrMem    The address of the guest memory.
 */
TMPL_MEM_TYPE *
RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,AtSafeJmp)(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
                                                     uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
{
# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
    pVCpu->iem.s.DataTlb.cTlbSafeWritePath++;
# endif
    Log8(("IEM AT/map " TMPL_MEM_FMT_DESC " %d|%RGv\n", iSegReg, GCPtrMem));
    *pbUnmapInfo = 1 | ((IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE) << 4); /* zero is for the TLB hit */
    return (TMPL_MEM_TYPE *)iemMemMapSafeJmp(pVCpu, pbUnmapInfo, sizeof(TMPL_MEM_TYPE), iSegReg, GCPtrMem,
                                             IEM_ACCESS_DATA_ATOMIC, TMPL_MEM_TYPE_ALIGN | TMPL_MEM_MAP_FLAGS_ADD);
}


/**
 * Maps a data buffer for read+write direct access (or via a bounce buffer),
 * longjmp on error.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   pbUnmapInfo Pointer to unmap info variable.
 * @param   iSegReg     The index of the segment register to use for
 *                      this access.  The base and limits are checked.
 * @param   GCPtrMem    The address of the guest memory.
 */
TMPL_MEM_TYPE *
RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,RwSafeJmp)(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
                                                     uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
{
# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
    pVCpu->iem.s.DataTlb.cTlbSafeWritePath++;
# endif
    Log8(("IEM RW/map " TMPL_MEM_FMT_DESC " %d|%RGv\n", iSegReg, GCPtrMem));
    *pbUnmapInfo = 1 | ((IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE) << 4); /* zero is for the TLB hit */
    return (TMPL_MEM_TYPE *)iemMemMapSafeJmp(pVCpu, pbUnmapInfo, sizeof(TMPL_MEM_TYPE), iSegReg, GCPtrMem,
                                             IEM_ACCESS_DATA_RW, TMPL_MEM_TYPE_ALIGN | TMPL_MEM_MAP_FLAGS_ADD);
}


/**
 * Maps a data buffer for write-only direct access (or via a bounce buffer),
 * longjmp on error.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   pbUnmapInfo Pointer to unmap info variable.
 * @param   iSegReg     The index of the segment register to use for
 *                      this access.  The base and limits are checked.
 * @param   GCPtrMem    The address of the guest memory.
 */
TMPL_MEM_TYPE *
RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,WoSafeJmp)(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
                                                     uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
{
# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
    pVCpu->iem.s.DataTlb.cTlbSafeWritePath++;
# endif
    Log8(("IEM WO/map " TMPL_MEM_FMT_DESC " %d|%RGv\n", iSegReg, GCPtrMem));
    *pbUnmapInfo = 1 | (IEM_ACCESS_TYPE_WRITE << 4); /* zero is for the TLB hit */
    return (TMPL_MEM_TYPE *)iemMemMapSafeJmp(pVCpu, pbUnmapInfo, sizeof(TMPL_MEM_TYPE), iSegReg, GCPtrMem,
                                             IEM_ACCESS_DATA_W, TMPL_MEM_TYPE_ALIGN | TMPL_MEM_MAP_FLAGS_ADD);
}

/**
 * Maps a data buffer for read-only direct access (or via a bounce buffer),
 * longjmp on error.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   pbUnmapInfo Pointer to unmap info variable.
 * @param   iSegReg     The index of the segment register to use for
 *                      this access.  The base and limits are checked.
 * @param   GCPtrMem    The address of the guest memory.
 */
TMPL_MEM_TYPE const *
RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,RoSafeJmp)(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
                                                     uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
{
# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
    pVCpu->iem.s.DataTlb.cTlbSafeReadPath++;
# endif
    Log4(("IEM RO/map " TMPL_MEM_FMT_DESC " %d|%RGv\n", iSegReg, GCPtrMem));
    *pbUnmapInfo = 1 | (IEM_ACCESS_TYPE_READ << 4); /* zero is for the TLB hit */
    return (TMPL_MEM_TYPE *)iemMemMapSafeJmp(pVCpu, pbUnmapInfo, sizeof(TMPL_MEM_TYPE), iSegReg, GCPtrMem,
                                             IEM_ACCESS_DATA_R, TMPL_MEM_TYPE_ALIGN | TMPL_MEM_MAP_FLAGS_ADD);
}
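
/*
 * Usage sketch for the mapping functions above (illustrative only, assuming a
 * uint32_t instantiation with TMPL_MEM_FN_SUFF=U32 and a caller-supplied
 * uSrc operand); a read-modify-write sequence would look like:
 *
 *      uint8_t   bUnmapInfo;
 *      uint32_t *pu32Dst = iemMemMapDataU32RwSafeJmp(pVCpu, &bUnmapInfo, iSegReg, GCPtrMem);
 *      *pu32Dst += uSrc;
 *      iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
 *
 * i.e. map, modify in place, then commit and unmap.
 */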

#endif /* IEM_WITH_SETJMP */


#ifdef TMPL_MEM_WITH_STACK

/**
 * Pops a general purpose register off the stack.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure of the
 *                  calling thread.
 * @param   iGReg   The GREG to load the popped value into.
 */
VBOXSTRICTRC RT_CONCAT(iemMemStackPopGReg,TMPL_MEM_FN_SUFF)(PVMCPUCC pVCpu, uint8_t iGReg) RT_NOEXCEPT
{
    Assert(iGReg < 16);

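    /* Note: iemRegGetRspForPop below only calculates the new RSP; it is not
       committed to the guest context until the memory read has succeeded, so
       a faulting POP leaves the guest stack pointer unchanged. */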
    /* Increment the stack pointer. */
    uint64_t uNewRsp;
    RTGCPTR  GCPtrTop = iemRegGetRspForPop(pVCpu, sizeof(TMPL_MEM_TYPE), &uNewRsp);

    /* Load the value the lazy way. */
    uint8_t              bUnmapInfo;
    TMPL_MEM_TYPE const *puSrc;
    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&puSrc, &bUnmapInfo, sizeof(TMPL_MEM_TYPE), X86_SREG_SS, GCPtrTop,
                                IEM_ACCESS_STACK_R, TMPL_MEM_TYPE_ALIGN | TMPL_MEM_MAP_FLAGS_ADD);
    if (rc == VINF_SUCCESS)
    {
        TMPL_MEM_TYPE const uValue = *puSrc;
        rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);

        /* Commit the register and new RSP values. */
        if (rc == VINF_SUCCESS)
        {
            Log10(("IEM RD " TMPL_MEM_FMT_DESC " SS|%RGv (%RX64->%RX64): " TMPL_MEM_FMT_TYPE " (r%u)\n",
                   GCPtrTop, pVCpu->cpum.GstCtx.rsp, uNewRsp, uValue, iGReg));
            pVCpu->cpum.GstCtx.rsp = uNewRsp;
            if (sizeof(TMPL_MEM_TYPE) != sizeof(uint16_t))
                pVCpu->cpum.GstCtx.aGRegs[iGReg].u   = uValue;
            else
                pVCpu->cpum.GstCtx.aGRegs[iGReg].u16 = uValue;
            return VINF_SUCCESS;
        }
    }
    return rc;
}


/**
 * Pushes an item onto the stack, regular version.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure of the
 *                  calling thread.
 * @param   uValue  The value to push.
 */
VBOXSTRICTRC RT_CONCAT(iemMemStackPush,TMPL_MEM_FN_SUFF)(PVMCPUCC pVCpu, TMPL_MEM_TYPE uValue) RT_NOEXCEPT
{
    /* Decrement the stack pointer. */
    uint64_t uNewRsp;
    RTGCPTR  GCPtrTop = iemRegGetRspForPush(pVCpu, sizeof(TMPL_MEM_TYPE), &uNewRsp);

    /* Write the value the lazy way. */
    uint8_t        bUnmapInfo;
    TMPL_MEM_TYPE *puDst;
    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&puDst, &bUnmapInfo, sizeof(TMPL_MEM_TYPE), X86_SREG_SS, GCPtrTop,
                                IEM_ACCESS_STACK_W, TMPL_MEM_TYPE_ALIGN | TMPL_MEM_MAP_FLAGS_ADD);
    if (rc == VINF_SUCCESS)
    {
        *puDst = uValue;
        rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);

        /* Commit the new RSP value unless an access handler made trouble. */
        if (rc == VINF_SUCCESS)
        {
            Log12(("IEM WR " TMPL_MEM_FMT_DESC " SS|%RGv (%RX64->%RX64): " TMPL_MEM_FMT_TYPE "\n",
                   GCPtrTop, pVCpu->cpum.GstCtx.rsp, uNewRsp, uValue));
            pVCpu->cpum.GstCtx.rsp = uNewRsp;
            return VINF_SUCCESS;
        }
    }

    return rc;
}


/**
 * Pops a generic item off the stack, regular version.
 *
 * This is used by C-implementation code.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure of the
 *                  calling thread.
 * @param   puValue Where to store the popped value.
 */
VBOXSTRICTRC RT_CONCAT(iemMemStackPop,TMPL_MEM_FN_SUFF)(PVMCPUCC pVCpu, TMPL_MEM_TYPE *puValue) RT_NOEXCEPT
{
    /* Increment the stack pointer. */
    uint64_t uNewRsp;
    RTGCPTR  GCPtrTop = iemRegGetRspForPop(pVCpu, sizeof(TMPL_MEM_TYPE), &uNewRsp);

    /* Read the value the lazy way. */
    uint8_t              bUnmapInfo;
    TMPL_MEM_TYPE const *puSrc;
    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&puSrc, &bUnmapInfo, sizeof(TMPL_MEM_TYPE), X86_SREG_SS, GCPtrTop,
                                IEM_ACCESS_STACK_R, TMPL_MEM_TYPE_ALIGN | TMPL_MEM_MAP_FLAGS_ADD);
    if (rc == VINF_SUCCESS)
    {
        *puValue = *puSrc;
        rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);

        /* Commit the new RSP value. */
        if (rc == VINF_SUCCESS)
        {
            Log10(("IEM RD " TMPL_MEM_FMT_DESC " SS|%RGv (%RX64->%RX64): " TMPL_MEM_FMT_TYPE "\n",
                   GCPtrTop, pVCpu->cpum.GstCtx.rsp, uNewRsp, *puValue));
            pVCpu->cpum.GstCtx.rsp = uNewRsp;
            return VINF_SUCCESS;
        }
    }
    return rc;
}


/**
 * Pushes an item onto the stack, using a temporary stack pointer.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure of the
 *                  calling thread.
 * @param   uValue  The value to push.
 * @param   pTmpRsp Pointer to the temporary stack pointer.
 */
VBOXSTRICTRC RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,Ex)(PVMCPUCC pVCpu, TMPL_MEM_TYPE uValue, PRTUINT64U pTmpRsp) RT_NOEXCEPT
{
    /* Decrement the stack pointer. */
    RTUINT64U NewRsp = *pTmpRsp;
    RTGCPTR   GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, sizeof(TMPL_MEM_TYPE));

    /* Write the value the lazy way. */
    uint8_t        bUnmapInfo;
    TMPL_MEM_TYPE *puDst;
    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&puDst, &bUnmapInfo, sizeof(TMPL_MEM_TYPE), X86_SREG_SS, GCPtrTop,
                                IEM_ACCESS_STACK_W, TMPL_MEM_TYPE_ALIGN | TMPL_MEM_MAP_FLAGS_ADD);
    if (rc == VINF_SUCCESS)
    {
        *puDst = uValue;
        rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);

        /* Commit the new RSP value unless an access handler made trouble. */
        if (rc == VINF_SUCCESS)
        {
            Log12(("IEM WR " TMPL_MEM_FMT_DESC " SS|%RGv (%RX64->%RX64): " TMPL_MEM_FMT_TYPE " [ex]\n",
                   GCPtrTop, pTmpRsp->u, NewRsp.u, uValue));
            *pTmpRsp = NewRsp;
            return VINF_SUCCESS;
        }
    }
    return rc;
}


/**
 * Pops an item off the stack, using a temporary stack pointer.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure of the
 *                  calling thread.
 * @param   puValue Where to store the popped value.
 * @param   pTmpRsp Pointer to the temporary stack pointer.
 */
VBOXSTRICTRC
RT_CONCAT3(iemMemStackPop,TMPL_MEM_FN_SUFF,Ex)(PVMCPUCC pVCpu, TMPL_MEM_TYPE *puValue, PRTUINT64U pTmpRsp) RT_NOEXCEPT
{
    /* Increment the stack pointer. */
    RTUINT64U NewRsp = *pTmpRsp;
    RTGCPTR   GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, sizeof(TMPL_MEM_TYPE));

    /* Read the value the lazy way. */
    uint8_t              bUnmapInfo;
    TMPL_MEM_TYPE const *puSrc;
    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&puSrc, &bUnmapInfo, sizeof(TMPL_MEM_TYPE), X86_SREG_SS, GCPtrTop,
                                IEM_ACCESS_STACK_R, TMPL_MEM_TYPE_ALIGN | TMPL_MEM_MAP_FLAGS_ADD);
    if (rc == VINF_SUCCESS)
    {
        *puValue = *puSrc;
        rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);

        /* Commit the new RSP value. */
        if (rc == VINF_SUCCESS)
        {
            Log10(("IEM RD " TMPL_MEM_FMT_DESC " SS|%RGv (%RX64->%RX64): " TMPL_MEM_FMT_TYPE " [ex]\n",
                   GCPtrTop, pTmpRsp->u, NewRsp.u, *puValue));
            *pTmpRsp = NewRsp;
            return VINF_SUCCESS;
        }
    }
    return rc;
}


# ifdef IEM_WITH_SETJMP

/**
 * Safe/fallback stack store function that longjmps on error.
 */
void RT_CONCAT3(iemMemStoreStack,TMPL_MEM_FN_SUFF,SafeJmp)(PVMCPUCC pVCpu, RTGCPTR GCPtrMem,
                                                           TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
{
# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
    pVCpu->iem.s.DataTlb.cTlbSafeWritePath++;
# endif

    uint8_t        bUnmapInfo;
    TMPL_MEM_TYPE *puDst = (TMPL_MEM_TYPE *)iemMemMapSafeJmp(pVCpu, &bUnmapInfo, sizeof(TMPL_MEM_TYPE), X86_SREG_SS, GCPtrMem,
                                                             IEM_ACCESS_STACK_W, TMPL_MEM_TYPE_ALIGN | TMPL_MEM_MAP_FLAGS_ADD);
    *puDst = uValue;
    iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);

    Log12(("IEM WR " TMPL_MEM_FMT_DESC " SS|%RGv: " TMPL_MEM_FMT_TYPE "\n", GCPtrMem, uValue));
}


# ifdef TMPL_WITH_PUSH_SREG
/**
 * Safe/fallback stack SREG store function that longjmps on error.
 */
void RT_CONCAT3(iemMemStoreStack,TMPL_MEM_FN_SUFF,SRegSafeJmp)(PVMCPUCC pVCpu, RTGCPTR GCPtrMem,
                                                               TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
{
# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
    pVCpu->iem.s.DataTlb.cTlbSafeWritePath++;
# endif

    /* bs3-cpu-weird-1 explores this instruction.  The AMD 3990X does it by the
       book, with a zero-extended DWORD write.  My Intel 10980XE, however, goes
       all weird in real mode, where it writes a DWORD with the top word of
       EFLAGS in the top half; in all other modes it does a WORD access. */

    /** @todo Docs indicate the behavior changed maybe in Pentium or Pentium Pro.
     *        Check on ancient hardware when it actually changed. */
    uint8_t bUnmapInfo;
    if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
    {
        if (!IEM_IS_REAL_MODE(pVCpu))
        {
            /* WORD per Intel specs. */
            uint16_t *puDst = (uint16_t *)iemMemMapSafeJmp(pVCpu, &bUnmapInfo, sizeof(uint16_t), X86_SREG_SS, GCPtrMem,
                                                           IEM_ACCESS_STACK_W, (sizeof(uint16_t) - 1) | TMPL_MEM_MAP_FLAGS_ADD); /** @todo 2 or 4 alignment check for PUSH SS? */
            *puDst = (uint16_t)uValue;
            iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
            Log12(("IEM WR 'word' SS|%RGv: %#06x [sreg/i]\n", GCPtrMem, (uint16_t)uValue));
        }
        else
        {
            /* DWORD real-mode weirdness observed on the 10980XE. */
            /** @todo Check this on other Intel CPUs and when pushing registers other
             *        than FS (which is all that bs3-cpu-weird-1 does atm).  (Maybe this is
             *        something for the CPU profile... Hope not.) */
            uint32_t *puDst = (uint32_t *)iemMemMapSafeJmp(pVCpu, &bUnmapInfo, sizeof(uint32_t), X86_SREG_SS, GCPtrMem,
                                                           IEM_ACCESS_STACK_W, (sizeof(uint32_t) - 1) | TMPL_MEM_MAP_FLAGS_ADD);
            *puDst = (uint16_t)uValue | (pVCpu->cpum.GstCtx.eflags.u & (UINT32_C(0xffff0000) & ~X86_EFL_RAZ_MASK));
            iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
            Log12(("IEM WR " TMPL_MEM_FMT_DESC " SS|%RGv: " TMPL_MEM_FMT_TYPE " [sreg/ir]\n", GCPtrMem, uValue));
        }
    }
    else
    {
        /* DWORD per spec. */
        uint32_t *puDst = (uint32_t *)iemMemMapSafeJmp(pVCpu, &bUnmapInfo, sizeof(uint32_t), X86_SREG_SS, GCPtrMem,
                                                       IEM_ACCESS_STACK_W, (sizeof(uint32_t) - 1) | TMPL_MEM_MAP_FLAGS_ADD);
        *puDst = uValue;
        iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
        Log12(("IEM WR " TMPL_MEM_FMT_DESC " SS|%RGv: " TMPL_MEM_FMT_TYPE " [sreg]\n", GCPtrMem, uValue));
    }
}
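
/*
 * Worked example for the Intel real-mode path above, with made-up numbers:
 * pushing selector 0x1234 while EFLAGS=0x00213202 stores the DWORD 0x00211234,
 * i.e. the selector in the low word and EFLAGS.u & 0xffff0000 & ~X86_EFL_RAZ_MASK
 * in the high word (assuming bits 16-21 are not RAZ).  The by-the-book path
 * stores the zero-extended 0x00001234 instead.
 */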
# endif /* TMPL_WITH_PUSH_SREG */


/**
 * Safe/fallback stack fetch function that longjmps on error.
 */
TMPL_MEM_TYPE RT_CONCAT3(iemMemFetchStack,TMPL_MEM_FN_SUFF,SafeJmp)(PVMCPUCC pVCpu, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
{
# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
    pVCpu->iem.s.DataTlb.cTlbSafeReadPath++;
# endif

    /* Read the data. */
    uint8_t              bUnmapInfo;
    TMPL_MEM_TYPE const *puSrc = (TMPL_MEM_TYPE const *)iemMemMapSafeJmp(pVCpu, &bUnmapInfo, sizeof(TMPL_MEM_TYPE), X86_SREG_SS,
                                                                         GCPtrMem, IEM_ACCESS_STACK_R, TMPL_MEM_TYPE_ALIGN | TMPL_MEM_MAP_FLAGS_ADD);
    TMPL_MEM_TYPE const  uValue = *puSrc;
    iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);

    /* Log the value and return it. */
    Log10(("IEM RD " TMPL_MEM_FMT_DESC " SS|%RGv: " TMPL_MEM_FMT_TYPE "\n", GCPtrMem, uValue));
    return uValue;
}


/**
 * Safe/fallback stack push function that longjmps on error.
 */
void RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,SafeJmp)(PVMCPUCC pVCpu, TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
{
# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
    pVCpu->iem.s.DataTlb.cTlbSafeWritePath++;
# endif

    /* Decrement the stack pointer (prep). */
    uint64_t      uNewRsp;
    RTGCPTR const GCPtrTop = iemRegGetRspForPush(pVCpu, sizeof(TMPL_MEM_TYPE), &uNewRsp);

    /* Write the data. */
    uint8_t        bUnmapInfo;
    TMPL_MEM_TYPE *puDst = (TMPL_MEM_TYPE *)iemMemMapSafeJmp(pVCpu, &bUnmapInfo, sizeof(TMPL_MEM_TYPE), X86_SREG_SS, GCPtrTop,
                                                             IEM_ACCESS_STACK_W, TMPL_MEM_TYPE_ALIGN | TMPL_MEM_MAP_FLAGS_ADD);
    *puDst = uValue;
    iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);

    /* Commit the RSP change. */
    Log12(("IEM WR " TMPL_MEM_FMT_DESC " SS|%RGv (%RX64->%RX64): " TMPL_MEM_FMT_TYPE "\n",
           GCPtrTop, pVCpu->cpum.GstCtx.rsp, uNewRsp, uValue));
    pVCpu->cpum.GstCtx.rsp = uNewRsp;
}


/**
 * Safe/fallback stack pop GReg function that longjmps on error.
 */
void RT_CONCAT3(iemMemStackPopGReg,TMPL_MEM_FN_SUFF,SafeJmp)(PVMCPUCC pVCpu, uint8_t iGReg) IEM_NOEXCEPT_MAY_LONGJMP
{
# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
    pVCpu->iem.s.DataTlb.cTlbSafeReadPath++;
# endif

    /* Increment the stack pointer. */
    uint64_t      uNewRsp;
    RTGCPTR const GCPtrTop = iemRegGetRspForPop(pVCpu, sizeof(TMPL_MEM_TYPE), &uNewRsp);

    /* Read the data. */
    uint8_t              bUnmapInfo;
    TMPL_MEM_TYPE const *puSrc = (TMPL_MEM_TYPE const *)iemMemMapSafeJmp(pVCpu, &bUnmapInfo, sizeof(TMPL_MEM_TYPE), X86_SREG_SS,
                                                                         GCPtrTop, IEM_ACCESS_STACK_R, TMPL_MEM_TYPE_ALIGN | TMPL_MEM_MAP_FLAGS_ADD);
    TMPL_MEM_TYPE const  uValue = *puSrc;
    iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);

    /* Commit the register and RSP values. */
    Log10(("IEM RD " TMPL_MEM_FMT_DESC " SS|%RGv (%RX64->%RX64): " TMPL_MEM_FMT_TYPE " (r%u)\n",
           GCPtrTop, pVCpu->cpum.GstCtx.rsp, uNewRsp, uValue, iGReg));
    pVCpu->cpum.GstCtx.rsp = uNewRsp;
    if (sizeof(TMPL_MEM_TYPE) != sizeof(uint16_t))
        pVCpu->cpum.GstCtx.aGRegs[iGReg].u   = uValue;
    else
        pVCpu->cpum.GstCtx.aGRegs[iGReg].u16 = uValue;
}

# ifdef TMPL_WITH_PUSH_SREG
/**
 * Safe/fallback stack SREG push function that longjmps on error.
 */
void RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,SRegSafeJmp)(PVMCPUCC pVCpu, TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
{
# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
    pVCpu->iem.s.DataTlb.cTlbSafeWritePath++;
# endif

    /* Decrement the stack pointer (prep). */
    uint64_t      uNewRsp;
    RTGCPTR const GCPtrTop = iemRegGetRspForPush(pVCpu, sizeof(TMPL_MEM_TYPE), &uNewRsp);

    /* Write the data. */
    /* The Intel docs talk about zero-extending the selector register value.
       My actual Intel CPU here might be zero-extending the value internally,
       but it still only writes the lower word... */
    /** @todo Test this on new HW and on AMD and in 64-bit mode.  Also test what
     *        happens when crossing a page boundary: is the high word checked for
     *        write accessibility or not?  Probably it is.  What about segment limits?
     *        This behavior appears to be shared with trap error codes.
     *
     *        Docs indicate the behavior changed maybe in Pentium or Pentium Pro.
     *        Check on ancient hardware when it actually changed. */
    uint8_t   bUnmapInfo;
    uint16_t *puDst = (uint16_t *)iemMemMapSafeJmp(pVCpu, &bUnmapInfo, sizeof(uint16_t), X86_SREG_SS, GCPtrTop,
                                                   IEM_ACCESS_STACK_W, (sizeof(uint16_t) - 1) | TMPL_MEM_MAP_FLAGS_ADD); /** @todo 2 or 4 alignment check for PUSH SS? */
    *puDst = (uint16_t)uValue;
    iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);

    /* Commit the RSP change. */
    Log12(("IEM WR " TMPL_MEM_FMT_DESC " SS|%RGv (%RX64->%RX64): " TMPL_MEM_FMT_TYPE " [sreg]\n",
           GCPtrTop, pVCpu->cpum.GstCtx.rsp, uNewRsp, uValue));
    pVCpu->cpum.GstCtx.rsp = uNewRsp;
}
# endif /* TMPL_WITH_PUSH_SREG */

# endif /* IEM_WITH_SETJMP */

#endif /* TMPL_MEM_WITH_STACK */

/* clean up */
#undef TMPL_MEM_TYPE
#undef TMPL_MEM_TYPE_ALIGN
#undef TMPL_MEM_FN_SUFF
#undef TMPL_MEM_FMT_TYPE
#undef TMPL_MEM_FMT_DESC
#undef TMPL_WITH_PUSH_SREG
#undef TMPL_MEM_MAP_FLAGS_ADD