/* $Id: IEMAllThrdFuncs.cpp 106427 2024-10-17 10:59:12Z vboxsync $ */
/** @file
 * IEM - Instruction Decoding and Emulation, Threaded Functions.
 */

/*
 * Copyright (C) 2011-2024 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#ifndef LOG_GROUP /* defined when included by tstIEMCheckMc.cpp */
# define LOG_GROUP LOG_GROUP_IEM
#endif
#define VMCPU_INCL_CPUM_GST_CTX
#define IEM_WITH_OPAQUE_DECODER_STATE
#include <VBox/vmm/iem.h>
#include <VBox/vmm/cpum.h>
#include <VBox/vmm/apic.h>
#include <VBox/vmm/pdm.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/iom.h>
#include <VBox/vmm/em.h>
#include <VBox/vmm/hm.h>
#include <VBox/vmm/nem.h>
#include <VBox/vmm/gim.h>
#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
# include <VBox/vmm/em.h>
# include <VBox/vmm/hm_svm.h>
#endif
#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
# include <VBox/vmm/hmvmxinline.h>
#endif
#include <VBox/vmm/tm.h>
#include <VBox/vmm/dbgf.h>
#include <VBox/vmm/dbgftrace.h>
#include "IEMInternal.h"
#include <VBox/vmm/vmcc.h>
#include <VBox/log.h>
#include <VBox/err.h>
#include <VBox/param.h>
#include <VBox/dis.h>
#include <VBox/disopcode-x86-amd64.h>
#include <iprt/asm-math.h>
#include <iprt/assert.h>
#include <iprt/string.h>
#include <iprt/x86.h>

#include "IEMInline.h"
#include "IEMMc.h"

#include "IEMThreadedFunctions.h"


/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/

/** Variant of IEM_MC_ADVANCE_RIP_AND_FINISH with instruction length as param
 * and only used when we're in 16-bit code on a pre-386 CPU. */
#define IEM_MC_ADVANCE_RIP_AND_FINISH_THREADED_PC16(a_cbInstr, a_rcNormal) \
    return iemRegAddToIp16AndFinishingNoFlags(pVCpu, a_cbInstr, a_rcNormal)

/** Variant of IEM_MC_ADVANCE_RIP_AND_FINISH with instruction length as param
 * and used for 16-bit and 32-bit code on 386 and later CPUs. */
#define IEM_MC_ADVANCE_RIP_AND_FINISH_THREADED_PC32(a_cbInstr, a_rcNormal) \
    return iemRegAddToEip32AndFinishingNoFlags(pVCpu, a_cbInstr, a_rcNormal)

/** Variant of IEM_MC_ADVANCE_RIP_AND_FINISH with instruction length as param
 * and only used when we're in 64-bit code. */
#define IEM_MC_ADVANCE_RIP_AND_FINISH_THREADED_PC64(a_cbInstr, a_rcNormal) \
    return iemRegAddToRip64AndFinishingNoFlags(pVCpu, a_cbInstr, a_rcNormal)


/** Variant of IEM_MC_ADVANCE_RIP_AND_FINISH with instruction length as param
 * and only used when we're in 16-bit code on a pre-386 CPU and we need to
 * check and clear flags. */
#define IEM_MC_ADVANCE_RIP_AND_FINISH_THREADED_PC16_WITH_FLAGS(a_cbInstr, a_rcNormal) \
    return iemRegAddToIp16AndFinishingClearingRF(pVCpu, a_cbInstr, a_rcNormal)

/** Variant of IEM_MC_ADVANCE_RIP_AND_FINISH with instruction length as param
 * and used for 16-bit and 32-bit code on 386 and later CPUs when we need to
 * check and clear flags. */
#define IEM_MC_ADVANCE_RIP_AND_FINISH_THREADED_PC32_WITH_FLAGS(a_cbInstr, a_rcNormal) \
    return iemRegAddToEip32AndFinishingClearingRF(pVCpu, a_cbInstr, a_rcNormal)

/** Variant of IEM_MC_ADVANCE_RIP_AND_FINISH with instruction length as param
 * and only used when we're in 64-bit code and we need to check and clear
 * flags. */
#define IEM_MC_ADVANCE_RIP_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_cbInstr, a_rcNormal) \
    return iemRegAddToRip64AndFinishingClearingRF(pVCpu, a_cbInstr, a_rcNormal)

#undef IEM_MC_ADVANCE_RIP_AND_FINISH


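/* Editor's illustrative sketch (not generated output): the threaded function
 * bodies emitted by the python script invoke the macros above, so a
 * hypothetical generated body advancing flat 32-bit code past a 1-byte
 * instruction reduces to a single tail call:
 *
 * @code
 *     IEM_MC_ADVANCE_RIP_AND_FINISH_THREADED_PC32(1, VINF_SUCCESS);
 *     // ... which the define above turns into:
 *     //     return iemRegAddToEip32AndFinishingNoFlags(pVCpu, 1, VINF_SUCCESS);
 * @endcode
 */
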
/** Variant of IEM_MC_REL_JMP_S8_AND_FINISH with instruction length as extra
 * parameter, for use in 16-bit code on a pre-386 CPU. */
#define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC16(a_i8, a_cbInstr, a_rcNormal) \
    return iemRegIp16RelativeJumpS8AndFinishNoFlags(pVCpu, a_cbInstr, (a_i8), a_rcNormal)

/** Variant of IEM_MC_REL_JMP_S8_AND_FINISH with instruction length and operand
 * size as extra parameters, for use in 16-bit and 32-bit code on 386 and
 * later CPUs. */
#define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC32(a_i8, a_cbInstr, a_enmEffOpSize, a_rcNormal) \
    return iemRegEip32RelativeJumpS8AndFinishNoFlags(pVCpu, a_cbInstr, (a_i8), a_enmEffOpSize, a_rcNormal)

/** Variant of IEM_MC_REL_JMP_S8_AND_FINISH with instruction length and operand
 * size as extra parameters, for use in flat 32-bit code on 386 and later
 * CPUs. */
#define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC32_FLAT(a_i8, a_cbInstr, a_enmEffOpSize, a_rcNormal) \
    return iemRegEip32RelativeJumpS8FlatAndFinishNoFlags(pVCpu, a_cbInstr, (a_i8), a_enmEffOpSize, a_rcNormal)

/** Variant of IEM_MC_REL_JMP_S8_AND_FINISH with instruction length and operand
 * size as extra parameters, for use in 64-bit code. */
#define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC64(a_i8, a_cbInstr, a_enmEffOpSize, a_rcNormal) \
    return iemRegRip64RelativeJumpS8AndFinishNoFlags(pVCpu, a_cbInstr, (a_i8), a_enmEffOpSize, a_rcNormal)

/** Variant of IEM_MC_REL_JMP_S8_AND_FINISH with instruction length and operand
 * size as extra parameters, for use in 64-bit code jumping within a page. */
#define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC64_INTRAPG(a_i8, a_cbInstr, a_enmEffOpSize, a_rcNormal) \
    return iemRegRip64RelativeJumpS8IntraPgAndFinishNoFlags(pVCpu, a_cbInstr, (a_i8), a_enmEffOpSize, a_rcNormal)


/** Variant of IEM_MC_REL_JMP_S8_AND_FINISH with instruction length as extra
 * parameter, for use in 16-bit code on a pre-386 CPU when we need to check
 * and clear flags. */
#define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC16_WITH_FLAGS(a_i8, a_cbInstr, a_rcNormal) \
    return iemRegIp16RelativeJumpS8AndFinishClearingRF(pVCpu, a_cbInstr, (a_i8), a_rcNormal)

/** Variant of IEM_MC_REL_JMP_S8_AND_FINISH with instruction length and operand
 * size as extra parameters, for use in 16-bit and 32-bit code on 386 and
 * later CPUs when we need to check and clear flags. */
#define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC32_WITH_FLAGS(a_i8, a_cbInstr, a_enmEffOpSize, a_rcNormal) \
    return iemRegEip32RelativeJumpS8AndFinishClearingRF(pVCpu, a_cbInstr, (a_i8), a_enmEffOpSize, a_rcNormal)

/** Variant of IEM_MC_REL_JMP_S8_AND_FINISH with instruction length and operand
 * size as extra parameters, for use in flat 32-bit code on 386 and later
 * CPUs when we need to check and clear flags. */
#define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC32_FLAT_WITH_FLAGS(a_i8, a_cbInstr, a_enmEffOpSize, a_rcNormal) \
    return iemRegEip32RelativeJumpS8FlatAndFinishClearingRF(pVCpu, a_cbInstr, (a_i8), a_enmEffOpSize, a_rcNormal)

/** Variant of IEM_MC_REL_JMP_S8_AND_FINISH with instruction length and operand
 * size as extra parameters, for use in 64-bit code when we need to check and
 * clear flags. */
#define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_i8, a_cbInstr, a_enmEffOpSize, a_rcNormal) \
    return iemRegRip64RelativeJumpS8AndFinishClearingRF(pVCpu, a_cbInstr, (a_i8), a_enmEffOpSize, a_rcNormal)

/** Variant of IEM_MC_REL_JMP_S8_AND_FINISH with instruction length and operand
 * size as extra parameters, for use in 64-bit code jumping within a page when
 * we need to check and clear flags. */
#define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC64_INTRAPG_WITH_FLAGS(a_i8, a_cbInstr, a_enmEffOpSize, a_rcNormal) \
    return iemRegRip64RelativeJumpS8IntraPgAndFinishClearingRF(pVCpu, a_cbInstr, (a_i8), a_enmEffOpSize, a_rcNormal)

#undef IEM_MC_REL_JMP_S8_AND_FINISH


/** Variant of IEM_MC_REL_JMP_S16_AND_FINISH with instruction length as
 * param, for use in 16-bit code on a pre-386 CPU. */
#define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC16(a_i16, a_cbInstr, a_rcNormal) \
    return iemRegEip32RelativeJumpS16AndFinishNoFlags(pVCpu, a_cbInstr, (a_i16), a_rcNormal)

/** Variant of IEM_MC_REL_JMP_S16_AND_FINISH with instruction length as
 * param, for use in 16-bit and 32-bit code on 386 and later CPUs. */
#define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC32(a_i16, a_cbInstr, a_rcNormal) \
    return iemRegEip32RelativeJumpS16AndFinishNoFlags(pVCpu, a_cbInstr, (a_i16), a_rcNormal)

/** Variant of IEM_MC_REL_JMP_S16_AND_FINISH with instruction length as
 * param, for use in flat 32-bit code on 386 and later CPUs. */
#define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC32_FLAT(a_i16, a_cbInstr, a_rcNormal) \
    return iemRegEip32RelativeJumpS16FlatAndFinishNoFlags(pVCpu, a_cbInstr, (a_i16), a_rcNormal)

/** Variant of IEM_MC_REL_JMP_S16_AND_FINISH with instruction length as
 * param, for use in 64-bit code. */
#define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC64(a_i16, a_cbInstr, a_rcNormal) \
    return iemRegRip64RelativeJumpS16AndFinishNoFlags(pVCpu, a_cbInstr, (a_i16), a_rcNormal)

/** Variant of IEM_MC_REL_JMP_S16_AND_FINISH with instruction length as
 * param, for use in 64-bit code jumping within a page.
 * @note No special function for this, there is nothing to save here. */
#define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC64_INTRAPG(a_i16, a_cbInstr, a_rcNormal) \
    return iemRegRip64RelativeJumpS16AndFinishNoFlags(pVCpu, a_cbInstr, (a_i16), a_rcNormal)


/** Variant of IEM_MC_REL_JMP_S16_AND_FINISH with instruction length as
 * param, for use in 16-bit code on a pre-386 CPU when we need to check and
 * clear flags. */
#define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC16_WITH_FLAGS(a_i16, a_cbInstr, a_rcNormal) \
    return iemRegEip32RelativeJumpS16AndFinishClearingRF(pVCpu, a_cbInstr, (a_i16), a_rcNormal)

/** Variant of IEM_MC_REL_JMP_S16_AND_FINISH with instruction length as
 * param, for use in 16-bit and 32-bit code on 386 and later CPUs when we
 * need to check and clear flags. */
#define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC32_WITH_FLAGS(a_i16, a_cbInstr, a_rcNormal) \
    return iemRegEip32RelativeJumpS16AndFinishClearingRF(pVCpu, a_cbInstr, (a_i16), a_rcNormal)

/** Variant of IEM_MC_REL_JMP_S16_AND_FINISH with instruction length as
 * param, for use in flat 32-bit code on 386 and later CPUs when we need
 * to check and clear flags. */
#define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC32_FLAT_WITH_FLAGS(a_i16, a_cbInstr, a_rcNormal) \
    return iemRegEip32RelativeJumpS16FlatAndFinishClearingRF(pVCpu, a_cbInstr, (a_i16), a_rcNormal)

/** Variant of IEM_MC_REL_JMP_S16_AND_FINISH with instruction length as
 * param, for use in 64-bit code when we need to check and clear flags. */
#define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_i16, a_cbInstr, a_rcNormal) \
    return iemRegRip64RelativeJumpS16AndFinishClearingRF(pVCpu, a_cbInstr, (a_i16), a_rcNormal)

/** Variant of IEM_MC_REL_JMP_S16_AND_FINISH with instruction length as
 * param, for use in 64-bit code jumping within a page when we need to check
 * and clear flags.
 * @note No special function for this, there is nothing to save here. */
#define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC64_INTRAPG_WITH_FLAGS(a_i16, a_cbInstr, a_rcNormal) \
    return iemRegRip64RelativeJumpS16AndFinishClearingRF(pVCpu, a_cbInstr, (a_i16), a_rcNormal)

#undef IEM_MC_REL_JMP_S16_AND_FINISH


/** Variant of IEM_MC_REL_JMP_S32_AND_FINISH with instruction length as
 * an extra parameter - dummy for pre-386 variations not eliminated by the
 * python script. */
#define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC16(a_i32, a_cbInstr, a_rcNormal) \
    do { RT_NOREF(pVCpu, a_i32, a_cbInstr, a_rcNormal); AssertFailedReturn(VERR_IEM_IPE_9); } while (0)

/** Variant of IEM_MC_REL_JMP_S32_AND_FINISH with instruction length as
 * an extra parameter, for use in 16-bit and 32-bit code on 386+. */
#define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC32(a_i32, a_cbInstr, a_rcNormal) \
    return iemRegEip32RelativeJumpS32AndFinishNoFlags(pVCpu, a_cbInstr, (a_i32), a_rcNormal)

/** Variant of IEM_MC_REL_JMP_S32_AND_FINISH with instruction length as
 * an extra parameter, for use in flat 32-bit code on 386+. */
#define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC32_FLAT(a_i32, a_cbInstr, a_rcNormal) \
    return iemRegEip32RelativeJumpS32FlatAndFinishNoFlags(pVCpu, a_cbInstr, (a_i32), a_rcNormal)

/** Variant of IEM_MC_REL_JMP_S32_AND_FINISH with instruction length as
 * an extra parameter, for use in 64-bit code. */
#define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC64(a_i32, a_cbInstr, a_rcNormal) \
    return iemRegRip64RelativeJumpS32AndFinishNoFlags(pVCpu, a_cbInstr, (a_i32), a_rcNormal)

/** Variant of IEM_MC_REL_JMP_S32_AND_FINISH with instruction length as
 * an extra parameter, for use in 64-bit code jumping within a page. */
#define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC64_INTRAPG(a_i32, a_cbInstr, a_rcNormal) \
    return iemRegRip64RelativeJumpS32IntraPgAndFinishNoFlags(pVCpu, a_cbInstr, (a_i32), a_rcNormal)


/** Variant of IEM_MC_REL_JMP_S32_AND_FINISH with instruction length as
 * an extra parameter - dummy for pre-386 variations not eliminated by the
 * python script. */
#define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC16_WITH_FLAGS(a_i32, a_cbInstr, a_rcNormal) \
    do { RT_NOREF(pVCpu, a_i32, a_cbInstr, a_rcNormal); AssertFailedReturn(VERR_IEM_IPE_9); } while (0)

/** Variant of IEM_MC_REL_JMP_S32_AND_FINISH with instruction length as
 * an extra parameter, for use in 16-bit and 32-bit code on 386+ when we need
 * to check and clear flags. */
#define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC32_WITH_FLAGS(a_i32, a_cbInstr, a_rcNormal) \
    return iemRegEip32RelativeJumpS32AndFinishClearingRF(pVCpu, a_cbInstr, (a_i32), a_rcNormal)

/** Variant of IEM_MC_REL_JMP_S32_AND_FINISH with instruction length as
 * an extra parameter, for use in flat 32-bit code on 386+ when we need
 * to check and clear flags. */
#define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC32_FLAT_WITH_FLAGS(a_i32, a_cbInstr, a_rcNormal) \
    return iemRegEip32RelativeJumpS32FlatAndFinishClearingRF(pVCpu, a_cbInstr, (a_i32), a_rcNormal)

/** Variant of IEM_MC_REL_JMP_S32_AND_FINISH with instruction length as
 * an extra parameter, for use in 64-bit code when we need to check and clear
 * flags. */
#define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_i32, a_cbInstr, a_rcNormal) \
    return iemRegRip64RelativeJumpS32AndFinishClearingRF(pVCpu, a_cbInstr, (a_i32), a_rcNormal)

/** Variant of IEM_MC_REL_JMP_S32_AND_FINISH with instruction length as
 * an extra parameter, for use in 64-bit code jumping within a page when we
 * need to check and clear flags. */
#define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC64_INTRAPG_WITH_FLAGS(a_i32, a_cbInstr, a_rcNormal) \
    return iemRegRip64RelativeJumpS32IntraPgAndFinishClearingRF(pVCpu, a_cbInstr, (a_i32), a_rcNormal)

#undef IEM_MC_REL_JMP_S32_AND_FINISH


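/* Editor's note, an interpretation of the naming rather than anything stated
 * in the sources: the _INTRAPG variants appear to cover branches the
 * recompiler has proven to stay within the current guest page, letting the
 * helper skip cross-page bookkeeping; where no cheaper helper exists, the
 * @note'd S16 variants above simply reuse the generic function. */
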
/** Variant of IEM_MC_SET_RIP_U16_AND_FINISH for pre-386 targets. */
#define IEM_MC_SET_RIP_U16_AND_FINISH_THREADED_PC16(a_u16NewIP) \
    return iemRegRipJumpU16AndFinishNoFlags((pVCpu), (a_u16NewIP))

/** Variant of IEM_MC_SET_RIP_U16_AND_FINISH for 386+ targets. */
#define IEM_MC_SET_RIP_U16_AND_FINISH_THREADED_PC32(a_u16NewIP) \
    return iemRegRipJumpU16AndFinishNoFlags((pVCpu), (a_u16NewIP))

/** Variant of IEM_MC_SET_RIP_U16_AND_FINISH for use in 64-bit code. */
#define IEM_MC_SET_RIP_U16_AND_FINISH_THREADED_PC64(a_u16NewIP) \
    return iemRegRipJumpU16AndFinishNoFlags((pVCpu), (a_u16NewIP))

/** Variant of IEM_MC_SET_RIP_U16_AND_FINISH for pre-386 targets that checks and
 * clears flags. */
#define IEM_MC_SET_RIP_U16_AND_FINISH_THREADED_PC16_WITH_FLAGS(a_u16NewIP) \
    return iemRegRipJumpU16AndFinishClearingRF((pVCpu), (a_u16NewIP), 0 /* cbInstr - not used */)

/** Variant of IEM_MC_SET_RIP_U16_AND_FINISH for 386+ targets that checks and
 * clears flags. */
#define IEM_MC_SET_RIP_U16_AND_FINISH_THREADED_PC32_WITH_FLAGS(a_u16NewIP) \
    return iemRegRipJumpU16AndFinishClearingRF((pVCpu), (a_u16NewIP), 0 /* cbInstr - not used */)

/** Variant of IEM_MC_SET_RIP_U16_AND_FINISH for use in 64-bit code that checks
 * and clears flags. */
#define IEM_MC_SET_RIP_U16_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_u16NewIP) \
    return iemRegRipJumpU16AndFinishClearingRF((pVCpu), (a_u16NewIP), 0 /* cbInstr - not used */)

#undef IEM_MC_SET_RIP_U16_AND_FINISH


/** Variant of IEM_MC_SET_RIP_U32_AND_FINISH for 386+ targets. */
#define IEM_MC_SET_RIP_U32_AND_FINISH_THREADED_PC32(a_u32NewEIP) \
    return iemRegRipJumpU32AndFinishNoFlags((pVCpu), (a_u32NewEIP))

/** Variant of IEM_MC_SET_RIP_U32_AND_FINISH for use in 64-bit code. */
#define IEM_MC_SET_RIP_U32_AND_FINISH_THREADED_PC64(a_u32NewEIP) \
    return iemRegRipJumpU32AndFinishNoFlags((pVCpu), (a_u32NewEIP))

/** Variant of IEM_MC_SET_RIP_U32_AND_FINISH for 386+ targets that checks and
 * clears flags. */
#define IEM_MC_SET_RIP_U32_AND_FINISH_THREADED_PC32_WITH_FLAGS(a_u32NewEIP) \
    return iemRegRipJumpU32AndFinishClearingRF((pVCpu), (a_u32NewEIP), 0 /* cbInstr - not used */)

/** Variant of IEM_MC_SET_RIP_U32_AND_FINISH for use in 64-bit code that checks
 * and clears flags. */
#define IEM_MC_SET_RIP_U32_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_u32NewEIP) \
    return iemRegRipJumpU32AndFinishClearingRF((pVCpu), (a_u32NewEIP), 0 /* cbInstr - not used */)

#undef IEM_MC_SET_RIP_U32_AND_FINISH


/** Variant of IEM_MC_SET_RIP_U64_AND_FINISH for use in 64-bit code. */
#define IEM_MC_SET_RIP_U64_AND_FINISH_THREADED_PC64(a_u64NewRIP) \
    return iemRegRipJumpU64AndFinishNoFlags((pVCpu), (a_u64NewRIP))

/** Variant of IEM_MC_SET_RIP_U64_AND_FINISH for use in 64-bit code that checks
 * and clears flags. */
#define IEM_MC_SET_RIP_U64_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_u64NewRIP) \
    return iemRegRipJumpU64AndFinishClearingRF((pVCpu), (a_u64NewRIP), 0 /* cbInstr - not used */)

#undef IEM_MC_SET_RIP_U64_AND_FINISH


/** Variant of IEM_MC_REL_CALL_S16_AND_FINISH with instruction length as
 * param, for use in 16-bit code on a pre-386 CPU. */
#define IEM_MC_REL_CALL_S16_AND_FINISH_THREADED_PC16(a_i16, a_cbInstr) \
    return iemRegRipRelativeCallS16AndFinishNoFlags(pVCpu, a_cbInstr, (a_i16))

/** Variant of IEM_MC_REL_CALL_S16_AND_FINISH with instruction length as
 * param, for use in 16-bit and 32-bit code on 386 and later CPUs. */
#define IEM_MC_REL_CALL_S16_AND_FINISH_THREADED_PC32(a_i16, a_cbInstr) \
    return iemRegRipRelativeCallS16AndFinishNoFlags(pVCpu, a_cbInstr, (a_i16))

/** Variant of IEM_MC_REL_CALL_S16_AND_FINISH with instruction length as
 * param, for use in 64-bit code. */
#define IEM_MC_REL_CALL_S16_AND_FINISH_THREADED_PC64(a_i16, a_cbInstr) \
    return iemRegRipRelativeCallS16AndFinishNoFlags(pVCpu, a_cbInstr, (a_i16))


/** Variant of IEM_MC_REL_CALL_S16_AND_FINISH with instruction length as
 * param, for use in 16-bit code on a pre-386 CPU when we need to check and
 * clear flags. */
#define IEM_MC_REL_CALL_S16_AND_FINISH_THREADED_PC16_WITH_FLAGS(a_i16, a_cbInstr) \
    return iemRegRipRelativeCallS16AndFinishClearingRF(pVCpu, a_cbInstr, (a_i16))

/** Variant of IEM_MC_REL_CALL_S16_AND_FINISH with instruction length as
 * param, for use in 16-bit and 32-bit code on 386 and later CPUs when we
 * need to check and clear flags. */
#define IEM_MC_REL_CALL_S16_AND_FINISH_THREADED_PC32_WITH_FLAGS(a_i16, a_cbInstr) \
    return iemRegRipRelativeCallS16AndFinishClearingRF(pVCpu, a_cbInstr, (a_i16))

/** Variant of IEM_MC_REL_CALL_S16_AND_FINISH with instruction length as
 * param, for use in 64-bit code when we need to check and clear flags. */
#define IEM_MC_REL_CALL_S16_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_i16, a_cbInstr) \
    return iemRegRipRelativeCallS16AndFinishClearingRF(pVCpu, a_cbInstr, (a_i16))

#undef IEM_MC_REL_CALL_S16_AND_FINISH


/** Variant of IEM_MC_REL_CALL_S32_AND_FINISH with instruction length as
 * an extra parameter - dummy for pre-386 variations not eliminated by the
 * python script. */
#define IEM_MC_REL_CALL_S32_AND_FINISH_THREADED_PC16(a_i32, a_cbInstr) \
    do { RT_NOREF(pVCpu, a_i32, a_cbInstr); AssertFailedReturn(VERR_IEM_IPE_9); } while (0)

/** Variant of IEM_MC_REL_CALL_S32_AND_FINISH with instruction length as
 * an extra parameter, for use in 16-bit and 32-bit code on 386+. */
#define IEM_MC_REL_CALL_S32_AND_FINISH_THREADED_PC32(a_i32, a_cbInstr) \
    return iemRegEip32RelativeCallS32AndFinishNoFlags(pVCpu, a_cbInstr, (a_i32))

/** Variant of IEM_MC_REL_CALL_S32_AND_FINISH with instruction length as
 * an extra parameter - dummy for 64-bit variations not eliminated by the
 * python script. */
#define IEM_MC_REL_CALL_S32_AND_FINISH_THREADED_PC64(a_i32, a_cbInstr) \
    do { RT_NOREF(pVCpu, a_i32, a_cbInstr); AssertFailedReturn(VERR_IEM_IPE_9); } while (0)

/** Variant of IEM_MC_REL_CALL_S32_AND_FINISH with instruction length as
 * an extra parameter - dummy for pre-386 variations not eliminated by the
 * python script. */
#define IEM_MC_REL_CALL_S32_AND_FINISH_THREADED_PC16_WITH_FLAGS(a_i32, a_cbInstr) \
    do { RT_NOREF(pVCpu, a_i32, a_cbInstr); AssertFailedReturn(VERR_IEM_IPE_9); } while (0)

/** Variant of IEM_MC_REL_CALL_S32_AND_FINISH with instruction length as
 * an extra parameter, for use in 16-bit and 32-bit code on 386+ when we need
 * to check and clear flags. */
#define IEM_MC_REL_CALL_S32_AND_FINISH_THREADED_PC32_WITH_FLAGS(a_i32, a_cbInstr) \
    return iemRegEip32RelativeCallS32AndFinishClearingRF(pVCpu, a_cbInstr, (a_i32))

/** Variant of IEM_MC_REL_CALL_S32_AND_FINISH with instruction length as
 * an extra parameter - dummy for 64-bit variations not eliminated by the
 * python script. */
#define IEM_MC_REL_CALL_S32_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_i32, a_cbInstr) \
    do { RT_NOREF(pVCpu, a_i32, a_cbInstr); AssertFailedReturn(VERR_IEM_IPE_9); } while (0)


#undef IEM_MC_REL_CALL_S32_AND_FINISH


/** Variant of IEM_MC_REL_CALL_S64_AND_FINISH with instruction length as
 * an extra parameter - dummy for 32-bit variations not eliminated by the
 * python script. */
#define IEM_MC_REL_CALL_S64_AND_FINISH_THREADED_PC32(a_i64, a_cbInstr) \
    do { RT_NOREF(pVCpu, a_i64, a_cbInstr); AssertFailedReturn(VERR_IEM_IPE_9); } while (0)

/** Variant of IEM_MC_REL_CALL_S64_AND_FINISH with instruction length as
 * an extra parameter, for use in 64-bit code. */
#define IEM_MC_REL_CALL_S64_AND_FINISH_THREADED_PC64(a_i64, a_cbInstr) \
    return iemRegRip64RelativeCallS64AndFinishNoFlags(pVCpu, a_cbInstr, (a_i64))


/** Variant of IEM_MC_REL_CALL_S64_AND_FINISH with instruction length as
 * an extra parameter - dummy for 16-bit and 32-bit variations not eliminated
 * by the python script. */
#define IEM_MC_REL_CALL_S64_AND_FINISH_THREADED_PC32_WITH_FLAGS(a_i64, a_cbInstr) \
    do { RT_NOREF(pVCpu, a_i64, a_cbInstr); AssertFailedReturn(VERR_IEM_IPE_9); } while (0)

/** Variant of IEM_MC_REL_CALL_S64_AND_FINISH with instruction length as
 * an extra parameter, for use in 64-bit code when we need to check and clear
 * flags. */
#define IEM_MC_REL_CALL_S64_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_i64, a_cbInstr) \
    return iemRegRip64RelativeCallS64AndFinishClearingRF(pVCpu, a_cbInstr, (a_i64))

#undef IEM_MC_REL_CALL_S64_AND_FINISH


/** Variant of IEM_MC_IND_CALL_U16_AND_FINISH for pre-386 targets. */
#define IEM_MC_IND_CALL_U16_AND_FINISH_THREADED_PC16(a_u16NewIP, a_cbInstr) \
    return iemRegIp16IndirectCallU16AndFinishNoFlags((pVCpu), a_cbInstr, (a_u16NewIP))

/** Variant of IEM_MC_IND_CALL_U16_AND_FINISH for 386+ targets. */
#define IEM_MC_IND_CALL_U16_AND_FINISH_THREADED_PC32(a_u16NewIP, a_cbInstr) \
    return iemRegEip32IndirectCallU16AndFinishNoFlags((pVCpu), a_cbInstr, (a_u16NewIP))

/** Variant of IEM_MC_IND_CALL_U16_AND_FINISH for use in 64-bit code - dummy
 * for variations not eliminated by the python script. */
#define IEM_MC_IND_CALL_U16_AND_FINISH_THREADED_PC64(a_u16NewIP, a_cbInstr) \
    do { RT_NOREF(pVCpu, a_u16NewIP, a_cbInstr); AssertFailedReturn(VERR_IEM_IPE_9); } while (0)

/** Variant of IEM_MC_IND_CALL_U16_AND_FINISH for pre-386 targets that checks and
 * clears flags. */
#define IEM_MC_IND_CALL_U16_AND_FINISH_THREADED_PC16_WITH_FLAGS(a_u16NewIP, a_cbInstr) \
    return iemRegIp16IndirectCallU16AndFinishClearingRF((pVCpu), a_cbInstr, (a_u16NewIP))

/** Variant of IEM_MC_IND_CALL_U16_AND_FINISH for 386+ targets that checks and
 * clears flags. */
#define IEM_MC_IND_CALL_U16_AND_FINISH_THREADED_PC32_WITH_FLAGS(a_u16NewIP, a_cbInstr) \
    return iemRegEip32IndirectCallU16AndFinishClearingRF((pVCpu), a_cbInstr, (a_u16NewIP))

/** Variant of IEM_MC_IND_CALL_U16_AND_FINISH for use in 64-bit code that checks
 * and clears flags - dummy for variations not eliminated by the python script. */
#define IEM_MC_IND_CALL_U16_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_u16NewIP, a_cbInstr) \
    do { RT_NOREF(pVCpu, a_u16NewIP, a_cbInstr); AssertFailedReturn(VERR_IEM_IPE_9); } while (0)

#undef IEM_MC_IND_CALL_U16_AND_FINISH


/** Variant of IEM_MC_IND_CALL_U32_AND_FINISH for 386+ targets. */
#define IEM_MC_IND_CALL_U32_AND_FINISH_THREADED_PC32(a_u32NewEIP, a_cbInstr) \
    return iemRegEip32IndirectCallU32AndFinishNoFlags((pVCpu), a_cbInstr, (a_u32NewEIP))

/** Variant of IEM_MC_IND_CALL_U32_AND_FINISH for use in 64-bit code - dummy
 * for variations not eliminated by the python script. */
#define IEM_MC_IND_CALL_U32_AND_FINISH_THREADED_PC64(a_u32NewEIP, a_cbInstr) \
    do { RT_NOREF(pVCpu, a_u32NewEIP, a_cbInstr); AssertFailedReturn(VERR_IEM_IPE_9); } while (0)

/** Variant of IEM_MC_IND_CALL_U32_AND_FINISH for 386+ targets that checks and
 * clears flags. */
#define IEM_MC_IND_CALL_U32_AND_FINISH_THREADED_PC32_WITH_FLAGS(a_u32NewEIP, a_cbInstr) \
    return iemRegEip32IndirectCallU32AndFinishClearingRF((pVCpu), a_cbInstr, (a_u32NewEIP))

/** Variant of IEM_MC_IND_CALL_U32_AND_FINISH for use in 64-bit code that checks
 * and clears flags - dummy for variations not eliminated by the python script. */
#define IEM_MC_IND_CALL_U32_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_u32NewEIP, a_cbInstr) \
    do { RT_NOREF(pVCpu, a_u32NewEIP, a_cbInstr); AssertFailedReturn(VERR_IEM_IPE_9); } while (0)

#undef IEM_MC_IND_CALL_U32_AND_FINISH


/** Variant of IEM_MC_IND_CALL_U64_AND_FINISH for use in 64-bit code. */
#define IEM_MC_IND_CALL_U64_AND_FINISH_THREADED_PC64(a_u64NewRIP, a_cbInstr) \
    return iemRegRip64IndirectCallU64AndFinishNoFlags((pVCpu), a_cbInstr, (a_u64NewRIP))

/** Variant of IEM_MC_IND_CALL_U64_AND_FINISH for use in 64-bit code that checks
 * and clears flags. */
#define IEM_MC_IND_CALL_U64_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_u64NewRIP, a_cbInstr) \
    return iemRegRip64IndirectCallU64AndFinishClearingRF((pVCpu), a_cbInstr, (a_u64NewRIP))

#undef IEM_MC_IND_CALL_U64_AND_FINISH


/** Variant of IEM_MC_RETN_AND_FINISH for pre-386 targets. */
#define IEM_MC_RETN_AND_FINISH_THREADED_PC16(a_cbPopArgs, a_cbInstr) \
    return iemRegRipNearReturnAndFinishNoFlags((pVCpu), a_cbInstr, (a_cbPopArgs), IEMMODE_16BIT)

/** Variant of IEM_MC_RETN_AND_FINISH for 386+ targets. */
#define IEM_MC_RETN_AND_FINISH_THREADED_PC32(a_cbPopArgs, a_cbInstr, a_enmEffOpSize) \
    return iemRegRipNearReturnAndFinishNoFlags((pVCpu), a_cbInstr, (a_cbPopArgs), (a_enmEffOpSize))

/** Variant of IEM_MC_RETN_AND_FINISH for use in 64-bit code. */
#define IEM_MC_RETN_AND_FINISH_THREADED_PC64(a_cbPopArgs, a_cbInstr, a_enmEffOpSize) \
    return iemRegRipNearReturnAndFinishNoFlags((pVCpu), a_cbInstr, (a_cbPopArgs), (a_enmEffOpSize))

/** Variant of IEM_MC_RETN_AND_FINISH for pre-386 targets that checks and
 * clears flags. */
#define IEM_MC_RETN_AND_FINISH_THREADED_PC16_WITH_FLAGS(a_cbPopArgs, a_cbInstr) \
    return iemRegRipNearReturnAndFinishClearingRF((pVCpu), a_cbInstr, (a_cbPopArgs), IEMMODE_16BIT)

/** Variant of IEM_MC_RETN_AND_FINISH for 386+ targets that checks and
 * clears flags. */
#define IEM_MC_RETN_AND_FINISH_THREADED_PC32_WITH_FLAGS(a_cbPopArgs, a_cbInstr, a_enmEffOpSize) \
    return iemRegRipNearReturnAndFinishClearingRF((pVCpu), a_cbInstr, (a_cbPopArgs), (a_enmEffOpSize))

/** Variant of IEM_MC_RETN_AND_FINISH for use in 64-bit code that checks and
 * clears flags. */
#define IEM_MC_RETN_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_cbPopArgs, a_cbInstr, a_enmEffOpSize) \
    return iemRegRipNearReturnAndFinishClearingRF((pVCpu), a_cbInstr, (a_cbPopArgs), (a_enmEffOpSize))

#undef IEM_MC_RETN_AND_FINISH


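/* Editor's illustrative sketch: a 'retn imm16' with a 3-byte encoding in
 * 32-bit code and a hypothetical immediate of 8 would wind up as
 *
 * @code
 *     IEM_MC_RETN_AND_FINISH_THREADED_PC32(8, 3, IEMMODE_32BIT);
 *     // pops the return address, then releases 8 more bytes of stack arguments
 * @endcode
 *
 * with a_cbPopArgs carrying the immediate operand. */
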
/** Variant of IEM_MC_CALC_RM_EFF_ADDR with additional parameters, 16-bit. */
#define IEM_MC_CALC_RM_EFF_ADDR_THREADED_16(a_GCPtrEff, a_bRm, a_u16Disp) \
    (a_GCPtrEff) = iemOpHlpCalcRmEffAddrThreadedAddr16(pVCpu, a_bRm, a_u16Disp)

/** Variant of IEM_MC_CALC_RM_EFF_ADDR with additional parameters, 32-bit. */
#define IEM_MC_CALC_RM_EFF_ADDR_THREADED_32(a_GCPtrEff, a_bRm, a_uSibAndRspOffset, a_u32Disp) \
    (a_GCPtrEff) = iemOpHlpCalcRmEffAddrThreadedAddr32(pVCpu, a_bRm, a_uSibAndRspOffset, a_u32Disp)

/** Variant of IEM_MC_CALC_RM_EFF_ADDR with additional parameters, 64-bit. */
#define IEM_MC_CALC_RM_EFF_ADDR_THREADED_64(a_GCPtrEff, a_bRmEx, a_uSibAndRspOffset, a_u32Disp, a_cbImm) \
    (a_GCPtrEff) = iemOpHlpCalcRmEffAddrThreadedAddr64(pVCpu, a_bRmEx, a_uSibAndRspOffset, a_u32Disp, a_cbImm)

/** Variant of IEM_MC_CALC_RM_EFF_ADDR with additional parameters, 64-bit
 * with FS or GS segment override. */
#define IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_FSGS(a_GCPtrEff, a_bRmEx, a_uSibAndRspOffset, a_u32Disp, a_cbImm) \
    (a_GCPtrEff) = iemOpHlpCalcRmEffAddrThreadedAddr64(pVCpu, a_bRmEx, a_uSibAndRspOffset, a_u32Disp, a_cbImm)

/** Variant of IEM_MC_CALC_RM_EFF_ADDR with additional parameters, 64-bit
 * with 32-bit address size override (0x67 prefix).
 * @todo How did that address prefix thing work for 64-bit code again? */
#define IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_ADDR32(a_GCPtrEff, a_bRmEx, a_uSibAndRspOffset, a_u32Disp, a_cbImm) \
    (a_GCPtrEff) = (uint32_t)iemOpHlpCalcRmEffAddrThreadedAddr64(pVCpu, a_bRmEx, a_uSibAndRspOffset, a_u32Disp, a_cbImm)

#undef IEM_MC_CALC_RM_EFF_ADDR


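/* Editor's illustrative sketch: going by the parameter docs of the helper
 * functions below, a_uSibAndRspOffset packs the raw SIB byte into bits 0..7
 * and the fixed RSP/ESP bias for the pop [xSP] case into bits 8..15, roughly:
 *
 * @code
 *     uint32_t const uSibAndRspOffset = bSib | ((uint32_t)cbRspOffset << 8); // hypothetical locals
 * @endcode
 */
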
/** Variant of IEM_MC_CALL_CIMPL_1 with explicit instruction length parameter. */
#define IEM_MC_CALL_CIMPL_1_THREADED(a_cbInstr, a_fFlags, a_fGstShwFlush, a_pfnCImpl, a0) \
    return (a_pfnCImpl)(pVCpu, (a_cbInstr), a0)
#undef IEM_MC_CALL_CIMPL_1

/** Variant of IEM_MC_CALL_CIMPL_2 with explicit instruction length parameter. */
#define IEM_MC_CALL_CIMPL_2_THREADED(a_cbInstr, a_fFlags, a_fGstShwFlush, a_pfnCImpl, a0, a1) \
    return (a_pfnCImpl)(pVCpu, (a_cbInstr), a0, a1)
#undef IEM_MC_CALL_CIMPL_2

/** Variant of IEM_MC_CALL_CIMPL_3 with explicit instruction length parameter. */
#define IEM_MC_CALL_CIMPL_3_THREADED(a_cbInstr, a_fFlags, a_fGstShwFlush, a_pfnCImpl, a0, a1, a2) \
    return (a_pfnCImpl)(pVCpu, (a_cbInstr), a0, a1, a2)
#undef IEM_MC_CALL_CIMPL_3

/** Variant of IEM_MC_CALL_CIMPL_4 with explicit instruction length parameter. */
#define IEM_MC_CALL_CIMPL_4_THREADED(a_cbInstr, a_fFlags, a_fGstShwFlush, a_pfnCImpl, a0, a1, a2, a3) \
    return (a_pfnCImpl)(pVCpu, (a_cbInstr), a0, a1, a2, a3)
#undef IEM_MC_CALL_CIMPL_4

/** Variant of IEM_MC_CALL_CIMPL_5 with explicit instruction length parameter. */
#define IEM_MC_CALL_CIMPL_5_THREADED(a_cbInstr, a_fFlags, a_fGstShwFlush, a_pfnCImpl, a0, a1, a2, a3, a4) \
    return (a_pfnCImpl)(pVCpu, (a_cbInstr), a0, a1, a2, a3, a4)
#undef IEM_MC_CALL_CIMPL_5


/** Variant of IEM_MC_DEFER_TO_CIMPL_0_RET with explicit instruction
 * length parameter. */
#define IEM_MC_DEFER_TO_CIMPL_0_RET_THREADED(a_cbInstr, a_fFlags, a_fGstShwFlush, a_pfnCImpl) \
    return (a_pfnCImpl)(pVCpu, (a_cbInstr))
#undef IEM_MC_DEFER_TO_CIMPL_0_RET

/** Variant of IEM_MC_DEFER_TO_CIMPL_1_RET with explicit instruction
 * length parameter. */
#define IEM_MC_DEFER_TO_CIMPL_1_RET_THREADED(a_cbInstr, a_fFlags, a_fGstShwFlush, a_pfnCImpl, a0) \
    return (a_pfnCImpl)(pVCpu, (a_cbInstr), a0)
#undef IEM_MC_DEFER_TO_CIMPL_1_RET

/** Variant of IEM_MC_DEFER_TO_CIMPL_2_RET with explicit instruction length
 * parameter. */
#define IEM_MC_DEFER_TO_CIMPL_2_RET_THREADED(a_cbInstr, a_fFlags, a_fGstShwFlush, a_pfnCImpl, a0, a1) \
    return (a_pfnCImpl)(pVCpu, (a_cbInstr), a0, a1)
#undef IEM_MC_DEFER_TO_CIMPL_2_RET

/** Variant of IEM_MC_DEFER_TO_CIMPL_3_RET with explicit instruction length
 * parameter. */
#define IEM_MC_DEFER_TO_CIMPL_3_RET_THREADED(a_cbInstr, a_fFlags, a_fGstShwFlush, a_pfnCImpl, a0, a1, a2) \
    return (a_pfnCImpl)(pVCpu, (a_cbInstr), a0, a1, a2)
#undef IEM_MC_DEFER_TO_CIMPL_3_RET

/** Variant of IEM_MC_DEFER_TO_CIMPL_4_RET with explicit instruction length
 * parameter. */
#define IEM_MC_DEFER_TO_CIMPL_4_RET_THREADED(a_cbInstr, a_fFlags, a_fGstShwFlush, a_pfnCImpl, a0, a1, a2, a3) \
    return (a_pfnCImpl)(pVCpu, (a_cbInstr), a0, a1, a2, a3)
#undef IEM_MC_DEFER_TO_CIMPL_4_RET

/** Variant of IEM_MC_DEFER_TO_CIMPL_5_RET with explicit instruction length
 * parameter. */
#define IEM_MC_DEFER_TO_CIMPL_5_RET_THREADED(a_cbInstr, a_fFlags, a_fGstShwFlush, a_pfnCImpl, a0, a1, a2, a3, a4) \
    return (a_pfnCImpl)(pVCpu, (a_cbInstr), a0, a1, a2, a3, a4)
#undef IEM_MC_DEFER_TO_CIMPL_5_RET


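/* Editor's illustrative sketch: deferring a hypothetical 2-byte instruction
 * entirely to a C worker (iemCImpl_xxx is a placeholder name) looks like this,
 * the a_fFlags and a_fGstShwFlush arguments being consumed by the recompiler
 * rather than by these macros:
 *
 * @code
 *     IEM_MC_DEFER_TO_CIMPL_0_RET_THREADED(2, 0, 0, iemCImpl_xxx); // fFlags=0, fGstShwFlush=0
 *     // expands to: return iemCImpl_xxx(pVCpu, 2);
 * @endcode
 */
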
/** Variant of IEM_MC_FETCH_GREG_U8 with extended (20) register index. */
#define IEM_MC_FETCH_GREG_U8_THREADED(a_u8Dst, a_iGRegEx) \
    (a_u8Dst) = iemGRegFetchU8Ex(pVCpu, (a_iGRegEx))

/** Variant of IEM_MC_FETCH_GREG_U8_ZX_U16 with extended (20) register index. */
#define IEM_MC_FETCH_GREG_U8_ZX_U16_THREADED(a_u16Dst, a_iGRegEx) \
    (a_u16Dst) = iemGRegFetchU8Ex(pVCpu, (a_iGRegEx))

/** Variant of IEM_MC_FETCH_GREG_U8_ZX_U32 with extended (20) register index. */
#define IEM_MC_FETCH_GREG_U8_ZX_U32_THREADED(a_u32Dst, a_iGRegEx) \
    (a_u32Dst) = iemGRegFetchU8Ex(pVCpu, (a_iGRegEx))

/** Variant of IEM_MC_FETCH_GREG_U8_ZX_U64 with extended (20) register index. */
#define IEM_MC_FETCH_GREG_U8_ZX_U64_THREADED(a_u64Dst, a_iGRegEx) \
    (a_u64Dst) = iemGRegFetchU8Ex(pVCpu, (a_iGRegEx))

/** Variant of IEM_MC_FETCH_GREG_U8_SX_U16 with extended (20) register index. */
#define IEM_MC_FETCH_GREG_U8_SX_U16_THREADED(a_u16Dst, a_iGRegEx) \
    (a_u16Dst) = (int8_t)iemGRegFetchU8Ex(pVCpu, (a_iGRegEx))

/** Variant of IEM_MC_FETCH_GREG_U8_SX_U32 with extended (20) register index. */
#define IEM_MC_FETCH_GREG_U8_SX_U32_THREADED(a_u32Dst, a_iGRegEx) \
    (a_u32Dst) = (int8_t)iemGRegFetchU8Ex(pVCpu, (a_iGRegEx))
#undef IEM_MC_FETCH_GREG_U8_SX_U32

/** Variant of IEM_MC_FETCH_GREG_U8_SX_U64 with extended (20) register index. */
#define IEM_MC_FETCH_GREG_U8_SX_U64_THREADED(a_u64Dst, a_iGRegEx) \
    (a_u64Dst) = (int8_t)iemGRegFetchU8Ex(pVCpu, (a_iGRegEx))
#undef IEM_MC_FETCH_GREG_U8_SX_U64

/** Variant of IEM_MC_STORE_GREG_U8 with extended (20) register index. */
#define IEM_MC_STORE_GREG_U8_THREADED(a_iGRegEx, a_u8Value) \
    *iemGRegRefU8Ex(pVCpu, (a_iGRegEx)) = (a_u8Value)
#undef IEM_MC_STORE_GREG_U8

/** Variant of IEM_MC_STORE_GREG_U8_CONST with extended (20) register index. */
#define IEM_MC_STORE_GREG_U8_CONST_THREADED(a_iGRegEx, a_u8Value) \
    *iemGRegRefU8Ex(pVCpu, (a_iGRegEx)) = (a_u8Value)
#undef IEM_MC_STORE_GREG_U8_CONST

/** Variant of IEM_MC_REF_GREG_U8 with extended (20) register index. */
#define IEM_MC_REF_GREG_U8_THREADED(a_pu8Dst, a_iGRegEx) \
    (a_pu8Dst) = iemGRegRefU8Ex(pVCpu, (a_iGRegEx))
#undef IEM_MC_REF_GREG_U8

/** Variant of IEM_MC_REF_GREG_U8_CONST with extended (20) register index. */
#define IEM_MC_REF_GREG_U8_CONST_THREADED(a_pu8Dst, a_iGRegEx) \
    (a_pu8Dst) = iemGRegRefU8Ex(pVCpu, (a_iGRegEx))
#undef IEM_MC_REF_GREG_U8_CONST

/** Variant of IEM_MC_ADD_GREG_U8_TO_LOCAL with extended (20) register index. */
#define IEM_MC_ADD_GREG_U8_TO_LOCAL_THREADED(a_u8Value, a_iGRegEx) \
    do { (a_u8Value) += iemGRegFetchU8Ex(pVCpu, (a_iGRegEx)); } while (0)
#undef IEM_MC_ADD_GREG_U8_TO_LOCAL

/** Variant of IEM_MC_AND_GREG_U8 with extended (20) register index. */
#define IEM_MC_AND_GREG_U8_THREADED(a_iGRegEx, a_u8Value) \
    *iemGRegRefU8Ex(pVCpu, (a_iGRegEx)) &= (a_u8Value)
#undef IEM_MC_AND_GREG_U8

/** Variant of IEM_MC_OR_GREG_U8 with extended (20) register index. */
#define IEM_MC_OR_GREG_U8_THREADED(a_iGRegEx, a_u8Value) \
    *iemGRegRefU8Ex(pVCpu, (a_iGRegEx)) |= (a_u8Value)
#undef IEM_MC_OR_GREG_U8


/** For asserting that only declared output flags changed. */
#ifndef VBOX_STRICT
# define IEM_MC_ASSERT_EFLAGS(a_fEflInput, a_fEflOutput) ((void)0)
#else
# undef  IEM_MC_REF_EFLAGS_EX
# define IEM_MC_REF_EFLAGS_EX(a_pEFlags, a_fEflInput, a_fEflOutput) \
    uint32_t const fEflAssert = pVCpu->cpum.GstCtx.eflags.uBoth; \
    IEM_MC_REF_EFLAGS(a_pEFlags)
# define IEM_MC_ASSERT_EFLAGS(a_fEflInput, a_fEflOutput) \
    AssertMsg((pVCpu->cpum.GstCtx.eflags.uBoth & ~(a_fEflOutput)) == (fEflAssert & ~(a_fEflOutput)), \
              ("now %#x (%#x), was %#x (%#x) - diff %#x; a_fEflOutput=%#x\n", \
               (pVCpu->cpum.GstCtx.eflags.uBoth & ~(a_fEflOutput)), pVCpu->cpum.GstCtx.eflags.uBoth, \
               (fEflAssert & ~(a_fEflOutput)), fEflAssert, \
               (pVCpu->cpum.GstCtx.eflags.uBoth ^ fEflAssert) & ~(a_fEflOutput), a_fEflOutput))
#endif


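/* Editor's illustrative sketch of how the strict-build pair above is meant to
 * bracket an EFLAGS-modifying operation (names are illustrative):
 *
 * @code
 *     IEM_MC_REF_EFLAGS_EX(pEFlags, fEflIn, fEflOut); // snapshots eflags.uBoth in strict builds
 *     // ... instruction body updates the flags through pEFlags ...
 *     IEM_MC_ASSERT_EFLAGS(fEflIn, fEflOut);          // asserts only fEflOut bits changed
 * @endcode
 */
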
/**
 * Calculates the effective address of a ModR/M memory operand, 16-bit
 * addressing variant.
 *
 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR_THREADED_16.
 *
 * @returns The effective address.
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 * @param   bRm     The ModRM byte.
 * @param   u16Disp The displacement byte/word, if any.
 */
static RTGCPTR iemOpHlpCalcRmEffAddrThreadedAddr16(PVMCPUCC pVCpu, uint8_t bRm, uint16_t u16Disp) RT_NOEXCEPT
{
    Log5(("iemOpHlpCalcRmEffAddrThreadedAddr16: bRm=%#x u16Disp=%#x\n", bRm, u16Disp));
    Assert(!IEM_IS_64BIT_CODE(pVCpu));

    /* Handle the disp16 form with no registers first. */
    if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
    {
        Log5(("iemOpHlpCalcRmEffAddrThreadedAddr16: EffAddr=%#010RGv\n", (RTGCPTR)u16Disp));
        return u16Disp;
    }

    /* Get the displacement. */
    /** @todo we can eliminate this step by making u16Disp have this value
     *        already! */
    uint16_t u16EffAddr;
    switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
    {
        case 0: u16EffAddr = 0; break;
        case 1: u16EffAddr = (int16_t)(int8_t)u16Disp; break;
        case 2: u16EffAddr = u16Disp; break;
        default: AssertFailedStmt(u16EffAddr = 0);
    }

    /* Add the base and index registers to the disp. */
    switch (bRm & X86_MODRM_RM_MASK)
    {
        case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
        case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
        case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; break;
        case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; break;
        case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
        case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
        case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; break;
        case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
    }

    Log5(("iemOpHlpCalcRmEffAddrThreadedAddr16: EffAddr=%#010RGv\n", (RTGCPTR)u16EffAddr));
    return u16EffAddr;
}


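/* Editor's worked example: bRm=0x42 decodes as mod=1, rm=2, so with
 * u16Disp=0x00F0 the disp8 sign-extends to -16 and the result is
 * (uint16_t)(BP + SI - 16), the classic [bp+si+disp8] form; mod=0 with rm=6
 * is instead the bare [disp16] special case handled first above. */
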
/**
 * Calculates the effective address of a ModR/M memory operand, 32-bit
 * addressing variant.
 *
 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR_THREADED_32.
 *
 * @returns The effective address.
 * @param   pVCpu               The cross context virtual CPU structure of the
 *                              calling thread.
 * @param   bRm                 The ModRM byte.
 * @param   uSibAndRspOffset    Two parts:
 *                              - The first 8 bits make up the SIB byte.
 *                              - The next 8 bits are the fixed RSP/ESP offset
 *                                in case of a pop [xSP].
 * @param   u32Disp             The displacement byte/dword, if any.
 */
static RTGCPTR iemOpHlpCalcRmEffAddrThreadedAddr32(PVMCPUCC pVCpu, uint8_t bRm, uint32_t uSibAndRspOffset,
                                                   uint32_t u32Disp) RT_NOEXCEPT
{
    Log5(("iemOpHlpCalcRmEffAddrThreadedAddr32: bRm=%#x uSibAndRspOffset=%#x u32Disp=%#x\n", bRm, uSibAndRspOffset, u32Disp));

    /* Handle the disp32 form with no registers first. */
    if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
    {
        Log5(("iemOpHlpCalcRmEffAddrThreadedAddr32: EffAddr=%#010RGv\n", (RTGCPTR)u32Disp));
        return u32Disp;
    }

    /* Get the register (or SIB) value. */
    uint32_t u32EffAddr;
#ifdef _MSC_VER
    u32EffAddr = 0; /* MSC uninitialized variable analysis is too simple, it seems. */
#endif
    switch (bRm & X86_MODRM_RM_MASK)
    {
        case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
        case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
        case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
        case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
        case 4: /* SIB */
        {
            /* Get the index and scale it. */
            switch ((uSibAndRspOffset >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
            {
                case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
                case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
                case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
                case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
                case 4: u32EffAddr = 0; /* none */ break;
                case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
                case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
                case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
            }
            u32EffAddr <<= (uSibAndRspOffset >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;

            /* add base */
            switch (uSibAndRspOffset & X86_SIB_BASE_MASK)
            {
                case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
                case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
                case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
                case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
                case 4:
                    u32EffAddr += pVCpu->cpum.GstCtx.esp;
                    u32EffAddr += uSibAndRspOffset >> 8;
                    break;
                case 5:
                    if ((bRm & X86_MODRM_MOD_MASK) != 0)
                        u32EffAddr += pVCpu->cpum.GstCtx.ebp;
                    else
                        u32EffAddr += u32Disp;
                    break;
                case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
                case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
            }
            break;
        }
        case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
        case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
        case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
    }

    /* Get and add the displacement. */
    switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
    {
        case 0: break;
        case 1: u32EffAddr += (int8_t)u32Disp; break;
        case 2: u32EffAddr += u32Disp; break;
        default: AssertFailed();
    }

    Log5(("iemOpHlpCalcRmEffAddrThreadedAddr32: EffAddr=%#010RGv\n", (RTGCPTR)u32EffAddr));
    return u32EffAddr;
}


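/* Editor's worked example: bRm=0x44 (mod=1, rm=4) takes the SIB path; a SIB
 * byte of 0x98 in the low bits of uSibAndRspOffset (scale=2, index=3/ebx,
 * base=0/eax) combined with u32Disp=0x10 yields eax + (ebx << 2) + 16,
 * i.e. [eax+ebx*4+disp8]. */
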
/**
 * Calculates the effective address of a ModR/M memory operand.
 *
 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR_THREADED_64 and variants.
 *
 * @returns The effective address.
 * @param   pVCpu               The cross context virtual CPU structure of the
 *                              calling thread.
 * @param   bRmEx               The ModRM byte but with bit 3 set to REX.B and
 *                              bit 4 to REX.X. The two bits are part of the
 *                              REG sub-field, which isn't needed in this
 *                              function.
 * @param   uSibAndRspOffset    Two parts:
 *                              - The first 8 bits make up the SIB byte.
 *                              - The next 8 bits are the fixed RSP/ESP offset
 *                                in case of a pop [xSP].
 * @param   u32Disp             The displacement byte/dword, if any.
 * @param   cbInstr             The size of the fully decoded instruction. Used
 *                              for RIP relative addressing.
 * @todo combine cbInstr and cbImm!
 */
static RTGCPTR iemOpHlpCalcRmEffAddrThreadedAddr64(PVMCPUCC pVCpu, uint8_t bRmEx, uint32_t uSibAndRspOffset,
                                                   uint32_t u32Disp, uint8_t cbInstr) RT_NOEXCEPT
{
    Log5(("iemOpHlpCalcRmEffAddrThreadedAddr64: bRmEx=%#x\n", bRmEx));
    Assert(IEM_IS_64BIT_CODE(pVCpu));

    uint64_t u64EffAddr;

    /* Handle the rip+disp32 form with no registers first. */
    if ((bRmEx & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
    {
        u64EffAddr  = (int32_t)u32Disp;
        u64EffAddr += pVCpu->cpum.GstCtx.rip + cbInstr;
    }
    else
    {
        /* Get the register (or SIB) value. */
#ifdef _MSC_VER
        u64EffAddr = 0; /* MSC uninitialized variable analysis is too simple, it seems. */
#endif
        switch (bRmEx & (X86_MODRM_RM_MASK | 0x8)) /* bRmEx[bit 3] = REX.B */
        {
            default:
            case  0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
            case  1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
            case  2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
            case  3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
            case  5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
            case  6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
            case  7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
            case  8: u64EffAddr = pVCpu->cpum.GstCtx.r8;  break;
            case  9: u64EffAddr = pVCpu->cpum.GstCtx.r9;  break;
            case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
            case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
            case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
            case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
            case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
            /* SIB */
            case 4:
            case 12:
            {
                /* Get the index and scale it. */
                switch (  ((uSibAndRspOffset >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
                        | ((bRmEx & 0x10) >> 1)) /* bRmEx[bit 4] = REX.X */
                {
                    case  0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
                    case  1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
                    case  2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
                    case  3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
                    case  4: u64EffAddr = 0; /* none */ break;
                    case  5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
                    case  6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
                    case  7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
                    case  8: u64EffAddr = pVCpu->cpum.GstCtx.r8;  break;
                    case  9: u64EffAddr = pVCpu->cpum.GstCtx.r9;  break;
                    case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
                    case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
                    case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
                    case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
                    case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
                    case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
                }
                u64EffAddr <<= (uSibAndRspOffset >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;

                /* add base */
                switch ((uSibAndRspOffset & X86_SIB_BASE_MASK) | (bRmEx & 0x8)) /* bRmEx[bit 3] = REX.B */
                {
                    case  0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
                    case  1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
                    case  2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
                    case  3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
                    case  4:
                        u64EffAddr += pVCpu->cpum.GstCtx.rsp;
                        u64EffAddr += uSibAndRspOffset >> 8;
                        break;
                    case  6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
                    case  7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
                    case  8: u64EffAddr += pVCpu->cpum.GstCtx.r8;  break;
                    case  9: u64EffAddr += pVCpu->cpum.GstCtx.r9;  break;
                    case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
                    case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
                    case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
                    case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
                    case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
                    /* complicated encodings */
                    case  5:
                        if ((bRmEx & X86_MODRM_MOD_MASK) != 0)
                            u64EffAddr += pVCpu->cpum.GstCtx.rbp;
                        else
                            u64EffAddr += (int32_t)u32Disp;
                        break;
                    case 13:
                        if ((bRmEx & X86_MODRM_MOD_MASK) != 0)
                            u64EffAddr += pVCpu->cpum.GstCtx.r13;
                        else
                            u64EffAddr += (int32_t)u32Disp;
                        break;
                }
                break;
            }
        }

        /* Get and add the displacement. */
        switch ((bRmEx >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
        {
            case 0: break;
            case 1: u64EffAddr += (int8_t)u32Disp; break;
            case 2: u64EffAddr += (int32_t)u32Disp; break;
            default: AssertFailed();
        }
    }

    Log5(("iemOpHlpCalcRmEffAddrThreadedAddr64: EffAddr=%#010RGv\n", u64EffAddr));
    return u64EffAddr;
}


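/* Editor's worked example: mod=0 with rm=5 is RIP-relative here, so a 7-byte
 * instruction at RIP=0x1000 with u32Disp=0x20 produces 0x1000 + 7 + 0x20 =
 * 0x1027 (u32Disp is sign-extended, so negative displacements work as well).
 * Note that rm=4/12 always routes through the SIB decoding, and an SIB base
 * of r13 with mod=0 (case 13 above) means disp32 instead of a register,
 * mirroring the esp/ebp quirks of the 32-bit variant. */
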
/*
 * The threaded functions.
 */
#include "IEMThreadedFunctions.cpp.h"
