/* $Id: IEMAllThrdFuncs.cpp 104419 2024-04-24 14:32:29Z vboxsync $ */
/** @file
 * IEM - Instruction Decoding and Emulation, Threaded Functions.
 */

/*
 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#ifndef LOG_GROUP /* defined when included by tstIEMCheckMc.cpp */
# define LOG_GROUP LOG_GROUP_IEM
#endif
#define VMCPU_INCL_CPUM_GST_CTX
#define IEM_WITH_OPAQUE_DECODER_STATE
#include <VBox/vmm/iem.h>
#include <VBox/vmm/cpum.h>
#include <VBox/vmm/apic.h>
#include <VBox/vmm/pdm.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/iom.h>
#include <VBox/vmm/em.h>
#include <VBox/vmm/hm.h>
#include <VBox/vmm/nem.h>
#include <VBox/vmm/gim.h>
#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
# include <VBox/vmm/em.h>
# include <VBox/vmm/hm_svm.h>
#endif
#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
# include <VBox/vmm/hmvmxinline.h>
#endif
#include <VBox/vmm/tm.h>
#include <VBox/vmm/dbgf.h>
#include <VBox/vmm/dbgftrace.h>
#include "IEMInternal.h"
#include <VBox/vmm/vmcc.h>
#include <VBox/log.h>
#include <VBox/err.h>
#include <VBox/param.h>
#include <VBox/dis.h>
#include <VBox/disopcode-x86-amd64.h>
#include <iprt/asm-math.h>
#include <iprt/assert.h>
#include <iprt/string.h>
#include <iprt/x86.h>

#include "IEMInline.h"
#include "IEMMc.h"

#include "IEMThreadedFunctions.h"


/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/

/** Variant of IEM_MC_ADVANCE_RIP_AND_FINISH with instruction length as param
 * and only used when we're in 16-bit code on a pre-386 CPU. */
#define IEM_MC_ADVANCE_RIP_AND_FINISH_THREADED_PC16(a_cbInstr, a_rcNormal) \
    return iemRegAddToIp16AndFinishingNoFlags(pVCpu, a_cbInstr, a_rcNormal)

/** Variant of IEM_MC_ADVANCE_RIP_AND_FINISH with instruction length as param
 * and used for 16-bit and 32-bit code on 386 and later CPUs. */
#define IEM_MC_ADVANCE_RIP_AND_FINISH_THREADED_PC32(a_cbInstr, a_rcNormal) \
    return iemRegAddToEip32AndFinishingNoFlags(pVCpu, a_cbInstr, a_rcNormal)

/** Variant of IEM_MC_ADVANCE_RIP_AND_FINISH with instruction length as param
 * and only used when we're in 64-bit code. */
#define IEM_MC_ADVANCE_RIP_AND_FINISH_THREADED_PC64(a_cbInstr, a_rcNormal) \
    return iemRegAddToRip64AndFinishingNoFlags(pVCpu, a_cbInstr, a_rcNormal)


/** Variant of IEM_MC_ADVANCE_RIP_AND_FINISH with instruction length as param
 * and only used when we're in 16-bit code on a pre-386 CPU and we need to
 * check and clear flags. */
#define IEM_MC_ADVANCE_RIP_AND_FINISH_THREADED_PC16_WITH_FLAGS(a_cbInstr, a_rcNormal) \
    return iemRegAddToIp16AndFinishingClearingRF(pVCpu, a_cbInstr, a_rcNormal)

/** Variant of IEM_MC_ADVANCE_RIP_AND_FINISH with instruction length as param
 * and used for 16-bit and 32-bit code on 386 and later CPUs and we need to
 * check and clear flags. */
#define IEM_MC_ADVANCE_RIP_AND_FINISH_THREADED_PC32_WITH_FLAGS(a_cbInstr, a_rcNormal) \
    return iemRegAddToEip32AndFinishingClearingRF(pVCpu, a_cbInstr, a_rcNormal)

/** Variant of IEM_MC_ADVANCE_RIP_AND_FINISH with instruction length as param
 * and only used when we're in 64-bit code and we need to check and clear
 * flags. */
#define IEM_MC_ADVANCE_RIP_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_cbInstr, a_rcNormal) \
    return iemRegAddToRip64AndFinishingClearingRF(pVCpu, a_cbInstr, a_rcNormal)

#undef IEM_MC_ADVANCE_RIP_AND_FINISH

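/*
 * Note on the naming convention used above and below: the _THREADED_PC16
 * variants are only used for 16-bit code on pre-386 CPUs, _THREADED_PC32
 * covers 16-bit and 32-bit code on 386 and later CPUs, and _THREADED_PC64
 * covers 64-bit code; the _WITH_FLAGS forms are selected when flags (RF)
 * must be checked and cleared after the instruction.  All of them take the
 * instruction length as an explicit parameter; this file is compiled with
 * IEM_WITH_OPAQUE_DECODER_STATE (see above), so the length cannot simply be
 * read out of the decoder state.
 */
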
/** Variant of IEM_MC_REL_JMP_S8_AND_FINISH with instruction length as extra
 * parameter, for use in 16-bit code on a pre-386 CPU. */
#define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC16(a_i8, a_cbInstr, a_rcNormal) \
    return iemRegIp16RelativeJumpS8AndFinishNoFlags(pVCpu, a_cbInstr, (a_i8), a_rcNormal)

/** Variant of IEM_MC_REL_JMP_S8_AND_FINISH with instruction length and operand
 * size as extra parameters, for use in 16-bit and 32-bit code on 386 and
 * later CPUs. */
#define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC32(a_i8, a_cbInstr, a_enmEffOpSize, a_rcNormal) \
    return iemRegEip32RelativeJumpS8AndFinishNoFlags(pVCpu, a_cbInstr, (a_i8), a_enmEffOpSize, a_rcNormal)

/** Variant of IEM_MC_REL_JMP_S8_AND_FINISH with instruction length and operand
 * size as extra parameters, for use in 64-bit code. */
#define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC64(a_i8, a_cbInstr, a_enmEffOpSize, a_rcNormal) \
    return iemRegRip64RelativeJumpS8AndFinishNoFlags(pVCpu, a_cbInstr, (a_i8), a_enmEffOpSize, a_rcNormal)


/** Variant of IEM_MC_REL_JMP_S8_AND_FINISH with instruction length as extra
 * parameter, for use in 16-bit code on a pre-386 CPU when we need to check
 * and clear flags. */
#define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC16_WITH_FLAGS(a_i8, a_cbInstr, a_rcNormal) \
    return iemRegIp16RelativeJumpS8AndFinishClearingRF(pVCpu, a_cbInstr, (a_i8), a_rcNormal)

/** Variant of IEM_MC_REL_JMP_S8_AND_FINISH with instruction length and operand
 * size as extra parameters, for use in 16-bit and 32-bit code on 386 and
 * later CPUs when we need to check and clear flags. */
#define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC32_WITH_FLAGS(a_i8, a_cbInstr, a_enmEffOpSize, a_rcNormal) \
    return iemRegEip32RelativeJumpS8AndFinishClearingRF(pVCpu, a_cbInstr, (a_i8), a_enmEffOpSize, a_rcNormal)

/** Variant of IEM_MC_REL_JMP_S8_AND_FINISH with instruction length and operand
 * size as extra parameters, for use in 64-bit code when we need to check and
 * clear flags. */
#define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_i8, a_cbInstr, a_enmEffOpSize, a_rcNormal) \
    return iemRegRip64RelativeJumpS8AndFinishClearingRF(pVCpu, a_cbInstr, (a_i8), a_enmEffOpSize, a_rcNormal)

#undef IEM_MC_REL_JMP_S8_AND_FINISH


/** Variant of IEM_MC_REL_JMP_S16_AND_FINISH with instruction length as
 * param, for use in 16-bit code on a pre-386 CPU. */
#define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC16(a_i16, a_cbInstr, a_rcNormal) \
    return iemRegEip32RelativeJumpS16AndFinishNoFlags(pVCpu, a_cbInstr, (a_i16), a_rcNormal)

/** Variant of IEM_MC_REL_JMP_S16_AND_FINISH with instruction length as
 * param, for use in 16-bit and 32-bit code on 386 and later CPUs. */
#define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC32(a_i16, a_cbInstr, a_rcNormal) \
    return iemRegEip32RelativeJumpS16AndFinishNoFlags(pVCpu, a_cbInstr, (a_i16), a_rcNormal)

/** Variant of IEM_MC_REL_JMP_S16_AND_FINISH with instruction length as
 * param, for use in 64-bit code. */
#define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC64(a_i16, a_cbInstr, a_rcNormal) \
    return iemRegRip64RelativeJumpS16AndFinishNoFlags(pVCpu, a_cbInstr, (a_i16), a_rcNormal)


/** Variant of IEM_MC_REL_JMP_S16_AND_FINISH with instruction length as
 * param, for use in 16-bit code on a pre-386 CPU when we need to check and
 * clear flags. */
#define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC16_WITH_FLAGS(a_i16, a_cbInstr, a_rcNormal) \
    return iemRegEip32RelativeJumpS16AndFinishClearingRF(pVCpu, a_cbInstr, (a_i16), a_rcNormal)

/** Variant of IEM_MC_REL_JMP_S16_AND_FINISH with instruction length as
 * param, for use in 16-bit and 32-bit code on 386 and later CPUs when we
 * need to check and clear flags. */
#define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC32_WITH_FLAGS(a_i16, a_cbInstr, a_rcNormal) \
    return iemRegEip32RelativeJumpS16AndFinishClearingRF(pVCpu, a_cbInstr, (a_i16), a_rcNormal)

/** Variant of IEM_MC_REL_JMP_S16_AND_FINISH with instruction length as
 * param, for use in 64-bit code when we need to check and clear flags. */
#define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_i16, a_cbInstr, a_rcNormal) \
    return iemRegRip64RelativeJumpS16AndFinishClearingRF(pVCpu, a_cbInstr, (a_i16), a_rcNormal)

#undef IEM_MC_REL_JMP_S16_AND_FINISH


/** Variant of IEM_MC_REL_JMP_S32_AND_FINISH with instruction length as
 * an extra parameter - dummy for pre-386 variations not eliminated by the
 * Python script. */
#define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC16(a_i32, a_cbInstr, a_rcNormal) \
    do { RT_NOREF(pVCpu, a_i32, a_cbInstr, a_rcNormal); AssertFailedReturn(VERR_IEM_IPE_9); } while (0)

/** Variant of IEM_MC_REL_JMP_S32_AND_FINISH with instruction length as
 * an extra parameter, for use in 16-bit and 32-bit code on 386+. */
#define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC32(a_i32, a_cbInstr, a_rcNormal) \
    return iemRegEip32RelativeJumpS32AndFinishNoFlags(pVCpu, a_cbInstr, (a_i32), a_rcNormal)

/** Variant of IEM_MC_REL_JMP_S32_AND_FINISH with instruction length as
 * an extra parameter, for use in 64-bit code. */
#define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC64(a_i32, a_cbInstr, a_rcNormal) \
    return iemRegRip64RelativeJumpS32AndFinishNoFlags(pVCpu, a_cbInstr, (a_i32), a_rcNormal)


/** Variant of IEM_MC_REL_JMP_S32_AND_FINISH with instruction length as
 * an extra parameter - dummy for pre-386 variations not eliminated by the
 * Python script. */
#define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC16_WITH_FLAGS(a_i32, a_cbInstr, a_rcNormal) \
    do { RT_NOREF(pVCpu, a_i32, a_cbInstr, a_rcNormal); AssertFailedReturn(VERR_IEM_IPE_9); } while (0)

/** Variant of IEM_MC_REL_JMP_S32_AND_FINISH with instruction length as
 * an extra parameter, for use in 16-bit and 32-bit code on 386+ when we need
 * to check and clear flags. */
#define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC32_WITH_FLAGS(a_i32, a_cbInstr, a_rcNormal) \
    return iemRegEip32RelativeJumpS32AndFinishClearingRF(pVCpu, a_cbInstr, (a_i32), a_rcNormal)

/** Variant of IEM_MC_REL_JMP_S32_AND_FINISH with instruction length as
 * an extra parameter, for use in 64-bit code when we need to check and clear
 * flags. */
#define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_i32, a_cbInstr, a_rcNormal) \
    return iemRegRip64RelativeJumpS32AndFinishClearingRF(pVCpu, a_cbInstr, (a_i32), a_rcNormal)

#undef IEM_MC_REL_JMP_S32_AND_FINISH



/** Variant of IEM_MC_SET_RIP_U16_AND_FINISH for pre-386 targets. */
#define IEM_MC_SET_RIP_U16_AND_FINISH_THREADED_PC16(a_u16NewIP) \
    return iemRegRipJumpU16AndFinishNoFlags((pVCpu), (a_u16NewIP))

/** Variant of IEM_MC_SET_RIP_U16_AND_FINISH for 386+ targets. */
#define IEM_MC_SET_RIP_U16_AND_FINISH_THREADED_PC32(a_u16NewIP) \
    return iemRegRipJumpU16AndFinishNoFlags((pVCpu), (a_u16NewIP))

/** Variant of IEM_MC_SET_RIP_U16_AND_FINISH for use in 64-bit code. */
#define IEM_MC_SET_RIP_U16_AND_FINISH_THREADED_PC64(a_u16NewIP) \
    return iemRegRipJumpU16AndFinishNoFlags((pVCpu), (a_u16NewIP))

/** Variant of IEM_MC_SET_RIP_U16_AND_FINISH for pre-386 targets that checks
 * and clears flags. */
#define IEM_MC_SET_RIP_U16_AND_FINISH_THREADED_PC16_WITH_FLAGS(a_u16NewIP) \
    return iemRegRipJumpU16AndFinishClearingRF((pVCpu), (a_u16NewIP), 0 /* cbInstr - not used */)

/** Variant of IEM_MC_SET_RIP_U16_AND_FINISH for 386+ targets that checks and
 * clears flags. */
#define IEM_MC_SET_RIP_U16_AND_FINISH_THREADED_PC32_WITH_FLAGS(a_u16NewIP) \
    return iemRegRipJumpU16AndFinishClearingRF((pVCpu), (a_u16NewIP), 0 /* cbInstr - not used */)

/** Variant of IEM_MC_SET_RIP_U16_AND_FINISH for use in 64-bit code that
 * checks and clears flags. */
#define IEM_MC_SET_RIP_U16_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_u16NewIP) \
    return iemRegRipJumpU16AndFinishClearingRF((pVCpu), (a_u16NewIP), 0 /* cbInstr - not used */)

#undef IEM_MC_SET_RIP_U16_AND_FINISH


/** Variant of IEM_MC_SET_RIP_U32_AND_FINISH for 386+ targets. */
#define IEM_MC_SET_RIP_U32_AND_FINISH_THREADED_PC32(a_u32NewEIP) \
    return iemRegRipJumpU32AndFinishNoFlags((pVCpu), (a_u32NewEIP))

/** Variant of IEM_MC_SET_RIP_U32_AND_FINISH for use in 64-bit code. */
#define IEM_MC_SET_RIP_U32_AND_FINISH_THREADED_PC64(a_u32NewEIP) \
    return iemRegRipJumpU32AndFinishNoFlags((pVCpu), (a_u32NewEIP))

/** Variant of IEM_MC_SET_RIP_U32_AND_FINISH for 386+ targets that checks and
 * clears flags. */
#define IEM_MC_SET_RIP_U32_AND_FINISH_THREADED_PC32_WITH_FLAGS(a_u32NewEIP) \
    return iemRegRipJumpU32AndFinishClearingRF((pVCpu), (a_u32NewEIP), 0 /* cbInstr - not used */)

/** Variant of IEM_MC_SET_RIP_U32_AND_FINISH for use in 64-bit code that checks
 * and clears flags. */
#define IEM_MC_SET_RIP_U32_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_u32NewEIP) \
    return iemRegRipJumpU32AndFinishClearingRF((pVCpu), (a_u32NewEIP), 0 /* cbInstr - not used */)

#undef IEM_MC_SET_RIP_U32_AND_FINISH


/** Variant of IEM_MC_SET_RIP_U64_AND_FINISH for use in 64-bit code. */
#define IEM_MC_SET_RIP_U64_AND_FINISH_THREADED_PC64(a_u64NewRIP) \
    return iemRegRipJumpU64AndFinishNoFlags((pVCpu), (a_u64NewRIP))

/** Variant of IEM_MC_SET_RIP_U64_AND_FINISH for use in 64-bit code that checks
 * and clears flags. */
#define IEM_MC_SET_RIP_U64_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_u64NewRIP) \
    return iemRegRipJumpU64AndFinishClearingRF((pVCpu), (a_u64NewRIP), 0 /* cbInstr - not used */)

#undef IEM_MC_SET_RIP_U64_AND_FINISH



/** Variant of IEM_MC_REL_CALL_S16_AND_FINISH with instruction length as
 * param, for use in 16-bit code on a pre-386 CPU. */
#define IEM_MC_REL_CALL_S16_AND_FINISH_THREADED_PC16(a_i16, a_cbInstr) \
    return iemRegRipRelativeCallS16AndFinishNoFlags(pVCpu, a_cbInstr, (a_i16))

/** Variant of IEM_MC_REL_CALL_S16_AND_FINISH with instruction length as
 * param, for use in 16-bit and 32-bit code on 386 and later CPUs. */
#define IEM_MC_REL_CALL_S16_AND_FINISH_THREADED_PC32(a_i16, a_cbInstr) \
    return iemRegRipRelativeCallS16AndFinishNoFlags(pVCpu, a_cbInstr, (a_i16))

/** Variant of IEM_MC_REL_CALL_S16_AND_FINISH with instruction length as
 * param, for use in 64-bit code. */
#define IEM_MC_REL_CALL_S16_AND_FINISH_THREADED_PC64(a_i16, a_cbInstr) \
    return iemRegRipRelativeCallS16AndFinishNoFlags(pVCpu, a_cbInstr, (a_i16))


/** Variant of IEM_MC_REL_CALL_S16_AND_FINISH with instruction length as
 * param, for use in 16-bit code on a pre-386 CPU when we need to check and
 * clear flags. */
#define IEM_MC_REL_CALL_S16_AND_FINISH_THREADED_PC16_WITH_FLAGS(a_i16, a_cbInstr) \
    return iemRegRipRelativeCallS16AndFinishClearingRF(pVCpu, a_cbInstr, (a_i16))

/** Variant of IEM_MC_REL_CALL_S16_AND_FINISH with instruction length as
 * param, for use in 16-bit and 32-bit code on 386 and later CPUs when we
 * need to check and clear flags. */
#define IEM_MC_REL_CALL_S16_AND_FINISH_THREADED_PC32_WITH_FLAGS(a_i16, a_cbInstr) \
    return iemRegRipRelativeCallS16AndFinishClearingRF(pVCpu, a_cbInstr, (a_i16))

/** Variant of IEM_MC_REL_CALL_S16_AND_FINISH with instruction length as
 * param, for use in 64-bit code when we need to check and clear flags. */
#define IEM_MC_REL_CALL_S16_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_i16, a_cbInstr) \
    return iemRegRipRelativeCallS16AndFinishClearingRF(pVCpu, a_cbInstr, (a_i16))

#undef IEM_MC_REL_CALL_S16_AND_FINISH


/** Variant of IEM_MC_REL_CALL_S32_AND_FINISH with instruction length as
 * an extra parameter - dummy for pre-386 variations not eliminated by the
 * Python script. */
#define IEM_MC_REL_CALL_S32_AND_FINISH_THREADED_PC16(a_i32, a_cbInstr) \
    do { RT_NOREF(pVCpu, a_i32, a_cbInstr); AssertFailedReturn(VERR_IEM_IPE_9); } while (0)

/** Variant of IEM_MC_REL_CALL_S32_AND_FINISH with instruction length as
 * an extra parameter, for use in 16-bit and 32-bit code on 386+. */
#define IEM_MC_REL_CALL_S32_AND_FINISH_THREADED_PC32(a_i32, a_cbInstr) \
    return iemRegEip32RelativeCallS32AndFinishNoFlags(pVCpu, a_cbInstr, (a_i32))

/** Variant of IEM_MC_REL_CALL_S32_AND_FINISH with instruction length as
 * an extra parameter - dummy for 64-bit variations not eliminated by the
 * Python script. */
#define IEM_MC_REL_CALL_S32_AND_FINISH_THREADED_PC64(a_i32, a_cbInstr) \
    do { RT_NOREF(pVCpu, a_i32, a_cbInstr); AssertFailedReturn(VERR_IEM_IPE_9); } while (0)

/** Variant of IEM_MC_REL_CALL_S32_AND_FINISH with instruction length as
 * an extra parameter - dummy for pre-386 variations not eliminated by the
 * Python script. */
#define IEM_MC_REL_CALL_S32_AND_FINISH_THREADED_PC16_WITH_FLAGS(a_i32, a_cbInstr) \
    do { RT_NOREF(pVCpu, a_i32, a_cbInstr); AssertFailedReturn(VERR_IEM_IPE_9); } while (0)

/** Variant of IEM_MC_REL_CALL_S32_AND_FINISH with instruction length as
 * an extra parameter, for use in 16-bit and 32-bit code on 386+ when we need
 * to check and clear flags. */
#define IEM_MC_REL_CALL_S32_AND_FINISH_THREADED_PC32_WITH_FLAGS(a_i32, a_cbInstr) \
    return iemRegEip32RelativeCallS32AndFinishClearingRF(pVCpu, a_cbInstr, (a_i32))

/** Variant of IEM_MC_REL_CALL_S32_AND_FINISH with instruction length as
 * an extra parameter - dummy for 64-bit variations not eliminated by the
 * Python script. */
#define IEM_MC_REL_CALL_S32_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_i32, a_cbInstr) \
    do { RT_NOREF(pVCpu, a_i32, a_cbInstr); AssertFailedReturn(VERR_IEM_IPE_9); } while (0)


#undef IEM_MC_REL_CALL_S32_AND_FINISH

/** Variant of IEM_MC_REL_CALL_S64_AND_FINISH with instruction length as
 * an extra parameter - dummy for 32-bit variations not eliminated by the
 * Python script. */
#define IEM_MC_REL_CALL_S64_AND_FINISH_THREADED_PC32(a_i64, a_cbInstr) \
    do { RT_NOREF(pVCpu, a_i64, a_cbInstr); AssertFailedReturn(VERR_IEM_IPE_9); } while (0)

/** Variant of IEM_MC_REL_CALL_S64_AND_FINISH with instruction length as
 * an extra parameter, for use in 64-bit code. */
#define IEM_MC_REL_CALL_S64_AND_FINISH_THREADED_PC64(a_i64, a_cbInstr) \
    return iemRegRip64RelativeCallS64AndFinishNoFlags(pVCpu, a_cbInstr, (a_i64))


/** Variant of IEM_MC_REL_CALL_S64_AND_FINISH with instruction length as
 * an extra parameter - dummy for 16-bit and 32-bit variations not eliminated
 * by the Python script. */
#define IEM_MC_REL_CALL_S64_AND_FINISH_THREADED_PC32_WITH_FLAGS(a_i64, a_cbInstr) \
    do { RT_NOREF(pVCpu, a_i64, a_cbInstr); AssertFailedReturn(VERR_IEM_IPE_9); } while (0)

/** Variant of IEM_MC_REL_CALL_S64_AND_FINISH with instruction length as
 * an extra parameter, for use in 64-bit code when we need to check and clear
 * flags. */
#define IEM_MC_REL_CALL_S64_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_i64, a_cbInstr) \
    return iemRegRip64RelativeCallS64AndFinishClearingRF(pVCpu, a_cbInstr, (a_i64))

#undef IEM_MC_REL_CALL_S64_AND_FINISH


/** Variant of IEM_MC_IND_CALL_U16_AND_FINISH for pre-386 targets. */
#define IEM_MC_IND_CALL_U16_AND_FINISH_THREADED_PC16(a_u16NewIP, a_cbInstr) \
    return iemRegIp16IndirectCallU16AndFinishNoFlags((pVCpu), a_cbInstr, (a_u16NewIP))

/** Variant of IEM_MC_IND_CALL_U16_AND_FINISH for 386+ targets. */
#define IEM_MC_IND_CALL_U16_AND_FINISH_THREADED_PC32(a_u16NewIP, a_cbInstr) \
    return iemRegEip32IndirectCallU16AndFinishNoFlags((pVCpu), a_cbInstr, (a_u16NewIP))

/** Variant of IEM_MC_IND_CALL_U16_AND_FINISH for 64-bit code - dummy for
 * variations not eliminated by the Python script. */
#define IEM_MC_IND_CALL_U16_AND_FINISH_THREADED_PC64(a_u16NewIP, a_cbInstr) \
    do { RT_NOREF(pVCpu, a_u16NewIP, a_cbInstr); AssertFailedReturn(VERR_IEM_IPE_9); } while (0)

/** Variant of IEM_MC_IND_CALL_U16_AND_FINISH for pre-386 targets that checks
 * and clears flags. */
#define IEM_MC_IND_CALL_U16_AND_FINISH_THREADED_PC16_WITH_FLAGS(a_u16NewIP, a_cbInstr) \
    return iemRegIp16IndirectCallU16AndFinishClearingRF((pVCpu), a_cbInstr, (a_u16NewIP))

/** Variant of IEM_MC_IND_CALL_U16_AND_FINISH for 386+ targets that checks and
 * clears flags. */
#define IEM_MC_IND_CALL_U16_AND_FINISH_THREADED_PC32_WITH_FLAGS(a_u16NewIP, a_cbInstr) \
    return iemRegEip32IndirectCallU16AndFinishClearingRF((pVCpu), a_cbInstr, (a_u16NewIP))

/** Variant of IEM_MC_IND_CALL_U16_AND_FINISH for 64-bit code that checks and
 * clears flags - dummy for variations not eliminated by the Python script. */
#define IEM_MC_IND_CALL_U16_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_u16NewIP, a_cbInstr) \
    do { RT_NOREF(pVCpu, a_u16NewIP, a_cbInstr); AssertFailedReturn(VERR_IEM_IPE_9); } while (0)

#undef IEM_MC_IND_CALL_U16_AND_FINISH


/** Variant of IEM_MC_IND_CALL_U32_AND_FINISH for 386+ targets. */
#define IEM_MC_IND_CALL_U32_AND_FINISH_THREADED_PC32(a_u32NewEIP, a_cbInstr) \
    return iemRegEip32IndirectCallU32AndFinishNoFlags((pVCpu), a_cbInstr, (a_u32NewEIP))

/** Variant of IEM_MC_IND_CALL_U32_AND_FINISH for 64-bit code - dummy for
 * variations not eliminated by the Python script. */
#define IEM_MC_IND_CALL_U32_AND_FINISH_THREADED_PC64(a_u32NewEIP, a_cbInstr) \
    do { RT_NOREF(pVCpu, a_u32NewEIP, a_cbInstr); AssertFailedReturn(VERR_IEM_IPE_9); } while (0)

/** Variant of IEM_MC_IND_CALL_U32_AND_FINISH for 386+ targets that checks and
 * clears flags. */
#define IEM_MC_IND_CALL_U32_AND_FINISH_THREADED_PC32_WITH_FLAGS(a_u32NewEIP, a_cbInstr) \
    return iemRegEip32IndirectCallU32AndFinishClearingRF((pVCpu), a_cbInstr, (a_u32NewEIP))

/** Variant of IEM_MC_IND_CALL_U32_AND_FINISH for 64-bit code that checks and
 * clears flags - dummy for variations not eliminated by the Python script. */
#define IEM_MC_IND_CALL_U32_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_u32NewEIP, a_cbInstr) \
    do { RT_NOREF(pVCpu, a_u32NewEIP, a_cbInstr); AssertFailedReturn(VERR_IEM_IPE_9); } while (0)

#undef IEM_MC_IND_CALL_U32_AND_FINISH


/** Variant of IEM_MC_IND_CALL_U64_AND_FINISH for use in 64-bit code. */
#define IEM_MC_IND_CALL_U64_AND_FINISH_THREADED_PC64(a_u64NewRIP, a_cbInstr) \
    return iemRegRip64IndirectCallU64AndFinishNoFlags((pVCpu), a_cbInstr, (a_u64NewRIP))

/** Variant of IEM_MC_IND_CALL_U64_AND_FINISH for use in 64-bit code that checks
 * and clears flags. */
#define IEM_MC_IND_CALL_U64_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_u64NewRIP, a_cbInstr) \
    return iemRegRip64IndirectCallU64AndFinishClearingRF((pVCpu), a_cbInstr, (a_u64NewRIP))

#undef IEM_MC_IND_CALL_U64_AND_FINISH


/** Variant of IEM_MC_RETN_AND_FINISH for pre-386 targets. */
#define IEM_MC_RETN_AND_FINISH_THREADED_PC16(a_u16Pop, a_cbInstr) \
    return iemRegRipNearReturnAndFinishNoFlags((pVCpu), a_cbInstr, (a_u16Pop), IEMMODE_16BIT)

/** Variant of IEM_MC_RETN_AND_FINISH for 386+ targets. */
#define IEM_MC_RETN_AND_FINISH_THREADED_PC32(a_u16Pop, a_cbInstr, a_enmEffOpSize) \
    return iemRegRipNearReturnAndFinishNoFlags((pVCpu), a_cbInstr, (a_u16Pop), (a_enmEffOpSize))

/** Variant of IEM_MC_RETN_AND_FINISH for use in 64-bit code. */
#define IEM_MC_RETN_AND_FINISH_THREADED_PC64(a_u16Pop, a_cbInstr, a_enmEffOpSize) \
    return iemRegRipNearReturnAndFinishNoFlags((pVCpu), a_cbInstr, (a_u16Pop), (a_enmEffOpSize))

/** Variant of IEM_MC_RETN_AND_FINISH for pre-386 targets that checks and
 * clears flags. */
#define IEM_MC_RETN_AND_FINISH_THREADED_PC16_WITH_FLAGS(a_u16Pop, a_cbInstr) \
    return iemRegRipNearReturnAndFinishClearingRF((pVCpu), a_cbInstr, (a_u16Pop), IEMMODE_16BIT)

/** Variant of IEM_MC_RETN_AND_FINISH for 386+ targets that checks and
 * clears flags. */
#define IEM_MC_RETN_AND_FINISH_THREADED_PC32_WITH_FLAGS(a_u16Pop, a_cbInstr, a_enmEffOpSize) \
    return iemRegRipNearReturnAndFinishClearingRF((pVCpu), a_cbInstr, (a_u16Pop), (a_enmEffOpSize))

/** Variant of IEM_MC_RETN_AND_FINISH for use in 64-bit code that checks and
 * clears flags. */
#define IEM_MC_RETN_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_u16Pop, a_cbInstr, a_enmEffOpSize) \
    return iemRegRipNearReturnAndFinishClearingRF((pVCpu), a_cbInstr, (a_u16Pop), (a_enmEffOpSize))

#undef IEM_MC_RETN_AND_FINISH


/** Variant of IEM_MC_CALC_RM_EFF_ADDR with additional parameters, 16-bit. */
#define IEM_MC_CALC_RM_EFF_ADDR_THREADED_16(a_GCPtrEff, a_bRm, a_u16Disp) \
    (a_GCPtrEff) = iemOpHlpCalcRmEffAddrThreadedAddr16(pVCpu, a_bRm, a_u16Disp)

/** Variant of IEM_MC_CALC_RM_EFF_ADDR with additional parameters, 32-bit. */
#define IEM_MC_CALC_RM_EFF_ADDR_THREADED_32(a_GCPtrEff, a_bRm, a_uSibAndRspOffset, a_u32Disp) \
    (a_GCPtrEff) = iemOpHlpCalcRmEffAddrThreadedAddr32(pVCpu, a_bRm, a_uSibAndRspOffset, a_u32Disp)

/** Variant of IEM_MC_CALC_RM_EFF_ADDR with additional parameters, 64-bit. */
#define IEM_MC_CALC_RM_EFF_ADDR_THREADED_64(a_GCPtrEff, a_bRmEx, a_uSibAndRspOffset, a_u32Disp, a_cbImm) \
    (a_GCPtrEff) = iemOpHlpCalcRmEffAddrThreadedAddr64(pVCpu, a_bRmEx, a_uSibAndRspOffset, a_u32Disp, a_cbImm)

/** Variant of IEM_MC_CALC_RM_EFF_ADDR with additional parameters, 64-bit
 * with FS/GS segment prefix (same address computation as above). */
#define IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_FSGS(a_GCPtrEff, a_bRmEx, a_uSibAndRspOffset, a_u32Disp, a_cbImm) \
    (a_GCPtrEff) = iemOpHlpCalcRmEffAddrThreadedAddr64(pVCpu, a_bRmEx, a_uSibAndRspOffset, a_u32Disp, a_cbImm)

/** Variant of IEM_MC_CALC_RM_EFF_ADDR with additional parameters, 64-bit
 * with 32-bit address size override; the result is truncated to 32 bits.
 * @todo How did that address prefix thing work for 64-bit code again? */
#define IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_ADDR32(a_GCPtrEff, a_bRmEx, a_uSibAndRspOffset, a_u32Disp, a_cbImm) \
    (a_GCPtrEff) = (uint32_t)iemOpHlpCalcRmEffAddrThreadedAddr64(pVCpu, a_bRmEx, a_uSibAndRspOffset, a_u32Disp, a_cbImm)

#undef IEM_MC_CALC_RM_EFF_ADDR

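/* Sketch of how the a_uSibAndRspOffset parameter of the 64-bit variants above
   is packed, as described by the helper's parameter docs further down (the
   local names here are purely illustrative):
        bits 0..7   - the SIB byte as fetched from the instruction stream;
        bits 8..15  - the fixed RSP/ESP offset to apply when the base is xSP,
                      i.e. for a pop [xSP] style access.
   In other words, something like: uSibAndRspOffset = bSib | ((uint32_t)offRsp << 8). */
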

/** Variant of IEM_MC_CALL_CIMPL_1 with explicit instruction length parameter. */
#define IEM_MC_CALL_CIMPL_1_THREADED(a_cbInstr, a_fFlags, a_fGstShwFlush, a_pfnCImpl, a0) \
    return (a_pfnCImpl)(pVCpu, (a_cbInstr), a0)
#undef IEM_MC_CALL_CIMPL_1

/** Variant of IEM_MC_CALL_CIMPL_2 with explicit instruction length parameter. */
#define IEM_MC_CALL_CIMPL_2_THREADED(a_cbInstr, a_fFlags, a_fGstShwFlush, a_pfnCImpl, a0, a1) \
    return (a_pfnCImpl)(pVCpu, (a_cbInstr), a0, a1)
#undef IEM_MC_CALL_CIMPL_2

/** Variant of IEM_MC_CALL_CIMPL_3 with explicit instruction length parameter. */
#define IEM_MC_CALL_CIMPL_3_THREADED(a_cbInstr, a_fFlags, a_fGstShwFlush, a_pfnCImpl, a0, a1, a2) \
    return (a_pfnCImpl)(pVCpu, (a_cbInstr), a0, a1, a2)
#undef IEM_MC_CALL_CIMPL_3

/** Variant of IEM_MC_CALL_CIMPL_4 with explicit instruction length parameter. */
#define IEM_MC_CALL_CIMPL_4_THREADED(a_cbInstr, a_fFlags, a_fGstShwFlush, a_pfnCImpl, a0, a1, a2, a3) \
    return (a_pfnCImpl)(pVCpu, (a_cbInstr), a0, a1, a2, a3)
#undef IEM_MC_CALL_CIMPL_4

/** Variant of IEM_MC_CALL_CIMPL_5 with explicit instruction length parameter. */
#define IEM_MC_CALL_CIMPL_5_THREADED(a_cbInstr, a_fFlags, a_fGstShwFlush, a_pfnCImpl, a0, a1, a2, a3, a4) \
    return (a_pfnCImpl)(pVCpu, (a_cbInstr), a0, a1, a2, a3, a4)
#undef IEM_MC_CALL_CIMPL_5

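/* For illustration: a threaded function built from the templates above boils
   down to a plain tail call.  E.g. (iemCImpl_SomeInsn and uArg0 are
   hypothetical names)
        IEM_MC_CALL_CIMPL_1_THREADED(cbInstr, fFlags, fGstShwFlush, iemCImpl_SomeInsn, uArg0)
   expands to
        return iemCImpl_SomeInsn(pVCpu, cbInstr, uArg0);
   Note that a_fFlags and a_fGstShwFlush are ignored by the expansion here;
   they only matter to the code that consumes these templates. */
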

/** Variant of IEM_MC_DEFER_TO_CIMPL_0_RET with explicit instruction
 * length parameter. */
#define IEM_MC_DEFER_TO_CIMPL_0_RET_THREADED(a_cbInstr, a_fFlags, a_fGstShwFlush, a_pfnCImpl) \
    return (a_pfnCImpl)(pVCpu, (a_cbInstr))
#undef IEM_MC_DEFER_TO_CIMPL_0_RET

/** Variant of IEM_MC_DEFER_TO_CIMPL_1_RET with explicit instruction
 * length parameter. */
#define IEM_MC_DEFER_TO_CIMPL_1_RET_THREADED(a_cbInstr, a_fFlags, a_fGstShwFlush, a_pfnCImpl, a0) \
    return (a_pfnCImpl)(pVCpu, (a_cbInstr), a0)
#undef IEM_MC_DEFER_TO_CIMPL_1_RET

/** Variant of IEM_MC_DEFER_TO_CIMPL_2_RET with explicit instruction
 * length parameter. */
#define IEM_MC_DEFER_TO_CIMPL_2_RET_THREADED(a_cbInstr, a_fFlags, a_fGstShwFlush, a_pfnCImpl, a0, a1) \
    return (a_pfnCImpl)(pVCpu, (a_cbInstr), a0, a1)
#undef IEM_MC_DEFER_TO_CIMPL_2_RET

/** Variant of IEM_MC_DEFER_TO_CIMPL_3_RET with explicit instruction length
 * parameter. */
#define IEM_MC_DEFER_TO_CIMPL_3_RET_THREADED(a_cbInstr, a_fFlags, a_fGstShwFlush, a_pfnCImpl, a0, a1, a2) \
    return (a_pfnCImpl)(pVCpu, (a_cbInstr), a0, a1, a2)
#undef IEM_MC_DEFER_TO_CIMPL_3_RET

/** Variant of IEM_MC_DEFER_TO_CIMPL_4_RET with explicit instruction length
 * parameter. */
#define IEM_MC_DEFER_TO_CIMPL_4_RET_THREADED(a_cbInstr, a_fFlags, a_fGstShwFlush, a_pfnCImpl, a0, a1, a2, a3) \
    return (a_pfnCImpl)(pVCpu, (a_cbInstr), a0, a1, a2, a3)
#undef IEM_MC_DEFER_TO_CIMPL_4_RET

/** Variant of IEM_MC_DEFER_TO_CIMPL_5_RET with explicit instruction length
 * parameter. */
#define IEM_MC_DEFER_TO_CIMPL_5_RET_THREADED(a_cbInstr, a_fFlags, a_fGstShwFlush, a_pfnCImpl, a0, a1, a2, a3, a4) \
    return (a_pfnCImpl)(pVCpu, (a_cbInstr), a0, a1, a2, a3, a4)
#undef IEM_MC_DEFER_TO_CIMPL_5_RET


/** Variant of IEM_MC_FETCH_GREG_U8 with extended (20) register index. */
#define IEM_MC_FETCH_GREG_U8_THREADED(a_u8Dst, a_iGRegEx) \
    (a_u8Dst) = iemGRegFetchU8Ex(pVCpu, (a_iGRegEx))

/** Variant of IEM_MC_FETCH_GREG_U8_ZX_U16 with extended (20) register index. */
#define IEM_MC_FETCH_GREG_U8_ZX_U16_THREADED(a_u16Dst, a_iGRegEx) \
    (a_u16Dst) = iemGRegFetchU8Ex(pVCpu, (a_iGRegEx))

/** Variant of IEM_MC_FETCH_GREG_U8_ZX_U32 with extended (20) register index. */
#define IEM_MC_FETCH_GREG_U8_ZX_U32_THREADED(a_u32Dst, a_iGRegEx) \
    (a_u32Dst) = iemGRegFetchU8Ex(pVCpu, (a_iGRegEx))

/** Variant of IEM_MC_FETCH_GREG_U8_ZX_U64 with extended (20) register index. */
#define IEM_MC_FETCH_GREG_U8_ZX_U64_THREADED(a_u64Dst, a_iGRegEx) \
    (a_u64Dst) = iemGRegFetchU8Ex(pVCpu, (a_iGRegEx))

/** Variant of IEM_MC_FETCH_GREG_U8_SX_U16 with extended (20) register index. */
#define IEM_MC_FETCH_GREG_U8_SX_U16_THREADED(a_u16Dst, a_iGRegEx) \
    (a_u16Dst) = (int8_t)iemGRegFetchU8Ex(pVCpu, (a_iGRegEx))

/** Variant of IEM_MC_FETCH_GREG_U8_SX_U32 with extended (20) register index. */
#define IEM_MC_FETCH_GREG_U8_SX_U32_THREADED(a_u32Dst, a_iGRegEx) \
    (a_u32Dst) = (int8_t)iemGRegFetchU8Ex(pVCpu, (a_iGRegEx))
#undef IEM_MC_FETCH_GREG_U8_SX_U32

/** Variant of IEM_MC_FETCH_GREG_U8_SX_U64 with extended (20) register index. */
#define IEM_MC_FETCH_GREG_U8_SX_U64_THREADED(a_u64Dst, a_iGRegEx) \
    (a_u64Dst) = (int8_t)iemGRegFetchU8Ex(pVCpu, (a_iGRegEx))
#undef IEM_MC_FETCH_GREG_U8_SX_U64

/** Variant of IEM_MC_STORE_GREG_U8 with extended (20) register index. */
#define IEM_MC_STORE_GREG_U8_THREADED(a_iGRegEx, a_u8Value) \
    *iemGRegRefU8Ex(pVCpu, (a_iGRegEx)) = (a_u8Value)
#undef IEM_MC_STORE_GREG_U8

/** Variant of IEM_MC_STORE_GREG_U8_CONST with extended (20) register index. */
#define IEM_MC_STORE_GREG_U8_CONST_THREADED(a_iGRegEx, a_u8Value) \
    *iemGRegRefU8Ex(pVCpu, (a_iGRegEx)) = (a_u8Value)
#undef IEM_MC_STORE_GREG_U8_CONST

/** Variant of IEM_MC_REF_GREG_U8 with extended (20) register index. */
#define IEM_MC_REF_GREG_U8_THREADED(a_pu8Dst, a_iGRegEx) \
    (a_pu8Dst) = iemGRegRefU8Ex(pVCpu, (a_iGRegEx))
#undef IEM_MC_REF_GREG_U8

/** Variant of IEM_MC_REF_GREG_U8_CONST with extended (20) register index. */
#define IEM_MC_REF_GREG_U8_CONST_THREADED(a_pu8Dst, a_iGRegEx) \
    (a_pu8Dst) = iemGRegRefU8Ex(pVCpu, (a_iGRegEx))
#undef IEM_MC_REF_GREG_U8_CONST

/** Variant of IEM_MC_ADD_GREG_U8_TO_LOCAL with extended (20) register index. */
#define IEM_MC_ADD_GREG_U8_TO_LOCAL_THREADED(a_u8Value, a_iGRegEx) \
    do { (a_u8Value) += iemGRegFetchU8Ex(pVCpu, (a_iGRegEx)); } while (0)
#undef IEM_MC_ADD_GREG_U8_TO_LOCAL

/** Variant of IEM_MC_AND_GREG_U8 with extended (20) register index. */
#define IEM_MC_AND_GREG_U8_THREADED(a_iGRegEx, a_u8Value) \
    *iemGRegRefU8Ex(pVCpu, (a_iGRegEx)) &= (a_u8Value)
#undef IEM_MC_AND_GREG_U8

/** Variant of IEM_MC_OR_GREG_U8 with extended (20) register index. */
#define IEM_MC_OR_GREG_U8_THREADED(a_iGRegEx, a_u8Value) \
    *iemGRegRefU8Ex(pVCpu, (a_iGRegEx)) |= (a_u8Value)
#undef IEM_MC_OR_GREG_U8


/** For asserting that only declared output flags changed. */
#ifndef VBOX_STRICT
# define IEM_MC_ASSERT_EFLAGS(a_fEflInput, a_fEflOutput) ((void)0)
#else
# undef  IEM_MC_REF_EFLAGS_EX
# define IEM_MC_REF_EFLAGS_EX(a_pEFlags, a_fEflInput, a_fEflOutput) \
    uint32_t const fEflAssert = pVCpu->cpum.GstCtx.eflags.uBoth; \
    IEM_MC_REF_EFLAGS(a_pEFlags)
# define IEM_MC_ASSERT_EFLAGS(a_fEflInput, a_fEflOutput) \
    AssertMsg((pVCpu->cpum.GstCtx.eflags.uBoth & ~(a_fEflOutput)) == (fEflAssert & ~(a_fEflOutput)), \
              ("now %#x (%#x), was %#x (%#x) - diff %#x; a_fEflOutput=%#x\n", \
               (pVCpu->cpum.GstCtx.eflags.uBoth & ~(a_fEflOutput)), pVCpu->cpum.GstCtx.eflags.uBoth, \
               (fEflAssert & ~(a_fEflOutput)), fEflAssert, \
               (pVCpu->cpum.GstCtx.eflags.uBoth ^ fEflAssert) & ~(a_fEflOutput), a_fEflOutput))
#endif

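/* Usage sketch for the strict-build pair above (the flag masks are
   hypothetical; X86_EFL_CF and X86_EFL_OF come from iprt/x86.h):
        IEM_MC_REF_EFLAGS_EX(pEFlags, X86_EFL_CF, X86_EFL_CF | X86_EFL_OF);
        ...code that may modify *pEFlags...
        IEM_MC_ASSERT_EFLAGS(X86_EFL_CF, X86_EFL_CF | X86_EFL_OF);
   The first macro snapshots eflags into fEflAssert; the second asserts that
   no bits outside the declared output mask (here CF and OF) have changed.
   In non-strict builds IEM_MC_ASSERT_EFLAGS compiles to nothing. */
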


/**
 * Calculates the effective address of a ModR/M memory operand, 16-bit
 * addressing variant.
 *
 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR_THREADED_ADDR16.
 *
 * @returns The effective address.
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   bRm         The ModRM byte.
 * @param   u16Disp     The displacement byte/word, if any.
 */
static RTGCPTR iemOpHlpCalcRmEffAddrThreadedAddr16(PVMCPUCC pVCpu, uint8_t bRm, uint16_t u16Disp) RT_NOEXCEPT
{
    Log5(("iemOpHlpCalcRmEffAddrThreadedAddr16: bRm=%#x u16Disp=%#x\n", bRm, u16Disp));
    Assert(!IEM_IS_64BIT_CODE(pVCpu));

    /* Handle the disp16 form with no registers first. */
    if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
    {
        Log5(("iemOpHlpCalcRmEffAddrThreadedAddr16: EffAddr=%#010RGv\n", (RTGCPTR)u16Disp));
        return u16Disp;
    }

    /* Get the displacement. */
    /** @todo we can eliminate this step by making u16Disp have this value
     *        already! */
    uint16_t u16EffAddr;
    switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
    {
        case 0:  u16EffAddr = 0;                        break;
        case 1:  u16EffAddr = (int16_t)(int8_t)u16Disp; break;
        case 2:  u16EffAddr = u16Disp;                  break;
        default: AssertFailedStmt(u16EffAddr = 0);
    }

    /* Add the base and index registers to the disp. */
    switch (bRm & X86_MODRM_RM_MASK)
    {
        case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
        case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
        case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; break;
        case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; break;
        case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
        case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
        case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; break;
        case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
    }

    Log5(("iemOpHlpCalcRmEffAddrThreadedAddr16: EffAddr=%#010RGv\n", (RTGCPTR)u16EffAddr));
    return u16EffAddr;
}
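
/* Worked example for the 16-bit helper above: 'mov ax, [bx+si+12h]' decodes
   to bRm=0x40 (mod=1, reg=AX, rm=0 -> BX+SI) with u16Disp=0x12, so the helper
   returns BX + SI + 0x12 (with 16-bit wrap-around). */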


/**
 * Calculates the effective address of a ModR/M memory operand, 32-bit
 * addressing variant.
 *
 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR_THREADED_ADDR32 and
 * IEM_MC_CALC_RM_EFF_ADDR_THREADED_ADDR32FLAT.
 *
 * @returns The effective address.
 * @param   pVCpu               The cross context virtual CPU structure of the
 *                              calling thread.
 * @param   bRm                 The ModRM byte.
 * @param   uSibAndRspOffset    Two parts:
 *                                - The first 8 bits make up the SIB byte.
 *                                - The next 8 bits are the fixed RSP/ESP offset
 *                                  in case of a pop [xSP].
 * @param   u32Disp             The displacement byte/dword, if any.
 */
static RTGCPTR iemOpHlpCalcRmEffAddrThreadedAddr32(PVMCPUCC pVCpu, uint8_t bRm, uint32_t uSibAndRspOffset,
                                                   uint32_t u32Disp) RT_NOEXCEPT
{
    Log5(("iemOpHlpCalcRmEffAddrThreadedAddr32: bRm=%#x uSibAndRspOffset=%#x u32Disp=%#x\n", bRm, uSibAndRspOffset, u32Disp));

    /* Handle the disp32 form with no registers first. */
    if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
    {
        Log5(("iemOpHlpCalcRmEffAddrThreadedAddr32: EffAddr=%#010RGv\n", (RTGCPTR)u32Disp));
        return u32Disp;
    }

    /* Get the register (or SIB) value. */
    uint32_t u32EffAddr;
#ifdef _MSC_VER
    u32EffAddr = 0; /* MSC uninitialized variable analysis is too simple, it seems. */
#endif
    switch (bRm & X86_MODRM_RM_MASK)
    {
        case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
        case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
        case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
        case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
        case 4: /* SIB */
        {
            /* Get the index and scale it. */
            switch ((uSibAndRspOffset >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
            {
                case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
                case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
                case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
                case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
                case 4: u32EffAddr = 0; /*none */ break;
                case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
                case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
                case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
            }
            u32EffAddr <<= (uSibAndRspOffset >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;

            /* add base */
            switch (uSibAndRspOffset & X86_SIB_BASE_MASK)
            {
                case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
                case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
                case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
                case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
                case 4:
                    u32EffAddr += pVCpu->cpum.GstCtx.esp;
                    u32EffAddr += uSibAndRspOffset >> 8;
                    break;
                case 5:
                    if ((bRm & X86_MODRM_MOD_MASK) != 0)
                        u32EffAddr += pVCpu->cpum.GstCtx.ebp;
                    else
                        u32EffAddr += u32Disp;
                    break;
                case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
                case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
            }
            break;
        }
        case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
        case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
        case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
    }

    /* Get and add the displacement. */
    switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
    {
        case 0: break;
        case 1: u32EffAddr += (int8_t)u32Disp; break;
        case 2: u32EffAddr += u32Disp; break;
        default: AssertFailed();
    }

    Log5(("iemOpHlpCalcRmEffAddrThreadedAddr32: EffAddr=%#010RGv\n", (RTGCPTR)u32EffAddr));
    return u32EffAddr;
}
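
/* Worked example for the 32-bit helper above: 'mov eax, [ebx+esi*4+10h]'
   decodes to bRm=0x44 (mod=1, rm=4 -> SIB follows), SIB=0xB3 (scale=2,
   index=ESI, base=EBX) and u32Disp=0x10, so with uSibAndRspOffset=0xB3 the
   helper computes EBX + (ESI << 2) + 0x10. */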


/**
 * Calculates the effective address of a ModR/M memory operand.
 *
 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR_THREADED_ADDR64.
 *
 * @returns The effective address.
 * @param   pVCpu               The cross context virtual CPU structure of the
 *                              calling thread.
 * @param   bRmEx               The ModRM byte but with bit 3 set to REX.B and
 *                              bit 4 to REX.X.  The two bits are part of the
 *                              REG sub-field, which isn't needed in this
 *                              function.
 * @param   uSibAndRspOffset    Two parts:
 *                                - The first 8 bits make up the SIB byte.
 *                                - The next 8 bits are the fixed RSP/ESP offset
 *                                  in case of a pop [xSP].
 * @param   u32Disp             The displacement byte/dword, if any.
 * @param   cbInstr             The size of the fully decoded instruction.  Used
 *                              for RIP relative addressing.
 * @todo combine cbInstr and cbImm!
 */
static RTGCPTR iemOpHlpCalcRmEffAddrThreadedAddr64(PVMCPUCC pVCpu, uint8_t bRmEx, uint32_t uSibAndRspOffset,
                                                   uint32_t u32Disp, uint8_t cbInstr) RT_NOEXCEPT
{
    Log5(("iemOpHlpCalcRmEffAddrThreadedAddr64: bRmEx=%#x\n", bRmEx));
    Assert(IEM_IS_64BIT_CODE(pVCpu));

    uint64_t u64EffAddr;

    /* Handle the rip+disp32 form with no registers first. */
    if ((bRmEx & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
    {
        u64EffAddr  = (int32_t)u32Disp;
        u64EffAddr += pVCpu->cpum.GstCtx.rip + cbInstr;
    }
    else
    {
        /* Get the register (or SIB) value. */
#ifdef _MSC_VER
        u64EffAddr = 0; /* MSC uninitialized variable analysis is too simple, it seems. */
#endif
        switch (bRmEx & (X86_MODRM_RM_MASK | 0x8)) /* bRmEx[bit 3] = REX.B */
        {
            default:
            case  0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
            case  1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
            case  2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
            case  3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
            case  5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
            case  6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
            case  7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
            case  8: u64EffAddr = pVCpu->cpum.GstCtx.r8;  break;
            case  9: u64EffAddr = pVCpu->cpum.GstCtx.r9;  break;
            case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
            case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
            case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
            case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
            case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
            /* SIB */
            case  4:
            case 12:
            {
                /* Get the index and scale it. */
                switch (   ((uSibAndRspOffset >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
                        | ((bRmEx & 0x10) >> 1)) /* bRmEx[bit 4] = REX.X */
                {
                    case  0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
                    case  1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
                    case  2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
                    case  3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
                    case  4: u64EffAddr = 0; /*none */ break;
                    case  5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
                    case  6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
                    case  7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
                    case  8: u64EffAddr = pVCpu->cpum.GstCtx.r8;  break;
                    case  9: u64EffAddr = pVCpu->cpum.GstCtx.r9;  break;
                    case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
                    case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
                    case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
                    case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
                    case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
                    case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
                }
                u64EffAddr <<= (uSibAndRspOffset >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;

                /* add base */
                switch ((uSibAndRspOffset & X86_SIB_BASE_MASK) | (bRmEx & 0x8)) /* bRmEx[bit 3] = REX.B */
                {
                    case  0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
                    case  1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
                    case  2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
                    case  3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
                    case  4:
                        u64EffAddr += pVCpu->cpum.GstCtx.rsp;
                        u64EffAddr += uSibAndRspOffset >> 8;
                        break;
                    case  6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
                    case  7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
                    case  8: u64EffAddr += pVCpu->cpum.GstCtx.r8;  break;
                    case  9: u64EffAddr += pVCpu->cpum.GstCtx.r9;  break;
                    case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
                    case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
                    case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
                    case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
                    case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
                    /* complicated encodings */
                    case  5:
                        if ((bRmEx & X86_MODRM_MOD_MASK) != 0)
                            u64EffAddr += pVCpu->cpum.GstCtx.rbp;
                        else
                            u64EffAddr += (int32_t)u32Disp;
                        break;
                    case 13:
                        if ((bRmEx & X86_MODRM_MOD_MASK) != 0)
                            u64EffAddr += pVCpu->cpum.GstCtx.r13;
                        else
                            u64EffAddr += (int32_t)u32Disp;
                        break;
                }
                break;
            }
        }

        /* Get and add the displacement. */
        switch ((bRmEx >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
        {
            case 0: break;
            case 1: u64EffAddr += (int8_t)u32Disp; break;
            case 2: u64EffAddr += (int32_t)u32Disp; break;
            default: AssertFailed();
        }
    }

    Log5(("iemOpHlpCalcRmEffAddrThreadedAddr64: EffAddr=%#010RGv\n", u64EffAddr));
    return u64EffAddr;
}
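
/* Worked example for the 64-bit helper above: 'lea rax, [rip+1000h]' decodes
   to bRmEx=0x05 (mod=0, rm=5 -> rip+disp32) with u32Disp=0x1000, so the
   helper returns RIP + cbInstr + 0x1000, i.e. the displacement is relative
   to the first byte of the *next* instruction. */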


/*
 * The threaded functions.
 */
#include "IEMThreadedFunctions.cpp.h"
