VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllInstructions.cpp.h@ 39751

Last change on this file since 39751 was 39497, checked in by vboxsync, 13 years ago

IEM: todo

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 397.5 KB
Line 
1/* $Id: IEMAllInstructions.cpp.h 39497 2011-12-01 19:59:05Z vboxsync $ */
2/** @file
3 * IEM - Instruction Decoding and Emulation.
4 */
5
6/*
7 * Copyright (C) 2011 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Global Variables *
21*******************************************************************************/
22extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
23
24
/**
 * Common worker for instructions like ADD, AND, OR, ++ with a byte
 * memory/register as the destination.
 *
 * Decodes the ModR/M byte; for the register form the operation is applied
 * directly to the destination register, for the memory form the destination
 * is mapped and committed together with the EFLAGS after the arithmetic
 * worker has run.
 *
 * @param   pImpl       Pointer to the instruction implementation (assembly).
 */
FNIEMOP_DEF_1(iemOpHlpBinaryOperator_rm_r8, PCIEMOPBINSIZES, pImpl)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);

    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* LOCK with a register destination is invalid. */
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(3, 0);
        IEM_MC_ARG(uint8_t *,  pu8Dst,  0);
        IEM_MC_ARG(uint8_t,    u8Src,   1);
        IEM_MC_ARG(uint32_t *, pEFlags, 2);

        IEM_MC_FETCH_GREG_U8(u8Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
        IEM_MC_REF_GREG_U8(pu8Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_REF_EFLAGS(pEFlags);
        IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, u8Src, pEFlags);

        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /*
         * We're accessing memory.
         * Note! We're putting the eflags on the stack here so we can commit them
         *       after the memory.
         */
        /* Instructions without a locked variant (CMP, TEST) only read the
           destination, so map it read-only in that case. */
        uint32_t const fAccess = pImpl->pfnLockedU8 ? IEM_ACCESS_DATA_RW : IEM_ACCESS_DATA_R; /* CMP,TEST */
        IEM_MC_BEGIN(3, 2);
        IEM_MC_ARG(uint8_t *,  pu8Dst,  0);
        IEM_MC_ARG(uint8_t,    u8Src,   1);
        IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
        IEM_MC_MEM_MAP(pu8Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
        IEM_MC_FETCH_GREG_U8(u8Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
        IEM_MC_FETCH_EFLAGS(EFlags);
        /* LOCK prefix selects the atomic worker variant. */
        if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
            IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, u8Src, pEFlags);
        else
            IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU8, pu8Dst, u8Src, pEFlags);

        IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Dst, fAccess);
        IEM_MC_COMMIT_EFLAGS(EFlags);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
85
86
87/**
88 * Common worker for word/dword/qword instructions like ADD, AND, OR, ++ with
89 * memory/register as the destination.
90 *
91 * @param pImpl Pointer to the instruction implementation (assembly).
92 */
93FNIEMOP_DEF_1(iemOpHlpBinaryOperator_rm_rv, PCIEMOPBINSIZES, pImpl)
94{
95 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
96
97 /*
98 * If rm is denoting a register, no more instruction bytes.
99 */
100 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
101 {
102 IEMOP_HLP_NO_LOCK_PREFIX();
103
104 switch (pIemCpu->enmEffOpSize)
105 {
106 case IEMMODE_16BIT:
107 IEM_MC_BEGIN(3, 0);
108 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
109 IEM_MC_ARG(uint16_t, u16Src, 1);
110 IEM_MC_ARG(uint32_t *, pEFlags, 2);
111
112 IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
113 IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
114 IEM_MC_REF_EFLAGS(pEFlags);
115 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
116
117 IEM_MC_ADVANCE_RIP();
118 IEM_MC_END();
119 break;
120
121 case IEMMODE_32BIT:
122 IEM_MC_BEGIN(3, 0);
123 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
124 IEM_MC_ARG(uint32_t, u32Src, 1);
125 IEM_MC_ARG(uint32_t *, pEFlags, 2);
126
127 IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
128 IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
129 IEM_MC_REF_EFLAGS(pEFlags);
130 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
131
132 IEM_MC_ADVANCE_RIP();
133 IEM_MC_END();
134 break;
135
136 case IEMMODE_64BIT:
137 IEM_MC_BEGIN(3, 0);
138 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
139 IEM_MC_ARG(uint64_t, u64Src, 1);
140 IEM_MC_ARG(uint32_t *, pEFlags, 2);
141
142 IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
143 IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
144 IEM_MC_REF_EFLAGS(pEFlags);
145 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
146
147 IEM_MC_ADVANCE_RIP();
148 IEM_MC_END();
149 break;
150 }
151 }
152 else
153 {
154 /*
155 * We're accessing memory.
156 * Note! We're putting the eflags on the stack here so we can commit them
157 * after the memory.
158 */
159 uint32_t const fAccess = pImpl->pfnLockedU8 ? IEM_ACCESS_DATA_RW : IEM_ACCESS_DATA_R /* CMP,TEST */;
160 switch (pIemCpu->enmEffOpSize)
161 {
162 case IEMMODE_16BIT:
163 IEM_MC_BEGIN(3, 2);
164 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
165 IEM_MC_ARG(uint16_t, u16Src, 1);
166 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
167 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
168
169 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
170 IEM_MC_MEM_MAP(pu16Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
171 IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
172 IEM_MC_FETCH_EFLAGS(EFlags);
173 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
174 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
175 else
176 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU16, pu16Dst, u16Src, pEFlags);
177
178 IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, fAccess);
179 IEM_MC_COMMIT_EFLAGS(EFlags);
180 IEM_MC_ADVANCE_RIP();
181 IEM_MC_END();
182 break;
183
184 case IEMMODE_32BIT:
185 IEM_MC_BEGIN(3, 2);
186 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
187 IEM_MC_ARG(uint32_t, u32Src, 1);
188 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
189 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
190
191 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
192 IEM_MC_MEM_MAP(pu32Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
193 IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
194 IEM_MC_FETCH_EFLAGS(EFlags);
195 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
196 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
197 else
198 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU32, pu32Dst, u32Src, pEFlags);
199
200 IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, fAccess);
201 IEM_MC_COMMIT_EFLAGS(EFlags);
202 IEM_MC_ADVANCE_RIP();
203 IEM_MC_END();
204 break;
205
206 case IEMMODE_64BIT:
207 IEM_MC_BEGIN(3, 2);
208 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
209 IEM_MC_ARG(uint64_t, u64Src, 1);
210 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
211 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
212
213 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
214 IEM_MC_MEM_MAP(pu64Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
215 IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
216 IEM_MC_FETCH_EFLAGS(EFlags);
217 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
218 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
219 else
220 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU64, pu64Dst, u64Src, pEFlags);
221
222 IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, fAccess);
223 IEM_MC_COMMIT_EFLAGS(EFlags);
224 IEM_MC_ADVANCE_RIP();
225 IEM_MC_END();
226 break;
227 }
228 }
229 return VINF_SUCCESS;
230}
231
232
/**
 * Common worker for byte instructions like ADD, AND, OR, ++ with a register as
 * the destination.
 *
 * Since the destination is a register, no LOCK prefix is permitted and no
 * memory mapping/commit is required - the memory operand (if any) is only
 * read.
 *
 * @param   pImpl       Pointer to the instruction implementation (assembly).
 */
FNIEMOP_DEF_1(iemOpHlpBinaryOperator_r8_rm, PCIEMOPBINSIZES, pImpl)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        IEM_MC_BEGIN(3, 0);
        IEM_MC_ARG(uint8_t *,  pu8Dst,  0);
        IEM_MC_ARG(uint8_t,    u8Src,   1);
        IEM_MC_ARG(uint32_t *, pEFlags, 2);

        /* Note the reversed roles vs. the rm_r8 worker: reg is the destination. */
        IEM_MC_FETCH_GREG_U8(u8Src, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_REF_GREG_U8(pu8Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
        IEM_MC_REF_EFLAGS(pEFlags);
        IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, u8Src, pEFlags);

        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /*
         * We're accessing memory.
         */
        IEM_MC_BEGIN(3, 1);
        IEM_MC_ARG(uint8_t *,  pu8Dst,  0);
        IEM_MC_ARG(uint8_t,    u8Src,   1);
        IEM_MC_ARG(uint32_t *, pEFlags, 2);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
        IEM_MC_FETCH_MEM_U8(u8Src, pIemCpu->iEffSeg, GCPtrEffDst);
        IEM_MC_REF_GREG_U8(pu8Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
        IEM_MC_REF_EFLAGS(pEFlags);
        IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, u8Src, pEFlags);

        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
284
285
286/**
287 * Common worker for word/dword/qword instructions like ADD, AND, OR, ++ with a
288 * register as the destination.
289 *
290 * @param pImpl Pointer to the instruction implementation (assembly).
291 */
292FNIEMOP_DEF_1(iemOpHlpBinaryOperator_rv_rm, PCIEMOPBINSIZES, pImpl)
293{
294 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
295 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
296
297 /*
298 * If rm is denoting a register, no more instruction bytes.
299 */
300 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
301 {
302 switch (pIemCpu->enmEffOpSize)
303 {
304 case IEMMODE_16BIT:
305 IEM_MC_BEGIN(3, 0);
306 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
307 IEM_MC_ARG(uint16_t, u16Src, 1);
308 IEM_MC_ARG(uint32_t *, pEFlags, 2);
309
310 IEM_MC_FETCH_GREG_U16(u16Src, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
311 IEM_MC_REF_GREG_U16(pu16Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
312 IEM_MC_REF_EFLAGS(pEFlags);
313 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
314
315 IEM_MC_ADVANCE_RIP();
316 IEM_MC_END();
317 break;
318
319 case IEMMODE_32BIT:
320 IEM_MC_BEGIN(3, 0);
321 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
322 IEM_MC_ARG(uint32_t, u32Src, 1);
323 IEM_MC_ARG(uint32_t *, pEFlags, 2);
324
325 IEM_MC_FETCH_GREG_U32(u32Src, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
326 IEM_MC_REF_GREG_U32(pu32Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
327 IEM_MC_REF_EFLAGS(pEFlags);
328 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
329
330 IEM_MC_ADVANCE_RIP();
331 IEM_MC_END();
332 break;
333
334 case IEMMODE_64BIT:
335 IEM_MC_BEGIN(3, 0);
336 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
337 IEM_MC_ARG(uint64_t, u64Src, 1);
338 IEM_MC_ARG(uint32_t *, pEFlags, 2);
339
340 IEM_MC_FETCH_GREG_U64(u64Src, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
341 IEM_MC_REF_GREG_U64(pu64Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
342 IEM_MC_REF_EFLAGS(pEFlags);
343 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
344
345 IEM_MC_ADVANCE_RIP();
346 IEM_MC_END();
347 break;
348 }
349 }
350 else
351 {
352 /*
353 * We're accessing memory.
354 */
355 switch (pIemCpu->enmEffOpSize)
356 {
357 case IEMMODE_16BIT:
358 IEM_MC_BEGIN(3, 1);
359 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
360 IEM_MC_ARG(uint16_t, u16Src, 1);
361 IEM_MC_ARG(uint32_t *, pEFlags, 2);
362 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
363
364 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
365 IEM_MC_FETCH_MEM_U16(u16Src, pIemCpu->iEffSeg, GCPtrEffDst);
366 IEM_MC_REF_GREG_U16(pu16Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
367 IEM_MC_REF_EFLAGS(pEFlags);
368 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
369
370 IEM_MC_ADVANCE_RIP();
371 IEM_MC_END();
372 break;
373
374 case IEMMODE_32BIT:
375 IEM_MC_BEGIN(3, 1);
376 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
377 IEM_MC_ARG(uint32_t, u32Src, 1);
378 IEM_MC_ARG(uint32_t *, pEFlags, 2);
379 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
380
381 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
382 IEM_MC_FETCH_MEM_U32(u32Src, pIemCpu->iEffSeg, GCPtrEffDst);
383 IEM_MC_REF_GREG_U32(pu32Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
384 IEM_MC_REF_EFLAGS(pEFlags);
385 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
386
387 IEM_MC_ADVANCE_RIP();
388 IEM_MC_END();
389 break;
390
391 case IEMMODE_64BIT:
392 IEM_MC_BEGIN(3, 1);
393 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
394 IEM_MC_ARG(uint64_t, u64Src, 1);
395 IEM_MC_ARG(uint32_t *, pEFlags, 2);
396 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
397
398 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
399 IEM_MC_FETCH_MEM_U64(u64Src, pIemCpu->iEffSeg, GCPtrEffDst);
400 IEM_MC_REF_GREG_U64(pu64Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
401 IEM_MC_REF_EFLAGS(pEFlags);
402 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
403
404 IEM_MC_ADVANCE_RIP();
405 IEM_MC_END();
406 break;
407 }
408 }
409 return VINF_SUCCESS;
410}
411
412
/**
 * Common worker for instructions like ADD, AND, OR, ++ with working on AL with
 * a byte immediate.
 *
 * The destination is fixed to AL, so no ModR/M byte is decoded - just the
 * immediate operand.
 *
 * @param   pImpl       Pointer to the instruction implementation (assembly).
 */
FNIEMOP_DEF_1(iemOpHlpBinaryOperator_AL_Ib, PCIEMOPBINSIZES, pImpl)
{
    uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /* register destination, LOCK is invalid */

    IEM_MC_BEGIN(3, 0);
    IEM_MC_ARG(uint8_t *,       pu8Dst,             0);
    IEM_MC_ARG_CONST(uint8_t,   u8Src,/*=*/ u8Imm,  1);
    IEM_MC_ARG(uint32_t *,      pEFlags,            2);

    IEM_MC_REF_GREG_U8(pu8Dst, X86_GREG_xAX);
    IEM_MC_REF_EFLAGS(pEFlags);
    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, u8Src, pEFlags);

    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
437
438
/**
 * Common worker for instructions like ADD, AND, OR, ++ with working on
 * AX/EAX/RAX with a word/dword immediate.
 *
 * The immediate width follows the effective operand size; in 64-bit mode the
 * immediate is a 32-bit value sign-extended to 64 bits (no imm64 form).
 *
 * @param   pImpl       Pointer to the instruction implementation (assembly).
 */
FNIEMOP_DEF_1(iemOpHlpBinaryOperator_rAX_Iz, PCIEMOPBINSIZES, pImpl)
{
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
        {
            uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
            IEMOP_HLP_NO_LOCK_PREFIX(); /* register destination, LOCK is invalid */

            IEM_MC_BEGIN(3, 0);
            IEM_MC_ARG(uint16_t *,      pu16Dst,                0);
            IEM_MC_ARG_CONST(uint16_t,  u16Src,/*=*/ u16Imm,    1);
            IEM_MC_ARG(uint32_t *,      pEFlags,                2);

            IEM_MC_REF_GREG_U16(pu16Dst, X86_GREG_xAX);
            IEM_MC_REF_EFLAGS(pEFlags);
            IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);

            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;
        }

        case IEMMODE_32BIT:
        {
            uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
            IEMOP_HLP_NO_LOCK_PREFIX();

            IEM_MC_BEGIN(3, 0);
            IEM_MC_ARG(uint32_t *,      pu32Dst,                0);
            IEM_MC_ARG_CONST(uint32_t,  u32Src,/*=*/ u32Imm,    1);
            IEM_MC_ARG(uint32_t *,      pEFlags,                2);

            IEM_MC_REF_GREG_U32(pu32Dst, X86_GREG_xAX);
            IEM_MC_REF_EFLAGS(pEFlags);
            IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);

            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;
        }

        case IEMMODE_64BIT:
        {
            /* imm32 sign-extended to 64 bits. */
            uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm);
            IEMOP_HLP_NO_LOCK_PREFIX();

            IEM_MC_BEGIN(3, 0);
            IEM_MC_ARG(uint64_t *,      pu64Dst,                0);
            IEM_MC_ARG_CONST(uint64_t,  u64Src,/*=*/ u64Imm,    1);
            IEM_MC_ARG(uint32_t *,      pEFlags,                2);

            IEM_MC_REF_GREG_U64(pu64Dst, X86_GREG_xAX);
            IEM_MC_REF_EFLAGS(pEFlags);
            IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);

            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;
        }

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
509
510
/** Opcodes 0xf1, 0xd6 - placeholder for invalid one-byte opcodes; always
 *  raises \#UD. */
FNIEMOP_DEF(iemOp_Invalid)
{
    IEMOP_MNEMONIC("Invalid");
    return IEMOP_RAISE_INVALID_OPCODE();
}
517
518
519
520/** @name ..... opcodes.
521 *
522 * @{
523 */
524
525/** @} */
526
527
528/** @name Two byte opcodes (first byte 0x0f).
529 *
530 * @{
531 */
532
533/** Opcode 0x0f 0x00 /0. */
534FNIEMOP_STUB_1(iemOp_Grp6_sldt, uint8_t, bRm);
535
536
537/** Opcode 0x0f 0x00 /1. */
538FNIEMOP_STUB_1(iemOp_Grp6_str, uint8_t, bRm);
539
540
/** Opcode 0x0f 0x00 /2.  LLDT - load local descriptor table register. */
FNIEMOP_DEF_1(iemOp_Grp6_lldt, uint8_t, bRm)
{
    IEMOP_HLP_NO_LOCK_PREFIX();
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* Register form: the selector comes straight from a GPR; privilege
           checking is presumably done inside iemCImpl_lldt - confirm. */
        IEM_MC_BEGIN(1, 0);
        IEM_MC_ARG(uint16_t, u16Sel, 0);
        IEM_MC_FETCH_GREG_U16(u16Sel, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_CALL_CIMPL_1(iemCImpl_lldt, u16Sel);
        IEM_MC_END();
    }
    else
    {
        /* Memory form: raise \#GP(0) for CPL != 0 before touching memory. */
        IEM_MC_BEGIN(1, 1);
        IEM_MC_ARG(uint16_t, u16Sel, 0);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
        IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO();
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
        IEM_MC_FETCH_MEM_U16(u16Sel, pIemCpu->iEffSeg, GCPtrEffSrc);
        IEM_MC_CALL_CIMPL_1(iemCImpl_lldt, u16Sel);
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
566
567
/** Opcode 0x0f 0x00 /3.  LTR - load task register. */
FNIEMOP_DEF_1(iemOp_Grp6_ltr, uint8_t, bRm)
{
    IEMOP_HLP_NO_LOCK_PREFIX();
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* Register form: the selector comes straight from a GPR; privilege
           checking is presumably done inside iemCImpl_ltr - confirm. */
        IEM_MC_BEGIN(1, 0);
        IEM_MC_ARG(uint16_t, u16Sel, 0);
        IEM_MC_FETCH_GREG_U16(u16Sel, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_CALL_CIMPL_1(iemCImpl_ltr, u16Sel);
        IEM_MC_END();
    }
    else
    {
        /* Memory form: raise \#GP(0) for CPL != 0 before touching memory. */
        IEM_MC_BEGIN(1, 1);
        IEM_MC_ARG(uint16_t, u16Sel, 0);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
        IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO();
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
        IEM_MC_FETCH_MEM_U16(u16Sel, pIemCpu->iEffSeg, GCPtrEffSrc);
        IEM_MC_CALL_CIMPL_1(iemCImpl_ltr, u16Sel);
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
593
594
595/** Opcode 0x0f 0x00 /4. */
596FNIEMOP_STUB_1(iemOp_Grp6_verr, uint8_t, bRm);
597
598
599/** Opcode 0x0f 0x00 /5. */
600FNIEMOP_STUB_1(iemOp_Grp6_verw, uint8_t, bRm);
601
602
603/** Opcode 0x0f 0x00. */
604FNIEMOP_DEF(iemOp_Grp6)
605{
606 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
607 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
608 {
609 case 0: return FNIEMOP_CALL_1(iemOp_Grp6_sldt, bRm);
610 case 1: return FNIEMOP_CALL_1(iemOp_Grp6_str, bRm);
611 case 2: return FNIEMOP_CALL_1(iemOp_Grp6_lldt, bRm);
612 case 3: return FNIEMOP_CALL_1(iemOp_Grp6_ltr, bRm);
613 case 4: return FNIEMOP_CALL_1(iemOp_Grp6_verr, bRm);
614 case 5: return FNIEMOP_CALL_1(iemOp_Grp6_verw, bRm);
615 case 6: return IEMOP_RAISE_INVALID_OPCODE();
616 case 7: return IEMOP_RAISE_INVALID_OPCODE();
617 IEM_NOT_REACHED_DEFAULT_CASE_RET();
618 }
619
620}
621
622
/** Opcode 0x0f 0x01 /0 (memory form).  SGDT - not implemented yet; asserts. */
FNIEMOP_DEF_1(iemOp_Grp7_sgdt, uint8_t, bRm)
{
    NOREF(pIemCpu); NOREF(bRm);
    AssertFailedReturn(VERR_NOT_IMPLEMENTED);
}
629
630
/** Opcode 0x0f 0x01 /0, mod=3, rm=1.  VMCALL - not implemented; raises \#UD. */
FNIEMOP_DEF(iemOp_Grp7_vmcall)
{
    AssertFailed();
    return IEMOP_RAISE_INVALID_OPCODE();
}


/** Opcode 0x0f 0x01 /0, mod=3, rm=2.  VMLAUNCH - not implemented; raises \#UD. */
FNIEMOP_DEF(iemOp_Grp7_vmlaunch)
{
    AssertFailed();
    return IEMOP_RAISE_INVALID_OPCODE();
}


/** Opcode 0x0f 0x01 /0, mod=3, rm=3.  VMRESUME - not implemented; raises \#UD. */
FNIEMOP_DEF(iemOp_Grp7_vmresume)
{
    AssertFailed();
    return IEMOP_RAISE_INVALID_OPCODE();
}


/** Opcode 0x0f 0x01 /0, mod=3, rm=4.  VMXOFF - not implemented; raises \#UD. */
FNIEMOP_DEF(iemOp_Grp7_vmxoff)
{
    AssertFailed();
    return IEMOP_RAISE_INVALID_OPCODE();
}
661
662
/** Opcode 0x0f 0x01 /1 (memory form).  SIDT - not implemented yet; asserts. */
FNIEMOP_DEF_1(iemOp_Grp7_sidt, uint8_t, bRm)
{
    NOREF(pIemCpu); NOREF(bRm);
    AssertFailedReturn(VERR_NOT_IMPLEMENTED);
}


/** Opcode 0x0f 0x01 /1, mod=3, rm=0.  MONITOR - not implemented yet; asserts. */
FNIEMOP_DEF(iemOp_Grp7_monitor)
{
    NOREF(pIemCpu);
    AssertFailedReturn(VERR_NOT_IMPLEMENTED);
}


/** Opcode 0x0f 0x01 /1, mod=3, rm=1.  MWAIT - not implemented yet; asserts. */
FNIEMOP_DEF(iemOp_Grp7_mwait)
{
    NOREF(pIemCpu);
    AssertFailedReturn(VERR_NOT_IMPLEMENTED);
}
685
686
/** Opcode 0x0f 0x01 /2.  LGDT - load global descriptor table register. */
FNIEMOP_DEF_1(iemOp_Grp7_lgdt, uint8_t, bRm)
{
    IEMOP_HLP_NO_LOCK_PREFIX();

    /* In 64-bit mode the operand size is forced to 64-bit regardless of
       prefixes; otherwise the effective operand size is used. */
    IEMMODE enmEffOpSize = pIemCpu->enmCpuMode == IEMMODE_64BIT
                         ? IEMMODE_64BIT
                         : pIemCpu->enmEffOpSize;
    IEM_MC_BEGIN(3, 1);
    IEM_MC_ARG_CONST(uint8_t,   iEffSeg,        /*=*/pIemCpu->iEffSeg,  0);
    IEM_MC_ARG(RTGCPTR,         GCPtrEffSrc,                            1);
    IEM_MC_ARG_CONST(IEMMODE,   enmEffOpSizeArg,/*=*/enmEffOpSize,      2);
    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
    IEM_MC_CALL_CIMPL_3(iemCImpl_lgdt, iEffSeg, GCPtrEffSrc, enmEffOpSizeArg);
    IEM_MC_END();
    return VINF_SUCCESS;
}
704
705
/** Opcode 0x0f 0x01 /2, mod=3, rm=0.  XGETBV - not implemented; raises \#UD. */
FNIEMOP_DEF(iemOp_Grp7_xgetbv)
{
    AssertFailed();
    return IEMOP_RAISE_INVALID_OPCODE();
}


/** Opcode 0x0f 0x01 /2, mod=3, rm=1.  XSETBV - not implemented; raises \#UD. */
FNIEMOP_DEF(iemOp_Grp7_xsetbv)
{
    AssertFailed();
    return IEMOP_RAISE_INVALID_OPCODE();
}
720
721
/** Opcode 0x0f 0x01 /3.  LIDT - load interrupt descriptor table register. */
FNIEMOP_DEF_1(iemOp_Grp7_lidt, uint8_t, bRm)
{
    IEMOP_HLP_NO_LOCK_PREFIX();

    /* In 64-bit mode the operand size is forced to 64-bit regardless of
       prefixes; otherwise the effective operand size is used. */
    IEMMODE enmEffOpSize = pIemCpu->enmCpuMode == IEMMODE_64BIT
                         ? IEMMODE_64BIT
                         : pIemCpu->enmEffOpSize;
    IEM_MC_BEGIN(3, 1);
    IEM_MC_ARG_CONST(uint8_t,   iEffSeg,        /*=*/pIemCpu->iEffSeg,  0);
    IEM_MC_ARG(RTGCPTR,         GCPtrEffSrc,                            1);
    IEM_MC_ARG_CONST(IEMMODE,   enmEffOpSizeArg,/*=*/enmEffOpSize,      2);
    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
    IEM_MC_CALL_CIMPL_3(iemCImpl_lidt, iEffSeg, GCPtrEffSrc, enmEffOpSizeArg);
    IEM_MC_END();
    return VINF_SUCCESS;
}
739
740
/** Opcode 0x0f 0x01 /4.  SMSW - store machine status word (low part of CR0). */
FNIEMOP_DEF_1(iemOp_Grp7_smsw, uint8_t, bRm)
{
    IEMOP_HLP_NO_LOCK_PREFIX();
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* Register form: the stored width follows the effective operand size. */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint16_t, u16Tmp);
                IEM_MC_FETCH_CR0_U16(u16Tmp);
                IEM_MC_STORE_GREG_U16((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u16Tmp);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint32_t, u32Tmp);
                IEM_MC_FETCH_CR0_U32(u32Tmp);
                IEM_MC_STORE_GREG_U32((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u32Tmp);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint64_t, u64Tmp);
                IEM_MC_FETCH_CR0_U64(u64Tmp);
                IEM_MC_STORE_GREG_U64((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u64Tmp);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /* Ignore operand size here, memory refs are always 16-bit. */
        IEM_MC_BEGIN(0, 2);
        IEM_MC_LOCAL(uint16_t, u16Tmp);
        IEM_MC_LOCAL(RTGCPTR,  GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
        IEM_MC_FETCH_CR0_U16(u16Tmp);
        IEM_MC_STORE_MEM_U16(pIemCpu->iEffSeg, GCPtrEffDst, u16Tmp);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
        return VINF_SUCCESS;
    }
}
793
794
/** Opcode 0x0f 0x01 /6.  LMSW - load machine status word. */
FNIEMOP_DEF_1(iemOp_Grp7_lmsw, uint8_t, bRm)
{
    /* The operand size is effectively ignored, all is 16-bit and only the
       lower 3-bits are used. */
    IEMOP_HLP_NO_LOCK_PREFIX();
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* Register form: low 16 bits of the selected GPR. */
        IEM_MC_BEGIN(1, 0);
        IEM_MC_ARG(uint16_t, u16Tmp, 0);
        IEM_MC_FETCH_GREG_U16(u16Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_CALL_CIMPL_1(iemCImpl_lmsw, u16Tmp);
        IEM_MC_END();
    }
    else
    {
        /* Memory form: 16-bit load from the effective address. */
        IEM_MC_BEGIN(1, 1);
        IEM_MC_ARG(uint16_t, u16Tmp, 0);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
        IEM_MC_FETCH_MEM_U16(u16Tmp, pIemCpu->iEffSeg, GCPtrEffDst);
        IEM_MC_CALL_CIMPL_1(iemCImpl_lmsw, u16Tmp);
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
821
822
/** Opcode 0x0f 0x01 /7 (memory form).  INVLPG - not implemented yet; asserts. */
FNIEMOP_DEF_1(iemOp_Grp7_invlpg, uint8_t, bRm)
{
    NOREF(pIemCpu); NOREF(bRm);
    AssertFailedReturn(VERR_NOT_IMPLEMENTED);
}


/** Opcode 0x0f 0x01 /7, mod=3, rm=0.  SWAPGS - not implemented yet; asserts. */
FNIEMOP_DEF(iemOp_Grp7_swapgs)
{
    NOREF(pIemCpu);
    AssertFailedReturn(VERR_NOT_IMPLEMENTED);
}


/** Opcode 0x0f 0x01 /7, mod=3, rm=1.  RDTSCP - not implemented yet; asserts. */
FNIEMOP_DEF(iemOp_Grp7_rdtscp)
{
    NOREF(pIemCpu);
    AssertFailedReturn(VERR_NOT_IMPLEMENTED);
}
845
846
/** Opcode 0x0f 0x01 - group 7.
 *
 * Dispatches on the ModR/M reg field; several encodings additionally
 * distinguish the memory form (mod != 3) from register-form instructions
 * selected by the rm field (VMX, MONITOR/MWAIT, XGETBV/XSETBV,
 * SWAPGS/RDTSCP).
 */
FNIEMOP_DEF(iemOp_Grp7)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
    {
        case 0:
            /* /0 memory form is SGDT; mod=3 selects VMX instructions by rm. */
            if ((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT))
                return FNIEMOP_CALL_1(iemOp_Grp7_sgdt, bRm);
            switch (bRm & X86_MODRM_RM_MASK)
            {
                case 1: return FNIEMOP_CALL(iemOp_Grp7_vmcall);
                case 2: return FNIEMOP_CALL(iemOp_Grp7_vmlaunch);
                case 3: return FNIEMOP_CALL(iemOp_Grp7_vmresume);
                case 4: return FNIEMOP_CALL(iemOp_Grp7_vmxoff);
            }
            return IEMOP_RAISE_INVALID_OPCODE();

        case 1:
            /* /1 memory form is SIDT; mod=3 selects MONITOR/MWAIT by rm. */
            if ((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT))
                return FNIEMOP_CALL_1(iemOp_Grp7_sidt, bRm);
            switch (bRm & X86_MODRM_RM_MASK)
            {
                case 0: return FNIEMOP_CALL(iemOp_Grp7_monitor);
                case 1: return FNIEMOP_CALL(iemOp_Grp7_mwait);
            }
            return IEMOP_RAISE_INVALID_OPCODE();

        case 2:
            /* /2 memory form is LGDT; mod=3 selects XGETBV/XSETBV by rm. */
            if ((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT))
                return FNIEMOP_CALL_1(iemOp_Grp7_lgdt, bRm);
            switch (bRm & X86_MODRM_RM_MASK)
            {
                case 0: return FNIEMOP_CALL(iemOp_Grp7_xgetbv);
                case 1: return FNIEMOP_CALL(iemOp_Grp7_xsetbv);
            }
            return IEMOP_RAISE_INVALID_OPCODE();

        case 3:
            /* /3 is LIDT, memory form only. */
            if ((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT))
                return FNIEMOP_CALL_1(iemOp_Grp7_lidt, bRm);
            return IEMOP_RAISE_INVALID_OPCODE();

        case 4:
            return FNIEMOP_CALL_1(iemOp_Grp7_smsw, bRm);

        case 5:
            return IEMOP_RAISE_INVALID_OPCODE();

        case 6:
            return FNIEMOP_CALL_1(iemOp_Grp7_lmsw, bRm);

        case 7:
            /* /7 memory form is INVLPG; mod=3 selects SWAPGS/RDTSCP by rm. */
            if ((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT))
                return FNIEMOP_CALL_1(iemOp_Grp7_invlpg, bRm);
            switch (bRm & X86_MODRM_RM_MASK)
            {
                case 0: return FNIEMOP_CALL(iemOp_Grp7_swapgs);
                case 1: return FNIEMOP_CALL(iemOp_Grp7_rdtscp);
            }
            return IEMOP_RAISE_INVALID_OPCODE();

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
912
913
/** Opcode 0x0f 0x02. */
FNIEMOP_STUB(iemOp_lar_Gv_Ew);
/** Opcode 0x0f 0x03. */
FNIEMOP_STUB(iemOp_lsl_Gv_Ew);
/** Opcode 0x0f 0x05.  (Was mislabelled 0x04; SYSCALL encodes as 0F 05.) */
FNIEMOP_STUB(iemOp_syscall);
920
921
/** Opcode 0x0f 0x06.  CLTS - clear task-switched flag in CR0.
 *  (Was mislabelled 0x05; CLTS encodes as 0F 06.) */
FNIEMOP_DEF(iemOp_clts)
{
    IEMOP_MNEMONIC("clts");
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_clts);
}
929
930
/** Opcode 0x0f 0x07.  (Was mislabelled 0x06; SYSRET encodes as 0F 07.) */
FNIEMOP_STUB(iemOp_sysret);
933/** Opcode 0x0f 0x08. */
934FNIEMOP_STUB(iemOp_invd);
935/** Opcode 0x0f 0x09. */
936FNIEMOP_STUB(iemOp_wbinvd);
937/** Opcode 0x0f 0x0b. */
938FNIEMOP_STUB(iemOp_ud2);
939/** Opcode 0x0f 0x0d. */
940FNIEMOP_STUB(iemOp_nop_Ev_prefetch);
941/** Opcode 0x0f 0x0e. */
942FNIEMOP_STUB(iemOp_femms);
943/** Opcode 0x0f 0x0f. */
944FNIEMOP_STUB(iemOp_3Dnow);
945/** Opcode 0x0f 0x10. */
946FNIEMOP_STUB(iemOp_movups_Vps_Wps__movupd_Vpd_Wpd__movss_Vss_Wss__movsd_Vsd_Wsd);
947/** Opcode 0x0f 0x11. */
948FNIEMOP_STUB(iemOp_movups_Wps_Vps__movupd_Wpd_Vpd__movss_Wss_Vss__movsd_Vsd_Wsd);
949/** Opcode 0x0f 0x12. */
950FNIEMOP_STUB(iemOp_movlps_Vq_Mq__movhlps_Vq_Uq__movlpd_Vq_Mq__movsldup_Vq_Wq__movddup_Vq_Wq);
951/** Opcode 0x0f 0x13. */
952FNIEMOP_STUB(iemOp_movlps_Mq_Vq__movlpd_Mq_Vq);
953/** Opcode 0x0f 0x14. */
954FNIEMOP_STUB(iemOp_unpckhlps_Vps_Wq__unpcklpd_Vpd_Wq);
955/** Opcode 0x0f 0x15. */
956FNIEMOP_STUB(iemOp_unpckhps_Vps_Wq__unpckhpd_Vpd_Wq);
957/** Opcode 0x0f 0x16. */
958FNIEMOP_STUB(iemOp_movhps_Vq_Mq__movlhps_Vq_Uq__movhpd_Vq_Mq__movshdup_Vq_Wq);
959/** Opcode 0x0f 0x17. */
960FNIEMOP_STUB(iemOp_movhps_Mq_Vq__movhpd_Mq_Vq);
961/** Opcode 0x0f 0x18. */
962FNIEMOP_STUB(iemOp_prefetch_Grp16);
963
964
/** Opcode 0x0f 0x20.  MOV Rd,Cd - read a control register into a GPR. */
FNIEMOP_DEF(iemOp_mov_Rd_Cd)
{
    /* mod is ignored, as is operand size overrides. */
    IEMOP_MNEMONIC("mov Rd,Cd");
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
        pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize = IEMMODE_64BIT;
    else
        pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize = IEMMODE_32BIT;

    /** @todo Verify that the invalid lock sequence exception (\#UD) is raised
     *        before the privilege level violation (\#GP). */
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    uint8_t iCrReg = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg;
    if (pIemCpu->fPrefixes & IEM_OP_PRF_LOCK)
    {
        /* The lock prefix can be used to encode CR8 accesses on some CPUs. */
        if (!IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX(X86_CPUID_AMD_FEATURE_ECX_CR8L))
            return IEMOP_RAISE_INVALID_LOCK_PREFIX();
        iCrReg |= 8;
    }
    /* Only CR0, CR2, CR3, CR4 and CR8 are valid sources; anything else is \#UD. */
    switch (iCrReg)
    {
        case 0: case 2: case 3: case 4: case 8:
            break;
        default:
            return IEMOP_RAISE_INVALID_OPCODE();
    }

    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_mov_Rd_Cd, (X86_MODRM_RM_MASK & bRm) | pIemCpu->uRexB, iCrReg);
}
996
997
/** Opcode 0x0f 0x21.  MOV Rd,Dd - read a debug register into a GPR. */
FNIEMOP_DEF(iemOp_mov_Rd_Dd)
{
    IEMOP_MNEMONIC("mov Rd,Dd");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    /* REX.R with a debug register operand is invalid (\#UD). */
    if (pIemCpu->fPrefixes & IEM_OP_PRF_REX_R)
        return IEMOP_RAISE_INVALID_OPCODE();
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_mov_Rd_Dd,
                                   (X86_MODRM_RM_MASK & bRm) | pIemCpu->uRexB,
                                   ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK));
}
1010
1011
/** Opcode 0x0f 0x22.  MOV Cd,Rd - write a GPR into a control register. */
FNIEMOP_DEF(iemOp_mov_Cd_Rd)
{
    /* mod is ignored, as is operand size overrides. */
    IEMOP_MNEMONIC("mov Cd,Rd");
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
        pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize = IEMMODE_64BIT;
    else
        pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize = IEMMODE_32BIT;

    /** @todo Verify that the invalid lock sequence exception (\#UD) is raised
     *        before the privilege level violation (\#GP). */
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    uint8_t iCrReg = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg;
    if (pIemCpu->fPrefixes & IEM_OP_PRF_LOCK)
    {
        /* The lock prefix can be used to encode CR8 accesses on some CPUs. */
        if (!IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX(X86_CPUID_AMD_FEATURE_ECX_CR8L))
            return IEMOP_RAISE_INVALID_LOCK_PREFIX();
        iCrReg |= 8;
    }
    /* Only CR0, CR2, CR3, CR4 and CR8 are valid destinations; anything else is \#UD. */
    switch (iCrReg)
    {
        case 0: case 2: case 3: case 4: case 8:
            break;
        default:
            return IEMOP_RAISE_INVALID_OPCODE();
    }

    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_mov_Cd_Rd, iCrReg, (X86_MODRM_RM_MASK & bRm) | pIemCpu->uRexB);
}
1043
1044
/** Opcode 0x0f 0x23.  MOV Dd,Rd - write a GPR into a debug register. */
FNIEMOP_DEF(iemOp_mov_Dd_Rd)
{
    IEMOP_MNEMONIC("mov Dd,Rd");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    /* REX.R with a debug register operand is invalid (\#UD). */
    if (pIemCpu->fPrefixes & IEM_OP_PRF_REX_R)
        return IEMOP_RAISE_INVALID_OPCODE();
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_mov_Dd_Rd,
                                   ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK),
                                   (X86_MODRM_RM_MASK & bRm) | pIemCpu->uRexB);
}
1057
1058
/** Opcode 0x0f 0x24. */
FNIEMOP_DEF(iemOp_mov_Rd_Td)
{
    /* MOV Rd,Td - test register reads; treated as an invalid opcode here. */
    IEMOP_MNEMONIC("mov Rd,Td");
/** @todo Is the invalid opcode raised before parsing any R/M byte? */
    return IEMOP_RAISE_INVALID_OPCODE();
}
1066
1067
1068
/** Opcode 0x0f 0x26. */
FNIEMOP_DEF(iemOp_mov_Td_Rd)
{
    /* MOV Td,Rd - test register writes; treated as an invalid opcode here. */
    IEMOP_MNEMONIC("mov Td,Rd");
    return IEMOP_RAISE_INVALID_OPCODE();
}
1075
1076
1077/** Opcode 0x0f 0x28. */
1078FNIEMOP_STUB(iemOp_movaps_Vps_Wps__movapd_Vpd_Wpd);
1079/** Opcode 0x0f 0x29. */
1080FNIEMOP_STUB(iemOp_movaps_Wps_Vps__movapd_Wpd_Vpd);
1081/** Opcode 0x0f 0x2a. */
1082FNIEMOP_STUB(iemOp_cvtpi2ps_Vps_Qpi__cvtpi2pd_Vpd_Qpi__cvtsi2ss_Vss_Ey__cvtsi2sd_Vsd_Ey);
1083/** Opcode 0x0f 0x2b. */
1084FNIEMOP_STUB(iemOp_movntps_Mps_Vps__movntpd_Mpd_Vpd);
1085/** Opcode 0x0f 0x2c. */
1086FNIEMOP_STUB(iemOp_cvttps2pi_Ppi_Wps__cvttpd2pi_Ppi_Wpd__cvttss2si_Gy_Wss__cvttsd2si_Yu_Wsd);
1087/** Opcode 0x0f 0x2d. */
1088FNIEMOP_STUB(iemOp_cvtps2pi_Ppi_Wps__cvtpd2pi_QpiWpd__cvtss2si_Gy_Wss__cvtsd2si_Gy_Wsd);
1089/** Opcode 0x0f 0x2e. */
1090FNIEMOP_STUB(iemOp_ucomiss_Vss_Wss__ucomisd_Vsd_Wsd);
1091/** Opcode 0x0f 0x2f. */
1092FNIEMOP_STUB(iemOp_comiss_Vss_Wss__comisd_Vsd_Wsd);
1093/** Opcode 0x0f 0x30. */
1094FNIEMOP_STUB(iemOp_wrmsr);
1095
1096
/** Opcode 0x0f 0x31. */
FNIEMOP_DEF(iemOp_rdtsc)
{
    /* RDTSC - deferred to the C implementation (iemCImpl_rdtsc). */
    IEMOP_MNEMONIC("rdtsc");
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_rdtsc);
}
1104
1105
/** Opcode 0x0f 0x32. */
1107FNIEMOP_STUB(iemOp_rdmsr);
/** Opcode 0x0f 0x33. */
1109FNIEMOP_STUB(iemOp_rdpmc);
1110/** Opcode 0x0f 0x34. */
1111FNIEMOP_STUB(iemOp_sysenter);
1112/** Opcode 0x0f 0x35. */
1113FNIEMOP_STUB(iemOp_sysexit);
1114/** Opcode 0x0f 0x37. */
1115FNIEMOP_STUB(iemOp_getsec);
1116/** Opcode 0x0f 0x38. */
1117FNIEMOP_STUB(iemOp_3byte_Esc_A4);
1118/** Opcode 0x0f 0x39. */
1119FNIEMOP_STUB(iemOp_3byte_Esc_A5);
1120/** Opcode 0x0f 0x3c (?). */
1121FNIEMOP_STUB(iemOp_movnti_Gv_Ev);
1122
1123/**
1124 * Implements a conditional move.
1125 *
1126 * Wish there was an obvious way to do this where we could share and reduce
1127 * code bloat.
1128 *
1129 * @param a_Cnd The conditional "microcode" operation.
1130 */
1131#define CMOV_X(a_Cnd) \
1132 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm); \
1133 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT)) \
1134 { \
1135 switch (pIemCpu->enmEffOpSize) \
1136 { \
1137 case IEMMODE_16BIT: \
1138 IEM_MC_BEGIN(0, 1); \
1139 IEM_MC_LOCAL(uint16_t, u16Tmp); \
1140 a_Cnd { \
1141 IEM_MC_FETCH_GREG_U16(u16Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB); \
1142 IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Tmp); \
1143 } IEM_MC_ENDIF(); \
1144 IEM_MC_ADVANCE_RIP(); \
1145 IEM_MC_END(); \
1146 return VINF_SUCCESS; \
1147 \
1148 case IEMMODE_32BIT: \
1149 IEM_MC_BEGIN(0, 1); \
1150 IEM_MC_LOCAL(uint32_t, u32Tmp); \
1151 a_Cnd { \
1152 IEM_MC_FETCH_GREG_U32(u32Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB); \
1153 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Tmp); \
1154 } IEM_MC_ELSE() { \
1155 IEM_MC_CLEAR_HIGH_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg); \
1156 } IEM_MC_ENDIF(); \
1157 IEM_MC_ADVANCE_RIP(); \
1158 IEM_MC_END(); \
1159 return VINF_SUCCESS; \
1160 \
1161 case IEMMODE_64BIT: \
1162 IEM_MC_BEGIN(0, 1); \
1163 IEM_MC_LOCAL(uint64_t, u64Tmp); \
1164 a_Cnd { \
1165 IEM_MC_FETCH_GREG_U64(u64Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB); \
1166 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Tmp); \
1167 } IEM_MC_ENDIF(); \
1168 IEM_MC_ADVANCE_RIP(); \
1169 IEM_MC_END(); \
1170 return VINF_SUCCESS; \
1171 \
1172 IEM_NOT_REACHED_DEFAULT_CASE_RET(); \
1173 } \
1174 } \
1175 else \
1176 { \
1177 switch (pIemCpu->enmEffOpSize) \
1178 { \
1179 case IEMMODE_16BIT: \
1180 IEM_MC_BEGIN(0, 2); \
1181 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc); \
1182 IEM_MC_LOCAL(uint16_t, u16Tmp); \
1183 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm); \
1184 IEM_MC_FETCH_MEM_U16(u16Tmp, pIemCpu->iEffSeg, GCPtrEffSrc); \
1185 a_Cnd { \
1186 IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Tmp); \
1187 } IEM_MC_ENDIF(); \
1188 IEM_MC_ADVANCE_RIP(); \
1189 IEM_MC_END(); \
1190 return VINF_SUCCESS; \
1191 \
1192 case IEMMODE_32BIT: \
1193 IEM_MC_BEGIN(0, 2); \
1194 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc); \
1195 IEM_MC_LOCAL(uint32_t, u32Tmp); \
1196 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm); \
1197 IEM_MC_FETCH_MEM_U32(u32Tmp, pIemCpu->iEffSeg, GCPtrEffSrc); \
1198 a_Cnd { \
1199 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Tmp); \
1200 } IEM_MC_ELSE() { \
1201 IEM_MC_CLEAR_HIGH_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg); \
1202 } IEM_MC_ENDIF(); \
1203 IEM_MC_ADVANCE_RIP(); \
1204 IEM_MC_END(); \
1205 return VINF_SUCCESS; \
1206 \
1207 case IEMMODE_64BIT: \
1208 IEM_MC_BEGIN(0, 2); \
1209 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc); \
1210 IEM_MC_LOCAL(uint64_t, u64Tmp); \
1211 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm); \
1212 IEM_MC_FETCH_MEM_U64(u64Tmp, pIemCpu->iEffSeg, GCPtrEffSrc); \
1213 a_Cnd { \
1214 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Tmp); \
1215 } IEM_MC_ENDIF(); \
1216 IEM_MC_ADVANCE_RIP(); \
1217 IEM_MC_END(); \
1218 return VINF_SUCCESS; \
1219 \
1220 IEM_NOT_REACHED_DEFAULT_CASE_RET(); \
1221 } \
1222 } do {} while (0)
1223
1224
1225
/** Opcode 0x0f 0x40. */
FNIEMOP_DEF(iemOp_cmovo_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovo Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF)); /* move if OF=1 */
}
1232
1233
/** Opcode 0x0f 0x41. */
FNIEMOP_DEF(iemOp_cmovno_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovno Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_NOT_SET(X86_EFL_OF)); /* move if OF=0 */
}
1240
1241
/** Opcode 0x0f 0x42. */
FNIEMOP_DEF(iemOp_cmovc_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovc Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF)); /* move if CF=1 */
}
1248
1249
/** Opcode 0x0f 0x43. */
FNIEMOP_DEF(iemOp_cmovnc_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovnc Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_NOT_SET(X86_EFL_CF)); /* move if CF=0 */
}
1256
1257
/** Opcode 0x0f 0x44. */
FNIEMOP_DEF(iemOp_cmove_Gv_Ev)
{
    IEMOP_MNEMONIC("cmove Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF)); /* move if ZF=1 */
}
1264
1265
/** Opcode 0x0f 0x45. */
FNIEMOP_DEF(iemOp_cmovne_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovne Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_NOT_SET(X86_EFL_ZF)); /* move if ZF=0 */
}
1272
1273
/** Opcode 0x0f 0x46. */
FNIEMOP_DEF(iemOp_cmovbe_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovbe Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF)); /* move if CF=1 or ZF=1 */
}
1280
1281
/** Opcode 0x0f 0x47. */
FNIEMOP_DEF(iemOp_cmovnbe_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovnbe Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_NO_BITS_SET(X86_EFL_CF | X86_EFL_ZF)); /* move if CF=0 and ZF=0 */
}
1288
1289
/** Opcode 0x0f 0x48. */
FNIEMOP_DEF(iemOp_cmovs_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovs Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF)); /* move if SF=1 */
}
1296
1297
/** Opcode 0x0f 0x49. */
FNIEMOP_DEF(iemOp_cmovns_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovns Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_NOT_SET(X86_EFL_SF)); /* move if SF=0 */
}
1304
1305
/** Opcode 0x0f 0x4a. */
FNIEMOP_DEF(iemOp_cmovp_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovp Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF)); /* move if PF=1 */
}
1312
1313
/** Opcode 0x0f 0x4b. */
FNIEMOP_DEF(iemOp_cmovnp_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovnp Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_NOT_SET(X86_EFL_PF)); /* move if PF=0 */
}
1320
1321
/** Opcode 0x0f 0x4c. */
FNIEMOP_DEF(iemOp_cmovl_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovl Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF)); /* move if SF!=OF */
}
1328
1329
/** Opcode 0x0f 0x4d. */
FNIEMOP_DEF(iemOp_cmovnl_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovnl Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BITS_EQ(X86_EFL_SF, X86_EFL_OF)); /* move if SF==OF */
}
1336
1337
/** Opcode 0x0f 0x4e. */
FNIEMOP_DEF(iemOp_cmovle_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovle Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF)); /* move if ZF=1 or SF!=OF */
}
1344
1345
/** Opcode 0x0f 0x4f. */
FNIEMOP_DEF(iemOp_cmovnle_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovnle Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF)); /* move if ZF=0 and SF==OF */
}
1352
1353#undef CMOV_X
1354
1355/** Opcode 0x0f 0x50. */
1356FNIEMOP_STUB(iemOp_movmskps_Gy_Ups__movmskpd_Gy_Upd);
1357/** Opcode 0x0f 0x51. */
1358FNIEMOP_STUB(iemOp_sqrtps_Wps_Vps__sqrtpd_Wpd_Vpd__sqrtss_Vss_Wss__sqrtsd_Vsd_Wsd);
1359/** Opcode 0x0f 0x52. */
1360FNIEMOP_STUB(iemOp_rsqrtps_Wps_Vps__rsqrtss_Vss_Wss);
1361/** Opcode 0x0f 0x53. */
1362FNIEMOP_STUB(iemOp_rcpps_Wps_Vps__rcpss_Vs_Wss);
1363/** Opcode 0x0f 0x54. */
1364FNIEMOP_STUB(iemOp_andps_Vps_Wps__andpd_Wpd_Vpd);
1365/** Opcode 0x0f 0x55. */
1366FNIEMOP_STUB(iemOp_andnps_Vps_Wps__andnpd_Wpd_Vpd);
1367/** Opcode 0x0f 0x56. */
1368FNIEMOP_STUB(iemOp_orps_Wpd_Vpd__orpd_Wpd_Vpd);
1369/** Opcode 0x0f 0x57. */
1370FNIEMOP_STUB(iemOp_xorps_Vps_Wps__xorpd_Wpd_Vpd);
1371/** Opcode 0x0f 0x58. */
1372FNIEMOP_STUB(iemOp_addps_Vps_Wps__addpd_Vpd_Wpd__addss_Vss_Wss__addsd_Vsd_Wsd);
1373/** Opcode 0x0f 0x59. */
1374FNIEMOP_STUB(iemOp_mulps_Vps_Wps__mulpd_Vpd_Wpd__mulss_Vss__Wss__mulsd_Vsd_Wsd);
1375/** Opcode 0x0f 0x5a. */
1376FNIEMOP_STUB(iemOp_cvtps2pd_Vpd_Wps__cvtpd2ps_Vps_Wpd__cvtss2sd_Vsd_Wss__cvtsd2ss_Vss_Wsd);
1377/** Opcode 0x0f 0x5b. */
1378FNIEMOP_STUB(iemOp_cvtdq2ps_Vps_Wdq__cvtps2dq_Vdq_Wps__cvtps2dq_Vdq_Wps);
1379/** Opcode 0x0f 0x5c. */
1380FNIEMOP_STUB(iemOp_subps_Vps_Wps__subpd_Vps_Wdp__subss_Vss_Wss__subsd_Vsd_Wsd);
1381/** Opcode 0x0f 0x5d. */
1382FNIEMOP_STUB(iemOp_minps_Vps_Wps__minpd_Vpd_Wpd__minss_Vss_Wss__minsd_Vsd_Wsd);
1383/** Opcode 0x0f 0x5e. */
1384FNIEMOP_STUB(iemOp_divps_Vps_Wps__divpd_Vpd_Wpd__divss_Vss_Wss__divsd_Vsd_Wsd);
1385/** Opcode 0x0f 0x5f. */
1386FNIEMOP_STUB(iemOp_maxps_Vps_Wps__maxpd_Vpd_Wpd__maxss_Vss_Wss__maxsd_Vsd_Wsd);
1387/** Opcode 0x0f 0x60. */
1388FNIEMOP_STUB(iemOp_punpcklbw_Pq_Qd__punpcklbw_Vdq_Wdq);
1389/** Opcode 0x0f 0x61. */
1390FNIEMOP_STUB(iemOp_punpcklwd_Pq_Qd__punpcklwd_Vdq_Wdq);
1391/** Opcode 0x0f 0x62. */
1392FNIEMOP_STUB(iemOp_punpckldq_Pq_Qd__punpckldq_Vdq_Wdq);
1393/** Opcode 0x0f 0x63. */
1394FNIEMOP_STUB(iemOp_packsswb_Pq_Qq__packsswb_Vdq_Wdq);
1395/** Opcode 0x0f 0x64. */
1396FNIEMOP_STUB(iemOp_pcmpgtb_Pq_Qq__pcmpgtb_Vdq_Wdq);
1397/** Opcode 0x0f 0x65. */
1398FNIEMOP_STUB(iemOp_pcmpgtw_Pq_Qq__pcmpgtw_Vdq_Wdq);
1399/** Opcode 0x0f 0x66. */
1400FNIEMOP_STUB(iemOp_pcmpgtd_Pq_Qq__pcmpgtd_Vdq_Wdq);
1401/** Opcode 0x0f 0x67. */
1402FNIEMOP_STUB(iemOp_packuswb_Pq_Qq__packuswb_Vdq_Wdq);
1403/** Opcode 0x0f 0x68. */
1404FNIEMOP_STUB(iemOp_punpckhbw_Pq_Qq__punpckhbw_Vdq_Wdq);
1405/** Opcode 0x0f 0x69. */
1406FNIEMOP_STUB(iemOp_punpckhwd_Pq_Qd__punpckhwd_Vdq_Wdq);
1407/** Opcode 0x0f 0x6a. */
1408FNIEMOP_STUB(iemOp_punpckhdq_Pq_Qd__punpckhdq_Vdq_Wdq);
1409/** Opcode 0x0f 0x6b. */
1410FNIEMOP_STUB(iemOp_packssdw_Pq_Qd__packssdq_Vdq_Wdq);
1411/** Opcode 0x0f 0x6c. */
1412FNIEMOP_STUB(iemOp_punpcklqdq_Vdq_Wdq);
1413/** Opcode 0x0f 0x6d. */
1414FNIEMOP_STUB(iemOp_punpckhqdq_Vdq_Wdq);
1415/** Opcode 0x0f 0x6e. */
1416FNIEMOP_STUB(iemOp_movd_q_Pd_Ey__movd_q_Vy_Ey);
1417/** Opcode 0x0f 0x6f. */
1418FNIEMOP_STUB(iemOp_movq_Pq_Qq__movdqa_Vdq_Wdq__movdqu_Vdq_Wdq);
1419/** Opcode 0x0f 0x70. */
1420FNIEMOP_STUB(iemOp_pshufw_Pq_Qq_Ib__pshufd_Vdq_Wdq_Ib__pshufhw_Vdq_Wdq_Ib__pshuflq_Vdq_Wdq_Ib);
1421/** Opcode 0x0f 0x71. */
1422FNIEMOP_STUB(iemOp_Grp12);
1423/** Opcode 0x0f 0x72. */
1424FNIEMOP_STUB(iemOp_Grp13);
1425/** Opcode 0x0f 0x73. */
1426FNIEMOP_STUB(iemOp_Grp14);
1427/** Opcode 0x0f 0x74. */
1428FNIEMOP_STUB(iemOp_pcmpeqb_Pq_Qq__pcmpeqb_Vdq_Wdq);
1429/** Opcode 0x0f 0x75. */
1430FNIEMOP_STUB(iemOp_pcmpeqw_Pq_Qq__pcmpeqw_Vdq_Wdq);
1431/** Opcode 0x0f 0x76. */
1432FNIEMOP_STUB(iemOp_pcmped_Pq_Qq__pcmpeqd_Vdq_Wdq);
1433/** Opcode 0x0f 0x77. */
1434FNIEMOP_STUB(iemOp_emms);
1435/** Opcode 0x0f 0x78. */
1436FNIEMOP_STUB(iemOp_vmread);
1437/** Opcode 0x0f 0x79. */
1438FNIEMOP_STUB(iemOp_vmwrite);
1439/** Opcode 0x0f 0x7c. */
1440FNIEMOP_STUB(iemOp_haddpd_Vdp_Wpd__haddps_Vps_Wps);
1441/** Opcode 0x0f 0x7d. */
1442FNIEMOP_STUB(iemOp_hsubpd_Vpd_Wpd__hsubps_Vps_Wps);
1443/** Opcode 0x0f 0x7e. */
1444FNIEMOP_STUB(iemOp_movd_q_Ey_Pd__movd_q_Ey_Vy__movq_Vq_Wq);
1445/** Opcode 0x0f 0x7f. */
1446FNIEMOP_STUB(iemOp_movq_Qq_Pq__movq_movdqa_Wdq_Vdq__movdqu_Wdq_Vdq);
1447
1448
/** Opcode 0x0f 0x80. Jump near (rel16/rel32) if OF=1. */
FNIEMOP_DEF(iemOp_jo_Jv)
{
    IEMOP_MNEMONIC("jo Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); /* operand size defaults to 64-bit in long mode */
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        /* rel16 displacement */
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        /* rel32 displacement */
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
1482
1483
/** Opcode 0x0f 0x81. Jump near (rel16/rel32) if OF=0. */
FNIEMOP_DEF(iemOp_jno_Jv)
{
    IEMOP_MNEMONIC("jno Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); /* operand size defaults to 64-bit in long mode */
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        /* rel16 displacement */
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        /* rel32 displacement */
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
1517
1518
/** Opcode 0x0f 0x82. Jump near (rel16/rel32) if CF=1. */
FNIEMOP_DEF(iemOp_jc_Jv)
{
    IEMOP_MNEMONIC("jc/jb/jnae Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); /* operand size defaults to 64-bit in long mode */
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        /* rel16 displacement */
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        /* rel32 displacement */
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
1552
1553
/** Opcode 0x0f 0x83. Jump near (rel16/rel32) if CF=0. */
FNIEMOP_DEF(iemOp_jnc_Jv)
{
    IEMOP_MNEMONIC("jnc/jnb/jae Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); /* operand size defaults to 64-bit in long mode */
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        /* rel16 displacement */
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        /* rel32 displacement */
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
1587
1588
/** Opcode 0x0f 0x84. Jump near (rel16/rel32) if ZF=1. */
FNIEMOP_DEF(iemOp_je_Jv)
{
    IEMOP_MNEMONIC("je/jz Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); /* operand size defaults to 64-bit in long mode */
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        /* rel16 displacement */
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        /* rel32 displacement */
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
1622
1623
/** Opcode 0x0f 0x85. Jump near (rel16/rel32) if ZF=0. */
FNIEMOP_DEF(iemOp_jne_Jv)
{
    IEMOP_MNEMONIC("jne/jnz Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); /* operand size defaults to 64-bit in long mode */
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        /* rel16 displacement */
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        /* rel32 displacement */
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
1657
1658
/** Opcode 0x0f 0x86. Jump near (rel16/rel32) if CF=1 or ZF=1. */
FNIEMOP_DEF(iemOp_jbe_Jv)
{
    IEMOP_MNEMONIC("jbe/jna Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); /* operand size defaults to 64-bit in long mode */
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        /* rel16 displacement */
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        /* rel32 displacement */
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
1692
1693
/** Opcode 0x0f 0x87. Jump near (rel16/rel32) if CF=0 and ZF=0. */
FNIEMOP_DEF(iemOp_jnbe_Jv)
{
    IEMOP_MNEMONIC("jnbe/ja Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); /* operand size defaults to 64-bit in long mode */
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        /* rel16 displacement */
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        /* rel32 displacement */
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
1727
1728
/** Opcode 0x0f 0x88. Jump near (rel16/rel32) if SF=1. */
FNIEMOP_DEF(iemOp_js_Jv)
{
    IEMOP_MNEMONIC("js Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); /* operand size defaults to 64-bit in long mode */
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        /* rel16 displacement */
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        /* rel32 displacement */
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
1762
1763
/** Opcode 0x0f 0x89. Jump near (rel16/rel32) if SF=0. */
FNIEMOP_DEF(iemOp_jns_Jv)
{
    IEMOP_MNEMONIC("jns Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); /* operand size defaults to 64-bit in long mode */
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        /* rel16 displacement */
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        /* rel32 displacement */
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
1797
1798
/** Opcode 0x0f 0x8a. Jump near (rel16/rel32) if PF=1. */
FNIEMOP_DEF(iemOp_jp_Jv)
{
    IEMOP_MNEMONIC("jp Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); /* operand size defaults to 64-bit in long mode */
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        /* rel16 displacement */
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        /* rel32 displacement */
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
1832
1833
1834/** Opcode 0x0f 0x8b. */
1835FNIEMOP_DEF(iemOp_jnp_Jv)
1836{
1837 IEMOP_MNEMONIC("jo Jv");
1838 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
1839 if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
1840 {
1841 int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
1842 IEMOP_HLP_NO_LOCK_PREFIX();
1843
1844 IEM_MC_BEGIN(0, 0);
1845 IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
1846 IEM_MC_ADVANCE_RIP();
1847 } IEM_MC_ELSE() {
1848 IEM_MC_REL_JMP_S16(i16Imm);
1849 } IEM_MC_ENDIF();
1850 IEM_MC_END();
1851 }
1852 else
1853 {
1854 int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
1855 IEMOP_HLP_NO_LOCK_PREFIX();
1856
1857 IEM_MC_BEGIN(0, 0);
1858 IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
1859 IEM_MC_ADVANCE_RIP();
1860 } IEM_MC_ELSE() {
1861 IEM_MC_REL_JMP_S32(i32Imm);
1862 } IEM_MC_ENDIF();
1863 IEM_MC_END();
1864 }
1865 return VINF_SUCCESS;
1866}
1867
1868
/** Opcode 0x0f 0x8c. Jump near (rel16/rel32) if SF!=OF. */
FNIEMOP_DEF(iemOp_jl_Jv)
{
    IEMOP_MNEMONIC("jl/jnge Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); /* operand size defaults to 64-bit in long mode */
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        /* rel16 displacement */
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        /* rel32 displacement */
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
1902
1903
/** Opcode 0x0f 0x8d. Jump near (rel16/rel32) if SF==OF. */
FNIEMOP_DEF(iemOp_jnl_Jv)
{
    IEMOP_MNEMONIC("jnl/jge Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); /* operand size defaults to 64-bit in long mode */
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        /* rel16 displacement */
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        /* rel32 displacement */
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
1937
1938
/** Opcode 0x0f 0x8e. Jump near (rel16/rel32) if ZF=1 or SF!=OF. */
FNIEMOP_DEF(iemOp_jle_Jv)
{
    IEMOP_MNEMONIC("jle/jng Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); /* operand size defaults to 64-bit in long mode */
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        /* rel16 displacement */
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        /* rel32 displacement */
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
1972
1973
/** Opcode 0x0f 0x8f. Jump near (rel16/rel32) if ZF=0 and SF==OF. */
FNIEMOP_DEF(iemOp_jnle_Jv)
{
    IEMOP_MNEMONIC("jnle/jg Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); /* operand size defaults to 64-bit in long mode */
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        /* rel16 displacement */
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        /* rel32 displacement */
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
2007
2008
/** Opcode 0x0f 0x90. Set byte to 1 if OF=1, else 0. */
FNIEMOP_DEF(iemOp_seto_Eb)
{
    IEMOP_MNEMONIC("seto Eb");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */

    /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
     * any way. AMD says it's "unused", whatever that means. We're
     * ignoring for now. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register target */
        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory target */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
2047
2048
/** Opcode 0x0f 0x91. Set byte to 1 if OF=0, else 0. */
FNIEMOP_DEF(iemOp_setno_Eb)
{
    IEMOP_MNEMONIC("setno Eb");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */

    /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
     * any way. AMD says it's "unused", whatever that means. We're
     * ignoring for now. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register target */
        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory target */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
2087
2088
/** Opcode 0x0f 0x92. Set byte to 1 if CF=1, else 0. */
FNIEMOP_DEF(iemOp_setc_Eb)
{
    IEMOP_MNEMONIC("setc Eb");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */

    /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
     * any way. AMD says it's "unused", whatever that means. We're
     * ignoring for now. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register target */
        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory target */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
2127
2128
/** Opcode 0x0f 0x93. Set byte to 1 if CF=0, else 0. */
FNIEMOP_DEF(iemOp_setnc_Eb)
{
    IEMOP_MNEMONIC("setnc Eb");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */

    /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
     * any way. AMD says it's "unused", whatever that means. We're
     * ignoring for now. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register target */
        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory target */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
2167
2168
/** Opcode 0x0f 0x94. Set byte to 1 if ZF=1, else 0. */
FNIEMOP_DEF(iemOp_sete_Eb)
{
    IEMOP_MNEMONIC("sete Eb");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */

    /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
     * any way. AMD says it's "unused", whatever that means. We're
     * ignoring for now. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register target */
        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory target */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
2207
2208
/**
 * Opcode 0x0f 0x95.
 * SETNE/SETNZ Eb: set the byte destination to 1 if ZF=0, else to 0.
 */
FNIEMOP_DEF(iemOp_setne_Eb)
{
    IEMOP_MNEMONIC("setne Eb");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */

    /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
     *        any way. AMD says it's "unused", whatever that means.  We're
     *        ignoring for now. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register target - inverted condition: ZF set means store 0. */
        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory target - same inverted test as the register path. */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
2247
2248
/**
 * Opcode 0x0f 0x96.
 * SETBE/SETNA Eb: set the byte destination to 1 if CF=1 or ZF=1, else to 0.
 */
FNIEMOP_DEF(iemOp_setbe_Eb)
{
    IEMOP_MNEMONIC("setbe Eb");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */

    /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
     *        any way. AMD says it's "unused", whatever that means.  We're
     *        ignoring for now. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register target */
        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory target */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
        IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
2287
2288
/**
 * Opcode 0x0f 0x97.
 * SETNBE/SETA Eb: set the byte destination to 1 if CF=0 and ZF=0, else to 0.
 */
FNIEMOP_DEF(iemOp_setnbe_Eb)
{
    IEMOP_MNEMONIC("setnbe Eb");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */

    /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
     *        any way. AMD says it's "unused", whatever that means.  We're
     *        ignoring for now. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register target - inverted condition: any of CF/ZF set means store 0. */
        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory target - same inverted test as the register path. */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
        IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
2327
2328
/**
 * Opcode 0x0f 0x98.
 * SETS Eb: set the byte destination to 1 if SF=1, else to 0.
 */
FNIEMOP_DEF(iemOp_sets_Eb)
{
    IEMOP_MNEMONIC("sets Eb");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */

    /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
     *        any way. AMD says it's "unused", whatever that means.  We're
     *        ignoring for now. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register target */
        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory target */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
2367
2368
/**
 * Opcode 0x0f 0x99.
 * SETNS Eb: set the byte destination to 1 if SF=0, else to 0.
 */
FNIEMOP_DEF(iemOp_setns_Eb)
{
    IEMOP_MNEMONIC("setns Eb");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */

    /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
     *        any way. AMD says it's "unused", whatever that means.  We're
     *        ignoring for now. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register target - inverted condition: SF set means store 0. */
        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory target - same inverted test as the register path. */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
2407
2408
2409/** Opcode 0x0f 0x9a. */
2410FNIEMOP_DEF(iemOp_setp_Eb)
2411{
2412 IEMOP_MNEMONIC("setnp Eb");
2413 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
2414 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
2415
2416 /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
2417 * any way. AMD says it's "unused", whatever that means. We're
2418 * ignoring for now. */
2419 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
2420 {
2421 /* register target */
2422 IEM_MC_BEGIN(0, 0);
2423 IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
2424 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
2425 } IEM_MC_ELSE() {
2426 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
2427 } IEM_MC_ENDIF();
2428 IEM_MC_ADVANCE_RIP();
2429 IEM_MC_END();
2430 }
2431 else
2432 {
2433 /* memory target */
2434 IEM_MC_BEGIN(0, 1);
2435 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
2436 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
2437 IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
2438 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
2439 } IEM_MC_ELSE() {
2440 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
2441 } IEM_MC_ENDIF();
2442 IEM_MC_ADVANCE_RIP();
2443 IEM_MC_END();
2444 }
2445 return VINF_SUCCESS;
2446}
2447
2448
/**
 * Opcode 0x0f 0x9b.
 * SETNP/SETPO Eb: set the byte destination to 1 if PF=0, else to 0.
 */
FNIEMOP_DEF(iemOp_setnp_Eb)
{
    IEMOP_MNEMONIC("setnp Eb");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */

    /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
     *        any way. AMD says it's "unused", whatever that means.  We're
     *        ignoring for now. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register target - inverted condition: PF set means store 0. */
        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory target - same inverted test as the register path. */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
2487
2488
/**
 * Opcode 0x0f 0x9c.
 * SETL/SETNGE Eb: set the byte destination to 1 if SF != OF, else to 0.
 */
FNIEMOP_DEF(iemOp_setl_Eb)
{
    IEMOP_MNEMONIC("setl Eb");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */

    /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
     *        any way. AMD says it's "unused", whatever that means.  We're
     *        ignoring for now. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register target */
        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory target */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
        IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
2527
2528
/**
 * Opcode 0x0f 0x9d.
 * SETNL/SETGE Eb: set the byte destination to 1 if SF == OF, else to 0.
 */
FNIEMOP_DEF(iemOp_setnl_Eb)
{
    IEMOP_MNEMONIC("setnl Eb");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */

    /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
     *        any way. AMD says it's "unused", whatever that means.  We're
     *        ignoring for now. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register target - inverted condition: SF != OF means store 0. */
        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory target - same inverted test as the register path. */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
        IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
2567
2568
/**
 * Opcode 0x0f 0x9e.
 * SETLE/SETNG Eb: set the byte destination to 1 if ZF=1 or SF != OF, else to 0.
 */
FNIEMOP_DEF(iemOp_setle_Eb)
{
    IEMOP_MNEMONIC("setle Eb");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */

    /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
     *        any way. AMD says it's "unused", whatever that means.  We're
     *        ignoring for now. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register target */
        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory target */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
        IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
2607
2608
/**
 * Opcode 0x0f 0x9f.
 * SETNLE/SETG Eb: set the byte destination to 1 if ZF=0 and SF == OF, else to 0.
 */
FNIEMOP_DEF(iemOp_setnle_Eb)
{
    IEMOP_MNEMONIC("setnle Eb");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */

    /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
     *        any way. AMD says it's "unused", whatever that means.  We're
     *        ignoring for now. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register target - inverted condition: ZF set or SF != OF means store 0. */
        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory target - same inverted test as the register path. */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
        IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
2647
2648
/**
 * Common 'push segment-register' helper.
 *
 * Pushes the 16-bit selector value of the given segment register, zero
 * extended to the current effective operand size for 32/64-bit pushes.
 *
 * @param   iReg    The segment register to push (X86_SREG_XXX).
 */
FNIEMOP_DEF_1(iemOpCommonPushSReg, uint8_t, iReg)
{
    IEMOP_HLP_NO_LOCK_PREFIX();
    if (iReg < X86_SREG_FS)
        IEMOP_HLP_NO_64BIT();   /* push es/cs/ss/ds are invalid in 64-bit mode; only fs/gs remain. */
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); /* stack operations default to 64-bit operand size in long mode. */

    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            IEM_MC_BEGIN(0, 1);
            IEM_MC_LOCAL(uint16_t, u16Value);
            IEM_MC_FETCH_SREG_U16(u16Value, iReg);
            IEM_MC_PUSH_U16(u16Value);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            break;

        case IEMMODE_32BIT:
            IEM_MC_BEGIN(0, 1);
            IEM_MC_LOCAL(uint32_t, u32Value);
            IEM_MC_FETCH_SREG_ZX_U32(u32Value, iReg); /* zero extend the selector */
            IEM_MC_PUSH_U32(u32Value);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            break;

        case IEMMODE_64BIT:
            IEM_MC_BEGIN(0, 1);
            IEM_MC_LOCAL(uint64_t, u64Value);
            IEM_MC_FETCH_SREG_ZX_U64(u64Value, iReg); /* zero extend the selector */
            IEM_MC_PUSH_U64(u64Value);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            break;
    }

    return VINF_SUCCESS;
}
2691
2692
/** Opcode 0x0f 0xa0.  PUSH FS - defers to the common segment-push worker. */
FNIEMOP_DEF(iemOp_push_fs)
{
    IEMOP_MNEMONIC("push fs");
    IEMOP_HLP_NO_LOCK_PREFIX(); /* also checked by the common worker; harmless redundancy. */
    return FNIEMOP_CALL_1(iemOpCommonPushSReg, X86_SREG_FS);
}
2700
2701
/** Opcode 0x0f 0xa1.  POP FS - deferred to the C implementation (segment
 *  loading involves descriptor checks and possible exceptions). */
FNIEMOP_DEF(iemOp_pop_fs)
{
    IEMOP_MNEMONIC("pop fs");
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_pop_Sreg, X86_SREG_FS, pIemCpu->enmEffOpSize);
}
2709
2710
/** Opcode 0x0f 0xa2.  CPUID - deferred to the C implementation. */
FNIEMOP_DEF(iemOp_cpuid)
{
    IEMOP_MNEMONIC("cpuid");
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_cpuid);
}
2718
2719
2720/**
2721 * Common worker for iemOp_bt_Ev_Gv, iemOp_btc_Ev_Gv, iemOp_btr_Ev_Gv and
2722 * iemOp_bts_Ev_Gv.
2723 */
2724FNIEMOP_DEF_1(iemOpCommonBit_Ev_Gv, PCIEMOPBINSIZES, pImpl)
2725{
2726 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
2727 IEMOP_HLP_NO_LOCK_PREFIX();
2728 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);
2729
2730 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
2731 {
2732 /* register destination. */
2733 IEMOP_HLP_NO_LOCK_PREFIX();
2734 switch (pIemCpu->enmEffOpSize)
2735 {
2736 case IEMMODE_16BIT:
2737 IEM_MC_BEGIN(3, 0);
2738 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
2739 IEM_MC_ARG(uint16_t, u16Src, 1);
2740 IEM_MC_ARG(uint32_t *, pEFlags, 2);
2741
2742 IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
2743 IEM_MC_AND_LOCAL_U16(u16Src, 0xf);
2744 IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
2745 IEM_MC_REF_EFLAGS(pEFlags);
2746 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
2747
2748 IEM_MC_ADVANCE_RIP();
2749 IEM_MC_END();
2750 return VINF_SUCCESS;
2751
2752 case IEMMODE_32BIT:
2753 IEM_MC_BEGIN(3, 0);
2754 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
2755 IEM_MC_ARG(uint32_t, u32Src, 1);
2756 IEM_MC_ARG(uint32_t *, pEFlags, 2);
2757
2758 IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
2759 IEM_MC_AND_LOCAL_U32(u32Src, 0x1f);
2760 IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
2761 IEM_MC_REF_EFLAGS(pEFlags);
2762 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
2763
2764 IEM_MC_ADVANCE_RIP();
2765 IEM_MC_END();
2766 return VINF_SUCCESS;
2767
2768 case IEMMODE_64BIT:
2769 IEM_MC_BEGIN(3, 0);
2770 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
2771 IEM_MC_ARG(uint64_t, u64Src, 1);
2772 IEM_MC_ARG(uint32_t *, pEFlags, 2);
2773
2774 IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
2775 IEM_MC_AND_LOCAL_U64(u64Src, 0x3f);
2776 IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
2777 IEM_MC_REF_EFLAGS(pEFlags);
2778 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
2779
2780 IEM_MC_ADVANCE_RIP();
2781 IEM_MC_END();
2782 return VINF_SUCCESS;
2783
2784 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2785 }
2786 }
2787 else
2788 {
2789 /* memory destination. */
2790
2791 uint32_t fAccess;
2792 if (pImpl->pfnLockedU16)
2793 fAccess = IEM_ACCESS_DATA_RW;
2794 else /* BT */
2795 {
2796 IEMOP_HLP_NO_LOCK_PREFIX();
2797 fAccess = IEM_ACCESS_DATA_R;
2798 }
2799
2800 /** @todo test negative bit offsets! */
2801 switch (pIemCpu->enmEffOpSize)
2802 {
2803 case IEMMODE_16BIT:
2804 IEM_MC_BEGIN(3, 2);
2805 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
2806 IEM_MC_ARG(uint16_t, u16Src, 1);
2807 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
2808 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
2809 IEM_MC_LOCAL(int16_t, i16AddrAdj);
2810
2811 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
2812 IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
2813 IEM_MC_ASSIGN(i16AddrAdj, u16Src);
2814 IEM_MC_AND_ARG_U16(u16Src, 0x0f);
2815 IEM_MC_SAR_LOCAL_S16(i16AddrAdj, 4);
2816 IEM_MC_SAR_LOCAL_S16(i16AddrAdj, 1);
2817 IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(GCPtrEffDst, i16AddrAdj);
2818 IEM_MC_FETCH_EFLAGS(EFlags);
2819
2820 IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
2821 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
2822 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
2823 else
2824 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU16, pu16Dst, u16Src, pEFlags);
2825 IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
2826
2827 IEM_MC_COMMIT_EFLAGS(EFlags);
2828 IEM_MC_ADVANCE_RIP();
2829 IEM_MC_END();
2830 return VINF_SUCCESS;
2831
2832 case IEMMODE_32BIT:
2833 IEM_MC_BEGIN(3, 2);
2834 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
2835 IEM_MC_ARG(uint32_t, u32Src, 1);
2836 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
2837 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
2838 IEM_MC_LOCAL(int32_t, i32AddrAdj);
2839
2840 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
2841 IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
2842 IEM_MC_ASSIGN(i32AddrAdj, u32Src);
2843 IEM_MC_AND_ARG_U32(u32Src, 0x1f);
2844 IEM_MC_SAR_LOCAL_S32(i32AddrAdj, 5);
2845 IEM_MC_SHL_LOCAL_S32(i32AddrAdj, 2);
2846 IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(GCPtrEffDst, i32AddrAdj);
2847 IEM_MC_FETCH_EFLAGS(EFlags);
2848
2849 IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
2850 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
2851 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
2852 else
2853 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU32, pu32Dst, u32Src, pEFlags);
2854 IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
2855
2856 IEM_MC_COMMIT_EFLAGS(EFlags);
2857 IEM_MC_ADVANCE_RIP();
2858 IEM_MC_END();
2859 return VINF_SUCCESS;
2860
2861 case IEMMODE_64BIT:
2862 IEM_MC_BEGIN(3, 2);
2863 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
2864 IEM_MC_ARG(uint64_t, u64Src, 1);
2865 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
2866 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
2867 IEM_MC_LOCAL(int64_t, i64AddrAdj);
2868
2869 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
2870 IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
2871 IEM_MC_ASSIGN(i64AddrAdj, u64Src);
2872 IEM_MC_AND_ARG_U64(u64Src, 0x3f);
2873 IEM_MC_SAR_LOCAL_S64(i64AddrAdj, 6);
2874 IEM_MC_SHL_LOCAL_S64(i64AddrAdj, 3);
2875 IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(GCPtrEffDst, i64AddrAdj);
2876 IEM_MC_FETCH_EFLAGS(EFlags);
2877
2878 IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
2879 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
2880 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
2881 else
2882 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU64, pu64Dst, u64Src, pEFlags);
2883 IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, IEM_ACCESS_DATA_RW);
2884
2885 IEM_MC_COMMIT_EFLAGS(EFlags);
2886 IEM_MC_ADVANCE_RIP();
2887 IEM_MC_END();
2888 return VINF_SUCCESS;
2889
2890 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2891 }
2892 }
2893}
2894
2895
2896/** Opcode 0x0f 0xa3. */
2897FNIEMOP_DEF(iemOp_bt_Ev_Gv)
2898{
2899 IEMOP_MNEMONIC("bt Gv,Mp");
2900 return FNIEMOP_CALL_1(iemOpCommonBit_Ev_Gv, &g_iemAImpl_bt);
2901}
2902
2903
/**
 * Common worker for iemOp_shrd_Ev_Gv_Ib and iemOp_shld_Ev_Gv_Ib.
 *
 * Double precision shift with an immediate shift count.  For a memory
 * destination the immediate byte follows the ModR/M displacement bytes,
 * so it is read after the effective address has been calculated.
 */
FNIEMOP_DEF_1(iemOpCommonShldShrd_Ib, PCIEMOPSHIFTDBLSIZES, pImpl)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF | X86_EFL_OF);

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register destination - the immediate follows the ModR/M byte directly. */
        uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift);
        IEMOP_HLP_NO_LOCK_PREFIX();

        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(4, 0);
                IEM_MC_ARG(uint16_t *,      pu16Dst,                0);
                IEM_MC_ARG(uint16_t,        u16Src,                 1);
                IEM_MC_ARG_CONST(uint8_t,   cShiftArg, /*=*/cShift, 2);
                IEM_MC_ARG(uint32_t *,      pEFlags,                3);

                IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU16, pu16Dst, u16Src, cShiftArg, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(4, 0);
                IEM_MC_ARG(uint32_t *,      pu32Dst,                0);
                IEM_MC_ARG(uint32_t,        u32Src,                 1);
                IEM_MC_ARG_CONST(uint8_t,   cShiftArg, /*=*/cShift, 2);
                IEM_MC_ARG(uint32_t *,      pEFlags,                3);

                IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU32, pu32Dst, u32Src, cShiftArg, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(4, 0);
                IEM_MC_ARG(uint64_t *,      pu64Dst,                0);
                IEM_MC_ARG(uint64_t,        u64Src,                 1);
                IEM_MC_ARG_CONST(uint8_t,   cShiftArg, /*=*/cShift, 2);
                IEM_MC_ARG(uint32_t *,      pEFlags,                3);

                IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU64, pu64Dst, u64Src, cShiftArg, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /* memory destination */
        IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */

        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(4, 2);
                IEM_MC_ARG(uint16_t *,              pu16Dst,                0);
                IEM_MC_ARG(uint16_t,                u16Src,                 1);
                IEM_MC_ARG(uint8_t,                 cShiftArg,              2);
                IEM_MC_ARG_LOCAL_EFLAGS(            pEFlags, EFlags,        3);
                IEM_MC_LOCAL(RTGCPTR,               GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                /* The immediate byte comes after any displacement consumed above. */
                uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift);
                IEM_MC_ASSIGN(cShiftArg, cShift);
                IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_FETCH_EFLAGS(EFlags);
                IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
                IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU16, pu16Dst, u16Src, cShiftArg, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(4, 2);
                IEM_MC_ARG(uint32_t *,              pu32Dst,                0);
                IEM_MC_ARG(uint32_t,                u32Src,                 1);
                IEM_MC_ARG(uint8_t,                 cShiftArg,              2);
                IEM_MC_ARG_LOCAL_EFLAGS(            pEFlags, EFlags,        3);
                IEM_MC_LOCAL(RTGCPTR,               GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                /* The immediate byte comes after any displacement consumed above. */
                uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift);
                IEM_MC_ASSIGN(cShiftArg, cShift);
                IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_FETCH_EFLAGS(EFlags);
                IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
                IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU32, pu32Dst, u32Src, cShiftArg, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(4, 2);
                IEM_MC_ARG(uint64_t *,              pu64Dst,                0);
                IEM_MC_ARG(uint64_t,                u64Src,                 1);
                IEM_MC_ARG(uint8_t,                 cShiftArg,              2);
                IEM_MC_ARG_LOCAL_EFLAGS(            pEFlags, EFlags,        3);
                IEM_MC_LOCAL(RTGCPTR,               GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                /* The immediate byte comes after any displacement consumed above. */
                uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift);
                IEM_MC_ASSIGN(cShiftArg, cShift);
                IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_FETCH_EFLAGS(EFlags);
                IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
                IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU64, pu64Dst, u64Src, cShiftArg, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, IEM_ACCESS_DATA_RW);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
3047
3048
3049/**
3050 * Common worker for iemOp_shrd_Ev_Gv_CL and iemOp_shld_Ev_Gv_CL.
3051 */
3052FNIEMOP_DEF_1(iemOpCommonShldShrd_CL, PCIEMOPSHIFTDBLSIZES, pImpl)
3053{
3054 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
3055 IEMOP_HLP_NO_LOCK_PREFIX();
3056 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF | X86_EFL_OF);
3057
3058 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
3059 {
3060 IEMOP_HLP_NO_LOCK_PREFIX();
3061
3062 switch (pIemCpu->enmEffOpSize)
3063 {
3064 case IEMMODE_16BIT:
3065 IEM_MC_BEGIN(4, 0);
3066 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
3067 IEM_MC_ARG(uint16_t, u16Src, 1);
3068 IEM_MC_ARG(uint8_t, cShiftArg, 2);
3069 IEM_MC_ARG(uint32_t *, pEFlags, 3);
3070
3071 IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
3072 IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
3073 IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
3074 IEM_MC_REF_EFLAGS(pEFlags);
3075 IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU16, pu16Dst, u16Src, cShiftArg, pEFlags);
3076
3077 IEM_MC_ADVANCE_RIP();
3078 IEM_MC_END();
3079 return VINF_SUCCESS;
3080
3081 case IEMMODE_32BIT:
3082 IEM_MC_BEGIN(4, 0);
3083 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
3084 IEM_MC_ARG(uint32_t, u32Src, 1);
3085 IEM_MC_ARG(uint8_t, cShiftArg, 2);
3086 IEM_MC_ARG(uint32_t *, pEFlags, 3);
3087
3088 IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
3089 IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
3090 IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
3091 IEM_MC_REF_EFLAGS(pEFlags);
3092 IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU32, pu32Dst, u32Src, cShiftArg, pEFlags);
3093
3094 IEM_MC_ADVANCE_RIP();
3095 IEM_MC_END();
3096 return VINF_SUCCESS;
3097
3098 case IEMMODE_64BIT:
3099 IEM_MC_BEGIN(4, 0);
3100 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
3101 IEM_MC_ARG(uint64_t, u64Src, 1);
3102 IEM_MC_ARG(uint8_t, cShiftArg, 2);
3103 IEM_MC_ARG(uint32_t *, pEFlags, 3);
3104
3105 IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
3106 IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
3107 IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
3108 IEM_MC_REF_EFLAGS(pEFlags);
3109 IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU64, pu64Dst, u64Src, cShiftArg, pEFlags);
3110
3111 IEM_MC_ADVANCE_RIP();
3112 IEM_MC_END();
3113 return VINF_SUCCESS;
3114
3115 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3116 }
3117 }
3118 else
3119 {
3120 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
3121
3122 switch (pIemCpu->enmEffOpSize)
3123 {
3124 case IEMMODE_16BIT:
3125 IEM_MC_BEGIN(4, 2);
3126 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
3127 IEM_MC_ARG(uint16_t, u16Src, 1);
3128 IEM_MC_ARG(uint8_t, cShiftArg, 2);
3129 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 3);
3130 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
3131
3132 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
3133 IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
3134 IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
3135 IEM_MC_FETCH_EFLAGS(EFlags);
3136 IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
3137 IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU16, pu16Dst, u16Src, cShiftArg, pEFlags);
3138
3139 IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
3140 IEM_MC_COMMIT_EFLAGS(EFlags);
3141 IEM_MC_ADVANCE_RIP();
3142 IEM_MC_END();
3143 return VINF_SUCCESS;
3144
3145 case IEMMODE_32BIT:
3146 IEM_MC_BEGIN(4, 2);
3147 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
3148 IEM_MC_ARG(uint32_t, u32Src, 1);
3149 IEM_MC_ARG(uint8_t, cShiftArg, 2);
3150 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 3);
3151 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
3152
3153 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
3154 IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
3155 IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
3156 IEM_MC_FETCH_EFLAGS(EFlags);
3157 IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
3158 IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU32, pu32Dst, u32Src, cShiftArg, pEFlags);
3159
3160 IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
3161 IEM_MC_COMMIT_EFLAGS(EFlags);
3162 IEM_MC_ADVANCE_RIP();
3163 IEM_MC_END();
3164 return VINF_SUCCESS;
3165
3166 case IEMMODE_64BIT:
3167 IEM_MC_BEGIN(4, 2);
3168 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
3169 IEM_MC_ARG(uint64_t, u64Src, 1);
3170 IEM_MC_ARG(uint8_t, cShiftArg, 2);
3171 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 3);
3172 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
3173
3174 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
3175 IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
3176 IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
3177 IEM_MC_FETCH_EFLAGS(EFlags);
3178 IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
3179 IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU64, pu64Dst, u64Src, cShiftArg, pEFlags);
3180
3181 IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, IEM_ACCESS_DATA_RW);
3182 IEM_MC_COMMIT_EFLAGS(EFlags);
3183 IEM_MC_ADVANCE_RIP();
3184 IEM_MC_END();
3185 return VINF_SUCCESS;
3186
3187 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3188 }
3189 }
3190}
3191
3192
3193
/** Opcode 0x0f 0xa4. */
FNIEMOP_DEF(iemOp_shld_Ev_Gv_Ib)
{
    /* SHLD Ev,Gv,Ib - double precision shift left, shift count from an
       immediate byte; defers to the common shld/shrd worker. */
    IEMOP_MNEMONIC("shld Ev,Gv,Ib");
    return FNIEMOP_CALL_1(iemOpCommonShldShrd_Ib, &g_iemAImpl_shld);
}
3200
3201
/** Opcode 0x0f 0xa5. */
FNIEMOP_DEF(iemOp_shld_Ev_Gv_CL)
{
    /* SHLD Ev,Gv,CL - double precision shift left, shift count taken from
       the CL register; defers to the common shld/shrd worker.
       NOTE(review): the comment above says opcode 0xa7 in the original; the
       CL form of SHLD is 0x0f 0xa5 per the opcode map - confirm. */
    IEMOP_MNEMONIC("shld Ev,Gv,CL");
    return FNIEMOP_CALL_1(iemOpCommonShldShrd_CL, &g_iemAImpl_shld);
}
3208
3209
/** Opcode 0x0f 0xa8. */
FNIEMOP_DEF(iemOp_push_gs)
{
    /* PUSH GS - push the GS segment selector; shared push-sreg worker. */
    IEMOP_MNEMONIC("push gs");
    IEMOP_HLP_NO_LOCK_PREFIX(); /* LOCK is not valid with this encoding. */
    return FNIEMOP_CALL_1(iemOpCommonPushSReg, X86_SREG_GS);
}
3217
3218
/** Opcode 0x0f 0xa9. */
FNIEMOP_DEF(iemOp_pop_gs)
{
    /* POP GS - segment register loads have side effects (selector checks,
       descriptor loading), so defer to the C implementation. */
    IEMOP_MNEMONIC("pop gs");
    IEMOP_HLP_NO_LOCK_PREFIX(); /* LOCK is not valid with this encoding. */
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_pop_Sreg, X86_SREG_GS, pIemCpu->enmEffOpSize);
}
3226
3227
/** Opcode 0x0f 0xaa. */
FNIEMOP_STUB(iemOp_rsm); /* RSM (resume from system management mode) - not implemented. */
3230
3231
3232/** Opcode 0x0f 0xab. */
3233FNIEMOP_DEF(iemOp_bts_Ev_Gv)
3234{
3235 IEMOP_MNEMONIC("bts Gv,Mp");
3236 return FNIEMOP_CALL_1(iemOpCommonBit_Ev_Gv, &g_iemAImpl_bts);
3237}
3238
3239
/** Opcode 0x0f 0xac. */
FNIEMOP_DEF(iemOp_shrd_Ev_Gv_Ib)
{
    /* SHRD Ev,Gv,Ib - double precision shift right, shift count from an
       immediate byte; defers to the common shld/shrd worker. */
    IEMOP_MNEMONIC("shrd Ev,Gv,Ib");
    return FNIEMOP_CALL_1(iemOpCommonShldShrd_Ib, &g_iemAImpl_shrd);
}
3246
3247
/** Opcode 0x0f 0xad. */
FNIEMOP_DEF(iemOp_shrd_Ev_Gv_CL)
{
    /* SHRD Ev,Gv,CL - double precision shift right, shift count taken from
       the CL register; defers to the common shld/shrd worker. */
    IEMOP_MNEMONIC("shrd Ev,Gv,CL");
    return FNIEMOP_CALL_1(iemOpCommonShldShrd_CL, &g_iemAImpl_shrd);
}
3254
3255
/** Opcode 0x0f 0xae. */
FNIEMOP_STUB(iemOp_Grp15); /* Group 15 (fxsave/fxrstor/ldmxcsr/fences/clflush etc.) - not implemented. */
3258
3259
/** Opcode 0x0f 0xaf. */
FNIEMOP_DEF(iemOp_imul_Gv_Ev)
{
    /* IMUL Gv,Ev - two operand signed multiply.  SF, ZF, AF and PF are
       architecturally undefined after IMUL, so tell the verifier not to
       compare them. */
    IEMOP_MNEMONIC("imul Gv,Ev");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_imul_two);
}
3267
3268
/** Opcode 0x0f 0xb0. */
FNIEMOP_STUB(iemOp_cmpxchg_Eb_Gb); /* CMPXCHG Eb,Gb - not implemented. */
/** Opcode 0x0f 0xb1. */
FNIEMOP_STUB(iemOp_cmpxchg_Ev_Gv); /* CMPXCHG Ev,Gv - not implemented. */
3273
3274
/**
 * Common worker for LSS, LFS and LGS (lXs Gv,Mp): loads a far pointer
 * (offset followed by selector) from memory into a general purpose
 * register and a segment register.
 *
 * @param   iSegReg     The segment register to load (X86_SREG_XXX).
 */
FNIEMOP_DEF_1(iemOpCommonLoadSRegAndGreg, uint8_t, iSegReg)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

    /* The source cannot be a register. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
        return IEMOP_RAISE_INVALID_OPCODE();
    uint8_t const iGReg = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg;

    /* The segment-register load has descriptor-table side effects, so the
       heavy lifting is deferred to iemCImpl_load_SReg_Greg; here we only
       fetch the offset and the selector that follows it. */
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            IEM_MC_BEGIN(5, 1);
            IEM_MC_ARG(uint16_t, uSel, 0);
            IEM_MC_ARG(uint16_t, offSeg, 1);
            IEM_MC_ARG_CONST(uint8_t, iSegRegArg,/*=*/iSegReg, 2);
            IEM_MC_ARG_CONST(uint8_t, iGRegArg, /*=*/iGReg, 3);
            IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize,/*=*/pIemCpu->enmEffOpSize, 4);
            IEM_MC_LOCAL(RTGCPTR, GCPtrEff);
            IEM_MC_CALC_RM_EFF_ADDR(GCPtrEff, bRm);
            IEM_MC_FETCH_MEM_U16(offSeg, pIemCpu->iEffSeg, GCPtrEff);
            IEM_MC_FETCH_MEM_U16_DISP(uSel, pIemCpu->iEffSeg, GCPtrEff, 2); /* selector follows the 16-bit offset */
            IEM_MC_CALL_CIMPL_5(iemCImpl_load_SReg_Greg, uSel, offSeg, iSegRegArg, iGRegArg, enmEffOpSize);
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_32BIT:
            IEM_MC_BEGIN(5, 1);
            IEM_MC_ARG(uint16_t, uSel, 0);
            IEM_MC_ARG(uint32_t, offSeg, 1);
            IEM_MC_ARG_CONST(uint8_t, iSegRegArg,/*=*/iSegReg, 2);
            IEM_MC_ARG_CONST(uint8_t, iGRegArg, /*=*/iGReg, 3);
            IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize,/*=*/pIemCpu->enmEffOpSize, 4);
            IEM_MC_LOCAL(RTGCPTR, GCPtrEff);
            IEM_MC_CALC_RM_EFF_ADDR(GCPtrEff, bRm);
            IEM_MC_FETCH_MEM_U32(offSeg, pIemCpu->iEffSeg, GCPtrEff);
            IEM_MC_FETCH_MEM_U16_DISP(uSel, pIemCpu->iEffSeg, GCPtrEff, 4); /* selector follows the 32-bit offset */
            IEM_MC_CALL_CIMPL_5(iemCImpl_load_SReg_Greg, uSel, offSeg, iSegRegArg, iGRegArg, enmEffOpSize);
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_64BIT:
            IEM_MC_BEGIN(5, 1);
            IEM_MC_ARG(uint16_t, uSel, 0);
            IEM_MC_ARG(uint64_t, offSeg, 1);
            IEM_MC_ARG_CONST(uint8_t, iSegRegArg,/*=*/iSegReg, 2);
            IEM_MC_ARG_CONST(uint8_t, iGRegArg, /*=*/iGReg, 3);
            IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize,/*=*/pIemCpu->enmEffOpSize, 4);
            IEM_MC_LOCAL(RTGCPTR, GCPtrEff);
            IEM_MC_CALC_RM_EFF_ADDR(GCPtrEff, bRm);
            IEM_MC_FETCH_MEM_U64(offSeg, pIemCpu->iEffSeg, GCPtrEff);
            IEM_MC_FETCH_MEM_U16_DISP(uSel, pIemCpu->iEffSeg, GCPtrEff, 8); /* selector follows the 64-bit offset */
            IEM_MC_CALL_CIMPL_5(iemCImpl_load_SReg_Greg, uSel, offSeg, iSegRegArg, iGRegArg, enmEffOpSize);
            IEM_MC_END();
            return VINF_SUCCESS;

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
3335
3336
/** Opcode 0x0f 0xb2. */
FNIEMOP_DEF(iemOp_lss_Gv_Mp)
{
    /* LSS Gv,Mp - load far pointer into SS:Gv. */
    IEMOP_MNEMONIC("lss Gv,Mp");
    return FNIEMOP_CALL_1(iemOpCommonLoadSRegAndGreg, X86_SREG_SS);
}
3343
3344
3345/** Opcode 0x0f 0xb3. */
3346FNIEMOP_DEF(iemOp_btr_Ev_Gv)
3347{
3348 IEMOP_MNEMONIC("btr Gv,Mp");
3349 return FNIEMOP_CALL_1(iemOpCommonBit_Ev_Gv, &g_iemAImpl_btr);
3350}
3351
3352
/** Opcode 0x0f 0xb4. */
FNIEMOP_DEF(iemOp_lfs_Gv_Mp)
{
    /* LFS Gv,Mp - load far pointer into FS:Gv. */
    IEMOP_MNEMONIC("lfs Gv,Mp");
    return FNIEMOP_CALL_1(iemOpCommonLoadSRegAndGreg, X86_SREG_FS);
}
3359
3360
/** Opcode 0x0f 0xb5. */
FNIEMOP_DEF(iemOp_lgs_Gv_Mp)
{
    /* LGS Gv,Mp - load far pointer into GS:Gv. */
    IEMOP_MNEMONIC("lgs Gv,Mp");
    return FNIEMOP_CALL_1(iemOpCommonLoadSRegAndGreg, X86_SREG_GS);
}
3367
3368
/**
 * Opcode 0x0f 0xb6.
 * MOVZX Gv,Eb - zero extend a byte register or memory operand into a
 * 16/32/64-bit general purpose register.
 */
FNIEMOP_DEF(iemOp_movzx_Gv_Eb)
{
    IEMOP_MNEMONIC("movzx Gv,Eb");

    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* Register source: fetch with zero extension, store into the reg field register. */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint16_t, u16Value);
                IEM_MC_FETCH_GREG_U8_ZX_U16(u16Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint32_t, u32Value);
                IEM_MC_FETCH_GREG_U8_ZX_U32(u32Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint64_t, u64Value);
                IEM_MC_FETCH_GREG_U8_ZX_U64(u64Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /*
         * We're loading a register from memory.
         */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint16_t, u16Value);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                IEM_MC_FETCH_MEM_U8_ZX_U16(u16Value, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint32_t, u32Value);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                IEM_MC_FETCH_MEM_U8_ZX_U32(u32Value, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint64_t, u64Value);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                IEM_MC_FETCH_MEM_U8_ZX_U64(u64Value, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
3458
3459
/**
 * Opcode 0x0f 0xb7.
 * MOVZX Gv,Ew - zero extend a word register or memory operand into a
 * 32/64-bit general purpose register (a 16-bit destination is handled the
 * same as a 32-bit one here).
 */
FNIEMOP_DEF(iemOp_movzx_Gv_Ew)
{
    IEMOP_MNEMONIC("movzx Gv,Ew");

    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

    /** @todo Not entirely sure how the operand size prefix is handled here,
     *        assuming that it will be ignored. Would be nice to have a few
     *        test for this. */
    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        if (pIemCpu->enmEffOpSize != IEMMODE_64BIT)
        {
            IEM_MC_BEGIN(0, 1);
            IEM_MC_LOCAL(uint32_t, u32Value);
            IEM_MC_FETCH_GREG_U16_ZX_U32(u32Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
            IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
        }
        else
        {
            IEM_MC_BEGIN(0, 1);
            IEM_MC_LOCAL(uint64_t, u64Value);
            IEM_MC_FETCH_GREG_U16_ZX_U64(u64Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
            IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
        }
    }
    else
    {
        /*
         * We're loading a register from memory.
         */
        if (pIemCpu->enmEffOpSize != IEMMODE_64BIT)
        {
            IEM_MC_BEGIN(0, 2);
            IEM_MC_LOCAL(uint32_t, u32Value);
            IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
            IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
            IEM_MC_FETCH_MEM_U16_ZX_U32(u32Value, pIemCpu->iEffSeg, GCPtrEffDst);
            IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
        }
        else
        {
            IEM_MC_BEGIN(0, 2);
            IEM_MC_LOCAL(uint64_t, u64Value);
            IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
            IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
            IEM_MC_FETCH_MEM_U16_ZX_U64(u64Value, pIemCpu->iEffSeg, GCPtrEffDst);
            IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
        }
    }
    return VINF_SUCCESS;
}
3525
3526
/** Opcode 0x0f 0xb8. */
FNIEMOP_STUB(iemOp_popcnt_Gv_Ev_jmpe); /* POPCNT (F3 prefix) / JMPE - not implemented. */
/** Opcode 0x0f 0xb9. */
FNIEMOP_STUB(iemOp_Grp10); /* Group 10 (UD1) - not implemented. */
3531
3532
3533/** Opcode 0x0f 0xba. */
3534FNIEMOP_DEF(iemOp_Grp8)
3535{
3536 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
3537 PCIEMOPBINSIZES pImpl;
3538 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
3539 {
3540 case 0: case 1: case 2: case 3:
3541 return IEMOP_RAISE_INVALID_OPCODE();
3542 case 4: pImpl = &g_iemAImpl_bt; IEMOP_MNEMONIC("bt Ev,Ib"); break;
3543 case 5: pImpl = &g_iemAImpl_bts; IEMOP_MNEMONIC("bts Ev,Ib"); break;
3544 case 6: pImpl = &g_iemAImpl_btr; IEMOP_MNEMONIC("btr Ev,Ib"); break;
3545 case 7: pImpl = &g_iemAImpl_btc; IEMOP_MNEMONIC("btc Ev,Ib"); break;
3546 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3547 }
3548 IEMOP_HLP_NO_LOCK_PREFIX();
3549 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);
3550
3551 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
3552 {
3553 /* register destination. */
3554 uint8_t u8Bit; IEM_OPCODE_GET_NEXT_U8(&u8Bit);
3555 IEMOP_HLP_NO_LOCK_PREFIX();
3556
3557 switch (pIemCpu->enmEffOpSize)
3558 {
3559 case IEMMODE_16BIT:
3560 IEM_MC_BEGIN(3, 0);
3561 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
3562 IEM_MC_ARG_CONST(uint16_t, u16Src, /*=*/ u8Bit & 0x0f, 1);
3563 IEM_MC_ARG(uint32_t *, pEFlags, 2);
3564
3565 IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
3566 IEM_MC_REF_EFLAGS(pEFlags);
3567 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
3568
3569 IEM_MC_ADVANCE_RIP();
3570 IEM_MC_END();
3571 return VINF_SUCCESS;
3572
3573 case IEMMODE_32BIT:
3574 IEM_MC_BEGIN(3, 0);
3575 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
3576 IEM_MC_ARG_CONST(uint32_t, u32Src, /*=*/ u8Bit & 0x1f, 1);
3577 IEM_MC_ARG(uint32_t *, pEFlags, 2);
3578
3579 IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
3580 IEM_MC_REF_EFLAGS(pEFlags);
3581 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
3582
3583 IEM_MC_ADVANCE_RIP();
3584 IEM_MC_END();
3585 return VINF_SUCCESS;
3586
3587 case IEMMODE_64BIT:
3588 IEM_MC_BEGIN(3, 0);
3589 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
3590 IEM_MC_ARG_CONST(uint64_t, u64Src, /*=*/ u8Bit & 0x3f, 1);
3591 IEM_MC_ARG(uint32_t *, pEFlags, 2);
3592
3593 IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
3594 IEM_MC_REF_EFLAGS(pEFlags);
3595 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
3596
3597 IEM_MC_ADVANCE_RIP();
3598 IEM_MC_END();
3599 return VINF_SUCCESS;
3600
3601 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3602 }
3603 }
3604 else
3605 {
3606 /* memory destination. */
3607
3608 uint32_t fAccess;
3609 if (pImpl->pfnLockedU16)
3610 fAccess = IEM_ACCESS_DATA_RW;
3611 else /* BT */
3612 {
3613 IEMOP_HLP_NO_LOCK_PREFIX();
3614 fAccess = IEM_ACCESS_DATA_R;
3615 }
3616
3617 /** @todo test negative bit offsets! */
3618 switch (pIemCpu->enmEffOpSize)
3619 {
3620 case IEMMODE_16BIT:
3621 IEM_MC_BEGIN(3, 1);
3622 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
3623 IEM_MC_ARG(uint16_t, u16Src, 1);
3624 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
3625 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
3626
3627 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
3628 uint8_t u8Bit; IEM_OPCODE_GET_NEXT_U8(&u8Bit);
3629 IEM_MC_ASSIGN(u16Src, u8Bit & 0x0f);
3630 IEM_MC_FETCH_EFLAGS(EFlags);
3631 IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
3632 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
3633 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
3634 else
3635 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU16, pu16Dst, u16Src, pEFlags);
3636 IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
3637
3638 IEM_MC_COMMIT_EFLAGS(EFlags);
3639 IEM_MC_ADVANCE_RIP();
3640 IEM_MC_END();
3641 return VINF_SUCCESS;
3642
3643 case IEMMODE_32BIT:
3644 IEM_MC_BEGIN(3, 1);
3645 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
3646 IEM_MC_ARG(uint32_t, u32Src, 1);
3647 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
3648 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
3649
3650 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
3651 uint8_t u8Bit; IEM_OPCODE_GET_NEXT_U8(&u8Bit);
3652 IEM_MC_ASSIGN(u32Src, u8Bit & 0x1f);
3653 IEM_MC_FETCH_EFLAGS(EFlags);
3654 IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
3655 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
3656 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
3657 else
3658 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU32, pu32Dst, u32Src, pEFlags);
3659 IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
3660
3661 IEM_MC_COMMIT_EFLAGS(EFlags);
3662 IEM_MC_ADVANCE_RIP();
3663 IEM_MC_END();
3664 return VINF_SUCCESS;
3665
3666 case IEMMODE_64BIT:
3667 IEM_MC_BEGIN(3, 1);
3668 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
3669 IEM_MC_ARG(uint64_t, u64Src, 1);
3670 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
3671 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
3672
3673 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
3674 uint8_t u8Bit; IEM_OPCODE_GET_NEXT_U8(&u8Bit);
3675 IEM_MC_ASSIGN(u64Src, u8Bit & 0x3f);
3676 IEM_MC_FETCH_EFLAGS(EFlags);
3677 IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
3678 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
3679 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
3680 else
3681 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU64, pu64Dst, u64Src, pEFlags);
3682 IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, IEM_ACCESS_DATA_RW);
3683
3684 IEM_MC_COMMIT_EFLAGS(EFlags);
3685 IEM_MC_ADVANCE_RIP();
3686 IEM_MC_END();
3687 return VINF_SUCCESS;
3688
3689 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3690 }
3691 }
3692
3693}
3694
3695
3696/** Opcode 0x0f 0xbb. */
3697FNIEMOP_DEF(iemOp_btc_Ev_Gv)
3698{
3699 IEMOP_MNEMONIC("btc Gv,Mp");
3700 return FNIEMOP_CALL_1(iemOpCommonBit_Ev_Gv, &g_iemAImpl_btc);
3701}
3702
3703
/** Opcode 0x0f 0xbc. */
FNIEMOP_DEF(iemOp_bsf_Gv_Ev)
{
    /* BSF Gv,Ev - bit scan forward.  Only ZF is architecturally defined
       afterwards; the rest are undefined, so tell the verifier. */
    IEMOP_MNEMONIC("bsf Gv,Ev");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_SF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_bsf);
}
3711
3712
/** Opcode 0x0f 0xbd. */
FNIEMOP_DEF(iemOp_bsr_Gv_Ev)
{
    /* BSR Gv,Ev - bit scan reverse.  Only ZF is architecturally defined
       afterwards; the rest are undefined, so tell the verifier. */
    IEMOP_MNEMONIC("bsr Gv,Ev");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_SF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_bsr);
}
3720
3721
/**
 * Opcode 0x0f 0xbe.
 * MOVSX Gv,Eb - sign extend a byte register or memory operand into a
 * 16/32/64-bit general purpose register.
 */
FNIEMOP_DEF(iemOp_movsx_Gv_Eb)
{
    IEMOP_MNEMONIC("movsx Gv,Eb");

    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* Register source: fetch with sign extension, store into the reg field register. */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint16_t, u16Value);
                IEM_MC_FETCH_GREG_U8_SX_U16(u16Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint32_t, u32Value);
                IEM_MC_FETCH_GREG_U8_SX_U32(u32Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint64_t, u64Value);
                IEM_MC_FETCH_GREG_U8_SX_U64(u64Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /*
         * We're loading a register from memory.
         */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint16_t, u16Value);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                IEM_MC_FETCH_MEM_U8_SX_U16(u16Value, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint32_t, u32Value);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                IEM_MC_FETCH_MEM_U8_SX_U32(u32Value, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint64_t, u64Value);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                IEM_MC_FETCH_MEM_U8_SX_U64(u64Value, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
3811
3812
/**
 * Opcode 0x0f 0xbf.
 * MOVSX Gv,Ew - sign extend a word register or memory operand into a
 * 32/64-bit general purpose register (a 16-bit destination is handled the
 * same as a 32-bit one here).
 */
FNIEMOP_DEF(iemOp_movsx_Gv_Ew)
{
    IEMOP_MNEMONIC("movsx Gv,Ew");

    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

    /** @todo Not entirely sure how the operand size prefix is handled here,
     *        assuming that it will be ignored. Would be nice to have a few
     *        test for this. */
    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        if (pIemCpu->enmEffOpSize != IEMMODE_64BIT)
        {
            IEM_MC_BEGIN(0, 1);
            IEM_MC_LOCAL(uint32_t, u32Value);
            IEM_MC_FETCH_GREG_U16_SX_U32(u32Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
            IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
        }
        else
        {
            IEM_MC_BEGIN(0, 1);
            IEM_MC_LOCAL(uint64_t, u64Value);
            IEM_MC_FETCH_GREG_U16_SX_U64(u64Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
            IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
        }
    }
    else
    {
        /*
         * We're loading a register from memory.
         */
        if (pIemCpu->enmEffOpSize != IEMMODE_64BIT)
        {
            IEM_MC_BEGIN(0, 2);
            IEM_MC_LOCAL(uint32_t, u32Value);
            IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
            IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
            IEM_MC_FETCH_MEM_U16_SX_U32(u32Value, pIemCpu->iEffSeg, GCPtrEffDst);
            IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
        }
        else
        {
            IEM_MC_BEGIN(0, 2);
            IEM_MC_LOCAL(uint64_t, u64Value);
            IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
            IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
            IEM_MC_FETCH_MEM_U16_SX_U64(u64Value, pIemCpu->iEffSeg, GCPtrEffDst);
            IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
        }
    }
    return VINF_SUCCESS;
}
3878
3879
/**
 * Opcode 0x0f 0xc0.
 * XADD Eb,Gb - exchange and add bytes: the destination receives the sum
 * and the source register receives the old destination value.
 */
FNIEMOP_DEF(iemOp_xadd_Eb_Gb)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_MNEMONIC("xadd Eb,Gb");

    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        IEMOP_HLP_NO_LOCK_PREFIX(); /* LOCK is invalid with a register destination. */

        IEM_MC_BEGIN(3, 0);
        IEM_MC_ARG(uint8_t *,  pu8Dst,  0);
        IEM_MC_ARG(uint8_t *,  pu8Reg,  1);
        IEM_MC_ARG(uint32_t *, pEFlags, 2);

        IEM_MC_REF_GREG_U8(pu8Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
        IEM_MC_REF_GREG_U8(pu8Reg, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_REF_EFLAGS(pEFlags);
        IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u8, pu8Dst, pu8Reg, pEFlags);

        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /*
         * We're accessing memory.
         */
        /* The register operand is worked on via a local copy so the result
           can be written back to the guest register after the memory commit. */
        IEM_MC_BEGIN(3, 3);
        IEM_MC_ARG(uint8_t *,   pu8Dst,          0);
        IEM_MC_ARG(uint8_t *,   pu8Reg,          1);
        IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
        IEM_MC_LOCAL(uint8_t,  u8RegCopy);
        IEM_MC_LOCAL(RTGCPTR,  GCPtrEffDst);

        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
        IEM_MC_MEM_MAP(pu8Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
        IEM_MC_FETCH_GREG_U8(u8RegCopy, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_REF_LOCAL(pu8Reg, u8RegCopy);
        IEM_MC_FETCH_EFLAGS(EFlags);
        if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
            IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u8, pu8Dst, pu8Reg, pEFlags);
        else
            IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u8_locked, pu8Dst, pu8Reg, pEFlags);

        IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Dst, IEM_ACCESS_DATA_RW);
        IEM_MC_COMMIT_EFLAGS(EFlags);
        IEM_MC_STORE_GREG_U8((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u8RegCopy);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
        return VINF_SUCCESS;
    }
    return VINF_SUCCESS;
}
3937
3938
/**
 * Opcode 0x0f 0xc1.
 * XADD Ev,Gv - exchange and add words/dwords/qwords: the destination
 * receives the sum and the source register receives the old destination
 * value.
 */
FNIEMOP_DEF(iemOp_xadd_Ev_Gv)
{
    IEMOP_MNEMONIC("xadd Ev,Gv");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);

    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        IEMOP_HLP_NO_LOCK_PREFIX(); /* LOCK is invalid with a register destination. */

        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint16_t *, pu16Dst, 0);
                IEM_MC_ARG(uint16_t *, pu16Reg, 1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);

                IEM_MC_REF_GREG_U16(pu16Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_GREG_U16(pu16Reg, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u16, pu16Dst, pu16Reg, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint32_t *, pu32Dst, 0);
                IEM_MC_ARG(uint32_t *, pu32Reg, 1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);

                IEM_MC_REF_GREG_U32(pu32Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_GREG_U32(pu32Reg, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u32, pu32Dst, pu32Reg, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint64_t *, pu64Dst, 0);
                IEM_MC_ARG(uint64_t *, pu64Reg, 1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);

                IEM_MC_REF_GREG_U64(pu64Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_GREG_U64(pu64Reg, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u64, pu64Dst, pu64Reg, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /*
         * We're accessing memory.
         */
        /* The register operand is worked on via a local copy so the result
           can be written back to the guest register after the memory commit. */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(3, 3);
                IEM_MC_ARG(uint16_t *,  pu16Dst,         0);
                IEM_MC_ARG(uint16_t *,  pu16Reg,         1);
                IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
                IEM_MC_LOCAL(uint16_t, u16RegCopy);
                IEM_MC_LOCAL(RTGCPTR,  GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_GREG_U16(u16RegCopy, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_LOCAL(pu16Reg, u16RegCopy);
                IEM_MC_FETCH_EFLAGS(EFlags);
                if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
                    IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u16, pu16Dst, pu16Reg, pEFlags);
                else
                    IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u16_locked, pu16Dst, pu16Reg, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_STORE_GREG_U16((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u16RegCopy);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(3, 3);
                IEM_MC_ARG(uint32_t *,  pu32Dst,         0);
                IEM_MC_ARG(uint32_t *,  pu32Reg,         1);
                IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
                IEM_MC_LOCAL(uint32_t, u32RegCopy);
                IEM_MC_LOCAL(RTGCPTR,  GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_GREG_U32(u32RegCopy, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_LOCAL(pu32Reg, u32RegCopy);
                IEM_MC_FETCH_EFLAGS(EFlags);
                if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
                    IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u32, pu32Dst, pu32Reg, pEFlags);
                else
                    IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u32_locked, pu32Dst, pu32Reg, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_STORE_GREG_U32((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u32RegCopy);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(3, 3);
                IEM_MC_ARG(uint64_t *,  pu64Dst,         0);
                IEM_MC_ARG(uint64_t *,  pu64Reg,         1);
                IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
                IEM_MC_LOCAL(uint64_t, u64RegCopy);
                IEM_MC_LOCAL(RTGCPTR,  GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_GREG_U64(u64RegCopy, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_LOCAL(pu64Reg, u64RegCopy);
                IEM_MC_FETCH_EFLAGS(EFlags);
                if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
                    IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u64, pu64Dst, pu64Reg, pEFlags);
                else
                    IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u64_locked, pu64Dst, pu64Reg, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, IEM_ACCESS_DATA_RW);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_STORE_GREG_U64((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u64RegCopy);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
4088
/*
 * Opcodes 0x0f 0xc2 thru 0x0f 0xc7: decoder stubs, not implemented yet.
 * Each FNIEMOP_STUB expands to a placeholder decode function; the encoded
 * mnemonics (per prefix variant) are spelled out in the identifier.
 */
/** Opcode 0x0f 0xc2. */
FNIEMOP_STUB(iemOp_cmpps_Vps_Wps_Ib__cmppd_Vpd_Wpd_Ib__cmpss_Vss_Wss_Ib__cmpsd_Vsd_Wsd_Ib);
/** Opcode 0x0f 0xc3. */
FNIEMOP_STUB(iemOp_movnti_My_Gy);
/** Opcode 0x0f 0xc4. */
FNIEMOP_STUB(iemOp_pinsrw_Pq_Ry_Mw_Ib__pinsrw_Vdq_Ry_Mw_Ib);
/** Opcode 0x0f 0xc5. */
FNIEMOP_STUB(iemOp_pextrw_Gd_Nq_Ib__pextrw_Gd_Udq_Ib);
/** Opcode 0x0f 0xc6. */
FNIEMOP_STUB(iemOp_shufps_Vps_Wps_Ib__shufdp_Vpd_Wpd_Ib);
/** Opcode 0x0f 0xc7. */
FNIEMOP_STUB(iemOp_Grp9);
4101
4102#if 0
/**
 * Common 'bswap register' helper.
 *
 * Currently dead code (this whole section sits inside an \#if 0 block).
 * Structurally it mirrors iemOpCommonUnaryGReg below: dispatch on the
 * effective operand size and invoke the size-specific worker from @a pImpl
 * on a general register reference plus an EFLAGS reference.
 *
 * NOTE(review): the workers are given an EFLAGS reference like the unary
 * inc/dec/not/neg ops; per the architecture BSWAP does not touch flags -
 * confirm the intended worker signature before enabling this.
 *
 * @param   pImpl   Size-specific worker function table for the operation.
 * @param   iReg    The general register to operate on (incl. any REX bits).
 */
FNIEMOP_DEF_2(iemOpCommonBswapGReg, PCIEMOPUNARYSIZES, pImpl, uint8_t, iReg)
{
    IEMOP_HLP_NO_LOCK_PREFIX();
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            IEM_MC_BEGIN(2, 0);
            IEM_MC_ARG(uint16_t *, pu16Dst, 0);
            IEM_MC_ARG(uint32_t *, pEFlags, 1);
            IEM_MC_REF_GREG_U16(pu16Dst, iReg);
            IEM_MC_REF_EFLAGS(pEFlags);
            IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnNormalU16, pu16Dst, pEFlags);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_32BIT:
            IEM_MC_BEGIN(2, 0);
            IEM_MC_ARG(uint32_t *, pu32Dst, 0);
            IEM_MC_ARG(uint32_t *, pEFlags, 1);
            IEM_MC_REF_GREG_U32(pu32Dst, iReg);
            IEM_MC_REF_EFLAGS(pEFlags);
            IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnNormalU32, pu32Dst, pEFlags);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_64BIT:
            IEM_MC_BEGIN(2, 0);
            IEM_MC_ARG(uint64_t *, pu64Dst, 0);
            IEM_MC_ARG(uint32_t *, pEFlags, 1);
            IEM_MC_REF_GREG_U64(pu64Dst, iReg);
            IEM_MC_REF_EFLAGS(pEFlags);
            IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnNormalU64, pu64Dst, pEFlags);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;
    }
    /* NOTE(review): reached only for an unexpected enmEffOpSize value, which
       silently succeeds here; consider IEM_NOT_REACHED_DEFAULT_CASE_RET()
       like the xadd decoder above uses. */
    return VINF_SUCCESS;
}
4146
4147
4148/** Opcode 0x0f 0xc8. */
4149FNIEMOP_DEF(iemOp_bswap_rAX_r8)
4150{
4151 IEMOP_MNEMONIC("bswap rAX/r8");
4152 return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_inc, X86_GREG_xAX | pIemCpu->uRexReg);
4153}
4154
4155
4156#else
4157FNIEMOP_STUB(iemOp_bswap_rAX_r8);
4158#endif
/*
 * Opcodes 0x0f 0xc9 thru 0x0f 0xcf: BSWAP on the remaining registers -
 * decoder stubs, not implemented yet (see the disabled helper above).
 */
/** Opcode 0x0f 0xc9. */
FNIEMOP_STUB(iemOp_bswap_rCX_r9);
/** Opcode 0x0f 0xca. */
FNIEMOP_STUB(iemOp_bswap_rDX_r10);
/** Opcode 0x0f 0xcb. */
FNIEMOP_STUB(iemOp_bswap_rBX_r11);
/** Opcode 0x0f 0xcc. */
FNIEMOP_STUB(iemOp_bswap_rSP_r12);
/** Opcode 0x0f 0xcd. */
FNIEMOP_STUB(iemOp_bswap_rBP_r13);
/** Opcode 0x0f 0xce. */
FNIEMOP_STUB(iemOp_bswap_rSI_r14);
/** Opcode 0x0f 0xcf. */
FNIEMOP_STUB(iemOp_bswap_rDI_r15);
4173
/*
 * Opcodes 0x0f 0xd0 thru 0x0f 0xfe: decoder stubs for the MMX/SSE packed
 * integer and related instructions - none implemented yet.  The identifier
 * of each stub lists the mnemonic for every prefix variant of the opcode.
 */
/** Opcode 0x0f 0xd0. */
FNIEMOP_STUB(iemOp_addsubpd_Vpd_Wpd__addsubps_Vps_Wps);
/** Opcode 0x0f 0xd1. */
FNIEMOP_STUB(iemOp_psrlw_Pp_Qp__psrlw_Vdp_Wdq);
/** Opcode 0x0f 0xd2. */
FNIEMOP_STUB(iemOp_psrld_Pq_Qq__psrld_Vdq_Wdq);
/** Opcode 0x0f 0xd3. */
FNIEMOP_STUB(iemOp_psrlq_Pq_Qq__psrlq_Vdq_Wdq);
/** Opcode 0x0f 0xd4. */
FNIEMOP_STUB(iemOp_paddq_Pq_Qq__paddq_Vdq_Wdq);
/** Opcode 0x0f 0xd5. */
FNIEMOP_STUB(iemOp_pmulq_Pq_Qq__pmullw_Vdq_Wdq);
/** Opcode 0x0f 0xd6. */
FNIEMOP_STUB(iemOp_movq_Wq_Vq__movq2dq_Vdq_Nq__movdq2q_Pq_Uq);
/** Opcode 0x0f 0xd7. */
FNIEMOP_STUB(iemOp_pmovmskb_Gd_Nq__pmovmskb_Gd_Udq);
/** Opcode 0x0f 0xd8. */
FNIEMOP_STUB(iemOp_psubusb_Pq_Qq__psubusb_Vdq_Wdq);
/** Opcode 0x0f 0xd9. */
FNIEMOP_STUB(iemOp_psubusw_Pq_Qq__psubusw_Vdq_Wdq);
/** Opcode 0x0f 0xda. */
FNIEMOP_STUB(iemOp_pminub_Pq_Qq__pminub_Vdq_Wdq);
/** Opcode 0x0f 0xdb. */
FNIEMOP_STUB(iemOp_pand_Pq_Qq__pand_Vdq_Wdq);
/** Opcode 0x0f 0xdc. */
FNIEMOP_STUB(iemOp_paddusb_Pq_Qq__paddusb_Vdq_Wdq);
/** Opcode 0x0f 0xdd. */
FNIEMOP_STUB(iemOp_paddusw_Pq_Qq__paddusw_Vdq_Wdq);
/** Opcode 0x0f 0xde. */
FNIEMOP_STUB(iemOp_pmaxub_Pq_Qq__pamxub_Vdq_Wdq);
/** Opcode 0x0f 0xdf. */
FNIEMOP_STUB(iemOp_pandn_Pq_Qq__pandn_Vdq_Wdq);
/** Opcode 0x0f 0xe0. */
FNIEMOP_STUB(iemOp_pavgb_Pq_Qq__pavgb_Vdq_Wdq);
/** Opcode 0x0f 0xe1. */
FNIEMOP_STUB(iemOp_psraw_Pq_Qq__psraw_Vdq_Wdq);
/** Opcode 0x0f 0xe2. */
FNIEMOP_STUB(iemOp_psrad_Pq_Qq__psrad_Vdq_Wdq);
/** Opcode 0x0f 0xe3. */
FNIEMOP_STUB(iemOp_pavgw_Pq_Qq__pavgw_Vdq_Wdq);
/** Opcode 0x0f 0xe4. */
FNIEMOP_STUB(iemOp_pmulhuw_Pq_Qq__pmulhuw_Vdq_Wdq);
/** Opcode 0x0f 0xe5. */
FNIEMOP_STUB(iemOp_pmulhw_Pq_Qq__pmulhw_Vdq_Wdq);
/** Opcode 0x0f 0xe6. */
FNIEMOP_STUB(iemOp_cvttpd2dq_Vdq_Wdp__cvtdq2pd_Vdq_Wpd__cvtpd2dq_Vdq_Wpd);
/** Opcode 0x0f 0xe7. */
FNIEMOP_STUB(iemOp_movntq_Mq_Pq__movntdq_Mdq_Vdq);
/** Opcode 0x0f 0xe8. */
FNIEMOP_STUB(iemOp_psubsb_Pq_Qq__psubsb_Vdq_Wdq);
/** Opcode 0x0f 0xe9. */
FNIEMOP_STUB(iemOp_psubsw_Pq_Qq__psubsw_Vdq_Wdq);
/** Opcode 0x0f 0xea. */
FNIEMOP_STUB(iemOp_pminsw_Pq_Qq__pminsw_Vdq_Wdq);
/** Opcode 0x0f 0xeb. */
FNIEMOP_STUB(iemOp_por_Pq_Qq__por_Vdq_Wdq);
/** Opcode 0x0f 0xec. */
FNIEMOP_STUB(iemOp_paddsb_Pq_Qq__paddsb_Vdq_Wdq);
/** Opcode 0x0f 0xed. */
FNIEMOP_STUB(iemOp_paddsw_Pq_Qq__paddsw_Vdq_Wdq);
/** Opcode 0x0f 0xee. */
FNIEMOP_STUB(iemOp_pmaxsw_Pq_Qq__pmaxsw_Vdq_Wdq);
/** Opcode 0x0f 0xef. */
FNIEMOP_STUB(iemOp_pxor_Pq_Qq__pxor_Vdq_Wdq);
/** Opcode 0x0f 0xf0. */
FNIEMOP_STUB(iemOp_lddqu_Vdq_Mdq);
/** Opcode 0x0f 0xf1. */
FNIEMOP_STUB(iemOp_psllw_Pq_Qq__pslw_Vdq_Wdq);
/** Opcode 0x0f 0xf2. */
FNIEMOP_STUB(iemOp_psld_Pq_Qq__pslld_Vdq_Wdq);
/** Opcode 0x0f 0xf3. */
FNIEMOP_STUB(iemOp_psllq_Pq_Qq__pslq_Vdq_Wdq);
/** Opcode 0x0f 0xf4. */
FNIEMOP_STUB(iemOp_pmuludq_Pq_Qq__pmuludq_Vdq_Wdq);
/** Opcode 0x0f 0xf5. */
FNIEMOP_STUB(iemOp_pmaddwd_Pq_Qq__pmaddwd_Vdq_Wdq);
/** Opcode 0x0f 0xf6. */
FNIEMOP_STUB(iemOp_psadbw_Pq_Qq__psadbw_Vdq_Wdq);
/** Opcode 0x0f 0xf7. */
FNIEMOP_STUB(iemOp_maskmovq_Pq_Nq__maskmovdqu_Vdq_Udq);
/** Opcode 0x0f 0xf8. */
FNIEMOP_STUB(iemOp_psubb_Pq_Qq_psubb_Vdq_Wdq);
/** Opcode 0x0f 0xf9. */
FNIEMOP_STUB(iemOp_psubw_Pq_Qq__psubw_Vdq_Wdq);
/** Opcode 0x0f 0xfa. */
FNIEMOP_STUB(iemOp_psubd_Pq_Qq__psubd_Vdq_Wdq);
/** Opcode 0x0f 0xfb. */
FNIEMOP_STUB(iemOp_psubq_Pq_Qq__psbuq_Vdq_Wdq);
/** Opcode 0x0f 0xfc. */
FNIEMOP_STUB(iemOp_paddb_Pq_Qq__paddb_Vdq_Wdq);
/** Opcode 0x0f 0xfd. */
FNIEMOP_STUB(iemOp_paddw_Pq_Qq__paddw_Vdq_Wdq);
/** Opcode 0x0f 0xfe. */
FNIEMOP_STUB(iemOp_paddd_Pq_Qq__paddd_Vdq_Wdq);
4268
4269
/**
 * The two-byte opcode dispatch table (opcodes prefixed by 0x0f).
 *
 * Indexed by the second opcode byte; iemOp_2byteEscape fetches that byte and
 * jumps through this table.  Entries whose behavior differs by operand-size /
 * repeat prefix are handled inside a single function whose name lists every
 * variant.  Entry order IS the decode - do not reorder.
 */
const PFNIEMOP g_apfnTwoByteMap[256] =
{
    /* 0x00 */  iemOp_Grp6, iemOp_Grp7, iemOp_lar_Gv_Ew, iemOp_lsl_Gv_Ew,
    /* 0x04 */  iemOp_Invalid, iemOp_syscall, iemOp_clts, iemOp_sysret,
    /* 0x08 */  iemOp_invd, iemOp_wbinvd, iemOp_Invalid, iemOp_ud2,
    /* 0x0c */  iemOp_Invalid, iemOp_nop_Ev_prefetch, iemOp_femms, iemOp_3Dnow,
    /* 0x10 */  iemOp_movups_Vps_Wps__movupd_Vpd_Wpd__movss_Vss_Wss__movsd_Vsd_Wsd,
    /* 0x11 */  iemOp_movups_Wps_Vps__movupd_Wpd_Vpd__movss_Wss_Vss__movsd_Vsd_Wsd,
    /* 0x12 */  iemOp_movlps_Vq_Mq__movhlps_Vq_Uq__movlpd_Vq_Mq__movsldup_Vq_Wq__movddup_Vq_Wq,
    /* 0x13 */  iemOp_movlps_Mq_Vq__movlpd_Mq_Vq,
    /* 0x14 */  iemOp_unpckhlps_Vps_Wq__unpcklpd_Vpd_Wq,
    /* 0x15 */  iemOp_unpckhps_Vps_Wq__unpckhpd_Vpd_Wq,
    /* 0x16 */  iemOp_movhps_Vq_Mq__movlhps_Vq_Uq__movhpd_Vq_Mq__movshdup_Vq_Wq,
    /* 0x17 */  iemOp_movhps_Mq_Vq__movhpd_Mq_Vq,
    /* 0x18 */  iemOp_prefetch_Grp16, iemOp_Invalid, iemOp_Invalid, iemOp_Invalid,
    /* 0x1c */  iemOp_Invalid, iemOp_Invalid, iemOp_Invalid, iemOp_Invalid,
    /* 0x20 */  iemOp_mov_Rd_Cd, iemOp_mov_Rd_Dd, iemOp_mov_Cd_Rd, iemOp_mov_Dd_Rd,
    /* 0x24 */  iemOp_mov_Rd_Td, iemOp_Invalid, iemOp_mov_Td_Rd, iemOp_Invalid,
    /* 0x28 */  iemOp_movaps_Vps_Wps__movapd_Vpd_Wpd,
    /* 0x29 */  iemOp_movaps_Wps_Vps__movapd_Wpd_Vpd,
    /* 0x2a */  iemOp_cvtpi2ps_Vps_Qpi__cvtpi2pd_Vpd_Qpi__cvtsi2ss_Vss_Ey__cvtsi2sd_Vsd_Ey,
    /* 0x2b */  iemOp_movntps_Mps_Vps__movntpd_Mpd_Vpd,
    /* 0x2c */  iemOp_cvttps2pi_Ppi_Wps__cvttpd2pi_Ppi_Wpd__cvttss2si_Gy_Wss__cvttsd2si_Yu_Wsd,
    /* 0x2d */  iemOp_cvtps2pi_Ppi_Wps__cvtpd2pi_QpiWpd__cvtss2si_Gy_Wss__cvtsd2si_Gy_Wsd,
    /* 0x2e */  iemOp_ucomiss_Vss_Wss__ucomisd_Vsd_Wsd,
    /* 0x2f */  iemOp_comiss_Vss_Wss__comisd_Vsd_Wsd,
    /* 0x30 */  iemOp_wrmsr, iemOp_rdtsc, iemOp_rdmsr, iemOp_rdpmc,
    /* 0x34 */  iemOp_sysenter, iemOp_sysexit, iemOp_Invalid, iemOp_getsec,
    /* 0x38 */  iemOp_3byte_Esc_A4, iemOp_Invalid, iemOp_3byte_Esc_A5, iemOp_Invalid,
    /* 0x3c */  iemOp_movnti_Gv_Ev/*?*/,iemOp_Invalid, iemOp_Invalid, iemOp_Invalid,
    /* 0x40 */  iemOp_cmovo_Gv_Ev, iemOp_cmovno_Gv_Ev, iemOp_cmovc_Gv_Ev, iemOp_cmovnc_Gv_Ev,
    /* 0x44 */  iemOp_cmove_Gv_Ev, iemOp_cmovne_Gv_Ev, iemOp_cmovbe_Gv_Ev, iemOp_cmovnbe_Gv_Ev,
    /* 0x48 */  iemOp_cmovs_Gv_Ev, iemOp_cmovns_Gv_Ev, iemOp_cmovp_Gv_Ev, iemOp_cmovnp_Gv_Ev,
    /* 0x4c */  iemOp_cmovl_Gv_Ev, iemOp_cmovnl_Gv_Ev, iemOp_cmovle_Gv_Ev, iemOp_cmovnle_Gv_Ev,
    /* 0x50 */  iemOp_movmskps_Gy_Ups__movmskpd_Gy_Upd,
    /* 0x51 */  iemOp_sqrtps_Wps_Vps__sqrtpd_Wpd_Vpd__sqrtss_Vss_Wss__sqrtsd_Vsd_Wsd,
    /* 0x52 */  iemOp_rsqrtps_Wps_Vps__rsqrtss_Vss_Wss,
    /* 0x53 */  iemOp_rcpps_Wps_Vps__rcpss_Vs_Wss,
    /* 0x54 */  iemOp_andps_Vps_Wps__andpd_Wpd_Vpd,
    /* 0x55 */  iemOp_andnps_Vps_Wps__andnpd_Wpd_Vpd,
    /* 0x56 */  iemOp_orps_Wpd_Vpd__orpd_Wpd_Vpd,
    /* 0x57 */  iemOp_xorps_Vps_Wps__xorpd_Wpd_Vpd,
    /* 0x58 */  iemOp_addps_Vps_Wps__addpd_Vpd_Wpd__addss_Vss_Wss__addsd_Vsd_Wsd,
    /* 0x59 */  iemOp_mulps_Vps_Wps__mulpd_Vpd_Wpd__mulss_Vss__Wss__mulsd_Vsd_Wsd,
    /* 0x5a */  iemOp_cvtps2pd_Vpd_Wps__cvtpd2ps_Vps_Wpd__cvtss2sd_Vsd_Wss__cvtsd2ss_Vss_Wsd,
    /* 0x5b */  iemOp_cvtdq2ps_Vps_Wdq__cvtps2dq_Vdq_Wps__cvtps2dq_Vdq_Wps,
    /* 0x5c */  iemOp_subps_Vps_Wps__subpd_Vps_Wdp__subss_Vss_Wss__subsd_Vsd_Wsd,
    /* 0x5d */  iemOp_minps_Vps_Wps__minpd_Vpd_Wpd__minss_Vss_Wss__minsd_Vsd_Wsd,
    /* 0x5e */  iemOp_divps_Vps_Wps__divpd_Vpd_Wpd__divss_Vss_Wss__divsd_Vsd_Wsd,
    /* 0x5f */  iemOp_maxps_Vps_Wps__maxpd_Vpd_Wpd__maxss_Vss_Wss__maxsd_Vsd_Wsd,
    /* 0x60 */  iemOp_punpcklbw_Pq_Qd__punpcklbw_Vdq_Wdq,
    /* 0x61 */  iemOp_punpcklwd_Pq_Qd__punpcklwd_Vdq_Wdq,
    /* 0x62 */  iemOp_punpckldq_Pq_Qd__punpckldq_Vdq_Wdq,
    /* 0x63 */  iemOp_packsswb_Pq_Qq__packsswb_Vdq_Wdq,
    /* 0x64 */  iemOp_pcmpgtb_Pq_Qq__pcmpgtb_Vdq_Wdq,
    /* 0x65 */  iemOp_pcmpgtw_Pq_Qq__pcmpgtw_Vdq_Wdq,
    /* 0x66 */  iemOp_pcmpgtd_Pq_Qq__pcmpgtd_Vdq_Wdq,
    /* 0x67 */  iemOp_packuswb_Pq_Qq__packuswb_Vdq_Wdq,
    /* 0x68 */  iemOp_punpckhbw_Pq_Qq__punpckhbw_Vdq_Wdq,
    /* 0x69 */  iemOp_punpckhwd_Pq_Qd__punpckhwd_Vdq_Wdq,
    /* 0x6a */  iemOp_punpckhdq_Pq_Qd__punpckhdq_Vdq_Wdq,
    /* 0x6b */  iemOp_packssdw_Pq_Qd__packssdq_Vdq_Wdq,
    /* 0x6c */  iemOp_punpcklqdq_Vdq_Wdq,
    /* 0x6d */  iemOp_punpckhqdq_Vdq_Wdq,
    /* 0x6e */  iemOp_movd_q_Pd_Ey__movd_q_Vy_Ey,
    /* 0x6f */  iemOp_movq_Pq_Qq__movdqa_Vdq_Wdq__movdqu_Vdq_Wdq,
    /* 0x70 */  iemOp_pshufw_Pq_Qq_Ib__pshufd_Vdq_Wdq_Ib__pshufhw_Vdq_Wdq_Ib__pshuflq_Vdq_Wdq_Ib,
    /* 0x71 */  iemOp_Grp12,
    /* 0x72 */  iemOp_Grp13,
    /* 0x73 */  iemOp_Grp14,
    /* 0x74 */  iemOp_pcmpeqb_Pq_Qq__pcmpeqb_Vdq_Wdq,
    /* 0x75 */  iemOp_pcmpeqw_Pq_Qq__pcmpeqw_Vdq_Wdq,
    /* 0x76 */  iemOp_pcmped_Pq_Qq__pcmpeqd_Vdq_Wdq,
    /* 0x77 */  iemOp_emms,
    /* 0x78 */  iemOp_vmread, iemOp_vmwrite, iemOp_Invalid, iemOp_Invalid,
    /* 0x7c */  iemOp_haddpd_Vdp_Wpd__haddps_Vps_Wps,
    /* 0x7d */  iemOp_hsubpd_Vpd_Wpd__hsubps_Vps_Wps,
    /* 0x7e */  iemOp_movd_q_Ey_Pd__movd_q_Ey_Vy__movq_Vq_Wq,
    /* 0x7f */  iemOp_movq_Qq_Pq__movq_movdqa_Wdq_Vdq__movdqu_Wdq_Vdq,
    /* 0x80 */  iemOp_jo_Jv, iemOp_jno_Jv, iemOp_jc_Jv, iemOp_jnc_Jv,
    /* 0x84 */  iemOp_je_Jv, iemOp_jne_Jv, iemOp_jbe_Jv, iemOp_jnbe_Jv,
    /* 0x88 */  iemOp_js_Jv, iemOp_jns_Jv, iemOp_jp_Jv, iemOp_jnp_Jv,
    /* 0x8c */  iemOp_jl_Jv, iemOp_jnl_Jv, iemOp_jle_Jv, iemOp_jnle_Jv,
    /* 0x90 */  iemOp_seto_Eb, iemOp_setno_Eb, iemOp_setc_Eb, iemOp_setnc_Eb,
    /* 0x94 */  iemOp_sete_Eb, iemOp_setne_Eb, iemOp_setbe_Eb, iemOp_setnbe_Eb,
    /* 0x98 */  iemOp_sets_Eb, iemOp_setns_Eb, iemOp_setp_Eb, iemOp_setnp_Eb,
    /* 0x9c */  iemOp_setl_Eb, iemOp_setnl_Eb, iemOp_setle_Eb, iemOp_setnle_Eb,
    /* 0xa0 */  iemOp_push_fs, iemOp_pop_fs, iemOp_cpuid, iemOp_bt_Ev_Gv,
    /* 0xa4 */  iemOp_shld_Ev_Gv_Ib, iemOp_shld_Ev_Gv_CL, iemOp_Invalid, iemOp_Invalid,
    /* 0xa8 */  iemOp_push_gs, iemOp_pop_gs, iemOp_rsm, iemOp_bts_Ev_Gv,
    /* 0xac */  iemOp_shrd_Ev_Gv_Ib, iemOp_shrd_Ev_Gv_CL, iemOp_Grp15, iemOp_imul_Gv_Ev,
    /* 0xb0 */  iemOp_cmpxchg_Eb_Gb, iemOp_cmpxchg_Ev_Gv, iemOp_lss_Gv_Mp, iemOp_btr_Ev_Gv,
    /* 0xb4 */  iemOp_lfs_Gv_Mp, iemOp_lgs_Gv_Mp, iemOp_movzx_Gv_Eb, iemOp_movzx_Gv_Ew,
    /* 0xb8 */  iemOp_popcnt_Gv_Ev_jmpe,iemOp_Grp10, iemOp_Grp8, iemOp_btc_Ev_Gv,
    /* 0xbc */  iemOp_bsf_Gv_Ev, iemOp_bsr_Gv_Ev, iemOp_movsx_Gv_Eb, iemOp_movsx_Gv_Ew,
    /* 0xc0 */  iemOp_xadd_Eb_Gb,
    /* 0xc1 */  iemOp_xadd_Ev_Gv,
    /* 0xc2 */  iemOp_cmpps_Vps_Wps_Ib__cmppd_Vpd_Wpd_Ib__cmpss_Vss_Wss_Ib__cmpsd_Vsd_Wsd_Ib,
    /* 0xc3 */  iemOp_movnti_My_Gy,
    /* 0xc4 */  iemOp_pinsrw_Pq_Ry_Mw_Ib__pinsrw_Vdq_Ry_Mw_Ib,
    /* 0xc5 */  iemOp_pextrw_Gd_Nq_Ib__pextrw_Gd_Udq_Ib,
    /* 0xc6 */  iemOp_shufps_Vps_Wps_Ib__shufdp_Vpd_Wpd_Ib,
    /* 0xc7 */  iemOp_Grp9,
    /* 0xc8 */  iemOp_bswap_rAX_r8, iemOp_bswap_rCX_r9, iemOp_bswap_rDX_r10, iemOp_bswap_rBX_r11,
    /* 0xcc */  iemOp_bswap_rSP_r12, iemOp_bswap_rBP_r13, iemOp_bswap_rSI_r14, iemOp_bswap_rDI_r15,
    /* 0xd0 */  iemOp_addsubpd_Vpd_Wpd__addsubps_Vps_Wps,
    /* 0xd1 */  iemOp_psrlw_Pp_Qp__psrlw_Vdp_Wdq,
    /* 0xd2 */  iemOp_psrld_Pq_Qq__psrld_Vdq_Wdq,
    /* 0xd3 */  iemOp_psrlq_Pq_Qq__psrlq_Vdq_Wdq,
    /* 0xd4 */  iemOp_paddq_Pq_Qq__paddq_Vdq_Wdq,
    /* 0xd5 */  iemOp_pmulq_Pq_Qq__pmullw_Vdq_Wdq,
    /* 0xd6 */  iemOp_movq_Wq_Vq__movq2dq_Vdq_Nq__movdq2q_Pq_Uq,
    /* 0xd7 */  iemOp_pmovmskb_Gd_Nq__pmovmskb_Gd_Udq,
    /* 0xd8 */  iemOp_psubusb_Pq_Qq__psubusb_Vdq_Wdq,
    /* 0xd9 */  iemOp_psubusw_Pq_Qq__psubusw_Vdq_Wdq,
    /* 0xda */  iemOp_pminub_Pq_Qq__pminub_Vdq_Wdq,
    /* 0xdb */  iemOp_pand_Pq_Qq__pand_Vdq_Wdq,
    /* 0xdc */  iemOp_paddusb_Pq_Qq__paddusb_Vdq_Wdq,
    /* 0xdd */  iemOp_paddusw_Pq_Qq__paddusw_Vdq_Wdq,
    /* 0xde */  iemOp_pmaxub_Pq_Qq__pamxub_Vdq_Wdq,
    /* 0xdf */  iemOp_pandn_Pq_Qq__pandn_Vdq_Wdq,
    /* 0xe0 */  iemOp_pavgb_Pq_Qq__pavgb_Vdq_Wdq,
    /* 0xe1 */  iemOp_psraw_Pq_Qq__psraw_Vdq_Wdq,
    /* 0xe2 */  iemOp_psrad_Pq_Qq__psrad_Vdq_Wdq,
    /* 0xe3 */  iemOp_pavgw_Pq_Qq__pavgw_Vdq_Wdq,
    /* 0xe4 */  iemOp_pmulhuw_Pq_Qq__pmulhuw_Vdq_Wdq,
    /* 0xe5 */  iemOp_pmulhw_Pq_Qq__pmulhw_Vdq_Wdq,
    /* 0xe6 */  iemOp_cvttpd2dq_Vdq_Wdp__cvtdq2pd_Vdq_Wpd__cvtpd2dq_Vdq_Wpd,
    /* 0xe7 */  iemOp_movntq_Mq_Pq__movntdq_Mdq_Vdq,
    /* 0xe8 */  iemOp_psubsb_Pq_Qq__psubsb_Vdq_Wdq,
    /* 0xe9 */  iemOp_psubsw_Pq_Qq__psubsw_Vdq_Wdq,
    /* 0xea */  iemOp_pminsw_Pq_Qq__pminsw_Vdq_Wdq,
    /* 0xeb */  iemOp_por_Pq_Qq__por_Vdq_Wdq,
    /* 0xec */  iemOp_paddsb_Pq_Qq__paddsb_Vdq_Wdq,
    /* 0xed */  iemOp_paddsw_Pq_Qq__paddsw_Vdq_Wdq,
    /* 0xee */  iemOp_pmaxsw_Pq_Qq__pmaxsw_Vdq_Wdq,
    /* 0xef */  iemOp_pxor_Pq_Qq__pxor_Vdq_Wdq,
    /* 0xf0 */  iemOp_lddqu_Vdq_Mdq,
    /* 0xf1 */  iemOp_psllw_Pq_Qq__pslw_Vdq_Wdq,
    /* 0xf2 */  iemOp_psld_Pq_Qq__pslld_Vdq_Wdq,
    /* 0xf3 */  iemOp_psllq_Pq_Qq__pslq_Vdq_Wdq,
    /* 0xf4 */  iemOp_pmuludq_Pq_Qq__pmuludq_Vdq_Wdq,
    /* 0xf5 */  iemOp_pmaddwd_Pq_Qq__pmaddwd_Vdq_Wdq,
    /* 0xf6 */  iemOp_psadbw_Pq_Qq__psadbw_Vdq_Wdq,
    /* 0xf7 */  iemOp_maskmovq_Pq_Nq__maskmovdqu_Vdq_Udq,
    /* 0xf8 */  iemOp_psubb_Pq_Qq_psubb_Vdq_Wdq,
    /* 0xf9 */  iemOp_psubw_Pq_Qq__psubw_Vdq_Wdq,
    /* 0xfa */  iemOp_psubd_Pq_Qq__psubd_Vdq_Wdq,
    /* 0xfb */  iemOp_psubq_Pq_Qq__psbuq_Vdq_Wdq,
    /* 0xfc */  iemOp_paddb_Pq_Qq__paddb_Vdq_Wdq,
    /* 0xfd */  iemOp_paddw_Pq_Qq__paddw_Vdq_Wdq,
    /* 0xfe */  iemOp_paddd_Pq_Qq__paddd_Vdq_Wdq,
    /* 0xff */  iemOp_Invalid
};
4424
4425/** @} */
4426
4427
4428/** @name One byte opcodes.
4429 *
4430 * @{
4431 */
4432
/*
 * Opcodes 0x00-0x07: the ADD family plus PUSH/POP ES.  Each ADD form
 * forwards to the generic binary-operator decode helper matching its
 * operand pattern, passing the 'add' implementation function table.
 */
/** Opcode 0x00. */
FNIEMOP_DEF(iemOp_add_Eb_Gb)
{
    IEMOP_MNEMONIC("add Eb,Gb");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_r8, &g_iemAImpl_add);
}


/** Opcode 0x01. */
FNIEMOP_DEF(iemOp_add_Ev_Gv)
{
    IEMOP_MNEMONIC("add Ev,Gv");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_rv, &g_iemAImpl_add);
}


/** Opcode 0x02. */
FNIEMOP_DEF(iemOp_add_Gb_Eb)
{
    IEMOP_MNEMONIC("add Gb,Eb");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_r8_rm, &g_iemAImpl_add);
}


/** Opcode 0x03. */
FNIEMOP_DEF(iemOp_add_Gv_Ev)
{
    IEMOP_MNEMONIC("add Gv,Ev");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_add);
}


/** Opcode 0x04. */
FNIEMOP_DEF(iemOp_add_Al_Ib)
{
    IEMOP_MNEMONIC("add al,Ib");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_AL_Ib, &g_iemAImpl_add);
}


/** Opcode 0x05. */
FNIEMOP_DEF(iemOp_add_eAX_Iz)
{
    IEMOP_MNEMONIC("add rAX,Iz");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rAX_Iz, &g_iemAImpl_add);
}


/** Opcode 0x06. */
FNIEMOP_DEF(iemOp_push_ES)
{
    IEMOP_MNEMONIC("push es");
    return FNIEMOP_CALL_1(iemOpCommonPushSReg, X86_SREG_ES);
}


/** Opcode 0x07. */
FNIEMOP_DEF(iemOp_pop_ES)
{
    IEMOP_MNEMONIC("pop es");
    IEMOP_HLP_NO_64BIT();       /* rejected in 64-bit mode per the macro. */
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_pop_Sreg, X86_SREG_ES, pIemCpu->enmEffOpSize);
}
4497
4498
/*
 * Opcodes 0x08-0x0f: the OR family, PUSH CS, and the two-byte escape.
 * The OR forms mark AF as undefined for the verification mode before
 * forwarding to the generic binary-operator decode helpers.
 */
/** Opcode 0x08. */
FNIEMOP_DEF(iemOp_or_Eb_Gb)
{
    IEMOP_MNEMONIC("or  Eb,Gb");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_r8, &g_iemAImpl_or);
}


/** Opcode 0x09. */
FNIEMOP_DEF(iemOp_or_Ev_Gv)
{
    /* NOTE(review): the mnemonic string below carries a stray trailing
       space ("or  Ev,Gv ") unlike its siblings; left untouched since it is
       runtime debug output. */
    IEMOP_MNEMONIC("or  Ev,Gv ");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_rv, &g_iemAImpl_or);
}


/** Opcode 0x0a. */
FNIEMOP_DEF(iemOp_or_Gb_Eb)
{
    IEMOP_MNEMONIC("or  Gb,Eb");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_r8_rm, &g_iemAImpl_or);
}


/** Opcode 0x0b. */
FNIEMOP_DEF(iemOp_or_Gv_Ev)
{
    IEMOP_MNEMONIC("or  Gv,Ev");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_or);
}


/** Opcode 0x0c. */
FNIEMOP_DEF(iemOp_or_Al_Ib)
{
    IEMOP_MNEMONIC("or  al,Ib");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_AL_Ib, &g_iemAImpl_or);
}


/** Opcode 0x0d. */
FNIEMOP_DEF(iemOp_or_eAX_Iz)
{
    IEMOP_MNEMONIC("or  rAX,Iz");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rAX_Iz, &g_iemAImpl_or);
}


/** Opcode 0x0e. */
FNIEMOP_DEF(iemOp_push_CS)
{
    IEMOP_MNEMONIC("push cs");
    return FNIEMOP_CALL_1(iemOpCommonPushSReg, X86_SREG_CS);
}


/** Opcode 0x0f.
 * Two-byte escape: fetch the next opcode byte and dispatch through the
 * two-byte map declared above. */
FNIEMOP_DEF(iemOp_2byteEscape)
{
    uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
    return FNIEMOP_CALL(g_apfnTwoByteMap[b]);
}
4567
/*
 * Opcodes 0x10-0x17: the ADC family plus PUSH/POP SS, all forwarding to
 * the generic decode helpers with the 'adc' implementation table.
 */
/** Opcode 0x10. */
FNIEMOP_DEF(iemOp_adc_Eb_Gb)
{
    IEMOP_MNEMONIC("adc Eb,Gb");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_r8, &g_iemAImpl_adc);
}


/** Opcode 0x11. */
FNIEMOP_DEF(iemOp_adc_Ev_Gv)
{
    IEMOP_MNEMONIC("adc Ev,Gv");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_rv, &g_iemAImpl_adc);
}


/** Opcode 0x12. */
FNIEMOP_DEF(iemOp_adc_Gb_Eb)
{
    IEMOP_MNEMONIC("adc Gb,Eb");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_r8_rm, &g_iemAImpl_adc);
}


/** Opcode 0x13. */
FNIEMOP_DEF(iemOp_adc_Gv_Ev)
{
    IEMOP_MNEMONIC("adc Gv,Ev");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_adc);
}


/** Opcode 0x14. */
FNIEMOP_DEF(iemOp_adc_Al_Ib)
{
    IEMOP_MNEMONIC("adc al,Ib");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_AL_Ib, &g_iemAImpl_adc);
}


/** Opcode 0x15. */
FNIEMOP_DEF(iemOp_adc_eAX_Iz)
{
    IEMOP_MNEMONIC("adc rAX,Iz");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rAX_Iz, &g_iemAImpl_adc);
}


/** Opcode 0x16. */
FNIEMOP_DEF(iemOp_push_SS)
{
    IEMOP_MNEMONIC("push ss");
    return FNIEMOP_CALL_1(iemOpCommonPushSReg, X86_SREG_SS);
}


/** Opcode 0x17. */
FNIEMOP_DEF(iemOp_pop_SS)
{
    IEMOP_MNEMONIC("pop ss"); /** @todo implies instruction fusing? */
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_NO_64BIT();
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_pop_Sreg, X86_SREG_SS, pIemCpu->enmEffOpSize);
}
4632
4633
/*
 * Opcodes 0x18-0x1f: the SBB family plus PUSH/POP DS, all forwarding to
 * the generic decode helpers with the 'sbb' implementation table.
 */
/** Opcode 0x18. */
FNIEMOP_DEF(iemOp_sbb_Eb_Gb)
{
    IEMOP_MNEMONIC("sbb Eb,Gb");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_r8, &g_iemAImpl_sbb);
}


/** Opcode 0x19. */
FNIEMOP_DEF(iemOp_sbb_Ev_Gv)
{
    IEMOP_MNEMONIC("sbb Ev,Gv");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_rv, &g_iemAImpl_sbb);
}


/** Opcode 0x1a. */
FNIEMOP_DEF(iemOp_sbb_Gb_Eb)
{
    IEMOP_MNEMONIC("sbb Gb,Eb");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_r8_rm, &g_iemAImpl_sbb);
}


/** Opcode 0x1b. */
FNIEMOP_DEF(iemOp_sbb_Gv_Ev)
{
    IEMOP_MNEMONIC("sbb Gv,Ev");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_sbb);
}


/** Opcode 0x1c. */
FNIEMOP_DEF(iemOp_sbb_Al_Ib)
{
    IEMOP_MNEMONIC("sbb al,Ib");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_AL_Ib, &g_iemAImpl_sbb);
}


/** Opcode 0x1d. */
FNIEMOP_DEF(iemOp_sbb_eAX_Iz)
{
    IEMOP_MNEMONIC("sbb rAX,Iz");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rAX_Iz, &g_iemAImpl_sbb);
}


/** Opcode 0x1e. */
FNIEMOP_DEF(iemOp_push_DS)
{
    IEMOP_MNEMONIC("push ds");
    return FNIEMOP_CALL_1(iemOpCommonPushSReg, X86_SREG_DS);
}


/** Opcode 0x1f. */
FNIEMOP_DEF(iemOp_pop_DS)
{
    IEMOP_MNEMONIC("pop ds");
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_NO_64BIT();
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_pop_Sreg, X86_SREG_DS, pIemCpu->enmEffOpSize);
}
4698
4699
/*
 * Opcodes 0x20-0x27: the AND family (AF marked undefined for verification),
 * the ES segment-override prefix, and the DAA stub.
 */
/** Opcode 0x20. */
FNIEMOP_DEF(iemOp_and_Eb_Gb)
{
    IEMOP_MNEMONIC("and Eb,Gb");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_r8, &g_iemAImpl_and);
}


/** Opcode 0x21. */
FNIEMOP_DEF(iemOp_and_Ev_Gv)
{
    IEMOP_MNEMONIC("and Ev,Gv");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_rv, &g_iemAImpl_and);
}


/** Opcode 0x22. */
FNIEMOP_DEF(iemOp_and_Gb_Eb)
{
    IEMOP_MNEMONIC("and Gb,Eb");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_r8_rm, &g_iemAImpl_and);
}


/** Opcode 0x23. */
FNIEMOP_DEF(iemOp_and_Gv_Ev)
{
    IEMOP_MNEMONIC("and Gv,Ev");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_and);
}


/** Opcode 0x24. */
FNIEMOP_DEF(iemOp_and_Al_Ib)
{
    IEMOP_MNEMONIC("and al,Ib");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_AL_Ib, &g_iemAImpl_and);
}


/** Opcode 0x25. */
FNIEMOP_DEF(iemOp_and_eAX_Iz)
{
    IEMOP_MNEMONIC("and rAX,Iz");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rAX_Iz, &g_iemAImpl_and);
}


/** Opcode 0x26.
 * ES segment-override prefix: record it and decode the next opcode byte. */
FNIEMOP_DEF(iemOp_seg_ES)
{
    pIemCpu->fPrefixes |= IEM_OP_PRF_SEG_ES;
    pIemCpu->iEffSeg = X86_SREG_ES;

    uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
    return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}


/** Opcode 0x27. Decoder stub - not implemented yet. */
FNIEMOP_STUB(iemOp_daa);
4767
4768
/*
 * Opcodes 0x28-0x2f: the SUB family, the CS segment-override prefix, and
 * the DAS stub.
 */
/** Opcode 0x28. */
FNIEMOP_DEF(iemOp_sub_Eb_Gb)
{
    IEMOP_MNEMONIC("sub Eb,Gb");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_r8, &g_iemAImpl_sub);
}


/** Opcode 0x29. */
FNIEMOP_DEF(iemOp_sub_Ev_Gv)
{
    IEMOP_MNEMONIC("sub Ev,Gv");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_rv, &g_iemAImpl_sub);
}


/** Opcode 0x2a. */
FNIEMOP_DEF(iemOp_sub_Gb_Eb)
{
    IEMOP_MNEMONIC("sub Gb,Eb");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_r8_rm, &g_iemAImpl_sub);
}


/** Opcode 0x2b. */
FNIEMOP_DEF(iemOp_sub_Gv_Ev)
{
    IEMOP_MNEMONIC("sub Gv,Ev");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_sub);
}


/** Opcode 0x2c. */
FNIEMOP_DEF(iemOp_sub_Al_Ib)
{
    IEMOP_MNEMONIC("sub al,Ib");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_AL_Ib, &g_iemAImpl_sub);
}


/** Opcode 0x2d. */
FNIEMOP_DEF(iemOp_sub_eAX_Iz)
{
    IEMOP_MNEMONIC("sub rAX,Iz");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rAX_Iz, &g_iemAImpl_sub);
}


/** Opcode 0x2e.
 * CS segment-override prefix: record it and decode the next opcode byte. */
FNIEMOP_DEF(iemOp_seg_CS)
{
    pIemCpu->fPrefixes |= IEM_OP_PRF_SEG_CS;
    pIemCpu->iEffSeg = X86_SREG_CS;

    uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
    return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}


/** Opcode 0x2f. Decoder stub - not implemented yet. */
FNIEMOP_STUB(iemOp_das);
4830
4831
/*
 * Opcodes 0x30-0x37: the XOR family (AF marked undefined for verification),
 * the SS segment-override prefix, and the AAA stub.
 */
/** Opcode 0x30. */
FNIEMOP_DEF(iemOp_xor_Eb_Gb)
{
    IEMOP_MNEMONIC("xor Eb,Gb");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_r8, &g_iemAImpl_xor);
}


/** Opcode 0x31. */
FNIEMOP_DEF(iemOp_xor_Ev_Gv)
{
    IEMOP_MNEMONIC("xor Ev,Gv");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_rv, &g_iemAImpl_xor);
}


/** Opcode 0x32. */
FNIEMOP_DEF(iemOp_xor_Gb_Eb)
{
    IEMOP_MNEMONIC("xor Gb,Eb");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_r8_rm, &g_iemAImpl_xor);
}


/** Opcode 0x33. */
FNIEMOP_DEF(iemOp_xor_Gv_Ev)
{
    IEMOP_MNEMONIC("xor Gv,Ev");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_xor);
}


/** Opcode 0x34. */
FNIEMOP_DEF(iemOp_xor_Al_Ib)
{
    IEMOP_MNEMONIC("xor al,Ib");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_AL_Ib, &g_iemAImpl_xor);
}


/** Opcode 0x35. */
FNIEMOP_DEF(iemOp_xor_eAX_Iz)
{
    IEMOP_MNEMONIC("xor rAX,Iz");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rAX_Iz, &g_iemAImpl_xor);
}


/** Opcode 0x36.
 * SS segment-override prefix: record it and decode the next opcode byte. */
FNIEMOP_DEF(iemOp_seg_SS)
{
    pIemCpu->fPrefixes |= IEM_OP_PRF_SEG_SS;
    pIemCpu->iEffSeg = X86_SREG_SS;

    uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
    return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}


/** Opcode 0x37. Decoder stub - not implemented yet. */
FNIEMOP_STUB(iemOp_aaa);
4899
4900
/** Opcode 0x38. */
FNIEMOP_DEF(iemOp_cmp_Eb_Gb)
{
    IEMOP_MNEMONIC("cmp Eb,Gb");
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo do we have to decode the whole instruction first? */
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_r8, &g_iemAImpl_cmp);
}


/** Opcode 0x39. */
FNIEMOP_DEF(iemOp_cmp_Ev_Gv)
{
    IEMOP_MNEMONIC("cmp Ev,Gv");
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo do we have to decode the whole instruction first? */
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_rv, &g_iemAImpl_cmp);
}
4917
4918
4919/** Opcode 0x3a. */
4920FNIEMOP_DEF(iemOp_cmp_Gb_Eb)
4921{
4922 IEMOP_MNEMONIC("cmp Gb,Eb");
4923 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_r8_rm, &g_iemAImpl_cmp);
4924}
4925
4926
4927/** Opcode 0x3b. */
4928FNIEMOP_DEF(iemOp_cmp_Gv_Ev)
4929{
4930 IEMOP_MNEMONIC("cmp Gv,Ev");
4931 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_cmp);
4932}
4933
4934
4935/** Opcode 0x3c. */
4936FNIEMOP_DEF(iemOp_cmp_Al_Ib)
4937{
4938 IEMOP_MNEMONIC("cmp al,Ib");
4939 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_AL_Ib, &g_iemAImpl_cmp);
4940}
4941
4942
4943/** Opcode 0x3d. */
4944FNIEMOP_DEF(iemOp_cmp_eAX_Iz)
4945{
4946 IEMOP_MNEMONIC("cmp rAX,Iz");
4947 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rAX_Iz, &g_iemAImpl_cmp);
4948}
4949
4950
/** Opcode 0x3e.
 * DS segment-override prefix: record it and decode the next opcode byte. */
FNIEMOP_DEF(iemOp_seg_DS)
{
    pIemCpu->fPrefixes |= IEM_OP_PRF_SEG_DS;
    pIemCpu->iEffSeg = X86_SREG_DS;

    uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
    return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}


/** Opcode 0x3f. Decoder stub - not implemented yet. */
FNIEMOP_STUB(iemOp_aas);
4964
4965/**
4966 * Common 'inc/dec/not/neg register' helper.
4967 */
4968FNIEMOP_DEF_2(iemOpCommonUnaryGReg, PCIEMOPUNARYSIZES, pImpl, uint8_t, iReg)
4969{
4970 IEMOP_HLP_NO_LOCK_PREFIX();
4971 switch (pIemCpu->enmEffOpSize)
4972 {
4973 case IEMMODE_16BIT:
4974 IEM_MC_BEGIN(2, 0);
4975 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
4976 IEM_MC_ARG(uint32_t *, pEFlags, 1);
4977 IEM_MC_REF_GREG_U16(pu16Dst, iReg);
4978 IEM_MC_REF_EFLAGS(pEFlags);
4979 IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnNormalU16, pu16Dst, pEFlags);
4980 IEM_MC_ADVANCE_RIP();
4981 IEM_MC_END();
4982 return VINF_SUCCESS;
4983
4984 case IEMMODE_32BIT:
4985 IEM_MC_BEGIN(2, 0);
4986 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
4987 IEM_MC_ARG(uint32_t *, pEFlags, 1);
4988 IEM_MC_REF_GREG_U32(pu32Dst, iReg);
4989 IEM_MC_REF_EFLAGS(pEFlags);
4990 IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnNormalU32, pu32Dst, pEFlags);
4991 IEM_MC_ADVANCE_RIP();
4992 IEM_MC_END();
4993 return VINF_SUCCESS;
4994
4995 case IEMMODE_64BIT:
4996 IEM_MC_BEGIN(2, 0);
4997 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
4998 IEM_MC_ARG(uint32_t *, pEFlags, 1);
4999 IEM_MC_REF_GREG_U64(pu64Dst, iReg);
5000 IEM_MC_REF_EFLAGS(pEFlags);
5001 IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnNormalU64, pu64Dst, pEFlags);
5002 IEM_MC_ADVANCE_RIP();
5003 IEM_MC_END();
5004 return VINF_SUCCESS;
5005 }
5006 return VINF_SUCCESS;
5007}
5008
5009
5010/** Opcode 0x40. */
5011FNIEMOP_DEF(iemOp_inc_eAX)
5012{
5013 /*
5014 * This is a REX prefix in 64-bit mode.
5015 */
5016 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5017 {
5018 pIemCpu->fPrefixes |= IEM_OP_PRF_REX;
5019
5020 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
5021 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
5022 }
5023
5024 IEMOP_MNEMONIC("inc eAX");
5025 return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_inc, X86_GREG_xAX);
5026}
5027
5028
5029/** Opcode 0x41. */
5030FNIEMOP_DEF(iemOp_inc_eCX)
5031{
5032 /*
5033 * This is a REX prefix in 64-bit mode.
5034 */
5035 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5036 {
5037 pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_B;
5038 pIemCpu->uRexB = 1 << 3;
5039
5040 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
5041 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
5042 }
5043
5044 IEMOP_MNEMONIC("inc eCX");
5045 return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_inc, X86_GREG_xCX);
5046}
5047
5048
/**
 * Opcode 0x42 - INC eDX in 16/32-bit mode; the REX.X prefix in 64-bit mode.
 */
FNIEMOP_DEF(iemOp_inc_eDX)
{
    /*
     * This is a REX prefix in 64-bit mode.
     */
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
    {
        pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_X;
        pIemCpu->uRexIndex = 1 << 3;    /* REX.X: bit 3 of the SIB index field. */

        uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
        return FNIEMOP_CALL(g_apfnOneByteMap[b]);
    }

    IEMOP_MNEMONIC("inc eDX");
    return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_inc, X86_GREG_xDX);
}
5067
5068
5069
/**
 * Opcode 0x43 - INC eBX in 16/32-bit mode; the REX.BX prefix in 64-bit mode.
 */
FNIEMOP_DEF(iemOp_inc_eBX)
{
    /*
     * This is a REX prefix in 64-bit mode.
     */
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
    {
        pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_B | IEM_OP_PRF_REX_X;
        pIemCpu->uRexB     = 1 << 3;    /* REX.B: extends r/m, SIB base and opcode reg fields. */
        pIemCpu->uRexIndex = 1 << 3;    /* REX.X: extends the SIB index field. */

        uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
        return FNIEMOP_CALL(g_apfnOneByteMap[b]);
    }

    IEMOP_MNEMONIC("inc eBX");
    return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_inc, X86_GREG_xBX);
}
5089
5090
/**
 * Opcode 0x44 - INC eSP in 16/32-bit mode; the REX.R prefix in 64-bit mode.
 */
FNIEMOP_DEF(iemOp_inc_eSP)
{
    /*
     * This is a REX prefix in 64-bit mode.
     */
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
    {
        pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R;
        pIemCpu->uRexReg = 1 << 3;      /* REX.R: bit 3 of the ModR/M reg field. */

        uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
        return FNIEMOP_CALL(g_apfnOneByteMap[b]);
    }

    IEMOP_MNEMONIC("inc eSP");
    return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_inc, X86_GREG_xSP);
}
5109
5110
/**
 * Opcode 0x45 - INC eBP in 16/32-bit mode; the REX.RB prefix in 64-bit mode.
 */
FNIEMOP_DEF(iemOp_inc_eBP)
{
    /*
     * This is a REX prefix in 64-bit mode.
     */
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
    {
        pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_REX_B;
        pIemCpu->uRexReg = 1 << 3;      /* REX.R: extends the ModR/M reg field. */
        pIemCpu->uRexB   = 1 << 3;      /* REX.B: extends r/m, SIB base and opcode reg fields. */

        uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
        return FNIEMOP_CALL(g_apfnOneByteMap[b]);
    }

    IEMOP_MNEMONIC("inc eBP");
    return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_inc, X86_GREG_xBP);
}
5130
5131
/**
 * Opcode 0x46 - INC eSI in 16/32-bit mode; the REX.RX prefix in 64-bit mode.
 */
FNIEMOP_DEF(iemOp_inc_eSI)
{
    /*
     * This is a REX prefix in 64-bit mode.
     */
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
    {
        pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_REX_X;
        pIemCpu->uRexReg   = 1 << 3;    /* REX.R: extends the ModR/M reg field. */
        pIemCpu->uRexIndex = 1 << 3;    /* REX.X: extends the SIB index field. */

        uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
        return FNIEMOP_CALL(g_apfnOneByteMap[b]);
    }

    IEMOP_MNEMONIC("inc eSI");
    return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_inc, X86_GREG_xSI);
}
5151
5152
/**
 * Opcode 0x47 - INC eDI in 16/32-bit mode; the REX.RBX prefix in 64-bit mode.
 */
FNIEMOP_DEF(iemOp_inc_eDI)
{
    /*
     * This is a REX prefix in 64-bit mode.
     */
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
    {
        pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_REX_B | IEM_OP_PRF_REX_X;
        pIemCpu->uRexReg   = 1 << 3;    /* REX.R */
        pIemCpu->uRexB     = 1 << 3;    /* REX.B */
        pIemCpu->uRexIndex = 1 << 3;    /* REX.X */

        uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
        return FNIEMOP_CALL(g_apfnOneByteMap[b]);
    }

    IEMOP_MNEMONIC("inc eDI");
    return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_inc, X86_GREG_xDI);
}
5173
5174
/**
 * Opcode 0x48 - DEC eAX in 16/32-bit mode; the REX.W prefix in 64-bit mode.
 */
FNIEMOP_DEF(iemOp_dec_eAX)
{
    /*
     * This is a REX prefix in 64-bit mode.
     */
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
    {
        pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_SIZE_REX_W;
        iemRecalEffOpSize(pIemCpu);     /* REX.W changes the effective operand size to 64-bit. */

        uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
        return FNIEMOP_CALL(g_apfnOneByteMap[b]);
    }

    IEMOP_MNEMONIC("dec eAX");
    return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_dec, X86_GREG_xAX);
}
5193
5194
/**
 * Opcode 0x49 - DEC eCX in 16/32-bit mode; the REX.BW prefix in 64-bit mode.
 */
FNIEMOP_DEF(iemOp_dec_eCX)
{
    /*
     * This is a REX prefix in 64-bit mode.
     */
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
    {
        pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_B | IEM_OP_PRF_SIZE_REX_W;
        pIemCpu->uRexB = 1 << 3;        /* REX.B */
        iemRecalEffOpSize(pIemCpu);     /* REX.W: effective operand size becomes 64-bit. */

        uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
        return FNIEMOP_CALL(g_apfnOneByteMap[b]);
    }

    IEMOP_MNEMONIC("dec eCX");
    return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_dec, X86_GREG_xCX);
}
5214
5215
/**
 * Opcode 0x4a - DEC eDX in 16/32-bit mode; the REX.XW prefix in 64-bit mode.
 */
FNIEMOP_DEF(iemOp_dec_eDX)
{
    /*
     * This is a REX prefix in 64-bit mode.
     */
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
    {
        pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_X | IEM_OP_PRF_SIZE_REX_W;
        pIemCpu->uRexIndex = 1 << 3;    /* REX.X */
        iemRecalEffOpSize(pIemCpu);     /* REX.W: effective operand size becomes 64-bit. */

        uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
        return FNIEMOP_CALL(g_apfnOneByteMap[b]);
    }

    IEMOP_MNEMONIC("dec eDX");
    return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_dec, X86_GREG_xDX);
}
5235
5236
/**
 * Opcode 0x4b - DEC eBX in 16/32-bit mode; the REX.BXW prefix in 64-bit mode.
 */
FNIEMOP_DEF(iemOp_dec_eBX)
{
    /*
     * This is a REX prefix in 64-bit mode.
     */
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
    {
        pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_B | IEM_OP_PRF_REX_X | IEM_OP_PRF_SIZE_REX_W;
        pIemCpu->uRexB     = 1 << 3;    /* REX.B */
        pIemCpu->uRexIndex = 1 << 3;    /* REX.X */
        iemRecalEffOpSize(pIemCpu);     /* REX.W: effective operand size becomes 64-bit. */

        uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
        return FNIEMOP_CALL(g_apfnOneByteMap[b]);
    }

    IEMOP_MNEMONIC("dec eBX");
    return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_dec, X86_GREG_xBX);
}
5257
5258
/**
 * Opcode 0x4c - DEC eSP in 16/32-bit mode; the REX.RW prefix in 64-bit mode.
 */
FNIEMOP_DEF(iemOp_dec_eSP)
{
    /*
     * This is a REX prefix in 64-bit mode.
     */
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
    {
        pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_SIZE_REX_W;
        pIemCpu->uRexReg = 1 << 3;      /* REX.R */
        iemRecalEffOpSize(pIemCpu);     /* REX.W: effective operand size becomes 64-bit. */

        uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
        return FNIEMOP_CALL(g_apfnOneByteMap[b]);
    }

    IEMOP_MNEMONIC("dec eSP");
    return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_dec, X86_GREG_xSP);
}
5278
5279
/**
 * Opcode 0x4d - DEC eBP in 16/32-bit mode; the REX.RBW prefix in 64-bit mode.
 */
FNIEMOP_DEF(iemOp_dec_eBP)
{
    /*
     * This is a REX prefix in 64-bit mode.
     */
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
    {
        pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_REX_B | IEM_OP_PRF_SIZE_REX_W;
        pIemCpu->uRexReg = 1 << 3;      /* REX.R */
        pIemCpu->uRexB   = 1 << 3;      /* REX.B */
        iemRecalEffOpSize(pIemCpu);     /* REX.W: effective operand size becomes 64-bit. */

        uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
        return FNIEMOP_CALL(g_apfnOneByteMap[b]);
    }

    IEMOP_MNEMONIC("dec eBP");
    return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_dec, X86_GREG_xBP);
}
5300
5301
/**
 * Opcode 0x4e - DEC eSI in 16/32-bit mode; the REX.RXW prefix in 64-bit mode.
 */
FNIEMOP_DEF(iemOp_dec_eSI)
{
    /*
     * This is a REX prefix in 64-bit mode.
     */
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
    {
        pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_REX_X | IEM_OP_PRF_SIZE_REX_W;
        pIemCpu->uRexReg   = 1 << 3;    /* REX.R */
        pIemCpu->uRexIndex = 1 << 3;    /* REX.X */
        iemRecalEffOpSize(pIemCpu);     /* REX.W: effective operand size becomes 64-bit. */

        uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
        return FNIEMOP_CALL(g_apfnOneByteMap[b]);
    }

    IEMOP_MNEMONIC("dec eSI");
    return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_dec, X86_GREG_xSI);
}
5322
5323
/**
 * Opcode 0x4f - DEC eDI in 16/32-bit mode; the REX.RBXW prefix in 64-bit mode.
 */
FNIEMOP_DEF(iemOp_dec_eDI)
{
    /*
     * This is a REX prefix in 64-bit mode.
     */
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
    {
        pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_REX_B | IEM_OP_PRF_REX_X | IEM_OP_PRF_SIZE_REX_W;
        pIemCpu->uRexReg   = 1 << 3;    /* REX.R */
        pIemCpu->uRexB     = 1 << 3;    /* REX.B */
        pIemCpu->uRexIndex = 1 << 3;    /* REX.X */
        iemRecalEffOpSize(pIemCpu);     /* REX.W: effective operand size becomes 64-bit. */

        uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
        return FNIEMOP_CALL(g_apfnOneByteMap[b]);
    }

    IEMOP_MNEMONIC("dec eDI");
    return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_dec, X86_GREG_xDI);
}
5345
5346
5347/**
5348 * Common 'push register' helper.
5349 */
5350FNIEMOP_DEF_1(iemOpCommonPushGReg, uint8_t, iReg)
5351{
5352 IEMOP_HLP_NO_LOCK_PREFIX();
5353 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5354 {
5355 iReg |= pIemCpu->uRexB;
5356 pIemCpu->enmDefOpSize = IEMMODE_64BIT;
5357 pIemCpu->enmEffOpSize = !(pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP) ? IEMMODE_64BIT : IEMMODE_16BIT;
5358 }
5359
5360 switch (pIemCpu->enmEffOpSize)
5361 {
5362 case IEMMODE_16BIT:
5363 IEM_MC_BEGIN(0, 1);
5364 IEM_MC_LOCAL(uint16_t, u16Value);
5365 IEM_MC_FETCH_GREG_U16(u16Value, iReg);
5366 IEM_MC_PUSH_U16(u16Value);
5367 IEM_MC_ADVANCE_RIP();
5368 IEM_MC_END();
5369 break;
5370
5371 case IEMMODE_32BIT:
5372 IEM_MC_BEGIN(0, 1);
5373 IEM_MC_LOCAL(uint32_t, u32Value);
5374 IEM_MC_FETCH_GREG_U32(u32Value, iReg);
5375 IEM_MC_PUSH_U32(u32Value);
5376 IEM_MC_ADVANCE_RIP();
5377 IEM_MC_END();
5378 break;
5379
5380 case IEMMODE_64BIT:
5381 IEM_MC_BEGIN(0, 1);
5382 IEM_MC_LOCAL(uint64_t, u64Value);
5383 IEM_MC_FETCH_GREG_U64(u64Value, iReg);
5384 IEM_MC_PUSH_U64(u64Value);
5385 IEM_MC_ADVANCE_RIP();
5386 IEM_MC_END();
5387 break;
5388 }
5389
5390 return VINF_SUCCESS;
5391}
5392
5393
/** Opcode 0x50 - PUSH rAX (also REX.B extended r8 via the common helper). */
FNIEMOP_DEF(iemOp_push_eAX)
{
    IEMOP_MNEMONIC("push rAX");
    return FNIEMOP_CALL_1(iemOpCommonPushGReg, X86_GREG_xAX);
}
5400
5401
/** Opcode 0x51 - PUSH rCX. */
FNIEMOP_DEF(iemOp_push_eCX)
{
    IEMOP_MNEMONIC("push rCX");
    return FNIEMOP_CALL_1(iemOpCommonPushGReg, X86_GREG_xCX);
}
5408
5409
/** Opcode 0x52 - PUSH rDX. */
FNIEMOP_DEF(iemOp_push_eDX)
{
    IEMOP_MNEMONIC("push rDX");
    return FNIEMOP_CALL_1(iemOpCommonPushGReg, X86_GREG_xDX);
}
5416
5417
/** Opcode 0x53 - PUSH rBX. */
FNIEMOP_DEF(iemOp_push_eBX)
{
    IEMOP_MNEMONIC("push rBX");
    return FNIEMOP_CALL_1(iemOpCommonPushGReg, X86_GREG_xBX);
}
5424
5425
/** Opcode 0x54 - PUSH rSP.  (Pushes the pre-decrement SP value, as on 286+;
 *  the value is fetched before the stack pointer update in the MC block.) */
FNIEMOP_DEF(iemOp_push_eSP)
{
    IEMOP_MNEMONIC("push rSP");
    return FNIEMOP_CALL_1(iemOpCommonPushGReg, X86_GREG_xSP);
}
5432
5433
/** Opcode 0x55 - PUSH rBP. */
FNIEMOP_DEF(iemOp_push_eBP)
{
    IEMOP_MNEMONIC("push rBP");
    return FNIEMOP_CALL_1(iemOpCommonPushGReg, X86_GREG_xBP);
}
5440
5441
/** Opcode 0x56 - PUSH rSI. */
FNIEMOP_DEF(iemOp_push_eSI)
{
    IEMOP_MNEMONIC("push rSI");
    return FNIEMOP_CALL_1(iemOpCommonPushGReg, X86_GREG_xSI);
}
5448
5449
/** Opcode 0x57 - PUSH rDI. */
FNIEMOP_DEF(iemOp_push_eDI)
{
    IEMOP_MNEMONIC("push rDI");
    return FNIEMOP_CALL_1(iemOpCommonPushGReg, X86_GREG_xDI);
}
5456
5457
5458/**
5459 * Common 'pop register' helper.
5460 */
5461FNIEMOP_DEF_1(iemOpCommonPopGReg, uint8_t, iReg)
5462{
5463 IEMOP_HLP_NO_LOCK_PREFIX();
5464 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5465 {
5466 iReg |= pIemCpu->uRexB;
5467 pIemCpu->enmDefOpSize = IEMMODE_64BIT;
5468 pIemCpu->enmEffOpSize = !(pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP) ? IEMMODE_64BIT : IEMMODE_16BIT;
5469 }
5470
5471/** @todo How does this code handle iReg==X86_GREG_xSP. How does a real CPU
5472 * handle it, for that matter (Intel pseudo code hints that the popped
5473 * value is incremented by the stack item size.) Test it, both encodings
5474 * and all three register sizes. */
5475 switch (pIemCpu->enmEffOpSize)
5476 {
5477 case IEMMODE_16BIT:
5478 IEM_MC_BEGIN(0, 1);
5479 IEM_MC_LOCAL(uint16_t, *pu16Dst);
5480 IEM_MC_REF_GREG_U16(pu16Dst, iReg);
5481 IEM_MC_POP_U16(pu16Dst);
5482 IEM_MC_ADVANCE_RIP();
5483 IEM_MC_END();
5484 break;
5485
5486 case IEMMODE_32BIT:
5487 IEM_MC_BEGIN(0, 1);
5488 IEM_MC_LOCAL(uint32_t, *pu32Dst);
5489 IEM_MC_REF_GREG_U32(pu32Dst, iReg);
5490 IEM_MC_POP_U32(pu32Dst);
5491 IEM_MC_ADVANCE_RIP();
5492 IEM_MC_END();
5493 break;
5494
5495 case IEMMODE_64BIT:
5496 IEM_MC_BEGIN(0, 1);
5497 IEM_MC_LOCAL(uint64_t, *pu64Dst);
5498 IEM_MC_REF_GREG_U64(pu64Dst, iReg);
5499 IEM_MC_POP_U64(pu64Dst);
5500 IEM_MC_ADVANCE_RIP();
5501 IEM_MC_END();
5502 break;
5503 }
5504
5505 return VINF_SUCCESS;
5506}
5507
5508
/** Opcode 0x58 - POP rAX (also REX.B extended r8 via the common helper). */
FNIEMOP_DEF(iemOp_pop_eAX)
{
    IEMOP_MNEMONIC("pop rAX");
    return FNIEMOP_CALL_1(iemOpCommonPopGReg, X86_GREG_xAX);
}
5515
5516
/** Opcode 0x59 - POP rCX. */
FNIEMOP_DEF(iemOp_pop_eCX)
{
    IEMOP_MNEMONIC("pop rCX");
    return FNIEMOP_CALL_1(iemOpCommonPopGReg, X86_GREG_xCX);
}
5523
5524
/** Opcode 0x5a - POP rDX. */
FNIEMOP_DEF(iemOp_pop_eDX)
{
    IEMOP_MNEMONIC("pop rDX");
    return FNIEMOP_CALL_1(iemOpCommonPopGReg, X86_GREG_xDX);
}
5531
5532
/** Opcode 0x5b - POP rBX. */
FNIEMOP_DEF(iemOp_pop_eBX)
{
    IEMOP_MNEMONIC("pop rBX");
    return FNIEMOP_CALL_1(iemOpCommonPopGReg, X86_GREG_xBX);
}
5539
5540
/** Opcode 0x5c - POP rSP.  See the @todo in iemOpCommonPopGReg regarding the
 *  special SP-as-destination semantics. */
FNIEMOP_DEF(iemOp_pop_eSP)
{
    IEMOP_MNEMONIC("pop rSP");
    return FNIEMOP_CALL_1(iemOpCommonPopGReg, X86_GREG_xSP);
}
5547
5548
/** Opcode 0x5d - POP rBP. */
FNIEMOP_DEF(iemOp_pop_eBP)
{
    IEMOP_MNEMONIC("pop rBP");
    return FNIEMOP_CALL_1(iemOpCommonPopGReg, X86_GREG_xBP);
}
5555
5556
/** Opcode 0x5e - POP rSI. */
FNIEMOP_DEF(iemOp_pop_eSI)
{
    IEMOP_MNEMONIC("pop rSI");
    return FNIEMOP_CALL_1(iemOpCommonPopGReg, X86_GREG_xSI);
}
5563
5564
/** Opcode 0x5f - POP rDI. */
FNIEMOP_DEF(iemOp_pop_eDI)
{
    IEMOP_MNEMONIC("pop rDI");
    return FNIEMOP_CALL_1(iemOpCommonPopGReg, X86_GREG_xDI);
}
5571
5572
/** Opcode 0x60 - PUSHA/PUSHAD.  Invalid in 64-bit mode; the operand size
 *  picks between the 16-bit and 32-bit C implementations. */
FNIEMOP_DEF(iemOp_pusha)
{
    IEMOP_MNEMONIC("pusha");
    IEMOP_HLP_NO_64BIT();
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
        return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_pusha_16);
    Assert(pIemCpu->enmEffOpSize == IEMMODE_32BIT);
    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_pusha_32);
}
5583
5584
/** Opcode 0x61 - POPA/POPAD.  Invalid in 64-bit mode; the operand size
 *  picks between the 16-bit and 32-bit C implementations. */
FNIEMOP_DEF(iemOp_popa)
{
    IEMOP_MNEMONIC("popa");
    IEMOP_HLP_NO_64BIT();
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
        return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_popa_16);
    Assert(pIemCpu->enmEffOpSize == IEMMODE_32BIT);
    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_popa_32);
}
5595
5596
/** Opcode 0x62 - BOUND Gv,Ma.  Not implemented yet (stub raises a to-do). */
FNIEMOP_STUB(iemOp_bound_Gv_Ma);
/** Opcode 0x63 - ARPL Ew,Gw (MOVSXD Gv,Ev in 64-bit mode).  Not implemented yet. */
FNIEMOP_STUB(iemOp_arpl_Ew_Gw);
5601
5602
/** Opcode 0x64 - FS segment override prefix. */
FNIEMOP_DEF(iemOp_seg_FS)
{
    pIemCpu->fPrefixes |= IEM_OP_PRF_SEG_FS;
    pIemCpu->iEffSeg = X86_SREG_FS;

    /* Prefixes are not instructions; decode the opcode that follows. */
    uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
    return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}
5612
5613
/** Opcode 0x65 - GS segment override prefix. */
FNIEMOP_DEF(iemOp_seg_GS)
{
    pIemCpu->fPrefixes |= IEM_OP_PRF_SEG_GS;
    pIemCpu->iEffSeg = X86_SREG_GS;

    /* Prefixes are not instructions; decode the opcode that follows. */
    uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
    return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}
5623
5624
/** Opcode 0x66 - operand-size override prefix. */
FNIEMOP_DEF(iemOp_op_size)
{
    pIemCpu->fPrefixes |= IEM_OP_PRF_SIZE_OP;
    iemRecalEffOpSize(pIemCpu);     /* Effective size also depends on mode and REX.W, so recalculate. */

    uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
    return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}
5634
5635
/** Opcode 0x67 - address-size override prefix.  Toggles 16<->32 in legacy
 *  modes and selects 32-bit addressing in 64-bit mode. */
FNIEMOP_DEF(iemOp_addr_size)
{
    pIemCpu->fPrefixes |= IEM_OP_PRF_SIZE_ADDR;
    switch (pIemCpu->enmDefAddrMode)
    {
        case IEMMODE_16BIT: pIemCpu->enmEffAddrMode = IEMMODE_32BIT; break;
        case IEMMODE_32BIT: pIemCpu->enmEffAddrMode = IEMMODE_16BIT; break;
        case IEMMODE_64BIT: pIemCpu->enmEffAddrMode = IEMMODE_32BIT; break;
        default: AssertFailed();
    }

    uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
    return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}
5651
5652
/** Opcode 0x68 - PUSH Iz (word/dword immediate; sign-extended dword in
 *  64-bit mode, where the default operand size is 64-bit). */
FNIEMOP_DEF(iemOp_push_Iz)
{
    IEMOP_MNEMONIC("push Iz");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
        {
            uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
            IEMOP_HLP_NO_LOCK_PREFIX();
            IEM_MC_BEGIN(0,0);
            IEM_MC_PUSH_U16(u16Imm);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;
        }

        case IEMMODE_32BIT:
        {
            uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
            IEMOP_HLP_NO_LOCK_PREFIX();
            IEM_MC_BEGIN(0,0);
            IEM_MC_PUSH_U32(u32Imm);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;
        }

        case IEMMODE_64BIT:
        {
            /* The immediate is still 32-bit on the wire, sign-extended to 64. */
            uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm);
            IEMOP_HLP_NO_LOCK_PREFIX();
            IEM_MC_BEGIN(0,0);
            IEM_MC_PUSH_U64(u64Imm);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;
        }

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
5696
5697
/** Opcode 0x69 - IMUL Gv,Ev,Iz: three-operand signed multiply with a
 *  word/dword immediate (sign-extended dword in 64-bit mode).  The product is
 *  computed in a local temporary and then stored to the Gv register; SF, ZF,
 *  AF and PF are declared undefined for the verifier. */
FNIEMOP_DEF(iemOp_imul_Gv_Ev_Iz)
{
    IEMOP_MNEMONIC("imul Gv,Ev,Iz"); /* Gv = Ev * Iz; */
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);

    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
        {
            uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
            IEMOP_HLP_NO_LOCK_PREFIX();
            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
            {
                /* register operand */
                IEM_MC_BEGIN(3, 1);
                IEM_MC_ARG(uint16_t *, pu16Dst, 0);
                IEM_MC_ARG_CONST(uint16_t, u16Src,/*=*/ u16Imm,1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_LOCAL(uint16_t, u16Tmp);

                IEM_MC_FETCH_GREG_U16(u16Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_LOCAL(pu16Dst, u16Tmp);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u16, pu16Dst, u16Src, pEFlags);
                IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Tmp);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            else
            {
                /* memory operand */
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint16_t *, pu16Dst, 0);
                IEM_MC_ARG_CONST(uint16_t, u16Src,/*=*/ u16Imm,1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_LOCAL(uint16_t, u16Tmp);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                IEM_MC_FETCH_MEM_U16(u16Tmp, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_REF_LOCAL(pu16Dst, u16Tmp);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u16, pu16Dst, u16Src, pEFlags);
                IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Tmp);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            return VINF_SUCCESS;
        }

        case IEMMODE_32BIT:
        {
            uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
            IEMOP_HLP_NO_LOCK_PREFIX();
            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
            {
                /* register operand */
                IEM_MC_BEGIN(3, 1);
                IEM_MC_ARG(uint32_t *, pu32Dst, 0);
                IEM_MC_ARG_CONST(uint32_t, u32Src,/*=*/ u32Imm,1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_LOCAL(uint32_t, u32Tmp);

                IEM_MC_FETCH_GREG_U32(u32Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_LOCAL(pu32Dst, u32Tmp);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u32, pu32Dst, u32Src, pEFlags);
                IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Tmp);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            else
            {
                /* memory operand */
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint32_t *, pu32Dst, 0);
                IEM_MC_ARG_CONST(uint32_t, u32Src,/*=*/ u32Imm,1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_LOCAL(uint32_t, u32Tmp);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                IEM_MC_FETCH_MEM_U32(u32Tmp, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_REF_LOCAL(pu32Dst, u32Tmp);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u32, pu32Dst, u32Src, pEFlags);
                IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Tmp);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            return VINF_SUCCESS;
        }

        case IEMMODE_64BIT:
        {
            /* 32-bit immediate sign-extended to 64 bits. */
            uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm);
            IEMOP_HLP_NO_LOCK_PREFIX();
            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
            {
                /* register operand */
                IEM_MC_BEGIN(3, 1);
                IEM_MC_ARG(uint64_t *, pu64Dst, 0);
                IEM_MC_ARG_CONST(uint64_t, u64Src,/*=*/ u64Imm,1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_LOCAL(uint64_t, u64Tmp);

                IEM_MC_FETCH_GREG_U64(u64Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_LOCAL(pu64Dst, u64Tmp);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u64, pu64Dst, u64Src, pEFlags);
                IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Tmp);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            else
            {
                /* memory operand */
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint64_t *, pu64Dst, 0);
                IEM_MC_ARG_CONST(uint64_t, u64Src,/*=*/ u64Imm,1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_LOCAL(uint64_t, u64Tmp);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                IEM_MC_FETCH_MEM_U64(u64Tmp, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_REF_LOCAL(pu64Dst, u64Tmp);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u64, pu64Dst, u64Src, pEFlags);
                IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Tmp);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            return VINF_SUCCESS;
        }
    }
    AssertFailedReturn(VERR_INTERNAL_ERROR_3);
}
5844
5845
/** Opcode 0x6a - PUSH Ib: byte immediate sign-extended to the effective
 *  operand size before pushing. */
FNIEMOP_DEF(iemOp_push_Ib)
{
    IEMOP_MNEMONIC("push Ib");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    /* i8Imm is signed, so the implicit conversion below sign-extends it. */
    IEM_MC_BEGIN(0,0);
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            IEM_MC_PUSH_U16(i8Imm);
            break;
        case IEMMODE_32BIT:
            IEM_MC_PUSH_U32(i8Imm);
            break;
        case IEMMODE_64BIT:
            IEM_MC_PUSH_U64(i8Imm);
            break;
    }
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
5871
5872
/** Opcode 0x6b - IMUL Gv,Ev,Ib: three-operand signed multiply with a byte
 *  immediate sign-extended to the effective operand size.  Structure mirrors
 *  iemOp_imul_Gv_Ev_Iz; SF, ZF, AF and PF are declared undefined. */
FNIEMOP_DEF(iemOp_imul_Gv_Ev_Ib)
{
    IEMOP_MNEMONIC("imul Gv,Ev,Ib"); /* Gv = Ev * Ib (sign-extended); */
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);

    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
            {
                /* register operand */
                IEM_MC_BEGIN(3, 1);
                IEM_MC_ARG(uint16_t *, pu16Dst, 0);
                IEM_MC_ARG_CONST(uint16_t, u16Src,/*=*/ (int8_t)u8Imm, 1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_LOCAL(uint16_t, u16Tmp);

                IEM_MC_FETCH_GREG_U16(u16Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_LOCAL(pu16Dst, u16Tmp);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u16, pu16Dst, u16Src, pEFlags);
                IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Tmp);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            else
            {
                /* memory operand */
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint16_t *, pu16Dst, 0);
                IEM_MC_ARG_CONST(uint16_t, u16Src,/*=*/ (int8_t)u8Imm, 1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_LOCAL(uint16_t, u16Tmp);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                IEM_MC_FETCH_MEM_U16(u16Tmp, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_REF_LOCAL(pu16Dst, u16Tmp);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u16, pu16Dst, u16Src, pEFlags);
                IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Tmp);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            return VINF_SUCCESS;

        case IEMMODE_32BIT:
            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
            {
                /* register operand */
                IEM_MC_BEGIN(3, 1);
                IEM_MC_ARG(uint32_t *, pu32Dst, 0);
                IEM_MC_ARG_CONST(uint32_t, u32Src,/*=*/ (int8_t)u8Imm, 1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_LOCAL(uint32_t, u32Tmp);

                IEM_MC_FETCH_GREG_U32(u32Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_LOCAL(pu32Dst, u32Tmp);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u32, pu32Dst, u32Src, pEFlags);
                IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Tmp);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            else
            {
                /* memory operand */
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint32_t *, pu32Dst, 0);
                IEM_MC_ARG_CONST(uint32_t, u32Src,/*=*/ (int8_t)u8Imm, 1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_LOCAL(uint32_t, u32Tmp);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                IEM_MC_FETCH_MEM_U32(u32Tmp, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_REF_LOCAL(pu32Dst, u32Tmp);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u32, pu32Dst, u32Src, pEFlags);
                IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Tmp);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            return VINF_SUCCESS;

        case IEMMODE_64BIT:
            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
            {
                /* register operand */
                IEM_MC_BEGIN(3, 1);
                IEM_MC_ARG(uint64_t *, pu64Dst, 0);
                IEM_MC_ARG_CONST(uint64_t, u64Src,/*=*/ (int8_t)u8Imm, 1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_LOCAL(uint64_t, u64Tmp);

                IEM_MC_FETCH_GREG_U64(u64Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_LOCAL(pu64Dst, u64Tmp);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u64, pu64Dst, u64Src, pEFlags);
                IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Tmp);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            else
            {
                /* memory operand */
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint64_t *, pu64Dst, 0);
                IEM_MC_ARG_CONST(uint64_t, u64Src,/*=*/ (int8_t)u8Imm, 1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_LOCAL(uint64_t, u64Tmp);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                IEM_MC_FETCH_MEM_U64(u64Tmp, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_REF_LOCAL(pu64Dst, u64Tmp);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u64, pu64Dst, u64Src, pEFlags);
                IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Tmp);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            return VINF_SUCCESS;
    }
    AssertFailedReturn(VERR_INTERNAL_ERROR_3);
}
6009
6010
/** Opcode 0x6c - INS Yb,DX (byte string input from port DX).  A REP/REPNE
 *  prefix selects the repeating C implementation; the effective address size
 *  picks the rDI width. */
FNIEMOP_DEF(iemOp_insb_Yb_DX)
{
    IEMOP_HLP_NO_LOCK_PREFIX();
    if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
    {
        IEMOP_MNEMONIC("rep ins Yb,DX");
        switch (pIemCpu->enmEffAddrMode)
        {
            case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_rep_ins_op8_addr16);
            case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_rep_ins_op8_addr32);
            case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_rep_ins_op8_addr64);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        IEMOP_MNEMONIC("ins Yb,DX");
        switch (pIemCpu->enmEffAddrMode)
        {
            case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_ins_op8_addr16);
            case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_ins_op8_addr32);
            case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_ins_op8_addr64);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
6038
6039
/** Opcode 0x6d - INS Yv,DX (word/dword string input from port DX).  A
 *  REP/REPNE prefix selects the repeating C implementation; the operand size
 *  selects the element width (64-bit op size behaves as 32-bit, per the
 *  shared case labels) and the address size picks the rDI width.
 *
 *  NOTE(review): the `break;` statements below look unreachable (every inner
 *  case returns), and there is no return after the outer switch — worth
 *  confirming no compiler flags a missing-return path here. */
FNIEMOP_DEF(iemOp_inswd_Yv_DX)
{
    IEMOP_HLP_NO_LOCK_PREFIX();
    if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ))
    {
        IEMOP_MNEMONIC("rep ins Yv,DX");
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_rep_ins_op16_addr16);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_rep_ins_op16_addr32);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_rep_ins_op16_addr64);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                break;
            case IEMMODE_64BIT:     /* no 64-bit INS; treated as 32-bit. */
            case IEMMODE_32BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_rep_ins_op32_addr16);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_rep_ins_op32_addr32);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_rep_ins_op32_addr64);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                break;
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        IEMOP_MNEMONIC("ins Yv,DX");
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_ins_op16_addr16);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_ins_op16_addr32);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_ins_op16_addr64);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                break;
            case IEMMODE_64BIT:     /* no 64-bit INS; treated as 32-bit. */
            case IEMMODE_32BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_ins_op32_addr16);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_ins_op32_addr32);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_ins_op32_addr64);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                break;
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
6099
6100
6101/** Opcode 0x6e. */
6102FNIEMOP_DEF(iemOp_outsb_Yb_DX)
6103{
6104 IEMOP_HLP_NO_LOCK_PREFIX();
6105 if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
6106 {
6107 IEMOP_MNEMONIC("rep out DX,Yb");
6108 switch (pIemCpu->enmEffAddrMode)
6109 {
6110 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_outs_op8_addr16, pIemCpu->iEffSeg);
6111 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_outs_op8_addr32, pIemCpu->iEffSeg);
6112 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_outs_op8_addr64, pIemCpu->iEffSeg);
6113 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6114 }
6115 }
6116 else
6117 {
6118 IEMOP_MNEMONIC("out DX,Yb");
6119 switch (pIemCpu->enmEffAddrMode)
6120 {
6121 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_outs_op8_addr16, pIemCpu->iEffSeg);
6122 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_outs_op8_addr32, pIemCpu->iEffSeg);
6123 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_outs_op8_addr64, pIemCpu->iEffSeg);
6124 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6125 }
6126 }
6127}
6128
6129
/** Opcode 0x6f - OUTS DX,Yv (word/dword string output to port DX).  A
 *  REP/REPNE prefix selects the repeating C implementation; the operand size
 *  selects the element width (64-bit op size behaves as 32-bit) and the
 *  address size picks the rSI width.  iEffSeg is passed down because the data
 *  segment may be overridden.
 *
 *  NOTE(review): the `break;` statements below look unreachable (every inner
 *  case returns), and there is no return after the outer switch — worth
 *  confirming no compiler flags a missing-return path here. */
FNIEMOP_DEF(iemOp_outswd_Yv_DX)
{
    IEMOP_HLP_NO_LOCK_PREFIX();
    if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ))
    {
        IEMOP_MNEMONIC("rep outs DX,Yv");
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_outs_op16_addr16, pIemCpu->iEffSeg);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_outs_op16_addr32, pIemCpu->iEffSeg);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_outs_op16_addr64, pIemCpu->iEffSeg);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                break;
            case IEMMODE_64BIT:     /* no 64-bit OUTS; treated as 32-bit. */
            case IEMMODE_32BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_outs_op32_addr16, pIemCpu->iEffSeg);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_outs_op32_addr32, pIemCpu->iEffSeg);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_outs_op32_addr64, pIemCpu->iEffSeg);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                break;
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        IEMOP_MNEMONIC("outs DX,Yv");
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_outs_op16_addr16, pIemCpu->iEffSeg);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_outs_op16_addr32, pIemCpu->iEffSeg);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_outs_op16_addr64, pIemCpu->iEffSeg);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                break;
            case IEMMODE_64BIT:     /* no 64-bit OUTS; treated as 32-bit. */
            case IEMMODE_32BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_outs_op32_addr16, pIemCpu->iEffSeg);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_outs_op32_addr32, pIemCpu->iEffSeg);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_outs_op32_addr64, pIemCpu->iEffSeg);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                break;
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
6189
6190
/** Opcode 0x70 - JO rel8: jump short if overflow (OF=1). */
FNIEMOP_DEF(iemOp_jo_Jb)
{
    IEMOP_MNEMONIC("jo Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm); /* Jb: signed 8-bit displacement */
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); /* short jumps default to 64-bit operand size in long mode */

    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
        IEM_MC_REL_JMP_S8(i8Imm);
    } IEM_MC_ELSE() {
        IEM_MC_ADVANCE_RIP();
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}
6208
6209
/** Opcode 0x71 - JNO rel8: jump short if not overflow (OF=0).
 *  Tests the SET condition; the taken path is the ELSE branch. */
FNIEMOP_DEF(iemOp_jno_Jb)
{
    IEMOP_MNEMONIC("jno Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm); /* Jb: signed 8-bit displacement */
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); /* short jumps default to 64-bit operand size in long mode */

    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
        IEM_MC_ADVANCE_RIP();
    } IEM_MC_ELSE() {
        IEM_MC_REL_JMP_S8(i8Imm);
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}
6227
/** Opcode 0x72 - JC/JB/JNAE rel8: jump short if carry (CF=1). */
FNIEMOP_DEF(iemOp_jc_Jb)
{
    IEMOP_MNEMONIC("jc/jnae Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm); /* Jb: signed 8-bit displacement */
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); /* short jumps default to 64-bit operand size in long mode */

    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
        IEM_MC_REL_JMP_S8(i8Imm);
    } IEM_MC_ELSE() {
        IEM_MC_ADVANCE_RIP();
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}
6245
6246
/** Opcode 0x73 - JNC/JNB/JAE rel8: jump short if no carry (CF=0).
 *  Tests the SET condition; the taken path is the ELSE branch. */
FNIEMOP_DEF(iemOp_jnc_Jb)
{
    IEMOP_MNEMONIC("jnc/jnb Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm); /* Jb: signed 8-bit displacement */
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); /* short jumps default to 64-bit operand size in long mode */

    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
        IEM_MC_ADVANCE_RIP();
    } IEM_MC_ELSE() {
        IEM_MC_REL_JMP_S8(i8Imm);
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}
6264
6265
/** Opcode 0x74 - JE/JZ rel8: jump short if equal/zero (ZF=1). */
FNIEMOP_DEF(iemOp_je_Jb)
{
    IEMOP_MNEMONIC("je/jz Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm); /* Jb: signed 8-bit displacement */
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); /* short jumps default to 64-bit operand size in long mode */

    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
        IEM_MC_REL_JMP_S8(i8Imm);
    } IEM_MC_ELSE() {
        IEM_MC_ADVANCE_RIP();
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}
6283
6284
/** Opcode 0x75 - JNE/JNZ rel8: jump short if not equal/not zero (ZF=0).
 *  Tests the SET condition; the taken path is the ELSE branch. */
FNIEMOP_DEF(iemOp_jne_Jb)
{
    IEMOP_MNEMONIC("jne/jnz Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm); /* Jb: signed 8-bit displacement */
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); /* short jumps default to 64-bit operand size in long mode */

    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
        IEM_MC_ADVANCE_RIP();
    } IEM_MC_ELSE() {
        IEM_MC_REL_JMP_S8(i8Imm);
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}
6302
6303
/** Opcode 0x76 - JBE/JNA rel8: jump short if below or equal (CF=1 or ZF=1). */
FNIEMOP_DEF(iemOp_jbe_Jb)
{
    IEMOP_MNEMONIC("jbe/jna Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm); /* Jb: signed 8-bit displacement */
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); /* short jumps default to 64-bit operand size in long mode */

    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
        IEM_MC_REL_JMP_S8(i8Imm);
    } IEM_MC_ELSE() {
        IEM_MC_ADVANCE_RIP();
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}
6321
6322
/** Opcode 0x77 - JNBE/JA rel8: jump short if above (CF=0 and ZF=0).
 *  Tests the ANY-SET condition; the taken path is the ELSE branch. */
FNIEMOP_DEF(iemOp_jnbe_Jb)
{
    IEMOP_MNEMONIC("jnbe/ja Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm); /* Jb: signed 8-bit displacement */
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); /* short jumps default to 64-bit operand size in long mode */

    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
        IEM_MC_ADVANCE_RIP();
    } IEM_MC_ELSE() {
        IEM_MC_REL_JMP_S8(i8Imm);
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}
6340
6341
/** Opcode 0x78 - JS rel8: jump short if sign (SF=1). */
FNIEMOP_DEF(iemOp_js_Jb)
{
    IEMOP_MNEMONIC("js Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm); /* Jb: signed 8-bit displacement */
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); /* short jumps default to 64-bit operand size in long mode */

    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
        IEM_MC_REL_JMP_S8(i8Imm);
    } IEM_MC_ELSE() {
        IEM_MC_ADVANCE_RIP();
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}
6359
6360
/** Opcode 0x79 - JNS rel8: jump short if not sign (SF=0).
 *  Tests the SET condition; the taken path is the ELSE branch. */
FNIEMOP_DEF(iemOp_jns_Jb)
{
    IEMOP_MNEMONIC("jns Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm); /* Jb: signed 8-bit displacement */
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); /* short jumps default to 64-bit operand size in long mode */

    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
        IEM_MC_ADVANCE_RIP();
    } IEM_MC_ELSE() {
        IEM_MC_REL_JMP_S8(i8Imm);
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}
6378
6379
/** Opcode 0x7a - JP/JPE rel8: jump short if parity even (PF=1). */
FNIEMOP_DEF(iemOp_jp_Jb)
{
    IEMOP_MNEMONIC("jp Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm); /* Jb: signed 8-bit displacement */
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); /* short jumps default to 64-bit operand size in long mode */

    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
        IEM_MC_REL_JMP_S8(i8Imm);
    } IEM_MC_ELSE() {
        IEM_MC_ADVANCE_RIP();
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}
6397
6398
/** Opcode 0x7b - JNP/JPO rel8: jump short if parity odd (PF=0).
 *  Tests the SET condition; the taken path is the ELSE branch. */
FNIEMOP_DEF(iemOp_jnp_Jb)
{
    IEMOP_MNEMONIC("jnp Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm); /* Jb: signed 8-bit displacement */
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); /* short jumps default to 64-bit operand size in long mode */

    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
        IEM_MC_ADVANCE_RIP();
    } IEM_MC_ELSE() {
        IEM_MC_REL_JMP_S8(i8Imm);
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}
6416
6417
/** Opcode 0x7c - JL/JNGE rel8: jump short if less (signed, SF != OF). */
FNIEMOP_DEF(iemOp_jl_Jb)
{
    IEMOP_MNEMONIC("jl/jnge Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm); /* Jb: signed 8-bit displacement */
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); /* short jumps default to 64-bit operand size in long mode */

    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
        IEM_MC_REL_JMP_S8(i8Imm);
    } IEM_MC_ELSE() {
        IEM_MC_ADVANCE_RIP();
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}
6435
6436
/** Opcode 0x7d - JNL/JGE rel8: jump short if greater or equal (signed, SF == OF).
 *  Tests the NE condition; the taken path is the ELSE branch. */
FNIEMOP_DEF(iemOp_jnl_Jb)
{
    IEMOP_MNEMONIC("jnl/jge Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm); /* Jb: signed 8-bit displacement */
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); /* short jumps default to 64-bit operand size in long mode */

    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
        IEM_MC_ADVANCE_RIP();
    } IEM_MC_ELSE() {
        IEM_MC_REL_JMP_S8(i8Imm);
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}
6454
6455
/** Opcode 0x7e - JLE/JNG rel8: jump short if less or equal (signed,
 *  ZF=1 or SF != OF). */
FNIEMOP_DEF(iemOp_jle_Jb)
{
    IEMOP_MNEMONIC("jle/jng Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm); /* Jb: signed 8-bit displacement */
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); /* short jumps default to 64-bit operand size in long mode */

    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
        IEM_MC_REL_JMP_S8(i8Imm);
    } IEM_MC_ELSE() {
        IEM_MC_ADVANCE_RIP();
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}
6473
6474
/** Opcode 0x7f - JNLE/JG rel8: jump short if greater (signed, ZF=0 and
 *  SF == OF).  Tests the inverse condition; the taken path is the ELSE
 *  branch. */
FNIEMOP_DEF(iemOp_jnle_Jb)
{
    IEMOP_MNEMONIC("jnle/jg Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm); /* Jb: signed 8-bit displacement */
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); /* short jumps default to 64-bit operand size in long mode */

    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
        IEM_MC_ADVANCE_RIP();
    } IEM_MC_ELSE() {
        IEM_MC_REL_JMP_S8(i8Imm);
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}
6492
6493
/**
 * Opcode 0x80 - group 1: ADD/OR/ADC/SBB/AND/SUB/XOR/CMP Eb,Ib.
 *
 * The actual operation is selected by ModRM.reg via g_apIemImplGrp1; the
 * mnemonic string is picked out of a packed \\0-separated name table using
 * the same index (each name occupies 4 bytes).
 */
FNIEMOP_DEF(iemOp_Grp1_Eb_Ib_80)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_MNEMONIC2("add\0or\0\0adc\0sbb\0and\0sub\0xor\0cmp" + ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)*4, "Eb,Ib");
    PCIEMOPBINSIZES pImpl = g_apIemImplGrp1[(bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK];

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register target */
        uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
        IEMOP_HLP_NO_LOCK_PREFIX(); /* LOCK is never valid with a register destination */
        IEM_MC_BEGIN(3, 0);
        IEM_MC_ARG(uint8_t *, pu8Dst, 0);
        IEM_MC_ARG_CONST(uint8_t, u8Src, /*=*/ u8Imm, 1);
        IEM_MC_ARG(uint32_t *, pEFlags, 2);

        IEM_MC_REF_GREG_U8(pu8Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_REF_EFLAGS(pEFlags);
        IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, u8Src, pEFlags);

        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory target */
        uint32_t fAccess;
        if (pImpl->pfnLockedU8)
            fAccess = IEM_ACCESS_DATA_RW;
        else
        { /* CMP - the only group 1 op without a locked variant */
            IEMOP_HLP_NO_LOCK_PREFIX();
            fAccess = IEM_ACCESS_DATA_R;
        }
        IEM_MC_BEGIN(3, 2);
        IEM_MC_ARG(uint8_t *, pu8Dst, 0);
        IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

        /* The immediate is fetched after the effective address because the
           displacement bytes precede it in the instruction encoding. */
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
        uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
        IEM_MC_ARG_CONST(uint8_t, u8Src, /*=*/ u8Imm, 1);

        IEM_MC_MEM_MAP(pu8Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
        IEM_MC_FETCH_EFLAGS(EFlags);
        if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
            IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, u8Src, pEFlags);
        else
            IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU8, pu8Dst, u8Src, pEFlags);

        IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Dst, fAccess);
        IEM_MC_COMMIT_EFLAGS(EFlags);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
6552
6553
/**
 * Opcode 0x81 - group 1: ADD/OR/ADC/SBB/AND/SUB/XOR/CMP Ev,Iz.
 *
 * Operation selected by ModRM.reg via g_apIemImplGrp1.  The immediate is
 * 16 or 32 bits wide; in 64-bit operand size it is a 32-bit immediate
 * sign-extended to 64 bits.
 */
FNIEMOP_DEF(iemOp_Grp1_Ev_Iz)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_MNEMONIC2("add\0or\0\0adc\0sbb\0and\0sub\0xor\0cmp" + ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)*4, "Ev,Iz");
    PCIEMOPBINSIZES pImpl = g_apIemImplGrp1[(bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK];

    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
        {
            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
            {
                /* register target */
                uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
                IEMOP_HLP_NO_LOCK_PREFIX();
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint16_t *, pu16Dst, 0);
                IEM_MC_ARG_CONST(uint16_t, u16Src, /*=*/ u16Imm, 1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);

                IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            else
            {
                /* memory target */
                uint32_t fAccess;
                if (pImpl->pfnLockedU16)
                    fAccess = IEM_ACCESS_DATA_RW;
                else
                { /* CMP (TEST is group 3, not group 1) */
                    IEMOP_HLP_NO_LOCK_PREFIX();
                    fAccess = IEM_ACCESS_DATA_R;
                }
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint16_t *, pu16Dst, 0);
                IEM_MC_ARG(uint16_t, u16Src, 1);
                IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                /* immediate follows the displacement, so fetch it after the EA */
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
                IEM_MC_ASSIGN(u16Src, u16Imm);
                IEM_MC_MEM_MAP(pu16Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_EFLAGS(EFlags);
                if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
                    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
                else
                    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU16, pu16Dst, u16Src, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, fAccess);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            break;
        }

        case IEMMODE_32BIT:
        {
            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
            {
                /* register target */
                uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
                IEMOP_HLP_NO_LOCK_PREFIX();
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint32_t *, pu32Dst, 0);
                IEM_MC_ARG_CONST(uint32_t, u32Src, /*=*/ u32Imm, 1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);

                IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            else
            {
                /* memory target */
                uint32_t fAccess;
                if (pImpl->pfnLockedU32)
                    fAccess = IEM_ACCESS_DATA_RW;
                else
                { /* CMP (TEST is group 3, not group 1) */
                    IEMOP_HLP_NO_LOCK_PREFIX();
                    fAccess = IEM_ACCESS_DATA_R;
                }
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint32_t *, pu32Dst, 0);
                IEM_MC_ARG(uint32_t, u32Src, 1);
                IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                /* immediate follows the displacement, so fetch it after the EA */
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
                IEM_MC_ASSIGN(u32Src, u32Imm);
                IEM_MC_MEM_MAP(pu32Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_EFLAGS(EFlags);
                if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
                    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
                else
                    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU32, pu32Dst, u32Src, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, fAccess);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            break;
        }

        case IEMMODE_64BIT:
        {
            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
            {
                /* register target */
                /* imm32 sign-extended to 64 bits - there is no imm64 form */
                uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm);
                IEMOP_HLP_NO_LOCK_PREFIX();
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint64_t *, pu64Dst, 0);
                IEM_MC_ARG_CONST(uint64_t, u64Src, /*=*/ u64Imm, 1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);

                IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            else
            {
                /* memory target */
                uint32_t fAccess;
                if (pImpl->pfnLockedU64)
                    fAccess = IEM_ACCESS_DATA_RW;
                else
                { /* CMP */
                    IEMOP_HLP_NO_LOCK_PREFIX();
                    fAccess = IEM_ACCESS_DATA_R;
                }
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint64_t *, pu64Dst, 0);
                IEM_MC_ARG(uint64_t, u64Src, 1);
                IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                /* imm32 sign-extended to 64 bits, fetched after the EA */
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm);
                IEM_MC_ASSIGN(u64Src, u64Imm);
                IEM_MC_MEM_MAP(pu64Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_EFLAGS(EFlags);
                if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
                    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
                else
                    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU64, pu64Dst, u64Src, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, fAccess);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            break;
        }
        /* NOTE(review): unlike the sibling op-size switches in this file,
           this one has no IEM_NOT_REACHED_DEFAULT_CASE_RET() default case -
           consider adding one for consistency. */
    }
    return VINF_SUCCESS;
}
6727
6728
6729/** Opcode 0x82. */
6730 FNIEMOP_DEF(iemOp_Grp1_Eb_Ib_82)
6731{
6732 IEMOP_HLP_NO_64BIT(); /** @todo do we need to decode the whole instruction or is this ok? */
6733 return FNIEMOP_CALL(iemOp_Grp1_Eb_Ib_80);
6734}
6735
6736
/**
 * Opcode 0x83 - group 1: ADD/OR/ADC/SBB/AND/SUB/XOR/CMP Ev,Ib.
 *
 * Like opcode 0x81, but the source is an 8-bit immediate sign-extended to
 * the effective operand size (the (int8_t) casts below).
 */
FNIEMOP_DEF(iemOp_Grp1_Ev_Ib)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_MNEMONIC2("add\0or\0\0adc\0sbb\0and\0sub\0xor\0cmp" + ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)*4, "Ev,Ib");
    PCIEMOPBINSIZES pImpl = g_apIemImplGrp1[(bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK];

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /*
         * Register target
         */
        IEMOP_HLP_NO_LOCK_PREFIX();
        uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
            {
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint16_t *, pu16Dst, 0);
                IEM_MC_ARG_CONST(uint16_t, u16Src, /*=*/ (int8_t)u8Imm,1); /* sign-extend Ib */
                IEM_MC_ARG(uint32_t *, pEFlags, 2);

                IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;
            }

            case IEMMODE_32BIT:
            {
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint32_t *, pu32Dst, 0);
                IEM_MC_ARG_CONST(uint32_t, u32Src, /*=*/ (int8_t)u8Imm,1); /* sign-extend Ib */
                IEM_MC_ARG(uint32_t *, pEFlags, 2);

                IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;
            }

            case IEMMODE_64BIT:
            {
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint64_t *, pu64Dst, 0);
                IEM_MC_ARG_CONST(uint64_t, u64Src, /*=*/ (int8_t)u8Imm,1); /* sign-extend Ib */
                IEM_MC_ARG(uint32_t *, pEFlags, 2);

                IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;
            }
            /* NOTE(review): no IEM_NOT_REACHED_DEFAULT_CASE_RET() default
               case here, unlike sibling switches - consider adding one. */
        }
    }
    else
    {
        /*
         * Memory target.
         */
        uint32_t fAccess;
        /* Checking the 16-bit locked pointer suffices: a group 1 op either
           has locked variants for every size or for none (only CMP lacks
           them).  NOTE(review): relies on g_apIemImplGrp1 layout - confirm. */
        if (pImpl->pfnLockedU16)
            fAccess = IEM_ACCESS_DATA_RW;
        else
        { /* CMP */
            IEMOP_HLP_NO_LOCK_PREFIX();
            fAccess = IEM_ACCESS_DATA_R;
        }

        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
            {
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint16_t *, pu16Dst, 0);
                IEM_MC_ARG(uint16_t, u16Src, 1);
                IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
                IEM_MC_ASSIGN(u16Src, (int8_t)u8Imm); /* sign-extend Ib */
                IEM_MC_MEM_MAP(pu16Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_EFLAGS(EFlags);
                if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
                    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
                else
                    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU16, pu16Dst, u16Src, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, fAccess);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;
            }

            case IEMMODE_32BIT:
            {
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint32_t *, pu32Dst, 0);
                IEM_MC_ARG(uint32_t, u32Src, 1);
                IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
                IEM_MC_ASSIGN(u32Src, (int8_t)u8Imm); /* sign-extend Ib */
                IEM_MC_MEM_MAP(pu32Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_EFLAGS(EFlags);
                if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
                    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
                else
                    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU32, pu32Dst, u32Src, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, fAccess);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;
            }

            case IEMMODE_64BIT:
            {
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint64_t *, pu64Dst, 0);
                IEM_MC_ARG(uint64_t, u64Src, 1);
                IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
                IEM_MC_ASSIGN(u64Src, (int8_t)u8Imm); /* sign-extend Ib */
                IEM_MC_MEM_MAP(pu64Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_EFLAGS(EFlags);
                if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
                    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
                else
                    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU64, pu64Dst, u64Src, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, fAccess);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;
            }
        }
    }
    return VINF_SUCCESS;
}
6896
6897
/** Opcode 0x84 - TEST Eb,Gb: logical AND that only updates EFLAGS.
 *  Reuses the generic rm,r8 binary-operator worker with the TEST impl;
 *  AF is architecturally undefined afterwards. */
FNIEMOP_DEF(iemOp_test_Eb_Gb)
{
    IEMOP_MNEMONIC("test Eb,Gb");
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo do we have to decode the whole instruction first? */
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_r8, &g_iemAImpl_test);
}
6906
6907
/** Opcode 0x85 - TEST Ev,Gv: logical AND that only updates EFLAGS.
 *  Reuses the generic rm,rv binary-operator worker with the TEST impl;
 *  AF is architecturally undefined afterwards. */
FNIEMOP_DEF(iemOp_test_Ev_Gv)
{
    IEMOP_MNEMONIC("test Ev,Gv");
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo do we have to decode the whole instruction first? */
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_rv, &g_iemAImpl_test);
}
6916
6917
/**
 * Opcode 0x86 - XCHG Eb,Gb.
 *
 * Register/register form swaps via two fetches and two stores; the memory
 * form maps the destination read/write and lets the assembly helper do the
 * exchange.  No NO_LOCK check in the memory path: the LOCK prefix is
 * architecturally valid with XCHG on memory.
 */
FNIEMOP_DEF(iemOp_xchg_Eb_Gb)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_MNEMONIC("xchg Eb,Gb");

    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 2);
        IEM_MC_LOCAL(uint8_t, uTmp1);
        IEM_MC_LOCAL(uint8_t, uTmp2);

        IEM_MC_FETCH_GREG_U8(uTmp1, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
        IEM_MC_FETCH_GREG_U8(uTmp2, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_STORE_GREG_U8((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, uTmp1);
        IEM_MC_STORE_GREG_U8(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, uTmp2);

        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /*
         * We're accessing memory.
         */
/** @todo the register must be committed separately! */
        IEM_MC_BEGIN(2, 2);
        IEM_MC_ARG(uint8_t *, pu8Mem, 0);
        IEM_MC_ARG(uint8_t *, pu8Reg, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
        IEM_MC_MEM_MAP(pu8Mem, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
        IEM_MC_REF_GREG_U8(pu8Reg, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
        IEM_MC_CALL_VOID_AIMPL_2(iemAImpl_xchg_u8, pu8Mem, pu8Reg);
        IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Mem, IEM_ACCESS_DATA_RW);

        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
6965
6966
/**
 * Opcode 0x87 - XCHG Ev,Gv.
 *
 * Same structure as the byte variant (opcode 0x86), replicated per
 * effective operand size.  Register/register swaps via temporaries; the
 * memory form maps the destination RW and calls the assembly exchange
 * helper.  The LOCK prefix is valid with XCHG on memory, hence no NO_LOCK
 * check in that path.
 */
FNIEMOP_DEF(iemOp_xchg_Ev_Gv)
{
    IEMOP_MNEMONIC("xchg Ev,Gv");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);

    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        IEMOP_HLP_NO_LOCK_PREFIX();

        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint16_t, uTmp1);
                IEM_MC_LOCAL(uint16_t, uTmp2);

                IEM_MC_FETCH_GREG_U16(uTmp1, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_FETCH_GREG_U16(uTmp2, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_STORE_GREG_U16((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, uTmp1);
                IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, uTmp2);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint32_t, uTmp1);
                IEM_MC_LOCAL(uint32_t, uTmp2);

                IEM_MC_FETCH_GREG_U32(uTmp1, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_FETCH_GREG_U32(uTmp2, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_STORE_GREG_U32((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, uTmp1);
                IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, uTmp2);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint64_t, uTmp1);
                IEM_MC_LOCAL(uint64_t, uTmp2);

                IEM_MC_FETCH_GREG_U64(uTmp1, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_FETCH_GREG_U64(uTmp2, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_STORE_GREG_U64((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, uTmp1);
                IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, uTmp2);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /*
         * We're accessing memory.
         */
        switch (pIemCpu->enmEffOpSize)
        {
/** @todo the register must be committed separately! */
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(2, 2);
                IEM_MC_ARG(uint16_t *, pu16Mem, 0);
                IEM_MC_ARG(uint16_t *, pu16Reg, 1);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                IEM_MC_MEM_MAP(pu16Mem, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_REF_GREG_U16(pu16Reg, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_CALL_VOID_AIMPL_2(iemAImpl_xchg_u16, pu16Mem, pu16Reg);
                IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Mem, IEM_ACCESS_DATA_RW);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(2, 2);
                IEM_MC_ARG(uint32_t *, pu32Mem, 0);
                IEM_MC_ARG(uint32_t *, pu32Reg, 1);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                IEM_MC_MEM_MAP(pu32Mem, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_REF_GREG_U32(pu32Reg, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_CALL_VOID_AIMPL_2(iemAImpl_xchg_u32, pu32Mem, pu32Reg);
                IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Mem, IEM_ACCESS_DATA_RW);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(2, 2);
                IEM_MC_ARG(uint64_t *, pu64Mem, 0);
                IEM_MC_ARG(uint64_t *, pu64Reg, 1);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                IEM_MC_MEM_MAP(pu64Mem, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_REF_GREG_U64(pu64Reg, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_CALL_VOID_AIMPL_2(iemAImpl_xchg_u64, pu64Mem, pu64Reg);
                IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Mem, IEM_ACCESS_DATA_RW);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
7087
7088
/** Opcode 0x88 - MOV Eb,Gb: store byte register to register or memory. */
FNIEMOP_DEF(iemOp_mov_Eb_Gb)
{
    IEMOP_MNEMONIC("mov Eb,Gb");

    uint8_t bRm;
    IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register to register */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(uint8_t, u8Value);
        IEM_MC_FETCH_GREG_U8(u8Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
        IEM_MC_STORE_GREG_U8((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u8Value);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /*
         * We're writing a register to memory.
         */
        IEM_MC_BEGIN(0, 2);
        IEM_MC_LOCAL(uint8_t, u8Value);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
        IEM_MC_FETCH_GREG_U8(u8Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
        IEM_MC_STORE_MEM_U8(pIemCpu->iEffSeg, GCPtrEffDst, u8Value);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;

}
7127
7128
/** Opcode 0x89 - MOV Ev,Gv: store word/dword/qword register to register or
 *  memory, replicated per effective operand size. */
FNIEMOP_DEF(iemOp_mov_Ev_Gv)
{
    IEMOP_MNEMONIC("mov Ev,Gv");

    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* NOTE(review): no IEM_NOT_REACHED_DEFAULT_CASE_RET() default case in
           these two switches, unlike e.g. the xchg Ev,Gv handler. */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint16_t, u16Value);
                IEM_MC_FETCH_GREG_U16(u16Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_STORE_GREG_U16((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u16Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint32_t, u32Value);
                IEM_MC_FETCH_GREG_U32(u32Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_STORE_GREG_U32((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u32Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint64_t, u64Value);
                IEM_MC_FETCH_GREG_U64(u64Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_STORE_GREG_U64((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u64Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;
        }
    }
    else
    {
        /*
         * We're writing a register to memory.
         */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint16_t, u16Value);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                IEM_MC_FETCH_GREG_U16(u16Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_STORE_MEM_U16(pIemCpu->iEffSeg, GCPtrEffDst, u16Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint32_t, u32Value);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                IEM_MC_FETCH_GREG_U32(u32Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_STORE_MEM_U32(pIemCpu->iEffSeg, GCPtrEffDst, u32Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint64_t, u64Value);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                IEM_MC_FETCH_GREG_U64(u64Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_STORE_MEM_U64(pIemCpu->iEffSeg, GCPtrEffDst, u64Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;
        }
    }
    return VINF_SUCCESS;
}
7215
7216
7217/** Opcode 0x8a. */
7218FNIEMOP_DEF(iemOp_mov_Gb_Eb)
7219{
7220 IEMOP_MNEMONIC("mov Gb,Eb");
7221
7222 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
7223 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
7224
7225 /*
7226 * If rm is denoting a register, no more instruction bytes.
7227 */
7228 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
7229 {
7230 IEM_MC_BEGIN(0, 1);
7231 IEM_MC_LOCAL(uint8_t, u8Value);
7232 IEM_MC_FETCH_GREG_U8(u8Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
7233 IEM_MC_STORE_GREG_U8(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u8Value);
7234 IEM_MC_ADVANCE_RIP();
7235 IEM_MC_END();
7236 }
7237 else
7238 {
7239 /*
7240 * We're loading a register from memory.
7241 */
7242 IEM_MC_BEGIN(0, 2);
7243 IEM_MC_LOCAL(uint8_t, u8Value);
7244 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
7245 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
7246 IEM_MC_FETCH_MEM_U8(u8Value, pIemCpu->iEffSeg, GCPtrEffDst);
7247 IEM_MC_STORE_GREG_U8(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u8Value);
7248 IEM_MC_ADVANCE_RIP();
7249 IEM_MC_END();
7250 }
7251 return VINF_SUCCESS;
7252}
7253
7254
/**
 * Opcode 0x8b - mov Gv,Ev.
 *
 * Loads the reg-field general register from the r/m operand (register or
 * memory), for 16/32/64-bit effective operand sizes.  REX.R extends the
 * destination index, REX.B the register-form source index.
 */
FNIEMOP_DEF(iemOp_mov_Gv_Ev)
{
    IEMOP_MNEMONIC("mov Gv,Ev");

    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* Register to register copy, one case per effective operand size. */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint16_t, u16Value);
                IEM_MC_FETCH_GREG_U16(u16Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint32_t, u32Value);
                IEM_MC_FETCH_GREG_U32(u32Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint64_t, u64Value);
                IEM_MC_FETCH_GREG_U64(u64Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;
        }
    }
    else
    {
        /*
         * We're loading a register from memory.
         */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint16_t, u16Value);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                IEM_MC_FETCH_MEM_U16(u16Value, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint32_t, u32Value);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                IEM_MC_FETCH_MEM_U32(u32Value, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint64_t, u64Value);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                IEM_MC_FETCH_MEM_U64(u64Value, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;
        }
    }
    return VINF_SUCCESS;
}
7341
7342
/**
 * Opcode 0x8c - mov Ev,Sw.
 *
 * Stores a segment register (selector value) to a general register or to
 * memory.  Memory stores are always 16-bit; register stores honour the
 * effective operand size, zero-extending the selector.
 */
FNIEMOP_DEF(iemOp_mov_Ev_Sw)
{
    IEMOP_MNEMONIC("mov Ev,Sw");

    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

    /*
     * Check that the destination register exists. The REX.R prefix is ignored.
     */
    uint8_t const iSegReg = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
    if (   iSegReg > X86_SREG_GS)
        return IEMOP_RAISE_INVALID_OPCODE(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

    /*
     * If rm is denoting a register, no more instruction bytes.
     * In that case, the operand size is respected and the upper bits are
     * cleared (starting with some pentium).
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint16_t, u16Value);
                IEM_MC_FETCH_SREG_U16(u16Value, iSegReg);
                IEM_MC_STORE_GREG_U16((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u16Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint32_t, u32Value);
                IEM_MC_FETCH_SREG_ZX_U32(u32Value, iSegReg);   /* zero extended to 32 bits */
                IEM_MC_STORE_GREG_U32((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u32Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint64_t, u64Value);
                IEM_MC_FETCH_SREG_ZX_U64(u64Value, iSegReg);   /* zero extended to 64 bits */
                IEM_MC_STORE_GREG_U64((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u64Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;
        }
    }
    else
    {
        /*
         * We're saving the register to memory.  The access is word sized
         * regardless of operand size prefixes.
         */
#if 0 /* not necessary */
        pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize = IEMMODE_16BIT;
#endif
        IEM_MC_BEGIN(0, 2);
        IEM_MC_LOCAL(uint16_t,  u16Value);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
        IEM_MC_FETCH_SREG_U16(u16Value, iSegReg);
        IEM_MC_STORE_MEM_U16(pIemCpu->iEffSeg, GCPtrEffDst, u16Value);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
7415
7416
7417
7418
7419/** Opcode 0x8d. */
7420FNIEMOP_DEF(iemOp_lea_Gv_M)
7421{
7422 IEMOP_MNEMONIC("lea Gv,M");
7423 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
7424 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
7425 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
7426 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); /* no register form */
7427
7428 switch (pIemCpu->enmEffOpSize)
7429 {
7430 case IEMMODE_16BIT:
7431 IEM_MC_BEGIN(0, 2);
7432 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
7433 IEM_MC_LOCAL(uint16_t, u16Cast);
7434 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
7435 IEM_MC_ASSIGN_TO_SMALLER(u16Cast, GCPtrEffSrc);
7436 IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Cast);
7437 IEM_MC_ADVANCE_RIP();
7438 IEM_MC_END();
7439 return VINF_SUCCESS;
7440
7441 case IEMMODE_32BIT:
7442 IEM_MC_BEGIN(0, 2);
7443 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
7444 IEM_MC_LOCAL(uint32_t, u32Cast);
7445 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
7446 IEM_MC_ASSIGN_TO_SMALLER(u32Cast, GCPtrEffSrc);
7447 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Cast);
7448 IEM_MC_ADVANCE_RIP();
7449 IEM_MC_END();
7450 return VINF_SUCCESS;
7451
7452 case IEMMODE_64BIT:
7453 IEM_MC_BEGIN(0, 1);
7454 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
7455 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
7456 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, GCPtrEffSrc);
7457 IEM_MC_ADVANCE_RIP();
7458 IEM_MC_END();
7459 return VINF_SUCCESS;
7460 }
7461 AssertFailedReturn(VERR_INTERNAL_ERROR_5);
7462}
7463
7464
/**
 * Opcode 0x8e - mov Sw,Ev.
 *
 * Loads a segment register from a 16-bit general register or memory word.
 * Loading CS or a non-existent segment register raises \#UD; the actual
 * selector/descriptor checks are done by iemCImpl_load_SReg.
 */
FNIEMOP_DEF(iemOp_mov_Sw_Ev)
{
    IEMOP_MNEMONIC("mov Sw,Ev");

    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

    /*
     * The practical operand size is 16-bit.
     */
#if 0 /* not necessary */
    pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize = IEMMODE_16BIT;
#endif

    /*
     * Check that the destination register exists and can be used with this
     * instruction.  The REX.R prefix is ignored.
     */
    uint8_t const iSegReg = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
    if (   iSegReg == X86_SREG_CS
        || iSegReg > X86_SREG_GS)
        return IEMOP_RAISE_INVALID_OPCODE(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        IEM_MC_BEGIN(2, 0);
        IEM_MC_ARG_CONST(uint8_t, iSRegArg, iSegReg, 0);
        IEM_MC_ARG(uint16_t,      u16Value,          1);
        IEM_MC_FETCH_GREG_U16(u16Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_CALL_CIMPL_2(iemCImpl_load_SReg, iSRegArg, u16Value);
        IEM_MC_END();
    }
    else
    {
        /*
         * We're loading the register from memory.  The access is word sized
         * regardless of operand size prefixes.
         */
        IEM_MC_BEGIN(2, 1);
        IEM_MC_ARG_CONST(uint8_t, iSRegArg, iSegReg, 0);
        IEM_MC_ARG(uint16_t,      u16Value,          1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
        IEM_MC_FETCH_MEM_U16(u16Value, pIemCpu->iEffSeg, GCPtrEffDst);
        IEM_MC_CALL_CIMPL_2(iemCImpl_load_SReg, iSRegArg, u16Value);
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
7518
7519
7520/** Opcode 0x8f. */
7521FNIEMOP_DEF(iemOp_pop_Ev)
7522{
7523 /* This bugger is rather annoying as it requires rSP to be updated before
7524 doing the effective address calculations. Will eventually require a
7525 split between the R/M+SIB decoding and the effective address
7526 calculation - which is something that is required for any attempt at
7527 reusing this code for a recompiler. It may also be good to have if we
7528 need to delay #UD exception caused by invalid lock prefixes.
7529
7530 For now, we'll do a mostly safe interpreter-only implementation here. */
7531 /** @todo What's the deal with the 'reg' field and pop Ev? Ignorning it for
7532 * now until tests show it's checked.. */
7533 IEMOP_MNEMONIC("pop Ev");
7534 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
7535 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
7536
7537 /* Register access is relatively easy and can share code. */
7538 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
7539 return FNIEMOP_CALL_1(iemOpCommonPopGReg, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
7540
7541 /*
7542 * Memory target.
7543 *
7544 * Intel says that RSP is incremented before it's used in any effective
7545 * address calcuations. This means some serious extra annoyance here since
7546 * we decode and caclulate the effective address in one step and like to
7547 * delay committing registers till everything is done.
7548 *
7549 * So, we'll decode and calculate the effective address twice. This will
7550 * require some recoding if turned into a recompiler.
7551 */
7552 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); /* The common code does this differently. */
7553
7554#ifndef TST_IEM_CHECK_MC
7555 /* Calc effective address with modified ESP. */
7556 uint8_t const offOpcodeSaved = pIemCpu->offOpcode;
7557 RTGCPTR GCPtrEff;
7558 VBOXSTRICTRC rcStrict;
7559 rcStrict = iemOpHlpCalcRmEffAddr(pIemCpu, bRm, &GCPtrEff);
7560 if (rcStrict != VINF_SUCCESS)
7561 return rcStrict;
7562 pIemCpu->offOpcode = offOpcodeSaved;
7563
7564 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7565 uint64_t const RspSaved = pCtx->rsp;
7566 switch (pIemCpu->enmEffOpSize)
7567 {
7568 case IEMMODE_16BIT: iemRegAddToRsp(pCtx, 2); break;
7569 case IEMMODE_32BIT: iemRegAddToRsp(pCtx, 4); break;
7570 case IEMMODE_64BIT: iemRegAddToRsp(pCtx, 8); break;
7571 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7572 }
7573 rcStrict = iemOpHlpCalcRmEffAddr(pIemCpu, bRm, &GCPtrEff);
7574 Assert(rcStrict == VINF_SUCCESS);
7575 pCtx->rsp = RspSaved;
7576
7577 /* Perform the operation - this should be CImpl. */
7578 RTUINT64U TmpRsp;
7579 TmpRsp.u = pCtx->rsp;
7580 switch (pIemCpu->enmEffOpSize)
7581 {
7582 case IEMMODE_16BIT:
7583 {
7584 uint16_t u16Value;
7585 rcStrict = iemMemStackPopU16Ex(pIemCpu, &u16Value, &TmpRsp);
7586 if (rcStrict == VINF_SUCCESS)
7587 rcStrict = iemMemStoreDataU16(pIemCpu, pIemCpu->iEffSeg, GCPtrEff, u16Value);
7588 break;
7589 }
7590
7591 case IEMMODE_32BIT:
7592 {
7593 uint32_t u32Value;
7594 rcStrict = iemMemStackPopU32Ex(pIemCpu, &u32Value, &TmpRsp);
7595 if (rcStrict == VINF_SUCCESS)
7596 rcStrict = iemMemStoreDataU32(pIemCpu, pIemCpu->iEffSeg, GCPtrEff, u32Value);
7597 break;
7598 }
7599
7600 case IEMMODE_64BIT:
7601 {
7602 uint64_t u64Value;
7603 rcStrict = iemMemStackPopU64Ex(pIemCpu, &u64Value, &TmpRsp);
7604 if (rcStrict == VINF_SUCCESS)
7605 rcStrict = iemMemStoreDataU16(pIemCpu, pIemCpu->iEffSeg, GCPtrEff, u64Value);
7606 break;
7607 }
7608
7609 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7610 }
7611 if (rcStrict == VINF_SUCCESS)
7612 {
7613 pCtx->rsp = TmpRsp.u;
7614 iemRegUpdateRip(pIemCpu);
7615 }
7616 return rcStrict;
7617
7618#else
7619 return VERR_NOT_IMPLEMENTED;
7620#endif
7621}
7622
7623
7624/**
7625 * Common 'xchg reg,rAX' helper.
7626 */
7627FNIEMOP_DEF_1(iemOpCommonXchgGRegRax, uint8_t, iReg)
7628{
7629 IEMOP_HLP_NO_LOCK_PREFIX();
7630
7631 iReg |= pIemCpu->uRexB;
7632 switch (pIemCpu->enmEffOpSize)
7633 {
7634 case IEMMODE_16BIT:
7635 IEM_MC_BEGIN(0, 2);
7636 IEM_MC_LOCAL(uint16_t, u16Tmp1);
7637 IEM_MC_LOCAL(uint16_t, u16Tmp2);
7638 IEM_MC_FETCH_GREG_U16(u16Tmp1, iReg);
7639 IEM_MC_FETCH_GREG_U16(u16Tmp2, X86_GREG_xAX);
7640 IEM_MC_STORE_GREG_U16(X86_GREG_xAX, u16Tmp1);
7641 IEM_MC_STORE_GREG_U16(iReg, u16Tmp2);
7642 IEM_MC_ADVANCE_RIP();
7643 IEM_MC_END();
7644 return VINF_SUCCESS;
7645
7646 case IEMMODE_32BIT:
7647 IEM_MC_BEGIN(0, 2);
7648 IEM_MC_LOCAL(uint32_t, u32Tmp1);
7649 IEM_MC_LOCAL(uint32_t, u32Tmp2);
7650 IEM_MC_FETCH_GREG_U32(u32Tmp1, iReg);
7651 IEM_MC_FETCH_GREG_U32(u32Tmp2, X86_GREG_xAX);
7652 IEM_MC_STORE_GREG_U32(X86_GREG_xAX, u32Tmp1);
7653 IEM_MC_STORE_GREG_U32(iReg, u32Tmp2);
7654 IEM_MC_ADVANCE_RIP();
7655 IEM_MC_END();
7656 return VINF_SUCCESS;
7657
7658 case IEMMODE_64BIT:
7659 IEM_MC_BEGIN(0, 2);
7660 IEM_MC_LOCAL(uint64_t, u64Tmp1);
7661 IEM_MC_LOCAL(uint64_t, u64Tmp2);
7662 IEM_MC_FETCH_GREG_U64(u64Tmp1, iReg);
7663 IEM_MC_FETCH_GREG_U64(u64Tmp2, X86_GREG_xAX);
7664 IEM_MC_STORE_GREG_U64(X86_GREG_xAX, u64Tmp1);
7665 IEM_MC_STORE_GREG_U64(iReg, u64Tmp2);
7666 IEM_MC_ADVANCE_RIP();
7667 IEM_MC_END();
7668 return VINF_SUCCESS;
7669
7670 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7671 }
7672}
7673
7674
7675/** Opcode 0x90. */
7676FNIEMOP_DEF(iemOp_nop)
7677{
7678 /* R8/R8D and RAX/EAX can be exchanged. */
7679 if (pIemCpu->fPrefixes & IEM_OP_PRF_REX_B)
7680 {
7681 IEMOP_MNEMONIC("xchg r8,rAX");
7682 return FNIEMOP_CALL_1(iemOpCommonXchgGRegRax, X86_GREG_xAX);
7683 }
7684
7685 if (pIemCpu->fPrefixes & IEM_OP_PRF_LOCK)
7686 IEMOP_MNEMONIC("pause");
7687 else
7688 IEMOP_MNEMONIC("nop");
7689 IEM_MC_BEGIN(0, 0);
7690 IEM_MC_ADVANCE_RIP();
7691 IEM_MC_END();
7692 return VINF_SUCCESS;
7693}
7694
7695
/** Opcode 0x91. */
FNIEMOP_DEF(iemOp_xchg_eCX_eAX)
{
    IEMOP_MNEMONIC("xchg rCX,rAX");
    /* The common worker applies operand size and the REX.B extension. */
    return FNIEMOP_CALL_1(iemOpCommonXchgGRegRax, X86_GREG_xCX);
}
7702
7703
/** Opcode 0x92. */
FNIEMOP_DEF(iemOp_xchg_eDX_eAX)
{
    IEMOP_MNEMONIC("xchg rDX,rAX");
    /* The common worker applies operand size and the REX.B extension. */
    return FNIEMOP_CALL_1(iemOpCommonXchgGRegRax, X86_GREG_xDX);
}
7710
7711
/** Opcode 0x93. */
FNIEMOP_DEF(iemOp_xchg_eBX_eAX)
{
    IEMOP_MNEMONIC("xchg rBX,rAX");
    /* The common worker applies operand size and the REX.B extension. */
    return FNIEMOP_CALL_1(iemOpCommonXchgGRegRax, X86_GREG_xBX);
}
7718
7719
7720/** Opcode 0x94. */
7721FNIEMOP_DEF(iemOp_xchg_eSP_eAX)
7722{
7723 IEMOP_MNEMONIC("xchg rSX,rAX");
7724 return FNIEMOP_CALL_1(iemOpCommonXchgGRegRax, X86_GREG_xSP);
7725}
7726
7727
/** Opcode 0x95. */
FNIEMOP_DEF(iemOp_xchg_eBP_eAX)
{
    IEMOP_MNEMONIC("xchg rBP,rAX");
    /* The common worker applies operand size and the REX.B extension. */
    return FNIEMOP_CALL_1(iemOpCommonXchgGRegRax, X86_GREG_xBP);
}
7734
7735
/** Opcode 0x96. */
FNIEMOP_DEF(iemOp_xchg_eSI_eAX)
{
    IEMOP_MNEMONIC("xchg rSI,rAX");
    /* The common worker applies operand size and the REX.B extension. */
    return FNIEMOP_CALL_1(iemOpCommonXchgGRegRax, X86_GREG_xSI);
}
7742
7743
/** Opcode 0x97. */
FNIEMOP_DEF(iemOp_xchg_eDI_eAX)
{
    IEMOP_MNEMONIC("xchg rDI,rAX");
    /* The common worker applies operand size and the REX.B extension. */
    return FNIEMOP_CALL_1(iemOpCommonXchgGRegRax, X86_GREG_xDI);
}
7750
7751
/**
 * Opcode 0x98 - cbw/cwde/cdqe.
 *
 * Sign extends AL into AX, AX into EAX, or EAX into RAX depending on the
 * effective operand size, by testing the top bit of the source and OR-ing
 * or AND-ing in the upper-half mask.
 */
FNIEMOP_DEF(iemOp_cbw)
{
    IEMOP_HLP_NO_LOCK_PREFIX();
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            IEMOP_MNEMONIC("cbw");
            IEM_MC_BEGIN(0, 1);
            /* Sign bit of AL decides the value of AH. */
            IEM_MC_IF_GREG_BIT_SET(X86_GREG_xAX, 7) {
                IEM_MC_OR_GREG_U16(X86_GREG_xAX, UINT16_C(0xff00));
            } IEM_MC_ELSE() {
                IEM_MC_AND_GREG_U16(X86_GREG_xAX, UINT16_C(0x00ff));
            } IEM_MC_ENDIF();
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_32BIT:
            IEMOP_MNEMONIC("cwde");
            IEM_MC_BEGIN(0, 1);
            /* Sign bit of AX decides the upper word of EAX. */
            IEM_MC_IF_GREG_BIT_SET(X86_GREG_xAX, 15) {
                IEM_MC_OR_GREG_U32(X86_GREG_xAX, UINT32_C(0xffff0000));
            } IEM_MC_ELSE() {
                IEM_MC_AND_GREG_U32(X86_GREG_xAX, UINT32_C(0x0000ffff));
            } IEM_MC_ENDIF();
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_64BIT:
            IEMOP_MNEMONIC("cdqe");
            IEM_MC_BEGIN(0, 1);
            /* Sign bit of EAX decides the upper dword of RAX. */
            IEM_MC_IF_GREG_BIT_SET(X86_GREG_xAX, 31) {
                IEM_MC_OR_GREG_U64(X86_GREG_xAX, UINT64_C(0xffffffff00000000));
            } IEM_MC_ELSE() {
                IEM_MC_AND_GREG_U64(X86_GREG_xAX, UINT64_C(0x00000000ffffffff));
            } IEM_MC_ENDIF();
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
7797
7798
/**
 * Opcode 0x99 - cwd/cdq/cqo.
 *
 * Fills rDX with the sign of rAX: the top bit of AX/EAX/RAX selects either
 * all-ones or zero for DX/EDX/RDX depending on effective operand size.
 */
FNIEMOP_DEF(iemOp_cwd)
{
    IEMOP_HLP_NO_LOCK_PREFIX();
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            IEMOP_MNEMONIC("cwd");
            IEM_MC_BEGIN(0, 1);
            IEM_MC_IF_GREG_BIT_SET(X86_GREG_xAX, 15) {
                IEM_MC_STORE_GREG_U16_CONST(X86_GREG_xDX, UINT16_C(0xffff));
            } IEM_MC_ELSE() {
                IEM_MC_STORE_GREG_U16_CONST(X86_GREG_xDX, 0);
            } IEM_MC_ENDIF();
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_32BIT:
            IEMOP_MNEMONIC("cdq");
            IEM_MC_BEGIN(0, 1);
            IEM_MC_IF_GREG_BIT_SET(X86_GREG_xAX, 31) {
                IEM_MC_STORE_GREG_U32_CONST(X86_GREG_xDX, UINT32_C(0xffffffff));
            } IEM_MC_ELSE() {
                IEM_MC_STORE_GREG_U32_CONST(X86_GREG_xDX, 0);
            } IEM_MC_ENDIF();
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_64BIT:
            IEMOP_MNEMONIC("cqo");
            IEM_MC_BEGIN(0, 1);
            IEM_MC_IF_GREG_BIT_SET(X86_GREG_xAX, 63) {
                IEM_MC_STORE_GREG_U64_CONST(X86_GREG_xDX, UINT64_C(0xffffffffffffffff));
            } IEM_MC_ELSE() {
                IEM_MC_STORE_GREG_U64_CONST(X86_GREG_xDX, 0);
            } IEM_MC_ENDIF();
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
7844
7845
/**
 * Opcode 0x9a - call Ap (far call with immediate far pointer).
 *
 * Invalid in 64-bit mode.  Decodes a 16- or 32-bit offset (per the effective
 * operand size) followed by a 16-bit selector, then defers to the far-call
 * C implementation.
 */
FNIEMOP_DEF(iemOp_call_Ap)
{
    IEMOP_MNEMONIC("call Ap");
    IEMOP_HLP_NO_64BIT();

    /* Decode the far pointer address and pass it on to the far call C implementation. */
    uint32_t offSeg;
    if (pIemCpu->enmEffOpSize != IEMMODE_16BIT)
        IEM_OPCODE_GET_NEXT_U32(&offSeg);
    else
        IEM_OPCODE_GET_NEXT_U16_ZX_U32(&offSeg);
    uint16_t uSel;  IEM_OPCODE_GET_NEXT_U16(&uSel);
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_3(iemCImpl_callf, uSel, offSeg, pIemCpu->enmEffOpSize);
}
7862
7863
/**
 * Opcode 0x9b (aka fwait).
 *
 * Raises a pending device-not-available or FPU exception if one is due;
 * otherwise behaves as a no-op and just advances RIP.
 */
FNIEMOP_DEF(iemOp_wait)
{
    IEMOP_MNEMONIC("wait");
    IEMOP_HLP_NO_LOCK_PREFIX();

    IEM_MC_BEGIN(0, 0);
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
7877
7878
/** Opcode 0x9c - pushf Fv.  Defers to the C implementation; the default
 *  operand size is 64-bit in long mode. */
FNIEMOP_DEF(iemOp_pushf_Fv)
{
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_pushf, pIemCpu->enmEffOpSize);
}
7886
7887
/** Opcode 0x9d - popf Fv.  Defers to the C implementation; the default
 *  operand size is 64-bit in long mode. */
FNIEMOP_DEF(iemOp_popf_Fv)
{
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_popf, pIemCpu->enmEffOpSize);
}
7895
7896
/**
 * Opcode 0x9e - sahf.
 *
 * Stores AH into the low EFLAGS byte: SF, ZF, AF, PF and CF are taken from
 * AH, the fixed bit 1 is forced set, and the remaining EFLAGS bits are kept.
 * In 64-bit mode this requires the AMD LAHF/SAHF CPUID feature bit, else \#UD.
 */
FNIEMOP_DEF(iemOp_sahf)
{
    IEMOP_MNEMONIC("sahf");
    IEMOP_HLP_NO_LOCK_PREFIX();
    if (   pIemCpu->enmCpuMode == IEMMODE_64BIT
        && !IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX(X86_CPUID_AMD_FEATURE_ECX_LAHF_SAHF))
        return IEMOP_RAISE_INVALID_OPCODE();
    IEM_MC_BEGIN(0, 2);
    IEM_MC_LOCAL(uint32_t, u32Flags);
    IEM_MC_LOCAL(uint32_t, EFlags);
    IEM_MC_FETCH_EFLAGS(EFlags);
    IEM_MC_FETCH_GREG_U8_ZX_U32(u32Flags, X86_GREG_xSP/*=AH*/);
    /* Keep only the status flags from AH, clear the low EFLAGS byte and
       merge, forcing the always-one bit (X86_EFL_1). */
    IEM_MC_AND_LOCAL_U32(u32Flags, X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF);
    IEM_MC_AND_LOCAL_U32(EFlags, UINT32_C(0xffffff00));
    IEM_MC_OR_LOCAL_U32(u32Flags, X86_EFL_1);
    IEM_MC_OR_2LOCS_U32(EFlags, u32Flags);
    IEM_MC_COMMIT_EFLAGS(EFlags);
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
7919
7920
/**
 * Opcode 0x9f - lahf.
 *
 * Loads AH with the low byte of EFLAGS.  In 64-bit mode this requires the
 * AMD LAHF/SAHF CPUID feature bit, otherwise \#UD is raised.
 */
FNIEMOP_DEF(iemOp_lahf)
{
    IEMOP_MNEMONIC("lahf");
    IEMOP_HLP_NO_LOCK_PREFIX();
    if (   pIemCpu->enmCpuMode == IEMMODE_64BIT
        && !IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX(X86_CPUID_AMD_FEATURE_ECX_LAHF_SAHF))
        return IEMOP_RAISE_INVALID_OPCODE();
    IEM_MC_BEGIN(0, 1);
    IEM_MC_LOCAL(uint8_t, u8Flags);
    IEM_MC_FETCH_EFLAGS_U8(u8Flags);
    IEM_MC_STORE_GREG_U8(X86_GREG_xSP/*=AH*/, u8Flags);
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
7937
7938
7939/**
7940 * Macro used by iemOp_mov_Al_Ob, iemOp_mov_rAX_Ov, iemOp_mov_Ob_AL and
7941 * iemOp_mov_Ov_rAX to fetch the moffsXX bit of the opcode and fend of lock
7942 * prefixes. Will return on failures.
7943 * @param a_GCPtrMemOff The variable to store the offset in.
7944 */
7945#define IEMOP_FETCH_MOFFS_XX(a_GCPtrMemOff) \
7946 do \
7947 { \
7948 switch (pIemCpu->enmEffAddrMode) \
7949 { \
7950 case IEMMODE_16BIT: \
7951 IEM_OPCODE_GET_NEXT_U16_ZX_U64(&(a_GCPtrMemOff)); \
7952 break; \
7953 case IEMMODE_32BIT: \
7954 IEM_OPCODE_GET_NEXT_U32_ZX_U64(&(a_GCPtrMemOff)); \
7955 break; \
7956 case IEMMODE_64BIT: \
7957 IEM_OPCODE_GET_NEXT_U64(&(a_GCPtrMemOff)); \
7958 break; \
7959 IEM_NOT_REACHED_DEFAULT_CASE_RET(); \
7960 } \
7961 IEMOP_HLP_NO_LOCK_PREFIX(); \
7962 } while (0)
7963
7964/** Opcode 0xa0. */
7965FNIEMOP_DEF(iemOp_mov_Al_Ob)
7966{
7967 /*
7968 * Get the offset and fend of lock prefixes.
7969 */
7970 RTGCPTR GCPtrMemOff;
7971 IEMOP_FETCH_MOFFS_XX(GCPtrMemOff);
7972
7973 /*
7974 * Fetch AL.
7975 */
7976 IEM_MC_BEGIN(0,1);
7977 IEM_MC_LOCAL(uint8_t, u8Tmp);
7978 IEM_MC_FETCH_MEM_U8(u8Tmp, pIemCpu->iEffSeg, GCPtrMemOff);
7979 IEM_MC_STORE_GREG_U8(X86_GREG_xAX, u8Tmp);
7980 IEM_MC_ADVANCE_RIP();
7981 IEM_MC_END();
7982 return VINF_SUCCESS;
7983}
7984
7985
/**
 * Opcode 0xa1 - mov rAX,Ov.
 *
 * Loads AX/EAX/RAX (per the effective operand size) from an absolute
 * (moffs) memory address whose width follows the effective address size.
 */
FNIEMOP_DEF(iemOp_mov_rAX_Ov)
{
    /*
     * Get the offset and fend of lock prefixes.
     */
    IEMOP_MNEMONIC("mov rAX,Ov");
    RTGCPTR GCPtrMemOff;
    IEMOP_FETCH_MOFFS_XX(GCPtrMemOff);

    /*
     * Fetch rAX.
     */
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            IEM_MC_BEGIN(0,1);
            IEM_MC_LOCAL(uint16_t, u16Tmp);
            IEM_MC_FETCH_MEM_U16(u16Tmp, pIemCpu->iEffSeg, GCPtrMemOff);
            IEM_MC_STORE_GREG_U16(X86_GREG_xAX, u16Tmp);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_32BIT:
            IEM_MC_BEGIN(0,1);
            IEM_MC_LOCAL(uint32_t, u32Tmp);
            IEM_MC_FETCH_MEM_U32(u32Tmp, pIemCpu->iEffSeg, GCPtrMemOff);
            IEM_MC_STORE_GREG_U32(X86_GREG_xAX, u32Tmp);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_64BIT:
            IEM_MC_BEGIN(0,1);
            IEM_MC_LOCAL(uint64_t, u64Tmp);
            IEM_MC_FETCH_MEM_U64(u64Tmp, pIemCpu->iEffSeg, GCPtrMemOff);
            IEM_MC_STORE_GREG_U64(X86_GREG_xAX, u64Tmp);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
8031
8032
8033/** Opcode 0xa2. */
8034FNIEMOP_DEF(iemOp_mov_Ob_AL)
8035{
8036 /*
8037 * Get the offset and fend of lock prefixes.
8038 */
8039 RTGCPTR GCPtrMemOff;
8040 IEMOP_FETCH_MOFFS_XX(GCPtrMemOff);
8041
8042 /*
8043 * Store AL.
8044 */
8045 IEM_MC_BEGIN(0,1);
8046 IEM_MC_LOCAL(uint8_t, u8Tmp);
8047 IEM_MC_FETCH_GREG_U8(u8Tmp, X86_GREG_xAX);
8048 IEM_MC_STORE_MEM_U8(pIemCpu->iEffSeg, GCPtrMemOff, u8Tmp);
8049 IEM_MC_ADVANCE_RIP();
8050 IEM_MC_END();
8051 return VINF_SUCCESS;
8052}
8053
8054
8055/** Opcode 0xa3. */
8056FNIEMOP_DEF(iemOp_mov_Ov_rAX)
8057{
8058 /*
8059 * Get the offset and fend of lock prefixes.
8060 */
8061 RTGCPTR GCPtrMemOff;
8062 IEMOP_FETCH_MOFFS_XX(GCPtrMemOff);
8063
8064 /*
8065 * Store rAX.
8066 */
8067 switch (pIemCpu->enmEffOpSize)
8068 {
8069 case IEMMODE_16BIT:
8070 IEM_MC_BEGIN(0,1);
8071 IEM_MC_LOCAL(uint16_t, u16Tmp);
8072 IEM_MC_FETCH_GREG_U16(u16Tmp, X86_GREG_xAX);
8073 IEM_MC_STORE_MEM_U16(pIemCpu->iEffSeg, GCPtrMemOff, u16Tmp);
8074 IEM_MC_ADVANCE_RIP();
8075 IEM_MC_END();
8076 return VINF_SUCCESS;
8077
8078 case IEMMODE_32BIT:
8079 IEM_MC_BEGIN(0,1);
8080 IEM_MC_LOCAL(uint32_t, u32Tmp);
8081 IEM_MC_FETCH_GREG_U32(u32Tmp, X86_GREG_xAX);
8082 IEM_MC_STORE_MEM_U32(pIemCpu->iEffSeg, GCPtrMemOff, u32Tmp);
8083 IEM_MC_ADVANCE_RIP();
8084 IEM_MC_END();
8085 return VINF_SUCCESS;
8086
8087 case IEMMODE_64BIT:
8088 IEM_MC_BEGIN(0,1);
8089 IEM_MC_LOCAL(uint64_t, u64Tmp);
8090 IEM_MC_FETCH_GREG_U64(u64Tmp, X86_GREG_xAX);
8091 IEM_MC_STORE_MEM_U64(pIemCpu->iEffSeg, GCPtrMemOff, u64Tmp);
8092 IEM_MC_ADVANCE_RIP();
8093 IEM_MC_END();
8094 return VINF_SUCCESS;
8095
8096 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8097 }
8098}
8099
/**
 * Macro used by iemOp_movsb_Xb_Yb and iemOp_movswd_Xv_Yv.
 *
 * One MOVS step: loads ValBits bits from [DS-or-override:xSI], stores them
 * to [ES:xDI], then steps xSI/xDI by ValBits/8 - down if EFLAGS.DF is set,
 * up otherwise.  AddrBits selects the address-size registers.
 */
#define IEM_MOVS_CASE(ValBits, AddrBits) \
        IEM_MC_BEGIN(0, 2); \
        IEM_MC_LOCAL(uint##ValBits##_t, uValue); \
        IEM_MC_LOCAL(RTGCPTR,           uAddr); \
        IEM_MC_FETCH_GREG_U##AddrBits##_ZX_U64(uAddr, X86_GREG_xSI); \
        IEM_MC_FETCH_MEM_U##ValBits(uValue, pIemCpu->iEffSeg, uAddr); \
        IEM_MC_FETCH_GREG_U##AddrBits##_ZX_U64(uAddr, X86_GREG_xDI); \
        IEM_MC_STORE_MEM_U##ValBits(X86_SREG_ES, uAddr, uValue); \
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_DF) { \
            IEM_MC_SUB_GREG_U##AddrBits(X86_GREG_xDI, ValBits / 8); \
            IEM_MC_SUB_GREG_U##AddrBits(X86_GREG_xSI, ValBits / 8); \
        } IEM_MC_ELSE() { \
            IEM_MC_ADD_GREG_U##AddrBits(X86_GREG_xDI, ValBits / 8); \
            IEM_MC_ADD_GREG_U##AddrBits(X86_GREG_xSI, ValBits / 8); \
        } IEM_MC_ENDIF(); \
        IEM_MC_ADVANCE_RIP(); \
        IEM_MC_END();
8118
/**
 * Opcode 0xa4 - movsb Xb,Yb.
 *
 * With a REP/REPNE prefix the whole operation is deferred to the C
 * implementation (one variant per address size); otherwise a single
 * byte step is executed via IEM_MOVS_CASE.
 */
FNIEMOP_DEF(iemOp_movsb_Xb_Yb)
{
    IEMOP_HLP_NO_LOCK_PREFIX();

    /*
     * Use the C implementation if a repeat prefix is encountered.
     */
    if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
    {
        IEMOP_MNEMONIC("rep movsb Xb,Yb");
        switch (pIemCpu->enmEffAddrMode)
        {
            case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op8_addr16, pIemCpu->iEffSeg);
            case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op8_addr32, pIemCpu->iEffSeg);
            case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op8_addr64, pIemCpu->iEffSeg);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    IEMOP_MNEMONIC("movsb Xb,Yb");

    /*
     * Sharing case implementation with movs[wdq] below.
     */
    switch (pIemCpu->enmEffAddrMode)
    {
        case IEMMODE_16BIT: IEM_MOVS_CASE(8, 16); break;
        case IEMMODE_32BIT: IEM_MOVS_CASE(8, 32); break;
        case IEMMODE_64BIT: IEM_MOVS_CASE(8, 64); break;
        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
    return VINF_SUCCESS;
}
8152
8153
8154/** Opcode 0xa5. */
8155FNIEMOP_DEF(iemOp_movswd_Xv_Yv)
8156{
8157 IEMOP_HLP_NO_LOCK_PREFIX();
8158
8159 /*
8160 * Use the C implementation if a repeat prefix is encountered.
8161 */
8162 if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
8163 {
8164 IEMOP_MNEMONIC("rep movs Xv,Yv");
8165 switch (pIemCpu->enmEffOpSize)
8166 {
8167 case IEMMODE_16BIT:
8168 switch (pIemCpu->enmEffAddrMode)
8169 {
8170 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op16_addr16, pIemCpu->iEffSeg);
8171 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op16_addr32, pIemCpu->iEffSeg);
8172 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op16_addr64, pIemCpu->iEffSeg);
8173 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8174 }
8175 break;
8176 case IEMMODE_32BIT:
8177 switch (pIemCpu->enmEffAddrMode)
8178 {
8179 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op32_addr16, pIemCpu->iEffSeg);
8180 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op32_addr32, pIemCpu->iEffSeg);
8181 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op32_addr64, pIemCpu->iEffSeg);
8182 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8183 }
8184 case IEMMODE_64BIT:
8185 switch (pIemCpu->enmEffAddrMode)
8186 {
8187 case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_3);
8188 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op64_addr32, pIemCpu->iEffSeg);
8189 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op64_addr64, pIemCpu->iEffSeg);
8190 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8191 }
8192 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8193 }
8194 }
8195 IEMOP_MNEMONIC("movs Xv,Yv");
8196
8197 /*
8198 * Annoying double switch here.
8199 * Using ugly macro for implementing the cases, sharing it with movsb.
8200 */
8201 switch (pIemCpu->enmEffOpSize)
8202 {
8203 case IEMMODE_16BIT:
8204 switch (pIemCpu->enmEffAddrMode)
8205 {
8206 case IEMMODE_16BIT: IEM_MOVS_CASE(16, 16); break;
8207 case IEMMODE_32BIT: IEM_MOVS_CASE(16, 32); break;
8208 case IEMMODE_64BIT: IEM_MOVS_CASE(16, 64); break;
8209 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8210 }
8211 break;
8212
8213 case IEMMODE_32BIT:
8214 switch (pIemCpu->enmEffAddrMode)
8215 {
8216 case IEMMODE_16BIT: IEM_MOVS_CASE(32, 16); break;
8217 case IEMMODE_32BIT: IEM_MOVS_CASE(32, 32); break;
8218 case IEMMODE_64BIT: IEM_MOVS_CASE(32, 64); break;
8219 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8220 }
8221 break;
8222
8223 case IEMMODE_64BIT:
8224 switch (pIemCpu->enmEffAddrMode)
8225 {
8226 case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_4); /* cannot be encoded */ break;
8227 case IEMMODE_32BIT: IEM_MOVS_CASE(64, 32); break;
8228 case IEMMODE_64BIT: IEM_MOVS_CASE(64, 64); break;
8229 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8230 }
8231 break;
8232 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8233 }
8234 return VINF_SUCCESS;
8235}
8236
8237#undef IEM_MOVS_CASE
8238
/** Macro used by iemOp_cmpsb_Xb_Yb and iemOp_cmpswd_Xv_Yv.
 *
 * Emits the non-repeated CMPS body: fetches [xSI] from the effective data
 * segment and [xDI] from ES, compares them with iemAImpl_cmp_uNN (EFLAGS
 * only, no destination write), then steps both index registers by the
 * operand size, up or down according to EFLAGS.DF.
 *
 * @param   ValBits     Operand size in bits (8, 16, 32 or 64).
 * @param   AddrBits    Effective address size in bits (16, 32 or 64).
 */
#define IEM_CMPS_CASE(ValBits, AddrBits) \
        IEM_MC_BEGIN(3, 3); \
        IEM_MC_ARG(uint##ValBits##_t *, puValue1, 0); \
        IEM_MC_ARG(uint##ValBits##_t, uValue2, 1); \
        IEM_MC_ARG(uint32_t *, pEFlags, 2); \
        IEM_MC_LOCAL(uint##ValBits##_t, uValue1); \
        IEM_MC_LOCAL(RTGCPTR, uAddr); \
        \
        IEM_MC_FETCH_GREG_U##AddrBits##_ZX_U64(uAddr, X86_GREG_xSI); \
        IEM_MC_FETCH_MEM_U##ValBits(uValue1, pIemCpu->iEffSeg, uAddr); \
        IEM_MC_FETCH_GREG_U##AddrBits##_ZX_U64(uAddr, X86_GREG_xDI); \
        IEM_MC_FETCH_MEM_U##ValBits(uValue2, X86_SREG_ES, uAddr); \
        IEM_MC_REF_LOCAL(puValue1, uValue1); \
        IEM_MC_REF_EFLAGS(pEFlags); \
        IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_cmp_u##ValBits, puValue1, uValue2, pEFlags); \
        \
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_DF) { \
            IEM_MC_SUB_GREG_U##AddrBits(X86_GREG_xDI, ValBits / 8); \
            IEM_MC_SUB_GREG_U##AddrBits(X86_GREG_xSI, ValBits / 8); \
        } IEM_MC_ELSE() { \
            IEM_MC_ADD_GREG_U##AddrBits(X86_GREG_xDI, ValBits / 8); \
            IEM_MC_ADD_GREG_U##AddrBits(X86_GREG_xSI, ValBits / 8); \
        } IEM_MC_ENDIF(); \
        IEM_MC_ADVANCE_RIP(); \
        IEM_MC_END(); \

8266/** Opcode 0xa6. */
8267FNIEMOP_DEF(iemOp_cmpsb_Xb_Yb)
8268{
8269 IEMOP_HLP_NO_LOCK_PREFIX();
8270
8271 /*
8272 * Use the C implementation if a repeat prefix is encountered.
8273 */
8274 if (pIemCpu->fPrefixes & IEM_OP_PRF_REPZ)
8275 {
8276 IEMOP_MNEMONIC("repe cmps Xb,Yb");
8277 switch (pIemCpu->enmEffAddrMode)
8278 {
8279 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op8_addr16, pIemCpu->iEffSeg);
8280 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op8_addr32, pIemCpu->iEffSeg);
8281 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op8_addr64, pIemCpu->iEffSeg);
8282 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8283 }
8284 }
8285 if (pIemCpu->fPrefixes & IEM_OP_PRF_REPNZ)
8286 {
8287 IEMOP_MNEMONIC("repe cmps Xb,Yb");
8288 switch (pIemCpu->enmEffAddrMode)
8289 {
8290 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op8_addr16, pIemCpu->iEffSeg);
8291 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op8_addr32, pIemCpu->iEffSeg);
8292 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op8_addr64, pIemCpu->iEffSeg);
8293 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8294 }
8295 }
8296 IEMOP_MNEMONIC("cmps Xb,Yb");
8297
8298 /*
8299 * Sharing case implementation with cmps[wdq] below.
8300 */
8301 switch (pIemCpu->enmEffAddrMode)
8302 {
8303 case IEMMODE_16BIT: IEM_CMPS_CASE(8, 16); break;
8304 case IEMMODE_32BIT: IEM_CMPS_CASE(8, 32); break;
8305 case IEMMODE_64BIT: IEM_CMPS_CASE(8, 64); break;
8306 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8307 }
8308 return VINF_SUCCESS;
8309
8310}
8311
8312
/** Opcode 0xa7 - cmpsw/cmpsd/cmpsq Xv,Yv.
 *
 * Compares the operand-sized value at pIemCpu->iEffSeg:[xSI] with the one at
 * ES:[xDI], updating EFLAGS only.  REPE/REPNE forms defer to C helpers; the
 * non-repeated form expands IEM_CMPS_CASE per op/addr size combination.
 */
FNIEMOP_DEF(iemOp_cmpswd_Xv_Yv)
{
    IEMOP_HLP_NO_LOCK_PREFIX();

    /*
     * Use the C implementation if a repeat prefix is encountered.
     */
    if (pIemCpu->fPrefixes & IEM_OP_PRF_REPZ)
    {
        IEMOP_MNEMONIC("repe cmps Xv,Yv");
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op16_addr16, pIemCpu->iEffSeg);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op16_addr32, pIemCpu->iEffSeg);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op16_addr64, pIemCpu->iEffSeg);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                break;
            case IEMMODE_32BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op32_addr16, pIemCpu->iEffSeg);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op32_addr32, pIemCpu->iEffSeg);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op32_addr64, pIemCpu->iEffSeg);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                /* NOTE(review): no break here; harmless since every inner case (incl. default) returns. */
            case IEMMODE_64BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_3); /* 16-bit addressing not encodable with 64-bit op size. */
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op64_addr32, pIemCpu->iEffSeg);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op64_addr64, pIemCpu->iEffSeg);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }

    if (pIemCpu->fPrefixes & IEM_OP_PRF_REPNZ)
    {
        IEMOP_MNEMONIC("repne cmps Xv,Yv");
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op16_addr16, pIemCpu->iEffSeg);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op16_addr32, pIemCpu->iEffSeg);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op16_addr64, pIemCpu->iEffSeg);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                break;
            case IEMMODE_32BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op32_addr16, pIemCpu->iEffSeg);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op32_addr32, pIemCpu->iEffSeg);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op32_addr64, pIemCpu->iEffSeg);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                /* NOTE(review): no break here; harmless since every inner case (incl. default) returns. */
            case IEMMODE_64BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_3); /* 16-bit addressing not encodable with 64-bit op size. */
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op64_addr32, pIemCpu->iEffSeg);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op64_addr64, pIemCpu->iEffSeg);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }

    IEMOP_MNEMONIC("cmps Xv,Yv");

    /*
     * Annoying double switch here.
     * Using ugly macro for implementing the cases, sharing it with cmpsb.
     */
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            switch (pIemCpu->enmEffAddrMode)
            {
                case IEMMODE_16BIT: IEM_CMPS_CASE(16, 16); break;
                case IEMMODE_32BIT: IEM_CMPS_CASE(16, 32); break;
                case IEMMODE_64BIT: IEM_CMPS_CASE(16, 64); break;
                IEM_NOT_REACHED_DEFAULT_CASE_RET();
            }
            break;

        case IEMMODE_32BIT:
            switch (pIemCpu->enmEffAddrMode)
            {
                case IEMMODE_16BIT: IEM_CMPS_CASE(32, 16); break;
                case IEMMODE_32BIT: IEM_CMPS_CASE(32, 32); break;
                case IEMMODE_64BIT: IEM_CMPS_CASE(32, 64); break;
                IEM_NOT_REACHED_DEFAULT_CASE_RET();
            }
            break;

        case IEMMODE_64BIT:
            switch (pIemCpu->enmEffAddrMode)
            {
                case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_4); /* cannot be encoded */ break;
                case IEMMODE_32BIT: IEM_CMPS_CASE(64, 32); break;
                case IEMMODE_64BIT: IEM_CMPS_CASE(64, 64); break;
                IEM_NOT_REACHED_DEFAULT_CASE_RET();
            }
            break;
        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
    return VINF_SUCCESS;

}
8431
8432#undef IEM_CMPS_CASE
8433
/** Opcode 0xa8 - test AL,Ib.
 * Delegates to the common AL,Ib binary-operator decoder with the TEST
 * implementation table; AF is architecturally undefined after TEST. */
FNIEMOP_DEF(iemOp_test_AL_Ib)
{
    IEMOP_MNEMONIC("test al,Ib");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_AL_Ib, &g_iemAImpl_test);
}
8441
8442
/** Opcode 0xa9 - test rAX,Iz.
 * Delegates to the common rAX,Iz binary-operator decoder with the TEST
 * implementation table; AF is architecturally undefined after TEST. */
FNIEMOP_DEF(iemOp_test_eAX_Iz)
{
    IEMOP_MNEMONIC("test rAX,Iz");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rAX_Iz, &g_iemAImpl_test);
}
8450
8451
/** Macro used by iemOp_stosb_Yb_AL and iemOp_stoswd_Yv_eAX.
 *
 * Emits the non-repeated STOS body: stores AL/AX/EAX/RAX at ES:[xDI] and
 * steps xDI by the operand size, up or down according to EFLAGS.DF.
 *
 * @param   ValBits     Operand size in bits (8, 16, 32 or 64).
 * @param   AddrBits    Effective address size in bits (16, 32 or 64).
 */
#define IEM_STOS_CASE(ValBits, AddrBits) \
        IEM_MC_BEGIN(0, 2); \
        IEM_MC_LOCAL(uint##ValBits##_t, uValue); \
        IEM_MC_LOCAL(RTGCPTR, uAddr); \
        IEM_MC_FETCH_GREG_U##ValBits(uValue, X86_GREG_xAX); \
        IEM_MC_FETCH_GREG_U##AddrBits##_ZX_U64(uAddr, X86_GREG_xDI); \
        IEM_MC_STORE_MEM_U##ValBits(X86_SREG_ES, uAddr, uValue); \
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_DF) { \
            IEM_MC_SUB_GREG_U##AddrBits(X86_GREG_xDI, ValBits / 8); \
        } IEM_MC_ELSE() { \
            IEM_MC_ADD_GREG_U##AddrBits(X86_GREG_xDI, ValBits / 8); \
        } IEM_MC_ENDIF(); \
        IEM_MC_ADVANCE_RIP(); \
        IEM_MC_END(); \

/** Opcode 0xaa - stosb Yb,AL.
 * Stores AL at ES:[xDI], stepping xDI per EFLAGS.DF.  REP forms defer to C
 * helpers (F2 and F3 are treated identically here - only xCX counts). */
FNIEMOP_DEF(iemOp_stosb_Yb_AL)
{
    IEMOP_HLP_NO_LOCK_PREFIX();

    /*
     * Use the C implementation if a repeat prefix is encountered.
     */
    if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
    {
        IEMOP_MNEMONIC("rep stos Yb,al");
        switch (pIemCpu->enmEffAddrMode)
        {
            case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_al_m16);
            case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_al_m32);
            case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_al_m64);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    IEMOP_MNEMONIC("stos Yb,al");

    /*
     * Sharing case implementation with stos[wdq] below.
     */
    switch (pIemCpu->enmEffAddrMode)
    {
        case IEMMODE_16BIT: IEM_STOS_CASE(8, 16); break;
        case IEMMODE_32BIT: IEM_STOS_CASE(8, 32); break;
        case IEMMODE_64BIT: IEM_STOS_CASE(8, 64); break;
        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
    return VINF_SUCCESS;
}
8501
8502
/** Opcode 0xab - stosw/stosd/stosq Yv,rAX.
 * Stores AX/EAX/RAX at ES:[xDI], stepping xDI per EFLAGS.DF.  REP forms
 * defer to C helpers (F2 and F3 are treated identically here). */
FNIEMOP_DEF(iemOp_stoswd_Yv_eAX)
{
    IEMOP_HLP_NO_LOCK_PREFIX();

    /*
     * Use the C implementation if a repeat prefix is encountered.
     */
    if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
    {
        IEMOP_MNEMONIC("rep stos Yv,rAX");
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_ax_m16);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_ax_m32);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_ax_m64);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                break;
            case IEMMODE_32BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_eax_m16);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_eax_m32);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_eax_m64);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                /* NOTE(review): no break here; harmless since every inner case (incl. default) returns. */
            case IEMMODE_64BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_3); /* 16-bit addressing not encodable with 64-bit op size. */
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_rax_m32);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_rax_m64);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    IEMOP_MNEMONIC("stos Yv,rAX");

    /*
     * Annoying double switch here.
     * Using ugly macro for implementing the cases, sharing it with stosb.
     */
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            switch (pIemCpu->enmEffAddrMode)
            {
                case IEMMODE_16BIT: IEM_STOS_CASE(16, 16); break;
                case IEMMODE_32BIT: IEM_STOS_CASE(16, 32); break;
                case IEMMODE_64BIT: IEM_STOS_CASE(16, 64); break;
                IEM_NOT_REACHED_DEFAULT_CASE_RET();
            }
            break;

        case IEMMODE_32BIT:
            switch (pIemCpu->enmEffAddrMode)
            {
                case IEMMODE_16BIT: IEM_STOS_CASE(32, 16); break;
                case IEMMODE_32BIT: IEM_STOS_CASE(32, 32); break;
                case IEMMODE_64BIT: IEM_STOS_CASE(32, 64); break;
                IEM_NOT_REACHED_DEFAULT_CASE_RET();
            }
            break;

        case IEMMODE_64BIT:
            switch (pIemCpu->enmEffAddrMode)
            {
                case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_4); /* cannot be encoded */ break;
                case IEMMODE_32BIT: IEM_STOS_CASE(64, 32); break;
                case IEMMODE_64BIT: IEM_STOS_CASE(64, 64); break;
                IEM_NOT_REACHED_DEFAULT_CASE_RET();
            }
            break;
        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
    return VINF_SUCCESS;
}
8585
8586#undef IEM_STOS_CASE
8587
/** Macro used by iemOp_lodsb_AL_Xb and iemOp_lodswd_eAX_Xv.
 *
 * Emits the non-repeated LODS body: loads the operand-sized value at
 * pIemCpu->iEffSeg:[xSI] into AL/AX/EAX/RAX and steps xSI by the operand
 * size, up or down according to EFLAGS.DF.
 *
 * @param   ValBits     Operand size in bits (8, 16, 32 or 64).
 * @param   AddrBits    Effective address size in bits (16, 32 or 64).
 */
#define IEM_LODS_CASE(ValBits, AddrBits) \
        IEM_MC_BEGIN(0, 2); \
        IEM_MC_LOCAL(uint##ValBits##_t, uValue); \
        IEM_MC_LOCAL(RTGCPTR, uAddr); \
        IEM_MC_FETCH_GREG_U##AddrBits##_ZX_U64(uAddr, X86_GREG_xSI); \
        IEM_MC_FETCH_MEM_U##ValBits(uValue, pIemCpu->iEffSeg, uAddr); \
        IEM_MC_STORE_GREG_U##ValBits(X86_GREG_xAX, uValue); \
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_DF) { \
            IEM_MC_SUB_GREG_U##AddrBits(X86_GREG_xSI, ValBits / 8); \
        } IEM_MC_ELSE() { \
            IEM_MC_ADD_GREG_U##AddrBits(X86_GREG_xSI, ValBits / 8); \
        } IEM_MC_ENDIF(); \
        IEM_MC_ADVANCE_RIP(); \
        IEM_MC_END();
8603
/** Opcode 0xac - lodsb AL,Xb.
 * Loads the byte at pIemCpu->iEffSeg:[xSI] into AL, stepping xSI per
 * EFLAGS.DF.  REP forms defer to C helpers (F2 and F3 treated identically). */
FNIEMOP_DEF(iemOp_lodsb_AL_Xb)
{
    IEMOP_HLP_NO_LOCK_PREFIX();

    /*
     * Use the C implementation if a repeat prefix is encountered.
     */
    if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
    {
        IEMOP_MNEMONIC("rep lodsb al,Xb");
        switch (pIemCpu->enmEffAddrMode)
        {
            case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_al_m16, pIemCpu->iEffSeg);
            case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_al_m32, pIemCpu->iEffSeg);
            case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_al_m64, pIemCpu->iEffSeg);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    IEMOP_MNEMONIC("lodsb al,Xb");

    /*
     * Sharing case implementation with lods[wdq] below.
     */
    switch (pIemCpu->enmEffAddrMode)
    {
        case IEMMODE_16BIT: IEM_LODS_CASE(8, 16); break;
        case IEMMODE_32BIT: IEM_LODS_CASE(8, 32); break;
        case IEMMODE_64BIT: IEM_LODS_CASE(8, 64); break;
        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
    return VINF_SUCCESS;
}
8637
8638
/** Opcode 0xad - lodsw/lodsd/lodsq rAX,Xv.
 * Loads the operand-sized value at pIemCpu->iEffSeg:[xSI] into AX/EAX/RAX,
 * stepping xSI per EFLAGS.DF.  REP forms defer to C helpers. */
FNIEMOP_DEF(iemOp_lodswd_eAX_Xv)
{
    IEMOP_HLP_NO_LOCK_PREFIX();

    /*
     * Use the C implementation if a repeat prefix is encountered.
     */
    if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
    {
        IEMOP_MNEMONIC("rep lods rAX,Xv");
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_ax_m16, pIemCpu->iEffSeg);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_ax_m32, pIemCpu->iEffSeg);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_ax_m64, pIemCpu->iEffSeg);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                break;
            case IEMMODE_32BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_eax_m16, pIemCpu->iEffSeg);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_eax_m32, pIemCpu->iEffSeg);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_eax_m64, pIemCpu->iEffSeg);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                /* NOTE(review): no break here; harmless since every inner case (incl. default) returns. */
            case IEMMODE_64BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_3); /* 16-bit addressing not encodable with 64-bit op size. */
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_rax_m32, pIemCpu->iEffSeg);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_rax_m64, pIemCpu->iEffSeg);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    IEMOP_MNEMONIC("lods rAX,Xv");

    /*
     * Annoying double switch here.
     * Using ugly macro for implementing the cases, sharing it with lodsb.
     */
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            switch (pIemCpu->enmEffAddrMode)
            {
                case IEMMODE_16BIT: IEM_LODS_CASE(16, 16); break;
                case IEMMODE_32BIT: IEM_LODS_CASE(16, 32); break;
                case IEMMODE_64BIT: IEM_LODS_CASE(16, 64); break;
                IEM_NOT_REACHED_DEFAULT_CASE_RET();
            }
            break;

        case IEMMODE_32BIT:
            switch (pIemCpu->enmEffAddrMode)
            {
                case IEMMODE_16BIT: IEM_LODS_CASE(32, 16); break;
                case IEMMODE_32BIT: IEM_LODS_CASE(32, 32); break;
                case IEMMODE_64BIT: IEM_LODS_CASE(32, 64); break;
                IEM_NOT_REACHED_DEFAULT_CASE_RET();
            }
            break;

        case IEMMODE_64BIT:
            switch (pIemCpu->enmEffAddrMode)
            {
                case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_4); /* cannot be encoded */ break;
                case IEMMODE_32BIT: IEM_LODS_CASE(64, 32); break;
                case IEMMODE_64BIT: IEM_LODS_CASE(64, 64); break;
                IEM_NOT_REACHED_DEFAULT_CASE_RET();
            }
            break;
        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
    return VINF_SUCCESS;
}
8721
8722#undef IEM_LODS_CASE
8723
/** Macro used by iemOp_scasb_AL_Xb and iemOp_scaswd_eAX_Xv.
 *
 * Emits the non-repeated SCAS body: compares AL/AX/EAX/RAX with the value
 * at ES:[xDI] via iemAImpl_cmp_uNN (EFLAGS only, rAX unchanged), then steps
 * xDI by the operand size, up or down according to EFLAGS.DF.
 *
 * @param   ValBits     Operand size in bits (8, 16, 32 or 64).
 * @param   AddrBits    Effective address size in bits (16, 32 or 64).
 */
#define IEM_SCAS_CASE(ValBits, AddrBits) \
        IEM_MC_BEGIN(3, 2); \
        IEM_MC_ARG(uint##ValBits##_t *, puRax, 0); \
        IEM_MC_ARG(uint##ValBits##_t, uValue, 1); \
        IEM_MC_ARG(uint32_t *, pEFlags, 2); \
        IEM_MC_LOCAL(RTGCPTR, uAddr); \
        \
        IEM_MC_FETCH_GREG_U##AddrBits##_ZX_U64(uAddr, X86_GREG_xDI); \
        IEM_MC_FETCH_MEM_U##ValBits(uValue, X86_SREG_ES, uAddr); \
        IEM_MC_REF_GREG_U##ValBits(puRax, X86_GREG_xAX); \
        IEM_MC_REF_EFLAGS(pEFlags); \
        IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_cmp_u##ValBits, puRax, uValue, pEFlags); \
        \
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_DF) { \
            IEM_MC_SUB_GREG_U##AddrBits(X86_GREG_xDI, ValBits / 8); \
        } IEM_MC_ELSE() { \
            IEM_MC_ADD_GREG_U##AddrBits(X86_GREG_xDI, ValBits / 8); \
        } IEM_MC_ENDIF(); \
        IEM_MC_ADVANCE_RIP(); \
        IEM_MC_END();
8745
/** Opcode 0xae - scasb AL,Xb.
 * Compares AL with the byte at ES:[xDI] (EFLAGS only), stepping xDI per
 * EFLAGS.DF.  REPE/REPNE forms defer to their respective C helpers. */
FNIEMOP_DEF(iemOp_scasb_AL_Xb)
{
    IEMOP_HLP_NO_LOCK_PREFIX();

    /*
     * Use the C implementation if a repeat prefix is encountered.
     */
    if (pIemCpu->fPrefixes & IEM_OP_PRF_REPZ)
    {
        IEMOP_MNEMONIC("repe scasb al,Xb");
        switch (pIemCpu->enmEffAddrMode)
        {
            case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_al_m16);
            case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_al_m32);
            case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_al_m64);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    if (pIemCpu->fPrefixes & IEM_OP_PRF_REPNZ)
    {
        IEMOP_MNEMONIC("repne scasb al,Xb");
        switch (pIemCpu->enmEffAddrMode)
        {
            case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_al_m16);
            case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_al_m32);
            case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_al_m64);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    IEMOP_MNEMONIC("scasb al,Xb");

    /*
     * Sharing case implementation with scas[wdq] below.
     */
    switch (pIemCpu->enmEffAddrMode)
    {
        case IEMMODE_16BIT: IEM_SCAS_CASE(8, 16); break;
        case IEMMODE_32BIT: IEM_SCAS_CASE(8, 32); break;
        case IEMMODE_64BIT: IEM_SCAS_CASE(8, 64); break;
        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
    return VINF_SUCCESS;
}
8790
8791
8792/** Opcode 0xaf. */
8793FNIEMOP_DEF(iemOp_scaswd_eAX_Xv)
8794{
8795 IEMOP_HLP_NO_LOCK_PREFIX();
8796
8797 /*
8798 * Use the C implementation if a repeat prefix is encountered.
8799 */
8800 if (pIemCpu->fPrefixes & IEM_OP_PRF_REPZ)
8801 {
8802 IEMOP_MNEMONIC("repe scas rAX,Xv");
8803 switch (pIemCpu->enmEffOpSize)
8804 {
8805 case IEMMODE_16BIT:
8806 switch (pIemCpu->enmEffAddrMode)
8807 {
8808 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_ax_m16);
8809 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_ax_m32);
8810 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_ax_m64);
8811 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8812 }
8813 break;
8814 case IEMMODE_32BIT:
8815 switch (pIemCpu->enmEffAddrMode)
8816 {
8817 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_eax_m16);
8818 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_eax_m32);
8819 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_eax_m64);
8820 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8821 }
8822 case IEMMODE_64BIT:
8823 switch (pIemCpu->enmEffAddrMode)
8824 {
8825 case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_3); /** @todo It's this wrong, we can do 16-bit addressing in 64-bit mode, but not 32-bit. right? */
8826 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_rax_m32);
8827 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_rax_m64);
8828 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8829 }
8830 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8831 }
8832 }
8833 if (pIemCpu->fPrefixes & IEM_OP_PRF_REPNZ)
8834 {
8835 IEMOP_MNEMONIC("repne scas rAX,Xv");
8836 switch (pIemCpu->enmEffOpSize)
8837 {
8838 case IEMMODE_16BIT:
8839 switch (pIemCpu->enmEffAddrMode)
8840 {
8841 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_ax_m16);
8842 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_ax_m32);
8843 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_ax_m64);
8844 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8845 }
8846 break;
8847 case IEMMODE_32BIT:
8848 switch (pIemCpu->enmEffAddrMode)
8849 {
8850 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_eax_m16);
8851 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_eax_m32);
8852 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_eax_m64);
8853 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8854 }
8855 case IEMMODE_64BIT:
8856 switch (pIemCpu->enmEffAddrMode)
8857 {
8858 case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_3);
8859 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_rax_m32);
8860 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_rax_m64);
8861 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8862 }
8863 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8864 }
8865 }
8866 IEMOP_MNEMONIC("scas rAX,Xv");
8867
8868 /*
8869 * Annoying double switch here.
8870 * Using ugly macro for implementing the cases, sharing it with scasb.
8871 */
8872 switch (pIemCpu->enmEffOpSize)
8873 {
8874 case IEMMODE_16BIT:
8875 switch (pIemCpu->enmEffAddrMode)
8876 {
8877 case IEMMODE_16BIT: IEM_SCAS_CASE(16, 16); break;
8878 case IEMMODE_32BIT: IEM_SCAS_CASE(16, 32); break;
8879 case IEMMODE_64BIT: IEM_SCAS_CASE(16, 64); break;
8880 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8881 }
8882 break;
8883
8884 case IEMMODE_32BIT:
8885 switch (pIemCpu->enmEffAddrMode)
8886 {
8887 case IEMMODE_16BIT: IEM_SCAS_CASE(32, 16); break;
8888 case IEMMODE_32BIT: IEM_SCAS_CASE(32, 32); break;
8889 case IEMMODE_64BIT: IEM_SCAS_CASE(32, 64); break;
8890 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8891 }
8892 break;
8893
8894 case IEMMODE_64BIT:
8895 switch (pIemCpu->enmEffAddrMode)
8896 {
8897 case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_4); /* cannot be encoded */ break;
8898 case IEMMODE_32BIT: IEM_SCAS_CASE(64, 32); break;
8899 case IEMMODE_64BIT: IEM_SCAS_CASE(64, 64); break;
8900 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8901 }
8902 break;
8903 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8904 }
8905 return VINF_SUCCESS;
8906}
8907
8908#undef IEM_SCAS_CASE
8909
8910/**
8911 * Common 'mov r8, imm8' helper.
8912 */
8913FNIEMOP_DEF_1(iemOpCommonMov_r8_Ib, uint8_t, iReg)
8914{
8915 uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
8916 IEMOP_HLP_NO_LOCK_PREFIX();
8917
8918 IEM_MC_BEGIN(0, 1);
8919 IEM_MC_LOCAL_CONST(uint8_t, u8Value,/*=*/ u8Imm);
8920 IEM_MC_STORE_GREG_U8(iReg, u8Value);
8921 IEM_MC_ADVANCE_RIP();
8922 IEM_MC_END();
8923
8924 return VINF_SUCCESS;
8925}
8926
8927
/** Opcode 0xb0 - mov AL,Ib. */
FNIEMOP_DEF(iemOp_mov_AL_Ib)
{
    IEMOP_MNEMONIC("mov AL,Ib");
    return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xAX);
}
8934
8935
/** Opcode 0xb1 - mov CL,Ib. */
FNIEMOP_DEF(iemOp_CL_Ib)
{
    IEMOP_MNEMONIC("mov CL,Ib");
    return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xCX);
}
8942
8943
/** Opcode 0xb2 - mov DL,Ib. */
FNIEMOP_DEF(iemOp_DL_Ib)
{
    IEMOP_MNEMONIC("mov DL,Ib");
    return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xDX);
}
8950
8951
/** Opcode 0xb3 - mov BL,Ib. */
FNIEMOP_DEF(iemOp_BL_Ib)
{
    IEMOP_MNEMONIC("mov BL,Ib");
    return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xBX);
}
8958
8959
/** Opcode 0xb4 - mov AH,Ib (mov SPL,Ib with REX).
 * NOTE(review): register index 4 (xSP) encodes AH when no REX prefix is
 * present - assumed remapped to the high byte by the U8 store; verify. */
FNIEMOP_DEF(iemOp_mov_AH_Ib)
{
    IEMOP_MNEMONIC("mov AH,Ib");
    return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xSP);
}
8966
8967
/** Opcode 0xb5 - mov CH,Ib (mov BPL,Ib with REX).
 * NOTE(review): register index 5 (xBP) encodes CH when no REX prefix is
 * present - assumed remapped to the high byte by the U8 store; verify. */
FNIEMOP_DEF(iemOp_CH_Ib)
{
    IEMOP_MNEMONIC("mov CH,Ib");
    return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xBP);
}
8974
8975
/** Opcode 0xb6 - mov DH,Ib (mov SIL,Ib with REX).
 * NOTE(review): register index 6 (xSI) encodes DH when no REX prefix is
 * present - assumed remapped to the high byte by the U8 store; verify. */
FNIEMOP_DEF(iemOp_DH_Ib)
{
    IEMOP_MNEMONIC("mov DH,Ib");
    return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xSI);
}
8982
8983
/** Opcode 0xb7 - mov BH,Ib (mov DIL,Ib with REX).
 * NOTE(review): register index 7 (xDI) encodes BH when no REX prefix is
 * present - assumed remapped to the high byte by the U8 store; verify. */
FNIEMOP_DEF(iemOp_BH_Ib)
{
    IEMOP_MNEMONIC("mov BH,Ib");
    return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xDI);
}
8990
8991
8992/**
8993 * Common 'mov regX,immX' helper.
8994 */
8995FNIEMOP_DEF_1(iemOpCommonMov_Rv_Iv, uint8_t, iReg)
8996{
8997 switch (pIemCpu->enmEffOpSize)
8998 {
8999 case IEMMODE_16BIT:
9000 {
9001 uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
9002 IEMOP_HLP_NO_LOCK_PREFIX();
9003
9004 IEM_MC_BEGIN(0, 1);
9005 IEM_MC_LOCAL_CONST(uint16_t, u16Value,/*=*/ u16Imm);
9006 IEM_MC_STORE_GREG_U16(iReg, u16Value);
9007 IEM_MC_ADVANCE_RIP();
9008 IEM_MC_END();
9009 break;
9010 }
9011
9012 case IEMMODE_32BIT:
9013 {
9014 uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
9015 IEMOP_HLP_NO_LOCK_PREFIX();
9016
9017 IEM_MC_BEGIN(0, 1);
9018 IEM_MC_LOCAL_CONST(uint32_t, u32Value,/*=*/ u32Imm);
9019 IEM_MC_STORE_GREG_U32(iReg, u32Value);
9020 IEM_MC_ADVANCE_RIP();
9021 IEM_MC_END();
9022 break;
9023 }
9024 case IEMMODE_64BIT:
9025 {
9026 uint64_t u64Imm; IEM_OPCODE_GET_NEXT_U64(&u64Imm);
9027 IEMOP_HLP_NO_LOCK_PREFIX();
9028
9029 IEM_MC_BEGIN(0, 1);
9030 IEM_MC_LOCAL_CONST(uint64_t, u64Value,/*=*/ u64Imm);
9031 IEM_MC_STORE_GREG_U64(iReg, u64Value);
9032 IEM_MC_ADVANCE_RIP();
9033 IEM_MC_END();
9034 break;
9035 }
9036 }
9037
9038 return VINF_SUCCESS;
9039}
9040
9041
9042/** Opcode 0xb8. */
9043FNIEMOP_DEF(iemOp_eAX_Iv)
9044{
9045 IEMOP_MNEMONIC("mov rAX,IV");
9046 return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xAX);
9047}
9048
9049
9050/** Opcode 0xb9. */
9051FNIEMOP_DEF(iemOp_eCX_Iv)
9052{
9053 IEMOP_MNEMONIC("mov rCX,IV");
9054 return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xCX);
9055}
9056
9057
9058/** Opcode 0xba. */
9059FNIEMOP_DEF(iemOp_eDX_Iv)
9060{
9061 IEMOP_MNEMONIC("mov rDX,IV");
9062 return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xDX);
9063}
9064
9065
9066/** Opcode 0xbb. */
9067FNIEMOP_DEF(iemOp_eBX_Iv)
9068{
9069 IEMOP_MNEMONIC("mov rBX,IV");
9070 return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xBX);
9071}
9072
9073
9074/** Opcode 0xbc. */
9075FNIEMOP_DEF(iemOp_eSP_Iv)
9076{
9077 IEMOP_MNEMONIC("mov rSP,IV");
9078 return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xSP);
9079}
9080
9081
9082/** Opcode 0xbd. */
9083FNIEMOP_DEF(iemOp_eBP_Iv)
9084{
9085 IEMOP_MNEMONIC("mov rBP,IV");
9086 return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xBP);
9087}
9088
9089
9090/** Opcode 0xbe. */
9091FNIEMOP_DEF(iemOp_eSI_Iv)
9092{
9093 IEMOP_MNEMONIC("mov rSI,IV");
9094 return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xSI);
9095}
9096
9097
9098/** Opcode 0xbf. */
9099FNIEMOP_DEF(iemOp_eDI_Iv)
9100{
9101 IEMOP_MNEMONIC("mov rDI,IV");
9102 return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xDI);
9103}
9104
9105
/** Opcode 0xc0 - Grp2 Eb,Ib (rol/ror/rcl/rcr/shl/shr/sar by imm8).
 * Dispatches on the ModRM reg field; /6 is an undefined encoding.
 * NOTE(review): /6 raises #UD via the invalid-lock-prefix path - presumably
 * equivalent to an invalid-opcode raise here; verify intent. */
FNIEMOP_DEF(iemOp_Grp2_Eb_Ib)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    PCIEMOPSHIFTSIZES pImpl;
    switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
    {
        case 0: pImpl = &g_iemAImpl_rol; IEMOP_MNEMONIC("rol Eb,Ib"); break;
        case 1: pImpl = &g_iemAImpl_ror; IEMOP_MNEMONIC("ror Eb,Ib"); break;
        case 2: pImpl = &g_iemAImpl_rcl; IEMOP_MNEMONIC("rcl Eb,Ib"); break;
        case 3: pImpl = &g_iemAImpl_rcr; IEMOP_MNEMONIC("rcr Eb,Ib"); break;
        case 4: pImpl = &g_iemAImpl_shl; IEMOP_MNEMONIC("shl Eb,Ib"); break;
        case 5: pImpl = &g_iemAImpl_shr; IEMOP_MNEMONIC("shr Eb,Ib"); break;
        case 7: pImpl = &g_iemAImpl_sar; IEMOP_MNEMONIC("sar Eb,Ib"); break;
        case 6: return IEMOP_RAISE_INVALID_LOCK_PREFIX();
        IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* gcc maybe stupid */
    }
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_AF);

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register */
        uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift);
        IEMOP_HLP_NO_LOCK_PREFIX();
        IEM_MC_BEGIN(3, 0);
        IEM_MC_ARG(uint8_t *, pu8Dst, 0);
        IEM_MC_ARG_CONST(uint8_t, cShiftArg, cShift, 1);
        IEM_MC_ARG(uint32_t *, pEFlags, 2);
        IEM_MC_REF_GREG_U8(pu8Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_REF_EFLAGS(pEFlags);
        IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, cShiftArg, pEFlags);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory */
        IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
        IEM_MC_BEGIN(3, 2);
        IEM_MC_ARG(uint8_t *, pu8Dst, 0);
        IEM_MC_ARG(uint8_t, cShiftArg, 1);
        IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
        uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift);
        IEM_MC_ASSIGN(cShiftArg, cShift);
        IEM_MC_MEM_MAP(pu8Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
        IEM_MC_FETCH_EFLAGS(EFlags);
        IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, cShiftArg, pEFlags);

        IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Dst, IEM_ACCESS_DATA_RW);
        IEM_MC_COMMIT_EFLAGS(EFlags);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
9164
9165
9166/** Opcode 0xc1. */
9167FNIEMOP_DEF(iemOp_Grp2_Ev_Ib)
9168{
9169 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
9170 PCIEMOPSHIFTSIZES pImpl;
9171 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
9172 {
9173 case 0: pImpl = &g_iemAImpl_rol; IEMOP_MNEMONIC("rol Ev,Ib"); break;
9174 case 1: pImpl = &g_iemAImpl_ror; IEMOP_MNEMONIC("ror Ev,Ib"); break;
9175 case 2: pImpl = &g_iemAImpl_rcl; IEMOP_MNEMONIC("rcl Ev,Ib"); break;
9176 case 3: pImpl = &g_iemAImpl_rcr; IEMOP_MNEMONIC("rcr Ev,Ib"); break;
9177 case 4: pImpl = &g_iemAImpl_shl; IEMOP_MNEMONIC("shl Ev,Ib"); break;
9178 case 5: pImpl = &g_iemAImpl_shr; IEMOP_MNEMONIC("shr Ev,Ib"); break;
9179 case 7: pImpl = &g_iemAImpl_sar; IEMOP_MNEMONIC("sar Ev,Ib"); break;
9180 case 6: return IEMOP_RAISE_INVALID_LOCK_PREFIX();
9181 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* gcc maybe stupid */
9182 }
9183 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_AF);
9184
9185 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
9186 {
9187 /* register */
9188 uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift);
9189 IEMOP_HLP_NO_LOCK_PREFIX();
9190 switch (pIemCpu->enmEffOpSize)
9191 {
9192 case IEMMODE_16BIT:
9193 IEM_MC_BEGIN(3, 0);
9194 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
9195 IEM_MC_ARG_CONST(uint8_t, cShiftArg, cShift, 1);
9196 IEM_MC_ARG(uint32_t *, pEFlags, 2);
9197 IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
9198 IEM_MC_REF_EFLAGS(pEFlags);
9199 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, cShiftArg, pEFlags);
9200 IEM_MC_ADVANCE_RIP();
9201 IEM_MC_END();
9202 return VINF_SUCCESS;
9203
9204 case IEMMODE_32BIT:
9205 IEM_MC_BEGIN(3, 0);
9206 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
9207 IEM_MC_ARG_CONST(uint8_t, cShiftArg, cShift, 1);
9208 IEM_MC_ARG(uint32_t *, pEFlags, 2);
9209 IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
9210 IEM_MC_REF_EFLAGS(pEFlags);
9211 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, cShiftArg, pEFlags);
9212 IEM_MC_ADVANCE_RIP();
9213 IEM_MC_END();
9214 return VINF_SUCCESS;
9215
9216 case IEMMODE_64BIT:
9217 IEM_MC_BEGIN(3, 0);
9218 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
9219 IEM_MC_ARG_CONST(uint8_t, cShiftArg, cShift, 1);
9220 IEM_MC_ARG(uint32_t *, pEFlags, 2);
9221 IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
9222 IEM_MC_REF_EFLAGS(pEFlags);
9223 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, cShiftArg, pEFlags);
9224 IEM_MC_ADVANCE_RIP();
9225 IEM_MC_END();
9226 return VINF_SUCCESS;
9227
9228 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9229 }
9230 }
9231 else
9232 {
9233 /* memory */
9234 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
9235 switch (pIemCpu->enmEffOpSize)
9236 {
9237 case IEMMODE_16BIT:
9238 IEM_MC_BEGIN(3, 2);
9239 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
9240 IEM_MC_ARG(uint8_t, cShiftArg, 1);
9241 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
9242 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9243
9244 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
9245 uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift);
9246 IEM_MC_ASSIGN(cShiftArg, cShift);
9247 IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
9248 IEM_MC_FETCH_EFLAGS(EFlags);
9249 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, cShiftArg, pEFlags);
9250
9251 IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
9252 IEM_MC_COMMIT_EFLAGS(EFlags);
9253 IEM_MC_ADVANCE_RIP();
9254 IEM_MC_END();
9255 return VINF_SUCCESS;
9256
9257 case IEMMODE_32BIT:
9258 IEM_MC_BEGIN(3, 2);
9259 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
9260 IEM_MC_ARG(uint8_t, cShiftArg, 1);
9261 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
9262 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9263
9264 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
9265 uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift);
9266 IEM_MC_ASSIGN(cShiftArg, cShift);
9267 IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
9268 IEM_MC_FETCH_EFLAGS(EFlags);
9269 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, cShiftArg, pEFlags);
9270
9271 IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
9272 IEM_MC_COMMIT_EFLAGS(EFlags);
9273 IEM_MC_ADVANCE_RIP();
9274 IEM_MC_END();
9275 return VINF_SUCCESS;
9276
9277 case IEMMODE_64BIT:
9278 IEM_MC_BEGIN(3, 2);
9279 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
9280 IEM_MC_ARG(uint8_t, cShiftArg, 1);
9281 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
9282 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9283
9284 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
9285 uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift);
9286 IEM_MC_ASSIGN(cShiftArg, cShift);
9287 IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
9288 IEM_MC_FETCH_EFLAGS(EFlags);
9289 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, cShiftArg, pEFlags);
9290
9291 IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, IEM_ACCESS_DATA_RW);
9292 IEM_MC_COMMIT_EFLAGS(EFlags);
9293 IEM_MC_ADVANCE_RIP();
9294 IEM_MC_END();
9295 return VINF_SUCCESS;
9296
9297 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9298 }
9299 }
9300}
9301
9302
/**
 * Opcode 0xc2 - retn Iw.
 *
 * Near return with an imm16 count of bytes to pop off the stack after the
 * return address; work is deferred to iemCImpl_retn.  Note the immediate is
 * fetched before the lock-prefix check, and the operand size defaults to
 * 64-bit in long mode.
 */
FNIEMOP_DEF(iemOp_retn_Iw)
{
    IEMOP_MNEMONIC("retn Iw");
    uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_retn, pIemCpu->enmEffOpSize, u16Imm);
}
9312
9313
/**
 * Opcode 0xc3 - retn.
 *
 * Plain near return; same worker as 0xc2 with a zero pop count.
 */
FNIEMOP_DEF(iemOp_retn)
{
    IEMOP_MNEMONIC("retn");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_retn, pIemCpu->enmEffOpSize, 0);
}
9322
9323
/**
 * Opcode 0xc4 - les Gv,Mp.
 *
 * Loads ES and a general register from a far pointer in memory via the
 * common worker (shared with lds/lss/lfs/lgs).
 */
FNIEMOP_DEF(iemOp_les_Gv_Mp)
{
    IEMOP_MNEMONIC("les Gv,Mp");
    return FNIEMOP_CALL_1(iemOpCommonLoadSRegAndGreg, X86_SREG_ES);
}
9330
9331
/**
 * Opcode 0xc5 - lds Gv,Mp.
 *
 * Loads DS and a general register from a far pointer in memory via the
 * common worker (shared with les/lss/lfs/lgs).
 */
FNIEMOP_DEF(iemOp_lds_Gv_Mp)
{
    IEMOP_MNEMONIC("lds Gv,Mp");
    return FNIEMOP_CALL_1(iemOpCommonLoadSRegAndGreg, X86_SREG_DS);
}
9338
9339
9340/** Opcode 0xc6. */
9341FNIEMOP_DEF(iemOp_Grp11_Eb_Ib)
9342{
9343 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
9344 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
9345 if ((bRm & X86_MODRM_REG_MASK) != (0 << X86_MODRM_REG_SHIFT)) /* only mov Eb,Ib in this group. */
9346 return IEMOP_RAISE_INVALID_LOCK_PREFIX();
9347 IEMOP_MNEMONIC("mov Eb,Ib");
9348
9349 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
9350 {
9351 /* register access */
9352 uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
9353 IEM_MC_BEGIN(0, 0);
9354 IEM_MC_STORE_GREG_U8((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u8Imm);
9355 IEM_MC_ADVANCE_RIP();
9356 IEM_MC_END();
9357 }
9358 else
9359 {
9360 /* memory access. */
9361 IEM_MC_BEGIN(0, 1);
9362 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9363 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
9364 uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
9365 IEM_MC_STORE_MEM_U8(pIemCpu->iEffSeg, GCPtrEffDst, u8Imm);
9366 IEM_MC_ADVANCE_RIP();
9367 IEM_MC_END();
9368 }
9369 return VINF_SUCCESS;
9370}
9371
9372
9373/** Opcode 0xc7. */
9374FNIEMOP_DEF(iemOp_Grp11_Ev_Iz)
9375{
9376 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
9377 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
9378 if ((bRm & X86_MODRM_REG_MASK) != (0 << X86_MODRM_REG_SHIFT)) /* only mov Eb,Ib in this group. */
9379 return IEMOP_RAISE_INVALID_LOCK_PREFIX();
9380 IEMOP_MNEMONIC("mov Ev,Iz");
9381
9382 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
9383 {
9384 /* register access */
9385 switch (pIemCpu->enmEffOpSize)
9386 {
9387 case IEMMODE_16BIT:
9388 IEM_MC_BEGIN(0, 0);
9389 uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
9390 IEM_MC_STORE_GREG_U16((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u16Imm);
9391 IEM_MC_ADVANCE_RIP();
9392 IEM_MC_END();
9393 return VINF_SUCCESS;
9394
9395 case IEMMODE_32BIT:
9396 IEM_MC_BEGIN(0, 0);
9397 uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
9398 IEM_MC_STORE_GREG_U32((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u32Imm);
9399 IEM_MC_ADVANCE_RIP();
9400 IEM_MC_END();
9401 return VINF_SUCCESS;
9402
9403 case IEMMODE_64BIT:
9404 IEM_MC_BEGIN(0, 0);
9405 uint64_t u64Imm; IEM_OPCODE_GET_NEXT_U64(&u64Imm);
9406 IEM_MC_STORE_GREG_U64((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u64Imm);
9407 IEM_MC_ADVANCE_RIP();
9408 IEM_MC_END();
9409 return VINF_SUCCESS;
9410
9411 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9412 }
9413 }
9414 else
9415 {
9416 /* memory access. */
9417 switch (pIemCpu->enmEffOpSize)
9418 {
9419 case IEMMODE_16BIT:
9420 IEM_MC_BEGIN(0, 1);
9421 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9422 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
9423 uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
9424 IEM_MC_STORE_MEM_U16(pIemCpu->iEffSeg, GCPtrEffDst, u16Imm);
9425 IEM_MC_ADVANCE_RIP();
9426 IEM_MC_END();
9427 return VINF_SUCCESS;
9428
9429 case IEMMODE_32BIT:
9430 IEM_MC_BEGIN(0, 1);
9431 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9432 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
9433 uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
9434 IEM_MC_STORE_MEM_U32(pIemCpu->iEffSeg, GCPtrEffDst, u32Imm);
9435 IEM_MC_ADVANCE_RIP();
9436 IEM_MC_END();
9437 return VINF_SUCCESS;
9438
9439 case IEMMODE_64BIT:
9440 IEM_MC_BEGIN(0, 1);
9441 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9442 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
9443 uint64_t u64Imm; IEM_OPCODE_GET_NEXT_U64(&u64Imm);
9444 IEM_MC_STORE_MEM_U64(pIemCpu->iEffSeg, GCPtrEffDst, u64Imm);
9445 IEM_MC_ADVANCE_RIP();
9446 IEM_MC_END();
9447 return VINF_SUCCESS;
9448
9449 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9450 }
9451 }
9452}
9453
9454
9455
9456
/** Opcode 0xc8 - enter Iw,Ib. Not implemented yet. */
FNIEMOP_STUB(iemOp_enter_Iw_Ib);
9459
9460
9461/** Opcode 0xc9. */
9462FNIEMOP_DEF(iemOp_leave)
9463{
9464 IEMOP_MNEMONIC("retn");
9465 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
9466 IEMOP_HLP_NO_LOCK_PREFIX();
9467 return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_leave, pIemCpu->enmEffOpSize);
9468}
9469
9470
/**
 * Opcode 0xca - retf Iw.
 *
 * Far return with an imm16 count of bytes to pop after the return address;
 * deferred to iemCImpl_retf.
 */
FNIEMOP_DEF(iemOp_retf_Iw)
{
    IEMOP_MNEMONIC("retf Iw");
    uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_retf, pIemCpu->enmEffOpSize, u16Imm);
}
9480
9481
/**
 * Opcode 0xcb - retf.
 *
 * Plain far return; same worker as 0xca with a zero pop count.
 */
FNIEMOP_DEF(iemOp_retf)
{
    IEMOP_MNEMONIC("retf");
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_retf, pIemCpu->enmEffOpSize, 0);
}
9490
9491
/**
 * Opcode 0xcc - int3.
 *
 * One-byte breakpoint: defers to the common software-interrupt worker with
 * vector \#BP and fIsBpInstr=true so the worker can distinguish it from a
 * two-byte "int 3".
 * NOTE(review): unlike the neighbouring opcodes this one invokes neither
 * IEMOP_MNEMONIC() nor IEMOP_HLP_NO_LOCK_PREFIX() - confirm that's intended.
 */
FNIEMOP_DEF(iemOp_int_3)
{
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_int, X86_XCPT_BP, true /*fIsBpInstr*/);
}
9497
9498
/**
 * Opcode 0xcd - int Ib.
 *
 * Software interrupt with an immediate vector; defers to the common
 * software-interrupt worker with fIsBpInstr=false.
 */
FNIEMOP_DEF(iemOp_int_Ib)
{
    uint8_t u8Int; IEM_OPCODE_GET_NEXT_U8(&u8Int);
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_int, u8Int, false /*fIsBpInstr*/);
}
9505
9506
9507/** Opcode 0xce. */
9508FNIEMOP_DEF(iemOp_into)
9509{
9510 IEM_MC_BEGIN(2, 0);
9511 IEM_MC_ARG_CONST(uint8_t, u8Int, /*=*/ X86_XCPT_OF, 0);
9512 IEM_MC_ARG_CONST(bool, fIsBpInstr, /*=*/ false, 1);
9513 IEM_MC_CALL_CIMPL_2(iemCImpl_int, u8Int, fIsBpInstr);
9514 IEM_MC_END();
9515 return VINF_SUCCESS;
9516}
9517
9518
/**
 * Opcode 0xcf - iret.
 *
 * Interrupt return; all the mode-dependent heavy lifting is deferred to
 * iemCImpl_iret.
 */
FNIEMOP_DEF(iemOp_iret)
{
    IEMOP_MNEMONIC("iret");
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_iret, pIemCpu->enmEffOpSize);
}
9526
9527
9528/** Opcode 0xd0. */
9529FNIEMOP_DEF(iemOp_Grp2_Eb_1)
9530{
9531 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
9532 PCIEMOPSHIFTSIZES pImpl;
9533 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
9534 {
9535 case 0: pImpl = &g_iemAImpl_rol; IEMOP_MNEMONIC("rol Eb,1"); break;
9536 case 1: pImpl = &g_iemAImpl_ror; IEMOP_MNEMONIC("ror Eb,1"); break;
9537 case 2: pImpl = &g_iemAImpl_rcl; IEMOP_MNEMONIC("rcl Eb,1"); break;
9538 case 3: pImpl = &g_iemAImpl_rcr; IEMOP_MNEMONIC("rcr Eb,1"); break;
9539 case 4: pImpl = &g_iemAImpl_shl; IEMOP_MNEMONIC("shl Eb,1"); break;
9540 case 5: pImpl = &g_iemAImpl_shr; IEMOP_MNEMONIC("shr Eb,1"); break;
9541 case 7: pImpl = &g_iemAImpl_sar; IEMOP_MNEMONIC("sar Eb,1"); break;
9542 case 6: return IEMOP_RAISE_INVALID_LOCK_PREFIX();
9543 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* gcc maybe, well... */
9544 }
9545 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_AF);
9546
9547 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
9548 {
9549 /* register */
9550 IEMOP_HLP_NO_LOCK_PREFIX();
9551 IEM_MC_BEGIN(3, 0);
9552 IEM_MC_ARG(uint8_t *, pu8Dst, 0);
9553 IEM_MC_ARG_CONST(uint8_t, cShiftArg,/*=*/1, 1);
9554 IEM_MC_ARG(uint32_t *, pEFlags, 2);
9555 IEM_MC_REF_GREG_U8(pu8Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
9556 IEM_MC_REF_EFLAGS(pEFlags);
9557 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, cShiftArg, pEFlags);
9558 IEM_MC_ADVANCE_RIP();
9559 IEM_MC_END();
9560 }
9561 else
9562 {
9563 /* memory */
9564 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
9565 IEM_MC_BEGIN(3, 2);
9566 IEM_MC_ARG(uint8_t *, pu8Dst, 0);
9567 IEM_MC_ARG_CONST(uint8_t, cShiftArg,/*=*/1, 1);
9568 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
9569 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9570
9571 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
9572 IEM_MC_MEM_MAP(pu8Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
9573 IEM_MC_FETCH_EFLAGS(EFlags);
9574 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, cShiftArg, pEFlags);
9575
9576 IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Dst, IEM_ACCESS_DATA_RW);
9577 IEM_MC_COMMIT_EFLAGS(EFlags);
9578 IEM_MC_ADVANCE_RIP();
9579 IEM_MC_END();
9580 }
9581 return VINF_SUCCESS;
9582}
9583
9584
9585
9586/** Opcode 0xd1. */
9587FNIEMOP_DEF(iemOp_Grp2_Ev_1)
9588{
9589 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
9590 PCIEMOPSHIFTSIZES pImpl;
9591 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
9592 {
9593 case 0: pImpl = &g_iemAImpl_rol; IEMOP_MNEMONIC("rol Ev,1"); break;
9594 case 1: pImpl = &g_iemAImpl_ror; IEMOP_MNEMONIC("ror Ev,1"); break;
9595 case 2: pImpl = &g_iemAImpl_rcl; IEMOP_MNEMONIC("rcl Ev,1"); break;
9596 case 3: pImpl = &g_iemAImpl_rcr; IEMOP_MNEMONIC("rcr Ev,1"); break;
9597 case 4: pImpl = &g_iemAImpl_shl; IEMOP_MNEMONIC("shl Ev,1"); break;
9598 case 5: pImpl = &g_iemAImpl_shr; IEMOP_MNEMONIC("shr Ev,1"); break;
9599 case 7: pImpl = &g_iemAImpl_sar; IEMOP_MNEMONIC("sar Ev,1"); break;
9600 case 6: return IEMOP_RAISE_INVALID_LOCK_PREFIX();
9601 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* gcc maybe, well... */
9602 }
9603 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_AF);
9604
9605 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
9606 {
9607 /* register */
9608 IEMOP_HLP_NO_LOCK_PREFIX();
9609 switch (pIemCpu->enmEffOpSize)
9610 {
9611 case IEMMODE_16BIT:
9612 IEM_MC_BEGIN(3, 0);
9613 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
9614 IEM_MC_ARG_CONST(uint8_t, cShiftArg,/*=1*/1, 1);
9615 IEM_MC_ARG(uint32_t *, pEFlags, 2);
9616 IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
9617 IEM_MC_REF_EFLAGS(pEFlags);
9618 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, cShiftArg, pEFlags);
9619 IEM_MC_ADVANCE_RIP();
9620 IEM_MC_END();
9621 return VINF_SUCCESS;
9622
9623 case IEMMODE_32BIT:
9624 IEM_MC_BEGIN(3, 0);
9625 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
9626 IEM_MC_ARG_CONST(uint8_t, cShiftArg,/*=1*/1, 1);
9627 IEM_MC_ARG(uint32_t *, pEFlags, 2);
9628 IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
9629 IEM_MC_REF_EFLAGS(pEFlags);
9630 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, cShiftArg, pEFlags);
9631 IEM_MC_ADVANCE_RIP();
9632 IEM_MC_END();
9633 return VINF_SUCCESS;
9634
9635 case IEMMODE_64BIT:
9636 IEM_MC_BEGIN(3, 0);
9637 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
9638 IEM_MC_ARG_CONST(uint8_t, cShiftArg,/*=1*/1, 1);
9639 IEM_MC_ARG(uint32_t *, pEFlags, 2);
9640 IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
9641 IEM_MC_REF_EFLAGS(pEFlags);
9642 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, cShiftArg, pEFlags);
9643 IEM_MC_ADVANCE_RIP();
9644 IEM_MC_END();
9645 return VINF_SUCCESS;
9646
9647 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9648 }
9649 }
9650 else
9651 {
9652 /* memory */
9653 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
9654 switch (pIemCpu->enmEffOpSize)
9655 {
9656 case IEMMODE_16BIT:
9657 IEM_MC_BEGIN(3, 2);
9658 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
9659 IEM_MC_ARG_CONST(uint8_t, cShiftArg,/*=1*/1, 1);
9660 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
9661 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9662
9663 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
9664 IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
9665 IEM_MC_FETCH_EFLAGS(EFlags);
9666 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, cShiftArg, pEFlags);
9667
9668 IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
9669 IEM_MC_COMMIT_EFLAGS(EFlags);
9670 IEM_MC_ADVANCE_RIP();
9671 IEM_MC_END();
9672 return VINF_SUCCESS;
9673
9674 case IEMMODE_32BIT:
9675 IEM_MC_BEGIN(3, 2);
9676 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
9677 IEM_MC_ARG_CONST(uint8_t, cShiftArg,/*=1*/1, 1);
9678 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
9679 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9680
9681 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
9682 IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
9683 IEM_MC_FETCH_EFLAGS(EFlags);
9684 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, cShiftArg, pEFlags);
9685
9686 IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
9687 IEM_MC_COMMIT_EFLAGS(EFlags);
9688 IEM_MC_ADVANCE_RIP();
9689 IEM_MC_END();
9690 return VINF_SUCCESS;
9691
9692 case IEMMODE_64BIT:
9693 IEM_MC_BEGIN(3, 2);
9694 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
9695 IEM_MC_ARG_CONST(uint8_t, cShiftArg,/*=1*/1, 1);
9696 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
9697 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9698
9699 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
9700 IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
9701 IEM_MC_FETCH_EFLAGS(EFlags);
9702 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, cShiftArg, pEFlags);
9703
9704 IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, IEM_ACCESS_DATA_RW);
9705 IEM_MC_COMMIT_EFLAGS(EFlags);
9706 IEM_MC_ADVANCE_RIP();
9707 IEM_MC_END();
9708 return VINF_SUCCESS;
9709
9710 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9711 }
9712 }
9713}
9714
9715
9716/** Opcode 0xd2. */
9717FNIEMOP_DEF(iemOp_Grp2_Eb_CL)
9718{
9719 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
9720 PCIEMOPSHIFTSIZES pImpl;
9721 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
9722 {
9723 case 0: pImpl = &g_iemAImpl_rol; IEMOP_MNEMONIC("rol Eb,CL"); break;
9724 case 1: pImpl = &g_iemAImpl_ror; IEMOP_MNEMONIC("ror Eb,CL"); break;
9725 case 2: pImpl = &g_iemAImpl_rcl; IEMOP_MNEMONIC("rcl Eb,CL"); break;
9726 case 3: pImpl = &g_iemAImpl_rcr; IEMOP_MNEMONIC("rcr Eb,CL"); break;
9727 case 4: pImpl = &g_iemAImpl_shl; IEMOP_MNEMONIC("shl Eb,CL"); break;
9728 case 5: pImpl = &g_iemAImpl_shr; IEMOP_MNEMONIC("shr Eb,CL"); break;
9729 case 7: pImpl = &g_iemAImpl_sar; IEMOP_MNEMONIC("sar Eb,CL"); break;
9730 case 6: return IEMOP_RAISE_INVALID_LOCK_PREFIX();
9731 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* gcc, grr. */
9732 }
9733 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_AF);
9734
9735 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
9736 {
9737 /* register */
9738 IEMOP_HLP_NO_LOCK_PREFIX();
9739 IEM_MC_BEGIN(3, 0);
9740 IEM_MC_ARG(uint8_t *, pu8Dst, 0);
9741 IEM_MC_ARG(uint8_t, cShiftArg, 1);
9742 IEM_MC_ARG(uint32_t *, pEFlags, 2);
9743 IEM_MC_REF_GREG_U8(pu8Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
9744 IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
9745 IEM_MC_REF_EFLAGS(pEFlags);
9746 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, cShiftArg, pEFlags);
9747 IEM_MC_ADVANCE_RIP();
9748 IEM_MC_END();
9749 }
9750 else
9751 {
9752 /* memory */
9753 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
9754 IEM_MC_BEGIN(3, 2);
9755 IEM_MC_ARG(uint8_t *, pu8Dst, 0);
9756 IEM_MC_ARG(uint8_t, cShiftArg, 1);
9757 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
9758 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9759
9760 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
9761 IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
9762 IEM_MC_MEM_MAP(pu8Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
9763 IEM_MC_FETCH_EFLAGS(EFlags);
9764 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, cShiftArg, pEFlags);
9765
9766 IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Dst, IEM_ACCESS_DATA_RW);
9767 IEM_MC_COMMIT_EFLAGS(EFlags);
9768 IEM_MC_ADVANCE_RIP();
9769 IEM_MC_END();
9770 }
9771 return VINF_SUCCESS;
9772}
9773
9774
/**
 * Opcode 0xd3 - Group 2: rol/ror/rcl/rcr/shl/shr/sar Ev,CL.
 *
 * Word/dword/qword shift/rotate with the count taken from CL.  Note that,
 * unlike its siblings above, this function correctly raises \#UD for the
 * undefined /6 encoding.
 */
FNIEMOP_DEF(iemOp_Grp2_Ev_CL)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    PCIEMOPSHIFTSIZES pImpl;
    switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
    {
        case 0: pImpl = &g_iemAImpl_rol; IEMOP_MNEMONIC("rol Ev,CL"); break;
        case 1: pImpl = &g_iemAImpl_ror; IEMOP_MNEMONIC("ror Ev,CL"); break;
        case 2: pImpl = &g_iemAImpl_rcl; IEMOP_MNEMONIC("rcl Ev,CL"); break;
        case 3: pImpl = &g_iemAImpl_rcr; IEMOP_MNEMONIC("rcr Ev,CL"); break;
        case 4: pImpl = &g_iemAImpl_shl; IEMOP_MNEMONIC("shl Ev,CL"); break;
        case 5: pImpl = &g_iemAImpl_shr; IEMOP_MNEMONIC("shr Ev,CL"); break;
        case 7: pImpl = &g_iemAImpl_sar; IEMOP_MNEMONIC("sar Ev,CL"); break;
        case 6: return IEMOP_RAISE_INVALID_OPCODE(); /* undefined encoding -> \#UD */
        IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* gcc maybe stupid */
    }
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_AF);

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register */
        IEMOP_HLP_NO_LOCK_PREFIX();
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint16_t *, pu16Dst, 0);
                IEM_MC_ARG(uint8_t, cShiftArg, 1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, cShiftArg, pEFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint32_t *, pu32Dst, 0);
                IEM_MC_ARG(uint8_t, cShiftArg, 1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, cShiftArg, pEFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint64_t *, pu64Dst, 0);
                IEM_MC_ARG(uint8_t, cShiftArg, 1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, cShiftArg, pEFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /* memory */
        IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint16_t *, pu16Dst, 0);
                IEM_MC_ARG(uint8_t, cShiftArg, 1);
                IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
                IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_EFLAGS(EFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, cShiftArg, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint32_t *, pu32Dst, 0);
                IEM_MC_ARG(uint8_t, cShiftArg, 1);
                IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
                IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_EFLAGS(EFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, cShiftArg, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint64_t *, pu64Dst, 0);
                IEM_MC_ARG(uint8_t, cShiftArg, 1);
                IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
                IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_EFLAGS(EFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, cShiftArg, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, IEM_ACCESS_DATA_RW);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
9909
/**
 * Opcode 0xd4 - aam Ib.
 *
 * ASCII adjust AX after multiply; the immediate is the divisor (10 for the
 * plain "aam" form).  Raises \#DE for a zero divisor and \#UD in 64-bit mode.
 */
FNIEMOP_DEF(iemOp_aam_Ib)
{
    IEMOP_MNEMONIC("aam Ib");
    uint8_t bImm; IEM_OPCODE_GET_NEXT_U8(&bImm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_NO_64BIT();
    if (!bImm)
        return IEMOP_RAISE_DIVIDE_ERROR();
    return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_aam, bImm);
}
9921
9922
/**
 * Opcode 0xd5 - aad Ib.
 *
 * ASCII adjust AX before division; the immediate is the multiplier (10 for
 * the plain "aad" form).  Raises \#UD in 64-bit mode.
 */
FNIEMOP_DEF(iemOp_aad_Ib)
{
    IEMOP_MNEMONIC("aad Ib");
    uint8_t bImm; IEM_OPCODE_GET_NEXT_U8(&bImm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_NO_64BIT();
    return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_aad, bImm);
}
9932
9933
/**
 * Opcode 0xd7 - xlat.
 *
 * Table lookup: AL = [xBX + zero-extended AL], using the effective segment
 * (DS unless overridden).  The addend register width follows the effective
 * address mode.
 */
FNIEMOP_DEF(iemOp_xlat)
{
    IEMOP_MNEMONIC("xlat");
    IEMOP_HLP_NO_LOCK_PREFIX();
    switch (pIemCpu->enmEffAddrMode)
    {
        case IEMMODE_16BIT:
            IEM_MC_BEGIN(2, 0);
            IEM_MC_LOCAL(uint8_t, u8Tmp);
            IEM_MC_LOCAL(uint16_t, u16Addr);
            IEM_MC_FETCH_GREG_U8_ZX_U16(u16Addr, X86_GREG_xAX);
            IEM_MC_ADD_GREG_U16_TO_LOCAL(u16Addr, X86_GREG_xBX);
            IEM_MC_FETCH_MEM16_U8(u8Tmp, pIemCpu->iEffSeg, u16Addr);
            IEM_MC_STORE_GREG_U8(X86_GREG_xAX, u8Tmp);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_32BIT:
            IEM_MC_BEGIN(2, 0);
            IEM_MC_LOCAL(uint8_t, u8Tmp);
            IEM_MC_LOCAL(uint32_t, u32Addr);
            IEM_MC_FETCH_GREG_U8_ZX_U32(u32Addr, X86_GREG_xAX);
            IEM_MC_ADD_GREG_U32_TO_LOCAL(u32Addr, X86_GREG_xBX);
            IEM_MC_FETCH_MEM32_U8(u8Tmp, pIemCpu->iEffSeg, u32Addr);
            IEM_MC_STORE_GREG_U8(X86_GREG_xAX, u8Tmp);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_64BIT:
            IEM_MC_BEGIN(2, 0);
            IEM_MC_LOCAL(uint8_t, u8Tmp);
            IEM_MC_LOCAL(uint64_t, u64Addr);
            IEM_MC_FETCH_GREG_U8_ZX_U64(u64Addr, X86_GREG_xAX);
            IEM_MC_ADD_GREG_U64_TO_LOCAL(u64Addr, X86_GREG_xBX);
            IEM_MC_FETCH_MEM_U8(u8Tmp, pIemCpu->iEffSeg, u64Addr);
            IEM_MC_STORE_GREG_U8(X86_GREG_xAX, u8Tmp);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
9980
9981
/* FPU escape opcodes 0xd8-0xda and the memory forms of 0xdb - all still stubs. */

/** Opcode 0xd8 - FPU escape group 0. Not implemented yet. */
FNIEMOP_STUB(iemOp_EscF0);
/** Opcode 0xd9 - FPU escape group 1. Not implemented yet. */
FNIEMOP_STUB(iemOp_EscF1);
/** Opcode 0xda - FPU escape group 2. Not implemented yet. */
FNIEMOP_STUB(iemOp_EscF2);


/** Opcode 0xdb /0 - fild m32i. Not implemented yet. */
FNIEMOP_STUB_1(iemOp_fild_dw, uint8_t, bRm);
/** Opcode 0xdb /1 - fisttp m32i. Not implemented yet. */
FNIEMOP_STUB_1(iemOp_fisttp_dw, uint8_t, bRm);
/** Opcode 0xdb /2 - fist m32i. Not implemented yet. */
FNIEMOP_STUB_1(iemOp_fist_dw, uint8_t, bRm);
/** Opcode 0xdb /3 - fistp m32i. Not implemented yet. */
FNIEMOP_STUB_1(iemOp_fistp_dw, uint8_t, bRm);
/** Opcode 0xdb /5 - fld m80r. Not implemented yet. */
FNIEMOP_STUB_1(iemOp_fld_xr, uint8_t, bRm);
/** Opcode 0xdb /7 - fstp m80r. Not implemented yet. */
FNIEMOP_STUB_1(iemOp_fstp_xr, uint8_t, bRm);
10002
10003
/**
 * Opcode 0xdb 0xe0 - fneni.
 *
 * 8087 interrupt-enable instruction; a no-op (ignored) on later FPUs, so
 * just advance RIP.
 */
FNIEMOP_DEF(iemOp_fneni)
{
    IEMOP_MNEMONIC("fneni (8087/ign)");
    IEM_MC_BEGIN(0,0);
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
10013
10014
/**
 * Opcode 0xdb 0xe1 - fndisi.
 *
 * 8087 interrupt-disable instruction; a no-op (ignored) on later FPUs, so
 * just advance RIP.
 */
FNIEMOP_DEF(iemOp_fndisi)
{
    IEMOP_MNEMONIC("fndisi (8087/ign)");
    IEM_MC_BEGIN(0,0);
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
10024
10025
/** Opcode 0xdb 0xe2 - fnclex. Not implemented yet. */
FNIEMOP_STUB(iemOp_fnclex);
10028
10029
/**
 * Opcode 0xdb 0xe3 - fninit.
 *
 * Initializes the FPU without checking for pending exceptions; defers to
 * iemCImpl_finit with fCheckXcpts=false.
 */
FNIEMOP_DEF(iemOp_fninit)
{
    IEMOP_MNEMONIC("fninit");
    return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_finit, false /*fCheckXcpts*/);
}
10036
10037
/**
 * Opcode 0xdb 0xe4 - fnsetpm.
 *
 * 80287 "set protected mode" instruction; ignored on later FPUs, so just
 * advance RIP.
 */
FNIEMOP_DEF(iemOp_fnsetpm)
{
    IEMOP_MNEMONIC("fnsetpm (80287/ign)"); /* set protected mode on fpu. */
    IEM_MC_BEGIN(0,0);
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
10047
10048
/**
 * Opcode 0xdb 0xe5 - frstpm.
 *
 * 80287XL "reset protected mode" instruction; ignored on later FPUs, so just
 * advance RIP.
 */
FNIEMOP_DEF(iemOp_frstpm)
{
    IEMOP_MNEMONIC("frstpm (80287XL/ign)"); /* reset pm, back to real mode. */
    IEM_MC_BEGIN(0,0);
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
10058
10059
/**
 * Opcode 0xdb - FPU escape group 3.
 *
 * Register forms (mod==3) dispatch on bits 3-5 of the ModRM byte; only the
 * 0xe0-0xe5 administrative instructions are implemented so far.  Memory
 * forms dispatch on the /r field to the (stubbed) integer/extended-real
 * load/store workers.
 */
FNIEMOP_DEF(iemOp_EscF3)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        switch (bRm & 0xf8)
        {
            case 0xc0: AssertFailedReturn(VERR_NOT_IMPLEMENTED); // fcmovnb
            case 0xc8: AssertFailedReturn(VERR_NOT_IMPLEMENTED); // fcmovne
            case 0xd0: AssertFailedReturn(VERR_NOT_IMPLEMENTED); // fcmovnbe
            case 0xd8: AssertFailedReturn(VERR_NOT_IMPLEMENTED); // fcmovnu
            case 0xe0:
                IEMOP_HLP_NO_LOCK_PREFIX();
                switch (bRm)
                {
                    case 0xe0: return FNIEMOP_CALL(iemOp_fneni);
                    case 0xe1: return FNIEMOP_CALL(iemOp_fndisi);
                    case 0xe2: return FNIEMOP_CALL(iemOp_fnclex);
                    case 0xe3: return FNIEMOP_CALL(iemOp_fninit);
                    case 0xe4: return FNIEMOP_CALL(iemOp_fnsetpm);
                    case 0xe5: return FNIEMOP_CALL(iemOp_frstpm);
                    default: return IEMOP_RAISE_INVALID_OPCODE();
                }
                break; /* not reached - every case above returns */
            case 0xe8: AssertFailedReturn(VERR_NOT_IMPLEMENTED); // fucomi
            case 0xf0: AssertFailedReturn(VERR_NOT_IMPLEMENTED); // fcomi
            case 0xf8: return IEMOP_RAISE_INVALID_OPCODE();
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
        {
            case 0: return FNIEMOP_CALL_1(iemOp_fild_dw, bRm);
            case 1: return FNIEMOP_CALL_1(iemOp_fisttp_dw,bRm);
            case 2: return FNIEMOP_CALL_1(iemOp_fist_dw, bRm);
            case 3: return FNIEMOP_CALL_1(iemOp_fistp_dw, bRm);
            case 4: return IEMOP_RAISE_INVALID_OPCODE();
            case 5: return FNIEMOP_CALL_1(iemOp_fld_xr, bRm);
            case 6: return IEMOP_RAISE_INVALID_OPCODE();
            case 7: return FNIEMOP_CALL_1(iemOp_fstp_xr, bRm);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
10107
/** Opcode 0xdc - FPU escape group 4. Not implemented yet. */
FNIEMOP_STUB(iemOp_EscF4);
/** Opcode 0xdd - FPU escape group 5. Not implemented yet. */
FNIEMOP_STUB(iemOp_EscF5);

/** Opcode 0xde 0xd9 - fcompp. Not implemented yet. */
FNIEMOP_STUB(iemOp_fcompp);
10115
/**
 * Opcode 0xde - FPU escape group 6.
 *
 * Register forms (mod==3) dispatch on bits 3-5; only the 0xd9 (fcompp) stub
 * is wired up.  The memory-form dispatch table exists but is still disabled
 * (see the \#if 0 block) - memory forms assert as not implemented.
 */
FNIEMOP_DEF(iemOp_EscF6)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        switch (bRm & 0xf8)
        {
            case 0xc0: AssertFailedReturn(VERR_NOT_IMPLEMENTED); // fiaddp
            case 0xc8: AssertFailedReturn(VERR_NOT_IMPLEMENTED); // fimulp
            case 0xd0: return IEMOP_RAISE_INVALID_OPCODE();
            case 0xd8:
                switch (bRm)
                {
                    case 0xd9: return FNIEMOP_CALL(iemOp_fcompp);
                    default: return IEMOP_RAISE_INVALID_OPCODE();
                }
            case 0xe0: AssertFailedReturn(VERR_NOT_IMPLEMENTED); // fsubrp
            case 0xe8: AssertFailedReturn(VERR_NOT_IMPLEMENTED); // fsubp
            case 0xf0: AssertFailedReturn(VERR_NOT_IMPLEMENTED); // fdivrp
            case 0xf8: AssertFailedReturn(VERR_NOT_IMPLEMENTED); // fdivp
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
#if 0
        switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
        {
            case 0: return FNIEMOP_CALL_1(iemOp_fiadd_w, bRm);
            case 1: return FNIEMOP_CALL_1(iemOp_fimul_w, bRm);
            case 2: return FNIEMOP_CALL_1(iemOp_ficom_w, bRm);
            case 3: return FNIEMOP_CALL_1(iemOp_ficomp_w, bRm);
            case 4: return FNIEMOP_CALL_1(iemOp_fisub_w, bRm);
            case 5: return FNIEMOP_CALL_1(iemOp_fisubr_w, bRm);
            case 6: return FNIEMOP_CALL_1(iemOp_fidiv_w, bRm);
            case 7: return FNIEMOP_CALL_1(iemOp_fidivr_w, bRm);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
#endif
        AssertFailedReturn(VERR_NOT_IMPLEMENTED);
    }
}
10159
10160
/**
 * Opcode 0xdf 0xe0 - fnstsw ax.
 *
 * Copies the FPU status word into AX.  Raises \#NM first if the device is
 * unavailable (CR0.EM/TS), like any other FPU instruction.
 */
FNIEMOP_DEF(iemOp_fnstsw_ax)
{
    IEMOP_MNEMONIC("fnstsw ax");
    IEMOP_HLP_NO_LOCK_PREFIX();

    IEM_MC_BEGIN(0, 1);
    IEM_MC_LOCAL(uint16_t, u16Tmp);
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_FETCH_FSW(u16Tmp);
    IEM_MC_STORE_GREG_U16(X86_GREG_xAX, u16Tmp);
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
10176
10177
/**
 * Opcode 0xdf - FPU escape group 7.
 *
 * Only the register form DF E0 (fnstsw ax) is implemented; the fucomip/fcomip
 * rows and all memory forms still assert not-implemented.
 */
FNIEMOP_DEF(iemOp_EscF7)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* Register operand: dispatch on bits 7-3 of the ModR/M byte. */
        switch (bRm & 0xf8)
        {
            case 0xc0: return IEMOP_RAISE_INVALID_OPCODE();
            case 0xc8: return IEMOP_RAISE_INVALID_OPCODE();
            case 0xd0: return IEMOP_RAISE_INVALID_OPCODE();
            case 0xd8: return IEMOP_RAISE_INVALID_OPCODE();
            case 0xe0:
                /* Only DF E0 (fnstsw ax) is valid in this row. */
                switch (bRm)
                {
                    case 0xe0: return FNIEMOP_CALL(iemOp_fnstsw_ax);
                    default: return IEMOP_RAISE_INVALID_OPCODE();
                }
            case 0xe8: AssertFailedReturn(VERR_NOT_IMPLEMENTED); // fucomip
            case 0xf0: AssertFailedReturn(VERR_NOT_IMPLEMENTED); // fcomip
            case 0xf8: return IEMOP_RAISE_INVALID_OPCODE();
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /* Memory operand forms (fild/fist/... family) not implemented yet. */
        AssertFailedReturn(VERR_NOT_IMPLEMENTED);
    }
}
10207
10208
/**
 * Opcode 0xe0 - loopne/loopnz Jb.
 *
 * Decrements CX/ECX/RCX (selected by the effective address size) and takes the
 * short relative branch while the counter is non-zero AND ZF is clear.
 * Does not modify EFLAGS.
 */
FNIEMOP_DEF(iemOp_loopne_Jb)
{
    IEMOP_MNEMONIC("loopne Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    switch (pIemCpu->enmEffAddrMode)
    {
        case IEMMODE_16BIT:
            IEM_MC_BEGIN(0,0);
            IEM_MC_SUB_GREG_U16(X86_GREG_xCX, 1);
            IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(X86_EFL_ZF) {
                IEM_MC_REL_JMP_S8(i8Imm);
            } IEM_MC_ELSE() {
                IEM_MC_ADVANCE_RIP();
            } IEM_MC_ENDIF();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_32BIT:
            IEM_MC_BEGIN(0,0);
            IEM_MC_SUB_GREG_U32(X86_GREG_xCX, 1);
            IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(X86_EFL_ZF) {
                IEM_MC_REL_JMP_S8(i8Imm);
            } IEM_MC_ELSE() {
                IEM_MC_ADVANCE_RIP();
            } IEM_MC_ENDIF();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_64BIT:
            IEM_MC_BEGIN(0,0);
            IEM_MC_SUB_GREG_U64(X86_GREG_xCX, 1);
            IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(X86_EFL_ZF) {
                IEM_MC_REL_JMP_S8(i8Imm);
            } IEM_MC_ELSE() {
                IEM_MC_ADVANCE_RIP();
            } IEM_MC_ENDIF();
            IEM_MC_END();
            return VINF_SUCCESS;

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
10255
10256
/**
 * Opcode 0xe1 - loope/loopz Jb.
 *
 * Decrements CX/ECX/RCX (selected by the effective address size) and takes the
 * short relative branch while the counter is non-zero AND ZF is set.
 * Does not modify EFLAGS.
 */
FNIEMOP_DEF(iemOp_loope_Jb)
{
    IEMOP_MNEMONIC("loope Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    switch (pIemCpu->enmEffAddrMode)
    {
        case IEMMODE_16BIT:
            IEM_MC_BEGIN(0,0);
            IEM_MC_SUB_GREG_U16(X86_GREG_xCX, 1);
            IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(X86_EFL_ZF) {
                IEM_MC_REL_JMP_S8(i8Imm);
            } IEM_MC_ELSE() {
                IEM_MC_ADVANCE_RIP();
            } IEM_MC_ENDIF();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_32BIT:
            IEM_MC_BEGIN(0,0);
            IEM_MC_SUB_GREG_U32(X86_GREG_xCX, 1);
            IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(X86_EFL_ZF) {
                IEM_MC_REL_JMP_S8(i8Imm);
            } IEM_MC_ELSE() {
                IEM_MC_ADVANCE_RIP();
            } IEM_MC_ENDIF();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_64BIT:
            IEM_MC_BEGIN(0,0);
            IEM_MC_SUB_GREG_U64(X86_GREG_xCX, 1);
            IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(X86_EFL_ZF) {
                IEM_MC_REL_JMP_S8(i8Imm);
            } IEM_MC_ELSE() {
                IEM_MC_ADVANCE_RIP();
            } IEM_MC_ENDIF();
            IEM_MC_END();
            return VINF_SUCCESS;

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
10303
10304
/**
 * Opcode 0xe2 - loop Jb.
 *
 * Decrements CX/ECX/RCX (selected by the effective address size) and takes the
 * short relative branch while the counter is non-zero.  Does not modify EFLAGS.
 */
FNIEMOP_DEF(iemOp_loop_Jb)
{
    IEMOP_MNEMONIC("loop Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    /** @todo Check out the #GP case if EIP < CS.Base or EIP > CS.Limit when
     * using the 32-bit operand size override.  How can that be restarted?  See
     * weird pseudo code in intel manual. */
    switch (pIemCpu->enmEffAddrMode)
    {
        case IEMMODE_16BIT:
            IEM_MC_BEGIN(0,0);
            IEM_MC_SUB_GREG_U16(X86_GREG_xCX, 1);
            IEM_MC_IF_CX_IS_NZ() {
                IEM_MC_REL_JMP_S8(i8Imm);
            } IEM_MC_ELSE() {
                IEM_MC_ADVANCE_RIP();
            } IEM_MC_ENDIF();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_32BIT:
            IEM_MC_BEGIN(0,0);
            IEM_MC_SUB_GREG_U32(X86_GREG_xCX, 1);
            IEM_MC_IF_ECX_IS_NZ() {
                IEM_MC_REL_JMP_S8(i8Imm);
            } IEM_MC_ELSE() {
                IEM_MC_ADVANCE_RIP();
            } IEM_MC_ENDIF();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_64BIT:
            IEM_MC_BEGIN(0,0);
            IEM_MC_SUB_GREG_U64(X86_GREG_xCX, 1);
            IEM_MC_IF_RCX_IS_NZ() {
                IEM_MC_REL_JMP_S8(i8Imm);
            } IEM_MC_ELSE() {
                IEM_MC_ADVANCE_RIP();
            } IEM_MC_ENDIF();
            IEM_MC_END();
            return VINF_SUCCESS;

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
10354
10355
/**
 * Opcode 0xe3 - jcxz/jecxz/jrcxz Jb.
 *
 * Branches when the count register selected by the effective address size
 * (CX/ECX/RCX) is zero.  Unlike the LOOP family it does NOT decrement the
 * counter; the branch conditions are therefore inverted relative to LOOP.
 */
FNIEMOP_DEF(iemOp_jecxz_Jb)
{
    IEMOP_MNEMONIC("jecxz Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    switch (pIemCpu->enmEffAddrMode)
    {
        case IEMMODE_16BIT:
            IEM_MC_BEGIN(0,0);
            IEM_MC_IF_CX_IS_NZ() {
                IEM_MC_ADVANCE_RIP();
            } IEM_MC_ELSE() {
                IEM_MC_REL_JMP_S8(i8Imm);
            } IEM_MC_ENDIF();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_32BIT:
            IEM_MC_BEGIN(0,0);
            IEM_MC_IF_ECX_IS_NZ() {
                IEM_MC_ADVANCE_RIP();
            } IEM_MC_ELSE() {
                IEM_MC_REL_JMP_S8(i8Imm);
            } IEM_MC_ENDIF();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_64BIT:
            IEM_MC_BEGIN(0,0);
            IEM_MC_IF_RCX_IS_NZ() {
                IEM_MC_ADVANCE_RIP();
            } IEM_MC_ELSE() {
                IEM_MC_REL_JMP_S8(i8Imm);
            } IEM_MC_ENDIF();
            IEM_MC_END();
            return VINF_SUCCESS;

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
10399
10400
10401/** Opcode 0xe4 */
10402FNIEMOP_DEF(iemOp_in_AL_Ib)
10403{
10404 IEMOP_MNEMONIC("in eAX,Ib");
10405 uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
10406 IEMOP_HLP_NO_LOCK_PREFIX();
10407 return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_in, u8Imm, 1);
10408}
10409
10410
/**
 * Opcode 0xe5 - in eAX,Ib.
 *
 * Word/dword input from the immediate 8-bit port number into AX/EAX; the
 * access size (2 or 4) follows the effective operand size.
 */
FNIEMOP_DEF(iemOp_in_eAX_Ib)
{
    IEMOP_MNEMONIC("in eAX,Ib");
    uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_in, u8Imm, pIemCpu->enmEffOpSize == IEMMODE_16BIT ? 2 : 4);
}
10419
10420
/**
 * Opcode 0xe6 - out Ib,AL.
 *
 * Byte output of AL to the immediate 8-bit port number.
 */
FNIEMOP_DEF(iemOp_out_Ib_AL)
{
    IEMOP_MNEMONIC("out Ib,AL");
    uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_out, u8Imm, 1);
}
10429
10430
/**
 * Opcode 0xe7 - out Ib,eAX.
 *
 * Word/dword output of AX/EAX to the immediate 8-bit port number; the access
 * size (2 or 4) follows the effective operand size.
 */
FNIEMOP_DEF(iemOp_out_Ib_eAX)
{
    IEMOP_MNEMONIC("out Ib,eAX");
    uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_out, u8Imm, pIemCpu->enmEffOpSize == IEMMODE_16BIT ? 2 : 4);
}
10439
10440
/**
 * Opcode 0xe8 - call Jv (near relative call).
 *
 * Fetches the relative displacement per effective operand size and defers to
 * the matching C implementation.  In 64-bit mode the displacement is a sign-
 * extended 32-bit value (64-bit operand size is the default there).
 */
FNIEMOP_DEF(iemOp_call_Jv)
{
    IEMOP_MNEMONIC("call Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
        {
            uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
            return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_call_rel_16, (int16_t)u16Imm);
        }

        case IEMMODE_32BIT:
        {
            uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
            return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_call_rel_32, (int32_t)u32Imm);
        }

        case IEMMODE_64BIT:
        {
            /* imm32 sign-extended to 64 bits. */
            uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm);
            return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_call_rel_64, u64Imm);
        }

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
10469
10470
/**
 * Opcode 0xe9 - jmp Jv (near relative jump).
 *
 * The 32-bit and 64-bit operand sizes share one decode path: both fetch a
 * signed 32-bit displacement (64-bit mode sign-extends it when jumping).
 */
FNIEMOP_DEF(iemOp_jmp_Jv)
{
    IEMOP_MNEMONIC("jmp Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
        {
            int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
            IEM_MC_BEGIN(0, 0);
            IEM_MC_REL_JMP_S16(i16Imm);
            IEM_MC_END();
            return VINF_SUCCESS;
        }

        case IEMMODE_64BIT:
        case IEMMODE_32BIT:
        {
            int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
            IEM_MC_BEGIN(0, 0);
            IEM_MC_REL_JMP_S32(i32Imm);
            IEM_MC_END();
            return VINF_SUCCESS;
        }

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
10500
10501
/**
 * Opcode 0xea - jmp Ap (direct far jump, sel:off encoded in the instruction).
 *
 * Invalid in 64-bit mode (IEMOP_HLP_NO_64BIT).  The offset width follows the
 * effective operand size; the 16-bit selector always follows the offset.
 */
FNIEMOP_DEF(iemOp_jmp_Ap)
{
    IEMOP_MNEMONIC("jmp Ap");
    IEMOP_HLP_NO_64BIT();

    /* Decode the far pointer address and pass it on to the far jump C implementation. */
    uint32_t offSeg;
    if (pIemCpu->enmEffOpSize != IEMMODE_16BIT)
        IEM_OPCODE_GET_NEXT_U32(&offSeg);
    else
        IEM_OPCODE_GET_NEXT_U16_ZX_U32(&offSeg);
    uint16_t uSel;  IEM_OPCODE_GET_NEXT_U16(&uSel);
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_3(iemCImpl_FarJmp, uSel, offSeg, pIemCpu->enmEffOpSize);
}
10518
10519
/**
 * Opcode 0xeb - jmp Jb (short relative jump, signed 8-bit displacement).
 */
FNIEMOP_DEF(iemOp_jmp_Jb)
{
    IEMOP_MNEMONIC("jmp Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    IEM_MC_BEGIN(0, 0);
    IEM_MC_REL_JMP_S8(i8Imm);
    IEM_MC_END();
    return VINF_SUCCESS;
}
10533
10534
/**
 * Opcode 0xec - in AL,DX (byte input from the port in DX).
 */
FNIEMOP_DEF(iemOp_in_AL_DX)
{
    IEMOP_MNEMONIC("in AL,DX");
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_in_eAX_DX, 1);
}
10542
10543
/**
 * Opcode 0xed - in eAX,DX (word/dword input from the port in DX).
 *
 * @note The function name is missing the "in_" part (should be
 *       iemOp_in_eAX_DX); kept as-is because the one-byte opcode table
 *       references it by this name.
 */
FNIEMOP_DEF(iemOp_eAX_DX)
{
    IEMOP_MNEMONIC("in eAX,DX");
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_in_eAX_DX, pIemCpu->enmEffOpSize == IEMMODE_16BIT ? 2 : 4);
}
10551
10552
/**
 * Opcode 0xee - out DX,AL (byte output to the port in DX).
 */
FNIEMOP_DEF(iemOp_out_DX_AL)
{
    IEMOP_MNEMONIC("out DX,AL");
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_out_DX_eAX, 1);
}
10560
10561
/**
 * Opcode 0xef - out DX,eAX (word/dword output to the port in DX).
 */
FNIEMOP_DEF(iemOp_out_DX_eAX)
{
    IEMOP_MNEMONIC("out DX,eAX");
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_out_DX_eAX, pIemCpu->enmEffOpSize == IEMMODE_16BIT ? 2 : 4);
}
10569
10570
/**
 * Opcode 0xf0 - LOCK prefix.
 *
 * Records the prefix in pIemCpu->fPrefixes and re-dispatches on the next
 * opcode byte via the one-byte opcode table.
 */
FNIEMOP_DEF(iemOp_lock)
{
    pIemCpu->fPrefixes |= IEM_OP_PRF_LOCK;

    uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
    return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}
10579
10580
/**
 * Opcode 0xf2 - REPNE/REPNZ prefix.
 *
 * Clears any earlier REPE bit (the last REP-type prefix wins), records REPNZ,
 * and re-dispatches on the next opcode byte.
 */
FNIEMOP_DEF(iemOp_repne)
{
    /* This overrides any previous REPE prefix. */
    pIemCpu->fPrefixes &= ~IEM_OP_PRF_REPZ;
    pIemCpu->fPrefixes |= IEM_OP_PRF_REPNZ;

    uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
    return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}
10591
10592
/**
 * Opcode 0xf3 - REPE/REPZ prefix.
 *
 * Clears any earlier REPNE bit (the last REP-type prefix wins), records REPZ,
 * and re-dispatches on the next opcode byte.
 */
FNIEMOP_DEF(iemOp_repe)
{
    /* This overrides any previous REPNE prefix. */
    pIemCpu->fPrefixes &= ~IEM_OP_PRF_REPNZ;
    pIemCpu->fPrefixes |= IEM_OP_PRF_REPZ;

    uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
    return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}
10603
10604
10605/** Opcode 0xf4. */
10606FNIEMOP_DEF(iemOp_hlt)
10607{
10608 IEMOP_HLP_NO_LOCK_PREFIX();
10609 return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_hlt);
10610}
10611
10612
/**
 * Opcode 0xf5 - cmc (complement the carry flag).
 */
FNIEMOP_DEF(iemOp_cmc)
{
    IEMOP_MNEMONIC("cmc");
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEM_MC_BEGIN(0, 0);
    IEM_MC_FLIP_EFL_BIT(X86_EFL_CF);
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
10624
10625
10626/**
10627 * Common implementation of 'inc/dec/not/neg Eb'.
10628 *
10629 * @param bRm The RM byte.
10630 * @param pImpl The instruction implementation.
10631 */
10632FNIEMOP_DEF_2(iemOpCommonUnaryEb, uint8_t, bRm, PCIEMOPUNARYSIZES, pImpl)
10633{
10634 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
10635 {
10636 /* register access */
10637 IEM_MC_BEGIN(2, 0);
10638 IEM_MC_ARG(uint8_t *, pu8Dst, 0);
10639 IEM_MC_ARG(uint32_t *, pEFlags, 1);
10640 IEM_MC_REF_GREG_U8(pu8Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
10641 IEM_MC_REF_EFLAGS(pEFlags);
10642 IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnNormalU8, pu8Dst, pEFlags);
10643 IEM_MC_ADVANCE_RIP();
10644 IEM_MC_END();
10645 }
10646 else
10647 {
10648 /* memory access. */
10649 IEM_MC_BEGIN(2, 2);
10650 IEM_MC_ARG(uint8_t *, pu8Dst, 0);
10651 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 1);
10652 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
10653
10654 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
10655 IEM_MC_MEM_MAP(pu8Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
10656 IEM_MC_FETCH_EFLAGS(EFlags);
10657 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
10658 IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnNormalU8, pu8Dst, pEFlags);
10659 else
10660 IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnLockedU8, pu8Dst, pEFlags);
10661
10662 IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Dst, IEM_ACCESS_DATA_RW);
10663 IEM_MC_COMMIT_EFLAGS(EFlags);
10664 IEM_MC_ADVANCE_RIP();
10665 IEM_MC_END();
10666 }
10667 return VINF_SUCCESS;
10668}
10669
10670
10671/**
10672 * Common implementation of 'inc/dec/not/neg Ev'.
10673 *
10674 * @param bRm The RM byte.
10675 * @param pImpl The instruction implementation.
10676 */
10677FNIEMOP_DEF_2(iemOpCommonUnaryEv, uint8_t, bRm, PCIEMOPUNARYSIZES, pImpl)
10678{
10679 /* Registers are handled by a common worker. */
10680 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
10681 return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, pImpl, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
10682
10683 /* Memory we do here. */
10684 switch (pIemCpu->enmEffOpSize)
10685 {
10686 case IEMMODE_16BIT:
10687 IEM_MC_BEGIN(2, 2);
10688 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
10689 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 1);
10690 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
10691
10692 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
10693 IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
10694 IEM_MC_FETCH_EFLAGS(EFlags);
10695 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
10696 IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnNormalU16, pu16Dst, pEFlags);
10697 else
10698 IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnLockedU16, pu16Dst, pEFlags);
10699
10700 IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
10701 IEM_MC_COMMIT_EFLAGS(EFlags);
10702 IEM_MC_ADVANCE_RIP();
10703 IEM_MC_END();
10704 return VINF_SUCCESS;
10705
10706 case IEMMODE_32BIT:
10707 IEM_MC_BEGIN(2, 2);
10708 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
10709 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 1);
10710 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
10711
10712 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
10713 IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
10714 IEM_MC_FETCH_EFLAGS(EFlags);
10715 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
10716 IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnNormalU32, pu32Dst, pEFlags);
10717 else
10718 IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnLockedU32, pu32Dst, pEFlags);
10719
10720 IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
10721 IEM_MC_COMMIT_EFLAGS(EFlags);
10722 IEM_MC_ADVANCE_RIP();
10723 IEM_MC_END();
10724 return VINF_SUCCESS;
10725
10726 case IEMMODE_64BIT:
10727 IEM_MC_BEGIN(2, 2);
10728 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
10729 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 1);
10730 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
10731
10732 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
10733 IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
10734 IEM_MC_FETCH_EFLAGS(EFlags);
10735 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
10736 IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnNormalU64, pu64Dst, pEFlags);
10737 else
10738 IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnLockedU64, pu64Dst, pEFlags);
10739
10740 IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, IEM_ACCESS_DATA_RW);
10741 IEM_MC_COMMIT_EFLAGS(EFlags);
10742 IEM_MC_ADVANCE_RIP();
10743 IEM_MC_END();
10744 return VINF_SUCCESS;
10745
10746 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10747 }
10748}
10749
10750
/**
 * Opcode 0xf6 /0 - test Eb,Ib.
 *
 * TEST only reads its destination, so the memory operand is mapped read-only;
 * only EFLAGS are written.  Note the decode-order quirk for the memory form:
 * the effective address is calculated before the immediate byte is fetched.
 */
FNIEMOP_DEF_1(iemOp_grp3_test_Eb, uint8_t, bRm)
{
    IEMOP_MNEMONIC("test Eb,Ib");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register access */
        uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(3, 0);
        IEM_MC_ARG(uint8_t *, pu8Dst, 0);
        IEM_MC_ARG_CONST(uint8_t, u8Src,/*=*/u8Imm, 1);
        IEM_MC_ARG(uint32_t *, pEFlags, 2);
        IEM_MC_REF_GREG_U8(pu8Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_REF_EFLAGS(pEFlags);
        IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_test_u8, pu8Dst, u8Src, pEFlags);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory access. */
        IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

        IEM_MC_BEGIN(3, 2);
        IEM_MC_ARG(uint8_t *, pu8Dst, 0);
        IEM_MC_ARG(uint8_t, u8Src, 1);
        IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
        uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
        IEM_MC_ASSIGN(u8Src, u8Imm);
        IEM_MC_MEM_MAP(pu8Dst, IEM_ACCESS_DATA_R, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
        IEM_MC_FETCH_EFLAGS(EFlags);
        IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_test_u8, pu8Dst, u8Src, pEFlags);

        IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Dst, IEM_ACCESS_DATA_R);
        IEM_MC_COMMIT_EFLAGS(EFlags);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
10798
10799
/**
 * Opcode 0xf7 /0 - test Ev,Iv.
 *
 * Word/dword/qword TEST against an immediate (imm32 sign-extended for the
 * 64-bit operand size).  TEST only reads its destination, so memory operands
 * are mapped read-only; only EFLAGS are written.
 */
FNIEMOP_DEF_1(iemOp_grp3_test_Ev, uint8_t, bRm)
{
    IEMOP_MNEMONIC("test Ev,Iv");
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register access */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
            {
                uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint16_t *, pu16Dst, 0);
                IEM_MC_ARG_CONST(uint16_t, u16Src,/*=*/u16Imm, 1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_test_u16, pu16Dst, u16Src, pEFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;
            }

            case IEMMODE_32BIT:
            {
                uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint32_t *, pu32Dst, 0);
                IEM_MC_ARG_CONST(uint32_t, u32Src,/*=*/u32Imm, 1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_test_u32, pu32Dst, u32Src, pEFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;
            }

            case IEMMODE_64BIT:
            {
                /* imm32 sign-extended to 64 bits. */
                uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm);
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint64_t *, pu64Dst, 0);
                IEM_MC_ARG_CONST(uint64_t, u64Src,/*=*/u64Imm, 1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_test_u64, pu64Dst, u64Src, pEFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;
            }

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /* memory access.  Note: effective address first, then the immediate. */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
            {
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint16_t *, pu16Dst, 0);
                IEM_MC_ARG(uint16_t, u16Src, 1);
                IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
                IEM_MC_ASSIGN(u16Src, u16Imm);
                IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_R, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_EFLAGS(EFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_test_u16, pu16Dst, u16Src, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_R);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;
            }

            case IEMMODE_32BIT:
            {
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint32_t *, pu32Dst, 0);
                IEM_MC_ARG(uint32_t, u32Src, 1);
                IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
                IEM_MC_ASSIGN(u32Src, u32Imm);
                IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_R, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_EFLAGS(EFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_test_u32, pu32Dst, u32Src, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_R);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;
            }

            case IEMMODE_64BIT:
            {
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint64_t *, pu64Dst, 0);
                IEM_MC_ARG(uint64_t, u64Src, 1);
                IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm);
                IEM_MC_ASSIGN(u64Src, u64Imm);
                IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_R, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_EFLAGS(EFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_test_u64, pu64Dst, u64Src, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, IEM_ACCESS_DATA_R);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;
            }

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
10935
10936
/**
 * Common worker for opcode 0xf6 /4, /5, /6 and /7 (mul/imul/div/idiv Eb).
 *
 * The byte forms all operate on AX (implicit destination): the worker gets a
 * pointer to AX, the 8-bit source value, and EFLAGS.
 *
 * @param   bRm     The RM byte.
 * @param   pfnU8   The 8-bit assembly worker (mul/imul/div/idiv).
 * @note    Unlike the Ev worker below, the return code of pfnU8 is not checked
 *          here, so no \#DE is raised from this path.
 */
FNIEMOP_DEF_2(iemOpCommonGrp3MulDivEb, uint8_t, bRm, PFNIEMAIMPLMULDIVU8, pfnU8)
{
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register access */
        IEMOP_HLP_NO_LOCK_PREFIX();
        IEM_MC_BEGIN(3, 0);
        IEM_MC_ARG(uint16_t *, pu16AX, 0);
        IEM_MC_ARG(uint8_t, u8Value, 1);
        IEM_MC_ARG(uint32_t *, pEFlags, 2);
        IEM_MC_FETCH_GREG_U8(u8Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_REF_GREG_U16(pu16AX, X86_GREG_xAX);
        IEM_MC_REF_EFLAGS(pEFlags);
        IEM_MC_CALL_VOID_AIMPL_3(pfnU8, pu16AX, u8Value, pEFlags);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory access. */
        IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

        IEM_MC_BEGIN(3, 1);
        IEM_MC_ARG(uint16_t *, pu16AX, 0);
        IEM_MC_ARG(uint8_t, u8Value, 1);
        IEM_MC_ARG(uint32_t *, pEFlags, 2);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
        IEM_MC_FETCH_MEM_U8(u8Value, pIemCpu->iEffSeg, GCPtrEffDst);
        IEM_MC_REF_GREG_U16(pu16AX, X86_GREG_xAX);
        IEM_MC_REF_EFLAGS(pEFlags);
        IEM_MC_CALL_VOID_AIMPL_3(pfnU8, pu16AX, u8Value, pEFlags);

        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
10979
10980
/**
 * Common worker for opcode 0xf7 /4, /5, /6 and /7 (mul/imul/div/idiv Ev).
 *
 * All variants use xAX and xDX as implicit operands.  The assembly worker
 * returns a status; a non-zero return raises \#DE (divide error), in which
 * case RIP is not advanced.
 *
 * @param   bRm     The RM byte.
 * @param   pImpl   The instruction implementation (16/32/64-bit workers).
 */
FNIEMOP_DEF_2(iemOpCommonGrp3MulDivEv, uint8_t, bRm, PCIEMOPMULDIVSIZES, pImpl)
{
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register access */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
            {
                IEMOP_HLP_NO_LOCK_PREFIX();
                IEM_MC_BEGIN(4, 1);
                IEM_MC_ARG(uint16_t *, pu16AX, 0);
                IEM_MC_ARG(uint16_t *, pu16DX, 1);
                IEM_MC_ARG(uint16_t, u16Value, 2);
                IEM_MC_ARG(uint32_t *, pEFlags, 3);
                IEM_MC_LOCAL(int32_t, rc);

                IEM_MC_FETCH_GREG_U16(u16Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_GREG_U16(pu16AX, X86_GREG_xAX);
                IEM_MC_REF_GREG_U16(pu16DX, X86_GREG_xDX);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_AIMPL_4(rc, pImpl->pfnU16, pu16AX, pu16DX, u16Value, pEFlags);
                /* rc == 0: success; otherwise raise #DE. */
                IEM_MC_IF_LOCAL_IS_Z(rc) {
                    IEM_MC_ADVANCE_RIP();
                } IEM_MC_ELSE() {
                    IEM_MC_RAISE_DIVIDE_ERROR();
                } IEM_MC_ENDIF();

                IEM_MC_END();
                return VINF_SUCCESS;
            }

            case IEMMODE_32BIT:
            {
                IEMOP_HLP_NO_LOCK_PREFIX();
                IEM_MC_BEGIN(4, 1);
                IEM_MC_ARG(uint32_t *, pu32AX, 0);
                IEM_MC_ARG(uint32_t *, pu32DX, 1);
                IEM_MC_ARG(uint32_t, u32Value, 2);
                IEM_MC_ARG(uint32_t *, pEFlags, 3);
                IEM_MC_LOCAL(int32_t, rc);

                IEM_MC_FETCH_GREG_U32(u32Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_GREG_U32(pu32AX, X86_GREG_xAX);
                IEM_MC_REF_GREG_U32(pu32DX, X86_GREG_xDX);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_AIMPL_4(rc, pImpl->pfnU32, pu32AX, pu32DX, u32Value, pEFlags);
                IEM_MC_IF_LOCAL_IS_Z(rc) {
                    IEM_MC_ADVANCE_RIP();
                } IEM_MC_ELSE() {
                    IEM_MC_RAISE_DIVIDE_ERROR();
                } IEM_MC_ENDIF();

                IEM_MC_END();
                return VINF_SUCCESS;
            }

            case IEMMODE_64BIT:
            {
                IEMOP_HLP_NO_LOCK_PREFIX();
                IEM_MC_BEGIN(4, 1);
                IEM_MC_ARG(uint64_t *, pu64AX, 0);
                IEM_MC_ARG(uint64_t *, pu64DX, 1);
                IEM_MC_ARG(uint64_t, u64Value, 2);
                IEM_MC_ARG(uint32_t *, pEFlags, 3);
                IEM_MC_LOCAL(int32_t, rc);

                IEM_MC_FETCH_GREG_U64(u64Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_GREG_U64(pu64AX, X86_GREG_xAX);
                IEM_MC_REF_GREG_U64(pu64DX, X86_GREG_xDX);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_AIMPL_4(rc, pImpl->pfnU64, pu64AX, pu64DX, u64Value, pEFlags);
                IEM_MC_IF_LOCAL_IS_Z(rc) {
                    IEM_MC_ADVANCE_RIP();
                } IEM_MC_ELSE() {
                    IEM_MC_RAISE_DIVIDE_ERROR();
                } IEM_MC_ENDIF();

                IEM_MC_END();
                return VINF_SUCCESS;
            }

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /* memory access. */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
            {
                IEMOP_HLP_NO_LOCK_PREFIX();
                IEM_MC_BEGIN(4, 2);
                IEM_MC_ARG(uint16_t *, pu16AX, 0);
                IEM_MC_ARG(uint16_t *, pu16DX, 1);
                IEM_MC_ARG(uint16_t, u16Value, 2);
                IEM_MC_ARG(uint32_t *, pEFlags, 3);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
                IEM_MC_LOCAL(int32_t, rc);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                IEM_MC_FETCH_MEM_U16(u16Value, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_REF_GREG_U16(pu16AX, X86_GREG_xAX);
                IEM_MC_REF_GREG_U16(pu16DX, X86_GREG_xDX);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_AIMPL_4(rc, pImpl->pfnU16, pu16AX, pu16DX, u16Value, pEFlags);
                IEM_MC_IF_LOCAL_IS_Z(rc) {
                    IEM_MC_ADVANCE_RIP();
                } IEM_MC_ELSE() {
                    IEM_MC_RAISE_DIVIDE_ERROR();
                } IEM_MC_ENDIF();

                IEM_MC_END();
                return VINF_SUCCESS;
            }

            case IEMMODE_32BIT:
            {
                IEMOP_HLP_NO_LOCK_PREFIX();
                IEM_MC_BEGIN(4, 2);
                IEM_MC_ARG(uint32_t *, pu32AX, 0);
                IEM_MC_ARG(uint32_t *, pu32DX, 1);
                IEM_MC_ARG(uint32_t, u32Value, 2);
                IEM_MC_ARG(uint32_t *, pEFlags, 3);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
                IEM_MC_LOCAL(int32_t, rc);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                IEM_MC_FETCH_MEM_U32(u32Value, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_REF_GREG_U32(pu32AX, X86_GREG_xAX);
                IEM_MC_REF_GREG_U32(pu32DX, X86_GREG_xDX);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_AIMPL_4(rc, pImpl->pfnU32, pu32AX, pu32DX, u32Value, pEFlags);
                IEM_MC_IF_LOCAL_IS_Z(rc) {
                    IEM_MC_ADVANCE_RIP();
                } IEM_MC_ELSE() {
                    IEM_MC_RAISE_DIVIDE_ERROR();
                } IEM_MC_ENDIF();

                IEM_MC_END();
                return VINF_SUCCESS;
            }

            case IEMMODE_64BIT:
            {
                IEMOP_HLP_NO_LOCK_PREFIX();
                IEM_MC_BEGIN(4, 2);
                IEM_MC_ARG(uint64_t *, pu64AX, 0);
                IEM_MC_ARG(uint64_t *, pu64DX, 1);
                IEM_MC_ARG(uint64_t, u64Value, 2);
                IEM_MC_ARG(uint32_t *, pEFlags, 3);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
                IEM_MC_LOCAL(int32_t, rc);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                IEM_MC_FETCH_MEM_U64(u64Value, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_REF_GREG_U64(pu64AX, X86_GREG_xAX);
                IEM_MC_REF_GREG_U64(pu64DX, X86_GREG_xDX);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_AIMPL_4(rc, pImpl->pfnU64, pu64AX, pu64DX, u64Value, pEFlags);
                IEM_MC_IF_LOCAL_IS_Z(rc) {
                    IEM_MC_ADVANCE_RIP();
                } IEM_MC_ELSE() {
                    IEM_MC_RAISE_DIVIDE_ERROR();
                } IEM_MC_ENDIF();

                IEM_MC_END();
                return VINF_SUCCESS;
            }

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
11160
/**
 * Opcode 0xf6 - group 3 with byte operand (test/not/neg/mul/imul/div/idiv Eb).
 *
 * Dispatches on the reg field of the ModR/M byte.  /1 is undefined and is
 * currently rejected here.
 */
FNIEMOP_DEF(iemOp_Grp3_Eb)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
    {
        case 0:
            return FNIEMOP_CALL_1(iemOp_grp3_test_Eb, bRm);
        case 1:
            return IEMOP_RAISE_INVALID_LOCK_PREFIX();
        case 2:
            IEMOP_MNEMONIC("not Eb");
            return FNIEMOP_CALL_2(iemOpCommonUnaryEb, bRm, &g_iemAImpl_not);
        case 3:
            IEMOP_MNEMONIC("neg Eb");
            return FNIEMOP_CALL_2(iemOpCommonUnaryEb, bRm, &g_iemAImpl_neg);
        case 4:
            IEMOP_MNEMONIC("mul Eb");
            IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);
            return FNIEMOP_CALL_2(iemOpCommonGrp3MulDivEb, bRm, iemAImpl_mul_u8);
        case 5:
            IEMOP_MNEMONIC("imul Eb");
            IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);
            return FNIEMOP_CALL_2(iemOpCommonGrp3MulDivEb, bRm, iemAImpl_imul_u8);
        case 6:
            IEMOP_MNEMONIC("div Eb");
            IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_OF | X86_EFL_CF);
            return FNIEMOP_CALL_2(iemOpCommonGrp3MulDivEb, bRm, iemAImpl_div_u8);
        case 7:
            IEMOP_MNEMONIC("idiv Eb");
            IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_OF | X86_EFL_CF);
            return FNIEMOP_CALL_2(iemOpCommonGrp3MulDivEb, bRm, iemAImpl_idiv_u8);
        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
11196
11197
/**
 * Opcode 0xf7 - group 3 with word/dword/qword operand
 * (test/not/neg/mul/imul/div/idiv Ev).
 *
 * Dispatches on the reg field of the ModR/M byte.  /1 is undefined and is
 * currently rejected here.
 */
FNIEMOP_DEF(iemOp_Grp3_Ev)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
    {
        case 0:
            return FNIEMOP_CALL_1(iemOp_grp3_test_Ev, bRm);
        case 1:
            return IEMOP_RAISE_INVALID_LOCK_PREFIX();
        case 2:
            IEMOP_MNEMONIC("not Ev");
            return FNIEMOP_CALL_2(iemOpCommonUnaryEv, bRm, &g_iemAImpl_not);
        case 3:
            IEMOP_MNEMONIC("neg Ev");
            return FNIEMOP_CALL_2(iemOpCommonUnaryEv, bRm, &g_iemAImpl_neg);
        case 4:
            IEMOP_MNEMONIC("mul Ev");
            IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);
            return FNIEMOP_CALL_2(iemOpCommonGrp3MulDivEv, bRm, &g_iemAImpl_mul);
        case 5:
            IEMOP_MNEMONIC("imul Ev");
            IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);
            return FNIEMOP_CALL_2(iemOpCommonGrp3MulDivEv, bRm, &g_iemAImpl_imul);
        case 6:
            IEMOP_MNEMONIC("div Ev");
            IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_OF | X86_EFL_CF);
            return FNIEMOP_CALL_2(iemOpCommonGrp3MulDivEv, bRm, &g_iemAImpl_div);
        case 7:
            IEMOP_MNEMONIC("idiv Ev");
            IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_OF | X86_EFL_CF);
            return FNIEMOP_CALL_2(iemOpCommonGrp3MulDivEv, bRm, &g_iemAImpl_idiv);
        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
11233
11234
/** Opcode 0xf8. */
FNIEMOP_DEF(iemOp_clc)
{
    /* CLC - clear the carry flag.  Lock prefix is rejected. */
    IEMOP_MNEMONIC("clc");
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEM_MC_BEGIN(0, 0);
    IEM_MC_CLEAR_EFL_BIT(X86_EFL_CF);   /* EFLAGS.CF := 0 */
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
11246
11247
/** Opcode 0xf9. */
FNIEMOP_DEF(iemOp_stc)
{
    /* STC - set the carry flag.  Lock prefix is rejected. */
    IEMOP_MNEMONIC("stc");
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEM_MC_BEGIN(0, 0);
    IEM_MC_SET_EFL_BIT(X86_EFL_CF);     /* EFLAGS.CF := 1 */
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
11259
11260
/** Opcode 0xfa. */
FNIEMOP_DEF(iemOp_cli)
{
    /* CLI - deferred to the C implementation (iemCImpl_cli), presumably
       because of IOPL/CPL privilege checking done there -- the checks are
       not visible from this file. */
    IEMOP_MNEMONIC("cli");
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_cli);
}
11268
11269
/** Opcode 0xfb. */
FNIEMOP_DEF(iemOp_sti)
{
    /* STI - deferred to the C implementation (iemCImpl_sti), presumably
       because of privilege checks and the interrupt shadow handled there --
       not visible from this file. */
    IEMOP_MNEMONIC("sti");
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_sti);
}
11276
11277
/** Opcode 0xfc. */
FNIEMOP_DEF(iemOp_cld)
{
    /* CLD - clear the direction flag.  Lock prefix is rejected. */
    IEMOP_MNEMONIC("cld");
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEM_MC_BEGIN(0, 0);
    IEM_MC_CLEAR_EFL_BIT(X86_EFL_DF);   /* EFLAGS.DF := 0 */
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
11289
11290
/** Opcode 0xfd. */
FNIEMOP_DEF(iemOp_std)
{
    /* STD - set the direction flag.  Lock prefix is rejected. */
    IEMOP_MNEMONIC("std");
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEM_MC_BEGIN(0, 0);
    IEM_MC_SET_EFL_BIT(X86_EFL_DF);     /* EFLAGS.DF := 1 */
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
11302
11303
11304/** Opcode 0xfe. */
11305FNIEMOP_DEF(iemOp_Grp4)
11306{
11307 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
11308 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
11309 {
11310 case 0:
11311 IEMOP_MNEMONIC("inc Ev");
11312 return FNIEMOP_CALL_2(iemOpCommonUnaryEb, bRm, &g_iemAImpl_inc);
11313 case 1:
11314 IEMOP_MNEMONIC("dec Ev");
11315 return FNIEMOP_CALL_2(iemOpCommonUnaryEb, bRm, &g_iemAImpl_dec);
11316 default:
11317 IEMOP_MNEMONIC("grp4-ud");
11318 return IEMOP_RAISE_INVALID_OPCODE();
11319 }
11320}
11321
11322
/**
 * Opcode 0xff /2 - calln Ev (near indirect call).
 *
 * Reads the target from a general register or from memory and hands it to
 * the iemCImpl_call_16/32/64 C implementation, which does the actual stack
 * push and RIP update.
 *
 * @param   bRm     The RM byte.
 */
FNIEMOP_DEF_1(iemOp_Grp5_calln_Ev, uint8_t, bRm)
{
    IEMOP_MNEMONIC("calln Ev");
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo Too early? */
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* The new RIP is taken from a register. */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(1, 0);
                IEM_MC_ARG(uint16_t, u16Target, 0);
                IEM_MC_FETCH_GREG_U16(u16Target, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_CALL_CIMPL_1(iemCImpl_call_16, u16Target);
                IEM_MC_END()
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(1, 0);
                IEM_MC_ARG(uint32_t, u32Target, 0);
                IEM_MC_FETCH_GREG_U32(u32Target, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_CALL_CIMPL_1(iemCImpl_call_32, u32Target);
                IEM_MC_END()
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(1, 0);
                IEM_MC_ARG(uint64_t, u64Target, 0);
                IEM_MC_FETCH_GREG_U64(u64Target, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_CALL_CIMPL_1(iemCImpl_call_64, u64Target);
                IEM_MC_END()
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /* The new RIP is loaded from memory (comment previously said
           "register" - copy/paste from the branch above). */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(1, 1);
                IEM_MC_ARG(uint16_t, u16Target, 0);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
                IEM_MC_FETCH_MEM_U16(u16Target, pIemCpu->iEffSeg, GCPtrEffSrc);
                IEM_MC_CALL_CIMPL_1(iemCImpl_call_16, u16Target);
                IEM_MC_END()
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(1, 1);
                IEM_MC_ARG(uint32_t, u32Target, 0);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
                IEM_MC_FETCH_MEM_U32(u32Target, pIemCpu->iEffSeg, GCPtrEffSrc);
                IEM_MC_CALL_CIMPL_1(iemCImpl_call_32, u32Target);
                IEM_MC_END()
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(1, 1);
                IEM_MC_ARG(uint64_t, u64Target, 0);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
                IEM_MC_FETCH_MEM_U64(u64Target, pIemCpu->iEffSeg, GCPtrEffSrc);
                IEM_MC_CALL_CIMPL_1(iemCImpl_call_64, u64Target);
                IEM_MC_END()
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
11404
11405
11406/**
11407 * Opcode 0xff /3.
11408 * @param bRm The RM byte.
11409 */
11410FNIEMOP_DEF_1(iemOp_Grp5_callf_Ep, uint8_t, bRm)
11411{
11412 IEMOP_MNEMONIC("callf Ep");
11413 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo Too early? */
11414
11415 /* Registers? How?? */
11416 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
11417 {
11418 /** @todo How the heck does a 'callf eax' work? Probably just have to
11419 * search the docs... */
11420 AssertFailedReturn(VERR_NOT_IMPLEMENTED);
11421 }
11422
11423 /* Far pointer loaded from memory. */
11424 switch (pIemCpu->enmEffOpSize)
11425 {
11426 case IEMMODE_16BIT:
11427 IEM_MC_BEGIN(3, 1);
11428 IEM_MC_ARG(uint16_t, u16Sel, 0);
11429 IEM_MC_ARG(uint16_t, offSeg, 1);
11430 IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize, IEMMODE_16BIT, 2);
11431 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
11432 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
11433 IEM_MC_FETCH_MEM_U16(offSeg, pIemCpu->iEffSeg, GCPtrEffSrc);
11434 IEM_MC_FETCH_MEM_U16_DISP(u16Sel, pIemCpu->iEffSeg, GCPtrEffSrc, 2);
11435 IEM_MC_CALL_CIMPL_3(iemCImpl_callf, u16Sel, offSeg, enmEffOpSize);
11436 IEM_MC_END();
11437 return VINF_SUCCESS;
11438
11439 case IEMMODE_32BIT:
11440 IEM_MC_BEGIN(3, 1);
11441 IEM_MC_ARG(uint16_t, u16Sel, 0);
11442 IEM_MC_ARG(uint32_t, offSeg, 1);
11443 IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize, IEMMODE_32BIT, 2);
11444 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
11445 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
11446 IEM_MC_FETCH_MEM_U32(offSeg, pIemCpu->iEffSeg, GCPtrEffSrc);
11447 IEM_MC_FETCH_MEM_U16_DISP(u16Sel, pIemCpu->iEffSeg, GCPtrEffSrc, 4);
11448 IEM_MC_CALL_CIMPL_3(iemCImpl_callf, u16Sel, offSeg, enmEffOpSize);
11449 IEM_MC_END();
11450 return VINF_SUCCESS;
11451
11452 case IEMMODE_64BIT:
11453 IEM_MC_BEGIN(3, 1);
11454 IEM_MC_ARG(uint16_t, u16Sel, 0);
11455 IEM_MC_ARG(uint64_t, offSeg, 1);
11456 IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize, IEMMODE_16BIT, 2);
11457 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
11458 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
11459 IEM_MC_FETCH_MEM_U64(offSeg, pIemCpu->iEffSeg, GCPtrEffSrc);
11460 IEM_MC_FETCH_MEM_U16_DISP(u16Sel, pIemCpu->iEffSeg, GCPtrEffSrc, 8);
11461 IEM_MC_CALL_CIMPL_3(iemCImpl_callf, u16Sel, offSeg, enmEffOpSize);
11462 IEM_MC_END();
11463 return VINF_SUCCESS;
11464
11465 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11466 }
11467}
11468
11469
11470/**
11471 * Opcode 0xff /4.
11472 * @param bRm The RM byte.
11473 */
11474FNIEMOP_DEF_1(iemOp_Grp5_jmpn_Ev, uint8_t, bRm)
11475{
11476 IEMOP_MNEMONIC("jmpn Ev");
11477 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo Too early? */
11478 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
11479
11480 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
11481 {
11482 /* The new RIP is taken from a register. */
11483 switch (pIemCpu->enmEffOpSize)
11484 {
11485 case IEMMODE_16BIT:
11486 IEM_MC_BEGIN(0, 1);
11487 IEM_MC_LOCAL(uint16_t, u16Target);
11488 IEM_MC_FETCH_GREG_U16(u16Target, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
11489 IEM_MC_SET_RIP_U16(u16Target);
11490 IEM_MC_END()
11491 return VINF_SUCCESS;
11492
11493 case IEMMODE_32BIT:
11494 IEM_MC_BEGIN(0, 1);
11495 IEM_MC_LOCAL(uint32_t, u32Target);
11496 IEM_MC_FETCH_GREG_U32(u32Target, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
11497 IEM_MC_SET_RIP_U32(u32Target);
11498 IEM_MC_END()
11499 return VINF_SUCCESS;
11500
11501 case IEMMODE_64BIT:
11502 IEM_MC_BEGIN(0, 1);
11503 IEM_MC_LOCAL(uint64_t, u64Target);
11504 IEM_MC_FETCH_GREG_U64(u64Target, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
11505 IEM_MC_SET_RIP_U64(u64Target);
11506 IEM_MC_END()
11507 return VINF_SUCCESS;
11508
11509 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11510 }
11511 }
11512 else
11513 {
11514 /* The new RIP is taken from a register. */
11515 switch (pIemCpu->enmEffOpSize)
11516 {
11517 case IEMMODE_16BIT:
11518 IEM_MC_BEGIN(0, 2);
11519 IEM_MC_LOCAL(uint16_t, u16Target);
11520 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
11521 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
11522 IEM_MC_FETCH_MEM_U16(u16Target, pIemCpu->iEffSeg, GCPtrEffSrc);
11523 IEM_MC_SET_RIP_U16(u16Target);
11524 IEM_MC_END()
11525 return VINF_SUCCESS;
11526
11527 case IEMMODE_32BIT:
11528 IEM_MC_BEGIN(0, 2);
11529 IEM_MC_LOCAL(uint32_t, u32Target);
11530 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
11531 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
11532 IEM_MC_FETCH_MEM_U32(u32Target, pIemCpu->iEffSeg, GCPtrEffSrc);
11533 IEM_MC_SET_RIP_U32(u32Target);
11534 IEM_MC_END()
11535 return VINF_SUCCESS;
11536
11537 case IEMMODE_64BIT:
11538 IEM_MC_BEGIN(0, 2);
11539 IEM_MC_LOCAL(uint32_t, u32Target);
11540 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
11541 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
11542 IEM_MC_FETCH_MEM_U32(u32Target, pIemCpu->iEffSeg, GCPtrEffSrc);
11543 IEM_MC_SET_RIP_U32(u32Target);
11544 IEM_MC_END()
11545 return VINF_SUCCESS;
11546
11547 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11548 }
11549 }
11550}
11551
11552
11553/**
11554 * Opcode 0xff /5.
11555 * @param bRm The RM byte.
11556 */
11557FNIEMOP_DEF_1(iemOp_Grp5_jmpf_Ep, uint8_t, bRm)
11558{
11559 IEMOP_MNEMONIC("jmp Ep");
11560 IEMOP_HLP_NO_64BIT();
11561 /** @todo could share all the decoding with iemOp_Grp5_callf_Ep. */
11562
11563 /* Decode the far pointer address and pass it on to the far call C
11564 implementation. */
11565 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
11566 {
11567 /** @todo How the heck does a 'callf eax' work? Probably just have to
11568 * search the docs... */
11569 AssertFailedReturn(VERR_NOT_IMPLEMENTED);
11570 }
11571
11572 /* Far pointer loaded from memory. */
11573 switch (pIemCpu->enmEffOpSize)
11574 {
11575 case IEMMODE_16BIT:
11576 IEM_MC_BEGIN(3, 1);
11577 IEM_MC_ARG(uint16_t, u16Sel, 0);
11578 IEM_MC_ARG(uint16_t, offSeg, 1);
11579 IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize, IEMMODE_16BIT, 2);
11580 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
11581 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
11582 IEM_MC_FETCH_MEM_U16(offSeg, pIemCpu->iEffSeg, GCPtrEffSrc);
11583 IEM_MC_FETCH_MEM_U16_DISP(u16Sel, pIemCpu->iEffSeg, GCPtrEffSrc, 2);
11584 IEM_MC_CALL_CIMPL_3(iemCImpl_FarJmp, u16Sel, offSeg, enmEffOpSize);
11585 IEM_MC_END();
11586 return VINF_SUCCESS;
11587
11588 case IEMMODE_32BIT:
11589 IEM_MC_BEGIN(3, 1);
11590 IEM_MC_ARG(uint16_t, u16Sel, 0);
11591 IEM_MC_ARG(uint32_t, offSeg, 1);
11592 IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize, IEMMODE_32BIT, 2);
11593 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
11594 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
11595 IEM_MC_FETCH_MEM_U32(offSeg, pIemCpu->iEffSeg, GCPtrEffSrc);
11596 IEM_MC_FETCH_MEM_U16_DISP(u16Sel, pIemCpu->iEffSeg, GCPtrEffSrc, 4);
11597 IEM_MC_CALL_CIMPL_3(iemCImpl_FarJmp, u16Sel, offSeg, enmEffOpSize);
11598 IEM_MC_END();
11599 return VINF_SUCCESS;
11600
11601 case IEMMODE_64BIT:
11602 IEM_MC_BEGIN(3, 1);
11603 IEM_MC_ARG(uint16_t, u16Sel, 0);
11604 IEM_MC_ARG(uint64_t, offSeg, 1);
11605 IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize, IEMMODE_16BIT, 2);
11606 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
11607 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
11608 IEM_MC_FETCH_MEM_U64(offSeg, pIemCpu->iEffSeg, GCPtrEffSrc);
11609 IEM_MC_FETCH_MEM_U16_DISP(u16Sel, pIemCpu->iEffSeg, GCPtrEffSrc, 8);
11610 IEM_MC_CALL_CIMPL_3(iemCImpl_FarJmp, u16Sel, offSeg, enmEffOpSize);
11611 IEM_MC_END();
11612 return VINF_SUCCESS;
11613
11614 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11615 }
11616}
11617
11618
/**
 * Opcode 0xff /6 - push Ev.
 *
 * Register operands are forwarded to the common push-register worker;
 * memory operands are loaded here and pushed via IEM_MC_PUSH_*.
 *
 * @param   bRm     The RM byte.
 */
FNIEMOP_DEF_1(iemOp_Grp5_push_Ev, uint8_t, bRm)
{
    IEMOP_MNEMONIC("push Ev");
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo Too early? */

    /* Registers are handled by a common worker. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
        return FNIEMOP_CALL_1(iemOpCommonPushGReg, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);

    /* Memory we do here. */
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            IEM_MC_BEGIN(0, 2);
            IEM_MC_LOCAL(uint16_t, u16Src);
            IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
            IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
            IEM_MC_FETCH_MEM_U16(u16Src, pIemCpu->iEffSeg, GCPtrEffSrc);
            IEM_MC_PUSH_U16(u16Src);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_32BIT:
            IEM_MC_BEGIN(0, 2);
            IEM_MC_LOCAL(uint32_t, u32Src);
            IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
            IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
            IEM_MC_FETCH_MEM_U32(u32Src, pIemCpu->iEffSeg, GCPtrEffSrc);
            IEM_MC_PUSH_U32(u32Src);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_64BIT:
            IEM_MC_BEGIN(0, 2);
            IEM_MC_LOCAL(uint64_t, u64Src);
            IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
            IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
            IEM_MC_FETCH_MEM_U64(u64Src, pIemCpu->iEffSeg, GCPtrEffSrc);
            IEM_MC_PUSH_U64(u64Src);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;
    }
    /* All IEMMODE values are handled above; defensive fallback. */
    AssertFailedReturn(VERR_NOT_IMPLEMENTED);
}
11671
11672
11673/** Opcode 0xff. */
11674FNIEMOP_DEF(iemOp_Grp5)
11675{
11676 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
11677 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
11678 {
11679 case 0:
11680 IEMOP_MNEMONIC("inc Ev");
11681 return FNIEMOP_CALL_2(iemOpCommonUnaryEv, bRm, &g_iemAImpl_inc);
11682 case 1:
11683 IEMOP_MNEMONIC("dec Ev");
11684 return FNIEMOP_CALL_2(iemOpCommonUnaryEv, bRm, &g_iemAImpl_dec);
11685 case 2:
11686 return FNIEMOP_CALL_1(iemOp_Grp5_calln_Ev, bRm);
11687 case 3:
11688 return FNIEMOP_CALL_1(iemOp_Grp5_callf_Ep, bRm);
11689 case 4:
11690 return FNIEMOP_CALL_1(iemOp_Grp5_jmpn_Ev, bRm);
11691 case 5:
11692 return FNIEMOP_CALL_1(iemOp_Grp5_jmpf_Ep, bRm);
11693 case 6:
11694 return FNIEMOP_CALL_1(iemOp_Grp5_push_Ev, bRm);
11695 case 7:
11696 IEMOP_MNEMONIC("grp5-ud");
11697 return IEMOP_RAISE_INVALID_OPCODE();
11698 }
11699 AssertFailedReturn(VERR_INTERNAL_ERROR_2);
11700}
11701
11702
11703
/**
 * The one-byte opcode decoder table, indexed by the opcode byte
 * (0x00..0xff).  Forward declared as extern at the top of this file; each
 * entry is the FNIEMOP decoder for that opcode (prefixes, escapes and
 * groups included).
 */
const PFNIEMOP g_apfnOneByteMap[256] =
{
    /* 0x00 */  iemOp_add_Eb_Gb,        iemOp_add_Ev_Gv,        iemOp_add_Gb_Eb,        iemOp_add_Gv_Ev,
    /* 0x04 */  iemOp_add_Al_Ib,        iemOp_add_eAX_Iz,       iemOp_push_ES,          iemOp_pop_ES,
    /* 0x08 */  iemOp_or_Eb_Gb,         iemOp_or_Ev_Gv,         iemOp_or_Gb_Eb,         iemOp_or_Gv_Ev,
    /* 0x0c */  iemOp_or_Al_Ib,         iemOp_or_eAX_Iz,        iemOp_push_CS,          iemOp_2byteEscape,
    /* 0x10 */  iemOp_adc_Eb_Gb,        iemOp_adc_Ev_Gv,        iemOp_adc_Gb_Eb,        iemOp_adc_Gv_Ev,
    /* 0x14 */  iemOp_adc_Al_Ib,        iemOp_adc_eAX_Iz,       iemOp_push_SS,          iemOp_pop_SS,
    /* 0x18 */  iemOp_sbb_Eb_Gb,        iemOp_sbb_Ev_Gv,        iemOp_sbb_Gb_Eb,        iemOp_sbb_Gv_Ev,
    /* 0x1c */  iemOp_sbb_Al_Ib,        iemOp_sbb_eAX_Iz,       iemOp_push_DS,          iemOp_pop_DS,
    /* 0x20 */  iemOp_and_Eb_Gb,        iemOp_and_Ev_Gv,        iemOp_and_Gb_Eb,        iemOp_and_Gv_Ev,
    /* 0x24 */  iemOp_and_Al_Ib,        iemOp_and_eAX_Iz,       iemOp_seg_ES,           iemOp_daa,
    /* 0x28 */  iemOp_sub_Eb_Gb,        iemOp_sub_Ev_Gv,        iemOp_sub_Gb_Eb,        iemOp_sub_Gv_Ev,
    /* 0x2c */  iemOp_sub_Al_Ib,        iemOp_sub_eAX_Iz,       iemOp_seg_CS,           iemOp_das,
    /* 0x30 */  iemOp_xor_Eb_Gb,        iemOp_xor_Ev_Gv,        iemOp_xor_Gb_Eb,        iemOp_xor_Gv_Ev,
    /* 0x34 */  iemOp_xor_Al_Ib,        iemOp_xor_eAX_Iz,       iemOp_seg_SS,           iemOp_aaa,
    /* 0x38 */  iemOp_cmp_Eb_Gb,        iemOp_cmp_Ev_Gv,        iemOp_cmp_Gb_Eb,        iemOp_cmp_Gv_Ev,
    /* 0x3c */  iemOp_cmp_Al_Ib,        iemOp_cmp_eAX_Iz,       iemOp_seg_DS,           iemOp_aas,
    /* 0x40 */  iemOp_inc_eAX,          iemOp_inc_eCX,          iemOp_inc_eDX,          iemOp_inc_eBX,
    /* 0x44 */  iemOp_inc_eSP,          iemOp_inc_eBP,          iemOp_inc_eSI,          iemOp_inc_eDI,
    /* 0x48 */  iemOp_dec_eAX,          iemOp_dec_eCX,          iemOp_dec_eDX,          iemOp_dec_eBX,
    /* 0x4c */  iemOp_dec_eSP,          iemOp_dec_eBP,          iemOp_dec_eSI,          iemOp_dec_eDI,
    /* 0x50 */  iemOp_push_eAX,         iemOp_push_eCX,         iemOp_push_eDX,         iemOp_push_eBX,
    /* 0x54 */  iemOp_push_eSP,         iemOp_push_eBP,         iemOp_push_eSI,         iemOp_push_eDI,
    /* 0x58 */  iemOp_pop_eAX,          iemOp_pop_eCX,          iemOp_pop_eDX,          iemOp_pop_eBX,
    /* 0x5c */  iemOp_pop_eSP,          iemOp_pop_eBP,          iemOp_pop_eSI,          iemOp_pop_eDI,
    /* 0x60 */  iemOp_pusha,            iemOp_popa,             iemOp_bound_Gv_Ma,      iemOp_arpl_Ew_Gw,
    /* 0x64 */  iemOp_seg_FS,           iemOp_seg_GS,           iemOp_op_size,          iemOp_addr_size,
    /* 0x68 */  iemOp_push_Iz,          iemOp_imul_Gv_Ev_Iz,    iemOp_push_Ib,          iemOp_imul_Gv_Ev_Ib,
    /* 0x6c */  iemOp_insb_Yb_DX,       iemOp_inswd_Yv_DX,      iemOp_outsb_Yb_DX,      iemOp_outswd_Yv_DX,
    /* 0x70 */  iemOp_jo_Jb,            iemOp_jno_Jb,           iemOp_jc_Jb,            iemOp_jnc_Jb,
    /* 0x74 */  iemOp_je_Jb,            iemOp_jne_Jb,           iemOp_jbe_Jb,           iemOp_jnbe_Jb,
    /* 0x78 */  iemOp_js_Jb,            iemOp_jns_Jb,           iemOp_jp_Jb,            iemOp_jnp_Jb,
    /* 0x7c */  iemOp_jl_Jb,            iemOp_jnl_Jb,           iemOp_jle_Jb,           iemOp_jnle_Jb,
    /* 0x80 */  iemOp_Grp1_Eb_Ib_80,    iemOp_Grp1_Ev_Iz,       iemOp_Grp1_Eb_Ib_82,    iemOp_Grp1_Ev_Ib,
    /* 0x84 */  iemOp_test_Eb_Gb,       iemOp_test_Ev_Gv,       iemOp_xchg_Eb_Gb,       iemOp_xchg_Ev_Gv,
    /* 0x88 */  iemOp_mov_Eb_Gb,        iemOp_mov_Ev_Gv,        iemOp_mov_Gb_Eb,        iemOp_mov_Gv_Ev,
    /* 0x8c */  iemOp_mov_Ev_Sw,        iemOp_lea_Gv_M,         iemOp_mov_Sw_Ev,        iemOp_pop_Ev,
    /* 0x90 */  iemOp_nop,              iemOp_xchg_eCX_eAX,     iemOp_xchg_eDX_eAX,     iemOp_xchg_eBX_eAX,
    /* 0x94 */  iemOp_xchg_eSP_eAX,     iemOp_xchg_eBP_eAX,     iemOp_xchg_eSI_eAX,     iemOp_xchg_eDI_eAX,
    /* 0x98 */  iemOp_cbw,              iemOp_cwd,              iemOp_call_Ap,          iemOp_wait,
    /* 0x9c */  iemOp_pushf_Fv,         iemOp_popf_Fv,          iemOp_sahf,             iemOp_lahf,
    /* 0xa0 */  iemOp_mov_Al_Ob,        iemOp_mov_rAX_Ov,       iemOp_mov_Ob_AL,        iemOp_mov_Ov_rAX,
    /* 0xa4 */  iemOp_movsb_Xb_Yb,      iemOp_movswd_Xv_Yv,     iemOp_cmpsb_Xb_Yb,      iemOp_cmpswd_Xv_Yv,
    /* 0xa8 */  iemOp_test_AL_Ib,       iemOp_test_eAX_Iz,      iemOp_stosb_Yb_AL,      iemOp_stoswd_Yv_eAX,
    /* 0xac */  iemOp_lodsb_AL_Xb,      iemOp_lodswd_eAX_Xv,    iemOp_scasb_AL_Xb,      iemOp_scaswd_eAX_Xv,
    /* 0xb0 */  iemOp_mov_AL_Ib,        iemOp_CL_Ib,            iemOp_DL_Ib,            iemOp_BL_Ib,
    /* 0xb4 */  iemOp_mov_AH_Ib,        iemOp_CH_Ib,            iemOp_DH_Ib,            iemOp_BH_Ib,
    /* 0xb8 */  iemOp_eAX_Iv,           iemOp_eCX_Iv,           iemOp_eDX_Iv,           iemOp_eBX_Iv,
    /* 0xbc */  iemOp_eSP_Iv,           iemOp_eBP_Iv,           iemOp_eSI_Iv,           iemOp_eDI_Iv,
    /* 0xc0 */  iemOp_Grp2_Eb_Ib,       iemOp_Grp2_Ev_Ib,       iemOp_retn_Iw,          iemOp_retn,
    /* 0xc4 */  iemOp_les_Gv_Mp,        iemOp_lds_Gv_Mp,        iemOp_Grp11_Eb_Ib,      iemOp_Grp11_Ev_Iz,
    /* 0xc8 */  iemOp_enter_Iw_Ib,      iemOp_leave,            iemOp_retf_Iw,          iemOp_retf,
    /* 0xcc */  iemOp_int_3,            iemOp_int_Ib,           iemOp_into,             iemOp_iret,
    /* 0xd0 */  iemOp_Grp2_Eb_1,        iemOp_Grp2_Ev_1,        iemOp_Grp2_Eb_CL,       iemOp_Grp2_Ev_CL,
    /* 0xd4 */  iemOp_aam_Ib,           iemOp_aad_Ib,           iemOp_Invalid,          iemOp_xlat,
    /* 0xd8 */  iemOp_EscF0,            iemOp_EscF1,            iemOp_EscF2,            iemOp_EscF3,
    /* 0xdc */  iemOp_EscF4,            iemOp_EscF5,            iemOp_EscF6,            iemOp_EscF7,
    /* 0xe0 */  iemOp_loopne_Jb,        iemOp_loope_Jb,         iemOp_loop_Jb,          iemOp_jecxz_Jb,
    /* 0xe4 */  iemOp_in_AL_Ib,         iemOp_in_eAX_Ib,        iemOp_out_Ib_AL,        iemOp_out_Ib_eAX,
    /* 0xe8 */  iemOp_call_Jv,          iemOp_jmp_Jv,           iemOp_jmp_Ap,           iemOp_jmp_Jb,
    /* 0xec */  iemOp_in_AL_DX,         iemOp_eAX_DX,           iemOp_out_DX_AL,        iemOp_out_DX_eAX,
    /* 0xf0 */  iemOp_lock,             iemOp_Invalid,          iemOp_repne,            iemOp_repe, /** @todo 0xf1 is INT1 / ICEBP. */
    /* 0xf4 */  iemOp_hlt,              iemOp_cmc,              iemOp_Grp3_Eb,          iemOp_Grp3_Ev,
    /* 0xf8 */  iemOp_clc,              iemOp_stc,              iemOp_cli,              iemOp_sti,
    /* 0xfc */  iemOp_cld,              iemOp_std,              iemOp_Grp4,             iemOp_Grp5,
};
11771
11772
11773/** @} */
11774
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette