VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllInstructions.cpp.h@ 48935

Last change on this file since 48935 was 47986, checked in by vboxsync, 11 years ago

IEM: Fixed BT so it doesn't generate any writes.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 586.0 KB
Line 
1/* $Id: IEMAllInstructions.cpp.h 47986 2013-08-22 11:54:11Z vboxsync $ */
2/** @file
3 * IEM - Instruction Decoding and Emulation.
4 */
5
6/*
7 * Copyright (C) 2011-2013 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Global Variables *
21*******************************************************************************/
22extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
23
24
/**
 * Common worker for instructions like ADD, AND, OR, ++ with a byte
 * memory/register as the destination.
 *
 * Decodes the ModR/M byte itself.  The register form rejects the LOCK
 * prefix; the memory form maps the destination and dispatches to the locked
 * implementation when a LOCK prefix is present.
 *
 * @param pImpl Pointer to the instruction implementation (assembly).
 */
FNIEMOP_DEF_1(iemOpHlpBinaryOperator_rm_r8, PCIEMOPBINSIZES, pImpl)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);

    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        IEMOP_HLP_NO_LOCK_PREFIX(); /* no LOCK with a register destination */

        IEM_MC_BEGIN(3, 0);
        IEM_MC_ARG(uint8_t *,  pu8Dst,  0);
        IEM_MC_ARG(uint8_t,    u8Src,   1);
        IEM_MC_ARG(uint32_t *, pEFlags, 2);

        IEM_MC_FETCH_GREG_U8(u8Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
        IEM_MC_REF_GREG_U8(pu8Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_REF_EFLAGS(pEFlags);
        IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, u8Src, pEFlags);

        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /*
         * We're accessing memory.
         * Note! We're putting the eflags on the stack here so we can commit them
         *       after the memory.
         */
        /* CMP and TEST have no locked variant (pfnLockedU8 is NULL) and only
           read the destination, so map it read-only for those. */
        uint32_t const fAccess = pImpl->pfnLockedU8 ? IEM_ACCESS_DATA_RW : IEM_ACCESS_DATA_R; /* CMP,TEST */
        IEM_MC_BEGIN(3, 2);
        IEM_MC_ARG(uint8_t *,  pu8Dst,           0);
        IEM_MC_ARG(uint8_t,    u8Src,            1);
        IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEM_MC_MEM_MAP(pu8Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
        IEM_MC_FETCH_GREG_U8(u8Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
        IEM_MC_FETCH_EFLAGS(EFlags);
        if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
            IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, u8Src, pEFlags);
        else
            IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU8, pu8Dst, u8Src, pEFlags);

        IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Dst, fAccess);
        IEM_MC_COMMIT_EFLAGS(EFlags);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
85
86
87/**
88 * Common worker for word/dword/qword instructions like ADD, AND, OR, ++ with
89 * memory/register as the destination.
90 *
91 * @param pImpl Pointer to the instruction implementation (assembly).
92 */
93FNIEMOP_DEF_1(iemOpHlpBinaryOperator_rm_rv, PCIEMOPBINSIZES, pImpl)
94{
95 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
96
97 /*
98 * If rm is denoting a register, no more instruction bytes.
99 */
100 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
101 {
102 IEMOP_HLP_NO_LOCK_PREFIX();
103
104 switch (pIemCpu->enmEffOpSize)
105 {
106 case IEMMODE_16BIT:
107 IEM_MC_BEGIN(3, 0);
108 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
109 IEM_MC_ARG(uint16_t, u16Src, 1);
110 IEM_MC_ARG(uint32_t *, pEFlags, 2);
111
112 IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
113 IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
114 IEM_MC_REF_EFLAGS(pEFlags);
115 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
116
117 IEM_MC_ADVANCE_RIP();
118 IEM_MC_END();
119 break;
120
121 case IEMMODE_32BIT:
122 IEM_MC_BEGIN(3, 0);
123 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
124 IEM_MC_ARG(uint32_t, u32Src, 1);
125 IEM_MC_ARG(uint32_t *, pEFlags, 2);
126
127 IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
128 IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
129 IEM_MC_REF_EFLAGS(pEFlags);
130 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
131
132 if (pImpl != &g_iemAImpl_test)
133 IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);
134 IEM_MC_ADVANCE_RIP();
135 IEM_MC_END();
136 break;
137
138 case IEMMODE_64BIT:
139 IEM_MC_BEGIN(3, 0);
140 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
141 IEM_MC_ARG(uint64_t, u64Src, 1);
142 IEM_MC_ARG(uint32_t *, pEFlags, 2);
143
144 IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
145 IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
146 IEM_MC_REF_EFLAGS(pEFlags);
147 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
148
149 IEM_MC_ADVANCE_RIP();
150 IEM_MC_END();
151 break;
152 }
153 }
154 else
155 {
156 /*
157 * We're accessing memory.
158 * Note! We're putting the eflags on the stack here so we can commit them
159 * after the memory.
160 */
161 uint32_t const fAccess = pImpl->pfnLockedU8 ? IEM_ACCESS_DATA_RW : IEM_ACCESS_DATA_R /* CMP,TEST */;
162 switch (pIemCpu->enmEffOpSize)
163 {
164 case IEMMODE_16BIT:
165 IEM_MC_BEGIN(3, 2);
166 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
167 IEM_MC_ARG(uint16_t, u16Src, 1);
168 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
169 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
170
171 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
172 IEM_MC_MEM_MAP(pu16Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
173 IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
174 IEM_MC_FETCH_EFLAGS(EFlags);
175 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
176 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
177 else
178 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU16, pu16Dst, u16Src, pEFlags);
179
180 IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, fAccess);
181 IEM_MC_COMMIT_EFLAGS(EFlags);
182 IEM_MC_ADVANCE_RIP();
183 IEM_MC_END();
184 break;
185
186 case IEMMODE_32BIT:
187 IEM_MC_BEGIN(3, 2);
188 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
189 IEM_MC_ARG(uint32_t, u32Src, 1);
190 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
191 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
192
193 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
194 IEM_MC_MEM_MAP(pu32Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
195 IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
196 IEM_MC_FETCH_EFLAGS(EFlags);
197 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
198 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
199 else
200 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU32, pu32Dst, u32Src, pEFlags);
201
202 IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, fAccess);
203 IEM_MC_COMMIT_EFLAGS(EFlags);
204 IEM_MC_ADVANCE_RIP();
205 IEM_MC_END();
206 break;
207
208 case IEMMODE_64BIT:
209 IEM_MC_BEGIN(3, 2);
210 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
211 IEM_MC_ARG(uint64_t, u64Src, 1);
212 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
213 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
214
215 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
216 IEM_MC_MEM_MAP(pu64Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
217 IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
218 IEM_MC_FETCH_EFLAGS(EFlags);
219 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
220 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
221 else
222 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU64, pu64Dst, u64Src, pEFlags);
223
224 IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, fAccess);
225 IEM_MC_COMMIT_EFLAGS(EFlags);
226 IEM_MC_ADVANCE_RIP();
227 IEM_MC_END();
228 break;
229 }
230 }
231 return VINF_SUCCESS;
232}
233
234
/**
 * Common worker for byte instructions like ADD, AND, OR, ++ with a register as
 * the destination.
 *
 * Decodes the ModR/M byte itself.  Since the destination is always a register,
 * the LOCK prefix is rejected and no locked variant is used.
 *
 * @param pImpl Pointer to the instruction implementation (assembly).
 */
FNIEMOP_DEF_1(iemOpHlpBinaryOperator_r8_rm, PCIEMOPBINSIZES, pImpl)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        IEM_MC_BEGIN(3, 0);
        IEM_MC_ARG(uint8_t *,  pu8Dst,  0);
        IEM_MC_ARG(uint8_t,    u8Src,   1);
        IEM_MC_ARG(uint32_t *, pEFlags, 2);

        IEM_MC_FETCH_GREG_U8(u8Src, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_REF_GREG_U8(pu8Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
        IEM_MC_REF_EFLAGS(pEFlags);
        IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, u8Src, pEFlags);

        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /*
         * We're accessing memory.  The source operand is only read, so a
         * plain fetch suffices (no mapping needed).
         */
        IEM_MC_BEGIN(3, 1);
        IEM_MC_ARG(uint8_t *,  pu8Dst,  0);
        IEM_MC_ARG(uint8_t,    u8Src,   1);
        IEM_MC_ARG(uint32_t *, pEFlags, 2);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEM_MC_FETCH_MEM_U8(u8Src, pIemCpu->iEffSeg, GCPtrEffDst);
        IEM_MC_REF_GREG_U8(pu8Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
        IEM_MC_REF_EFLAGS(pEFlags);
        IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, u8Src, pEFlags);

        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
286
287
288/**
289 * Common worker for word/dword/qword instructions like ADD, AND, OR, ++ with a
290 * register as the destination.
291 *
292 * @param pImpl Pointer to the instruction implementation (assembly).
293 */
294FNIEMOP_DEF_1(iemOpHlpBinaryOperator_rv_rm, PCIEMOPBINSIZES, pImpl)
295{
296 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
297 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
298
299 /*
300 * If rm is denoting a register, no more instruction bytes.
301 */
302 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
303 {
304 switch (pIemCpu->enmEffOpSize)
305 {
306 case IEMMODE_16BIT:
307 IEM_MC_BEGIN(3, 0);
308 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
309 IEM_MC_ARG(uint16_t, u16Src, 1);
310 IEM_MC_ARG(uint32_t *, pEFlags, 2);
311
312 IEM_MC_FETCH_GREG_U16(u16Src, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
313 IEM_MC_REF_GREG_U16(pu16Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
314 IEM_MC_REF_EFLAGS(pEFlags);
315 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
316
317 IEM_MC_ADVANCE_RIP();
318 IEM_MC_END();
319 break;
320
321 case IEMMODE_32BIT:
322 IEM_MC_BEGIN(3, 0);
323 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
324 IEM_MC_ARG(uint32_t, u32Src, 1);
325 IEM_MC_ARG(uint32_t *, pEFlags, 2);
326
327 IEM_MC_FETCH_GREG_U32(u32Src, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
328 IEM_MC_REF_GREG_U32(pu32Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
329 IEM_MC_REF_EFLAGS(pEFlags);
330 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
331
332 IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);
333 IEM_MC_ADVANCE_RIP();
334 IEM_MC_END();
335 break;
336
337 case IEMMODE_64BIT:
338 IEM_MC_BEGIN(3, 0);
339 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
340 IEM_MC_ARG(uint64_t, u64Src, 1);
341 IEM_MC_ARG(uint32_t *, pEFlags, 2);
342
343 IEM_MC_FETCH_GREG_U64(u64Src, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
344 IEM_MC_REF_GREG_U64(pu64Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
345 IEM_MC_REF_EFLAGS(pEFlags);
346 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
347
348 IEM_MC_ADVANCE_RIP();
349 IEM_MC_END();
350 break;
351 }
352 }
353 else
354 {
355 /*
356 * We're accessing memory.
357 */
358 switch (pIemCpu->enmEffOpSize)
359 {
360 case IEMMODE_16BIT:
361 IEM_MC_BEGIN(3, 1);
362 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
363 IEM_MC_ARG(uint16_t, u16Src, 1);
364 IEM_MC_ARG(uint32_t *, pEFlags, 2);
365 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
366
367 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
368 IEM_MC_FETCH_MEM_U16(u16Src, pIemCpu->iEffSeg, GCPtrEffDst);
369 IEM_MC_REF_GREG_U16(pu16Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
370 IEM_MC_REF_EFLAGS(pEFlags);
371 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
372
373 IEM_MC_ADVANCE_RIP();
374 IEM_MC_END();
375 break;
376
377 case IEMMODE_32BIT:
378 IEM_MC_BEGIN(3, 1);
379 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
380 IEM_MC_ARG(uint32_t, u32Src, 1);
381 IEM_MC_ARG(uint32_t *, pEFlags, 2);
382 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
383
384 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
385 IEM_MC_FETCH_MEM_U32(u32Src, pIemCpu->iEffSeg, GCPtrEffDst);
386 IEM_MC_REF_GREG_U32(pu32Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
387 IEM_MC_REF_EFLAGS(pEFlags);
388 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
389
390 IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);
391 IEM_MC_ADVANCE_RIP();
392 IEM_MC_END();
393 break;
394
395 case IEMMODE_64BIT:
396 IEM_MC_BEGIN(3, 1);
397 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
398 IEM_MC_ARG(uint64_t, u64Src, 1);
399 IEM_MC_ARG(uint32_t *, pEFlags, 2);
400 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
401
402 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
403 IEM_MC_FETCH_MEM_U64(u64Src, pIemCpu->iEffSeg, GCPtrEffDst);
404 IEM_MC_REF_GREG_U64(pu64Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
405 IEM_MC_REF_EFLAGS(pEFlags);
406 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
407
408 IEM_MC_ADVANCE_RIP();
409 IEM_MC_END();
410 break;
411 }
412 }
413 return VINF_SUCCESS;
414}
415
416
/**
 * Common worker for instructions like ADD, AND, OR, ++ with working on AL with
 * a byte immediate.
 *
 * Fetches the immediate itself; always operates on the AL register, so no
 * ModR/M decoding is involved and the LOCK prefix is rejected.
 *
 * @param pImpl Pointer to the instruction implementation (assembly).
 */
FNIEMOP_DEF_1(iemOpHlpBinaryOperator_AL_Ib, PCIEMOPBINSIZES, pImpl)
{
    uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();

    IEM_MC_BEGIN(3, 0);
    IEM_MC_ARG(uint8_t *,       pu8Dst,             0);
    IEM_MC_ARG_CONST(uint8_t,   u8Src,/*=*/ u8Imm,  1);
    IEM_MC_ARG(uint32_t *,      pEFlags,            2);

    IEM_MC_REF_GREG_U8(pu8Dst, X86_GREG_xAX);
    IEM_MC_REF_EFLAGS(pEFlags);
    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, u8Src, pEFlags);

    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
441
442
/**
 * Common worker for instructions like ADD, AND, OR, ++ with working on
 * AX/EAX/RAX with a word/dword immediate.
 *
 * The immediate is fetched according to the effective operand size; in 64-bit
 * mode a 32-bit immediate is fetched and sign-extended to 64 bits.
 *
 * @param pImpl Pointer to the instruction implementation (assembly).
 */
FNIEMOP_DEF_1(iemOpHlpBinaryOperator_rAX_Iz, PCIEMOPBINSIZES, pImpl)
{
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
        {
            uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
            IEMOP_HLP_NO_LOCK_PREFIX();

            IEM_MC_BEGIN(3, 0);
            IEM_MC_ARG(uint16_t *,      pu16Dst,                0);
            IEM_MC_ARG_CONST(uint16_t,  u16Src,/*=*/ u16Imm,    1);
            IEM_MC_ARG(uint32_t *,      pEFlags,                2);

            IEM_MC_REF_GREG_U16(pu16Dst, X86_GREG_xAX);
            IEM_MC_REF_EFLAGS(pEFlags);
            IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);

            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;
        }

        case IEMMODE_32BIT:
        {
            uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
            IEMOP_HLP_NO_LOCK_PREFIX();

            IEM_MC_BEGIN(3, 0);
            IEM_MC_ARG(uint32_t *,      pu32Dst,                0);
            IEM_MC_ARG_CONST(uint32_t,  u32Src,/*=*/ u32Imm,    1);
            IEM_MC_ARG(uint32_t *,      pEFlags,                2);

            IEM_MC_REF_GREG_U32(pu32Dst, X86_GREG_xAX);
            IEM_MC_REF_EFLAGS(pEFlags);
            IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);

            /* TEST doesn't write the destination, so don't zero-extend it. */
            if (pImpl != &g_iemAImpl_test)
                IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;
        }

        case IEMMODE_64BIT:
        {
            /* 32-bit immediate, sign-extended to 64 bits. */
            uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm);
            IEMOP_HLP_NO_LOCK_PREFIX();

            IEM_MC_BEGIN(3, 0);
            IEM_MC_ARG(uint64_t *,      pu64Dst,                0);
            IEM_MC_ARG_CONST(uint64_t,  u64Src,/*=*/ u64Imm,    1);
            IEM_MC_ARG(uint32_t *,      pEFlags,                2);

            IEM_MC_REF_GREG_U64(pu64Dst, X86_GREG_xAX);
            IEM_MC_REF_EFLAGS(pEFlags);
            IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);

            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;
        }

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
515
516
/** Opcodes 0xf1, 0xd6 - always raise \#UD in this implementation. */
FNIEMOP_DEF(iemOp_Invalid)
{
    IEMOP_MNEMONIC("Invalid");
    return IEMOP_RAISE_INVALID_OPCODE();
}
523
524
525
526/** @name ..... opcodes.
527 *
528 * @{
529 */
530
531/** @} */
532
533
534/** @name Two byte opcodes (first byte 0x0f).
535 *
536 * @{
537 */
538
/** Opcode 0x0f 0x00 /0 - SLDT: store the LDTR selector to a register
 *  (operand-size dependent) or to a 16-bit memory operand. */
FNIEMOP_DEF_1(iemOp_Grp6_sldt, uint8_t, bRm)
{
    IEMOP_MNEMONIC("sldt Rv/Mw");
    IEMOP_HLP_NO_REAL_OR_V86_MODE();

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        IEMOP_HLP_DECODED_NL_1(OP_SLDT, IEMOPFORM_M_REG, OP_PARM_Ew, DISOPTYPE_DANGEROUS | DISOPTYPE_PRIVILEGED_NOTRAP);
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint16_t, u16Ldtr);
                IEM_MC_FETCH_LDTR_U16(u16Ldtr);
                IEM_MC_STORE_GREG_U16((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u16Ldtr);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint32_t, u32Ldtr);
                IEM_MC_FETCH_LDTR_U32(u32Ldtr);
                IEM_MC_STORE_GREG_U32((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u32Ldtr);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint64_t, u64Ldtr);
                IEM_MC_FETCH_LDTR_U64(u64Ldtr);
                IEM_MC_STORE_GREG_U64((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u64Ldtr);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /* Memory destination is always 16-bit regardless of operand size. */
        IEM_MC_BEGIN(0, 2);
        IEM_MC_LOCAL(uint16_t, u16Ldtr);
        IEM_MC_LOCAL(RTGCPTR,  GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEMOP_HLP_DECODED_NL_1(OP_SLDT, IEMOPFORM_M_MEM, OP_PARM_Ew, DISOPTYPE_DANGEROUS | DISOPTYPE_PRIVILEGED_NOTRAP);
        IEM_MC_FETCH_LDTR_U16(u16Ldtr);
        IEM_MC_STORE_MEM_U16(pIemCpu->iEffSeg, GCPtrEffDst, u16Ldtr);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
594
595
/** Opcode 0x0f 0x00 /1 - STR: store the task register selector to a register
 *  (operand-size dependent) or to a 16-bit memory operand. */
FNIEMOP_DEF_1(iemOp_Grp6_str, uint8_t, bRm)
{
    IEMOP_MNEMONIC("str Rv/Mw");
    IEMOP_HLP_NO_REAL_OR_V86_MODE();

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        IEMOP_HLP_DECODED_NL_1(OP_STR, IEMOPFORM_M_REG, OP_PARM_Ew, DISOPTYPE_DANGEROUS | DISOPTYPE_PRIVILEGED_NOTRAP);
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint16_t, u16Tr);
                IEM_MC_FETCH_TR_U16(u16Tr);
                IEM_MC_STORE_GREG_U16((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u16Tr);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint32_t, u32Tr);
                IEM_MC_FETCH_TR_U32(u32Tr);
                IEM_MC_STORE_GREG_U32((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u32Tr);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint64_t, u64Tr);
                IEM_MC_FETCH_TR_U64(u64Tr);
                IEM_MC_STORE_GREG_U64((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u64Tr);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /* Memory destination is always 16-bit regardless of operand size. */
        IEM_MC_BEGIN(0, 2);
        IEM_MC_LOCAL(uint16_t, u16Tr);
        IEM_MC_LOCAL(RTGCPTR,  GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEMOP_HLP_DECODED_NL_1(OP_STR, IEMOPFORM_M_MEM, OP_PARM_Ew, DISOPTYPE_DANGEROUS | DISOPTYPE_PRIVILEGED_NOTRAP);
        IEM_MC_FETCH_TR_U16(u16Tr);
        IEM_MC_STORE_MEM_U16(pIemCpu->iEffSeg, GCPtrEffDst, u16Tr);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
651
652
/** Opcode 0x0f 0x00 /2 - LLDT: load the LDTR from a 16-bit register or
 *  memory operand; the heavy lifting is done by iemCImpl_lldt. */
FNIEMOP_DEF_1(iemOp_Grp6_lldt, uint8_t, bRm)
{
    IEMOP_MNEMONIC("lldt Ew");
    IEMOP_HLP_NO_REAL_OR_V86_MODE();

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        IEMOP_HLP_DECODED_NL_1(OP_LLDT, IEMOPFORM_M_REG, OP_PARM_Ew, DISOPTYPE_DANGEROUS);
        IEM_MC_BEGIN(1, 0);
        IEM_MC_ARG(uint16_t, u16Sel, 0);
        IEM_MC_FETCH_GREG_U16(u16Sel, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_CALL_CIMPL_1(iemCImpl_lldt, u16Sel);
        IEM_MC_END();
    }
    else
    {
        IEM_MC_BEGIN(1, 1);
        IEM_MC_ARG(uint16_t, u16Sel, 0);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
        IEMOP_HLP_DECODED_NL_1(OP_LLDT, IEMOPFORM_M_MEM, OP_PARM_Ew, DISOPTYPE_DANGEROUS);
        IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO(); /** @todo test order */
        IEM_MC_FETCH_MEM_U16(u16Sel, pIemCpu->iEffSeg, GCPtrEffSrc);
        IEM_MC_CALL_CIMPL_1(iemCImpl_lldt, u16Sel);
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
682
683
/** Opcode 0x0f 0x00 /3 - LTR: load the task register from a 16-bit register
 *  or memory operand; the heavy lifting is done by iemCImpl_ltr. */
FNIEMOP_DEF_1(iemOp_Grp6_ltr, uint8_t, bRm)
{
    IEMOP_MNEMONIC("ltr Ew");
    IEMOP_HLP_NO_REAL_OR_V86_MODE();

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
        IEM_MC_BEGIN(1, 0);
        IEM_MC_ARG(uint16_t, u16Sel, 0);
        IEM_MC_FETCH_GREG_U16(u16Sel, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_CALL_CIMPL_1(iemCImpl_ltr, u16Sel);
        IEM_MC_END();
    }
    else
    {
        IEM_MC_BEGIN(1, 1);
        IEM_MC_ARG(uint16_t, u16Sel, 0);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
        IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
        IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO(); /** @todo test order */
        IEM_MC_FETCH_MEM_U16(u16Sel, pIemCpu->iEffSeg, GCPtrEffSrc);
        IEM_MC_CALL_CIMPL_1(iemCImpl_ltr, u16Sel);
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
713
714
715/** Opcode 0x0f 0x00 /3. */
716FNIEMOP_DEF_2(iemOpCommonGrp6VerX, uint8_t, bRm, bool, fWrite)
717{
718 IEMOP_HLP_NO_REAL_OR_V86_MODE();
719
720 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
721 {
722 IEMOP_HLP_DECODED_NL_1(fWrite ? OP_VERW : OP_VERR, IEMOPFORM_M_MEM, OP_PARM_Ew, DISOPTYPE_DANGEROUS | DISOPTYPE_PRIVILEGED_NOTRAP);
723 IEM_MC_BEGIN(2, 0);
724 IEM_MC_ARG(uint16_t, u16Sel, 0);
725 IEM_MC_ARG_CONST(bool, fWriteArg, fWrite, 1);
726 IEM_MC_FETCH_GREG_U16(u16Sel, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
727 IEM_MC_CALL_CIMPL_2(iemCImpl_VerX, u16Sel, fWriteArg);
728 IEM_MC_END();
729 }
730 else
731 {
732 IEM_MC_BEGIN(2, 1);
733 IEM_MC_ARG(uint16_t, u16Sel, 0);
734 IEM_MC_ARG_CONST(bool, fWriteArg, fWrite, 1);
735 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
736 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
737 IEMOP_HLP_DECODED_NL_1(fWrite ? OP_VERW : OP_VERR, IEMOPFORM_M_MEM, OP_PARM_Ew, DISOPTYPE_DANGEROUS | DISOPTYPE_PRIVILEGED_NOTRAP);
738 IEM_MC_FETCH_MEM_U16(u16Sel, pIemCpu->iEffSeg, GCPtrEffSrc);
739 IEM_MC_CALL_CIMPL_2(iemCImpl_VerX, u16Sel, fWriteArg);
740 IEM_MC_END();
741 }
742 return VINF_SUCCESS;
743}
744
745
/** Opcode 0x0f 0x00 /4 - VERR: verify segment for reading. */
FNIEMOP_DEF_1(iemOp_Grp6_verr, uint8_t, bRm)
{
    IEMOP_MNEMONIC("verr Ew");
    /* fWrite=false selects the VERR behavior in the common worker. */
    return FNIEMOP_CALL_2(iemOpCommonGrp6VerX, bRm, false);
}
752
753
754/** Opcode 0x0f 0x00 /5. */
755FNIEMOP_DEF_1(iemOp_Grp6_verw, uint8_t, bRm)
756{
757 IEMOP_MNEMONIC("verr Ew");
758 return FNIEMOP_CALL_2(iemOpCommonGrp6VerX, bRm, true);
759}
760
761
762/** Opcode 0x0f 0x00. */
763FNIEMOP_DEF(iemOp_Grp6)
764{
765 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
766 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
767 {
768 case 0: return FNIEMOP_CALL_1(iemOp_Grp6_sldt, bRm);
769 case 1: return FNIEMOP_CALL_1(iemOp_Grp6_str, bRm);
770 case 2: return FNIEMOP_CALL_1(iemOp_Grp6_lldt, bRm);
771 case 3: return FNIEMOP_CALL_1(iemOp_Grp6_ltr, bRm);
772 case 4: return FNIEMOP_CALL_1(iemOp_Grp6_verr, bRm);
773 case 5: return FNIEMOP_CALL_1(iemOp_Grp6_verw, bRm);
774 case 6: return IEMOP_RAISE_INVALID_OPCODE();
775 case 7: return IEMOP_RAISE_INVALID_OPCODE();
776 IEM_NOT_REACHED_DEFAULT_CASE_RET();
777 }
778
779}
780
781
/** Opcode 0x0f 0x01 /0 (memory form) - SGDT: store the GDTR to memory.
 *  The heavy lifting is done by iemCImpl_sgdt. */
FNIEMOP_DEF_1(iemOp_Grp7_sgdt, uint8_t, bRm)
{
    IEMOP_MNEMONIC("sgdt Ms");
    IEMOP_HLP_64BIT_OP_SIZE();
    IEM_MC_BEGIN(3, 1);
    IEM_MC_ARG_CONST(uint8_t,   iEffSeg,          /*=*/pIemCpu->iEffSeg,        0);
    IEM_MC_ARG(RTGCPTR,         GCPtrEffSrc,                                    1);
    IEM_MC_ARG_CONST(IEMMODE,   enmEffOpSizeArg,/*=*/pIemCpu->enmEffOpSize,     2);
    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    IEM_MC_CALL_CIMPL_3(iemCImpl_sgdt, iEffSeg, GCPtrEffSrc, enmEffOpSizeArg);
    IEM_MC_END();
    return VINF_SUCCESS;
}
797
798
/** Opcode 0x0f 0x01 0xc1 (Grp7 /0, mod=3, rm=1) - VMCALL; unimplemented stub. */
FNIEMOP_DEF(iemOp_Grp7_vmcall)
{
    IEMOP_BITCH_ABOUT_STUB();
    return IEMOP_RAISE_INVALID_OPCODE();
}
805
806
/** Opcode 0x0f 0x01 0xc2 (Grp7 /0, mod=3, rm=2) - VMLAUNCH; unimplemented stub. */
FNIEMOP_DEF(iemOp_Grp7_vmlaunch)
{
    IEMOP_BITCH_ABOUT_STUB();
    return IEMOP_RAISE_INVALID_OPCODE();
}
813
814
/** Opcode 0x0f 0x01 0xc3 (Grp7 /0, mod=3, rm=3) - VMRESUME; unimplemented stub. */
FNIEMOP_DEF(iemOp_Grp7_vmresume)
{
    IEMOP_BITCH_ABOUT_STUB();
    return IEMOP_RAISE_INVALID_OPCODE();
}
821
822
/** Opcode 0x0f 0x01 0xc4 (Grp7 /0, mod=3, rm=4) - VMXOFF; unimplemented stub. */
FNIEMOP_DEF(iemOp_Grp7_vmxoff)
{
    IEMOP_BITCH_ABOUT_STUB();
    return IEMOP_RAISE_INVALID_OPCODE();
}
829
830
/** Opcode 0x0f 0x01 /1 (memory form) - SIDT: store the IDTR to memory.
 *  The heavy lifting is done by iemCImpl_sidt. */
FNIEMOP_DEF_1(iemOp_Grp7_sidt, uint8_t, bRm)
{
    IEMOP_MNEMONIC("sidt Ms");
    IEMOP_HLP_64BIT_OP_SIZE();
    IEM_MC_BEGIN(3, 1);
    IEM_MC_ARG_CONST(uint8_t,   iEffSeg,          /*=*/pIemCpu->iEffSeg,        0);
    IEM_MC_ARG(RTGCPTR,         GCPtrEffSrc,                                    1);
    IEM_MC_ARG_CONST(IEMMODE,   enmEffOpSizeArg,/*=*/pIemCpu->enmEffOpSize,     2);
    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    IEM_MC_CALL_CIMPL_3(iemCImpl_sidt, iEffSeg, GCPtrEffSrc, enmEffOpSizeArg);
    IEM_MC_END();
    return VINF_SUCCESS;
}
846
847
/** Opcode 0x0f 0x01 /1 (mod=3, rm=0) - MONITOR; deferred to iemCImpl_monitor. */
FNIEMOP_DEF(iemOp_Grp7_monitor)
{
    IEMOP_MNEMONIC("monitor");
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); /** @todo Verify that monitor is allergic to lock prefixes. */
    return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_monitor, pIemCpu->iEffSeg);
}
855
856
/** Opcode 0x0f 0x01 /1 (mod=3, rm=1) - MWAIT; deferred to iemCImpl_mwait. */
FNIEMOP_DEF(iemOp_Grp7_mwait)
{
    IEMOP_MNEMONIC("mwait"); /** @todo Verify that mwait is allergic to lock prefixes. */
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_mwait);
}
864
865
/** Opcode 0x0f 0x01 /2 (memory form) - LGDT: load the GDTR from memory.
 *  The heavy lifting is done by iemCImpl_lgdt. */
FNIEMOP_DEF_1(iemOp_Grp7_lgdt, uint8_t, bRm)
{
    IEMOP_MNEMONIC("lgdt");
    IEMOP_HLP_NO_LOCK_PREFIX();

    IEMOP_HLP_64BIT_OP_SIZE();
    IEM_MC_BEGIN(3, 1);
    IEM_MC_ARG_CONST(uint8_t,   iEffSeg,          /*=*/pIemCpu->iEffSeg,        0);
    IEM_MC_ARG(RTGCPTR,         GCPtrEffSrc,                                    1);
    IEM_MC_ARG_CONST(IEMMODE,   enmEffOpSizeArg,/*=*/pIemCpu->enmEffOpSize,     2);
    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
    IEM_MC_CALL_CIMPL_3(iemCImpl_lgdt, iEffSeg, GCPtrEffSrc, enmEffOpSizeArg);
    IEM_MC_END();
    return VINF_SUCCESS;
}
882
883
/** Opcode 0x0f 0x01 /2 (register form) - XGETBV; not implemented, asserts
 *  in debug builds and raises \#UD. */
FNIEMOP_DEF(iemOp_Grp7_xgetbv)
{
    AssertFailed();
    return IEMOP_RAISE_INVALID_OPCODE();
}
890
891
/** Opcode 0x0f 0x01 /2 (register form) - XSETBV; not implemented, asserts
 *  in debug builds and raises \#UD. */
FNIEMOP_DEF(iemOp_Grp7_xsetbv)
{
    AssertFailed();
    return IEMOP_RAISE_INVALID_OPCODE();
}
898
899
900/** Opcode 0x0f 0x01 /3. */
901FNIEMOP_DEF_1(iemOp_Grp7_lidt, uint8_t, bRm)
902{
903 IEMOP_HLP_NO_LOCK_PREFIX();
904
905 IEMMODE enmEffOpSize = pIemCpu->enmCpuMode == IEMMODE_64BIT
906 ? IEMMODE_64BIT
907 : pIemCpu->enmEffOpSize;
908 IEM_MC_BEGIN(3, 1);
909 IEM_MC_ARG_CONST(uint8_t, iEffSeg, /*=*/pIemCpu->iEffSeg, 0);
910 IEM_MC_ARG(RTGCPTR, GCPtrEffSrc, 1);
911 IEM_MC_ARG_CONST(IEMMODE, enmEffOpSizeArg,/*=*/enmEffOpSize, 2);
912 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
913 IEM_MC_CALL_CIMPL_3(iemCImpl_lidt, iEffSeg, GCPtrEffSrc, enmEffOpSizeArg);
914 IEM_MC_END();
915 return VINF_SUCCESS;
916}
917
918
/*
 * AMD SVM instructions (Grp7, mod=3, encodings 0x0f 0x01 0xd8..0xdf).
 * All are unimplemented stubs that raise \#UD.
 */

/** Opcode 0x0f 0x01 0xd8. */
FNIEMOP_UD_STUB(iemOp_Grp7_Amd_vmrun);

/** Opcode 0x0f 0x01 0xd9. */
FNIEMOP_UD_STUB(iemOp_Grp7_Amd_vmmcall);

/** Opcode 0x0f 0x01 0xda. */
FNIEMOP_UD_STUB(iemOp_Grp7_Amd_vmload);

/** Opcode 0x0f 0x01 0xdb. */
FNIEMOP_UD_STUB(iemOp_Grp7_Amd_vmsave);

/** Opcode 0x0f 0x01 0xdc. */
FNIEMOP_UD_STUB(iemOp_Grp7_Amd_stgi);

/** Opcode 0x0f 0x01 0xdd. */
FNIEMOP_UD_STUB(iemOp_Grp7_Amd_clgi);

/** Opcode 0x0f 0x01 0xde. */
FNIEMOP_UD_STUB(iemOp_Grp7_Amd_skinit);

/** Opcode 0x0f 0x01 0xdf. */
FNIEMOP_UD_STUB(iemOp_Grp7_Amd_invlpga);
942
/** Opcode 0x0f 0x01 /4 - SMSW: store the machine status word (CR0 low part)
 *  to a register (operand-size dependent) or a 16-bit memory operand. */
FNIEMOP_DEF_1(iemOp_Grp7_smsw, uint8_t, bRm)
{
    IEMOP_MNEMONIC("smsw");
    IEMOP_HLP_NO_LOCK_PREFIX();
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint16_t, u16Tmp);
                IEM_MC_FETCH_CR0_U16(u16Tmp);
                IEM_MC_STORE_GREG_U16((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u16Tmp);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint32_t, u32Tmp);
                IEM_MC_FETCH_CR0_U32(u32Tmp);
                IEM_MC_STORE_GREG_U32((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u32Tmp);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint64_t, u64Tmp);
                IEM_MC_FETCH_CR0_U64(u64Tmp);
                IEM_MC_STORE_GREG_U64((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u64Tmp);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /* Ignore operand size here, memory refs are always 16-bit. */
        IEM_MC_BEGIN(0, 2);
        IEM_MC_LOCAL(uint16_t, u16Tmp);
        IEM_MC_LOCAL(RTGCPTR,  GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEM_MC_FETCH_CR0_U16(u16Tmp);
        IEM_MC_STORE_MEM_U16(pIemCpu->iEffSeg, GCPtrEffDst, u16Tmp);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
        return VINF_SUCCESS;
    }
}
996
997
/** Opcode 0x0f 0x01 /6 - LMSW: load the machine status word from a 16-bit
 *  register or memory operand; the heavy lifting is done by iemCImpl_lmsw. */
FNIEMOP_DEF_1(iemOp_Grp7_lmsw, uint8_t, bRm)
{
    /* The operand size is effectively ignored, all is 16-bit and only the
       lower 3-bits are used. */
    IEMOP_MNEMONIC("lmsw");
    IEMOP_HLP_NO_LOCK_PREFIX();
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        IEM_MC_BEGIN(1, 0);
        IEM_MC_ARG(uint16_t, u16Tmp, 0);
        IEM_MC_FETCH_GREG_U16(u16Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_CALL_CIMPL_1(iemCImpl_lmsw, u16Tmp);
        IEM_MC_END();
    }
    else
    {
        IEM_MC_BEGIN(1, 1);
        IEM_MC_ARG(uint16_t, u16Tmp, 0);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEM_MC_FETCH_MEM_U16(u16Tmp, pIemCpu->iEffSeg, GCPtrEffDst);
        IEM_MC_CALL_CIMPL_1(iemCImpl_lmsw, u16Tmp);
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
1025
1026
/** Opcode 0x0f 0x01 /7, memory form only - invlpg Mb (invalidate TLB entry).
 *  The mod=3 encodings (swapgs/rdtscp) are routed elsewhere by iemOp_Grp7, so
 *  computing the effective address here is always valid. */
FNIEMOP_DEF_1(iemOp_Grp7_invlpg, uint8_t, bRm)
{
    IEMOP_MNEMONIC("invlpg");
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEM_MC_BEGIN(1, 1);
    IEM_MC_ARG(RTGCPTR, GCPtrEffDst, 0);
    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
    IEM_MC_CALL_CIMPL_1(iemCImpl_invlpg, GCPtrEffDst); /* privilege checks done in C impl */
    IEM_MC_END();
    return VINF_SUCCESS;
}
1039
1040
/** Opcode 0x0f 0x01 /7, mod=3 rm=0 - swapgs (64-bit mode only). */
FNIEMOP_DEF(iemOp_Grp7_swapgs)
{
    IEMOP_MNEMONIC("swapgs");
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_ONLY_64BIT(); /* #UD outside long mode */
    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_swapgs);
}
1049
1050
/** Opcode 0x0f 0x01 /7, mod=3 rm=1 - rdtscp. Not implemented yet. */
FNIEMOP_DEF(iemOp_Grp7_rdtscp)
{
    NOREF(pIemCpu);
    IEMOP_BITCH_ABOUT_STUB();
    return VERR_IEM_INSTR_NOT_IMPLEMENTED;
}
1058
1059
/** Opcode 0x0f 0x01 - group 7.
 *  Memory forms are sgdt/sidt/lgdt/lidt/smsw/lmsw/invlpg; the mod=3 register
 *  forms encode VMX, monitor/mwait, xgetbv/xsetbv, the AMD SVM set, and
 *  swapgs/rdtscp, selected by the rm field. */
FNIEMOP_DEF(iemOp_Grp7)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
    {
        case 0: /* sgdt (mem) / VMX instructions (reg). */
            if ((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT))
                return FNIEMOP_CALL_1(iemOp_Grp7_sgdt, bRm);
            switch (bRm & X86_MODRM_RM_MASK)
            {
                case 1: return FNIEMOP_CALL(iemOp_Grp7_vmcall);
                case 2: return FNIEMOP_CALL(iemOp_Grp7_vmlaunch);
                case 3: return FNIEMOP_CALL(iemOp_Grp7_vmresume);
                case 4: return FNIEMOP_CALL(iemOp_Grp7_vmxoff);
            }
            return IEMOP_RAISE_INVALID_OPCODE(); /* rm=0,5..7 are undefined */

        case 1: /* sidt (mem) / monitor+mwait (reg). */
            if ((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT))
                return FNIEMOP_CALL_1(iemOp_Grp7_sidt, bRm);
            switch (bRm & X86_MODRM_RM_MASK)
            {
                case 0: return FNIEMOP_CALL(iemOp_Grp7_monitor);
                case 1: return FNIEMOP_CALL(iemOp_Grp7_mwait);
            }
            return IEMOP_RAISE_INVALID_OPCODE();

        case 2: /* lgdt (mem) / xgetbv+xsetbv (reg). */
            if ((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT))
                return FNIEMOP_CALL_1(iemOp_Grp7_lgdt, bRm);
            switch (bRm & X86_MODRM_RM_MASK)
            {
                case 0: return FNIEMOP_CALL(iemOp_Grp7_xgetbv);
                case 1: return FNIEMOP_CALL(iemOp_Grp7_xsetbv);
            }
            return IEMOP_RAISE_INVALID_OPCODE();

        case 3: /* lidt (mem) / AMD SVM instruction set (reg, all 8 rm values). */
            if ((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT))
                return FNIEMOP_CALL_1(iemOp_Grp7_lidt, bRm);
            switch (bRm & X86_MODRM_RM_MASK)
            {
                case 0: return FNIEMOP_CALL(iemOp_Grp7_Amd_vmrun);
                case 1: return FNIEMOP_CALL(iemOp_Grp7_Amd_vmmcall);
                case 2: return FNIEMOP_CALL(iemOp_Grp7_Amd_vmload);
                case 3: return FNIEMOP_CALL(iemOp_Grp7_Amd_vmsave);
                case 4: return FNIEMOP_CALL(iemOp_Grp7_Amd_stgi);
                case 5: return FNIEMOP_CALL(iemOp_Grp7_Amd_clgi);
                case 6: return FNIEMOP_CALL(iemOp_Grp7_Amd_skinit);
                case 7: return FNIEMOP_CALL(iemOp_Grp7_Amd_invlpga);
                IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* rm is 3 bits, all covered */
            }

        case 4: /* smsw - mod is handled by the worker. */
            return FNIEMOP_CALL_1(iemOp_Grp7_smsw, bRm);

        case 5: /* Undefined. */
            return IEMOP_RAISE_INVALID_OPCODE();

        case 6: /* lmsw - mod is handled by the worker. */
            return FNIEMOP_CALL_1(iemOp_Grp7_lmsw, bRm);

        case 7: /* invlpg (mem) / swapgs+rdtscp (reg). */
            if ((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT))
                return FNIEMOP_CALL_1(iemOp_Grp7_invlpg, bRm);
            switch (bRm & X86_MODRM_RM_MASK)
            {
                case 0: return FNIEMOP_CALL(iemOp_Grp7_swapgs);
                case 1: return FNIEMOP_CALL(iemOp_Grp7_rdtscp);
            }
            return IEMOP_RAISE_INVALID_OPCODE();

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
1136
/** Common worker for LAR (0x0f 0x02) and LSL (0x0f 0x03).
 *  Note that the 32-bit and 64-bit operand sizes share the 64-bit destination
 *  and the iemCImpl_LarLsl_u64 worker; only the 16-bit size has its own path.
 *  @param  fIsLar  true for LAR, false for LSL. */
FNIEMOP_DEF_1(iemOpCommonLarLsl_Gv_Ew, bool, fIsLar)
{
    IEMOP_HLP_NO_REAL_OR_V86_MODE(); /* #UD in real and V8086 mode */
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* Register form: selector comes from a GPR. */
        IEMOP_HLP_DECODED_NL_2(fIsLar ? OP_LAR : OP_LSL, IEMOPFORM_RM_REG, OP_PARM_Gv, OP_PARM_Ew, DISOPTYPE_DANGEROUS | DISOPTYPE_PRIVILEGED_NOTRAP);
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
            {
                IEM_MC_BEGIN(4, 0);
                IEM_MC_ARG(uint16_t *,  pu16Dst,           0);
                IEM_MC_ARG(uint16_t,    u16Sel,            1);
                IEM_MC_ARG(uint32_t *,  pEFlags,           2);
                IEM_MC_ARG_CONST(bool,  fIsLarArg, fIsLar, 3);

                IEM_MC_REF_GREG_U16(pu16Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_FETCH_GREG_U16(u16Sel, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags); /* ZF signals success/failure */
                IEM_MC_CALL_CIMPL_4(iemCImpl_LarLsl_u16, pu16Dst, u16Sel, pEFlags, fIsLarArg);

                IEM_MC_END();
                return VINF_SUCCESS;
            }

            case IEMMODE_32BIT:
            case IEMMODE_64BIT:
            {
                IEM_MC_BEGIN(4, 0);
                IEM_MC_ARG(uint64_t *,  pu64Dst,           0);
                IEM_MC_ARG(uint16_t,    u16Sel,            1);
                IEM_MC_ARG(uint32_t *,  pEFlags,           2);
                IEM_MC_ARG_CONST(bool,  fIsLarArg, fIsLar, 3);

                IEM_MC_REF_GREG_U64(pu64Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_FETCH_GREG_U16(u16Sel, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_CIMPL_4(iemCImpl_LarLsl_u64, pu64Dst, u16Sel, pEFlags, fIsLarArg);

                IEM_MC_END();
                return VINF_SUCCESS;
            }

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /* Memory form: the selector is always a 16-bit memory word. */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
            {
                IEM_MC_BEGIN(4, 1);
                IEM_MC_ARG(uint16_t *,  pu16Dst,           0);
                IEM_MC_ARG(uint16_t,    u16Sel,            1);
                IEM_MC_ARG(uint32_t *,  pEFlags,           2);
                IEM_MC_ARG_CONST(bool,  fIsLarArg, fIsLar, 3);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
                IEMOP_HLP_DECODED_NL_2(fIsLar ? OP_LAR : OP_LSL, IEMOPFORM_RM_MEM, OP_PARM_Gv, OP_PARM_Ew, DISOPTYPE_DANGEROUS | DISOPTYPE_PRIVILEGED_NOTRAP);

                IEM_MC_FETCH_MEM_U16(u16Sel, pIemCpu->iEffSeg, GCPtrEffSrc);
                IEM_MC_REF_GREG_U16(pu16Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_CIMPL_4(iemCImpl_LarLsl_u16, pu16Dst, u16Sel, pEFlags, fIsLarArg);

                IEM_MC_END();
                return VINF_SUCCESS;
            }

            case IEMMODE_32BIT:
            case IEMMODE_64BIT:
            {
                IEM_MC_BEGIN(4, 1);
                IEM_MC_ARG(uint64_t *,  pu64Dst,           0);
                IEM_MC_ARG(uint16_t,    u16Sel,            1);
                IEM_MC_ARG(uint32_t *,  pEFlags,           2);
                IEM_MC_ARG_CONST(bool,  fIsLarArg, fIsLar, 3);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
                IEMOP_HLP_DECODED_NL_2(fIsLar ? OP_LAR : OP_LSL, IEMOPFORM_RM_MEM, OP_PARM_Gv, OP_PARM_Ew, DISOPTYPE_DANGEROUS | DISOPTYPE_PRIVILEGED_NOTRAP);

                IEM_MC_FETCH_MEM_U16(u16Sel, pIemCpu->iEffSeg, GCPtrEffSrc);
                IEM_MC_REF_GREG_U64(pu64Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_CIMPL_4(iemCImpl_LarLsl_u64, pu64Dst, u16Sel, pEFlags, fIsLarArg);

                IEM_MC_END();
                return VINF_SUCCESS;
            }

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
1237
1238
1239
/** Opcode 0x0f 0x02 - lar Gv,Ew (load access rights byte). */
FNIEMOP_DEF(iemOp_lar_Gv_Ew)
{
    IEMOP_MNEMONIC("lar Gv,Ew");
    return FNIEMOP_CALL_1(iemOpCommonLarLsl_Gv_Ew, true);
}


/** Opcode 0x0f 0x03 - lsl Gv,Ew (load segment limit). */
FNIEMOP_DEF(iemOp_lsl_Gv_Ew)
{
    IEMOP_MNEMONIC("lsl Gv,Ew");
    return FNIEMOP_CALL_1(iemOpCommonLarLsl_Gv_Ew, false);
}
1254
1255
/** Opcode 0x0f 0x05 - syscall. All the work is done by the C implementation. */
FNIEMOP_DEF(iemOp_syscall)
{
    IEMOP_MNEMONIC("syscall");
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_syscall);
}


/** Opcode 0x0f 0x06 - clts (clear CR0.TS). */
FNIEMOP_DEF(iemOp_clts)
{
    IEMOP_MNEMONIC("clts");
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_clts);
}


/** Opcode 0x0f 0x07 - sysret. */
FNIEMOP_DEF(iemOp_sysret)
{
    IEMOP_MNEMONIC("sysret");
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_sysret);
}
1281
1282
/** Opcode 0x0f 0x08 - invd. Stub, not implemented yet. */
FNIEMOP_STUB(iemOp_invd);
1285
1286
/** Opcode 0x0f 0x09 - wbinvd. Privileged; currently implemented as a NOP
 *  apart from the CPL check (caches are not emulated). */
FNIEMOP_DEF(iemOp_wbinvd)
{
    IEMOP_MNEMONIC("wbinvd");
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEM_MC_BEGIN(0, 0);
    IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO(); /* #GP(0) unless ring-0 */
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS; /* ignore for now */
}
1298
1299
/** Opcode 0x0f 0x0b - ud2. Stub, not implemented yet. */
FNIEMOP_STUB(iemOp_ud2);
1302
1303/** Opcode 0x0f 0x0d. */
1304FNIEMOP_DEF(iemOp_nop_Ev_GrpP)
1305{
1306 /* AMD prefetch group, Intel implements this as NOP Ev (and so do we). */
1307 if (!IEM_IS_AMD_CPUID_FEATURES_ANY_PRESENT(X86_CPUID_EXT_FEATURE_EDX_LONG_MODE | X86_CPUID_AMD_FEATURE_EDX_3DNOW,
1308 X86_CPUID_AMD_FEATURE_ECX_3DNOWPRF))
1309 {
1310 IEMOP_MNEMONIC("GrpP");
1311 return IEMOP_RAISE_INVALID_OPCODE();
1312 }
1313
1314 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
1315 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
1316 {
1317 IEMOP_MNEMONIC("GrpP");
1318 return IEMOP_RAISE_INVALID_OPCODE();
1319 }
1320
1321 IEMOP_HLP_NO_LOCK_PREFIX();
1322 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
1323 {
1324 case 2: /* Aliased to /0 for the time being. */
1325 case 4: /* Aliased to /0 for the time being. */
1326 case 5: /* Aliased to /0 for the time being. */
1327 case 6: /* Aliased to /0 for the time being. */
1328 case 7: /* Aliased to /0 for the time being. */
1329 case 0: IEMOP_MNEMONIC("prefetch"); break;
1330 case 1: IEMOP_MNEMONIC("prefetchw "); break;
1331 case 3: IEMOP_MNEMONIC("prefetchw"); break;
1332 IEM_NOT_REACHED_DEFAULT_CASE_RET();
1333 }
1334
1335 IEM_MC_BEGIN(0, 1);
1336 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
1337 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
1338 /* Currently a NOP. */
1339 IEM_MC_ADVANCE_RIP();
1340 IEM_MC_END();
1341 return VINF_SUCCESS;
1342}
1343
1344
/** Opcode 0x0f 0x0e - femms (3DNow!). Stub, not implemented yet. */
FNIEMOP_STUB(iemOp_femms);
1347
1348
/*
 * 3DNow! worker stubs.  The instruction is selected by an imm8 suffix byte
 * following 0x0f 0x0f; these are dispatched from iemOp_3Dnow.  None are
 * implemented yet.
 */

/** Opcode 0x0f 0x0f 0x0c. */
FNIEMOP_STUB(iemOp_3Dnow_pi2fw_Pq_Qq);

/** Opcode 0x0f 0x0f 0x0d. */
FNIEMOP_STUB(iemOp_3Dnow_pi2fd_Pq_Qq);

/** Opcode 0x0f 0x0f 0x1c. */
FNIEMOP_STUB(iemOp_3Dnow_pf2fw_Pq_Qq);

/** Opcode 0x0f 0x0f 0x1d. */
FNIEMOP_STUB(iemOp_3Dnow_pf2fd_Pq_Qq);

/** Opcode 0x0f 0x0f 0x8a. */
FNIEMOP_STUB(iemOp_3Dnow_pfnacc_Pq_Qq);

/** Opcode 0x0f 0x0f 0x8e. */
FNIEMOP_STUB(iemOp_3Dnow_pfpnacc_Pq_Qq);

/** Opcode 0x0f 0x0f 0x90. */
FNIEMOP_STUB(iemOp_3Dnow_pfcmpge_Pq_Qq);

/** Opcode 0x0f 0x0f 0x94. */
FNIEMOP_STUB(iemOp_3Dnow_pfmin_Pq_Qq);

/** Opcode 0x0f 0x0f 0x96. */
FNIEMOP_STUB(iemOp_3Dnow_pfrcp_Pq_Qq);

/** Opcode 0x0f 0x0f 0x97. */
FNIEMOP_STUB(iemOp_3Dnow_pfrsqrt_Pq_Qq);

/** Opcode 0x0f 0x0f 0x9a. */
FNIEMOP_STUB(iemOp_3Dnow_pfsub_Pq_Qq);

/** Opcode 0x0f 0x0f 0x9e. */
FNIEMOP_STUB(iemOp_3Dnow_pfadd_PQ_Qq);

/** Opcode 0x0f 0x0f 0xa0. */
FNIEMOP_STUB(iemOp_3Dnow_pfcmpgt_Pq_Qq);

/** Opcode 0x0f 0x0f 0xa4. */
FNIEMOP_STUB(iemOp_3Dnow_pfmax_Pq_Qq);

/** Opcode 0x0f 0x0f 0xa6. */
FNIEMOP_STUB(iemOp_3Dnow_pfrcpit1_Pq_Qq);

/** Opcode 0x0f 0x0f 0xa7. */
FNIEMOP_STUB(iemOp_3Dnow_pfrsqit1_Pq_Qq);

/** Opcode 0x0f 0x0f 0xaa. */
FNIEMOP_STUB(iemOp_3Dnow_pfsubr_Pq_Qq);

/** Opcode 0x0f 0x0f 0xae. */
FNIEMOP_STUB(iemOp_3Dnow_pfacc_PQ_Qq);

/** Opcode 0x0f 0x0f 0xb0. */
FNIEMOP_STUB(iemOp_3Dnow_pfcmpeq_Pq_Qq);

/** Opcode 0x0f 0x0f 0xb4. */
FNIEMOP_STUB(iemOp_3Dnow_pfmul_Pq_Qq);

/** Opcode 0x0f 0x0f 0xb6. */
FNIEMOP_STUB(iemOp_3Dnow_pfrcpit2_Pq_Qq);

/** Opcode 0x0f 0x0f 0xb7. */
FNIEMOP_STUB(iemOp_3Dnow_pmulhrw_Pq_Qq);

/** Opcode 0x0f 0x0f 0xbb. */
FNIEMOP_STUB(iemOp_3Dnow_pswapd_Pq_Qq);

/** Opcode 0x0f 0x0f 0xbf. */
FNIEMOP_STUB(iemOp_3Dnow_pavgusb_PQ_Qq);
1420
1421
/** Opcode 0x0f 0x0f - 3DNow! escape.
 *  Raises \#UD unless the guest advertises the AMD 3DNow! CPUID bit; the
 *  actual operation is selected by the trailing imm8 suffix byte. */
FNIEMOP_DEF(iemOp_3Dnow)
{
    if (!IEM_IS_AMD_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_AMD_FEATURE_EDX_3DNOW))
    {
        IEMOP_MNEMONIC("3Dnow");
        return IEMOP_RAISE_INVALID_OPCODE();
    }

    /* This is pretty sparse, use switch instead of table. */
    uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
    switch (b)
    {
        case 0x0c: return FNIEMOP_CALL(iemOp_3Dnow_pi2fw_Pq_Qq);
        case 0x0d: return FNIEMOP_CALL(iemOp_3Dnow_pi2fd_Pq_Qq);
        case 0x1c: return FNIEMOP_CALL(iemOp_3Dnow_pf2fw_Pq_Qq);
        case 0x1d: return FNIEMOP_CALL(iemOp_3Dnow_pf2fd_Pq_Qq);
        case 0x8a: return FNIEMOP_CALL(iemOp_3Dnow_pfnacc_Pq_Qq);
        case 0x8e: return FNIEMOP_CALL(iemOp_3Dnow_pfpnacc_Pq_Qq);
        case 0x90: return FNIEMOP_CALL(iemOp_3Dnow_pfcmpge_Pq_Qq);
        case 0x94: return FNIEMOP_CALL(iemOp_3Dnow_pfmin_Pq_Qq);
        case 0x96: return FNIEMOP_CALL(iemOp_3Dnow_pfrcp_Pq_Qq);
        case 0x97: return FNIEMOP_CALL(iemOp_3Dnow_pfrsqrt_Pq_Qq);
        case 0x9a: return FNIEMOP_CALL(iemOp_3Dnow_pfsub_Pq_Qq);
        case 0x9e: return FNIEMOP_CALL(iemOp_3Dnow_pfadd_PQ_Qq);
        case 0xa0: return FNIEMOP_CALL(iemOp_3Dnow_pfcmpgt_Pq_Qq);
        case 0xa4: return FNIEMOP_CALL(iemOp_3Dnow_pfmax_Pq_Qq);
        case 0xa6: return FNIEMOP_CALL(iemOp_3Dnow_pfrcpit1_Pq_Qq);
        case 0xa7: return FNIEMOP_CALL(iemOp_3Dnow_pfrsqit1_Pq_Qq);
        case 0xaa: return FNIEMOP_CALL(iemOp_3Dnow_pfsubr_Pq_Qq);
        case 0xae: return FNIEMOP_CALL(iemOp_3Dnow_pfacc_PQ_Qq);
        case 0xb0: return FNIEMOP_CALL(iemOp_3Dnow_pfcmpeq_Pq_Qq);
        case 0xb4: return FNIEMOP_CALL(iemOp_3Dnow_pfmul_Pq_Qq);
        case 0xb6: return FNIEMOP_CALL(iemOp_3Dnow_pfrcpit2_Pq_Qq);
        case 0xb7: return FNIEMOP_CALL(iemOp_3Dnow_pmulhrw_Pq_Qq);
        case 0xbb: return FNIEMOP_CALL(iemOp_3Dnow_pswapd_Pq_Qq);
        case 0xbf: return FNIEMOP_CALL(iemOp_3Dnow_pavgusb_PQ_Qq);
        default: /* Unassigned suffix bytes. */
            return IEMOP_RAISE_INVALID_OPCODE();
    }
}
1463
1464
/* SSE/SSE2 move instruction stubs for opcodes 0x0f 0x10..0x17; the prefix
   (none/66/F3/F2) selects which variant each opcode decodes to. */

/** Opcode 0x0f 0x10. */
FNIEMOP_STUB(iemOp_movups_Vps_Wps__movupd_Vpd_Wpd__movss_Vss_Wss__movsd_Vsd_Wsd);
/** Opcode 0x0f 0x11. */
FNIEMOP_STUB(iemOp_movups_Wps_Vps__movupd_Wpd_Vpd__movss_Wss_Vss__movsd_Vsd_Wsd);
/** Opcode 0x0f 0x12. */
FNIEMOP_STUB(iemOp_movlps_Vq_Mq__movhlps_Vq_Uq__movlpd_Vq_Mq__movsldup_Vq_Wq__movddup_Vq_Wq); //NEXT
/** Opcode 0x0f 0x13. */
FNIEMOP_STUB(iemOp_movlps_Mq_Vq__movlpd_Mq_Vq); //NEXT
/** Opcode 0x0f 0x14. */
FNIEMOP_STUB(iemOp_unpckhlps_Vps_Wq__unpcklpd_Vpd_Wq);
/** Opcode 0x0f 0x15. */
FNIEMOP_STUB(iemOp_unpckhps_Vps_Wq__unpckhpd_Vpd_Wq);
/** Opcode 0x0f 0x16. */
FNIEMOP_STUB(iemOp_movhps_Vq_Mq__movlhps_Vq_Uq__movhpd_Vq_Mq__movshdup_Vq_Wq); //NEXT
/** Opcode 0x0f 0x17. */
FNIEMOP_STUB(iemOp_movhps_Mq_Vq__movhpd_Mq_Vq); //NEXT
1481
1482
/** Opcode 0x0f 0x18 - group 16 (SSE prefetch hints).
 *  Memory forms decode the operand and act as NOPs; register forms are
 *  invalid. */
FNIEMOP_DEF(iemOp_prefetch_Grp16)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    if ((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT))
    {
        IEMOP_HLP_NO_LOCK_PREFIX();
        switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
        {
            case 4: /* Aliased to /0 for the time being according to AMD. */
            case 5: /* Aliased to /0 for the time being according to AMD. */
            case 6: /* Aliased to /0 for the time being according to AMD. */
            case 7: /* Aliased to /0 for the time being according to AMD. */
            case 0: IEMOP_MNEMONIC("prefetchNTA m8"); break;
            case 1: IEMOP_MNEMONIC("prefetchT0  m8"); break;
            case 2: IEMOP_MNEMONIC("prefetchT1  m8"); break;
            case 3: IEMOP_MNEMONIC("prefetchT2  m8"); break;
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }

        /* Decode the memory operand for fault behavior, then NOP. */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR,  GCPtrEffSrc);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
        /* Currently a NOP. */
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
        return VINF_SUCCESS;
    }

    return IEMOP_RAISE_INVALID_OPCODE(); /* register form */
}
1514
1515
/** Opcode 0x0f 0x19..0x1f - multi-byte NOP with ModR/M operand.
 *  The operand is decoded (so addressing faults behave normally) but
 *  nothing is read or written. */
FNIEMOP_DEF(iemOp_nop_Ev)
{
    IEMOP_HLP_NO_LOCK_PREFIX();
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        IEM_MC_BEGIN(0, 0);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
        /* Currently a NOP. */
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
1538
1539
/** Opcode 0x0f 0x20 - mov Rd,Cd (read control register).
 *  The mod field and operand-size overrides are ignored; the operand size is
 *  forced to the full native width.  A LOCK prefix selects CR8 on CPUs with
 *  the AMD CR8L feature (alternative encoding to REX.R). */
FNIEMOP_DEF(iemOp_mov_Rd_Cd)
{
    /* mod is ignored, as is operand size overrides. */
    IEMOP_MNEMONIC("mov Rd,Cd");
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
        pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize = IEMMODE_64BIT;
    else
        pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize = IEMMODE_32BIT;

    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    uint8_t iCrReg = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg;
    if (pIemCpu->fPrefixes & IEM_OP_PRF_LOCK)
    {
        /* The lock prefix can be used to encode CR8 accesses on some CPUs. */
        if (!IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX(X86_CPUID_AMD_FEATURE_ECX_CR8L))
            return IEMOP_RAISE_INVALID_OPCODE(); /* #UD takes precedence over #GP(), see test. */
        iCrReg |= 8;
    }
    switch (iCrReg)
    {
        case 0: case 2: case 3: case 4: case 8: /* the only architecturally valid CRs */
            break;
        default:
            return IEMOP_RAISE_INVALID_OPCODE();
    }
    IEMOP_HLP_DONE_DECODING();

    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_mov_Rd_Cd, (X86_MODRM_RM_MASK & bRm) | pIemCpu->uRexB, iCrReg);
}
1570
1571
/** Opcode 0x0f 0x21 - mov Rd,Dd (read debug register).
 *  REX.R is invalid here since DR8..DR15 do not exist.
 *  NOTE(review): this uses IEMOP_HLP_NO_LOCK_PREFIX while the 0x0f 0x23
 *  counterpart uses IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX - confirm whether
 *  the difference is intentional. */
FNIEMOP_DEF(iemOp_mov_Rd_Dd)
{
    IEMOP_MNEMONIC("mov Rd,Dd");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    if (pIemCpu->fPrefixes & IEM_OP_PRF_REX_R)
        return IEMOP_RAISE_INVALID_OPCODE();
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_mov_Rd_Dd,
                                   (X86_MODRM_RM_MASK & bRm) | pIemCpu->uRexB,
                                   ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK));
}
1584
1585
/** Opcode 0x0f 0x22 - mov Cd,Rd (write control register).
 *  Mirror of 0x0f 0x20: mod and operand-size overrides ignored, operand size
 *  forced to native width, LOCK prefix encodes CR8 on AMD CR8L-capable CPUs. */
FNIEMOP_DEF(iemOp_mov_Cd_Rd)
{
    /* mod is ignored, as is operand size overrides. */
    IEMOP_MNEMONIC("mov Cd,Rd");
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
        pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize = IEMMODE_64BIT;
    else
        pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize = IEMMODE_32BIT;

    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    uint8_t iCrReg = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg;
    if (pIemCpu->fPrefixes & IEM_OP_PRF_LOCK)
    {
        /* The lock prefix can be used to encode CR8 accesses on some CPUs. */
        if (!IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX(X86_CPUID_AMD_FEATURE_ECX_CR8L))
            return IEMOP_RAISE_INVALID_OPCODE(); /* #UD takes precedence over #GP(), see test. */
        iCrReg |= 8;
    }
    switch (iCrReg)
    {
        case 0: case 2: case 3: case 4: case 8: /* the only architecturally valid CRs */
            break;
        default:
            return IEMOP_RAISE_INVALID_OPCODE();
    }
    IEMOP_HLP_DONE_DECODING();

    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_mov_Cd_Rd, iCrReg, (X86_MODRM_RM_MASK & bRm) | pIemCpu->uRexB);
}
1616
1617
/** Opcode 0x0f 0x23 - mov Dd,Rd (write debug register).
 *  REX.R is invalid here since DR8..DR15 do not exist. */
FNIEMOP_DEF(iemOp_mov_Dd_Rd)
{
    IEMOP_MNEMONIC("mov Dd,Rd");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    if (pIemCpu->fPrefixes & IEM_OP_PRF_REX_R)
        return IEMOP_RAISE_INVALID_OPCODE();
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_mov_Dd_Rd,
                                   ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK),
                                   (X86_MODRM_RM_MASK & bRm) | pIemCpu->uRexB);
}
1630
1631
/** Opcode 0x0f 0x24 - mov Rd,Td (test registers, long gone); raises \#UD. */
FNIEMOP_DEF(iemOp_mov_Rd_Td)
{
    IEMOP_MNEMONIC("mov Rd,Td");
    /* The RM byte is not considered, see testcase. */
    return IEMOP_RAISE_INVALID_OPCODE();
}


/** Opcode 0x0f 0x26 - mov Td,Rd (test registers, long gone); raises \#UD. */
FNIEMOP_DEF(iemOp_mov_Td_Rd)
{
    IEMOP_MNEMONIC("mov Td,Rd");
    /* The RM byte is not considered, see testcase. */
    return IEMOP_RAISE_INVALID_OPCODE();
}
1648
1649
/* SSE/SSE2 stubs for opcodes 0x0f 0x28..0x2f (moves, conversions, compares). */

/** Opcode 0x0f 0x28. */
FNIEMOP_STUB(iemOp_movaps_Vps_Wps__movapd_Vpd_Wpd);
/** Opcode 0x0f 0x29. */
FNIEMOP_STUB(iemOp_movaps_Wps_Vps__movapd_Wpd_Vpd);
/** Opcode 0x0f 0x2a. */
FNIEMOP_STUB(iemOp_cvtpi2ps_Vps_Qpi__cvtpi2pd_Vpd_Qpi__cvtsi2ss_Vss_Ey__cvtsi2sd_Vsd_Ey); //NEXT
/** Opcode 0x0f 0x2b. */
FNIEMOP_STUB(iemOp_movntps_Mps_Vps__movntpd_Mpd_Vpd); //NEXT:XP
/** Opcode 0x0f 0x2c. */
FNIEMOP_STUB(iemOp_cvttps2pi_Ppi_Wps__cvttpd2pi_Ppi_Wpd__cvttss2si_Gy_Wss__cvttsd2si_Yu_Wsd); //NEXT
/** Opcode 0x0f 0x2d. */
FNIEMOP_STUB(iemOp_cvtps2pi_Ppi_Wps__cvtpd2pi_QpiWpd__cvtss2si_Gy_Wss__cvtsd2si_Gy_Wsd);
/** Opcode 0x0f 0x2e. */
FNIEMOP_STUB(iemOp_ucomiss_Vss_Wss__ucomisd_Vsd_Wsd); //NEXT
/** Opcode 0x0f 0x2f. */
FNIEMOP_STUB(iemOp_comiss_Vss_Wss__comisd_Vsd_Wsd);
1666
1667
/** Opcode 0x0f 0x30 - wrmsr. All the work is done by the C implementation. */
FNIEMOP_DEF(iemOp_wrmsr)
{
    IEMOP_MNEMONIC("wrmsr");
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_wrmsr);
}


/** Opcode 0x0f 0x31 - rdtsc. */
FNIEMOP_DEF(iemOp_rdtsc)
{
    IEMOP_MNEMONIC("rdtsc");
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_rdtsc);
}


/** Opcode 0x0f 0x32 - rdmsr. */
FNIEMOP_DEF(iemOp_rdmsr)
{
    IEMOP_MNEMONIC("rdmsr");
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_rdmsr);
}
1693
1694
/** Opcode 0x0f 0x33 - rdpmc. Stub. */
FNIEMOP_STUB(iemOp_rdpmc);
/** Opcode 0x0f 0x34. */
FNIEMOP_STUB(iemOp_sysenter);
/** Opcode 0x0f 0x35. */
FNIEMOP_STUB(iemOp_sysexit);
/** Opcode 0x0f 0x37. */
FNIEMOP_STUB(iemOp_getsec);
/** Opcode 0x0f 0x38. */
FNIEMOP_UD_STUB(iemOp_3byte_Esc_A4); /* Here there be dragons... */
/** Opcode 0x0f 0x3a. */
FNIEMOP_UD_STUB(iemOp_3byte_Esc_A5); /* Here there be dragons... */
/** Opcode 0x0f 0x3c (?). */
FNIEMOP_STUB(iemOp_movnti_Gv_Ev);
1709
/**
 * Implements a conditional move.
 *
 * Wish there was an obvious way to do this where we could share and reduce
 * code bloat.
 *
 * The register form only fetches Ev when the condition holds, while the
 * memory form always performs the fetch (and thus any memory fault) before
 * evaluating the condition.  With a 32-bit operand size the high half of the
 * 64-bit destination is cleared even when the condition is false (standard
 * 32-bit write semantics in 64-bit mode).
 *
 * @param   a_Cnd   The conditional "microcode" operation.
 */
#define CMOV_X(a_Cnd) \
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm); \
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT)) \
    { \
        switch (pIemCpu->enmEffOpSize) \
        { \
            case IEMMODE_16BIT: \
                IEM_MC_BEGIN(0, 1); \
                IEM_MC_LOCAL(uint16_t, u16Tmp); \
                a_Cnd { \
                    IEM_MC_FETCH_GREG_U16(u16Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB); \
                    IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Tmp); \
                } IEM_MC_ENDIF(); \
                IEM_MC_ADVANCE_RIP(); \
                IEM_MC_END(); \
                return VINF_SUCCESS; \
    \
            case IEMMODE_32BIT: \
                IEM_MC_BEGIN(0, 1); \
                IEM_MC_LOCAL(uint32_t, u32Tmp); \
                a_Cnd { \
                    IEM_MC_FETCH_GREG_U32(u32Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB); \
                    IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Tmp); \
                } IEM_MC_ELSE() { \
                    IEM_MC_CLEAR_HIGH_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg); \
                } IEM_MC_ENDIF(); \
                IEM_MC_ADVANCE_RIP(); \
                IEM_MC_END(); \
                return VINF_SUCCESS; \
    \
            case IEMMODE_64BIT: \
                IEM_MC_BEGIN(0, 1); \
                IEM_MC_LOCAL(uint64_t, u64Tmp); \
                a_Cnd { \
                    IEM_MC_FETCH_GREG_U64(u64Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB); \
                    IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Tmp); \
                } IEM_MC_ENDIF(); \
                IEM_MC_ADVANCE_RIP(); \
                IEM_MC_END(); \
                return VINF_SUCCESS; \
    \
            IEM_NOT_REACHED_DEFAULT_CASE_RET(); \
        } \
    } \
    else \
    { \
        switch (pIemCpu->enmEffOpSize) \
        { \
            case IEMMODE_16BIT: \
                IEM_MC_BEGIN(0, 2); \
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc); \
                IEM_MC_LOCAL(uint16_t, u16Tmp); \
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); \
                IEM_MC_FETCH_MEM_U16(u16Tmp, pIemCpu->iEffSeg, GCPtrEffSrc); \
                a_Cnd { \
                    IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Tmp); \
                } IEM_MC_ENDIF(); \
                IEM_MC_ADVANCE_RIP(); \
                IEM_MC_END(); \
                return VINF_SUCCESS; \
    \
            case IEMMODE_32BIT: \
                IEM_MC_BEGIN(0, 2); \
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc); \
                IEM_MC_LOCAL(uint32_t, u32Tmp); \
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); \
                IEM_MC_FETCH_MEM_U32(u32Tmp, pIemCpu->iEffSeg, GCPtrEffSrc); \
                a_Cnd { \
                    IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Tmp); \
                } IEM_MC_ELSE() { \
                    IEM_MC_CLEAR_HIGH_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg); \
                } IEM_MC_ENDIF(); \
                IEM_MC_ADVANCE_RIP(); \
                IEM_MC_END(); \
                return VINF_SUCCESS; \
    \
            case IEMMODE_64BIT: \
                IEM_MC_BEGIN(0, 2); \
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc); \
                IEM_MC_LOCAL(uint64_t, u64Tmp); \
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); \
                IEM_MC_FETCH_MEM_U64(u64Tmp, pIemCpu->iEffSeg, GCPtrEffSrc); \
                a_Cnd { \
                    IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Tmp); \
                } IEM_MC_ENDIF(); \
                IEM_MC_ADVANCE_RIP(); \
                IEM_MC_END(); \
                return VINF_SUCCESS; \
    \
            IEM_NOT_REACHED_DEFAULT_CASE_RET(); \
        } \
    } do {} while (0)
1810
1811
1812
/** Opcode 0x0f 0x40 - cmovo: move if OF=1. */
FNIEMOP_DEF(iemOp_cmovo_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovo Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF));
}


/** Opcode 0x0f 0x41 - cmovno: move if OF=0. */
FNIEMOP_DEF(iemOp_cmovno_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovno Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_NOT_SET(X86_EFL_OF));
}


/** Opcode 0x0f 0x42 - cmovc/cmovb: move if CF=1. */
FNIEMOP_DEF(iemOp_cmovc_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovc Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF));
}


/** Opcode 0x0f 0x43 - cmovnc/cmovnb: move if CF=0. */
FNIEMOP_DEF(iemOp_cmovnc_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovnc Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_NOT_SET(X86_EFL_CF));
}


/** Opcode 0x0f 0x44 - cmove/cmovz: move if ZF=1. */
FNIEMOP_DEF(iemOp_cmove_Gv_Ev)
{
    IEMOP_MNEMONIC("cmove Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF));
}


/** Opcode 0x0f 0x45 - cmovne/cmovnz: move if ZF=0. */
FNIEMOP_DEF(iemOp_cmovne_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovne Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_NOT_SET(X86_EFL_ZF));
}


/** Opcode 0x0f 0x46 - cmovbe/cmovna: move if CF=1 or ZF=1. */
FNIEMOP_DEF(iemOp_cmovbe_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovbe Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF));
}


/** Opcode 0x0f 0x47 - cmovnbe/cmova: move if CF=0 and ZF=0. */
FNIEMOP_DEF(iemOp_cmovnbe_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovnbe Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_NO_BITS_SET(X86_EFL_CF | X86_EFL_ZF));
}


/** Opcode 0x0f 0x48 - cmovs: move if SF=1. */
FNIEMOP_DEF(iemOp_cmovs_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovs Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF));
}


/** Opcode 0x0f 0x49 - cmovns: move if SF=0. */
FNIEMOP_DEF(iemOp_cmovns_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovns Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_NOT_SET(X86_EFL_SF));
}


/** Opcode 0x0f 0x4a - cmovp/cmovpe: move if PF=1. */
FNIEMOP_DEF(iemOp_cmovp_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovp Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF));
}


/** Opcode 0x0f 0x4b - cmovnp/cmovpo: move if PF=0. */
FNIEMOP_DEF(iemOp_cmovnp_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovnp Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_NOT_SET(X86_EFL_PF));
}


/** Opcode 0x0f 0x4c - cmovl/cmovnge: move if SF != OF. */
FNIEMOP_DEF(iemOp_cmovl_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovl Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF));
}


/** Opcode 0x0f 0x4d - cmovnl/cmovge: move if SF == OF. */
FNIEMOP_DEF(iemOp_cmovnl_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovnl Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BITS_EQ(X86_EFL_SF, X86_EFL_OF));
}


/** Opcode 0x0f 0x4e - cmovle/cmovng: move if ZF=1 or SF != OF. */
FNIEMOP_DEF(iemOp_cmovle_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovle Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF));
}


/** Opcode 0x0f 0x4f - cmovnle/cmovg: move if ZF=0 and SF == OF. */
FNIEMOP_DEF(iemOp_cmovnle_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovnle Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF));
}

#undef CMOV_X
1941
/* SSE/SSE2 arithmetic and logical stubs for opcodes 0x0f 0x50..0x5f. */

/** Opcode 0x0f 0x50. */
FNIEMOP_STUB(iemOp_movmskps_Gy_Ups__movmskpd_Gy_Upd);
/** Opcode 0x0f 0x51. */
FNIEMOP_STUB(iemOp_sqrtps_Wps_Vps__sqrtpd_Wpd_Vpd__sqrtss_Vss_Wss__sqrtsd_Vsd_Wsd);
/** Opcode 0x0f 0x52. */
FNIEMOP_STUB(iemOp_rsqrtps_Wps_Vps__rsqrtss_Vss_Wss);
/** Opcode 0x0f 0x53. */
FNIEMOP_STUB(iemOp_rcpps_Wps_Vps__rcpss_Vs_Wss);
/** Opcode 0x0f 0x54. */
FNIEMOP_STUB(iemOp_andps_Vps_Wps__andpd_Wpd_Vpd);
/** Opcode 0x0f 0x55. */
FNIEMOP_STUB(iemOp_andnps_Vps_Wps__andnpd_Wpd_Vpd);
/** Opcode 0x0f 0x56. */
FNIEMOP_STUB(iemOp_orps_Wpd_Vpd__orpd_Wpd_Vpd);
/** Opcode 0x0f 0x57. */
FNIEMOP_STUB(iemOp_xorps_Vps_Wps__xorpd_Wpd_Vpd);
/** Opcode 0x0f 0x58. */
FNIEMOP_STUB(iemOp_addps_Vps_Wps__addpd_Vpd_Wpd__addss_Vss_Wss__addsd_Vsd_Wsd); //NEXT
/** Opcode 0x0f 0x59. */
FNIEMOP_STUB(iemOp_mulps_Vps_Wps__mulpd_Vpd_Wpd__mulss_Vss__Wss__mulsd_Vsd_Wsd);//NEXT
/** Opcode 0x0f 0x5a. */
FNIEMOP_STUB(iemOp_cvtps2pd_Vpd_Wps__cvtpd2ps_Vps_Wpd__cvtss2sd_Vsd_Wss__cvtsd2ss_Vss_Wsd);
/** Opcode 0x0f 0x5b. */
FNIEMOP_STUB(iemOp_cvtdq2ps_Vps_Wdq__cvtps2dq_Vdq_Wps__cvtps2dq_Vdq_Wps);
/** Opcode 0x0f 0x5c. */
FNIEMOP_STUB(iemOp_subps_Vps_Wps__subpd_Vps_Wdp__subss_Vss_Wss__subsd_Vsd_Wsd);
/** Opcode 0x0f 0x5d. */
FNIEMOP_STUB(iemOp_minps_Vps_Wps__minpd_Vpd_Wpd__minss_Vss_Wss__minsd_Vsd_Wsd);
/** Opcode 0x0f 0x5e. */
FNIEMOP_STUB(iemOp_divps_Vps_Wps__divpd_Vpd_Wpd__divss_Vss_Wss__divsd_Vsd_Wsd);
/** Opcode 0x0f 0x5f. */
FNIEMOP_STUB(iemOp_maxps_Vps_Wps__maxpd_Vpd_Wpd__maxss_Vss_Wss__maxsd_Vsd_Wsd);
1974
1975
/**
 * Common worker for SSE2 and MMX instructions on the forms:
 *      pxxxx xmm1, xmm2/mem128
 *      pxxxx mm1, mm2/mem32
 *
 * The 2nd operand is the first half of a register, which in the memory case
 * means a 32-bit memory access for MMX and a 128-bit aligned 64-bit or
 * 128-bit memory access for SSE.  (The original comment said "for MMX"
 * twice; the SSE path below does the aligned 64-bit fetch.)
 *
 * Exceptions type 4.
 */
FNIEMOP_DEF_1(iemOpCommonMmxSse_LowLow_To_Full, PCIEMOPMEDIAF1L1, pImpl)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
    {
        case IEM_OP_PRF_SIZE_OP: /* SSE */
            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
            {
                /*
                 * Register, register.
                 */
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_BEGIN(2, 0);
                IEM_MC_ARG(uint128_t *,          pDst, 0);
                IEM_MC_ARG(uint64_t const *,     pSrc, 1);
                IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
                IEM_MC_REF_XREG_U128(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_XREG_U64_CONST(pSrc, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_CALL_SSE_AIMPL_2(pImpl->pfnU128, pDst, pSrc);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            else
            {
                /*
                 * Register, memory.
                 */
                IEM_MC_BEGIN(2, 2);
                IEM_MC_ARG(uint128_t *,                 pDst,       0);
                IEM_MC_LOCAL(uint64_t,                  uSrc);
                IEM_MC_ARG_LOCAL_REF(uint64_t const *,  pSrc, uSrc, 1);
                IEM_MC_LOCAL(RTGCPTR,                   GCPtrEffSrc);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
                IEM_MC_FETCH_MEM_U64_ALIGN_U128(uSrc, pIemCpu->iEffSeg, GCPtrEffSrc); /* 64-bit fetch, 128-bit alignment check */

                IEM_MC_REF_XREG_U128(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_CALL_SSE_AIMPL_2(pImpl->pfnU128, pDst, pSrc);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            return VINF_SUCCESS;

        case 0: /* MMX */
            if (!pImpl->pfnU64)
                return IEMOP_RAISE_INVALID_OPCODE();
            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
            {
                /*
                 * Register, register.
                 */
                /** @todo testcase: REX.B / REX.R and MMX register indexing. Ignored? */
                /** @todo testcase: REX.B / REX.R and segment register indexing. Ignored? */
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_BEGIN(2, 0);
                IEM_MC_ARG(uint64_t *,          pDst, 0);
                IEM_MC_ARG(uint32_t const *,    pSrc, 1);
                IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT();
                IEM_MC_REF_MREG_U64(pDst, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
                IEM_MC_REF_MREG_U32_CONST(pSrc, bRm & X86_MODRM_RM_MASK);
                IEM_MC_CALL_MMX_AIMPL_2(pImpl->pfnU64, pDst, pSrc);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            else
            {
                /*
                 * Register, memory.
                 */
                IEM_MC_BEGIN(2, 2);
                IEM_MC_ARG(uint64_t *,                  pDst,       0);
                IEM_MC_LOCAL(uint32_t,                  uSrc);
                IEM_MC_ARG_LOCAL_REF(uint32_t const *,  pSrc, uSrc, 1);
                IEM_MC_LOCAL(RTGCPTR,                   GCPtrEffSrc);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT();
                IEM_MC_FETCH_MEM_U32(uSrc, pIemCpu->iEffSeg, GCPtrEffSrc); /* unaligned 32-bit fetch */

                IEM_MC_REF_MREG_U64(pDst, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
                IEM_MC_CALL_MMX_AIMPL_2(pImpl->pfnU64, pDst, pSrc);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            return VINF_SUCCESS;

        default: /* F3/F2 prefixed forms are undefined for these opcodes. */
            return IEMOP_RAISE_INVALID_OPCODE();
    }
}
2082
2083
/** Opcode 0x0f 0x60. PUNPCKLBW - interleave low-order bytes. */
FNIEMOP_DEF(iemOp_punpcklbw_Pq_Qd__punpcklbw_Vdq_Wdq)
{
    IEMOP_MNEMONIC("punpcklbw");
    return FNIEMOP_CALL_1(iemOpCommonMmxSse_LowLow_To_Full, &g_iemAImpl_punpcklbw);
}
2090
2091
/** Opcode 0x0f 0x61. PUNPCKLWD - interleave low-order words. */
FNIEMOP_DEF(iemOp_punpcklwd_Pq_Qd__punpcklwd_Vdq_Wdq)
{
    IEMOP_MNEMONIC("punpcklwd"); /** @todo AMD mark the MMX version as 3DNow!. Intel says MMX CPUID req. */
    return FNIEMOP_CALL_1(iemOpCommonMmxSse_LowLow_To_Full, &g_iemAImpl_punpcklwd);
}
2098
2099
/** Opcode 0x0f 0x62. PUNPCKLDQ - interleave low-order dwords. */
FNIEMOP_DEF(iemOp_punpckldq_Pq_Qd__punpckldq_Vdq_Wdq)
{
    IEMOP_MNEMONIC("punpckldq");
    return FNIEMOP_CALL_1(iemOpCommonMmxSse_LowLow_To_Full, &g_iemAImpl_punpckldq);
}
2106
2107
/* Unimplemented pack/compare opcodes 0x0f 0x63..0x67. */
/** Opcode 0x0f 0x63. */
FNIEMOP_STUB(iemOp_packsswb_Pq_Qq__packsswb_Vdq_Wdq);
/** Opcode 0x0f 0x64. */
FNIEMOP_STUB(iemOp_pcmpgtb_Pq_Qq__pcmpgtb_Vdq_Wdq);
/** Opcode 0x0f 0x65. */
FNIEMOP_STUB(iemOp_pcmpgtw_Pq_Qq__pcmpgtw_Vdq_Wdq);
/** Opcode 0x0f 0x66. */
FNIEMOP_STUB(iemOp_pcmpgtd_Pq_Qq__pcmpgtd_Vdq_Wdq);
/** Opcode 0x0f 0x67. */
FNIEMOP_STUB(iemOp_packuswb_Pq_Qq__packuswb_Vdq_Wdq);
2118
2119
/**
 * Common worker for SSE2 and MMX instructions on the forms:
 *      pxxxx xmm1, xmm2/mem128
 *      pxxxx mm1, mm2/mem64
 *
 * The 2nd operand is the second half of a register, which in the memory case
 * means a 64-bit memory access for MMX, and for SSE a 128-bit aligned access
 * where it may read the full 128 bits or only the upper 64 bits.
 *
 * Exceptions type 4.
 */
FNIEMOP_DEF_1(iemOpCommonMmxSse_HighHigh_To_Full, PCIEMOPMEDIAF1H1, pImpl)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    /* The mandatory prefix selects the form: 0x66 = SSE, none = MMX;
       F2/F3 make the encoding invalid. */
    switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
    {
        case IEM_OP_PRF_SIZE_OP: /* SSE */
            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
            {
                /*
                 * Register, register.
                 */
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_BEGIN(2, 0);
                IEM_MC_ARG(uint128_t *, pDst, 0);
                IEM_MC_ARG(uint128_t const *, pSrc, 1);
                IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
                IEM_MC_REF_XREG_U128(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_XREG_U128_CONST(pSrc, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_CALL_SSE_AIMPL_2(pImpl->pfnU128, pDst, pSrc);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            else
            {
                /*
                 * Register, memory.
                 */
                IEM_MC_BEGIN(2, 2);
                IEM_MC_ARG(uint128_t *, pDst, 0);
                IEM_MC_LOCAL(uint128_t, uSrc);
                IEM_MC_ARG_LOCAL_REF(uint128_t const *, pSrc, uSrc, 1);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
                IEM_MC_FETCH_MEM_U128_ALIGN_SSE(uSrc, pIemCpu->iEffSeg, GCPtrEffSrc); /* Most CPUs probably only read the high qword. */

                IEM_MC_REF_XREG_U128(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_CALL_SSE_AIMPL_2(pImpl->pfnU128, pDst, pSrc);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            return VINF_SUCCESS;

        case 0: /* MMX */
            if (!pImpl->pfnU64)
                return IEMOP_RAISE_INVALID_OPCODE();
            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
            {
                /*
                 * Register, register.
                 */
                /** @todo testcase: REX.B / REX.R and MMX register indexing. Ignored? */
                /** @todo testcase: REX.B / REX.R and segment register indexing. Ignored? */
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_BEGIN(2, 0);
                IEM_MC_ARG(uint64_t *, pDst, 0);
                IEM_MC_ARG(uint64_t const *, pSrc, 1);
                IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT();
                IEM_MC_REF_MREG_U64(pDst, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
                IEM_MC_REF_MREG_U64_CONST(pSrc, bRm & X86_MODRM_RM_MASK);
                IEM_MC_CALL_MMX_AIMPL_2(pImpl->pfnU64, pDst, pSrc);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            else
            {
                /*
                 * Register, memory.
                 */
                IEM_MC_BEGIN(2, 2);
                IEM_MC_ARG(uint64_t *, pDst, 0);
                IEM_MC_LOCAL(uint64_t, uSrc);
                IEM_MC_ARG_LOCAL_REF(uint64_t const *, pSrc, uSrc, 1);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT();
                IEM_MC_FETCH_MEM_U64(uSrc, pIemCpu->iEffSeg, GCPtrEffSrc);

                IEM_MC_REF_MREG_U64(pDst, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
                IEM_MC_CALL_MMX_AIMPL_2(pImpl->pfnU64, pDst, pSrc);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            return VINF_SUCCESS;

        default:
            return IEMOP_RAISE_INVALID_OPCODE();
    }
}
2226
2227
/** Opcode 0x0f 0x68. PUNPCKHBW - interleave high-order bytes. */
FNIEMOP_DEF(iemOp_punpckhbw_Pq_Qq__punpckhbw_Vdq_Wdq)
{
    IEMOP_MNEMONIC("punpckhbw");
    return FNIEMOP_CALL_1(iemOpCommonMmxSse_HighHigh_To_Full, &g_iemAImpl_punpckhbw);
}
2234
2235
/** Opcode 0x0f 0x69. PUNPCKHWD - interleave high-order words. */
FNIEMOP_DEF(iemOp_punpckhwd_Pq_Qd__punpckhwd_Vdq_Wdq)
{
    IEMOP_MNEMONIC("punpckhwd");
    return FNIEMOP_CALL_1(iemOpCommonMmxSse_HighHigh_To_Full, &g_iemAImpl_punpckhwd);
}
2242
2243
/** Opcode 0x0f 0x6a. PUNPCKHDQ - interleave high-order dwords. */
FNIEMOP_DEF(iemOp_punpckhdq_Pq_Qd__punpckhdq_Vdq_Wdq)
{
    IEMOP_MNEMONIC("punpckhdq");
    return FNIEMOP_CALL_1(iemOpCommonMmxSse_HighHigh_To_Full, &g_iemAImpl_punpckhdq);
}
2250
/** Opcode 0x0f 0x6b. PACKSSDW.
 * (NOTE(review): the identifier says "packssdq" - typo for packssdw; kept
 * as-is since the opcode table references this name.) */
FNIEMOP_STUB(iemOp_packssdw_Pq_Qd__packssdq_Vdq_Wdq);
2253
2254
/** Opcode 0x0f 0x6c. PUNPCKLQDQ - interleave low-order qwords (SSE2 only). */
FNIEMOP_DEF(iemOp_punpcklqdq_Vdq_Wdq)
{
    IEMOP_MNEMONIC("punpcklqdq");
    return FNIEMOP_CALL_1(iemOpCommonMmxSse_LowLow_To_Full, &g_iemAImpl_punpcklqdq);
}
2261
2262
/** Opcode 0x0f 0x6d. PUNPCKHQDQ - interleave high-order qwords (SSE2 only). */
FNIEMOP_DEF(iemOp_punpckhqdq_Vdq_Wdq)
{
    IEMOP_MNEMONIC("punpckhqdq");
    return FNIEMOP_CALL_1(iemOpCommonMmxSse_HighHigh_To_Full, &g_iemAImpl_punpckhqdq);
}
2269
2270
/** Opcode 0x0f 0x6e. MOVD/MOVQ - load a dword/qword from a GPR or memory
 * into an MMX (no prefix) or XMM (0x66 prefix) register, zero-extending the
 * XMM destination to 128 bits. REX.W selects qword vs dword operand size. */
FNIEMOP_DEF(iemOp_movd_q_Pd_Ey__movd_q_Vy_Ey)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
    {
        case IEM_OP_PRF_SIZE_OP: /* SSE */
            IEMOP_MNEMONIC("movd/q Wd/q,Ed/q");
            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
            {
                /* XMM, greg*/
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_BEGIN(0, 1);
                IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
                if (pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_REX_W)
                {
                    IEM_MC_LOCAL(uint64_t, u64Tmp);
                    IEM_MC_FETCH_GREG_U64(u64Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                    IEM_MC_STORE_XREG_U64_ZX_U128(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Tmp);
                }
                else
                {
                    IEM_MC_LOCAL(uint32_t, u32Tmp);
                    IEM_MC_FETCH_GREG_U32(u32Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                    IEM_MC_STORE_XREG_U32_ZX_U128(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Tmp);
                }
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            else
            {
                /* XMM, [mem] */
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
                IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 1);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                if (pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_REX_W)
                {
                    IEM_MC_LOCAL(uint64_t, u64Tmp);
                    IEM_MC_FETCH_MEM_U64(u64Tmp, pIemCpu->iEffSeg, GCPtrEffSrc);
                    IEM_MC_STORE_XREG_U64_ZX_U128(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Tmp);
                }
                else
                {
                    IEM_MC_LOCAL(uint32_t, u32Tmp);
                    IEM_MC_FETCH_MEM_U32(u32Tmp, pIemCpu->iEffSeg, GCPtrEffSrc);
                    IEM_MC_STORE_XREG_U32_ZX_U128(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Tmp);
                }
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            return VINF_SUCCESS;

        case 0: /* MMX */
            IEMOP_MNEMONIC("movq/d Pd/q,Ed/q");
            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
            {
                /* MMX, greg */
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_BEGIN(0, 1);
                IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT();
                IEM_MC_LOCAL(uint64_t, u64Tmp);
                if (pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_REX_W)
                    IEM_MC_FETCH_GREG_U64(u64Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                else
                    IEM_MC_FETCH_GREG_U32_ZX_U64(u64Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_STORE_MREG_U64((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK, u64Tmp);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            else
            {
                /* MMX, [mem] */
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
                IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT();
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 1);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                if (pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_REX_W)
                {
                    IEM_MC_LOCAL(uint64_t, u64Tmp);
                    IEM_MC_FETCH_MEM_U64(u64Tmp, pIemCpu->iEffSeg, GCPtrEffSrc);
                    IEM_MC_STORE_MREG_U64((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK, u64Tmp);
                }
                else
                {
                    IEM_MC_LOCAL(uint32_t, u32Tmp);
                    IEM_MC_FETCH_MEM_U32(u32Tmp, pIemCpu->iEffSeg, GCPtrEffSrc);
                    IEM_MC_STORE_MREG_U32_ZX_U64((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK, u32Tmp);
                }
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            return VINF_SUCCESS;

        default:
            return IEMOP_RAISE_INVALID_OPCODE();
    }
}
2371
2372
/** Opcode 0x0f 0x6f. MOVQ (MMX) / MOVDQA (0x66) / MOVDQU (F3) - register or
 * memory to MMX/XMM register load; only the 0x66 form enforces alignment. */
FNIEMOP_DEF(iemOp_movq_Pq_Qq__movdqa_Vdq_Wdq__movdqu_Vdq_Wdq)
{
    bool fAligned = false;
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
    {
        case IEM_OP_PRF_SIZE_OP: /* SSE aligned */
            fAligned = true;
            /* fall thru - shares the SSE path with movdqu. */
        case IEM_OP_PRF_REPZ: /* SSE unaligned */
            if (fAligned)
                IEMOP_MNEMONIC("movdqa Vdq,Wdq");
            else
                IEMOP_MNEMONIC("movdqu Vdq,Wdq");
            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
            {
                /*
                 * Register, register.
                 */
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint128_t, u128Tmp);
                IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
                IEM_MC_FETCH_XREG_U128(u128Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_STORE_XREG_U128(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u128Tmp);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            else
            {
                /*
                 * Register, memory.
                 */
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint128_t, u128Tmp);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
                if (fAligned)
                    IEM_MC_FETCH_MEM_U128_ALIGN_SSE(u128Tmp, pIemCpu->iEffSeg, GCPtrEffSrc);
                else
                    IEM_MC_FETCH_MEM_U128(u128Tmp, pIemCpu->iEffSeg, GCPtrEffSrc);
                IEM_MC_STORE_XREG_U128(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u128Tmp);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            return VINF_SUCCESS;

        case 0: /* MMX */
            IEMOP_MNEMONIC("movq Pq,Qq");
            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
            {
                /*
                 * Register, register.
                 */
                /** @todo testcase: REX.B / REX.R and MMX register indexing. Ignored? */
                /** @todo testcase: REX.B / REX.R and segment register indexing. Ignored? */
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint64_t, u64Tmp);
                IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT();
                IEM_MC_FETCH_MREG_U64(u64Tmp, bRm & X86_MODRM_RM_MASK);
                IEM_MC_STORE_MREG_U64((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK, u64Tmp);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            else
            {
                /*
                 * Register, memory.
                 */
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint64_t, u64Tmp);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT();
                IEM_MC_FETCH_MEM_U64(u64Tmp, pIemCpu->iEffSeg, GCPtrEffSrc);
                IEM_MC_STORE_MREG_U64((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK, u64Tmp);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            return VINF_SUCCESS;

        default:
            return IEMOP_RAISE_INVALID_OPCODE();
    }
}
2466
2467
/** Opcode 0x0f 0x70. The immediate here is evil!
 * PSHUFW (MMX ext) / PSHUFD (0x66) / PSHUFLW (F2) / PSHUFHW (F3).
 * "Evil" because the shuffle-control immediate follows the ModR/M byte and,
 * in the memory forms, must be fetched after the effective address bytes. */
FNIEMOP_DEF(iemOp_pshufw_Pq_Qq_Ib__pshufd_Vdq_Wdq_Ib__pshufhw_Vdq_Wdq_Ib__pshuflq_Vdq_Wdq_Ib)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
    {
        case IEM_OP_PRF_SIZE_OP: /* SSE */
        case IEM_OP_PRF_REPNZ: /* SSE */
        case IEM_OP_PRF_REPZ: /* SSE */
        {
            /* Pick the worker matching the mandatory prefix. */
            PFNIEMAIMPLMEDIAPSHUF pfnAImpl;
            switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
            {
                case IEM_OP_PRF_SIZE_OP:
                    IEMOP_MNEMONIC("pshufd Vdq,Wdq,Ib");
                    pfnAImpl = iemAImpl_pshufd;
                    break;
                case IEM_OP_PRF_REPNZ:
                    IEMOP_MNEMONIC("pshuflw Vdq,Wdq,Ib");
                    pfnAImpl = iemAImpl_pshuflw;
                    break;
                case IEM_OP_PRF_REPZ:
                    IEMOP_MNEMONIC("pshufhw Vdq,Wdq,Ib");
                    pfnAImpl = iemAImpl_pshufhw;
                    break;
                IEM_NOT_REACHED_DEFAULT_CASE_RET();
            }
            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
            {
                /*
                 * Register, register.
                 */
                uint8_t bEvil; IEM_OPCODE_GET_NEXT_U8(&bEvil);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint128_t *, pDst, 0);
                IEM_MC_ARG(uint128_t const *, pSrc, 1);
                IEM_MC_ARG_CONST(uint8_t, bEvilArg, /*=*/ bEvil, 2);
                IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
                IEM_MC_REF_XREG_U128(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_XREG_U128_CONST(pSrc, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_CALL_SSE_AIMPL_3(pfnAImpl, pDst, pSrc, bEvilArg);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            else
            {
                /*
                 * Register, memory.
                 */
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint128_t *, pDst, 0);
                IEM_MC_LOCAL(uint128_t, uSrc);
                IEM_MC_ARG_LOCAL_REF(uint128_t const *, pSrc, uSrc, 1);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
                /* The immediate comes after the displacement bytes. */
                uint8_t bEvil; IEM_OPCODE_GET_NEXT_U8(&bEvil);
                IEM_MC_ARG_CONST(uint8_t, bEvilArg, /*=*/ bEvil, 2);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();

                IEM_MC_FETCH_MEM_U128_ALIGN_SSE(uSrc, pIemCpu->iEffSeg, GCPtrEffSrc);
                IEM_MC_REF_XREG_U128(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_CALL_SSE_AIMPL_3(pfnAImpl, pDst, pSrc, bEvilArg);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            return VINF_SUCCESS;
        }

        case 0: /* MMX Extension */
            IEMOP_MNEMONIC("pshufw Pq,Qq,Ib");
            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
            {
                /*
                 * Register, register.
                 */
                uint8_t bEvil; IEM_OPCODE_GET_NEXT_U8(&bEvil);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint64_t *, pDst, 0);
                IEM_MC_ARG(uint64_t const *, pSrc, 1);
                IEM_MC_ARG_CONST(uint8_t, bEvilArg, /*=*/ bEvil, 2);
                IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT();
                IEM_MC_REF_MREG_U64(pDst, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
                IEM_MC_REF_MREG_U64_CONST(pSrc, bRm & X86_MODRM_RM_MASK);
                IEM_MC_CALL_MMX_AIMPL_3(iemAImpl_pshufw, pDst, pSrc, bEvilArg);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            else
            {
                /*
                 * Register, memory.
                 */
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint64_t *, pDst, 0);
                IEM_MC_LOCAL(uint64_t, uSrc);
                IEM_MC_ARG_LOCAL_REF(uint64_t const *, pSrc, uSrc, 1);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
                /* The immediate comes after the displacement bytes. */
                uint8_t bEvil; IEM_OPCODE_GET_NEXT_U8(&bEvil);
                IEM_MC_ARG_CONST(uint8_t, bEvilArg, /*=*/ bEvil, 2);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT();

                IEM_MC_FETCH_MEM_U64(uSrc, pIemCpu->iEffSeg, GCPtrEffSrc);
                IEM_MC_REF_MREG_U64(pDst, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
                IEM_MC_CALL_MMX_AIMPL_3(iemAImpl_pshufw, pDst, pSrc, bEvilArg);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            return VINF_SUCCESS;

        default:
            return IEMOP_RAISE_INVALID_OPCODE();
    }
}
2592
2593
/* Group 12 (0x0f 0x71) immediate shift stubs - MMX (Nq) and SSE (Udq) forms. */
/** Opcode 0x0f 0x71 11/2. */
FNIEMOP_STUB_1(iemOp_Grp12_psrlw_Nq_Ib, uint8_t, bRm);

/** Opcode 0x66 0x0f 0x71 11/2. */
FNIEMOP_STUB_1(iemOp_Grp12_psrlw_Udq_Ib, uint8_t, bRm);

/** Opcode 0x0f 0x71 11/4. */
FNIEMOP_STUB_1(iemOp_Grp12_psraw_Nq_Ib, uint8_t, bRm);

/** Opcode 0x66 0x0f 0x71 11/4. */
FNIEMOP_STUB_1(iemOp_Grp12_psraw_Udq_Ib, uint8_t, bRm);

/** Opcode 0x0f 0x71 11/6. */
FNIEMOP_STUB_1(iemOp_Grp12_psllw_Nq_Ib, uint8_t, bRm);

/** Opcode 0x66 0x0f 0x71 11/6. */
FNIEMOP_STUB_1(iemOp_Grp12_psllw_Udq_Ib, uint8_t, bRm);
2611
2612
/** Opcode 0x0f 0x71. Group 12 - word shifts by immediate; dispatches on the
 * ModR/M reg field and the mandatory prefix. Only register forms are valid. */
FNIEMOP_DEF(iemOp_Grp12)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    if ((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT))
        return IEMOP_RAISE_INVALID_OPCODE();
    switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
    {
        case 0: case 1: case 3: case 5: case 7:
            return IEMOP_RAISE_INVALID_OPCODE();
        case 2: /* psrlw */
            switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ))
            {
                case 0: return FNIEMOP_CALL_1(iemOp_Grp12_psrlw_Nq_Ib, bRm);
                case IEM_OP_PRF_SIZE_OP: return FNIEMOP_CALL_1(iemOp_Grp12_psrlw_Udq_Ib, bRm);
                default: return IEMOP_RAISE_INVALID_OPCODE();
            }
        case 4: /* psraw */
            switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ))
            {
                case 0: return FNIEMOP_CALL_1(iemOp_Grp12_psraw_Nq_Ib, bRm);
                case IEM_OP_PRF_SIZE_OP: return FNIEMOP_CALL_1(iemOp_Grp12_psraw_Udq_Ib, bRm);
                default: return IEMOP_RAISE_INVALID_OPCODE();
            }
        case 6: /* psllw */
            switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ))
            {
                case 0: return FNIEMOP_CALL_1(iemOp_Grp12_psllw_Nq_Ib, bRm);
                case IEM_OP_PRF_SIZE_OP: return FNIEMOP_CALL_1(iemOp_Grp12_psllw_Udq_Ib, bRm);
                default: return IEMOP_RAISE_INVALID_OPCODE();
            }
        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
2647
2648
/* Group 13 (0x0f 0x72) immediate shift stubs - MMX (Nq) and SSE (Udq) forms. */
/** Opcode 0x0f 0x72 11/2. */
FNIEMOP_STUB_1(iemOp_Grp13_psrld_Nq_Ib, uint8_t, bRm);

/** Opcode 0x66 0x0f 0x72 11/2. */
FNIEMOP_STUB_1(iemOp_Grp13_psrld_Udq_Ib, uint8_t, bRm);

/** Opcode 0x0f 0x72 11/4. */
FNIEMOP_STUB_1(iemOp_Grp13_psrad_Nq_Ib, uint8_t, bRm);

/** Opcode 0x66 0x0f 0x72 11/4. */
FNIEMOP_STUB_1(iemOp_Grp13_psrad_Udq_Ib, uint8_t, bRm);

/** Opcode 0x0f 0x72 11/6. */
FNIEMOP_STUB_1(iemOp_Grp13_pslld_Nq_Ib, uint8_t, bRm);

/** Opcode 0x66 0x0f 0x72 11/6. */
FNIEMOP_STUB_1(iemOp_Grp13_pslld_Udq_Ib, uint8_t, bRm);
2666
2667
/** Opcode 0x0f 0x72. Group 13 - dword shifts by immediate; dispatches on the
 * ModR/M reg field and the mandatory prefix. Only register forms are valid. */
FNIEMOP_DEF(iemOp_Grp13)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    if ((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT))
        return IEMOP_RAISE_INVALID_OPCODE();
    switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
    {
        case 0: case 1: case 3: case 5: case 7:
            return IEMOP_RAISE_INVALID_OPCODE();
        case 2: /* psrld */
            switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ))
            {
                case 0: return FNIEMOP_CALL_1(iemOp_Grp13_psrld_Nq_Ib, bRm);
                case IEM_OP_PRF_SIZE_OP: return FNIEMOP_CALL_1(iemOp_Grp13_psrld_Udq_Ib, bRm);
                default: return IEMOP_RAISE_INVALID_OPCODE();
            }
        case 4: /* psrad */
            switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ))
            {
                case 0: return FNIEMOP_CALL_1(iemOp_Grp13_psrad_Nq_Ib, bRm);
                case IEM_OP_PRF_SIZE_OP: return FNIEMOP_CALL_1(iemOp_Grp13_psrad_Udq_Ib, bRm);
                default: return IEMOP_RAISE_INVALID_OPCODE();
            }
        case 6: /* pslld */
            switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ))
            {
                case 0: return FNIEMOP_CALL_1(iemOp_Grp13_pslld_Nq_Ib, bRm);
                case IEM_OP_PRF_SIZE_OP: return FNIEMOP_CALL_1(iemOp_Grp13_pslld_Udq_Ib, bRm);
                default: return IEMOP_RAISE_INVALID_OPCODE();
            }
        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
2702
2703
/* Group 14 (0x0f 0x73) immediate shift stubs - the /3 and /7 byte-shift forms
   only exist with the 0x66 (SSE) prefix. */
/** Opcode 0x0f 0x73 11/2. */
FNIEMOP_STUB_1(iemOp_Grp14_psrlq_Nq_Ib, uint8_t, bRm);

/** Opcode 0x66 0x0f 0x73 11/2. */
FNIEMOP_STUB_1(iemOp_Grp14_psrlq_Udq_Ib, uint8_t, bRm);

/** Opcode 0x66 0x0f 0x73 11/3. */
FNIEMOP_STUB_1(iemOp_Grp14_psrldq_Udq_Ib, uint8_t, bRm); //NEXT

/** Opcode 0x0f 0x73 11/6. */
FNIEMOP_STUB_1(iemOp_Grp14_psllq_Nq_Ib, uint8_t, bRm);

/** Opcode 0x66 0x0f 0x73 11/6. */
FNIEMOP_STUB_1(iemOp_Grp14_psllq_Udq_Ib, uint8_t, bRm);

/** Opcode 0x66 0x0f 0x73 11/7. */
FNIEMOP_STUB_1(iemOp_Grp14_pslldq_Udq_Ib, uint8_t, bRm); //NEXT
2721
2722
/** Opcode 0x0f 0x73. Group 14 - qword/dqword shifts by immediate; dispatches
 * on the ModR/M reg field and the mandatory prefix. Register forms only. */
FNIEMOP_DEF(iemOp_Grp14)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    if ((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT))
        return IEMOP_RAISE_INVALID_OPCODE();
    switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
    {
        case 0: case 1: case 4: case 5:
            return IEMOP_RAISE_INVALID_OPCODE();
        case 2: /* psrlq */
            switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ))
            {
                case 0: return FNIEMOP_CALL_1(iemOp_Grp14_psrlq_Nq_Ib, bRm);
                case IEM_OP_PRF_SIZE_OP: return FNIEMOP_CALL_1(iemOp_Grp14_psrlq_Udq_Ib, bRm);
                default: return IEMOP_RAISE_INVALID_OPCODE();
            }
        case 3: /* psrldq - SSE only */
            switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ))
            {
                case IEM_OP_PRF_SIZE_OP: return FNIEMOP_CALL_1(iemOp_Grp14_psrldq_Udq_Ib, bRm);
                default: return IEMOP_RAISE_INVALID_OPCODE();
            }
        case 6: /* psllq */
            switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ))
            {
                case 0: return FNIEMOP_CALL_1(iemOp_Grp14_psllq_Nq_Ib, bRm);
                case IEM_OP_PRF_SIZE_OP: return FNIEMOP_CALL_1(iemOp_Grp14_psllq_Udq_Ib, bRm);
                default: return IEMOP_RAISE_INVALID_OPCODE();
            }
        case 7: /* pslldq - SSE only */
            switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ))
            {
                case IEM_OP_PRF_SIZE_OP: return FNIEMOP_CALL_1(iemOp_Grp14_pslldq_Udq_Ib, bRm);
                default: return IEMOP_RAISE_INVALID_OPCODE();
            }
        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
2762
2763
/**
 * Common worker for SSE2 and MMX instructions on the forms:
 *      pxxx    mm1, mm2/mem64
 *      pxxx    xmm1, xmm2/mem128
 *
 * Proper alignment of the 128-bit operand is enforced.
 * Exceptions type 4. SSE2 and MMX cpuid checks.
 */
FNIEMOP_DEF_1(iemOpCommonMmxSse2_FullFull_To_Full, PCIEMOPMEDIAF2, pImpl)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    /* The mandatory prefix selects the form: 0x66 = SSE, none = MMX;
       F2/F3 make the encoding invalid. */
    switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
    {
        case IEM_OP_PRF_SIZE_OP: /* SSE */
            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
            {
                /*
                 * Register, register.
                 */
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_BEGIN(2, 0);
                IEM_MC_ARG(uint128_t *, pDst, 0);
                IEM_MC_ARG(uint128_t const *, pSrc, 1);
                IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
                IEM_MC_REF_XREG_U128(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_XREG_U128_CONST(pSrc, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_CALL_SSE_AIMPL_2(pImpl->pfnU128, pDst, pSrc);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            else
            {
                /*
                 * Register, memory.
                 */
                IEM_MC_BEGIN(2, 2);
                IEM_MC_ARG(uint128_t *, pDst, 0);
                IEM_MC_LOCAL(uint128_t, uSrc);
                IEM_MC_ARG_LOCAL_REF(uint128_t const *, pSrc, uSrc, 1);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
                IEM_MC_FETCH_MEM_U128_ALIGN_SSE(uSrc, pIemCpu->iEffSeg, GCPtrEffSrc);

                IEM_MC_REF_XREG_U128(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_CALL_SSE_AIMPL_2(pImpl->pfnU128, pDst, pSrc);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            return VINF_SUCCESS;

        case 0: /* MMX */
            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
            {
                /*
                 * Register, register.
                 */
                /** @todo testcase: REX.B / REX.R and MMX register indexing. Ignored? */
                /** @todo testcase: REX.B / REX.R and segment register indexing. Ignored? */
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_BEGIN(2, 0);
                IEM_MC_ARG(uint64_t *, pDst, 0);
                IEM_MC_ARG(uint64_t const *, pSrc, 1);
                IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT();
                IEM_MC_REF_MREG_U64(pDst, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
                IEM_MC_REF_MREG_U64_CONST(pSrc, bRm & X86_MODRM_RM_MASK);
                IEM_MC_CALL_MMX_AIMPL_2(pImpl->pfnU64, pDst, pSrc);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            else
            {
                /*
                 * Register, memory.
                 */
                IEM_MC_BEGIN(2, 2);
                IEM_MC_ARG(uint64_t *, pDst, 0);
                IEM_MC_LOCAL(uint64_t, uSrc);
                IEM_MC_ARG_LOCAL_REF(uint64_t const *, pSrc, uSrc, 1);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT();
                IEM_MC_FETCH_MEM_U64(uSrc, pIemCpu->iEffSeg, GCPtrEffSrc);

                IEM_MC_REF_MREG_U64(pDst, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
                IEM_MC_CALL_MMX_AIMPL_2(pImpl->pfnU64, pDst, pSrc);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            return VINF_SUCCESS;

        default:
            return IEMOP_RAISE_INVALID_OPCODE();
    }
}
2865
2866
/** Opcode 0x0f 0x74. PCMPEQB - packed byte equality compare. */
FNIEMOP_DEF(iemOp_pcmpeqb_Pq_Qq__pcmpeqb_Vdq_Wdq)
{
    IEMOP_MNEMONIC("pcmpeqb");
    return FNIEMOP_CALL_1(iemOpCommonMmxSse2_FullFull_To_Full, &g_iemAImpl_pcmpeqb);
}
2873
2874
/** Opcode 0x0f 0x75. PCMPEQW - packed word equality compare. */
FNIEMOP_DEF(iemOp_pcmpeqw_Pq_Qq__pcmpeqw_Vdq_Wdq)
{
    IEMOP_MNEMONIC("pcmpeqw");
    return FNIEMOP_CALL_1(iemOpCommonMmxSse2_FullFull_To_Full, &g_iemAImpl_pcmpeqw);
}
2881
2882
/** Opcode 0x0f 0x76. PCMPEQD - packed dword equality compare.
 * (NOTE(review): the identifier says "pcmped" - typo for pcmpeqd; kept as-is
 * since the opcode table references this name.) */
FNIEMOP_DEF(iemOp_pcmped_Pq_Qq__pcmpeqd_Vdq_Wdq)
{
    IEMOP_MNEMONIC("pcmpeqd");
    return FNIEMOP_CALL_1(iemOpCommonMmxSse2_FullFull_To_Full, &g_iemAImpl_pcmpeqd);
}
2889
2890
/* Unimplemented opcodes 0x0f 0x77..0x7d (0x78/0x79 decode to #UD here). */
/** Opcode 0x0f 0x77. */
FNIEMOP_STUB(iemOp_emms);
/** Opcode 0x0f 0x78. */
FNIEMOP_UD_STUB(iemOp_vmread_AmdGrp17);
/** Opcode 0x0f 0x79. */
FNIEMOP_UD_STUB(iemOp_vmwrite);
/** Opcode 0x0f 0x7c. */
FNIEMOP_STUB(iemOp_haddpd_Vdp_Wpd__haddps_Vps_Wps);
/** Opcode 0x0f 0x7d. */
FNIEMOP_STUB(iemOp_hsubpd_Vpd_Wpd__hsubps_Vps_Wps);
2901
2902
/** Opcode 0x0f 0x7e. MOVD/MOVQ - store the low dword/qword of an MMX
 * (no prefix) or XMM (0x66 prefix) register to a GPR or memory.
 * REX.W selects qword vs dword operand size. */
FNIEMOP_DEF(iemOp_movd_q_Ey_Pd__movd_q_Ey_Vy__movq_Vq_Wq)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
    {
        case IEM_OP_PRF_SIZE_OP: /* SSE */
            IEMOP_MNEMONIC("movd/q Ed/q,Wd/q");
            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
            {
                /* greg, XMM */
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_BEGIN(0, 1);
                IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
                if (pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_REX_W)
                {
                    IEM_MC_LOCAL(uint64_t, u64Tmp);
                    IEM_MC_FETCH_XREG_U64(u64Tmp, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                    IEM_MC_STORE_GREG_U64((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u64Tmp);
                }
                else
                {
                    IEM_MC_LOCAL(uint32_t, u32Tmp);
                    IEM_MC_FETCH_XREG_U32(u32Tmp, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                    IEM_MC_STORE_GREG_U32((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u32Tmp);
                }
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            else
            {
                /* [mem], XMM */
                /* NOTE(review): GCPtrEffSrc is the *store destination* here
                   despite the 'Src' name; kept for consistency with the
                   effective-address calculation macro usage elsewhere. */
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
                IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 1);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                if (pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_REX_W)
                {
                    IEM_MC_LOCAL(uint64_t, u64Tmp);
                    IEM_MC_FETCH_XREG_U64(u64Tmp, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                    IEM_MC_STORE_MEM_U64(pIemCpu->iEffSeg, GCPtrEffSrc, u64Tmp);
                }
                else
                {
                    IEM_MC_LOCAL(uint32_t, u32Tmp);
                    IEM_MC_FETCH_XREG_U32(u32Tmp, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                    IEM_MC_STORE_MEM_U32(pIemCpu->iEffSeg, GCPtrEffSrc, u32Tmp);
                }
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            return VINF_SUCCESS;

        case 0: /* MMX */
            IEMOP_MNEMONIC("movq/d Ed/q,Pd/q");
            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
            {
                /* greg, MMX */
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_BEGIN(0, 1);
                IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT();
                if (pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_REX_W)
                {
                    IEM_MC_LOCAL(uint64_t, u64Tmp);
                    IEM_MC_FETCH_MREG_U64(u64Tmp, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
                    IEM_MC_STORE_GREG_U64((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u64Tmp);
                }
                else
                {
                    IEM_MC_LOCAL(uint32_t, u32Tmp);
                    IEM_MC_FETCH_MREG_U32(u32Tmp, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
                    IEM_MC_STORE_GREG_U32((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u32Tmp);
                }
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            else
            {
                /* [mem], MMX */
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
                IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT();
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 1);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                if (pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_REX_W)
                {
                    IEM_MC_LOCAL(uint64_t, u64Tmp);
                    IEM_MC_FETCH_MREG_U64(u64Tmp, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
                    IEM_MC_STORE_MEM_U64(pIemCpu->iEffSeg, GCPtrEffSrc, u64Tmp);
                }
                else
                {
                    IEM_MC_LOCAL(uint32_t, u32Tmp);
                    IEM_MC_FETCH_MREG_U32(u32Tmp, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
                    IEM_MC_STORE_MEM_U32(pIemCpu->iEffSeg, GCPtrEffSrc, u32Tmp);
                }
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            return VINF_SUCCESS;

        default:
            return IEMOP_RAISE_INVALID_OPCODE();
    }
}
3009
3010
/**
 * Opcode 0x0f 0x7f - movq Qq,Pq (MMX), movdqa Wdq,Vdq (66h), movdqu Wdq,Vdq (F3h).
 *
 * Stores an MMX or XMM register to a register or memory operand; the actual
 * instruction is selected by the operand-size / repeat prefixes.
 */
FNIEMOP_DEF(iemOp_movq_Qq_Pq__movq_movdqa_Wdq_Vdq__movdqu_Wdq_Vdq)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    bool fAligned = false;
    /* Dispatch on the mandatory prefix: 66h = movdqa, F3h = movdqu, none = MMX movq. */
    switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
    {
        case IEM_OP_PRF_SIZE_OP: /* SSE aligned */
            fAligned = true;
            /* fall thru - aligned and unaligned variants share the code below. */
        case IEM_OP_PRF_REPZ: /* SSE unaligned */
            if (fAligned)
                IEMOP_MNEMONIC("movdqa Wdq,Vdq");
            else
                IEMOP_MNEMONIC("movdqu Wdq,Vdq");
            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
            {
                /*
                 * Register, register.
                 */
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint128_t, u128Tmp);
                IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
                IEM_MC_FETCH_XREG_U128(u128Tmp, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_STORE_XREG_U128((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u128Tmp);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            else
            {
                /*
                 * Register, memory.
                 */
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint128_t, u128Tmp);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
                IEM_MC_FETCH_XREG_U128(u128Tmp, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                /* movdqa uses the alignment-checking store; movdqu the plain one. */
                if (fAligned)
                    IEM_MC_STORE_MEM_U128_ALIGN_SSE(pIemCpu->iEffSeg, GCPtrEffSrc, u128Tmp);
                else
                    IEM_MC_STORE_MEM_U128(pIemCpu->iEffSeg, GCPtrEffSrc, u128Tmp);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            return VINF_SUCCESS;

        case 0: /* MMX */
            IEMOP_MNEMONIC("movq Qq,Pq");

            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
            {
                /*
                 * Register, register.
                 */
                /** @todo testcase: REX.B / REX.R and MMX register indexing. Ignored? */
                /** @todo testcase: REX.B / REX.R and segment register indexing. Ignored? */
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint64_t, u64Tmp);
                IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT();
                IEM_MC_FETCH_MREG_U64(u64Tmp, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
                IEM_MC_STORE_MREG_U64(bRm & X86_MODRM_RM_MASK, u64Tmp);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            else
            {
                /*
                 * Register, memory.
                 */
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint64_t, u64Tmp);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT();
                IEM_MC_FETCH_MREG_U64(u64Tmp, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
                IEM_MC_STORE_MEM_U64(pIemCpu->iEffSeg, GCPtrEffSrc, u64Tmp);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            return VINF_SUCCESS;

        default:
            /* Any other prefix combination (e.g. F2h) is an invalid encoding. */
            return IEMOP_RAISE_INVALID_OPCODE();
    }
}
3105
3106
3107
/**
 * Opcode 0x0f 0x80 - jo Jv.
 * Near jump with a signed 16/32-bit displacement, taken when OF is set.
 */
FNIEMOP_DEF(iemOp_jo_Jv)
{
    IEMOP_MNEMONIC("jo Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); /* operand size defaults to 64-bit in long mode */
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        /* 16-bit operand size: signed 16-bit displacement. */
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        /* 32/64-bit operand size: signed 32-bit displacement. */
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
3141
3142
/**
 * Opcode 0x0f 0x81 - jno Jv.
 * Near jump with a signed 16/32-bit displacement, taken when OF is clear
 * (condition tested inverted: the jump sits in the else branch).
 */
FNIEMOP_DEF(iemOp_jno_Jv)
{
    IEMOP_MNEMONIC("jno Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); /* operand size defaults to 64-bit in long mode */
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        /* 16-bit operand size: signed 16-bit displacement. */
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        /* 32/64-bit operand size: signed 32-bit displacement. */
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
3176
3177
/**
 * Opcode 0x0f 0x82 - jc/jb/jnae Jv.
 * Near jump with a signed 16/32-bit displacement, taken when CF is set.
 */
FNIEMOP_DEF(iemOp_jc_Jv)
{
    IEMOP_MNEMONIC("jc/jb/jnae Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); /* operand size defaults to 64-bit in long mode */
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        /* 16-bit operand size: signed 16-bit displacement. */
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        /* 32/64-bit operand size: signed 32-bit displacement. */
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
3211
3212
/**
 * Opcode 0x0f 0x83 - jnc/jnb/jae Jv.
 * Near jump with a signed 16/32-bit displacement, taken when CF is clear
 * (condition tested inverted: the jump sits in the else branch).
 */
FNIEMOP_DEF(iemOp_jnc_Jv)
{
    IEMOP_MNEMONIC("jnc/jnb/jae Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); /* operand size defaults to 64-bit in long mode */
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        /* 16-bit operand size: signed 16-bit displacement. */
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        /* 32/64-bit operand size: signed 32-bit displacement. */
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
3246
3247
/**
 * Opcode 0x0f 0x84 - je/jz Jv.
 * Near jump with a signed 16/32-bit displacement, taken when ZF is set.
 */
FNIEMOP_DEF(iemOp_je_Jv)
{
    IEMOP_MNEMONIC("je/jz Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); /* operand size defaults to 64-bit in long mode */
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        /* 16-bit operand size: signed 16-bit displacement. */
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        /* 32/64-bit operand size: signed 32-bit displacement. */
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
3281
3282
/**
 * Opcode 0x0f 0x85 - jne/jnz Jv.
 * Near jump with a signed 16/32-bit displacement, taken when ZF is clear
 * (condition tested inverted: the jump sits in the else branch).
 */
FNIEMOP_DEF(iemOp_jne_Jv)
{
    IEMOP_MNEMONIC("jne/jnz Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); /* operand size defaults to 64-bit in long mode */
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        /* 16-bit operand size: signed 16-bit displacement. */
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        /* 32/64-bit operand size: signed 32-bit displacement. */
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
3316
3317
/**
 * Opcode 0x0f 0x86 - jbe/jna Jv.
 * Near jump with a signed 16/32-bit displacement, taken when CF or ZF is set.
 */
FNIEMOP_DEF(iemOp_jbe_Jv)
{
    IEMOP_MNEMONIC("jbe/jna Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); /* operand size defaults to 64-bit in long mode */
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        /* 16-bit operand size: signed 16-bit displacement. */
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        /* 32/64-bit operand size: signed 32-bit displacement. */
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
3351
3352
/**
 * Opcode 0x0f 0x87 - jnbe/ja Jv.
 * Near jump with a signed 16/32-bit displacement, taken when both CF and ZF
 * are clear (condition tested inverted: the jump sits in the else branch).
 */
FNIEMOP_DEF(iemOp_jnbe_Jv)
{
    IEMOP_MNEMONIC("jnbe/ja Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); /* operand size defaults to 64-bit in long mode */
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        /* 16-bit operand size: signed 16-bit displacement. */
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        /* 32/64-bit operand size: signed 32-bit displacement. */
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
3386
3387
/**
 * Opcode 0x0f 0x88 - js Jv.
 * Near jump with a signed 16/32-bit displacement, taken when SF is set.
 */
FNIEMOP_DEF(iemOp_js_Jv)
{
    IEMOP_MNEMONIC("js Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); /* operand size defaults to 64-bit in long mode */
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        /* 16-bit operand size: signed 16-bit displacement. */
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        /* 32/64-bit operand size: signed 32-bit displacement. */
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
3421
3422
/**
 * Opcode 0x0f 0x89 - jns Jv.
 * Near jump with a signed 16/32-bit displacement, taken when SF is clear
 * (condition tested inverted: the jump sits in the else branch).
 */
FNIEMOP_DEF(iemOp_jns_Jv)
{
    IEMOP_MNEMONIC("jns Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); /* operand size defaults to 64-bit in long mode */
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        /* 16-bit operand size: signed 16-bit displacement. */
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        /* 32/64-bit operand size: signed 32-bit displacement. */
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
3456
3457
/**
 * Opcode 0x0f 0x8a - jp/jpe Jv.
 * Near jump with a signed 16/32-bit displacement, taken when PF is set.
 */
FNIEMOP_DEF(iemOp_jp_Jv)
{
    IEMOP_MNEMONIC("jp Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); /* operand size defaults to 64-bit in long mode */
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        /* 16-bit operand size: signed 16-bit displacement. */
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        /* 32/64-bit operand size: signed 32-bit displacement. */
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
3491
3492
3493/** Opcode 0x0f 0x8b. */
3494FNIEMOP_DEF(iemOp_jnp_Jv)
3495{
3496 IEMOP_MNEMONIC("jo Jv");
3497 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
3498 if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
3499 {
3500 int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
3501 IEMOP_HLP_NO_LOCK_PREFIX();
3502
3503 IEM_MC_BEGIN(0, 0);
3504 IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
3505 IEM_MC_ADVANCE_RIP();
3506 } IEM_MC_ELSE() {
3507 IEM_MC_REL_JMP_S16(i16Imm);
3508 } IEM_MC_ENDIF();
3509 IEM_MC_END();
3510 }
3511 else
3512 {
3513 int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
3514 IEMOP_HLP_NO_LOCK_PREFIX();
3515
3516 IEM_MC_BEGIN(0, 0);
3517 IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
3518 IEM_MC_ADVANCE_RIP();
3519 } IEM_MC_ELSE() {
3520 IEM_MC_REL_JMP_S32(i32Imm);
3521 } IEM_MC_ENDIF();
3522 IEM_MC_END();
3523 }
3524 return VINF_SUCCESS;
3525}
3526
3527
/**
 * Opcode 0x0f 0x8c - jl/jnge Jv.
 * Near jump with a signed 16/32-bit displacement, taken when SF != OF.
 */
FNIEMOP_DEF(iemOp_jl_Jv)
{
    IEMOP_MNEMONIC("jl/jnge Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); /* operand size defaults to 64-bit in long mode */
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        /* 16-bit operand size: signed 16-bit displacement. */
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        /* 32/64-bit operand size: signed 32-bit displacement. */
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
3561
3562
/**
 * Opcode 0x0f 0x8d - jnl/jge Jv.
 * Near jump with a signed 16/32-bit displacement, taken when SF == OF
 * (condition tested inverted: the jump sits in the else branch).
 */
FNIEMOP_DEF(iemOp_jnl_Jv)
{
    IEMOP_MNEMONIC("jnl/jge Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); /* operand size defaults to 64-bit in long mode */
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        /* 16-bit operand size: signed 16-bit displacement. */
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        /* 32/64-bit operand size: signed 32-bit displacement. */
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
3596
3597
/**
 * Opcode 0x0f 0x8e - jle/jng Jv.
 * Near jump with a signed 16/32-bit displacement, taken when ZF is set
 * or SF != OF.
 */
FNIEMOP_DEF(iemOp_jle_Jv)
{
    IEMOP_MNEMONIC("jle/jng Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); /* operand size defaults to 64-bit in long mode */
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        /* 16-bit operand size: signed 16-bit displacement. */
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        /* 32/64-bit operand size: signed 32-bit displacement. */
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
3631
3632
/**
 * Opcode 0x0f 0x8f - jnle/jg Jv.
 * Near jump with a signed 16/32-bit displacement, taken when ZF is clear
 * and SF == OF (condition tested inverted: the jump sits in the else branch).
 */
FNIEMOP_DEF(iemOp_jnle_Jv)
{
    IEMOP_MNEMONIC("jnle/jg Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); /* operand size defaults to 64-bit in long mode */
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        /* 16-bit operand size: signed 16-bit displacement. */
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        /* 32/64-bit operand size: signed 32-bit displacement. */
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
3666
3667
/**
 * Opcode 0x0f 0x90 - seto Eb.
 * Stores 1 in the byte register/memory operand when OF is set, 0 otherwise.
 */
FNIEMOP_DEF(iemOp_seto_Eb)
{
    IEMOP_MNEMONIC("seto Eb");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */

    /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
     * any way. AMD says it's "unused", whatever that means. We're
     * ignoring for now. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register target */
        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory target */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
3706
3707
/**
 * Opcode 0x0f 0x91 - setno Eb.
 * Stores 1 in the byte register/memory operand when OF is clear, 0 otherwise
 * (condition tested inverted: the 1 is stored in the else branch).
 */
FNIEMOP_DEF(iemOp_setno_Eb)
{
    IEMOP_MNEMONIC("setno Eb");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */

    /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
     * any way. AMD says it's "unused", whatever that means. We're
     * ignoring for now. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register target */
        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory target */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
3746
3747
/**
 * Opcode 0x0f 0x92 - setc/setb/setnae Eb.
 * Stores 1 in the byte register/memory operand when CF is set, 0 otherwise.
 */
FNIEMOP_DEF(iemOp_setc_Eb)
{
    IEMOP_MNEMONIC("setc Eb");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */

    /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
     * any way. AMD says it's "unused", whatever that means. We're
     * ignoring for now. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register target */
        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory target */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
3786
3787
/**
 * Opcode 0x0f 0x93 - setnc/setnb/setae Eb.
 * Stores 1 in the byte register/memory operand when CF is clear, 0 otherwise
 * (condition tested inverted: the 1 is stored in the else branch).
 */
FNIEMOP_DEF(iemOp_setnc_Eb)
{
    IEMOP_MNEMONIC("setnc Eb");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */

    /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
     * any way. AMD says it's "unused", whatever that means. We're
     * ignoring for now. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register target */
        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory target */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
3826
3827
/**
 * Opcode 0x0f 0x94 - sete/setz Eb.
 * Stores 1 in the byte register/memory operand when ZF is set, 0 otherwise.
 */
FNIEMOP_DEF(iemOp_sete_Eb)
{
    IEMOP_MNEMONIC("sete Eb");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */

    /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
     * any way. AMD says it's "unused", whatever that means. We're
     * ignoring for now. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register target */
        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory target */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
3866
3867
/**
 * Opcode 0x0f 0x95 - setne/setnz Eb.
 * Stores 1 in the byte register/memory operand when ZF is clear, 0 otherwise
 * (condition tested inverted: the 1 is stored in the else branch).
 */
FNIEMOP_DEF(iemOp_setne_Eb)
{
    IEMOP_MNEMONIC("setne Eb");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */

    /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
     * any way. AMD says it's "unused", whatever that means. We're
     * ignoring for now. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register target */
        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory target */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
3906
3907
/**
 * Opcode 0x0f 0x96 - setbe/setna Eb.
 * Stores 1 in the byte register/memory operand when CF or ZF is set,
 * 0 otherwise.
 */
FNIEMOP_DEF(iemOp_setbe_Eb)
{
    IEMOP_MNEMONIC("setbe Eb");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */

    /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
     * any way. AMD says it's "unused", whatever that means. We're
     * ignoring for now. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register target */
        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory target */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
3946
3947
/**
 * Opcode 0x0f 0x97 - setnbe/seta Eb.
 * Stores 1 in the byte register/memory operand when both CF and ZF are clear,
 * 0 otherwise (condition tested inverted: the 1 is stored in the else branch).
 */
FNIEMOP_DEF(iemOp_setnbe_Eb)
{
    IEMOP_MNEMONIC("setnbe Eb");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */

    /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
     * any way. AMD says it's "unused", whatever that means. We're
     * ignoring for now. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register target */
        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory target */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
3986
3987
/**
 * Opcode 0x0f 0x98 - sets Eb.
 * Stores 1 in the byte register/memory operand when SF is set, 0 otherwise.
 */
FNIEMOP_DEF(iemOp_sets_Eb)
{
    IEMOP_MNEMONIC("sets Eb");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */

    /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
     * any way. AMD says it's "unused", whatever that means. We're
     * ignoring for now. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register target */
        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory target */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
4026
4027
/**
 * Opcode 0x0f 0x99 - setns Eb.
 * Stores 1 in the byte register/memory operand when SF is clear, 0 otherwise
 * (condition tested inverted: the 1 is stored in the else branch).
 */
FNIEMOP_DEF(iemOp_setns_Eb)
{
    IEMOP_MNEMONIC("setns Eb");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */

    /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
     * any way. AMD says it's "unused", whatever that means. We're
     * ignoring for now. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register target */
        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory target */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
4066
4067
4068/** Opcode 0x0f 0x9a. */
4069FNIEMOP_DEF(iemOp_setp_Eb)
4070{
4071 IEMOP_MNEMONIC("setnp Eb");
4072 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
4073 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
4074
4075 /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
4076 * any way. AMD says it's "unused", whatever that means. We're
4077 * ignoring for now. */
4078 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
4079 {
4080 /* register target */
4081 IEM_MC_BEGIN(0, 0);
4082 IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
4083 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
4084 } IEM_MC_ELSE() {
4085 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
4086 } IEM_MC_ENDIF();
4087 IEM_MC_ADVANCE_RIP();
4088 IEM_MC_END();
4089 }
4090 else
4091 {
4092 /* memory target */
4093 IEM_MC_BEGIN(0, 1);
4094 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
4095 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
4096 IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
4097 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
4098 } IEM_MC_ELSE() {
4099 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
4100 } IEM_MC_ENDIF();
4101 IEM_MC_ADVANCE_RIP();
4102 IEM_MC_END();
4103 }
4104 return VINF_SUCCESS;
4105}
4106
4107
4108/** Opcode 0x0f 0x9b. */
4109FNIEMOP_DEF(iemOp_setnp_Eb)
4110{
4111 IEMOP_MNEMONIC("setnp Eb");
4112 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
4113 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
4114
4115 /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
4116 * any way. AMD says it's "unused", whatever that means. We're
4117 * ignoring for now. */
4118 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
4119 {
4120 /* register target */
4121 IEM_MC_BEGIN(0, 0);
4122 IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
4123 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
4124 } IEM_MC_ELSE() {
4125 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
4126 } IEM_MC_ENDIF();
4127 IEM_MC_ADVANCE_RIP();
4128 IEM_MC_END();
4129 }
4130 else
4131 {
4132 /* memory target */
4133 IEM_MC_BEGIN(0, 1);
4134 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
4135 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
4136 IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
4137 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
4138 } IEM_MC_ELSE() {
4139 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
4140 } IEM_MC_ENDIF();
4141 IEM_MC_ADVANCE_RIP();
4142 IEM_MC_END();
4143 }
4144 return VINF_SUCCESS;
4145}
4146
4147
4148/** Opcode 0x0f 0x9c. */
4149FNIEMOP_DEF(iemOp_setl_Eb)
4150{
4151 IEMOP_MNEMONIC("setl Eb");
4152 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
4153 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
4154
4155 /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
4156 * any way. AMD says it's "unused", whatever that means. We're
4157 * ignoring for now. */
4158 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
4159 {
4160 /* register target */
4161 IEM_MC_BEGIN(0, 0);
4162 IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
4163 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
4164 } IEM_MC_ELSE() {
4165 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
4166 } IEM_MC_ENDIF();
4167 IEM_MC_ADVANCE_RIP();
4168 IEM_MC_END();
4169 }
4170 else
4171 {
4172 /* memory target */
4173 IEM_MC_BEGIN(0, 1);
4174 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
4175 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
4176 IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
4177 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
4178 } IEM_MC_ELSE() {
4179 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
4180 } IEM_MC_ENDIF();
4181 IEM_MC_ADVANCE_RIP();
4182 IEM_MC_END();
4183 }
4184 return VINF_SUCCESS;
4185}
4186
4187
4188/** Opcode 0x0f 0x9d. */
4189FNIEMOP_DEF(iemOp_setnl_Eb)
4190{
4191 IEMOP_MNEMONIC("setnl Eb");
4192 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
4193 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
4194
4195 /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
4196 * any way. AMD says it's "unused", whatever that means. We're
4197 * ignoring for now. */
4198 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
4199 {
4200 /* register target */
4201 IEM_MC_BEGIN(0, 0);
4202 IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
4203 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
4204 } IEM_MC_ELSE() {
4205 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
4206 } IEM_MC_ENDIF();
4207 IEM_MC_ADVANCE_RIP();
4208 IEM_MC_END();
4209 }
4210 else
4211 {
4212 /* memory target */
4213 IEM_MC_BEGIN(0, 1);
4214 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
4215 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
4216 IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
4217 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
4218 } IEM_MC_ELSE() {
4219 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
4220 } IEM_MC_ENDIF();
4221 IEM_MC_ADVANCE_RIP();
4222 IEM_MC_END();
4223 }
4224 return VINF_SUCCESS;
4225}
4226
4227
4228/** Opcode 0x0f 0x9e. */
4229FNIEMOP_DEF(iemOp_setle_Eb)
4230{
4231 IEMOP_MNEMONIC("setle Eb");
4232 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
4233 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
4234
4235 /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
4236 * any way. AMD says it's "unused", whatever that means. We're
4237 * ignoring for now. */
4238 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
4239 {
4240 /* register target */
4241 IEM_MC_BEGIN(0, 0);
4242 IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
4243 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
4244 } IEM_MC_ELSE() {
4245 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
4246 } IEM_MC_ENDIF();
4247 IEM_MC_ADVANCE_RIP();
4248 IEM_MC_END();
4249 }
4250 else
4251 {
4252 /* memory target */
4253 IEM_MC_BEGIN(0, 1);
4254 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
4255 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
4256 IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
4257 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
4258 } IEM_MC_ELSE() {
4259 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
4260 } IEM_MC_ENDIF();
4261 IEM_MC_ADVANCE_RIP();
4262 IEM_MC_END();
4263 }
4264 return VINF_SUCCESS;
4265}
4266
4267
4268/** Opcode 0x0f 0x9f. */
4269FNIEMOP_DEF(iemOp_setnle_Eb)
4270{
4271 IEMOP_MNEMONIC("setnle Eb");
4272 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
4273 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
4274
4275 /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
4276 * any way. AMD says it's "unused", whatever that means. We're
4277 * ignoring for now. */
4278 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
4279 {
4280 /* register target */
4281 IEM_MC_BEGIN(0, 0);
4282 IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
4283 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
4284 } IEM_MC_ELSE() {
4285 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
4286 } IEM_MC_ENDIF();
4287 IEM_MC_ADVANCE_RIP();
4288 IEM_MC_END();
4289 }
4290 else
4291 {
4292 /* memory target */
4293 IEM_MC_BEGIN(0, 1);
4294 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
4295 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
4296 IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
4297 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
4298 } IEM_MC_ELSE() {
4299 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
4300 } IEM_MC_ENDIF();
4301 IEM_MC_ADVANCE_RIP();
4302 IEM_MC_END();
4303 }
4304 return VINF_SUCCESS;
4305}
4306
4307
4308/**
4309 * Common 'push segment-register' helper.
4310 */
4311FNIEMOP_DEF_1(iemOpCommonPushSReg, uint8_t, iReg)
4312{
4313 IEMOP_HLP_NO_LOCK_PREFIX();
4314 if (iReg < X86_SREG_FS)
4315 IEMOP_HLP_NO_64BIT();
4316 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
4317
4318 switch (pIemCpu->enmEffOpSize)
4319 {
4320 case IEMMODE_16BIT:
4321 IEM_MC_BEGIN(0, 1);
4322 IEM_MC_LOCAL(uint16_t, u16Value);
4323 IEM_MC_FETCH_SREG_U16(u16Value, iReg);
4324 IEM_MC_PUSH_U16(u16Value);
4325 IEM_MC_ADVANCE_RIP();
4326 IEM_MC_END();
4327 break;
4328
4329 case IEMMODE_32BIT:
4330 IEM_MC_BEGIN(0, 1);
4331 IEM_MC_LOCAL(uint32_t, u32Value);
4332 IEM_MC_FETCH_SREG_ZX_U32(u32Value, iReg);
4333 IEM_MC_PUSH_U32_SREG(u32Value);
4334 IEM_MC_ADVANCE_RIP();
4335 IEM_MC_END();
4336 break;
4337
4338 case IEMMODE_64BIT:
4339 IEM_MC_BEGIN(0, 1);
4340 IEM_MC_LOCAL(uint64_t, u64Value);
4341 IEM_MC_FETCH_SREG_ZX_U64(u64Value, iReg);
4342 IEM_MC_PUSH_U64(u64Value);
4343 IEM_MC_ADVANCE_RIP();
4344 IEM_MC_END();
4345 break;
4346 }
4347
4348 return VINF_SUCCESS;
4349}
4350
4351
/** Opcode 0x0f 0xa0 - push fs. */
FNIEMOP_DEF(iemOp_push_fs)
{
    IEMOP_MNEMONIC("push fs");
    IEMOP_HLP_NO_LOCK_PREFIX();
    return FNIEMOP_CALL_1(iemOpCommonPushSReg, X86_SREG_FS);
}
4359
4360
/** Opcode 0x0f 0xa1 - pop fs.  Deferred to C implementation (segment loads
 *  involve descriptor table access and exception checks). */
FNIEMOP_DEF(iemOp_pop_fs)
{
    IEMOP_MNEMONIC("pop fs");
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_pop_Sreg, X86_SREG_FS, pIemCpu->enmEffOpSize);
}
4368
4369
/** Opcode 0x0f 0xa2 - cpuid.  Deferred to C implementation. */
FNIEMOP_DEF(iemOp_cpuid)
{
    IEMOP_MNEMONIC("cpuid");
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_cpuid);
}
4377
4378
4379/**
4380 * Common worker for iemOp_bt_Ev_Gv, iemOp_btc_Ev_Gv, iemOp_btr_Ev_Gv and
4381 * iemOp_bts_Ev_Gv.
4382 */
4383FNIEMOP_DEF_1(iemOpCommonBit_Ev_Gv, PCIEMOPBINSIZES, pImpl)
4384{
4385 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
4386 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);
4387
4388 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
4389 {
4390 /* register destination. */
4391 IEMOP_HLP_NO_LOCK_PREFIX();
4392 switch (pIemCpu->enmEffOpSize)
4393 {
4394 case IEMMODE_16BIT:
4395 IEM_MC_BEGIN(3, 0);
4396 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
4397 IEM_MC_ARG(uint16_t, u16Src, 1);
4398 IEM_MC_ARG(uint32_t *, pEFlags, 2);
4399
4400 IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
4401 IEM_MC_AND_LOCAL_U16(u16Src, 0xf);
4402 IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
4403 IEM_MC_REF_EFLAGS(pEFlags);
4404 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
4405
4406 IEM_MC_ADVANCE_RIP();
4407 IEM_MC_END();
4408 return VINF_SUCCESS;
4409
4410 case IEMMODE_32BIT:
4411 IEM_MC_BEGIN(3, 0);
4412 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
4413 IEM_MC_ARG(uint32_t, u32Src, 1);
4414 IEM_MC_ARG(uint32_t *, pEFlags, 2);
4415
4416 IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
4417 IEM_MC_AND_LOCAL_U32(u32Src, 0x1f);
4418 IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
4419 IEM_MC_REF_EFLAGS(pEFlags);
4420 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
4421
4422 IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);
4423 IEM_MC_ADVANCE_RIP();
4424 IEM_MC_END();
4425 return VINF_SUCCESS;
4426
4427 case IEMMODE_64BIT:
4428 IEM_MC_BEGIN(3, 0);
4429 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
4430 IEM_MC_ARG(uint64_t, u64Src, 1);
4431 IEM_MC_ARG(uint32_t *, pEFlags, 2);
4432
4433 IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
4434 IEM_MC_AND_LOCAL_U64(u64Src, 0x3f);
4435 IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
4436 IEM_MC_REF_EFLAGS(pEFlags);
4437 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
4438
4439 IEM_MC_ADVANCE_RIP();
4440 IEM_MC_END();
4441 return VINF_SUCCESS;
4442
4443 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4444 }
4445 }
4446 else
4447 {
4448 /* memory destination. */
4449
4450 uint32_t fAccess;
4451 if (pImpl->pfnLockedU16)
4452 fAccess = IEM_ACCESS_DATA_RW;
4453 else /* BT */
4454 {
4455 IEMOP_HLP_NO_LOCK_PREFIX();
4456 fAccess = IEM_ACCESS_DATA_R;
4457 }
4458
4459 /** @todo test negative bit offsets! */
4460 switch (pIemCpu->enmEffOpSize)
4461 {
4462 case IEMMODE_16BIT:
4463 IEM_MC_BEGIN(3, 2);
4464 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
4465 IEM_MC_ARG(uint16_t, u16Src, 1);
4466 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
4467 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
4468 IEM_MC_LOCAL(int16_t, i16AddrAdj);
4469
4470 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
4471 IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
4472 IEM_MC_ASSIGN(i16AddrAdj, u16Src);
4473 IEM_MC_AND_ARG_U16(u16Src, 0x0f);
4474 IEM_MC_SAR_LOCAL_S16(i16AddrAdj, 4);
4475 IEM_MC_SAR_LOCAL_S16(i16AddrAdj, 1);
4476 IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(GCPtrEffDst, i16AddrAdj);
4477 IEM_MC_FETCH_EFLAGS(EFlags);
4478
4479 IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
4480 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
4481 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
4482 else
4483 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU16, pu16Dst, u16Src, pEFlags);
4484 IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
4485
4486 IEM_MC_COMMIT_EFLAGS(EFlags);
4487 IEM_MC_ADVANCE_RIP();
4488 IEM_MC_END();
4489 return VINF_SUCCESS;
4490
4491 case IEMMODE_32BIT:
4492 IEM_MC_BEGIN(3, 2);
4493 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
4494 IEM_MC_ARG(uint32_t, u32Src, 1);
4495 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
4496 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
4497 IEM_MC_LOCAL(int32_t, i32AddrAdj);
4498
4499 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
4500 IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
4501 IEM_MC_ASSIGN(i32AddrAdj, u32Src);
4502 IEM_MC_AND_ARG_U32(u32Src, 0x1f);
4503 IEM_MC_SAR_LOCAL_S32(i32AddrAdj, 5);
4504 IEM_MC_SHL_LOCAL_S32(i32AddrAdj, 2);
4505 IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(GCPtrEffDst, i32AddrAdj);
4506 IEM_MC_FETCH_EFLAGS(EFlags);
4507
4508 IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
4509 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
4510 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
4511 else
4512 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU32, pu32Dst, u32Src, pEFlags);
4513 IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
4514
4515 IEM_MC_COMMIT_EFLAGS(EFlags);
4516 IEM_MC_ADVANCE_RIP();
4517 IEM_MC_END();
4518 return VINF_SUCCESS;
4519
4520 case IEMMODE_64BIT:
4521 IEM_MC_BEGIN(3, 2);
4522 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
4523 IEM_MC_ARG(uint64_t, u64Src, 1);
4524 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
4525 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
4526 IEM_MC_LOCAL(int64_t, i64AddrAdj);
4527
4528 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
4529 IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
4530 IEM_MC_ASSIGN(i64AddrAdj, u64Src);
4531 IEM_MC_AND_ARG_U64(u64Src, 0x3f);
4532 IEM_MC_SAR_LOCAL_S64(i64AddrAdj, 6);
4533 IEM_MC_SHL_LOCAL_S64(i64AddrAdj, 3);
4534 IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(GCPtrEffDst, i64AddrAdj);
4535 IEM_MC_FETCH_EFLAGS(EFlags);
4536
4537 IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
4538 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
4539 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
4540 else
4541 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU64, pu64Dst, u64Src, pEFlags);
4542 IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, IEM_ACCESS_DATA_RW);
4543
4544 IEM_MC_COMMIT_EFLAGS(EFlags);
4545 IEM_MC_ADVANCE_RIP();
4546 IEM_MC_END();
4547 return VINF_SUCCESS;
4548
4549 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4550 }
4551 }
4552}
4553
4554
4555/** Opcode 0x0f 0xa3. */
4556FNIEMOP_DEF(iemOp_bt_Ev_Gv)
4557{
4558 IEMOP_MNEMONIC("bt Gv,Gv");
4559 return FNIEMOP_CALL_1(iemOpCommonBit_Ev_Gv, &g_iemAImpl_bt);
4560}
4561
4562
4563/**
4564 * Common worker for iemOp_shrd_Ev_Gv_Ib and iemOp_shld_Ev_Gv_Ib.
4565 */
4566FNIEMOP_DEF_1(iemOpCommonShldShrd_Ib, PCIEMOPSHIFTDBLSIZES, pImpl)
4567{
4568 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
4569 IEMOP_HLP_NO_LOCK_PREFIX();
4570 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF | X86_EFL_OF);
4571
4572 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
4573 {
4574 uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift);
4575 IEMOP_HLP_NO_LOCK_PREFIX();
4576
4577 switch (pIemCpu->enmEffOpSize)
4578 {
4579 case IEMMODE_16BIT:
4580 IEM_MC_BEGIN(4, 0);
4581 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
4582 IEM_MC_ARG(uint16_t, u16Src, 1);
4583 IEM_MC_ARG_CONST(uint8_t, cShiftArg, /*=*/cShift, 2);
4584 IEM_MC_ARG(uint32_t *, pEFlags, 3);
4585
4586 IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
4587 IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
4588 IEM_MC_REF_EFLAGS(pEFlags);
4589 IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU16, pu16Dst, u16Src, cShiftArg, pEFlags);
4590
4591 IEM_MC_ADVANCE_RIP();
4592 IEM_MC_END();
4593 return VINF_SUCCESS;
4594
4595 case IEMMODE_32BIT:
4596 IEM_MC_BEGIN(4, 0);
4597 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
4598 IEM_MC_ARG(uint32_t, u32Src, 1);
4599 IEM_MC_ARG_CONST(uint8_t, cShiftArg, /*=*/cShift, 2);
4600 IEM_MC_ARG(uint32_t *, pEFlags, 3);
4601
4602 IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
4603 IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
4604 IEM_MC_REF_EFLAGS(pEFlags);
4605 IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU32, pu32Dst, u32Src, cShiftArg, pEFlags);
4606
4607 IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);
4608 IEM_MC_ADVANCE_RIP();
4609 IEM_MC_END();
4610 return VINF_SUCCESS;
4611
4612 case IEMMODE_64BIT:
4613 IEM_MC_BEGIN(4, 0);
4614 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
4615 IEM_MC_ARG(uint64_t, u64Src, 1);
4616 IEM_MC_ARG_CONST(uint8_t, cShiftArg, /*=*/cShift, 2);
4617 IEM_MC_ARG(uint32_t *, pEFlags, 3);
4618
4619 IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
4620 IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
4621 IEM_MC_REF_EFLAGS(pEFlags);
4622 IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU64, pu64Dst, u64Src, cShiftArg, pEFlags);
4623
4624 IEM_MC_ADVANCE_RIP();
4625 IEM_MC_END();
4626 return VINF_SUCCESS;
4627
4628 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4629 }
4630 }
4631 else
4632 {
4633 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
4634
4635 switch (pIemCpu->enmEffOpSize)
4636 {
4637 case IEMMODE_16BIT:
4638 IEM_MC_BEGIN(4, 2);
4639 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
4640 IEM_MC_ARG(uint16_t, u16Src, 1);
4641 IEM_MC_ARG(uint8_t, cShiftArg, 2);
4642 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 3);
4643 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
4644
4645 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1);
4646 uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift);
4647 IEM_MC_ASSIGN(cShiftArg, cShift);
4648 IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
4649 IEM_MC_FETCH_EFLAGS(EFlags);
4650 IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
4651 IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU16, pu16Dst, u16Src, cShiftArg, pEFlags);
4652
4653 IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
4654 IEM_MC_COMMIT_EFLAGS(EFlags);
4655 IEM_MC_ADVANCE_RIP();
4656 IEM_MC_END();
4657 return VINF_SUCCESS;
4658
4659 case IEMMODE_32BIT:
4660 IEM_MC_BEGIN(4, 2);
4661 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
4662 IEM_MC_ARG(uint32_t, u32Src, 1);
4663 IEM_MC_ARG(uint8_t, cShiftArg, 2);
4664 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 3);
4665 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
4666
4667 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1);
4668 uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift);
4669 IEM_MC_ASSIGN(cShiftArg, cShift);
4670 IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
4671 IEM_MC_FETCH_EFLAGS(EFlags);
4672 IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
4673 IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU32, pu32Dst, u32Src, cShiftArg, pEFlags);
4674
4675 IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
4676 IEM_MC_COMMIT_EFLAGS(EFlags);
4677 IEM_MC_ADVANCE_RIP();
4678 IEM_MC_END();
4679 return VINF_SUCCESS;
4680
4681 case IEMMODE_64BIT:
4682 IEM_MC_BEGIN(4, 2);
4683 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
4684 IEM_MC_ARG(uint64_t, u64Src, 1);
4685 IEM_MC_ARG(uint8_t, cShiftArg, 2);
4686 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 3);
4687 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
4688
4689 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1);
4690 uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift);
4691 IEM_MC_ASSIGN(cShiftArg, cShift);
4692 IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
4693 IEM_MC_FETCH_EFLAGS(EFlags);
4694 IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
4695 IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU64, pu64Dst, u64Src, cShiftArg, pEFlags);
4696
4697 IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, IEM_ACCESS_DATA_RW);
4698 IEM_MC_COMMIT_EFLAGS(EFlags);
4699 IEM_MC_ADVANCE_RIP();
4700 IEM_MC_END();
4701 return VINF_SUCCESS;
4702
4703 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4704 }
4705 }
4706}
4707
4708
4709/**
4710 * Common worker for iemOp_shrd_Ev_Gv_CL and iemOp_shld_Ev_Gv_CL.
4711 */
4712FNIEMOP_DEF_1(iemOpCommonShldShrd_CL, PCIEMOPSHIFTDBLSIZES, pImpl)
4713{
4714 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
4715 IEMOP_HLP_NO_LOCK_PREFIX();
4716 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF | X86_EFL_OF);
4717
4718 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
4719 {
4720 IEMOP_HLP_NO_LOCK_PREFIX();
4721
4722 switch (pIemCpu->enmEffOpSize)
4723 {
4724 case IEMMODE_16BIT:
4725 IEM_MC_BEGIN(4, 0);
4726 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
4727 IEM_MC_ARG(uint16_t, u16Src, 1);
4728 IEM_MC_ARG(uint8_t, cShiftArg, 2);
4729 IEM_MC_ARG(uint32_t *, pEFlags, 3);
4730
4731 IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
4732 IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
4733 IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
4734 IEM_MC_REF_EFLAGS(pEFlags);
4735 IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU16, pu16Dst, u16Src, cShiftArg, pEFlags);
4736
4737 IEM_MC_ADVANCE_RIP();
4738 IEM_MC_END();
4739 return VINF_SUCCESS;
4740
4741 case IEMMODE_32BIT:
4742 IEM_MC_BEGIN(4, 0);
4743 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
4744 IEM_MC_ARG(uint32_t, u32Src, 1);
4745 IEM_MC_ARG(uint8_t, cShiftArg, 2);
4746 IEM_MC_ARG(uint32_t *, pEFlags, 3);
4747
4748 IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
4749 IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
4750 IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
4751 IEM_MC_REF_EFLAGS(pEFlags);
4752 IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU32, pu32Dst, u32Src, cShiftArg, pEFlags);
4753
4754 IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);
4755 IEM_MC_ADVANCE_RIP();
4756 IEM_MC_END();
4757 return VINF_SUCCESS;
4758
4759 case IEMMODE_64BIT:
4760 IEM_MC_BEGIN(4, 0);
4761 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
4762 IEM_MC_ARG(uint64_t, u64Src, 1);
4763 IEM_MC_ARG(uint8_t, cShiftArg, 2);
4764 IEM_MC_ARG(uint32_t *, pEFlags, 3);
4765
4766 IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
4767 IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
4768 IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
4769 IEM_MC_REF_EFLAGS(pEFlags);
4770 IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU64, pu64Dst, u64Src, cShiftArg, pEFlags);
4771
4772 IEM_MC_ADVANCE_RIP();
4773 IEM_MC_END();
4774 return VINF_SUCCESS;
4775
4776 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4777 }
4778 }
4779 else
4780 {
4781 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
4782
4783 switch (pIemCpu->enmEffOpSize)
4784 {
4785 case IEMMODE_16BIT:
4786 IEM_MC_BEGIN(4, 2);
4787 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
4788 IEM_MC_ARG(uint16_t, u16Src, 1);
4789 IEM_MC_ARG(uint8_t, cShiftArg, 2);
4790 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 3);
4791 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
4792
4793 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
4794 IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
4795 IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
4796 IEM_MC_FETCH_EFLAGS(EFlags);
4797 IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
4798 IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU16, pu16Dst, u16Src, cShiftArg, pEFlags);
4799
4800 IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
4801 IEM_MC_COMMIT_EFLAGS(EFlags);
4802 IEM_MC_ADVANCE_RIP();
4803 IEM_MC_END();
4804 return VINF_SUCCESS;
4805
4806 case IEMMODE_32BIT:
4807 IEM_MC_BEGIN(4, 2);
4808 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
4809 IEM_MC_ARG(uint32_t, u32Src, 1);
4810 IEM_MC_ARG(uint8_t, cShiftArg, 2);
4811 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 3);
4812 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
4813
4814 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
4815 IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
4816 IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
4817 IEM_MC_FETCH_EFLAGS(EFlags);
4818 IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
4819 IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU32, pu32Dst, u32Src, cShiftArg, pEFlags);
4820
4821 IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
4822 IEM_MC_COMMIT_EFLAGS(EFlags);
4823 IEM_MC_ADVANCE_RIP();
4824 IEM_MC_END();
4825 return VINF_SUCCESS;
4826
4827 case IEMMODE_64BIT:
4828 IEM_MC_BEGIN(4, 2);
4829 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
4830 IEM_MC_ARG(uint64_t, u64Src, 1);
4831 IEM_MC_ARG(uint8_t, cShiftArg, 2);
4832 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 3);
4833 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
4834
4835 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
4836 IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
4837 IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
4838 IEM_MC_FETCH_EFLAGS(EFlags);
4839 IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
4840 IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU64, pu64Dst, u64Src, cShiftArg, pEFlags);
4841
4842 IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, IEM_ACCESS_DATA_RW);
4843 IEM_MC_COMMIT_EFLAGS(EFlags);
4844 IEM_MC_ADVANCE_RIP();
4845 IEM_MC_END();
4846 return VINF_SUCCESS;
4847
4848 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4849 }
4850 }
4851}
4852
4853
4854
/** Opcode 0x0f 0xa4 - shld Ev,Gv,Ib. */
FNIEMOP_DEF(iemOp_shld_Ev_Gv_Ib)
{
    IEMOP_MNEMONIC("shld Ev,Gv,Ib");
    return FNIEMOP_CALL_1(iemOpCommonShldShrd_Ib, &g_iemAImpl_shld);
}
4861
4862
/** Opcode 0x0f 0xa5 - shld Ev,Gv,CL.  (Comment fixed: said 0xa7, but SHLD
 *  with CL count encodes as 0x0f 0xa5.) */
FNIEMOP_DEF(iemOp_shld_Ev_Gv_CL)
{
    IEMOP_MNEMONIC("shld Ev,Gv,CL");
    return FNIEMOP_CALL_1(iemOpCommonShldShrd_CL, &g_iemAImpl_shld);
}
4869
4870
/** Opcode 0x0f 0xa8 - push gs. */
FNIEMOP_DEF(iemOp_push_gs)
{
    IEMOP_MNEMONIC("push gs");
    IEMOP_HLP_NO_LOCK_PREFIX();
    return FNIEMOP_CALL_1(iemOpCommonPushSReg, X86_SREG_GS);
}
4878
4879
/** Opcode 0x0f 0xa9 - pop gs.  Deferred to C implementation (segment loads
 *  involve descriptor table access and exception checks). */
FNIEMOP_DEF(iemOp_pop_gs)
{
    IEMOP_MNEMONIC("pop gs");
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_pop_Sreg, X86_SREG_GS, pIemCpu->enmEffOpSize);
}
4887
4888
/** Opcode 0x0f 0xaa - rsm (resume from SMM); not implemented yet. */
FNIEMOP_STUB(iemOp_rsm);
4891
4892
/** Opcode 0x0f 0xab - bts Ev,Gv. */
FNIEMOP_DEF(iemOp_bts_Ev_Gv)
{
    IEMOP_MNEMONIC("bts Ev,Gv");
    return FNIEMOP_CALL_1(iemOpCommonBit_Ev_Gv, &g_iemAImpl_bts);
}
4899
4900
/** Opcode 0x0f 0xac - shrd Ev,Gv,Ib. */
FNIEMOP_DEF(iemOp_shrd_Ev_Gv_Ib)
{
    IEMOP_MNEMONIC("shrd Ev,Gv,Ib");
    return FNIEMOP_CALL_1(iemOpCommonShldShrd_Ib, &g_iemAImpl_shrd);
}
4907
4908
/** Opcode 0x0f 0xad - shrd Ev,Gv,CL. */
FNIEMOP_DEF(iemOp_shrd_Ev_Gv_CL)
{
    IEMOP_MNEMONIC("shrd Ev,Gv,CL");
    return FNIEMOP_CALL_1(iemOpCommonShldShrd_CL, &g_iemAImpl_shrd);
}
4915
4916
/** Opcode 0x0f 0xae mem/0 - fxsave m512.
 *  Raises \#UD when the guest CPUID does not report FXSR; the heavy lifting
 *  is deferred to iemCImpl_fxsave. */
FNIEMOP_DEF_1(iemOp_Grp15_fxsave, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fxsave m512");
    IEMOP_HLP_NO_LOCK_PREFIX();
    if (!IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_FXSR))
        return IEMOP_RAISE_INVALID_OPCODE();

    IEM_MC_BEGIN(3, 1);
    IEM_MC_ARG_CONST(uint8_t, iEffSeg,/*=*/pIemCpu->iEffSeg, 0);
    IEM_MC_ARG(RTGCPTR, GCPtrEff, 1);
    IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize,/*=*/pIemCpu->enmEffOpSize, 2);
    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEff, bRm, 0);
    IEM_MC_CALL_CIMPL_3(iemCImpl_fxsave, iEffSeg, GCPtrEff, enmEffOpSize);
    IEM_MC_END();
    return VINF_SUCCESS;
}
4934
4935
/** Opcode 0x0f 0xae mem/1 - fxrstor m512.
 *  Raises \#UD when the guest CPUID does not report FXSR; the heavy lifting
 *  is deferred to iemCImpl_fxrstor. */
FNIEMOP_DEF_1(iemOp_Grp15_fxrstor, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fxrstor m512");
    IEMOP_HLP_NO_LOCK_PREFIX();
    if (!IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_FXSR))
        return IEMOP_RAISE_INVALID_OPCODE();

    IEM_MC_BEGIN(3, 1);
    IEM_MC_ARG_CONST(uint8_t, iEffSeg,/*=*/pIemCpu->iEffSeg, 0);
    IEM_MC_ARG(RTGCPTR, GCPtrEff, 1);
    IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize,/*=*/pIemCpu->enmEffOpSize, 2);
    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEff, bRm, 0);
    IEM_MC_CALL_CIMPL_3(iemCImpl_fxrstor, iEffSeg, GCPtrEff, enmEffOpSize);
    IEM_MC_END();
    return VINF_SUCCESS;
}
4953
4954
/** Opcode 0x0f 0xae mem/2 - ldmxcsr; not implemented yet (stub asserts). */
FNIEMOP_STUB_1(iemOp_Grp15_ldmxcsr, uint8_t, bRm);

/** Opcode 0x0f 0xae mem/3 - stmxcsr; not implemented yet (stub asserts). */
FNIEMOP_STUB_1(iemOp_Grp15_stmxcsr, uint8_t, bRm);

/** Opcode 0x0f 0xae mem/4 - xsave; decodes as \#UD for now. */
FNIEMOP_UD_STUB_1(iemOp_Grp15_xsave, uint8_t, bRm);

/** Opcode 0x0f 0xae mem/5 - xrstor; decodes as \#UD for now. */
FNIEMOP_UD_STUB_1(iemOp_Grp15_xrstor, uint8_t, bRm);

/** Opcode 0x0f 0xae mem/6 - xsaveopt; decodes as \#UD for now. */
FNIEMOP_UD_STUB_1(iemOp_Grp15_xsaveopt, uint8_t, bRm);

/** Opcode 0x0f 0xae mem/7 - clflush; not implemented yet (stub asserts). */
FNIEMOP_STUB_1(iemOp_Grp15_clflush, uint8_t, bRm);
4972
4973
/** Opcode 0x0f 0xae 11b/5 - lfence.
 *  \#UD unless the guest CPUID reports SSE2.  Uses the real LFENCE on hosts
 *  that have it, otherwise a generic locked-op fallback fence. */
FNIEMOP_DEF_1(iemOp_Grp15_lfence, uint8_t, bRm)
{
    IEMOP_MNEMONIC("lfence");
    IEMOP_HLP_NO_LOCK_PREFIX();
    if (!IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_SSE2))
        return IEMOP_RAISE_INVALID_OPCODE();

    IEM_MC_BEGIN(0, 0);
    if (IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX_ON_HOST(X86_CPUID_FEATURE_EDX_SSE2))
        IEM_MC_CALL_VOID_AIMPL_0(iemAImpl_lfence);
    else
        IEM_MC_CALL_VOID_AIMPL_0(iemAImpl_alt_mem_fence);
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
4991
4992
/** Opcode 0x0f 0xae 11b/6 - mfence.
 *  \#UD unless the guest CPUID reports SSE2.  Uses the real MFENCE on hosts
 *  that have it, otherwise a generic locked-op fallback fence. */
FNIEMOP_DEF_1(iemOp_Grp15_mfence, uint8_t, bRm)
{
    IEMOP_MNEMONIC("mfence");
    IEMOP_HLP_NO_LOCK_PREFIX();
    if (!IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_SSE2))
        return IEMOP_RAISE_INVALID_OPCODE();

    IEM_MC_BEGIN(0, 0);
    if (IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX_ON_HOST(X86_CPUID_FEATURE_EDX_SSE2))
        IEM_MC_CALL_VOID_AIMPL_0(iemAImpl_mfence);
    else
        IEM_MC_CALL_VOID_AIMPL_0(iemAImpl_alt_mem_fence);
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
5010
5011
/** Opcode 0x0f 0xae 11b/7 - sfence.
 *  \#UD unless the guest CPUID reports SSE2.  Uses the real SFENCE on hosts
 *  that have it, otherwise a generic locked-op fallback fence. */
FNIEMOP_DEF_1(iemOp_Grp15_sfence, uint8_t, bRm)
{
    IEMOP_MNEMONIC("sfence");
    IEMOP_HLP_NO_LOCK_PREFIX();
    if (!IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_SSE2))
        return IEMOP_RAISE_INVALID_OPCODE();

    IEM_MC_BEGIN(0, 0);
    if (IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX_ON_HOST(X86_CPUID_FEATURE_EDX_SSE2))
        IEM_MC_CALL_VOID_AIMPL_0(iemAImpl_sfence);
    else
        IEM_MC_CALL_VOID_AIMPL_0(iemAImpl_alt_mem_fence);
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
5029
5030
/** Opcode 0xf3 0x0f 0xae 11b/0 - rdfsbase; decodes as \#UD for now. */
FNIEMOP_UD_STUB_1(iemOp_Grp15_rdfsbase, uint8_t, bRm);

/** Opcode 0xf3 0x0f 0xae 11b/1 - rdgsbase; decodes as \#UD for now. */
FNIEMOP_UD_STUB_1(iemOp_Grp15_rdgsbase, uint8_t, bRm);

/** Opcode 0xf3 0x0f 0xae 11b/2 - wrfsbase; decodes as \#UD for now. */
FNIEMOP_UD_STUB_1(iemOp_Grp15_wrfsbase, uint8_t, bRm);

/** Opcode 0xf3 0x0f 0xae 11b/3 - wrgsbase; decodes as \#UD for now. */
FNIEMOP_UD_STUB_1(iemOp_Grp15_wrgsbase, uint8_t, bRm);
5042
5043
/** Opcode 0x0f 0xae - group 15 dispatcher.
 *  Memory forms dispatch on the ModR/M reg field (fxsave/fxrstor/mxcsr/xsave
 *  family/clflush); register forms additionally dispatch on the mandatory
 *  prefix (none = fences, F3 = fs/gs base access). */
FNIEMOP_DEF(iemOp_Grp15)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    if ((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT))
    {
        /* memory forms: reg field selects the instruction. */
        switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
        {
            case 0: return FNIEMOP_CALL_1(iemOp_Grp15_fxsave, bRm);
            case 1: return FNIEMOP_CALL_1(iemOp_Grp15_fxrstor, bRm);
            case 2: return FNIEMOP_CALL_1(iemOp_Grp15_ldmxcsr, bRm);
            case 3: return FNIEMOP_CALL_1(iemOp_Grp15_stmxcsr, bRm);
            case 4: return FNIEMOP_CALL_1(iemOp_Grp15_xsave, bRm);
            case 5: return FNIEMOP_CALL_1(iemOp_Grp15_xrstor, bRm);
            case 6: return FNIEMOP_CALL_1(iemOp_Grp15_xsaveopt,bRm);
            case 7: return FNIEMOP_CALL_1(iemOp_Grp15_clflush, bRm);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /* register forms: the mandatory prefix picks the sub-table. */
        switch (pIemCpu->fPrefixes & (IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_LOCK))
        {
            case 0:
                switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
                {
                    case 0: return IEMOP_RAISE_INVALID_OPCODE();
                    case 1: return IEMOP_RAISE_INVALID_OPCODE();
                    case 2: return IEMOP_RAISE_INVALID_OPCODE();
                    case 3: return IEMOP_RAISE_INVALID_OPCODE();
                    case 4: return IEMOP_RAISE_INVALID_OPCODE();
                    case 5: return FNIEMOP_CALL_1(iemOp_Grp15_lfence, bRm);
                    case 6: return FNIEMOP_CALL_1(iemOp_Grp15_mfence, bRm);
                    case 7: return FNIEMOP_CALL_1(iemOp_Grp15_sfence, bRm);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                break;

            case IEM_OP_PRF_REPZ:
                switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
                {
                    case 0: return FNIEMOP_CALL_1(iemOp_Grp15_rdfsbase, bRm);
                    case 1: return FNIEMOP_CALL_1(iemOp_Grp15_rdgsbase, bRm);
                    case 2: return FNIEMOP_CALL_1(iemOp_Grp15_wrfsbase, bRm);
                    case 3: return FNIEMOP_CALL_1(iemOp_Grp15_wrgsbase, bRm);
                    case 4: return IEMOP_RAISE_INVALID_OPCODE();
                    case 5: return IEMOP_RAISE_INVALID_OPCODE();
                    case 6: return IEMOP_RAISE_INVALID_OPCODE();
                    case 7: return IEMOP_RAISE_INVALID_OPCODE();
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                break;

            default:
                return IEMOP_RAISE_INVALID_OPCODE();
        }
    }
}
5102
5103
/** Opcode 0x0f 0xaf. */
FNIEMOP_DEF(iemOp_imul_Gv_Ev)
{
    /*
     * IMUL Gv,Ev - two operand signed multiply; delegates to the common
     * r,r/m binary-operator worker with the two-operand imul implementation.
     * SF/ZF/AF/PF are architecturally undefined after IMUL, so tell the
     * verifier not to compare them.
     */
    IEMOP_MNEMONIC("imul Gv,Ev");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_imul_two);
}
5111
5112
/** Opcode 0x0f 0xb0. */
FNIEMOP_DEF(iemOp_cmpxchg_Eb_Gb)
{
    /*
     * CMPXCHG Eb,Gb - byte compare-and-exchange with AL as the implicit
     * accumulator.  The compare/exchange itself (and the EFLAGS update) is
     * done by the iemAImpl_cmpxchg_u8[_locked] assembly helper; this decoder
     * only wires up the operands.
     */
    IEMOP_MNEMONIC("cmpxchg Eb,Gb");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* Register destination: AL and the destination register are passed
           by reference, so the helper updates them directly. */
        IEMOP_HLP_DONE_DECODING();
        IEM_MC_BEGIN(4, 0);
        IEM_MC_ARG(uint8_t *,       pu8Dst,                 0);
        IEM_MC_ARG(uint8_t *,       pu8Al,                  1);
        IEM_MC_ARG(uint8_t,         u8Src,                  2);
        IEM_MC_ARG(uint32_t *,      pEFlags,                3);

        IEM_MC_FETCH_GREG_U8(u8Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
        IEM_MC_REF_GREG_U8(pu8Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_REF_GREG_U8(pu8Al, X86_GREG_xAX);
        IEM_MC_REF_EFLAGS(pEFlags);
        if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
            IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg_u8, pu8Dst, pu8Al, u8Src, pEFlags);
        else
            IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg_u8_locked, pu8Dst, pu8Al, u8Src, pEFlags);

        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* Memory destination: map the destination read/write, work on a
           local copy of AL, then commit memory, EFLAGS and AL afterwards. */
        IEM_MC_BEGIN(4, 3);
        IEM_MC_ARG(uint8_t *,       pu8Dst,                 0);
        IEM_MC_ARG(uint8_t *,       pu8Al,                  1);
        IEM_MC_ARG(uint8_t,         u8Src,                  2);
        IEM_MC_ARG_LOCAL_EFLAGS(    pEFlags, EFlags,        3);
        IEM_MC_LOCAL(RTGCPTR,       GCPtrEffDst);
        IEM_MC_LOCAL(uint8_t,       u8Al);

        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEMOP_HLP_DONE_DECODING();
        IEM_MC_MEM_MAP(pu8Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
        IEM_MC_FETCH_GREG_U8(u8Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
        IEM_MC_FETCH_GREG_U8(u8Al, X86_GREG_xAX);
        IEM_MC_FETCH_EFLAGS(EFlags);
        IEM_MC_REF_LOCAL(pu8Al, u8Al);
        if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
            IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg_u8, pu8Dst, pu8Al, u8Src, pEFlags);
        else
            IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg_u8_locked, pu8Dst, pu8Al, u8Src, pEFlags);

        IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Dst, IEM_ACCESS_DATA_RW);
        IEM_MC_COMMIT_EFLAGS(EFlags);
        IEM_MC_STORE_GREG_U8(X86_GREG_xAX, u8Al);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
5170
/** Opcode 0x0f 0xb1. */
FNIEMOP_DEF(iemOp_cmpxchg_Ev_Gv)
{
    /*
     * CMPXCHG Ev,Gv - word/dword/qword compare-and-exchange with rAX as the
     * implicit accumulator.  One case per effective operand size, each with a
     * register-destination and a memory-destination variant.  The actual
     * compare/exchange and EFLAGS update happen in the iemAImpl_cmpxchg_uNN
     * assembly helpers (locked variant when a LOCK prefix is present).
     */
    IEMOP_MNEMONIC("cmpxchg Ev,Gv");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* Register destination - destination and rAX are passed by reference. */
        IEMOP_HLP_DONE_DECODING();
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(4, 0);
                IEM_MC_ARG(uint16_t *,      pu16Dst,                0);
                IEM_MC_ARG(uint16_t *,      pu16Ax,                 1);
                IEM_MC_ARG(uint16_t,        u16Src,                 2);
                IEM_MC_ARG(uint32_t *,      pEFlags,                3);

                IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_GREG_U16(pu16Ax, X86_GREG_xAX);
                IEM_MC_REF_EFLAGS(pEFlags);
                if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
                    IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg_u16, pu16Dst, pu16Ax, u16Src, pEFlags);
                else
                    IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg_u16_locked, pu16Dst, pu16Ax, u16Src, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(4, 0);
                IEM_MC_ARG(uint32_t *,      pu32Dst,                0);
                IEM_MC_ARG(uint32_t *,      pu32Eax,                1);
                IEM_MC_ARG(uint32_t,        u32Src,                 2);
                IEM_MC_ARG(uint32_t *,      pEFlags,                3);

                IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_GREG_U32(pu32Eax, X86_GREG_xAX);
                IEM_MC_REF_EFLAGS(pEFlags);
                if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
                    IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg_u32, pu32Dst, pu32Eax, u32Src, pEFlags);
                else
                    IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg_u32_locked, pu32Dst, pu32Eax, u32Src, pEFlags);

                /* 32-bit register writes zero the upper halves in 64-bit mode. */
                IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Eax);
                IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(4, 0);
                IEM_MC_ARG(uint64_t *,      pu64Dst,                0);
                IEM_MC_ARG(uint64_t *,      pu64Rax,                1);
#ifdef RT_ARCH_X86
                /* On 32-bit hosts the 64-bit helper takes the source by reference. */
                IEM_MC_ARG(uint64_t *,      pu64Src,                2);
#else
                IEM_MC_ARG(uint64_t,        u64Src,                 2);
#endif
                IEM_MC_ARG(uint32_t *,      pEFlags,                3);

                IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_GREG_U64(pu64Rax, X86_GREG_xAX);
                IEM_MC_REF_EFLAGS(pEFlags);
#ifdef RT_ARCH_X86
                IEM_MC_REF_GREG_U64(pu64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
                    IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg_u64, pu64Dst, pu64Rax, pu64Src, pEFlags);
                else
                    IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg_u64_locked, pu64Dst, pu64Rax, pu64Src, pEFlags);
#else
                IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
                    IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg_u64, pu64Dst, pu64Rax, u64Src, pEFlags);
                else
                    IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg_u64_locked, pu64Dst, pu64Rax, u64Src, pEFlags);
#endif

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /* Memory destination - map the destination read/write, work on a
           local copy of rAX, then commit memory, EFLAGS and rAX afterwards. */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(4, 3);
                IEM_MC_ARG(uint16_t *,      pu16Dst,                0);
                IEM_MC_ARG(uint16_t *,      pu16Ax,                 1);
                IEM_MC_ARG(uint16_t,        u16Src,                 2);
                IEM_MC_ARG_LOCAL_EFLAGS(    pEFlags, EFlags,        3);
                IEM_MC_LOCAL(RTGCPTR,       GCPtrEffDst);
                IEM_MC_LOCAL(uint16_t,      u16Ax);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEMOP_HLP_DONE_DECODING();
                IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
                IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_FETCH_GREG_U16(u16Ax, X86_GREG_xAX);
                IEM_MC_FETCH_EFLAGS(EFlags);
                IEM_MC_REF_LOCAL(pu16Ax, u16Ax);
                if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
                    IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg_u16, pu16Dst, pu16Ax, u16Src, pEFlags);
                else
                    IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg_u16_locked, pu16Dst, pu16Ax, u16Src, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_STORE_GREG_U16(X86_GREG_xAX, u16Ax);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(4, 3);
                IEM_MC_ARG(uint32_t *,      pu32Dst,                0);
                IEM_MC_ARG(uint32_t *,      pu32Eax,                1);
                IEM_MC_ARG(uint32_t,        u32Src,                 2);
                IEM_MC_ARG_LOCAL_EFLAGS(    pEFlags, EFlags,        3);
                IEM_MC_LOCAL(RTGCPTR,       GCPtrEffDst);
                IEM_MC_LOCAL(uint32_t,      u32Eax);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEMOP_HLP_DONE_DECODING();
                IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
                IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_FETCH_GREG_U32(u32Eax, X86_GREG_xAX);
                IEM_MC_FETCH_EFLAGS(EFlags);
                IEM_MC_REF_LOCAL(pu32Eax, u32Eax);
                if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
                    IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg_u32, pu32Dst, pu32Eax, u32Src, pEFlags);
                else
                    IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg_u32_locked, pu32Dst, pu32Eax, u32Src, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_STORE_GREG_U32(X86_GREG_xAX, u32Eax);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(4, 3);
                IEM_MC_ARG(uint64_t *,      pu64Dst,                0);
                IEM_MC_ARG(uint64_t *,      pu64Rax,                1);
#ifdef RT_ARCH_X86
                /* On 32-bit hosts the 64-bit helper takes the source by reference. */
                IEM_MC_ARG(uint64_t *,      pu64Src,                2);
#else
                IEM_MC_ARG(uint64_t,        u64Src,                 2);
#endif
                IEM_MC_ARG_LOCAL_EFLAGS(    pEFlags, EFlags,        3);
                IEM_MC_LOCAL(RTGCPTR,       GCPtrEffDst);
                IEM_MC_LOCAL(uint64_t,      u64Rax);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEMOP_HLP_DONE_DECODING();
                IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
                IEM_MC_FETCH_GREG_U64(u64Rax, X86_GREG_xAX);
                IEM_MC_FETCH_EFLAGS(EFlags);
                IEM_MC_REF_LOCAL(pu64Rax, u64Rax);
#ifdef RT_ARCH_X86
                IEM_MC_REF_GREG_U64(pu64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
                    IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg_u64, pu64Dst, pu64Rax, pu64Src, pEFlags);
                else
                    IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg_u64_locked, pu64Dst, pu64Rax, pu64Src, pEFlags);
#else
                IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
                    IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg_u64, pu64Dst, pu64Rax, u64Src, pEFlags);
                else
                    IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg_u64_locked, pu64Dst, pu64Rax, u64Src, pEFlags);
#endif

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, IEM_ACCESS_DATA_RW);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_STORE_GREG_U64(X86_GREG_xAX, u64Rax);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
5363
5364
/**
 * Common worker for LSS/LFS/LGS (and friends): loads a far pointer
 * (offset + 16-bit selector) from memory and hands it to the
 * iemCImpl_load_SReg_Greg C implementation, which loads the segment
 * register @a iSegReg and the general register named by the ModR/M
 * reg field.
 *
 * @param   iSegReg     The segment register to load (X86_SREG_XXX).
 * @param   bRm         The ModR/M byte (must denote a memory operand).
 */
FNIEMOP_DEF_2(iemOpCommonLoadSRegAndGreg, uint8_t, iSegReg, uint8_t, bRm)
{
    Assert((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT)); /* Caller checks this */
    uint8_t const iGReg = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg;

    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            /* 16-bit offset followed by the selector at +2. */
            IEM_MC_BEGIN(5, 1);
            IEM_MC_ARG(uint16_t,        uSel,                                    0);
            IEM_MC_ARG(uint16_t,        offSeg,                                  1);
            IEM_MC_ARG_CONST(uint8_t,   iSegRegArg,/*=*/iSegReg,                 2);
            IEM_MC_ARG_CONST(uint8_t,   iGRegArg,  /*=*/iGReg,                   3);
            IEM_MC_ARG_CONST(IEMMODE,   enmEffOpSize,/*=*/pIemCpu->enmEffOpSize, 4);
            IEM_MC_LOCAL(RTGCPTR,       GCPtrEff);
            IEM_MC_CALC_RM_EFF_ADDR(GCPtrEff, bRm, 0);
            IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
            IEM_MC_FETCH_MEM_U16(offSeg, pIemCpu->iEffSeg, GCPtrEff);
            IEM_MC_FETCH_MEM_U16_DISP(uSel, pIemCpu->iEffSeg, GCPtrEff, 2);
            IEM_MC_CALL_CIMPL_5(iemCImpl_load_SReg_Greg, uSel, offSeg, iSegRegArg, iGRegArg, enmEffOpSize);
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_32BIT:
            /* 32-bit offset followed by the selector at +4. */
            IEM_MC_BEGIN(5, 1);
            IEM_MC_ARG(uint16_t,        uSel,                                    0);
            IEM_MC_ARG(uint32_t,        offSeg,                                  1);
            IEM_MC_ARG_CONST(uint8_t,   iSegRegArg,/*=*/iSegReg,                 2);
            IEM_MC_ARG_CONST(uint8_t,   iGRegArg,  /*=*/iGReg,                   3);
            IEM_MC_ARG_CONST(IEMMODE,   enmEffOpSize,/*=*/pIemCpu->enmEffOpSize, 4);
            IEM_MC_LOCAL(RTGCPTR,       GCPtrEff);
            IEM_MC_CALC_RM_EFF_ADDR(GCPtrEff, bRm, 0);
            IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
            IEM_MC_FETCH_MEM_U32(offSeg, pIemCpu->iEffSeg, GCPtrEff);
            IEM_MC_FETCH_MEM_U16_DISP(uSel, pIemCpu->iEffSeg, GCPtrEff, 4);
            IEM_MC_CALL_CIMPL_5(iemCImpl_load_SReg_Greg, uSel, offSeg, iSegRegArg, iGRegArg, enmEffOpSize);
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_64BIT:
            /* 64-bit offset followed by the selector at +8; AMD CPUs are
               documented to only load a 32-bit offset here (see todo). */
            IEM_MC_BEGIN(5, 1);
            IEM_MC_ARG(uint16_t,        uSel,                                    0);
            IEM_MC_ARG(uint64_t,        offSeg,                                  1);
            IEM_MC_ARG_CONST(uint8_t,   iSegRegArg,/*=*/iSegReg,                 2);
            IEM_MC_ARG_CONST(uint8_t,   iGRegArg,  /*=*/iGReg,                   3);
            IEM_MC_ARG_CONST(IEMMODE,   enmEffOpSize,/*=*/pIemCpu->enmEffOpSize, 4);
            IEM_MC_LOCAL(RTGCPTR,       GCPtrEff);
            IEM_MC_CALC_RM_EFF_ADDR(GCPtrEff, bRm, 0);
            IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
            if (IEM_IS_GUEST_CPU_AMD(pIemCpu)) /** @todo testcase: rev 3.15 of the amd manuals claims it only loads a 32-bit greg. */
                IEM_MC_FETCH_MEM_U32_SX_U64(offSeg, pIemCpu->iEffSeg, GCPtrEff);
            else
                IEM_MC_FETCH_MEM_U64(offSeg, pIemCpu->iEffSeg, GCPtrEff);
            IEM_MC_FETCH_MEM_U16_DISP(uSel, pIemCpu->iEffSeg, GCPtrEff, 8);
            IEM_MC_CALL_CIMPL_5(iemCImpl_load_SReg_Greg, uSel, offSeg, iSegRegArg, iGRegArg, enmEffOpSize);
            IEM_MC_END();
            return VINF_SUCCESS;

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
5426
5427
/** Opcode 0x0f 0xb2. */
FNIEMOP_DEF(iemOp_lss_Gv_Mp)
{
    /* LSS Gv,Mp - load SS:Gv from a far pointer in memory.  Register
       operands (mod == 3) are invalid for this encoding. */
    IEMOP_MNEMONIC("lss Gv,Mp");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
        return IEMOP_RAISE_INVALID_OPCODE();
    return FNIEMOP_CALL_2(iemOpCommonLoadSRegAndGreg, X86_SREG_SS, bRm);
}
5437
5438
/** Opcode 0x0f 0xb3. */
FNIEMOP_DEF(iemOp_btr_Ev_Gv)
{
    /* BTR Ev,Gv - bit test and reset; shares the common bit-op worker. */
    IEMOP_MNEMONIC("btr Ev,Gv");
    return FNIEMOP_CALL_1(iemOpCommonBit_Ev_Gv, &g_iemAImpl_btr);
}
5445
5446
/** Opcode 0x0f 0xb4. */
FNIEMOP_DEF(iemOp_lfs_Gv_Mp)
{
    /* LFS Gv,Mp - load FS:Gv from a far pointer in memory.  Register
       operands (mod == 3) are invalid for this encoding. */
    IEMOP_MNEMONIC("lfs Gv,Mp");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
        return IEMOP_RAISE_INVALID_OPCODE();
    return FNIEMOP_CALL_2(iemOpCommonLoadSRegAndGreg, X86_SREG_FS, bRm);
}
5456
5457
/** Opcode 0x0f 0xb5. */
FNIEMOP_DEF(iemOp_lgs_Gv_Mp)
{
    /* LGS Gv,Mp - load GS:Gv from a far pointer in memory.  Register
       operands (mod == 3) are invalid for this encoding. */
    IEMOP_MNEMONIC("lgs Gv,Mp");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
        return IEMOP_RAISE_INVALID_OPCODE();
    return FNIEMOP_CALL_2(iemOpCommonLoadSRegAndGreg, X86_SREG_GS, bRm);
}
5467
5468
/** Opcode 0x0f 0xb6. */
FNIEMOP_DEF(iemOp_movzx_Gv_Eb)
{
    /*
     * MOVZX Gv,Eb - zero-extend a byte (register or memory) into the
     * destination general register, one case per effective operand size.
     */
    IEMOP_MNEMONIC("movzx Gv,Eb");

    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint16_t, u16Value);
                IEM_MC_FETCH_GREG_U8_ZX_U16(u16Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint32_t, u32Value);
                IEM_MC_FETCH_GREG_U8_ZX_U32(u32Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint64_t, u64Value);
                IEM_MC_FETCH_GREG_U8_ZX_U64(u64Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /*
         * We're loading a register from memory.
         */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint16_t, u16Value);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_FETCH_MEM_U8_ZX_U16(u16Value, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint32_t, u32Value);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_FETCH_MEM_U8_ZX_U32(u32Value, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint64_t, u64Value);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_FETCH_MEM_U8_ZX_U64(u64Value, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
5558
5559
/** Opcode 0x0f 0xb7. */
FNIEMOP_DEF(iemOp_movzx_Gv_Ew)
{
    /*
     * MOVZX Gv,Ew - zero-extend a word into the destination register.
     * Only 32-bit and 64-bit destinations are distinguished here; the
     * 16-bit case is folded into the 32-bit path.
     */
    IEMOP_MNEMONIC("movzx Gv,Ew");

    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

    /** @todo Not entirely sure how the operand size prefix is handled here,
     *        assuming that it will be ignored. Would be nice to have a few
     *        test for this. */
    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        if (pIemCpu->enmEffOpSize != IEMMODE_64BIT)
        {
            IEM_MC_BEGIN(0, 1);
            IEM_MC_LOCAL(uint32_t, u32Value);
            IEM_MC_FETCH_GREG_U16_ZX_U32(u32Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
            IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
        }
        else
        {
            IEM_MC_BEGIN(0, 1);
            IEM_MC_LOCAL(uint64_t, u64Value);
            IEM_MC_FETCH_GREG_U16_ZX_U64(u64Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
            IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
        }
    }
    else
    {
        /*
         * We're loading a register from memory.
         */
        if (pIemCpu->enmEffOpSize != IEMMODE_64BIT)
        {
            IEM_MC_BEGIN(0, 2);
            IEM_MC_LOCAL(uint32_t, u32Value);
            IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
            IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
            IEM_MC_FETCH_MEM_U16_ZX_U32(u32Value, pIemCpu->iEffSeg, GCPtrEffDst);
            IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
        }
        else
        {
            IEM_MC_BEGIN(0, 2);
            IEM_MC_LOCAL(uint64_t, u64Value);
            IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
            IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
            IEM_MC_FETCH_MEM_U16_ZX_U64(u64Value, pIemCpu->iEffSeg, GCPtrEffDst);
            IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
        }
    }
    return VINF_SUCCESS;
}
5625
5626
/** Opcode 0x0f 0xb8 - POPCNT (F3 prefixed) / JMPE; not implemented yet. */
FNIEMOP_STUB(iemOp_popcnt_Gv_Ev_jmpe);
5629
5630
/** Opcode 0x0f 0xb9. */
FNIEMOP_DEF(iemOp_Grp10)
{
    /* Group 10 (UD1) - architecturally reserved for raising #UD. */
    Log(("iemOp_Grp10 -> #UD\n"));
    return IEMOP_RAISE_INVALID_OPCODE();
}
5637
5638
/** Opcode 0x0f 0xba. */
FNIEMOP_DEF(iemOp_Grp8)
{
    /*
     * Group 8: BT/BTS/BTR/BTC Ev,Ib - bit operations with an immediate bit
     * offset.  The offset is masked to the operand width (0x0f/0x1f/0x3f),
     * so unlike the Ev,Gv forms there is no negative/out-of-range offset
     * to worry about for the memory operand.
     */
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    PCIEMOPBINSIZES pImpl;
    switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
    {
        case 0: case 1: case 2: case 3:
            /* Both AMD and Intel want full modrm+imm8 here. Intel will #UD _after_ the decoding. */
            return IEMOP_RAISE_INVALID_OPCODE();
        case 4: pImpl = &g_iemAImpl_bt;  IEMOP_MNEMONIC("bt  Ev,Ib"); break;
        case 5: pImpl = &g_iemAImpl_bts; IEMOP_MNEMONIC("bts Ev,Ib"); break;
        case 6: pImpl = &g_iemAImpl_btr; IEMOP_MNEMONIC("btr Ev,Ib"); break;
        case 7: pImpl = &g_iemAImpl_btc; IEMOP_MNEMONIC("btc Ev,Ib"); break;
        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register destination. */
        uint8_t u8Bit; IEM_OPCODE_GET_NEXT_U8(&u8Bit);
        IEMOP_HLP_NO_LOCK_PREFIX();

        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint16_t *,      pu16Dst,                    0);
                IEM_MC_ARG_CONST(uint16_t,  u16Src, /*=*/ u8Bit & 0x0f, 1);
                IEM_MC_ARG(uint32_t *,      pEFlags,                    2);

                IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint32_t *,      pu32Dst,                    0);
                IEM_MC_ARG_CONST(uint32_t,  u32Src, /*=*/ u8Bit & 0x1f, 1);
                IEM_MC_ARG(uint32_t *,      pEFlags,                    2);

                IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);

                /* 32-bit register writes zero the upper half in 64-bit mode. */
                IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint64_t *,      pu64Dst,                    0);
                IEM_MC_ARG_CONST(uint64_t,  u64Src, /*=*/ u8Bit & 0x3f, 1);
                IEM_MC_ARG(uint32_t *,      pEFlags,                    2);

                IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /* memory destination. */

        /* BT has no locked form (pfnLockedU16 is NULL): it only reads, so
           map the operand read-only and reject any LOCK prefix. */
        uint32_t fAccess;
        if (pImpl->pfnLockedU16)
            fAccess = IEM_ACCESS_DATA_RW;
        else /* BT */
        {
            IEMOP_HLP_NO_LOCK_PREFIX();
            fAccess = IEM_ACCESS_DATA_R;
        }

        /** @todo test negative bit offsets! */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(3, 1);
                IEM_MC_ARG(uint16_t *,      pu16Dst,                0);
                IEM_MC_ARG(uint16_t,        u16Src,                 1);
                IEM_MC_ARG_LOCAL_EFLAGS(    pEFlags, EFlags,        2);
                IEM_MC_LOCAL(RTGCPTR,       GCPtrEffDst);

                /* The immediate byte follows the ModR/M bytes (cbImm=1). */
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1);
                uint8_t u8Bit; IEM_OPCODE_GET_NEXT_U8(&u8Bit);
                IEM_MC_ASSIGN(u16Src, u8Bit & 0x0f);
                IEM_MC_FETCH_EFLAGS(EFlags);
                IEM_MC_MEM_MAP(pu16Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0);
                if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
                    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
                else
                    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU16, pu16Dst, u16Src, pEFlags);
                IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, fAccess);

                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(3, 1);
                IEM_MC_ARG(uint32_t *,      pu32Dst,                0);
                IEM_MC_ARG(uint32_t,        u32Src,                 1);
                IEM_MC_ARG_LOCAL_EFLAGS(    pEFlags, EFlags,        2);
                IEM_MC_LOCAL(RTGCPTR,       GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1);
                uint8_t u8Bit; IEM_OPCODE_GET_NEXT_U8(&u8Bit);
                IEM_MC_ASSIGN(u32Src, u8Bit & 0x1f);
                IEM_MC_FETCH_EFLAGS(EFlags);
                IEM_MC_MEM_MAP(pu32Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0);
                if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
                    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
                else
                    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU32, pu32Dst, u32Src, pEFlags);
                IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, fAccess);

                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(3, 1);
                IEM_MC_ARG(uint64_t *,      pu64Dst,                0);
                IEM_MC_ARG(uint64_t,        u64Src,                 1);
                IEM_MC_ARG_LOCAL_EFLAGS(    pEFlags, EFlags,        2);
                IEM_MC_LOCAL(RTGCPTR,       GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1);
                uint8_t u8Bit; IEM_OPCODE_GET_NEXT_U8(&u8Bit);
                IEM_MC_ASSIGN(u64Src, u8Bit & 0x3f);
                IEM_MC_FETCH_EFLAGS(EFlags);
                IEM_MC_MEM_MAP(pu64Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0);
                if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
                    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
                else
                    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU64, pu64Dst, u64Src, pEFlags);
                IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, fAccess);

                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }

}
5800
5801
/** Opcode 0x0f 0xbb. */
FNIEMOP_DEF(iemOp_btc_Ev_Gv)
{
    /* BTC Ev,Gv - bit test and complement; shares the common bit-op worker. */
    IEMOP_MNEMONIC("btc Ev,Gv");
    return FNIEMOP_CALL_1(iemOpCommonBit_Ev_Gv, &g_iemAImpl_btc);
}
5808
5809
/** Opcode 0x0f 0xbc. */
FNIEMOP_DEF(iemOp_bsf_Gv_Ev)
{
    /* BSF Gv,Ev - bit scan forward via the common r,r/m worker.  All flags
       except ZF are undefined after BSF, so skip them in verification. */
    IEMOP_MNEMONIC("bsf Gv,Ev");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_SF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_bsf);
}
5817
5818
/** Opcode 0x0f 0xbd. */
FNIEMOP_DEF(iemOp_bsr_Gv_Ev)
{
    /* BSR Gv,Ev - bit scan reverse via the common r,r/m worker.  All flags
       except ZF are undefined after BSR, so skip them in verification. */
    IEMOP_MNEMONIC("bsr Gv,Ev");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_SF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_bsr);
}
5826
5827
/** Opcode 0x0f 0xbe. */
FNIEMOP_DEF(iemOp_movsx_Gv_Eb)
{
    /*
     * MOVSX Gv,Eb - sign-extend a byte (register or memory) into the
     * destination general register, one case per effective operand size.
     */
    IEMOP_MNEMONIC("movsx Gv,Eb");

    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint16_t, u16Value);
                IEM_MC_FETCH_GREG_U8_SX_U16(u16Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint32_t, u32Value);
                IEM_MC_FETCH_GREG_U8_SX_U32(u32Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint64_t, u64Value);
                IEM_MC_FETCH_GREG_U8_SX_U64(u64Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /*
         * We're loading a register from memory.
         */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint16_t, u16Value);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_FETCH_MEM_U8_SX_U16(u16Value, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint32_t, u32Value);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_FETCH_MEM_U8_SX_U32(u32Value, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint64_t, u64Value);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_FETCH_MEM_U8_SX_U64(u64Value, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
5917
5918
/** Opcode 0x0f 0xbf. */
FNIEMOP_DEF(iemOp_movsx_Gv_Ew)
{
    /*
     * MOVSX Gv,Ew - sign-extend a word into the destination register.
     * Only 32-bit and 64-bit destinations are distinguished here; the
     * 16-bit case is folded into the 32-bit path.
     */
    IEMOP_MNEMONIC("movsx Gv,Ew");

    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

    /** @todo Not entirely sure how the operand size prefix is handled here,
     *        assuming that it will be ignored. Would be nice to have a few
     *        test for this. */
    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        if (pIemCpu->enmEffOpSize != IEMMODE_64BIT)
        {
            IEM_MC_BEGIN(0, 1);
            IEM_MC_LOCAL(uint32_t, u32Value);
            IEM_MC_FETCH_GREG_U16_SX_U32(u32Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
            IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
        }
        else
        {
            IEM_MC_BEGIN(0, 1);
            IEM_MC_LOCAL(uint64_t, u64Value);
            IEM_MC_FETCH_GREG_U16_SX_U64(u64Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
            IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
        }
    }
    else
    {
        /*
         * We're loading a register from memory.
         */
        if (pIemCpu->enmEffOpSize != IEMMODE_64BIT)
        {
            IEM_MC_BEGIN(0, 2);
            IEM_MC_LOCAL(uint32_t, u32Value);
            IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
            IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
            IEM_MC_FETCH_MEM_U16_SX_U32(u32Value, pIemCpu->iEffSeg, GCPtrEffDst);
            IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
        }
        else
        {
            IEM_MC_BEGIN(0, 2);
            IEM_MC_LOCAL(uint64_t, u64Value);
            IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
            IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
            IEM_MC_FETCH_MEM_U16_SX_U64(u64Value, pIemCpu->iEffSeg, GCPtrEffDst);
            IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
        }
    }
    return VINF_SUCCESS;
}
5984
5985
/** Opcode 0x0f 0xc0. */
FNIEMOP_DEF(iemOp_xadd_Eb_Gb)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_MNEMONIC("xadd Eb,Gb");

    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* Register destination: LOCK is invalid here. */
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(3, 0);
        IEM_MC_ARG(uint8_t *,  pu8Dst,  0);
        IEM_MC_ARG(uint8_t *,  pu8Reg,  1);
        IEM_MC_ARG(uint32_t *, pEFlags, 2);

        IEM_MC_REF_GREG_U8(pu8Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_REF_GREG_U8(pu8Reg, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
        IEM_MC_REF_EFLAGS(pEFlags);
        IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u8, pu8Dst, pu8Reg, pEFlags);

        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /*
         * We're accessing memory.
         */
        IEM_MC_BEGIN(3, 3);
        IEM_MC_ARG(uint8_t *,  pu8Dst,          0);
        IEM_MC_ARG(uint8_t *,  pu8Reg,          1);
        IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
        IEM_MC_LOCAL(uint8_t,  u8RegCopy);      /* the worker exchanges thru this copy; written back below */
        IEM_MC_LOCAL(RTGCPTR,  GCPtrEffDst);

        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEM_MC_MEM_MAP(pu8Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
        IEM_MC_FETCH_GREG_U8(u8RegCopy, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
        IEM_MC_REF_LOCAL(pu8Reg, u8RegCopy);
        IEM_MC_FETCH_EFLAGS(EFlags);
        /* Dispatch to the locked worker when a LOCK prefix is present. */
        if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
            IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u8, pu8Dst, pu8Reg, pEFlags);
        else
            IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u8_locked, pu8Dst, pu8Reg, pEFlags);

        IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Dst, IEM_ACCESS_DATA_RW);
        IEM_MC_COMMIT_EFLAGS(EFlags);
        IEM_MC_STORE_GREG_U8(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u8RegCopy);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
        return VINF_SUCCESS;
    }
    return VINF_SUCCESS;
}
6043
6044
/** Opcode 0x0f 0xc1. */
FNIEMOP_DEF(iemOp_xadd_Ev_Gv)
{
    IEMOP_MNEMONIC("xadd Ev,Gv");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);

    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* Register destination: LOCK is invalid here. */
        IEMOP_HLP_NO_LOCK_PREFIX();

        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint16_t *, pu16Dst,  0);
                IEM_MC_ARG(uint16_t *, pu16Reg,  1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);

                IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_GREG_U16(pu16Reg, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u16, pu16Dst, pu16Reg, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint32_t *, pu32Dst,  0);
                IEM_MC_ARG(uint32_t *, pu32Reg,  1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);

                IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_GREG_U32(pu32Reg, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u32, pu32Dst, pu32Reg, pEFlags);

                /* 32-bit writes in long mode zero bits 63:32 of both GPRs. */
                IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);
                IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Reg);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint64_t *, pu64Dst,  0);
                IEM_MC_ARG(uint64_t *, pu64Reg,  1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);

                IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_GREG_U64(pu64Reg, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u64, pu64Dst, pu64Reg, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /*
         * We're accessing memory.
         */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(3, 3);
                IEM_MC_ARG(uint16_t *, pu16Dst,          0);
                IEM_MC_ARG(uint16_t *, pu16Reg,          1);
                IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
                IEM_MC_LOCAL(uint16_t, u16RegCopy);      /* the worker exchanges thru this copy; written back below */
                IEM_MC_LOCAL(RTGCPTR,  GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_GREG_U16(u16RegCopy, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_LOCAL(pu16Reg, u16RegCopy);
                IEM_MC_FETCH_EFLAGS(EFlags);
                /* Dispatch to the locked worker when a LOCK prefix is present. */
                if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
                    IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u16, pu16Dst, pu16Reg, pEFlags);
                else
                    IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u16_locked, pu16Dst, pu16Reg, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16RegCopy);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(3, 3);
                IEM_MC_ARG(uint32_t *, pu32Dst,          0);
                IEM_MC_ARG(uint32_t *, pu32Reg,          1);
                IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
                IEM_MC_LOCAL(uint32_t, u32RegCopy);
                IEM_MC_LOCAL(RTGCPTR,  GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_GREG_U32(u32RegCopy, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_LOCAL(pu32Reg, u32RegCopy);
                IEM_MC_FETCH_EFLAGS(EFlags);
                if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
                    IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u32, pu32Dst, pu32Reg, pEFlags);
                else
                    IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u32_locked, pu32Dst, pu32Reg, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                /* Note: IEM_MC_STORE_GREG_U32 clears the high dword of the 64-bit GPR. */
                IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32RegCopy);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(3, 3);
                IEM_MC_ARG(uint64_t *, pu64Dst,          0);
                IEM_MC_ARG(uint64_t *, pu64Reg,          1);
                IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
                IEM_MC_LOCAL(uint64_t, u64RegCopy);
                IEM_MC_LOCAL(RTGCPTR,  GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_GREG_U64(u64RegCopy, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_LOCAL(pu64Reg, u64RegCopy);
                IEM_MC_FETCH_EFLAGS(EFlags);
                if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
                    IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u64, pu64Dst, pu64Reg, pEFlags);
                else
                    IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u64_locked, pu64Dst, pu64Reg, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, IEM_ACCESS_DATA_RW);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64RegCopy);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
6196
/* Stub declarations for opcodes 0x0f 0xc2 thru 0xc6 (not implemented yet). */

/** Opcode 0x0f 0xc2. */
FNIEMOP_STUB(iemOp_cmpps_Vps_Wps_Ib__cmppd_Vpd_Wpd_Ib__cmpss_Vss_Wss_Ib__cmpsd_Vsd_Wsd_Ib);

/** Opcode 0x0f 0xc3. */
FNIEMOP_STUB(iemOp_movnti_My_Gy);

/** Opcode 0x0f 0xc4. */
FNIEMOP_STUB(iemOp_pinsrw_Pq_Ry_Mw_Ib__pinsrw_Vdq_Ry_Mw_Ib);

/** Opcode 0x0f 0xc5. */
FNIEMOP_STUB(iemOp_pextrw_Gd_Nq_Ib__pextrw_Gd_Udq_Ib);

/** Opcode 0x0f 0xc6. */
FNIEMOP_STUB(iemOp_shufps_Vps_Wps_Ib__shufdp_Vpd_Wpd_Ib);
6211
6212
/** Opcode 0x0f 0xc7 !11/1. */
FNIEMOP_DEF_1(iemOp_Grp9_cmpxchg8b_Mq, uint8_t, bRm)
{
    IEMOP_MNEMONIC("cmpxchg8b Mq");

    IEM_MC_BEGIN(4, 3);
    IEM_MC_ARG(uint64_t *, pu64MemDst,     0);
    IEM_MC_ARG(PRTUINT64U, pu64EaxEdx,     1);  /* comparand: EDX:EAX */
    IEM_MC_ARG(PRTUINT64U, pu64EbxEcx,     2);  /* replacement value: ECX:EBX */
    IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 3);
    IEM_MC_LOCAL(RTUINT64U, u64EaxEdx);
    IEM_MC_LOCAL(RTUINT64U, u64EbxEcx);
    IEM_MC_LOCAL(RTGCPTR,  GCPtrEffDst);

    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
    IEMOP_HLP_DONE_DECODING();
    IEM_MC_MEM_MAP(pu64MemDst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);

    /* Assemble the EDX:EAX comparand from the 32-bit register halves. */
    IEM_MC_FETCH_GREG_U32(u64EaxEdx.s.Lo, X86_GREG_xAX);
    IEM_MC_FETCH_GREG_U32(u64EaxEdx.s.Hi, X86_GREG_xDX);
    IEM_MC_REF_LOCAL(pu64EaxEdx, u64EaxEdx);

    /* Assemble the ECX:EBX replacement value likewise. */
    IEM_MC_FETCH_GREG_U32(u64EbxEcx.s.Lo, X86_GREG_xBX);
    IEM_MC_FETCH_GREG_U32(u64EbxEcx.s.Hi, X86_GREG_xCX);
    IEM_MC_REF_LOCAL(pu64EbxEcx, u64EbxEcx);

    IEM_MC_FETCH_EFLAGS(EFlags);
    if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
        IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg8b, pu64MemDst, pu64EaxEdx, pu64EbxEcx, pEFlags);
    else
        IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg8b_locked, pu64MemDst, pu64EaxEdx, pu64EbxEcx, pEFlags);

    IEM_MC_MEM_COMMIT_AND_UNMAP(pu64MemDst, IEM_ACCESS_DATA_RW);
    IEM_MC_COMMIT_EFLAGS(EFlags);
    /* On mismatch (ZF clear) the worker left the memory value in u64EaxEdx;
       write it back to EDX:EAX. */
    IEM_MC_IF_EFL_BIT_NOT_SET(X86_EFL_ZF)
        /** @todo Testcase: Check effect of cmpxchg8b on bits 63:32 in rax and rdx. */
        IEM_MC_STORE_GREG_U32(X86_GREG_xAX, u64EaxEdx.s.Lo);
        IEM_MC_STORE_GREG_U32(X86_GREG_xDX, u64EaxEdx.s.Hi);
    IEM_MC_ENDIF();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
6257
6258
/* Group 9 members not implemented yet; FNIEMOP_UD_STUB_1 declares stubs
   taking the already-fetched ModR/M byte. */

/** Opcode REX.W 0x0f 0xc7 !11/1. */
FNIEMOP_UD_STUB_1(iemOp_Grp9_cmpxchg16b_Mdq, uint8_t, bRm);

/** Opcode 0x0f 0xc7 11/6. */
FNIEMOP_UD_STUB_1(iemOp_Grp9_rdrand_Rv, uint8_t, bRm);

/** Opcode 0x0f 0xc7 !11/6. */
FNIEMOP_UD_STUB_1(iemOp_Grp9_vmptrld_Mq, uint8_t, bRm);

/** Opcode 0x66 0x0f 0xc7 !11/6. */
FNIEMOP_UD_STUB_1(iemOp_Grp9_vmclear_Mq, uint8_t, bRm);

/** Opcode 0xf3 0x0f 0xc7 !11/6. */
FNIEMOP_UD_STUB_1(iemOp_Grp9_vmxon_Mq, uint8_t, bRm);

/** Opcode [0xf3] 0x0f 0xc7 !11/7. */
FNIEMOP_UD_STUB_1(iemOp_Grp9_vmptrst_Mq, uint8_t, bRm);
6276
6277
6278/** Opcode 0x0f 0xc7. */
6279FNIEMOP_DEF(iemOp_Grp9)
6280{
6281 /** @todo Testcase: Check mixing 0x66 and 0xf3. Check the effect of 0xf2. */
6282 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
6283 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
6284 {
6285 case 0: case 2: case 3: case 4: case 5:
6286 return IEMOP_RAISE_INVALID_OPCODE();
6287 case 1:
6288 /** @todo Testcase: Check prefix effects on cmpxchg8b/16b. */
6289 if ( (bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT)
6290 || (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ))) /** @todo Testcase: AMD seems to express a different idea here wrt prefixes. */
6291 return IEMOP_RAISE_INVALID_OPCODE();
6292 if (bRm & IEM_OP_PRF_SIZE_REX_W)
6293 return FNIEMOP_CALL_1(iemOp_Grp9_cmpxchg16b_Mdq, bRm);
6294 return FNIEMOP_CALL_1(iemOp_Grp9_cmpxchg8b_Mq, bRm);
6295 case 6:
6296 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
6297 return FNIEMOP_CALL_1(iemOp_Grp9_rdrand_Rv, bRm);
6298 switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ))
6299 {
6300 case 0:
6301 return FNIEMOP_CALL_1(iemOp_Grp9_vmptrld_Mq, bRm);
6302 case IEM_OP_PRF_SIZE_OP:
6303 return FNIEMOP_CALL_1(iemOp_Grp9_vmclear_Mq, bRm);
6304 case IEM_OP_PRF_REPZ:
6305 return FNIEMOP_CALL_1(iemOp_Grp9_vmxon_Mq, bRm);
6306 default:
6307 return IEMOP_RAISE_INVALID_OPCODE();
6308 }
6309 case 7:
6310 switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ))
6311 {
6312 case 0:
6313 case IEM_OP_PRF_REPZ:
6314 return FNIEMOP_CALL_1(iemOp_Grp9_vmptrst_Mq, bRm);
6315 default:
6316 return IEMOP_RAISE_INVALID_OPCODE();
6317 }
6318 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6319 }
6320}
6321
6322
/**
 * Common 'bswap register' helper.
 *
 * Byte-swaps the general purpose register given by @a iReg according to the
 * effective operand size.
 */
FNIEMOP_DEF_1(iemOpCommonBswapGReg, uint8_t, iReg)
{
    IEMOP_HLP_NO_LOCK_PREFIX();
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            /* 16-bit operand: delegated to the u16 worker thru a 32-bit
               reference so the upper half of the dword stays untouched. */
            IEM_MC_BEGIN(1, 0);
            IEM_MC_ARG(uint32_t *,  pu32Dst, 0);
            IEM_MC_REF_GREG_U32(pu32Dst, iReg);     /* Don't clear the high dword! */
            IEM_MC_CALL_VOID_AIMPL_1(iemAImpl_bswap_u16, pu32Dst);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_32BIT:
            /* 32-bit operand: clears bits 63:32 in long mode. */
            IEM_MC_BEGIN(1, 0);
            IEM_MC_ARG(uint32_t *,  pu32Dst, 0);
            IEM_MC_REF_GREG_U32(pu32Dst, iReg);
            IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);
            IEM_MC_CALL_VOID_AIMPL_1(iemAImpl_bswap_u32, pu32Dst);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_64BIT:
            IEM_MC_BEGIN(1, 0);
            IEM_MC_ARG(uint64_t *,  pu64Dst, 0);
            IEM_MC_REF_GREG_U64(pu64Dst, iReg);
            IEM_MC_CALL_VOID_AIMPL_1(iemAImpl_bswap_u64, pu64Dst);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
6362
6363
/** Opcode 0x0f 0xc8. */
FNIEMOP_DEF(iemOp_bswap_rAX_r8)
{
    IEMOP_MNEMONIC("bswap rAX/r8");
    /* Note! Intel manuals states that R8-R15 can be accessed by using a REX.X
             prefix.  REX.B is the correct prefix it appears.  For a parallel
             case, see iemOp_mov_AL_Ib and iemOp_mov_eAX_Iv. */
    return FNIEMOP_CALL_1(iemOpCommonBswapGReg, X86_GREG_xAX | pIemCpu->uRexB);
}
6373
6374
/** Opcode 0x0f 0xc9. */
FNIEMOP_DEF(iemOp_bswap_rCX_r9)
{
    IEMOP_MNEMONIC("bswap rCX/r9");
    /* See the REX.B note in iemOp_bswap_rAX_r8. */
    return FNIEMOP_CALL_1(iemOpCommonBswapGReg, X86_GREG_xCX | pIemCpu->uRexB);
}
6381
6382
6383/** Opcode 0x0f 0xca. */
6384FNIEMOP_DEF(iemOp_bswap_rDX_r10)
6385{
6386 IEMOP_MNEMONIC("bswap rDX/r9");
6387 return FNIEMOP_CALL_1(iemOpCommonBswapGReg, X86_GREG_xDX | pIemCpu->uRexB);
6388}
6389
6390
6391/** Opcode 0x0f 0xcb. */
6392FNIEMOP_DEF(iemOp_bswap_rBX_r11)
6393{
6394 IEMOP_MNEMONIC("bswap rBX/r9");
6395 return FNIEMOP_CALL_1(iemOpCommonBswapGReg, X86_GREG_xBX | pIemCpu->uRexB);
6396}
6397
6398
/** Opcode 0x0f 0xcc. */
FNIEMOP_DEF(iemOp_bswap_rSP_r12)
{
    IEMOP_MNEMONIC("bswap rSP/r12");
    /* See the REX.B note in iemOp_bswap_rAX_r8. */
    return FNIEMOP_CALL_1(iemOpCommonBswapGReg, X86_GREG_xSP | pIemCpu->uRexB);
}
6405
6406
/** Opcode 0x0f 0xcd. */
FNIEMOP_DEF(iemOp_bswap_rBP_r13)
{
    IEMOP_MNEMONIC("bswap rBP/r13");
    /* See the REX.B note in iemOp_bswap_rAX_r8. */
    return FNIEMOP_CALL_1(iemOpCommonBswapGReg, X86_GREG_xBP | pIemCpu->uRexB);
}
6413
6414
/** Opcode 0x0f 0xce. */
FNIEMOP_DEF(iemOp_bswap_rSI_r14)
{
    IEMOP_MNEMONIC("bswap rSI/r14");
    /* See the REX.B note in iemOp_bswap_rAX_r8. */
    return FNIEMOP_CALL_1(iemOpCommonBswapGReg, X86_GREG_xSI | pIemCpu->uRexB);
}
6421
6422
/** Opcode 0x0f 0xcf. */
FNIEMOP_DEF(iemOp_bswap_rDI_r15)
{
    IEMOP_MNEMONIC("bswap rDI/r15");
    /* See the REX.B note in iemOp_bswap_rAX_r8. */
    return FNIEMOP_CALL_1(iemOpCommonBswapGReg, X86_GREG_xDI | pIemCpu->uRexB);
}
6429
6430
6431
/* Stub declarations for opcodes 0x0f 0xd0 thru 0xd6 (not implemented yet). */

/** Opcode 0x0f 0xd0. */
FNIEMOP_STUB(iemOp_addsubpd_Vpd_Wpd__addsubps_Vps_Wps);
/** Opcode 0x0f 0xd1. */
FNIEMOP_STUB(iemOp_psrlw_Pp_Qp__psrlw_Vdp_Wdq);
/** Opcode 0x0f 0xd2. */
FNIEMOP_STUB(iemOp_psrld_Pq_Qq__psrld_Vdq_Wdq);
/** Opcode 0x0f 0xd3. */
FNIEMOP_STUB(iemOp_psrlq_Pq_Qq__psrlq_Vdq_Wdq);
/** Opcode 0x0f 0xd4. */
FNIEMOP_STUB(iemOp_paddq_Pq_Qq__paddq_Vdq_Wdq);
/** Opcode 0x0f 0xd5. */
FNIEMOP_STUB(iemOp_pmulq_Pq_Qq__pmullw_Vdq_Wdq);
/** Opcode 0x0f 0xd6. */
FNIEMOP_STUB(iemOp_movq_Wq_Vq__movq2dq_Vdq_Nq__movdq2q_Pq_Uq);
6446
6447
6448/** Opcode 0x0f 0xd7. */
6449FNIEMOP_DEF(iemOp_pmovmskb_Gd_Nq__pmovmskb_Gd_Udq)
6450{
6451 /* Docs says register only. */
6452 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
6453 if ((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT)) /** @todo test that this is registers only. */
6454 return IEMOP_RAISE_INVALID_OPCODE();
6455
6456 /* Note! Taking the lazy approch here wrt the high 32-bits of the GREG. */
6457 /** @todo testcase: Check that the instruction implicitly clears the high
6458 * bits in 64-bit mode. The REX.W is first necessary when VLMAX > 256
6459 * and opcode modifications are made to work with the whole width (not
6460 * just 128). */
6461 switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
6462 {
6463 case IEM_OP_PRF_SIZE_OP: /* SSE */
6464 IEMOP_MNEMONIC("pmovmskb Gd,Nq");
6465 IEMOP_HLP_DECODED_NL_2(OP_PMOVMSKB, IEMOPFORM_RM_REG, OP_PARM_Gd, OP_PARM_Vdq, DISOPTYPE_SSE | DISOPTYPE_HARMLESS);
6466 IEM_MC_BEGIN(2, 0);
6467 IEM_MC_ARG(uint64_t *, pDst, 0);
6468 IEM_MC_ARG(uint128_t const *, pSrc, 1);
6469 IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
6470 IEM_MC_REF_GREG_U64(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
6471 IEM_MC_REF_XREG_U128_CONST(pSrc, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
6472 IEM_MC_CALL_SSE_AIMPL_2(iemAImpl_pmovmskb_u128, pDst, pSrc);
6473 IEM_MC_ADVANCE_RIP();
6474 IEM_MC_END();
6475 return VINF_SUCCESS;
6476
6477 case 0: /* MMX */
6478 IEMOP_MNEMONIC("pmovmskb Gd,Udq");
6479 IEMOP_HLP_DECODED_NL_2(OP_PMOVMSKB, IEMOPFORM_RM_REG, OP_PARM_Gd, OP_PARM_Vdq, DISOPTYPE_MMX | DISOPTYPE_HARMLESS);
6480 IEM_MC_BEGIN(2, 0);
6481 IEM_MC_ARG(uint64_t *, pDst, 0);
6482 IEM_MC_ARG(uint64_t const *, pSrc, 1);
6483 IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT();
6484 IEM_MC_REF_GREG_U64(pDst, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
6485 IEM_MC_REF_MREG_U64_CONST(pSrc, bRm & X86_MODRM_RM_MASK);
6486 IEM_MC_CALL_MMX_AIMPL_2(iemAImpl_pmovmskb_u64, pDst, pSrc);
6487 IEM_MC_ADVANCE_RIP();
6488 IEM_MC_END();
6489 return VINF_SUCCESS;
6490
6491 default:
6492 return IEMOP_RAISE_INVALID_OPCODE();
6493 }
6494}
6495
6496
/* Stub declarations for opcodes 0x0f 0xd8 thru 0xee (not implemented yet).
   NB: a few identifiers carry typos (e.g. "pamxub" for pmaxub); they are kept
   as-is since the opcode table references them by these exact names. */

/** Opcode 0x0f 0xd8. */
FNIEMOP_STUB(iemOp_psubusb_Pq_Qq__psubusb_Vdq_Wdq);
/** Opcode 0x0f 0xd9. */
FNIEMOP_STUB(iemOp_psubusw_Pq_Qq__psubusw_Vdq_Wdq);
/** Opcode 0x0f 0xda. */
FNIEMOP_STUB(iemOp_pminub_Pq_Qq__pminub_Vdq_Wdq);
/** Opcode 0x0f 0xdb. */
FNIEMOP_STUB(iemOp_pand_Pq_Qq__pand_Vdq_Wdq);
/** Opcode 0x0f 0xdc. */
FNIEMOP_STUB(iemOp_paddusb_Pq_Qq__paddusb_Vdq_Wdq);
/** Opcode 0x0f 0xdd. */
FNIEMOP_STUB(iemOp_paddusw_Pq_Qq__paddusw_Vdq_Wdq);
/** Opcode 0x0f 0xde. */
FNIEMOP_STUB(iemOp_pmaxub_Pq_Qq__pamxub_Vdq_Wdq);
/** Opcode 0x0f 0xdf. */
FNIEMOP_STUB(iemOp_pandn_Pq_Qq__pandn_Vdq_Wdq);
/** Opcode 0x0f 0xe0. */
FNIEMOP_STUB(iemOp_pavgb_Pq_Qq__pavgb_Vdq_Wdq);
/** Opcode 0x0f 0xe1. */
FNIEMOP_STUB(iemOp_psraw_Pq_Qq__psraw_Vdq_Wdq);
/** Opcode 0x0f 0xe2. */
FNIEMOP_STUB(iemOp_psrad_Pq_Qq__psrad_Vdq_Wdq);
/** Opcode 0x0f 0xe3. */
FNIEMOP_STUB(iemOp_pavgw_Pq_Qq__pavgw_Vdq_Wdq);
/** Opcode 0x0f 0xe4. */
FNIEMOP_STUB(iemOp_pmulhuw_Pq_Qq__pmulhuw_Vdq_Wdq);
/** Opcode 0x0f 0xe5. */
FNIEMOP_STUB(iemOp_pmulhw_Pq_Qq__pmulhw_Vdq_Wdq);
/** Opcode 0x0f 0xe6. */
FNIEMOP_STUB(iemOp_cvttpd2dq_Vdq_Wdp__cvtdq2pd_Vdq_Wpd__cvtpd2dq_Vdq_Wpd);
/** Opcode 0x0f 0xe7. */
FNIEMOP_STUB(iemOp_movntq_Mq_Pq__movntdq_Mdq_Vdq);
/** Opcode 0x0f 0xe8. */
FNIEMOP_STUB(iemOp_psubsb_Pq_Qq__psubsb_Vdq_Wdq);
/** Opcode 0x0f 0xe9. */
FNIEMOP_STUB(iemOp_psubsw_Pq_Qq__psubsw_Vdq_Wdq);
/** Opcode 0x0f 0xea. */
FNIEMOP_STUB(iemOp_pminsw_Pq_Qq__pminsw_Vdq_Wdq);
/** Opcode 0x0f 0xeb. */
FNIEMOP_STUB(iemOp_por_Pq_Qq__por_Vdq_Wdq);
/** Opcode 0x0f 0xec. */
FNIEMOP_STUB(iemOp_paddsb_Pq_Qq__paddsb_Vdq_Wdq);
/** Opcode 0x0f 0xed. */
FNIEMOP_STUB(iemOp_paddsw_Pq_Qq__paddsw_Vdq_Wdq);
/** Opcode 0x0f 0xee. */
FNIEMOP_STUB(iemOp_pmaxsw_Pq_Qq__pmaxsw_Vdq_Wdq);
6543
6544
/** Opcode 0x0f 0xef. */
FNIEMOP_DEF(iemOp_pxor_Pq_Qq__pxor_Vdq_Wdq)
{
    IEMOP_MNEMONIC("pxor");
    /* Shared MMX/SSE2 full-register binary operation worker. */
    return FNIEMOP_CALL_1(iemOpCommonMmxSse2_FullFull_To_Full, &g_iemAImpl_pxor);
}
6551
6552
/* Stub declarations for opcodes 0x0f 0xf0 thru 0xfe (not implemented yet). */

/** Opcode 0x0f 0xf0. */
FNIEMOP_STUB(iemOp_lddqu_Vdq_Mdq);
/** Opcode 0x0f 0xf1. */
FNIEMOP_STUB(iemOp_psllw_Pq_Qq__pslw_Vdq_Wdq);
/** Opcode 0x0f 0xf2. */
FNIEMOP_STUB(iemOp_psld_Pq_Qq__pslld_Vdq_Wdq);
/** Opcode 0x0f 0xf3. */
FNIEMOP_STUB(iemOp_psllq_Pq_Qq__pslq_Vdq_Wdq);
/** Opcode 0x0f 0xf4. */
FNIEMOP_STUB(iemOp_pmuludq_Pq_Qq__pmuludq_Vdq_Wdq);
/** Opcode 0x0f 0xf5. */
FNIEMOP_STUB(iemOp_pmaddwd_Pq_Qq__pmaddwd_Vdq_Wdq);
/** Opcode 0x0f 0xf6. */
FNIEMOP_STUB(iemOp_psadbw_Pq_Qq__psadbw_Vdq_Wdq);
/** Opcode 0x0f 0xf7. */
FNIEMOP_STUB(iemOp_maskmovq_Pq_Nq__maskmovdqu_Vdq_Udq);
/** Opcode 0x0f 0xf8. */
FNIEMOP_STUB(iemOp_psubb_Pq_Qq_psubb_Vdq_Wdq); //NEXT
/** Opcode 0x0f 0xf9. */
FNIEMOP_STUB(iemOp_psubw_Pq_Qq__psubw_Vdq_Wdq);
/** Opcode 0x0f 0xfa. */
FNIEMOP_STUB(iemOp_psubd_Pq_Qq__psubd_Vdq_Wdq);
/** Opcode 0x0f 0xfb. */
FNIEMOP_STUB(iemOp_psubq_Pq_Qq__psbuq_Vdq_Wdq);
/** Opcode 0x0f 0xfc. */
FNIEMOP_STUB(iemOp_paddb_Pq_Qq__paddb_Vdq_Wdq);
/** Opcode 0x0f 0xfd. */
FNIEMOP_STUB(iemOp_paddw_Pq_Qq__paddw_Vdq_Wdq);
/** Opcode 0x0f 0xfe. */
FNIEMOP_STUB(iemOp_paddd_Pq_Qq__paddd_Vdq_Wdq);
6583
6584
/** Dispatch table for the two-byte (0x0f prefixed) opcode map, indexed by the
 *  second opcode byte. */
const PFNIEMOP g_apfnTwoByteMap[256] =
{
    /* 0x00 */ iemOp_Grp6,
    /* 0x01 */ iemOp_Grp7,
    /* 0x02 */ iemOp_lar_Gv_Ew,
    /* 0x03 */ iemOp_lsl_Gv_Ew,
    /* 0x04 */ iemOp_Invalid,
    /* 0x05 */ iemOp_syscall,
    /* 0x06 */ iemOp_clts,
    /* 0x07 */ iemOp_sysret,
    /* 0x08 */ iemOp_invd,
    /* 0x09 */ iemOp_wbinvd,
    /* 0x0a */ iemOp_Invalid,
    /* 0x0b */ iemOp_ud2,
    /* 0x0c */ iemOp_Invalid,
    /* 0x0d */ iemOp_nop_Ev_GrpP,
    /* 0x0e */ iemOp_femms,
    /* 0x0f */ iemOp_3Dnow,
    /* 0x10 */ iemOp_movups_Vps_Wps__movupd_Vpd_Wpd__movss_Vss_Wss__movsd_Vsd_Wsd,
    /* 0x11 */ iemOp_movups_Wps_Vps__movupd_Wpd_Vpd__movss_Wss_Vss__movsd_Vsd_Wsd,
    /* 0x12 */ iemOp_movlps_Vq_Mq__movhlps_Vq_Uq__movlpd_Vq_Mq__movsldup_Vq_Wq__movddup_Vq_Wq,
    /* 0x13 */ iemOp_movlps_Mq_Vq__movlpd_Mq_Vq,
    /* 0x14 */ iemOp_unpckhlps_Vps_Wq__unpcklpd_Vpd_Wq,
    /* 0x15 */ iemOp_unpckhps_Vps_Wq__unpckhpd_Vpd_Wq,
    /* 0x16 */ iemOp_movhps_Vq_Mq__movlhps_Vq_Uq__movhpd_Vq_Mq__movshdup_Vq_Wq,
    /* 0x17 */ iemOp_movhps_Mq_Vq__movhpd_Mq_Vq,
    /* 0x18 */ iemOp_prefetch_Grp16,
    /* 0x19 */ iemOp_nop_Ev,
    /* 0x1a */ iemOp_nop_Ev,
    /* 0x1b */ iemOp_nop_Ev,
    /* 0x1c */ iemOp_nop_Ev,
    /* 0x1d */ iemOp_nop_Ev,
    /* 0x1e */ iemOp_nop_Ev,
    /* 0x1f */ iemOp_nop_Ev,
    /* 0x20 */ iemOp_mov_Rd_Cd,
    /* 0x21 */ iemOp_mov_Rd_Dd,
    /* 0x22 */ iemOp_mov_Cd_Rd,
    /* 0x23 */ iemOp_mov_Dd_Rd,
    /* 0x24 */ iemOp_mov_Rd_Td,
    /* 0x25 */ iemOp_Invalid,
    /* 0x26 */ iemOp_mov_Td_Rd,
    /* 0x27 */ iemOp_Invalid,
    /* 0x28 */ iemOp_movaps_Vps_Wps__movapd_Vpd_Wpd,
    /* 0x29 */ iemOp_movaps_Wps_Vps__movapd_Wpd_Vpd,
    /* 0x2a */ iemOp_cvtpi2ps_Vps_Qpi__cvtpi2pd_Vpd_Qpi__cvtsi2ss_Vss_Ey__cvtsi2sd_Vsd_Ey,
    /* 0x2b */ iemOp_movntps_Mps_Vps__movntpd_Mpd_Vpd,
    /* 0x2c */ iemOp_cvttps2pi_Ppi_Wps__cvttpd2pi_Ppi_Wpd__cvttss2si_Gy_Wss__cvttsd2si_Yu_Wsd,
    /* 0x2d */ iemOp_cvtps2pi_Ppi_Wps__cvtpd2pi_QpiWpd__cvtss2si_Gy_Wss__cvtsd2si_Gy_Wsd,
    /* 0x2e */ iemOp_ucomiss_Vss_Wss__ucomisd_Vsd_Wsd,
    /* 0x2f */ iemOp_comiss_Vss_Wss__comisd_Vsd_Wsd,
    /* 0x30 */ iemOp_wrmsr,
    /* 0x31 */ iemOp_rdtsc,
    /* 0x32 */ iemOp_rdmsr,
    /* 0x33 */ iemOp_rdpmc,
    /* 0x34 */ iemOp_sysenter,
    /* 0x35 */ iemOp_sysexit,
    /* 0x36 */ iemOp_Invalid,
    /* 0x37 */ iemOp_getsec,
    /* 0x38 */ iemOp_3byte_Esc_A4,
    /* 0x39 */ iemOp_Invalid,
    /* 0x3a */ iemOp_3byte_Esc_A5,
    /* 0x3b */ iemOp_Invalid,
    /* 0x3c */ iemOp_movnti_Gv_Ev/*??*/,
    /* 0x3d */ iemOp_Invalid,
    /* 0x3e */ iemOp_Invalid,
    /* 0x3f */ iemOp_Invalid,
    /* 0x40 */ iemOp_cmovo_Gv_Ev,
    /* 0x41 */ iemOp_cmovno_Gv_Ev,
    /* 0x42 */ iemOp_cmovc_Gv_Ev,
    /* 0x43 */ iemOp_cmovnc_Gv_Ev,
    /* 0x44 */ iemOp_cmove_Gv_Ev,
    /* 0x45 */ iemOp_cmovne_Gv_Ev,
    /* 0x46 */ iemOp_cmovbe_Gv_Ev,
    /* 0x47 */ iemOp_cmovnbe_Gv_Ev,
    /* 0x48 */ iemOp_cmovs_Gv_Ev,
    /* 0x49 */ iemOp_cmovns_Gv_Ev,
    /* 0x4a */ iemOp_cmovp_Gv_Ev,
    /* 0x4b */ iemOp_cmovnp_Gv_Ev,
    /* 0x4c */ iemOp_cmovl_Gv_Ev,
    /* 0x4d */ iemOp_cmovnl_Gv_Ev,
    /* 0x4e */ iemOp_cmovle_Gv_Ev,
    /* 0x4f */ iemOp_cmovnle_Gv_Ev,
    /* 0x50 */ iemOp_movmskps_Gy_Ups__movmskpd_Gy_Upd,
    /* 0x51 */ iemOp_sqrtps_Wps_Vps__sqrtpd_Wpd_Vpd__sqrtss_Vss_Wss__sqrtsd_Vsd_Wsd,
    /* 0x52 */ iemOp_rsqrtps_Wps_Vps__rsqrtss_Vss_Wss,
    /* 0x53 */ iemOp_rcpps_Wps_Vps__rcpss_Vs_Wss,
    /* 0x54 */ iemOp_andps_Vps_Wps__andpd_Wpd_Vpd,
    /* 0x55 */ iemOp_andnps_Vps_Wps__andnpd_Wpd_Vpd,
    /* 0x56 */ iemOp_orps_Wpd_Vpd__orpd_Wpd_Vpd,
    /* 0x57 */ iemOp_xorps_Vps_Wps__xorpd_Wpd_Vpd,
    /* 0x58 */ iemOp_addps_Vps_Wps__addpd_Vpd_Wpd__addss_Vss_Wss__addsd_Vsd_Wsd,
    /* 0x59 */ iemOp_mulps_Vps_Wps__mulpd_Vpd_Wpd__mulss_Vss__Wss__mulsd_Vsd_Wsd,
    /* 0x5a */ iemOp_cvtps2pd_Vpd_Wps__cvtpd2ps_Vps_Wpd__cvtss2sd_Vsd_Wss__cvtsd2ss_Vss_Wsd,
    /* 0x5b */ iemOp_cvtdq2ps_Vps_Wdq__cvtps2dq_Vdq_Wps__cvtps2dq_Vdq_Wps,
    /* 0x5c */ iemOp_subps_Vps_Wps__subpd_Vps_Wdp__subss_Vss_Wss__subsd_Vsd_Wsd,
    /* 0x5d */ iemOp_minps_Vps_Wps__minpd_Vpd_Wpd__minss_Vss_Wss__minsd_Vsd_Wsd,
    /* 0x5e */ iemOp_divps_Vps_Wps__divpd_Vpd_Wpd__divss_Vss_Wss__divsd_Vsd_Wsd,
    /* 0x5f */ iemOp_maxps_Vps_Wps__maxpd_Vpd_Wpd__maxss_Vss_Wss__maxsd_Vsd_Wsd,
    /* 0x60 */ iemOp_punpcklbw_Pq_Qd__punpcklbw_Vdq_Wdq,
    /* 0x61 */ iemOp_punpcklwd_Pq_Qd__punpcklwd_Vdq_Wdq,
    /* 0x62 */ iemOp_punpckldq_Pq_Qd__punpckldq_Vdq_Wdq,
    /* 0x63 */ iemOp_packsswb_Pq_Qq__packsswb_Vdq_Wdq,
    /* 0x64 */ iemOp_pcmpgtb_Pq_Qq__pcmpgtb_Vdq_Wdq,
    /* 0x65 */ iemOp_pcmpgtw_Pq_Qq__pcmpgtw_Vdq_Wdq,
    /* 0x66 */ iemOp_pcmpgtd_Pq_Qq__pcmpgtd_Vdq_Wdq,
    /* 0x67 */ iemOp_packuswb_Pq_Qq__packuswb_Vdq_Wdq,
    /* 0x68 */ iemOp_punpckhbw_Pq_Qq__punpckhbw_Vdq_Wdq,
    /* 0x69 */ iemOp_punpckhwd_Pq_Qd__punpckhwd_Vdq_Wdq,
    /* 0x6a */ iemOp_punpckhdq_Pq_Qd__punpckhdq_Vdq_Wdq,
    /* 0x6b */ iemOp_packssdw_Pq_Qd__packssdq_Vdq_Wdq,
    /* 0x6c */ iemOp_punpcklqdq_Vdq_Wdq,
    /* 0x6d */ iemOp_punpckhqdq_Vdq_Wdq,
    /* 0x6e */ iemOp_movd_q_Pd_Ey__movd_q_Vy_Ey,
    /* 0x6f */ iemOp_movq_Pq_Qq__movdqa_Vdq_Wdq__movdqu_Vdq_Wdq,
    /* 0x70 */ iemOp_pshufw_Pq_Qq_Ib__pshufd_Vdq_Wdq_Ib__pshufhw_Vdq_Wdq_Ib__pshuflq_Vdq_Wdq_Ib,
    /* 0x71 */ iemOp_Grp12,
    /* 0x72 */ iemOp_Grp13,
    /* 0x73 */ iemOp_Grp14,
    /* 0x74 */ iemOp_pcmpeqb_Pq_Qq__pcmpeqb_Vdq_Wdq,
    /* 0x75 */ iemOp_pcmpeqw_Pq_Qq__pcmpeqw_Vdq_Wdq,
    /* 0x76 */ iemOp_pcmped_Pq_Qq__pcmpeqd_Vdq_Wdq,
    /* 0x77 */ iemOp_emms,
    /* 0x78 */ iemOp_vmread_AmdGrp17,
    /* 0x79 */ iemOp_vmwrite,
    /* 0x7a */ iemOp_Invalid,
    /* 0x7b */ iemOp_Invalid,
    /* 0x7c */ iemOp_haddpd_Vdp_Wpd__haddps_Vps_Wps,
    /* 0x7d */ iemOp_hsubpd_Vpd_Wpd__hsubps_Vps_Wps,
    /* 0x7e */ iemOp_movd_q_Ey_Pd__movd_q_Ey_Vy__movq_Vq_Wq,
    /* 0x7f */ iemOp_movq_Qq_Pq__movq_movdqa_Wdq_Vdq__movdqu_Wdq_Vdq,
    /* 0x80 */ iemOp_jo_Jv,
    /* 0x81 */ iemOp_jno_Jv,
    /* 0x82 */ iemOp_jc_Jv,
    /* 0x83 */ iemOp_jnc_Jv,
    /* 0x84 */ iemOp_je_Jv,
    /* 0x85 */ iemOp_jne_Jv,
    /* 0x86 */ iemOp_jbe_Jv,
    /* 0x87 */ iemOp_jnbe_Jv,
    /* 0x88 */ iemOp_js_Jv,
    /* 0x89 */ iemOp_jns_Jv,
    /* 0x8a */ iemOp_jp_Jv,
    /* 0x8b */ iemOp_jnp_Jv,
    /* 0x8c */ iemOp_jl_Jv,
    /* 0x8d */ iemOp_jnl_Jv,
    /* 0x8e */ iemOp_jle_Jv,
    /* 0x8f */ iemOp_jnle_Jv,
    /* 0x90 */ iemOp_seto_Eb,
    /* 0x91 */ iemOp_setno_Eb,
    /* 0x92 */ iemOp_setc_Eb,
    /* 0x93 */ iemOp_setnc_Eb,
    /* 0x94 */ iemOp_sete_Eb,
    /* 0x95 */ iemOp_setne_Eb,
    /* 0x96 */ iemOp_setbe_Eb,
    /* 0x97 */ iemOp_setnbe_Eb,
    /* 0x98 */ iemOp_sets_Eb,
    /* 0x99 */ iemOp_setns_Eb,
    /* 0x9a */ iemOp_setp_Eb,
    /* 0x9b */ iemOp_setnp_Eb,
    /* 0x9c */ iemOp_setl_Eb,
    /* 0x9d */ iemOp_setnl_Eb,
    /* 0x9e */ iemOp_setle_Eb,
    /* 0x9f */ iemOp_setnle_Eb,
    /* 0xa0 */ iemOp_push_fs,
    /* 0xa1 */ iemOp_pop_fs,
    /* 0xa2 */ iemOp_cpuid,
    /* 0xa3 */ iemOp_bt_Ev_Gv,
    /* 0xa4 */ iemOp_shld_Ev_Gv_Ib,
    /* 0xa5 */ iemOp_shld_Ev_Gv_CL,
    /* 0xa6 */ iemOp_Invalid,
    /* 0xa7 */ iemOp_Invalid,
    /* 0xa8 */ iemOp_push_gs,
    /* 0xa9 */ iemOp_pop_gs,
    /* 0xaa */ iemOp_rsm,
    /* 0xab */ iemOp_bts_Ev_Gv,
    /* 0xac */ iemOp_shrd_Ev_Gv_Ib,
    /* 0xad */ iemOp_shrd_Ev_Gv_CL,
    /* 0xae */ iemOp_Grp15,
    /* 0xaf */ iemOp_imul_Gv_Ev,
    /* 0xb0 */ iemOp_cmpxchg_Eb_Gb,
    /* 0xb1 */ iemOp_cmpxchg_Ev_Gv,
    /* 0xb2 */ iemOp_lss_Gv_Mp,
    /* 0xb3 */ iemOp_btr_Ev_Gv,
    /* 0xb4 */ iemOp_lfs_Gv_Mp,
    /* 0xb5 */ iemOp_lgs_Gv_Mp,
    /* 0xb6 */ iemOp_movzx_Gv_Eb,
    /* 0xb7 */ iemOp_movzx_Gv_Ew,
    /* 0xb8 */ iemOp_popcnt_Gv_Ev_jmpe,
    /* 0xb9 */ iemOp_Grp10,
    /* 0xba */ iemOp_Grp8,
    /* 0xbb */ iemOp_btc_Ev_Gv,   /* index comment was duplicated as 0xbd */
    /* 0xbc */ iemOp_bsf_Gv_Ev,
    /* 0xbd */ iemOp_bsr_Gv_Ev,
    /* 0xbe */ iemOp_movsx_Gv_Eb,
    /* 0xbf */ iemOp_movsx_Gv_Ew,
    /* 0xc0 */ iemOp_xadd_Eb_Gb,
    /* 0xc1 */ iemOp_xadd_Ev_Gv,
    /* 0xc2 */ iemOp_cmpps_Vps_Wps_Ib__cmppd_Vpd_Wpd_Ib__cmpss_Vss_Wss_Ib__cmpsd_Vsd_Wsd_Ib,
    /* 0xc3 */ iemOp_movnti_My_Gy,
    /* 0xc4 */ iemOp_pinsrw_Pq_Ry_Mw_Ib__pinsrw_Vdq_Ry_Mw_Ib,
    /* 0xc5 */ iemOp_pextrw_Gd_Nq_Ib__pextrw_Gd_Udq_Ib,
    /* 0xc6 */ iemOp_shufps_Vps_Wps_Ib__shufdp_Vpd_Wpd_Ib,
    /* 0xc7 */ iemOp_Grp9,
    /* 0xc8 */ iemOp_bswap_rAX_r8,
    /* 0xc9 */ iemOp_bswap_rCX_r9,
    /* 0xca */ iemOp_bswap_rDX_r10,
    /* 0xcb */ iemOp_bswap_rBX_r11,
    /* 0xcc */ iemOp_bswap_rSP_r12,
    /* 0xcd */ iemOp_bswap_rBP_r13,
    /* 0xce */ iemOp_bswap_rSI_r14,
    /* 0xcf */ iemOp_bswap_rDI_r15,
    /* 0xd0 */ iemOp_addsubpd_Vpd_Wpd__addsubps_Vps_Wps,
    /* 0xd1 */ iemOp_psrlw_Pp_Qp__psrlw_Vdp_Wdq,
    /* 0xd2 */ iemOp_psrld_Pq_Qq__psrld_Vdq_Wdq,
    /* 0xd3 */ iemOp_psrlq_Pq_Qq__psrlq_Vdq_Wdq,
    /* 0xd4 */ iemOp_paddq_Pq_Qq__paddq_Vdq_Wdq,
    /* 0xd5 */ iemOp_pmulq_Pq_Qq__pmullw_Vdq_Wdq,
    /* 0xd6 */ iemOp_movq_Wq_Vq__movq2dq_Vdq_Nq__movdq2q_Pq_Uq,
    /* 0xd7 */ iemOp_pmovmskb_Gd_Nq__pmovmskb_Gd_Udq,
    /* 0xd8 */ iemOp_psubusb_Pq_Qq__psubusb_Vdq_Wdq,
    /* 0xd9 */ iemOp_psubusw_Pq_Qq__psubusw_Vdq_Wdq,
    /* 0xda */ iemOp_pminub_Pq_Qq__pminub_Vdq_Wdq,
    /* 0xdb */ iemOp_pand_Pq_Qq__pand_Vdq_Wdq,
    /* 0xdc */ iemOp_paddusb_Pq_Qq__paddusb_Vdq_Wdq,
    /* 0xdd */ iemOp_paddusw_Pq_Qq__paddusw_Vdq_Wdq,
    /* 0xde */ iemOp_pmaxub_Pq_Qq__pamxub_Vdq_Wdq,
    /* 0xdf */ iemOp_pandn_Pq_Qq__pandn_Vdq_Wdq,
    /* 0xe0 */ iemOp_pavgb_Pq_Qq__pavgb_Vdq_Wdq,
    /* 0xe1 */ iemOp_psraw_Pq_Qq__psraw_Vdq_Wdq,
    /* 0xe2 */ iemOp_psrad_Pq_Qq__psrad_Vdq_Wdq,
    /* 0xe3 */ iemOp_pavgw_Pq_Qq__pavgw_Vdq_Wdq,
    /* 0xe4 */ iemOp_pmulhuw_Pq_Qq__pmulhuw_Vdq_Wdq,
    /* 0xe5 */ iemOp_pmulhw_Pq_Qq__pmulhw_Vdq_Wdq,
    /* 0xe6 */ iemOp_cvttpd2dq_Vdq_Wdp__cvtdq2pd_Vdq_Wpd__cvtpd2dq_Vdq_Wpd,
    /* 0xe7 */ iemOp_movntq_Mq_Pq__movntdq_Mdq_Vdq,
    /* 0xe8 */ iemOp_psubsb_Pq_Qq__psubsb_Vdq_Wdq,
    /* 0xe9 */ iemOp_psubsw_Pq_Qq__psubsw_Vdq_Wdq,
    /* 0xea */ iemOp_pminsw_Pq_Qq__pminsw_Vdq_Wdq,
    /* 0xeb */ iemOp_por_Pq_Qq__por_Vdq_Wdq,
    /* 0xec */ iemOp_paddsb_Pq_Qq__paddsb_Vdq_Wdq,
    /* 0xed */ iemOp_paddsw_Pq_Qq__paddsw_Vdq_Wdq,
    /* 0xee */ iemOp_pmaxsw_Pq_Qq__pmaxsw_Vdq_Wdq,
    /* 0xef */ iemOp_pxor_Pq_Qq__pxor_Vdq_Wdq,
    /* 0xf0 */ iemOp_lddqu_Vdq_Mdq,
    /* 0xf1 */ iemOp_psllw_Pq_Qq__pslw_Vdq_Wdq,
    /* 0xf2 */ iemOp_psld_Pq_Qq__pslld_Vdq_Wdq,
    /* 0xf3 */ iemOp_psllq_Pq_Qq__pslq_Vdq_Wdq,
    /* 0xf4 */ iemOp_pmuludq_Pq_Qq__pmuludq_Vdq_Wdq,
    /* 0xf5 */ iemOp_pmaddwd_Pq_Qq__pmaddwd_Vdq_Wdq,
    /* 0xf6 */ iemOp_psadbw_Pq_Qq__psadbw_Vdq_Wdq,
    /* 0xf7 */ iemOp_maskmovq_Pq_Nq__maskmovdqu_Vdq_Udq,
    /* 0xf8 */ iemOp_psubb_Pq_Qq_psubb_Vdq_Wdq,
    /* 0xf9 */ iemOp_psubw_Pq_Qq__psubw_Vdq_Wdq,
    /* 0xfa */ iemOp_psubd_Pq_Qq__psubd_Vdq_Wdq,
    /* 0xfb */ iemOp_psubq_Pq_Qq__psbuq_Vdq_Wdq,
    /* 0xfc */ iemOp_paddb_Pq_Qq__paddb_Vdq_Wdq,
    /* 0xfd */ iemOp_paddw_Pq_Qq__paddw_Vdq_Wdq,
    /* 0xfe */ iemOp_paddd_Pq_Qq__paddd_Vdq_Wdq,
    /* 0xff */ iemOp_Invalid
};
6844
6845/** @} */
6846
6847
6848/** @name One byte opcodes.
6849 *
6850 * @{
6851 */
6852
/** Opcode 0x00 - ADD Eb,Gb: byte add, r/m destination; ModRM decode and flag
 *  updates are done by the shared rm_r8 binary-operator worker. */
FNIEMOP_DEF(iemOp_add_Eb_Gb)
{
    IEMOP_MNEMONIC("add Eb,Gb");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_r8, &g_iemAImpl_add);
}


/** Opcode 0x01 - ADD Ev,Gv: word/dword/qword add, r/m destination. */
FNIEMOP_DEF(iemOp_add_Ev_Gv)
{
    IEMOP_MNEMONIC("add Ev,Gv");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_rv, &g_iemAImpl_add);
}


/** Opcode 0x02 - ADD Gb,Eb: byte add, register destination. */
FNIEMOP_DEF(iemOp_add_Gb_Eb)
{
    IEMOP_MNEMONIC("add Gb,Eb");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_r8_rm, &g_iemAImpl_add);
}


/** Opcode 0x03 - ADD Gv,Ev: word/dword/qword add, register destination. */
FNIEMOP_DEF(iemOp_add_Gv_Ev)
{
    IEMOP_MNEMONIC("add Gv,Ev");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_add);
}


/** Opcode 0x04 - ADD AL,Ib: add immediate byte into AL. */
FNIEMOP_DEF(iemOp_add_Al_Ib)
{
    IEMOP_MNEMONIC("add al,Ib");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_AL_Ib, &g_iemAImpl_add);
}


/** Opcode 0x05 - ADD rAX,Iz: add immediate (Iz) into rAX. */
FNIEMOP_DEF(iemOp_add_eAX_Iz)
{
    IEMOP_MNEMONIC("add rAX,Iz");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rAX_Iz, &g_iemAImpl_add);
}
6899
6900
/** Opcode 0x06 - PUSH ES (invalid in 64-bit mode; enforced by the worker). */
FNIEMOP_DEF(iemOp_push_ES)
{
    IEMOP_MNEMONIC("push es");
    return FNIEMOP_CALL_1(iemOpCommonPushSReg, X86_SREG_ES);
}


/** Opcode 0x07 - POP ES: defers to the C implementation since loading a
 *  segment register involves descriptor checks and possible faults. */
FNIEMOP_DEF(iemOp_pop_ES)
{
    IEMOP_MNEMONIC("pop es");
    IEMOP_HLP_NO_64BIT();       /* invalid in 64-bit mode */
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_pop_Sreg, X86_SREG_ES, pIemCpu->enmEffOpSize);
}
6917
6918
/** Opcode 0x08 - OR Eb,Gb: byte OR, r/m destination; AF is declared undefined
 *  for the verifier since the hardware leaves it unspecified for OR. */
FNIEMOP_DEF(iemOp_or_Eb_Gb)
{
    IEMOP_MNEMONIC("or Eb,Gb");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_r8, &g_iemAImpl_or);
}
6926
6927
6928/** Opcode 0x09. */
6929FNIEMOP_DEF(iemOp_or_Ev_Gv)
6930{
6931 IEMOP_MNEMONIC("or Ev,Gv ");
6932 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
6933 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_rv, &g_iemAImpl_or);
6934}
6935
6936
/** Opcode 0x0a - OR Gb,Eb: byte OR, register destination (AF undefined). */
FNIEMOP_DEF(iemOp_or_Gb_Eb)
{
    IEMOP_MNEMONIC("or Gb,Eb");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_r8_rm, &g_iemAImpl_or);
}


/** Opcode 0x0b - OR Gv,Ev: word/dword/qword OR, register destination. */
FNIEMOP_DEF(iemOp_or_Gv_Ev)
{
    IEMOP_MNEMONIC("or Gv,Ev");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_or);
}


/** Opcode 0x0c - OR AL,Ib. */
FNIEMOP_DEF(iemOp_or_Al_Ib)
{
    IEMOP_MNEMONIC("or al,Ib");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_AL_Ib, &g_iemAImpl_or);
}


/** Opcode 0x0d - OR rAX,Iz. */
FNIEMOP_DEF(iemOp_or_eAX_Iz)
{
    IEMOP_MNEMONIC("or rAX,Iz");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rAX_Iz, &g_iemAImpl_or);
}
6971
6972
/** Opcode 0x0e - PUSH CS (invalid in 64-bit mode; enforced by the worker). */
FNIEMOP_DEF(iemOp_push_CS)
{
    IEMOP_MNEMONIC("push cs");
    return FNIEMOP_CALL_1(iemOpCommonPushSReg, X86_SREG_CS);
}


/** Opcode 0x0f - two-byte opcode escape: fetch the next opcode byte and
 *  dispatch through the two-byte opcode table. */
FNIEMOP_DEF(iemOp_2byteEscape)
{
    uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
    return FNIEMOP_CALL(g_apfnTwoByteMap[b]);
}
6987
/** Opcode 0x10 - ADC Eb,Gb: byte add-with-carry, r/m destination. */
FNIEMOP_DEF(iemOp_adc_Eb_Gb)
{
    IEMOP_MNEMONIC("adc Eb,Gb");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_r8, &g_iemAImpl_adc);
}


/** Opcode 0x11 - ADC Ev,Gv. */
FNIEMOP_DEF(iemOp_adc_Ev_Gv)
{
    IEMOP_MNEMONIC("adc Ev,Gv");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_rv, &g_iemAImpl_adc);
}


/** Opcode 0x12 - ADC Gb,Eb: byte add-with-carry, register destination. */
FNIEMOP_DEF(iemOp_adc_Gb_Eb)
{
    IEMOP_MNEMONIC("adc Gb,Eb");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_r8_rm, &g_iemAImpl_adc);
}


/** Opcode 0x13 - ADC Gv,Ev. */
FNIEMOP_DEF(iemOp_adc_Gv_Ev)
{
    IEMOP_MNEMONIC("adc Gv,Ev");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_adc);
}


/** Opcode 0x14 - ADC AL,Ib. */
FNIEMOP_DEF(iemOp_adc_Al_Ib)
{
    IEMOP_MNEMONIC("adc al,Ib");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_AL_Ib, &g_iemAImpl_adc);
}


/** Opcode 0x15 - ADC rAX,Iz. */
FNIEMOP_DEF(iemOp_adc_eAX_Iz)
{
    IEMOP_MNEMONIC("adc rAX,Iz");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rAX_Iz, &g_iemAImpl_adc);
}
7034
7035
/** Opcode 0x16 - PUSH SS (invalid in 64-bit mode; enforced by the worker). */
FNIEMOP_DEF(iemOp_push_SS)
{
    IEMOP_MNEMONIC("push ss");
    return FNIEMOP_CALL_1(iemOpCommonPushSReg, X86_SREG_SS);
}


/** Opcode 0x17 - POP SS: deferred to the C implementation (descriptor load). */
FNIEMOP_DEF(iemOp_pop_SS)
{
    IEMOP_MNEMONIC("pop ss"); /** @todo implies instruction fusing? */
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_NO_64BIT();       /* invalid in 64-bit mode */
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_pop_Sreg, X86_SREG_SS, pIemCpu->enmEffOpSize);
}
7052
7053
/** Opcode 0x18 - SBB Eb,Gb: byte subtract-with-borrow, r/m destination. */
FNIEMOP_DEF(iemOp_sbb_Eb_Gb)
{
    IEMOP_MNEMONIC("sbb Eb,Gb");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_r8, &g_iemAImpl_sbb);
}


/** Opcode 0x19 - SBB Ev,Gv. */
FNIEMOP_DEF(iemOp_sbb_Ev_Gv)
{
    IEMOP_MNEMONIC("sbb Ev,Gv");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_rv, &g_iemAImpl_sbb);
}


/** Opcode 0x1a - SBB Gb,Eb: byte subtract-with-borrow, register destination. */
FNIEMOP_DEF(iemOp_sbb_Gb_Eb)
{
    IEMOP_MNEMONIC("sbb Gb,Eb");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_r8_rm, &g_iemAImpl_sbb);
}


/** Opcode 0x1b - SBB Gv,Ev. */
FNIEMOP_DEF(iemOp_sbb_Gv_Ev)
{
    IEMOP_MNEMONIC("sbb Gv,Ev");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_sbb);
}


/** Opcode 0x1c - SBB AL,Ib. */
FNIEMOP_DEF(iemOp_sbb_Al_Ib)
{
    IEMOP_MNEMONIC("sbb al,Ib");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_AL_Ib, &g_iemAImpl_sbb);
}


/** Opcode 0x1d - SBB rAX,Iz. */
FNIEMOP_DEF(iemOp_sbb_eAX_Iz)
{
    IEMOP_MNEMONIC("sbb rAX,Iz");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rAX_Iz, &g_iemAImpl_sbb);
}
7100
7101
/** Opcode 0x1e - PUSH DS (invalid in 64-bit mode; enforced by the worker). */
FNIEMOP_DEF(iemOp_push_DS)
{
    IEMOP_MNEMONIC("push ds");
    return FNIEMOP_CALL_1(iemOpCommonPushSReg, X86_SREG_DS);
}


/** Opcode 0x1f - POP DS: deferred to the C implementation (descriptor load). */
FNIEMOP_DEF(iemOp_pop_DS)
{
    IEMOP_MNEMONIC("pop ds");
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_NO_64BIT();       /* invalid in 64-bit mode */
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_pop_Sreg, X86_SREG_DS, pIemCpu->enmEffOpSize);
}
7118
7119
/** Opcode 0x20 - AND Eb,Gb: byte AND, r/m destination (AF undefined). */
FNIEMOP_DEF(iemOp_and_Eb_Gb)
{
    IEMOP_MNEMONIC("and Eb,Gb");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_r8, &g_iemAImpl_and);
}


/** Opcode 0x21 - AND Ev,Gv (AF undefined). */
FNIEMOP_DEF(iemOp_and_Ev_Gv)
{
    IEMOP_MNEMONIC("and Ev,Gv");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_rv, &g_iemAImpl_and);
}


/** Opcode 0x22 - AND Gb,Eb: byte AND, register destination (AF undefined). */
FNIEMOP_DEF(iemOp_and_Gb_Eb)
{
    IEMOP_MNEMONIC("and Gb,Eb");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_r8_rm, &g_iemAImpl_and);
}


/** Opcode 0x23 - AND Gv,Ev (AF undefined). */
FNIEMOP_DEF(iemOp_and_Gv_Ev)
{
    IEMOP_MNEMONIC("and Gv,Ev");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_and);
}


/** Opcode 0x24 - AND AL,Ib (AF undefined). */
FNIEMOP_DEF(iemOp_and_Al_Ib)
{
    IEMOP_MNEMONIC("and al,Ib");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_AL_Ib, &g_iemAImpl_and);
}


/** Opcode 0x25 - AND rAX,Iz (AF undefined). */
FNIEMOP_DEF(iemOp_and_eAX_Iz)
{
    IEMOP_MNEMONIC("and rAX,Iz");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rAX_Iz, &g_iemAImpl_and);
}
7172
7173
/** Opcode 0x26 - ES segment override prefix: record the prefix, set the
 *  effective segment and continue decoding with the next opcode byte. */
FNIEMOP_DEF(iemOp_seg_ES)
{
    IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("seg es"); /* a prefix cancels a preceding REX */
    pIemCpu->fPrefixes |= IEM_OP_PRF_SEG_ES;
    pIemCpu->iEffSeg    = X86_SREG_ES;

    uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
    return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}


/** Opcode 0x27 - DAA: not implemented yet (stub raises a not-implemented status). */
FNIEMOP_STUB(iemOp_daa);
7188
7189
/** Opcode 0x28 - SUB Eb,Gb: byte subtract, r/m destination. */
FNIEMOP_DEF(iemOp_sub_Eb_Gb)
{
    IEMOP_MNEMONIC("sub Eb,Gb");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_r8, &g_iemAImpl_sub);
}


/** Opcode 0x29 - SUB Ev,Gv. */
FNIEMOP_DEF(iemOp_sub_Ev_Gv)
{
    IEMOP_MNEMONIC("sub Ev,Gv");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_rv, &g_iemAImpl_sub);
}


/** Opcode 0x2a - SUB Gb,Eb: byte subtract, register destination. */
FNIEMOP_DEF(iemOp_sub_Gb_Eb)
{
    IEMOP_MNEMONIC("sub Gb,Eb");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_r8_rm, &g_iemAImpl_sub);
}


/** Opcode 0x2b - SUB Gv,Ev. */
FNIEMOP_DEF(iemOp_sub_Gv_Ev)
{
    IEMOP_MNEMONIC("sub Gv,Ev");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_sub);
}


/** Opcode 0x2c - SUB AL,Ib. */
FNIEMOP_DEF(iemOp_sub_Al_Ib)
{
    IEMOP_MNEMONIC("sub al,Ib");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_AL_Ib, &g_iemAImpl_sub);
}


/** Opcode 0x2d - SUB rAX,Iz. */
FNIEMOP_DEF(iemOp_sub_eAX_Iz)
{
    IEMOP_MNEMONIC("sub rAX,Iz");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rAX_Iz, &g_iemAImpl_sub);
}
7236
7237
/** Opcode 0x2e - CS segment override prefix: record it and keep decoding. */
FNIEMOP_DEF(iemOp_seg_CS)
{
    IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("seg cs"); /* a prefix cancels a preceding REX */
    pIemCpu->fPrefixes |= IEM_OP_PRF_SEG_CS;
    pIemCpu->iEffSeg    = X86_SREG_CS;

    uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
    return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}


/** Opcode 0x2f - DAS: not implemented yet (stub). */
FNIEMOP_STUB(iemOp_das);
7252
7253
/** Opcode 0x30 - XOR Eb,Gb: byte XOR, r/m destination (AF undefined). */
FNIEMOP_DEF(iemOp_xor_Eb_Gb)
{
    IEMOP_MNEMONIC("xor Eb,Gb");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_r8, &g_iemAImpl_xor);
}


/** Opcode 0x31 - XOR Ev,Gv (AF undefined). */
FNIEMOP_DEF(iemOp_xor_Ev_Gv)
{
    IEMOP_MNEMONIC("xor Ev,Gv");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_rv, &g_iemAImpl_xor);
}


/** Opcode 0x32 - XOR Gb,Eb: byte XOR, register destination (AF undefined). */
FNIEMOP_DEF(iemOp_xor_Gb_Eb)
{
    IEMOP_MNEMONIC("xor Gb,Eb");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_r8_rm, &g_iemAImpl_xor);
}


/** Opcode 0x33 - XOR Gv,Ev (AF undefined). */
FNIEMOP_DEF(iemOp_xor_Gv_Ev)
{
    IEMOP_MNEMONIC("xor Gv,Ev");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_xor);
}


/** Opcode 0x34 - XOR AL,Ib (AF undefined). */
FNIEMOP_DEF(iemOp_xor_Al_Ib)
{
    IEMOP_MNEMONIC("xor al,Ib");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_AL_Ib, &g_iemAImpl_xor);
}


/** Opcode 0x35 - XOR rAX,Iz (AF undefined). */
FNIEMOP_DEF(iemOp_xor_eAX_Iz)
{
    IEMOP_MNEMONIC("xor rAX,Iz");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rAX_Iz, &g_iemAImpl_xor);
}
7306
7307
/** Opcode 0x36 - SS segment override prefix: record it and keep decoding. */
FNIEMOP_DEF(iemOp_seg_SS)
{
    IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("seg ss"); /* a prefix cancels a preceding REX */
    pIemCpu->fPrefixes |= IEM_OP_PRF_SEG_SS;
    pIemCpu->iEffSeg    = X86_SREG_SS;

    uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
    return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}


/** Opcode 0x37 - AAA: not implemented yet (stub). */
FNIEMOP_STUB(iemOp_aaa);
7322
7323
/** Opcode 0x38 - CMP Eb,Gb: byte compare; destination is never written, so the
 *  worker only reads the r/m operand. */
FNIEMOP_DEF(iemOp_cmp_Eb_Gb)
{
    IEMOP_MNEMONIC("cmp Eb,Gb");
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo do we have to decode the whole instruction first? */
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_r8, &g_iemAImpl_cmp);
}


/** Opcode 0x39 - CMP Ev,Gv. */
FNIEMOP_DEF(iemOp_cmp_Ev_Gv)
{
    IEMOP_MNEMONIC("cmp Ev,Gv");
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo do we have to decode the whole instruction first? */
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_rv, &g_iemAImpl_cmp);
}


/** Opcode 0x3a - CMP Gb,Eb. */
FNIEMOP_DEF(iemOp_cmp_Gb_Eb)
{
    IEMOP_MNEMONIC("cmp Gb,Eb");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_r8_rm, &g_iemAImpl_cmp);
}


/** Opcode 0x3b - CMP Gv,Ev. */
FNIEMOP_DEF(iemOp_cmp_Gv_Ev)
{
    IEMOP_MNEMONIC("cmp Gv,Ev");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_cmp);
}


/** Opcode 0x3c - CMP AL,Ib. */
FNIEMOP_DEF(iemOp_cmp_Al_Ib)
{
    IEMOP_MNEMONIC("cmp al,Ib");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_AL_Ib, &g_iemAImpl_cmp);
}


/** Opcode 0x3d - CMP rAX,Iz. */
FNIEMOP_DEF(iemOp_cmp_eAX_Iz)
{
    IEMOP_MNEMONIC("cmp rAX,Iz");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rAX_Iz, &g_iemAImpl_cmp);
}
7372
7373
/** Opcode 0x3e - DS segment override prefix: record it and keep decoding. */
FNIEMOP_DEF(iemOp_seg_DS)
{
    IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("seg ds"); /* a prefix cancels a preceding REX */
    pIemCpu->fPrefixes |= IEM_OP_PRF_SEG_DS;
    pIemCpu->iEffSeg    = X86_SREG_DS;

    uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
    return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}


/** Opcode 0x3f - AAS: not implemented yet (stub). */
FNIEMOP_STUB(iemOp_aas);
7388
/**
 * Common 'inc/dec/not/neg register' helper.
 *
 * Dispatches on the effective operand size and invokes the size-specific
 * assembly worker on the addressed general register, updating EFLAGS via the
 * referenced flags variable.
 *
 * @param   pImpl   Table of size-specific unary workers (inc/dec/not/neg).
 * @param   iReg    The general register index (including any REX extension,
 *                  applied by the caller).
 */
FNIEMOP_DEF_2(iemOpCommonUnaryGReg, PCIEMOPUNARYSIZES, pImpl, uint8_t, iReg)
{
    IEMOP_HLP_NO_LOCK_PREFIX();
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            IEM_MC_BEGIN(2, 0);
            IEM_MC_ARG(uint16_t *,  pu16Dst, 0);
            IEM_MC_ARG(uint32_t *,  pEFlags, 1);
            IEM_MC_REF_GREG_U16(pu16Dst, iReg);
            IEM_MC_REF_EFLAGS(pEFlags);
            IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnNormalU16, pu16Dst, pEFlags);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_32BIT:
            IEM_MC_BEGIN(2, 0);
            IEM_MC_ARG(uint32_t *,  pu32Dst, 0);
            IEM_MC_ARG(uint32_t *,  pEFlags, 1);
            IEM_MC_REF_GREG_U32(pu32Dst, iReg);
            IEM_MC_REF_EFLAGS(pEFlags);
            IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnNormalU32, pu32Dst, pEFlags);
            /* 32-bit register writes clear the high half of the 64-bit register. */
            IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_64BIT:
            IEM_MC_BEGIN(2, 0);
            IEM_MC_ARG(uint64_t *,  pu64Dst, 0);
            IEM_MC_ARG(uint32_t *,  pEFlags, 1);
            IEM_MC_REF_GREG_U64(pu64Dst, iReg);
            IEM_MC_REF_EFLAGS(pEFlags);
            IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnNormalU64, pu64Dst, pEFlags);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;
    }
    /* Not reachable for valid IEMMODE values; keeps the compiler happy. */
    return VINF_SUCCESS;
}
7433
7434
/** Opcode 0x40 - INC eAX, or in 64-bit mode the bare REX prefix. */
FNIEMOP_DEF(iemOp_inc_eAX)
{
    /*
     * This is a REX prefix in 64-bit mode.
     */
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
    {
        IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex");
        pIemCpu->fPrefixes |= IEM_OP_PRF_REX;

        uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
        return FNIEMOP_CALL(g_apfnOneByteMap[b]);
    }

    IEMOP_MNEMONIC("inc eAX");
    return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_inc, X86_GREG_xAX);
}


/** Opcode 0x41 - INC eCX, or in 64-bit mode the REX.B prefix. */
FNIEMOP_DEF(iemOp_inc_eCX)
{
    /*
     * This is a REX prefix in 64-bit mode.
     */
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
    {
        IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.b");
        pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_B;
        pIemCpu->uRexB     = 1 << 3;    /* extends the ModRM r/m / opcode reg field */

        uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
        return FNIEMOP_CALL(g_apfnOneByteMap[b]);
    }

    IEMOP_MNEMONIC("inc eCX");
    return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_inc, X86_GREG_xCX);
}


/** Opcode 0x42 - INC eDX, or in 64-bit mode the REX.X prefix. */
FNIEMOP_DEF(iemOp_inc_eDX)
{
    /*
     * This is a REX prefix in 64-bit mode.
     */
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
    {
        IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.x");
        pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_X;
        pIemCpu->uRexIndex = 1 << 3;    /* extends the SIB index field */

        uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
        return FNIEMOP_CALL(g_apfnOneByteMap[b]);
    }

    IEMOP_MNEMONIC("inc eDX");
    return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_inc, X86_GREG_xDX);
}



/** Opcode 0x43 - INC eBX, or in 64-bit mode the REX.BX prefix. */
FNIEMOP_DEF(iemOp_inc_eBX)
{
    /*
     * This is a REX prefix in 64-bit mode.
     */
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
    {
        IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.bx");
        pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_B | IEM_OP_PRF_REX_X;
        pIemCpu->uRexB     = 1 << 3;
        pIemCpu->uRexIndex = 1 << 3;

        uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
        return FNIEMOP_CALL(g_apfnOneByteMap[b]);
    }

    IEMOP_MNEMONIC("inc eBX");
    return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_inc, X86_GREG_xBX);
}
7518
7519
/** Opcode 0x44 - INC eSP, or in 64-bit mode the REX.R prefix. */
FNIEMOP_DEF(iemOp_inc_eSP)
{
    /*
     * This is a REX prefix in 64-bit mode.
     */
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
    {
        IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.r");
        pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R;
        pIemCpu->uRexReg   = 1 << 3;    /* extends the ModRM reg field */

        uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
        return FNIEMOP_CALL(g_apfnOneByteMap[b]);
    }

    IEMOP_MNEMONIC("inc eSP");
    return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_inc, X86_GREG_xSP);
}


/** Opcode 0x45 - INC eBP, or in 64-bit mode the REX.RB prefix. */
FNIEMOP_DEF(iemOp_inc_eBP)
{
    /*
     * This is a REX prefix in 64-bit mode.
     */
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
    {
        IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.rb");
        pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_REX_B;
        pIemCpu->uRexReg   = 1 << 3;
        pIemCpu->uRexB     = 1 << 3;

        uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
        return FNIEMOP_CALL(g_apfnOneByteMap[b]);
    }

    IEMOP_MNEMONIC("inc eBP");
    return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_inc, X86_GREG_xBP);
}


/** Opcode 0x46 - INC eSI, or in 64-bit mode the REX.RX prefix. */
FNIEMOP_DEF(iemOp_inc_eSI)
{
    /*
     * This is a REX prefix in 64-bit mode.
     */
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
    {
        IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.rx");
        pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_REX_X;
        pIemCpu->uRexReg   = 1 << 3;
        pIemCpu->uRexIndex = 1 << 3;

        uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
        return FNIEMOP_CALL(g_apfnOneByteMap[b]);
    }

    IEMOP_MNEMONIC("inc eSI");
    return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_inc, X86_GREG_xSI);
}


/** Opcode 0x47 - INC eDI, or in 64-bit mode the REX.RBX prefix. */
FNIEMOP_DEF(iemOp_inc_eDI)
{
    /*
     * This is a REX prefix in 64-bit mode.
     */
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
    {
        IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.rbx");
        pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_REX_B | IEM_OP_PRF_REX_X;
        pIemCpu->uRexReg   = 1 << 3;
        pIemCpu->uRexB     = 1 << 3;
        pIemCpu->uRexIndex = 1 << 3;

        uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
        return FNIEMOP_CALL(g_apfnOneByteMap[b]);
    }

    IEMOP_MNEMONIC("inc eDI");
    return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_inc, X86_GREG_xDI);
}
7606
7607
/** Opcode 0x48 - DEC eAX, or in 64-bit mode the REX.W prefix. */
FNIEMOP_DEF(iemOp_dec_eAX)
{
    /*
     * This is a REX prefix in 64-bit mode.
     */
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
    {
        IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.w");
        pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_SIZE_REX_W;
        iemRecalEffOpSize(pIemCpu);     /* REX.W changes the effective operand size */

        uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
        return FNIEMOP_CALL(g_apfnOneByteMap[b]);
    }

    IEMOP_MNEMONIC("dec eAX");
    return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_dec, X86_GREG_xAX);
}


/** Opcode 0x49 - DEC eCX, or in 64-bit mode the REX.BW prefix. */
FNIEMOP_DEF(iemOp_dec_eCX)
{
    /*
     * This is a REX prefix in 64-bit mode.
     */
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
    {
        IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.bw");
        pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_B | IEM_OP_PRF_SIZE_REX_W;
        pIemCpu->uRexB     = 1 << 3;
        iemRecalEffOpSize(pIemCpu);

        uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
        return FNIEMOP_CALL(g_apfnOneByteMap[b]);
    }

    IEMOP_MNEMONIC("dec eCX");
    return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_dec, X86_GREG_xCX);
}


/** Opcode 0x4a - DEC eDX, or in 64-bit mode the REX.XW prefix. */
FNIEMOP_DEF(iemOp_dec_eDX)
{
    /*
     * This is a REX prefix in 64-bit mode.
     */
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
    {
        IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.xw");
        pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_X | IEM_OP_PRF_SIZE_REX_W;
        pIemCpu->uRexIndex = 1 << 3;
        iemRecalEffOpSize(pIemCpu);

        uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
        return FNIEMOP_CALL(g_apfnOneByteMap[b]);
    }

    IEMOP_MNEMONIC("dec eDX");
    return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_dec, X86_GREG_xDX);
}


/** Opcode 0x4b - DEC eBX, or in 64-bit mode the REX.BXW prefix. */
FNIEMOP_DEF(iemOp_dec_eBX)
{
    /*
     * This is a REX prefix in 64-bit mode.
     */
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
    {
        IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.bxw");
        pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_B | IEM_OP_PRF_REX_X | IEM_OP_PRF_SIZE_REX_W;
        pIemCpu->uRexB     = 1 << 3;
        pIemCpu->uRexIndex = 1 << 3;
        iemRecalEffOpSize(pIemCpu);

        uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
        return FNIEMOP_CALL(g_apfnOneByteMap[b]);
    }

    IEMOP_MNEMONIC("dec eBX");
    return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_dec, X86_GREG_xBX);
}
7694
7695
/** Opcode 0x4c - DEC eSP, or in 64-bit mode the REX.RW prefix. */
FNIEMOP_DEF(iemOp_dec_eSP)
{
    /*
     * This is a REX prefix in 64-bit mode.
     */
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
    {
        IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.rw");
        pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_SIZE_REX_W;
        pIemCpu->uRexReg   = 1 << 3;
        iemRecalEffOpSize(pIemCpu);

        uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
        return FNIEMOP_CALL(g_apfnOneByteMap[b]);
    }

    IEMOP_MNEMONIC("dec eSP");
    return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_dec, X86_GREG_xSP);
}


/** Opcode 0x4d - DEC eBP, or in 64-bit mode the REX.RBW prefix. */
FNIEMOP_DEF(iemOp_dec_eBP)
{
    /*
     * This is a REX prefix in 64-bit mode.
     */
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
    {
        IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.rbw");
        pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_REX_B | IEM_OP_PRF_SIZE_REX_W;
        pIemCpu->uRexReg   = 1 << 3;
        pIemCpu->uRexB     = 1 << 3;
        iemRecalEffOpSize(pIemCpu);

        uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
        return FNIEMOP_CALL(g_apfnOneByteMap[b]);
    }

    IEMOP_MNEMONIC("dec eBP");
    return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_dec, X86_GREG_xBP);
}


/** Opcode 0x4e - DEC eSI, or in 64-bit mode the REX.RXW prefix. */
FNIEMOP_DEF(iemOp_dec_eSI)
{
    /*
     * This is a REX prefix in 64-bit mode.
     */
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
    {
        IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.rxw");
        pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_REX_X | IEM_OP_PRF_SIZE_REX_W;
        pIemCpu->uRexReg   = 1 << 3;
        pIemCpu->uRexIndex = 1 << 3;
        iemRecalEffOpSize(pIemCpu);

        uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
        return FNIEMOP_CALL(g_apfnOneByteMap[b]);
    }

    IEMOP_MNEMONIC("dec eSI");
    return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_dec, X86_GREG_xSI);
}


/** Opcode 0x4f - DEC eDI, or in 64-bit mode the REX.RBXW prefix. */
FNIEMOP_DEF(iemOp_dec_eDI)
{
    /*
     * This is a REX prefix in 64-bit mode.
     */
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
    {
        IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.rbxw");
        pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_REX_B | IEM_OP_PRF_REX_X | IEM_OP_PRF_SIZE_REX_W;
        pIemCpu->uRexReg   = 1 << 3;
        pIemCpu->uRexB     = 1 << 3;
        pIemCpu->uRexIndex = 1 << 3;
        iemRecalEffOpSize(pIemCpu);

        uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
        return FNIEMOP_CALL(g_apfnOneByteMap[b]);
    }

    IEMOP_MNEMONIC("dec eDI");
    return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_dec, X86_GREG_xDI);
}
7786
7787
/**
 * Common 'push register' helper.
 *
 * In 64-bit mode the register index is extended with REX.B and the default
 * operand size is forced to 64-bit (a 0x66 prefix still selects 16-bit);
 * 32-bit pushes are not encodable in 64-bit mode, matching the switch below.
 *
 * @param   iReg    The general register index (before REX.B extension).
 */
FNIEMOP_DEF_1(iemOpCommonPushGReg, uint8_t, iReg)
{
    IEMOP_HLP_NO_LOCK_PREFIX();
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
    {
        iReg |= pIemCpu->uRexB;
        pIemCpu->enmDefOpSize = IEMMODE_64BIT;
        pIemCpu->enmEffOpSize = !(pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP) ? IEMMODE_64BIT : IEMMODE_16BIT;
    }

    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            IEM_MC_BEGIN(0, 1);
            IEM_MC_LOCAL(uint16_t, u16Value);
            IEM_MC_FETCH_GREG_U16(u16Value, iReg);
            IEM_MC_PUSH_U16(u16Value);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            break;

        case IEMMODE_32BIT:
            IEM_MC_BEGIN(0, 1);
            IEM_MC_LOCAL(uint32_t, u32Value);
            IEM_MC_FETCH_GREG_U32(u32Value, iReg);
            IEM_MC_PUSH_U32(u32Value);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            break;

        case IEMMODE_64BIT:
            IEM_MC_BEGIN(0, 1);
            IEM_MC_LOCAL(uint64_t, u64Value);
            IEM_MC_FETCH_GREG_U64(u64Value, iReg);
            IEM_MC_PUSH_U64(u64Value);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            break;
    }

    return VINF_SUCCESS;
}
7833
7834
/** Opcode 0x50 - PUSH rAX (REX.B selects r8). */
FNIEMOP_DEF(iemOp_push_eAX)
{
    IEMOP_MNEMONIC("push rAX");
    return FNIEMOP_CALL_1(iemOpCommonPushGReg, X86_GREG_xAX);
}


/** Opcode 0x51 - PUSH rCX (REX.B selects r9). */
FNIEMOP_DEF(iemOp_push_eCX)
{
    IEMOP_MNEMONIC("push rCX");
    return FNIEMOP_CALL_1(iemOpCommonPushGReg, X86_GREG_xCX);
}


/** Opcode 0x52 - PUSH rDX (REX.B selects r10). */
FNIEMOP_DEF(iemOp_push_eDX)
{
    IEMOP_MNEMONIC("push rDX");
    return FNIEMOP_CALL_1(iemOpCommonPushGReg, X86_GREG_xDX);
}


/** Opcode 0x53 - PUSH rBX (REX.B selects r11). */
FNIEMOP_DEF(iemOp_push_eBX)
{
    IEMOP_MNEMONIC("push rBX");
    return FNIEMOP_CALL_1(iemOpCommonPushGReg, X86_GREG_xBX);
}


/** Opcode 0x54 - PUSH rSP (REX.B selects r12). */
FNIEMOP_DEF(iemOp_push_eSP)
{
    IEMOP_MNEMONIC("push rSP");
    return FNIEMOP_CALL_1(iemOpCommonPushGReg, X86_GREG_xSP);
}


/** Opcode 0x55 - PUSH rBP (REX.B selects r13). */
FNIEMOP_DEF(iemOp_push_eBP)
{
    IEMOP_MNEMONIC("push rBP");
    return FNIEMOP_CALL_1(iemOpCommonPushGReg, X86_GREG_xBP);
}


/** Opcode 0x56 - PUSH rSI (REX.B selects r14). */
FNIEMOP_DEF(iemOp_push_eSI)
{
    IEMOP_MNEMONIC("push rSI");
    return FNIEMOP_CALL_1(iemOpCommonPushGReg, X86_GREG_xSI);
}


/** Opcode 0x57 - PUSH rDI (REX.B selects r15). */
FNIEMOP_DEF(iemOp_push_eDI)
{
    IEMOP_MNEMONIC("push rDI");
    return FNIEMOP_CALL_1(iemOpCommonPushGReg, X86_GREG_xDI);
}
7897
7898
/**
 * Common 'pop register' helper.
 *
 * Mirrors iemOpCommonPushGReg: in 64-bit mode REX.B extends the register
 * index and the default operand size becomes 64-bit.
 *
 * Note: the IEM_MC_LOCAL invocations smuggle a '*' into the name argument so
 * the declared local is a pointer (e.g. 'uint16_t *pu16Dst') that the
 * IEM_MC_REF_GREG_* macro then initialises — intentional, if unusual.
 *
 * @param   iReg    The general register index (before REX.B extension).
 */
FNIEMOP_DEF_1(iemOpCommonPopGReg, uint8_t, iReg)
{
    IEMOP_HLP_NO_LOCK_PREFIX();
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
    {
        iReg |= pIemCpu->uRexB;
        pIemCpu->enmDefOpSize = IEMMODE_64BIT;
        pIemCpu->enmEffOpSize = !(pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP) ? IEMMODE_64BIT : IEMMODE_16BIT;
    }

    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            IEM_MC_BEGIN(0, 1);
            IEM_MC_LOCAL(uint16_t, *pu16Dst);
            IEM_MC_REF_GREG_U16(pu16Dst, iReg);
            IEM_MC_POP_U16(pu16Dst);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            break;

        case IEMMODE_32BIT:
            IEM_MC_BEGIN(0, 1);
            IEM_MC_LOCAL(uint32_t, *pu32Dst);
            IEM_MC_REF_GREG_U32(pu32Dst, iReg);
            IEM_MC_POP_U32(pu32Dst);
            IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst); /** @todo testcase*/
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            break;

        case IEMMODE_64BIT:
            IEM_MC_BEGIN(0, 1);
            IEM_MC_LOCAL(uint64_t, *pu64Dst);
            IEM_MC_REF_GREG_U64(pu64Dst, iReg);
            IEM_MC_POP_U64(pu64Dst);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            break;
    }

    return VINF_SUCCESS;
}
7945
7946
/** Opcode 0x58 - POP rAX (REX.B selects r8). */
FNIEMOP_DEF(iemOp_pop_eAX)
{
    IEMOP_MNEMONIC("pop rAX");
    return FNIEMOP_CALL_1(iemOpCommonPopGReg, X86_GREG_xAX);
}


/** Opcode 0x59 - POP rCX (REX.B selects r9). */
FNIEMOP_DEF(iemOp_pop_eCX)
{
    IEMOP_MNEMONIC("pop rCX");
    return FNIEMOP_CALL_1(iemOpCommonPopGReg, X86_GREG_xCX);
}


/** Opcode 0x5a - POP rDX (REX.B selects r10). */
FNIEMOP_DEF(iemOp_pop_eDX)
{
    IEMOP_MNEMONIC("pop rDX");
    return FNIEMOP_CALL_1(iemOpCommonPopGReg, X86_GREG_xDX);
}


/** Opcode 0x5b - POP rBX (REX.B selects r11). */
FNIEMOP_DEF(iemOp_pop_eBX)
{
    IEMOP_MNEMONIC("pop rBX");
    return FNIEMOP_CALL_1(iemOpCommonPopGReg, X86_GREG_xBX);
}
7977
7978
/** Opcode 0x5c - POP rSP.
 *
 * Special-cased because popping into SP itself must not go through the
 * reference-then-pop path used by the common helper (the pop adjusts SP);
 * instead the value is popped into a local and then stored to SP.  With
 * REX.B in 64-bit mode this is POP r12 and the common helper applies. */
FNIEMOP_DEF(iemOp_pop_eSP)
{
    IEMOP_MNEMONIC("pop rSP");
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
    {
        if (pIemCpu->uRexB)
            return FNIEMOP_CALL_1(iemOpCommonPopGReg, X86_GREG_xSP);
        pIemCpu->enmDefOpSize = IEMMODE_64BIT;
        pIemCpu->enmEffOpSize = !(pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP) ? IEMMODE_64BIT : IEMMODE_16BIT;
    }

    IEMOP_HLP_DECODED_NL_1(OP_POP, IEMOPFORM_FIXED, OP_PARM_REG_ESP,
                           DISOPTYPE_HARMLESS | DISOPTYPE_DEFAULT_64_OP_SIZE | DISOPTYPE_REXB_EXTENDS_OPREG);
    /** @todo add testcase for this instruction. */
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            IEM_MC_BEGIN(0, 1);
            IEM_MC_LOCAL(uint16_t, u16Dst);
            IEM_MC_POP_U16(&u16Dst); /** @todo not correct MC, fix later. */
            IEM_MC_STORE_GREG_U16(X86_GREG_xSP, u16Dst);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            break;

        case IEMMODE_32BIT:
            IEM_MC_BEGIN(0, 1);
            IEM_MC_LOCAL(uint32_t, u32Dst);
            IEM_MC_POP_U32(&u32Dst);
            IEM_MC_STORE_GREG_U32(X86_GREG_xSP, u32Dst);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            break;

        case IEMMODE_64BIT:
            IEM_MC_BEGIN(0, 1);
            IEM_MC_LOCAL(uint64_t, u64Dst);
            IEM_MC_POP_U64(&u64Dst);
            IEM_MC_STORE_GREG_U64(X86_GREG_xSP, u64Dst);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            break;
    }

    return VINF_SUCCESS;
}
8026
8027
/** Opcode 0x5d - pop rBP/eBP/BP (REX.B extends to r13). */
FNIEMOP_DEF(iemOp_pop_eBP)
{
    IEMOP_MNEMONIC("pop rBP");
    return FNIEMOP_CALL_1(iemOpCommonPopGReg, X86_GREG_xBP);
}
8034
8035
/** Opcode 0x5e - pop rSI/eSI/SI (REX.B extends to r14). */
FNIEMOP_DEF(iemOp_pop_eSI)
{
    IEMOP_MNEMONIC("pop rSI");
    return FNIEMOP_CALL_1(iemOpCommonPopGReg, X86_GREG_xSI);
}
8042
8043
/** Opcode 0x5f - pop rDI/eDI/DI (REX.B extends to r15). */
FNIEMOP_DEF(iemOp_pop_eDI)
{
    IEMOP_MNEMONIC("pop rDI");
    return FNIEMOP_CALL_1(iemOpCommonPopGReg, X86_GREG_xDI);
}
8050
8051
/** Opcode 0x60 - PUSHA/PUSHAD.
 * Invalid in 64-bit mode; defers to a C implementation selected by the
 * effective operand size (only 16 and 32 bit are possible here). */
FNIEMOP_DEF(iemOp_pusha)
{
    IEMOP_MNEMONIC("pusha");
    IEMOP_HLP_NO_64BIT();
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
        return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_pusha_16);
    Assert(pIemCpu->enmEffOpSize == IEMMODE_32BIT);
    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_pusha_32);
}
8062
8063
/** Opcode 0x61 - POPA/POPAD.
 * Invalid in 64-bit mode; defers to a C implementation selected by the
 * effective operand size (only 16 and 32 bit are possible here). */
FNIEMOP_DEF(iemOp_popa)
{
    IEMOP_MNEMONIC("popa");
    IEMOP_HLP_NO_64BIT();
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
        return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_popa_16);
    Assert(pIemCpu->enmEffOpSize == IEMMODE_32BIT);
    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_popa_32);
}
8074
8075
/** Opcode 0x62 - BOUND Gv,Ma. Not implemented yet (stub macro). */
FNIEMOP_STUB(iemOp_bound_Gv_Ma);
8078
8079
/** Opcode 0x63 - ARPL Ew,Gw (non-64-bit modes only; in 64-bit mode 0x63 is
 * MOVSXD, see iemOp_movsxd_Gv_Ev).
 * Not valid in real or V8086 mode. The flag result is produced by the
 * assembly worker iemAImpl_arpl. */
FNIEMOP_DEF(iemOp_arpl_Ew_Gw)
{
    IEMOP_MNEMONIC("arpl Ew,Gw");
    IEMOP_HLP_NO_REAL_OR_V86_MODE();
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* Register destination: reference it directly, worker updates in place. */
        IEMOP_HLP_DECODED_NL_2(OP_ARPL, IEMOPFORM_MR_REG, OP_PARM_Ew, OP_PARM_Gw, DISOPTYPE_HARMLESS);
        IEM_MC_BEGIN(3, 0);
        IEM_MC_ARG(uint16_t *, pu16Dst, 0);
        IEM_MC_ARG(uint16_t, u16Src, 1);
        IEM_MC_ARG(uint32_t *, pEFlags, 2);

        IEM_MC_FETCH_GREG_U16(u16Src, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
        IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK));
        IEM_MC_REF_EFLAGS(pEFlags);
        IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_arpl, pu16Dst, u16Src, pEFlags);

        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* Memory destination: map read-write, call worker, then commit both
           the mapped word and the flags. */
        IEM_MC_BEGIN(3, 2);
        IEM_MC_ARG(uint16_t *, pu16Dst, 0);
        IEM_MC_ARG(uint16_t, u16Src, 1);
        IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEMOP_HLP_DECODED_NL_2(OP_ARPL, IEMOPFORM_MR_REG, OP_PARM_Ew, OP_PARM_Gw, DISOPTYPE_HARMLESS);
        IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
        IEM_MC_FETCH_GREG_U16(u16Src, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
        IEM_MC_FETCH_EFLAGS(EFlags);
        IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_arpl, pu16Dst, u16Src, pEFlags);

        IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
        IEM_MC_COMMIT_EFLAGS(EFlags);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;

}
8128
8129
/** Opcode 0x63 - MOVSXD Gv,Ev (64-bit mode only; sign-extends a 32-bit source
 * into a 64-bit destination register).
 * @note This is a weird one. It works like a regular move instruction if
 * REX.W isn't set, at least according to AMD docs (rev 3.15, 2009-11).
 * @todo This definitely needs a testcase to verify the odd cases. */
FNIEMOP_DEF(iemOp_movsxd_Gv_Ev)
{
    Assert(pIemCpu->enmEffOpSize == IEMMODE_64BIT); /* Caller branched already . */

    IEMOP_MNEMONIC("movsxd Gv,Ev");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /*
         * Register to register: fetch 32-bit source sign-extended to 64 bits.
         */
        IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(uint64_t, u64Value);
        IEM_MC_FETCH_GREG_U32_SX_U64(u64Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /*
         * We're loading a register from memory (32-bit read, sign-extended).
         */
        IEM_MC_BEGIN(0, 2);
        IEM_MC_LOCAL(uint64_t, u64Value);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
        IEM_MC_FETCH_MEM_U32_SX_U64(u64Value, pIemCpu->iEffSeg, GCPtrEffDst);
        IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
8171
8172
/** Opcode 0x64 - FS segment-override prefix.
 * Records the prefix, sets the effective segment, then decodes the next
 * opcode byte through the one-byte dispatch table. */
FNIEMOP_DEF(iemOp_seg_FS)
{
    IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("seg fs");
    pIemCpu->fPrefixes |= IEM_OP_PRF_SEG_FS;
    pIemCpu->iEffSeg = X86_SREG_FS;

    uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
    return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}
8183
8184
/** Opcode 0x65 - GS segment-override prefix.
 * Records the prefix, sets the effective segment, then decodes the next
 * opcode byte through the one-byte dispatch table. */
FNIEMOP_DEF(iemOp_seg_GS)
{
    IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("seg gs");
    pIemCpu->fPrefixes |= IEM_OP_PRF_SEG_GS;
    pIemCpu->iEffSeg = X86_SREG_GS;

    uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
    return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}
8195
8196
/** Opcode 0x66 - operand-size override prefix.
 * Records the prefix, recalculates the effective operand size, then decodes
 * the next opcode byte through the one-byte dispatch table. */
FNIEMOP_DEF(iemOp_op_size)
{
    IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("op size");
    pIemCpu->fPrefixes |= IEM_OP_PRF_SIZE_OP;
    iemRecalEffOpSize(pIemCpu);

    uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
    return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}
8207
8208
/** Opcode 0x67 - address-size override prefix.
 * Toggles the effective address mode relative to the default (16<->32 in
 * legacy modes, 64->32 in long mode), then decodes the next opcode byte. */
FNIEMOP_DEF(iemOp_addr_size)
{
    IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("addr size");
    pIemCpu->fPrefixes |= IEM_OP_PRF_SIZE_ADDR;
    switch (pIemCpu->enmDefAddrMode)
    {
        case IEMMODE_16BIT: pIemCpu->enmEffAddrMode = IEMMODE_32BIT; break;
        case IEMMODE_32BIT: pIemCpu->enmEffAddrMode = IEMMODE_16BIT; break;
        case IEMMODE_64BIT: pIemCpu->enmEffAddrMode = IEMMODE_32BIT; break;
        default: AssertFailed();
    }

    uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
    return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}
8225
8226
/** Opcode 0x68 - PUSH Iz (word/dword immediate; in 64-bit mode the 32-bit
 * immediate is sign-extended to 64 bits and the default operand size is
 * 64-bit). */
FNIEMOP_DEF(iemOp_push_Iz)
{
    IEMOP_MNEMONIC("push Iz");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
        {
            uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
            IEMOP_HLP_NO_LOCK_PREFIX();
            IEM_MC_BEGIN(0,0);
            IEM_MC_PUSH_U16(u16Imm);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;
        }

        case IEMMODE_32BIT:
        {
            uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
            IEMOP_HLP_NO_LOCK_PREFIX();
            IEM_MC_BEGIN(0,0);
            IEM_MC_PUSH_U32(u32Imm);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;
        }

        case IEMMODE_64BIT:
        {
            /* 64-bit push still fetches only a 32-bit immediate, sign-extended. */
            uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm);
            IEMOP_HLP_NO_LOCK_PREFIX();
            IEM_MC_BEGIN(0,0);
            IEM_MC_PUSH_U64(u64Imm);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;
        }

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
8270
8271
/** Opcode 0x69 - IMUL Gv,Ev,Iz (three-operand signed multiply with word/dword
 * immediate; the result always lands in the Gv register, so the Ev operand is
 * fetched into a local and the worker operates on that copy).
 * SF/ZF/AF/PF are undefined per the architecture, hence the verification
 * mask below. */
FNIEMOP_DEF(iemOp_imul_Gv_Ev_Iz)
{
    IEMOP_MNEMONIC("imul Gv,Ev,Iz"); /* Gv = Ev * Iz; */
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);

    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
        {
            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
            {
                /* register operand */
                uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

                IEM_MC_BEGIN(3, 1);
                IEM_MC_ARG(uint16_t *, pu16Dst, 0);
                IEM_MC_ARG_CONST(uint16_t, u16Src,/*=*/ u16Imm,1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_LOCAL(uint16_t, u16Tmp);

                IEM_MC_FETCH_GREG_U16(u16Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_LOCAL(pu16Dst, u16Tmp);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u16, pu16Dst, u16Src, pEFlags);
                IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Tmp);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            else
            {
                /* memory operand */
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint16_t *, pu16Dst, 0);
                IEM_MC_ARG(uint16_t, u16Src, 1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_LOCAL(uint16_t, u16Tmp);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                /* cbImm=2: the immediate follows the ModR/M bytes; needed for
                   correct RIP-relative addressing. */
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 2);
                uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
                IEM_MC_ASSIGN(u16Src, u16Imm);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_FETCH_MEM_U16(u16Tmp, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_REF_LOCAL(pu16Dst, u16Tmp);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u16, pu16Dst, u16Src, pEFlags);
                IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Tmp);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            return VINF_SUCCESS;
        }

        case IEMMODE_32BIT:
        {
            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
            {
                /* register operand */
                uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

                IEM_MC_BEGIN(3, 1);
                IEM_MC_ARG(uint32_t *, pu32Dst, 0);
                IEM_MC_ARG_CONST(uint32_t, u32Src,/*=*/ u32Imm,1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_LOCAL(uint32_t, u32Tmp);

                IEM_MC_FETCH_GREG_U32(u32Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_LOCAL(pu32Dst, u32Tmp);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u32, pu32Dst, u32Src, pEFlags);
                IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Tmp);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            else
            {
                /* memory operand */
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint32_t *, pu32Dst, 0);
                IEM_MC_ARG(uint32_t, u32Src, 1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_LOCAL(uint32_t, u32Tmp);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                /* cbImm=4 for the trailing dword immediate. */
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 4);
                uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
                IEM_MC_ASSIGN(u32Src, u32Imm);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_FETCH_MEM_U32(u32Tmp, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_REF_LOCAL(pu32Dst, u32Tmp);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u32, pu32Dst, u32Src, pEFlags);
                IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Tmp);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            return VINF_SUCCESS;
        }

        case IEMMODE_64BIT:
        {
            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
            {
                /* register operand; the immediate is 32-bit, sign-extended. */
                uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

                IEM_MC_BEGIN(3, 1);
                IEM_MC_ARG(uint64_t *, pu64Dst, 0);
                IEM_MC_ARG_CONST(uint64_t, u64Src,/*=*/ u64Imm,1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_LOCAL(uint64_t, u64Tmp);

                IEM_MC_FETCH_GREG_U64(u64Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_LOCAL(pu64Dst, u64Tmp);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u64, pu64Dst, u64Src, pEFlags);
                IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Tmp);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            else
            {
                /* memory operand */
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint64_t *, pu64Dst, 0);
                IEM_MC_ARG(uint64_t, u64Src, 1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_LOCAL(uint64_t, u64Tmp);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                /* cbImm=4: even in 64-bit mode the immediate is a dword. */
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 4);
                uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm);
                IEM_MC_ASSIGN(u64Src, u64Imm);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_FETCH_MEM_U64(u64Tmp, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_REF_LOCAL(pu64Dst, u64Tmp);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u64, pu64Dst, u64Src, pEFlags);
                IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Tmp);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            return VINF_SUCCESS;
        }
    }
    AssertFailedReturn(VERR_INTERNAL_ERROR_3);
}
8430
8431
/** Opcode 0x6a - PUSH Ib (sign-extended byte immediate; default 64-bit
 * operand size in long mode). */
FNIEMOP_DEF(iemOp_push_Ib)
{
    IEMOP_MNEMONIC("push Ib");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    IEM_MC_BEGIN(0,0);
    switch (pIemCpu->enmEffOpSize)
    {
        /* The signed i8Imm is implicitly sign-extended to the push width. */
        case IEMMODE_16BIT:
            IEM_MC_PUSH_U16(i8Imm);
            break;
        case IEMMODE_32BIT:
            IEM_MC_PUSH_U32(i8Imm);
            break;
        case IEMMODE_64BIT:
            IEM_MC_PUSH_U64(i8Imm);
            break;
    }
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
8457
8458
/** Opcode 0x6b - IMUL Gv,Ev,Ib (three-operand signed multiply with a
 * sign-extended byte immediate; result always lands in the Gv register).
 * SF/ZF/AF/PF are undefined per the architecture. */
FNIEMOP_DEF(iemOp_imul_Gv_Ev_Ib)
{
    IEMOP_MNEMONIC("imul Gv,Ev,Ib"); /* Gv = Ev * Ib; */
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);

    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
            {
                /* register operand; byte immediate sign-extended to 16 bits. */
                uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

                IEM_MC_BEGIN(3, 1);
                IEM_MC_ARG(uint16_t *, pu16Dst, 0);
                IEM_MC_ARG_CONST(uint16_t, u16Src,/*=*/ (int8_t)u8Imm, 1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_LOCAL(uint16_t, u16Tmp);

                IEM_MC_FETCH_GREG_U16(u16Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_LOCAL(pu16Dst, u16Tmp);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u16, pu16Dst, u16Src, pEFlags);
                IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Tmp);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            else
            {
                /* memory operand; cbImm=1 so RIP-relative addressing accounts
                   for the trailing immediate byte. */
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint16_t *, pu16Dst, 0);
                IEM_MC_ARG(uint16_t, u16Src, 1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_LOCAL(uint16_t, u16Tmp);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1);
                uint16_t u16Imm; IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16Imm);
                IEM_MC_ASSIGN(u16Src, u16Imm);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_FETCH_MEM_U16(u16Tmp, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_REF_LOCAL(pu16Dst, u16Tmp);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u16, pu16Dst, u16Src, pEFlags);
                IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Tmp);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            return VINF_SUCCESS;

        case IEMMODE_32BIT:
            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
            {
                /* register operand; byte immediate sign-extended to 32 bits. */
                uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

                IEM_MC_BEGIN(3, 1);
                IEM_MC_ARG(uint32_t *, pu32Dst, 0);
                IEM_MC_ARG_CONST(uint32_t, u32Src,/*=*/ (int8_t)u8Imm, 1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_LOCAL(uint32_t, u32Tmp);

                IEM_MC_FETCH_GREG_U32(u32Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_LOCAL(pu32Dst, u32Tmp);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u32, pu32Dst, u32Src, pEFlags);
                IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Tmp);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            else
            {
                /* memory operand */
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint32_t *, pu32Dst, 0);
                IEM_MC_ARG(uint32_t, u32Src, 1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_LOCAL(uint32_t, u32Tmp);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1);
                uint32_t u32Imm; IEM_OPCODE_GET_NEXT_S8_SX_U32(&u32Imm);
                IEM_MC_ASSIGN(u32Src, u32Imm);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_FETCH_MEM_U32(u32Tmp, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_REF_LOCAL(pu32Dst, u32Tmp);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u32, pu32Dst, u32Src, pEFlags);
                IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Tmp);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            return VINF_SUCCESS;

        case IEMMODE_64BIT:
            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
            {
                /* register operand; byte immediate sign-extended to 64 bits. */
                uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

                IEM_MC_BEGIN(3, 1);
                IEM_MC_ARG(uint64_t *, pu64Dst, 0);
                IEM_MC_ARG_CONST(uint64_t, u64Src,/*=*/ (int8_t)u8Imm, 1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_LOCAL(uint64_t, u64Tmp);

                IEM_MC_FETCH_GREG_U64(u64Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_LOCAL(pu64Dst, u64Tmp);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u64, pu64Dst, u64Src, pEFlags);
                IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Tmp);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            else
            {
                /* memory operand */
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint64_t *, pu64Dst, 0);
                IEM_MC_ARG(uint64_t, u64Src, 1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_LOCAL(uint64_t, u64Tmp);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1);
                uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S8_SX_U64(&u64Imm);
                IEM_MC_ASSIGN(u64Src, u64Imm);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_FETCH_MEM_U64(u64Tmp, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_REF_LOCAL(pu64Dst, u64Tmp);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u64, pu64Dst, u64Src, pEFlags);
                IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Tmp);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            return VINF_SUCCESS;
    }
    AssertFailedReturn(VERR_INTERNAL_ERROR_3);
}
8611
8612
/** Opcode 0x6c - INSB Yb,DX.
 * Dispatches to a C implementation chosen by REP prefix and effective address
 * size; the trailing 'false' argument selects the non-IO-checked variant
 * (assumption from the parameter pattern -- TODO confirm against the CIMPL). */
FNIEMOP_DEF(iemOp_insb_Yb_DX)
{
    IEMOP_HLP_NO_LOCK_PREFIX();
    if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
    {
        /* Both REPZ and REPNZ act as plain REP for string I/O. */
        IEMOP_MNEMONIC("rep ins Yb,DX");
        switch (pIemCpu->enmEffAddrMode)
        {
            case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_ins_op8_addr16, false);
            case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_ins_op8_addr32, false);
            case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_ins_op8_addr64, false);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        IEMOP_MNEMONIC("ins Yb,DX");
        switch (pIemCpu->enmEffAddrMode)
        {
            case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_ins_op8_addr16, false);
            case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_ins_op8_addr32, false);
            case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_ins_op8_addr64, false);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
8640
8641
/** Opcode 0x6d - INSW/INSD Yv,DX.
 * Dispatches on REP prefix, effective operand size and effective address
 * size. Note that 64-bit operand size shares the 32-bit workers (there is no
 * 64-bit INS). */
FNIEMOP_DEF(iemOp_inswd_Yv_DX)
{
    IEMOP_HLP_NO_LOCK_PREFIX();
    if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ))
    {
        IEMOP_MNEMONIC("rep ins Yv,DX");
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_ins_op16_addr16, false);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_ins_op16_addr32, false);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_ins_op16_addr64, false);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                break;
            case IEMMODE_64BIT: /* no 64-bit INS; falls back to the 32-bit workers. */
            case IEMMODE_32BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_ins_op32_addr16, false);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_ins_op32_addr32, false);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_ins_op32_addr64, false);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                break;
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        IEMOP_MNEMONIC("ins Yv,DX");
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_ins_op16_addr16, false);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_ins_op16_addr32, false);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_ins_op16_addr64, false);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                break;
            case IEMMODE_64BIT: /* no 64-bit INS; falls back to the 32-bit workers. */
            case IEMMODE_32BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_ins_op32_addr16, false);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_ins_op32_addr32, false);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_ins_op32_addr64, false);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                break;
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
8701
8702
/** Opcode 0x6e - OUTSB DX,Yb.
 * Dispatches to a C implementation chosen by REP prefix and effective address
 * size; passes the effective segment because OUTS reads from DS:rSI (or an
 * override). */
FNIEMOP_DEF(iemOp_outsb_Yb_DX)
{
    IEMOP_HLP_NO_LOCK_PREFIX();
    if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
    {
        /* Both REPZ and REPNZ act as plain REP for string I/O. */
        IEMOP_MNEMONIC("rep out DX,Yb");
        switch (pIemCpu->enmEffAddrMode)
        {
            case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_rep_outs_op8_addr16, pIemCpu->iEffSeg, false);
            case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_rep_outs_op8_addr32, pIemCpu->iEffSeg, false);
            case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_rep_outs_op8_addr64, pIemCpu->iEffSeg, false);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        IEMOP_MNEMONIC("out DX,Yb");
        switch (pIemCpu->enmEffAddrMode)
        {
            case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_outs_op8_addr16, pIemCpu->iEffSeg, false);
            case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_outs_op8_addr32, pIemCpu->iEffSeg, false);
            case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_outs_op8_addr64, pIemCpu->iEffSeg, false);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
8730
8731
/** Opcode 0x6f - OUTSW/OUTSD DX,Yv.
 * Dispatches on REP prefix, effective operand size and effective address
 * size; 64-bit operand size shares the 32-bit workers (there is no 64-bit
 * OUTS). Passes the effective segment since the source may be overridden. */
FNIEMOP_DEF(iemOp_outswd_Yv_DX)
{
    IEMOP_HLP_NO_LOCK_PREFIX();
    if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ))
    {
        IEMOP_MNEMONIC("rep outs DX,Yv");
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_rep_outs_op16_addr16, pIemCpu->iEffSeg, false);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_rep_outs_op16_addr32, pIemCpu->iEffSeg, false);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_rep_outs_op16_addr64, pIemCpu->iEffSeg, false);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                break;
            case IEMMODE_64BIT: /* no 64-bit OUTS; falls back to the 32-bit workers. */
            case IEMMODE_32BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_rep_outs_op32_addr16, pIemCpu->iEffSeg, false);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_rep_outs_op32_addr32, pIemCpu->iEffSeg, false);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_rep_outs_op32_addr64, pIemCpu->iEffSeg, false);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                break;
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        IEMOP_MNEMONIC("outs DX,Yv");
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_outs_op16_addr16, pIemCpu->iEffSeg, false);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_outs_op16_addr32, pIemCpu->iEffSeg, false);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_outs_op16_addr64, pIemCpu->iEffSeg, false);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                break;
            case IEMMODE_64BIT: /* no 64-bit OUTS; falls back to the 32-bit workers. */
            case IEMMODE_32BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_outs_op32_addr16, pIemCpu->iEffSeg, false);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_outs_op32_addr32, pIemCpu->iEffSeg, false);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_outs_op32_addr64, pIemCpu->iEffSeg, false);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                break;
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
8791
8792
/** Opcode 0x70 - JO Jb: jump short if OF=1. */
FNIEMOP_DEF(iemOp_jo_Jb)
{
    IEMOP_MNEMONIC("jo Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
        IEM_MC_REL_JMP_S8(i8Imm);
    } IEM_MC_ELSE() {
        IEM_MC_ADVANCE_RIP();
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}
8810
8811
/** Opcode 0x71 - JNO Jb: jump short if OF=0 (branches are inverted vs. JO). */
FNIEMOP_DEF(iemOp_jno_Jb)
{
    IEMOP_MNEMONIC("jno Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
        IEM_MC_ADVANCE_RIP();
    } IEM_MC_ELSE() {
        IEM_MC_REL_JMP_S8(i8Imm);
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}
8829
/** Opcode 0x72 - JC/JB/JNAE Jb: jump short if CF=1. */
FNIEMOP_DEF(iemOp_jc_Jb)
{
    IEMOP_MNEMONIC("jc/jnae Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
        IEM_MC_REL_JMP_S8(i8Imm);
    } IEM_MC_ELSE() {
        IEM_MC_ADVANCE_RIP();
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}
8847
8848
/** Opcode 0x73 - JNC/JNB/JAE Jb: jump short if CF=0. */
FNIEMOP_DEF(iemOp_jnc_Jb)
{
    IEMOP_MNEMONIC("jnc/jnb Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
        IEM_MC_ADVANCE_RIP();
    } IEM_MC_ELSE() {
        IEM_MC_REL_JMP_S8(i8Imm);
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}
8866
8867
/** Opcode 0x74 - JE/JZ Jb: jump short if ZF=1. */
FNIEMOP_DEF(iemOp_je_Jb)
{
    IEMOP_MNEMONIC("je/jz Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
        IEM_MC_REL_JMP_S8(i8Imm);
    } IEM_MC_ELSE() {
        IEM_MC_ADVANCE_RIP();
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}
8885
8886
/** Opcode 0x75 - JNE/JNZ Jb: jump short if ZF=0. */
FNIEMOP_DEF(iemOp_jne_Jb)
{
    IEMOP_MNEMONIC("jne/jnz Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
        IEM_MC_ADVANCE_RIP();
    } IEM_MC_ELSE() {
        IEM_MC_REL_JMP_S8(i8Imm);
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}
8904
8905
/** Opcode 0x76 - JBE/JNA Jb: jump short if CF=1 or ZF=1. */
FNIEMOP_DEF(iemOp_jbe_Jb)
{
    IEMOP_MNEMONIC("jbe/jna Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
        IEM_MC_REL_JMP_S8(i8Imm);
    } IEM_MC_ELSE() {
        IEM_MC_ADVANCE_RIP();
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}
8923
8924
/** Opcode 0x77 - JNBE/JA Jb: jump short if CF=0 and ZF=0. */
FNIEMOP_DEF(iemOp_jnbe_Jb)
{
    IEMOP_MNEMONIC("jnbe/ja Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
        IEM_MC_ADVANCE_RIP();
    } IEM_MC_ELSE() {
        IEM_MC_REL_JMP_S8(i8Imm);
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}
8942
8943
/** Opcode 0x78 - JS Jb: jump short if SF=1. */
FNIEMOP_DEF(iemOp_js_Jb)
{
    IEMOP_MNEMONIC("js Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
        IEM_MC_REL_JMP_S8(i8Imm);
    } IEM_MC_ELSE() {
        IEM_MC_ADVANCE_RIP();
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}
8961
8962
/** Opcode 0x79 - JNS Jb: jump short if SF=0. */
FNIEMOP_DEF(iemOp_jns_Jb)
{
    IEMOP_MNEMONIC("jns Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
        IEM_MC_ADVANCE_RIP();
    } IEM_MC_ELSE() {
        IEM_MC_REL_JMP_S8(i8Imm);
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}
8980
8981
/** Opcode 0x7a - JP/JPE Jb: jump short if PF=1. */
FNIEMOP_DEF(iemOp_jp_Jb)
{
    IEMOP_MNEMONIC("jp Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
        IEM_MC_REL_JMP_S8(i8Imm);
    } IEM_MC_ELSE() {
        IEM_MC_ADVANCE_RIP();
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}
8999
9000
/** Opcode 0x7b - JNP/JPO Jb: jump short if PF=0. */
FNIEMOP_DEF(iemOp_jnp_Jb)
{
    IEMOP_MNEMONIC("jnp Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
        IEM_MC_ADVANCE_RIP();
    } IEM_MC_ELSE() {
        IEM_MC_REL_JMP_S8(i8Imm);
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}
9018
9019
/** Opcode 0x7c - JL/JNGE Jb: jump short if SF != OF. */
FNIEMOP_DEF(iemOp_jl_Jb)
{
    IEMOP_MNEMONIC("jl/jnge Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
        IEM_MC_REL_JMP_S8(i8Imm);
    } IEM_MC_ELSE() {
        IEM_MC_ADVANCE_RIP();
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}
9037
9038
/** Opcode 0x7d - JNL/JGE Jb: jump short if SF == OF. */
FNIEMOP_DEF(iemOp_jnl_Jb)
{
    IEMOP_MNEMONIC("jnl/jge Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
        IEM_MC_ADVANCE_RIP();
    } IEM_MC_ELSE() {
        IEM_MC_REL_JMP_S8(i8Imm);
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}
9056
9057
/** Opcode 0x7e - JLE/JNG Jb: jump short if ZF=1 or SF != OF. */
FNIEMOP_DEF(iemOp_jle_Jb)
{
    IEMOP_MNEMONIC("jle/jng Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
        IEM_MC_REL_JMP_S8(i8Imm);
    } IEM_MC_ELSE() {
        IEM_MC_ADVANCE_RIP();
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}
9075
9076
/** Opcode 0x7f. */
FNIEMOP_DEF(iemOp_jnle_Jb)
{
    /*
     * JNLE/JG rel8 - jump short if greater (signed): taken when ZF == 0 and
     * SF == OF.  Inverse branch layout of JLE.
     */
    IEMOP_MNEMONIC("jnle/jg Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
        IEM_MC_ADVANCE_RIP();               /* ZF=1 or SF!=OF: fall through */
    } IEM_MC_ELSE() {
        IEM_MC_REL_JMP_S8(i8Imm);           /* ZF=0 and SF==OF: take the branch */
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}
9094
9095
/** Opcode 0x80. */
FNIEMOP_DEF(iemOp_Grp1_Eb_Ib_80)
{
    /*
     * Group 1, byte form: ADD/OR/ADC/SBB/AND/SUB/XOR/CMP Eb,Ib.
     * The actual operation is selected by the ModR/M reg field via the
     * g_apIemImplGrp1 table; the mnemonic string is a packed array of
     * 4-byte entries indexed the same way.
     */
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_MNEMONIC2("add\0or\0\0adc\0sbb\0and\0sub\0xor\0cmp" + ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)*4, "Eb,Ib");
    PCIEMOPBINSIZES pImpl = g_apIemImplGrp1[(bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK];

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register target */
        uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();     /* LOCK is never valid with a register destination. */
        IEM_MC_BEGIN(3, 0);
        IEM_MC_ARG(uint8_t *, pu8Dst, 0);
        IEM_MC_ARG_CONST(uint8_t, u8Src, /*=*/ u8Imm, 1);
        IEM_MC_ARG(uint32_t *, pEFlags, 2);

        IEM_MC_REF_GREG_U8(pu8Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_REF_EFLAGS(pEFlags);
        IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, u8Src, pEFlags);

        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory target */
        /* A NULL locked worker identifies CMP: destination is only read, and
           a LOCK prefix must be rejected. */
        uint32_t fAccess;
        if (pImpl->pfnLockedU8)
            fAccess = IEM_ACCESS_DATA_RW;
        else
        { /* CMP */
            IEMOP_HLP_NO_LOCK_PREFIX();
            fAccess = IEM_ACCESS_DATA_R;
        }
        IEM_MC_BEGIN(3, 2);
        IEM_MC_ARG(uint8_t *, pu8Dst, 0);
        IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

        /* The 3rd argument (1) is the number of immediate bytes still to be
           fetched after the ModR/M bytes. */
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1);
        uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
        IEM_MC_ARG_CONST(uint8_t, u8Src, /*=*/ u8Imm, 1);

        IEM_MC_MEM_MAP(pu8Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
        IEM_MC_FETCH_EFLAGS(EFlags);
        if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
            IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, u8Src, pEFlags);
        else
            IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU8, pu8Dst, u8Src, pEFlags);

        IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Dst, fAccess);
        IEM_MC_COMMIT_EFLAGS(EFlags);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
9154
9155
/** Opcode 0x81. */
FNIEMOP_DEF(iemOp_Grp1_Ev_Iz)
{
    /*
     * Group 1, full-size form: ADD/OR/ADC/SBB/AND/SUB/XOR/CMP Ev,Iz.
     * The operation is selected by the ModR/M reg field via g_apIemImplGrp1,
     * then dispatched on the effective operand size.  In 64-bit mode the
     * immediate is 32 bits sign-extended to 64 (no imm64 form exists).
     */
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_MNEMONIC2("add\0or\0\0adc\0sbb\0and\0sub\0xor\0cmp" + ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)*4, "Ev,Iz");
    PCIEMOPBINSIZES pImpl = g_apIemImplGrp1[(bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK];

    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
        {
            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
            {
                /* register target */
                uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
                IEMOP_HLP_NO_LOCK_PREFIX();
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint16_t *, pu16Dst, 0);
                IEM_MC_ARG_CONST(uint16_t, u16Src, /*=*/ u16Imm, 1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);

                IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            else
            {
                /* memory target */
                /* No locked worker means the op is read-only on the
                   destination (CMP) and LOCK must be rejected. */
                uint32_t fAccess;
                if (pImpl->pfnLockedU16)
                    fAccess = IEM_ACCESS_DATA_RW;
                else
                { /* CMP, TEST */
                    IEMOP_HLP_NO_LOCK_PREFIX();
                    fAccess = IEM_ACCESS_DATA_R;
                }
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint16_t *, pu16Dst, 0);
                IEM_MC_ARG(uint16_t, u16Src, 1);
                IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                /* 2 = number of immediate bytes following the ModR/M bytes. */
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 2);
                uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
                IEM_MC_ASSIGN(u16Src, u16Imm);
                IEM_MC_MEM_MAP(pu16Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_EFLAGS(EFlags);
                if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
                    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
                else
                    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU16, pu16Dst, u16Src, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, fAccess);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            break;
        }

        case IEMMODE_32BIT:
        {
            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
            {
                /* register target */
                uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
                IEMOP_HLP_NO_LOCK_PREFIX();
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint32_t *, pu32Dst, 0);
                IEM_MC_ARG_CONST(uint32_t, u32Src, /*=*/ u32Imm, 1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);

                IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
                /* 32-bit register writes zero bits 63:32 of the full GPR. */
                IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            else
            {
                /* memory target */
                uint32_t fAccess;
                if (pImpl->pfnLockedU32)
                    fAccess = IEM_ACCESS_DATA_RW;
                else
                { /* CMP, TEST */
                    IEMOP_HLP_NO_LOCK_PREFIX();
                    fAccess = IEM_ACCESS_DATA_R;
                }
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint32_t *, pu32Dst, 0);
                IEM_MC_ARG(uint32_t, u32Src, 1);
                IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                /* 4 = number of immediate bytes following the ModR/M bytes. */
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 4);
                uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
                IEM_MC_ASSIGN(u32Src, u32Imm);
                IEM_MC_MEM_MAP(pu32Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_EFLAGS(EFlags);
                if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
                    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
                else
                    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU32, pu32Dst, u32Src, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, fAccess);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            break;
        }

        case IEMMODE_64BIT:
        {
            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
            {
                /* register target */
                /* Iz in 64-bit mode is imm32 sign-extended to 64 bits. */
                uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm);
                IEMOP_HLP_NO_LOCK_PREFIX();
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint64_t *, pu64Dst, 0);
                IEM_MC_ARG_CONST(uint64_t, u64Src, /*=*/ u64Imm, 1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);

                IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            else
            {
                /* memory target */
                uint32_t fAccess;
                if (pImpl->pfnLockedU64)
                    fAccess = IEM_ACCESS_DATA_RW;
                else
                { /* CMP */
                    IEMOP_HLP_NO_LOCK_PREFIX();
                    fAccess = IEM_ACCESS_DATA_R;
                }
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint64_t *, pu64Dst, 0);
                IEM_MC_ARG(uint64_t, u64Src, 1);
                IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                /* Still 4 immediate bytes in 64-bit mode (imm32, sign extended). */
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 4);
                uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm);
                IEM_MC_ASSIGN(u64Src, u64Imm);
                IEM_MC_MEM_MAP(pu64Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_EFLAGS(EFlags);
                if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
                    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
                else
                    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU64, pu64Dst, u64Src, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, fAccess);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            break;
        }
    }
    return VINF_SUCCESS;
}
9330
9331
/** Opcode 0x82. */
FNIEMOP_DEF(iemOp_Grp1_Eb_Ib_82)
{
    /* 0x82 is a legacy alias of the 0x80 group-1 Eb,Ib form; it is #UD in
       64-bit mode and simply forwards to the 0x80 decoder otherwise. */
    IEMOP_HLP_NO_64BIT(); /** @todo do we need to decode the whole instruction or is this ok? */
    return FNIEMOP_CALL(iemOp_Grp1_Eb_Ib_80);
}
9338
9339
/** Opcode 0x83. */
FNIEMOP_DEF(iemOp_Grp1_Ev_Ib)
{
    /*
     * Group 1, sign-extended-imm8 form: ADD/OR/ADC/SBB/AND/SUB/XOR/CMP Ev,Ib.
     * The 8-bit immediate is sign-extended to the effective operand size.
     * The operation is selected by the ModR/M reg field via g_apIemImplGrp1.
     */
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_MNEMONIC2("add\0or\0\0adc\0sbb\0and\0sub\0xor\0cmp" + ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)*4, "Ev,Ib");
    PCIEMOPBINSIZES pImpl = g_apIemImplGrp1[(bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK];

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /*
         * Register target
         */
        IEMOP_HLP_NO_LOCK_PREFIX();
        uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
            {
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint16_t *, pu16Dst, 0);
                /* (int8_t) cast performs the sign extension to 16 bits. */
                IEM_MC_ARG_CONST(uint16_t, u16Src, /*=*/ (int8_t)u8Imm,1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);

                IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;
            }

            case IEMMODE_32BIT:
            {
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint32_t *, pu32Dst, 0);
                IEM_MC_ARG_CONST(uint32_t, u32Src, /*=*/ (int8_t)u8Imm,1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);

                IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
                /* 32-bit register writes zero bits 63:32 of the full GPR. */
                IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;
            }

            case IEMMODE_64BIT:
            {
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint64_t *, pu64Dst, 0);
                IEM_MC_ARG_CONST(uint64_t, u64Src, /*=*/ (int8_t)u8Imm,1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);

                IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;
            }
        }
    }
    else
    {
        /*
         * Memory target.
         */
        /* No locked worker identifies CMP: read-only destination, LOCK #UD.
           (Checking pfnLockedU16 is sufficient; the table entry has all or
           none of the locked workers.) */
        uint32_t fAccess;
        if (pImpl->pfnLockedU16)
            fAccess = IEM_ACCESS_DATA_RW;
        else
        { /* CMP */
            IEMOP_HLP_NO_LOCK_PREFIX();
            fAccess = IEM_ACCESS_DATA_R;
        }

        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
            {
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint16_t *, pu16Dst, 0);
                IEM_MC_ARG(uint16_t, u16Src, 1);
                IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                /* 1 = one immediate byte follows the ModR/M bytes. */
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1);
                uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
                IEM_MC_ASSIGN(u16Src, (int8_t)u8Imm);
                IEM_MC_MEM_MAP(pu16Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_EFLAGS(EFlags);
                if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
                    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
                else
                    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU16, pu16Dst, u16Src, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, fAccess);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;
            }

            case IEMMODE_32BIT:
            {
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint32_t *, pu32Dst, 0);
                IEM_MC_ARG(uint32_t, u32Src, 1);
                IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1);
                uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
                IEM_MC_ASSIGN(u32Src, (int8_t)u8Imm);
                IEM_MC_MEM_MAP(pu32Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_EFLAGS(EFlags);
                if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
                    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
                else
                    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU32, pu32Dst, u32Src, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, fAccess);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;
            }

            case IEMMODE_64BIT:
            {
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint64_t *, pu64Dst, 0);
                IEM_MC_ARG(uint64_t, u64Src, 1);
                IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1);
                uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
                IEM_MC_ASSIGN(u64Src, (int8_t)u8Imm);
                IEM_MC_MEM_MAP(pu64Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_EFLAGS(EFlags);
                if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
                    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
                else
                    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU64, pu64Dst, u64Src, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, fAccess);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;
            }
        }
    }
    return VINF_SUCCESS;
}
9500
9501
/** Opcode 0x84. */
FNIEMOP_DEF(iemOp_test_Eb_Gb)
{
    /* TEST Eb,Gb - reuses the generic byte rm,r8 binary-op decoder with the
       TEST worker table; AF is left undefined per the architecture. */
    IEMOP_MNEMONIC("test Eb,Gb");
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo do we have to decode the whole instruction first? */
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_r8, &g_iemAImpl_test);
}
9510
9511
/** Opcode 0x85. */
FNIEMOP_DEF(iemOp_test_Ev_Gv)
{
    /* TEST Ev,Gv - reuses the generic rm,rv binary-op decoder with the TEST
       worker table; AF is left undefined per the architecture. */
    IEMOP_MNEMONIC("test Ev,Gv");
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo do we have to decode the whole instruction first? */
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_rv, &g_iemAImpl_test);
}
9520
9521
/** Opcode 0x86. */
FNIEMOP_DEF(iemOp_xchg_Eb_Gb)
{
    /*
     * XCHG Eb,Gb - swap a byte register with another register or memory.
     * The memory form is implicitly locked on real hardware; here it maps
     * the memory byte RW and calls the assembly xchg worker.
     */
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_MNEMONIC("xchg Eb,Gb");

    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 2);
        IEM_MC_LOCAL(uint8_t, uTmp1);
        IEM_MC_LOCAL(uint8_t, uTmp2);

        /* Fetch both, then store crosswise. */
        IEM_MC_FETCH_GREG_U8(uTmp1, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
        IEM_MC_FETCH_GREG_U8(uTmp2, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_STORE_GREG_U8((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, uTmp1);
        IEM_MC_STORE_GREG_U8(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, uTmp2);

        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /*
         * We're accessing memory.
         */
/** @todo the register must be committed separately! */
        IEM_MC_BEGIN(2, 2);
        IEM_MC_ARG(uint8_t *, pu8Mem, 0);
        IEM_MC_ARG(uint8_t *, pu8Reg, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEM_MC_MEM_MAP(pu8Mem, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
        IEM_MC_REF_GREG_U8(pu8Reg, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
        IEM_MC_CALL_VOID_AIMPL_2(iemAImpl_xchg_u8, pu8Mem, pu8Reg);
        IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Mem, IEM_ACCESS_DATA_RW);

        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
9569
9570
/** Opcode 0x87. */
FNIEMOP_DEF(iemOp_xchg_Ev_Gv)
{
    /*
     * XCHG Ev,Gv - swap a word/dword/qword register with another register or
     * memory, dispatched on the effective operand size.  The memory form maps
     * the destination RW and calls the assembly xchg worker.
     */
    IEMOP_MNEMONIC("xchg Ev,Gv");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);

    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        IEMOP_HLP_NO_LOCK_PREFIX();

        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint16_t, uTmp1);
                IEM_MC_LOCAL(uint16_t, uTmp2);

                /* Fetch both operands, then store crosswise. */
                IEM_MC_FETCH_GREG_U16(uTmp1, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_FETCH_GREG_U16(uTmp2, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_STORE_GREG_U16((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, uTmp1);
                IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, uTmp2);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                /* Note: the U32 stores zero the high halves of both registers. */
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint32_t, uTmp1);
                IEM_MC_LOCAL(uint32_t, uTmp2);

                IEM_MC_FETCH_GREG_U32(uTmp1, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_FETCH_GREG_U32(uTmp2, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_STORE_GREG_U32((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, uTmp1);
                IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, uTmp2);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint64_t, uTmp1);
                IEM_MC_LOCAL(uint64_t, uTmp2);

                IEM_MC_FETCH_GREG_U64(uTmp1, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_FETCH_GREG_U64(uTmp2, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_STORE_GREG_U64((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, uTmp1);
                IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, uTmp2);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /*
         * We're accessing memory.
         */
        switch (pIemCpu->enmEffOpSize)
        {
/** @todo the register must be committed separately! */
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(2, 2);
                IEM_MC_ARG(uint16_t *, pu16Mem, 0);
                IEM_MC_ARG(uint16_t *, pu16Reg, 1);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_MEM_MAP(pu16Mem, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_REF_GREG_U16(pu16Reg, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_CALL_VOID_AIMPL_2(iemAImpl_xchg_u16, pu16Mem, pu16Reg);
                IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Mem, IEM_ACCESS_DATA_RW);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(2, 2);
                IEM_MC_ARG(uint32_t *, pu32Mem, 0);
                IEM_MC_ARG(uint32_t *, pu32Reg, 1);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_MEM_MAP(pu32Mem, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_REF_GREG_U32(pu32Reg, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_CALL_VOID_AIMPL_2(iemAImpl_xchg_u32, pu32Mem, pu32Reg);
                IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Mem, IEM_ACCESS_DATA_RW);

                /* The worker writes via a pointer, so clear bits 63:32 of the
                   register explicitly. */
                IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Reg);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(2, 2);
                IEM_MC_ARG(uint64_t *, pu64Mem, 0);
                IEM_MC_ARG(uint64_t *, pu64Reg, 1);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_MEM_MAP(pu64Mem, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_REF_GREG_U64(pu64Reg, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_CALL_VOID_AIMPL_2(iemAImpl_xchg_u64, pu64Mem, pu64Reg);
                IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Mem, IEM_ACCESS_DATA_RW);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
9692
9693
/** Opcode 0x88. */
FNIEMOP_DEF(iemOp_mov_Eb_Gb)
{
    /*
     * MOV Eb,Gb - store a byte register into a register or memory.
     */
    IEMOP_MNEMONIC("mov Eb,Gb");

    uint8_t bRm;
    IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(uint8_t, u8Value);
        IEM_MC_FETCH_GREG_U8(u8Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
        IEM_MC_STORE_GREG_U8((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u8Value);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /*
         * We're writing a register to memory.
         */
        IEM_MC_BEGIN(0, 2);
        IEM_MC_LOCAL(uint8_t, u8Value);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEM_MC_FETCH_GREG_U8(u8Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
        IEM_MC_STORE_MEM_U8(pIemCpu->iEffSeg, GCPtrEffDst, u8Value);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;

}
9732
9733
/** Opcode 0x89. */
FNIEMOP_DEF(iemOp_mov_Ev_Gv)
{
    /*
     * MOV Ev,Gv - store a word/dword/qword register into a register or
     * memory, dispatched on the effective operand size.
     */
    IEMOP_MNEMONIC("mov Ev,Gv");

    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint16_t, u16Value);
                IEM_MC_FETCH_GREG_U16(u16Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_STORE_GREG_U16((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u16Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint32_t, u32Value);
                IEM_MC_FETCH_GREG_U32(u32Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_STORE_GREG_U32((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u32Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint64_t, u64Value);
                IEM_MC_FETCH_GREG_U64(u64Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_STORE_GREG_U64((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u64Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;
        }
    }
    else
    {
        /*
         * We're writing a register to memory.
         */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint16_t, u16Value);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_FETCH_GREG_U16(u16Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_STORE_MEM_U16(pIemCpu->iEffSeg, GCPtrEffDst, u16Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint32_t, u32Value);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_FETCH_GREG_U32(u32Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_STORE_MEM_U32(pIemCpu->iEffSeg, GCPtrEffDst, u32Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint64_t, u64Value);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_FETCH_GREG_U64(u64Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_STORE_MEM_U64(pIemCpu->iEffSeg, GCPtrEffDst, u64Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;
        }
    }
    return VINF_SUCCESS;
}
9820
9821
/** Opcode 0x8a. */
FNIEMOP_DEF(iemOp_mov_Gb_Eb)
{
    /*
     * MOV Gb,Eb - load a byte register from a register or memory.
     */
    IEMOP_MNEMONIC("mov Gb,Eb");

    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(uint8_t, u8Value);
        IEM_MC_FETCH_GREG_U8(u8Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_STORE_GREG_U8(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u8Value);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /*
         * We're loading a register from memory.
         */
        IEM_MC_BEGIN(0, 2);
        IEM_MC_LOCAL(uint8_t, u8Value);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEM_MC_FETCH_MEM_U8(u8Value, pIemCpu->iEffSeg, GCPtrEffDst);
        IEM_MC_STORE_GREG_U8(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u8Value);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
9858
9859
/** Opcode 0x8b. */
FNIEMOP_DEF(iemOp_mov_Gv_Ev)
{
    /*
     * MOV Gv,Ev - load a word/dword/qword register from a register or
     * memory, dispatched on the effective operand size.
     */
    IEMOP_MNEMONIC("mov Gv,Ev");

    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint16_t, u16Value);
                IEM_MC_FETCH_GREG_U16(u16Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint32_t, u32Value);
                IEM_MC_FETCH_GREG_U32(u32Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint64_t, u64Value);
                IEM_MC_FETCH_GREG_U64(u64Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;
        }
    }
    else
    {
        /*
         * We're loading a register from memory.
         */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint16_t, u16Value);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_FETCH_MEM_U16(u16Value, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint32_t, u32Value);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_FETCH_MEM_U32(u32Value, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint64_t, u64Value);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_FETCH_MEM_U64(u64Value, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;
        }
    }
    return VINF_SUCCESS;
}
9946
9947
9948/** Opcode 0x63. */
9949FNIEMOP_DEF(iemOp_arpl_Ew_Gw_movsx_Gv_Ev)
9950{
9951 if (pIemCpu->enmCpuMode != IEMMODE_64BIT)
9952 return FNIEMOP_CALL(iemOp_arpl_Ew_Gw);
9953 if (pIemCpu->enmEffOpSize != IEMMODE_64BIT)
9954 return FNIEMOP_CALL(iemOp_mov_Gv_Ev);
9955 return FNIEMOP_CALL(iemOp_movsxd_Gv_Ev);
9956}
9957
9958
/** Opcode 0x8c. */
FNIEMOP_DEF(iemOp_mov_Ev_Sw)
{
    /*
     * MOV Ev,Sw - store a segment register into a general register or
     * memory.  Raises #UD for segment register encodings above GS; the
     * memory form always stores 16 bits regardless of operand size.
     */
    IEMOP_MNEMONIC("mov Ev,Sw");

    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

    /*
     * Check that the destination register exists. The REX.R prefix is ignored.
     */
    uint8_t const iSegReg = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
    if ( iSegReg > X86_SREG_GS)
        return IEMOP_RAISE_INVALID_OPCODE(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

    /*
     * If rm is denoting a register, no more instruction bytes.
     * In that case, the operand size is respected and the upper bits are
     * cleared (starting with some pentium).
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint16_t, u16Value);
                IEM_MC_FETCH_SREG_U16(u16Value, iSegReg);
                IEM_MC_STORE_GREG_U16((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u16Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_32BIT:
                /* Zero-extend the selector into the 32-bit register. */
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint32_t, u32Value);
                IEM_MC_FETCH_SREG_ZX_U32(u32Value, iSegReg);
                IEM_MC_STORE_GREG_U32((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u32Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_64BIT:
                /* Zero-extend the selector into the 64-bit register. */
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint64_t, u64Value);
                IEM_MC_FETCH_SREG_ZX_U64(u64Value, iSegReg);
                IEM_MC_STORE_GREG_U64((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u64Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;
        }
    }
    else
    {
        /*
         * We're saving the register to memory. The access is word sized
         * regardless of operand size prefixes.
         */
#if 0 /* not necessary */
        pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize = IEMMODE_16BIT;
#endif
        IEM_MC_BEGIN(0, 2);
        IEM_MC_LOCAL(uint16_t, u16Value);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEM_MC_FETCH_SREG_U16(u16Value, iSegReg);
        IEM_MC_STORE_MEM_U16(pIemCpu->iEffSeg, GCPtrEffDst, u16Value);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
10031
10032
10033
10034
/** Opcode 0x8d. */
FNIEMOP_DEF(iemOp_lea_Gv_M)
{
    /*
     * LEA Gv,M - store the effective address of the memory operand in a
     * general register, truncated to the effective operand size.  A
     * register-form ModR/M (mod=3) is invalid and raises #UD.
     */
    IEMOP_MNEMONIC("lea Gv,M");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
        return IEMOP_RAISE_INVALID_OPCODE(); /* no register form */

    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            IEM_MC_BEGIN(0, 2);
            IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
            IEM_MC_LOCAL(uint16_t, u16Cast);
            IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
            /* Truncate the address to 16 bits before storing. */
            IEM_MC_ASSIGN_TO_SMALLER(u16Cast, GCPtrEffSrc);
            IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Cast);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_32BIT:
            IEM_MC_BEGIN(0, 2);
            IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
            IEM_MC_LOCAL(uint32_t, u32Cast);
            IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
            /* Truncate the address to 32 bits before storing. */
            IEM_MC_ASSIGN_TO_SMALLER(u32Cast, GCPtrEffSrc);
            IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Cast);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_64BIT:
            IEM_MC_BEGIN(0, 1);
            IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
            IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
            IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, GCPtrEffSrc);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;
    }
    AssertFailedReturn(VERR_INTERNAL_ERROR_5);
}
10079
10080
/**
 * Opcode 0x8e - MOV Sw,Ev.
 *
 * Loads a segment register from a 16-bit general register or a word in
 * memory.  CS as the destination is invalid (\#UD), as are 'reg' values
 * above GS.  The actual load (including any protected-mode checks) is
 * deferred to the C implementation iemCImpl_load_SReg.
 */
FNIEMOP_DEF(iemOp_mov_Sw_Ev)
{
    IEMOP_MNEMONIC("mov Sw,Ev");

    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

    /*
     * The practical operand size is 16-bit.
     */
#if 0 /* not necessary */
    pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize = IEMMODE_16BIT;
#endif

    /*
     * Check that the destination register exists and can be used with this
     * instruction.  The REX.R prefix is ignored.
     */
    uint8_t const iSegReg = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
    if (   iSegReg == X86_SREG_CS
        || iSegReg > X86_SREG_GS)
        return IEMOP_RAISE_INVALID_OPCODE(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        IEM_MC_BEGIN(2, 0);
        IEM_MC_ARG_CONST(uint8_t, iSRegArg, iSegReg, 0);
        IEM_MC_ARG(uint16_t,      u16Value,          1);
        IEM_MC_FETCH_GREG_U16(u16Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_CALL_CIMPL_2(iemCImpl_load_SReg, iSRegArg, u16Value);
        IEM_MC_END();
    }
    else
    {
        /*
         * We're loading the register from memory.  The access is word sized
         * regardless of operand size prefixes.
         */
        IEM_MC_BEGIN(2, 1);
        IEM_MC_ARG_CONST(uint8_t, iSRegArg, iSegReg, 0);
        IEM_MC_ARG(uint16_t,      u16Value,          1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEM_MC_FETCH_MEM_U16(u16Value, pIemCpu->iEffSeg, GCPtrEffDst);
        IEM_MC_CALL_CIMPL_2(iemCImpl_load_SReg, iSRegArg, u16Value);
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
10134
10135
/**
 * Opcode 0x8f /0 - POP Ev.
 *
 * Pops a word/dword/qword off the stack into a general register or memory
 * operand.  The memory form is implemented directly in the interpreter
 * (no IEM_MC microcode) because Intel specifies that rSP is incremented
 * *before* it is used in the effective address calculation, which forces
 * the R/M byte to be decoded twice here.
 */
FNIEMOP_DEF_1(iemOp_pop_Ev, uint8_t, bRm)
{
    /* This bugger is rather annoying as it requires rSP to be updated before
       doing the effective address calculations.  Will eventually require a
       split between the R/M+SIB decoding and the effective address
       calculation - which is something that is required for any attempt at
       reusing this code for a recompiler.  It may also be good to have if we
       need to delay #UD exception caused by invalid lock prefixes.

       For now, we'll do a mostly safe interpreter-only implementation here. */
    /** @todo What's the deal with the 'reg' field and pop Ev?  Ignoring it for
     *        now until tests show it's checked.. */
    IEMOP_MNEMONIC("pop Ev");
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

    /* Register access is relatively easy and can share code. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
        return FNIEMOP_CALL_1(iemOpCommonPopGReg, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);

    /*
     * Memory target.
     *
     * Intel says that RSP is incremented before it's used in any effective
     * address calculations.  This means some serious extra annoyance here since
     * we decode and calculate the effective address in one step and like to
     * delay committing registers till everything is done.
     *
     * So, we'll decode and calculate the effective address twice.  This will
     * require some recoding if turned into a recompiler.
     */
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); /* The common code does this differently. */

#ifndef TST_IEM_CHECK_MC
    /* Calc effective address with modified ESP. */
    uint8_t const offOpcodeSaved = pIemCpu->offOpcode;
    RTGCPTR GCPtrEff;
    VBOXSTRICTRC rcStrict;
    rcStrict = iemOpHlpCalcRmEffAddr(pIemCpu, bRm, 0, &GCPtrEff);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    pIemCpu->offOpcode = offOpcodeSaved; /* rewind opcode stream for the 2nd decode below */

    /* Temporarily bump rSP by the operand size, redo the effective address
       calculation, then restore rSP so nothing is committed on failure. */
    PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
    uint64_t const RspSaved = pCtx->rsp;
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT: iemRegAddToRsp(pIemCpu, pCtx, 2); break;
        case IEMMODE_32BIT: iemRegAddToRsp(pIemCpu, pCtx, 4); break;
        case IEMMODE_64BIT: iemRegAddToRsp(pIemCpu, pCtx, 8); break;
        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
    rcStrict = iemOpHlpCalcRmEffAddr(pIemCpu, bRm, 0, &GCPtrEff);
    Assert(rcStrict == VINF_SUCCESS); /* same decode as above, so it cannot fail now */
    pCtx->rsp = RspSaved;

    /* Perform the operation - this should be CImpl. */
    RTUINT64U TmpRsp;
    TmpRsp.u = pCtx->rsp;
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
        {
            uint16_t u16Value;
            rcStrict = iemMemStackPopU16Ex(pIemCpu, &u16Value, &TmpRsp);
            if (rcStrict == VINF_SUCCESS)
                rcStrict = iemMemStoreDataU16(pIemCpu, pIemCpu->iEffSeg, GCPtrEff, u16Value);
            break;
        }

        case IEMMODE_32BIT:
        {
            uint32_t u32Value;
            rcStrict = iemMemStackPopU32Ex(pIemCpu, &u32Value, &TmpRsp);
            if (rcStrict == VINF_SUCCESS)
                rcStrict = iemMemStoreDataU32(pIemCpu, pIemCpu->iEffSeg, GCPtrEff, u32Value);
            break;
        }

        case IEMMODE_64BIT:
        {
            uint64_t u64Value;
            rcStrict = iemMemStackPopU64Ex(pIemCpu, &u64Value, &TmpRsp);
            if (rcStrict == VINF_SUCCESS)
                rcStrict = iemMemStoreDataU64(pIemCpu, pIemCpu->iEffSeg, GCPtrEff, u64Value);
            break;
        }

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
    /* Only commit rSP and advance RIP if both the pop and the store succeeded. */
    if (rcStrict == VINF_SUCCESS)
    {
        pCtx->rsp = TmpRsp.u;
        iemRegUpdateRipAndClearRF(pIemCpu);
    }
    return rcStrict;

#else
    return VERR_IEM_IPE_2;
#endif
}
10237
10238
10239/** Opcode 0x8f. */
10240FNIEMOP_DEF(iemOp_Grp1A)
10241{
10242 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
10243 if ((bRm & X86_MODRM_REG_MASK) != (0 << X86_MODRM_REG_SHIFT)) /* only pop Ev in this group. */
10244 return IEMOP_RAISE_INVALID_OPCODE();
10245 return FNIEMOP_CALL_1(iemOp_pop_Ev, bRm);
10246}
10247
10248
10249/**
10250 * Common 'xchg reg,rAX' helper.
10251 */
10252FNIEMOP_DEF_1(iemOpCommonXchgGRegRax, uint8_t, iReg)
10253{
10254 IEMOP_HLP_NO_LOCK_PREFIX();
10255
10256 iReg |= pIemCpu->uRexB;
10257 switch (pIemCpu->enmEffOpSize)
10258 {
10259 case IEMMODE_16BIT:
10260 IEM_MC_BEGIN(0, 2);
10261 IEM_MC_LOCAL(uint16_t, u16Tmp1);
10262 IEM_MC_LOCAL(uint16_t, u16Tmp2);
10263 IEM_MC_FETCH_GREG_U16(u16Tmp1, iReg);
10264 IEM_MC_FETCH_GREG_U16(u16Tmp2, X86_GREG_xAX);
10265 IEM_MC_STORE_GREG_U16(X86_GREG_xAX, u16Tmp1);
10266 IEM_MC_STORE_GREG_U16(iReg, u16Tmp2);
10267 IEM_MC_ADVANCE_RIP();
10268 IEM_MC_END();
10269 return VINF_SUCCESS;
10270
10271 case IEMMODE_32BIT:
10272 IEM_MC_BEGIN(0, 2);
10273 IEM_MC_LOCAL(uint32_t, u32Tmp1);
10274 IEM_MC_LOCAL(uint32_t, u32Tmp2);
10275 IEM_MC_FETCH_GREG_U32(u32Tmp1, iReg);
10276 IEM_MC_FETCH_GREG_U32(u32Tmp2, X86_GREG_xAX);
10277 IEM_MC_STORE_GREG_U32(X86_GREG_xAX, u32Tmp1);
10278 IEM_MC_STORE_GREG_U32(iReg, u32Tmp2);
10279 IEM_MC_ADVANCE_RIP();
10280 IEM_MC_END();
10281 return VINF_SUCCESS;
10282
10283 case IEMMODE_64BIT:
10284 IEM_MC_BEGIN(0, 2);
10285 IEM_MC_LOCAL(uint64_t, u64Tmp1);
10286 IEM_MC_LOCAL(uint64_t, u64Tmp2);
10287 IEM_MC_FETCH_GREG_U64(u64Tmp1, iReg);
10288 IEM_MC_FETCH_GREG_U64(u64Tmp2, X86_GREG_xAX);
10289 IEM_MC_STORE_GREG_U64(X86_GREG_xAX, u64Tmp1);
10290 IEM_MC_STORE_GREG_U64(iReg, u64Tmp2);
10291 IEM_MC_ADVANCE_RIP();
10292 IEM_MC_END();
10293 return VINF_SUCCESS;
10294
10295 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10296 }
10297}
10298
10299
10300/** Opcode 0x90. */
10301FNIEMOP_DEF(iemOp_nop)
10302{
10303 /* R8/R8D and RAX/EAX can be exchanged. */
10304 if (pIemCpu->fPrefixes & IEM_OP_PRF_REX_B)
10305 {
10306 IEMOP_MNEMONIC("xchg r8,rAX");
10307 return FNIEMOP_CALL_1(iemOpCommonXchgGRegRax, X86_GREG_xAX);
10308 }
10309
10310 if (pIemCpu->fPrefixes & IEM_OP_PRF_LOCK)
10311 IEMOP_MNEMONIC("pause");
10312 else
10313 IEMOP_MNEMONIC("nop");
10314 IEM_MC_BEGIN(0, 0);
10315 IEM_MC_ADVANCE_RIP();
10316 IEM_MC_END();
10317 return VINF_SUCCESS;
10318}
10319
10320
/** Opcode 0x91 - XCHG rCX,rAX (r9 with REX.B); shares the common worker. */
FNIEMOP_DEF(iemOp_xchg_eCX_eAX)
{
    IEMOP_MNEMONIC("xchg rCX,rAX");
    return FNIEMOP_CALL_1(iemOpCommonXchgGRegRax, X86_GREG_xCX);
}
10327
10328
/** Opcode 0x92 - XCHG rDX,rAX (r10 with REX.B); shares the common worker. */
FNIEMOP_DEF(iemOp_xchg_eDX_eAX)
{
    IEMOP_MNEMONIC("xchg rDX,rAX");
    return FNIEMOP_CALL_1(iemOpCommonXchgGRegRax, X86_GREG_xDX);
}
10335
10336
/** Opcode 0x93 - XCHG rBX,rAX (r11 with REX.B); shares the common worker. */
FNIEMOP_DEF(iemOp_xchg_eBX_eAX)
{
    IEMOP_MNEMONIC("xchg rBX,rAX");
    return FNIEMOP_CALL_1(iemOpCommonXchgGRegRax, X86_GREG_xBX);
}
10343
10344
10345/** Opcode 0x94. */
10346FNIEMOP_DEF(iemOp_xchg_eSP_eAX)
10347{
10348 IEMOP_MNEMONIC("xchg rSX,rAX");
10349 return FNIEMOP_CALL_1(iemOpCommonXchgGRegRax, X86_GREG_xSP);
10350}
10351
10352
/** Opcode 0x95 - XCHG rBP,rAX (r13 with REX.B); shares the common worker. */
FNIEMOP_DEF(iemOp_xchg_eBP_eAX)
{
    IEMOP_MNEMONIC("xchg rBP,rAX");
    return FNIEMOP_CALL_1(iemOpCommonXchgGRegRax, X86_GREG_xBP);
}
10359
10360
/** Opcode 0x96 - XCHG rSI,rAX (r14 with REX.B); shares the common worker. */
FNIEMOP_DEF(iemOp_xchg_eSI_eAX)
{
    IEMOP_MNEMONIC("xchg rSI,rAX");
    return FNIEMOP_CALL_1(iemOpCommonXchgGRegRax, X86_GREG_xSI);
}
10367
10368
/** Opcode 0x97 - XCHG rDI,rAX (r15 with REX.B); shares the common worker. */
FNIEMOP_DEF(iemOp_xchg_eDI_eAX)
{
    IEMOP_MNEMONIC("xchg rDI,rAX");
    return FNIEMOP_CALL_1(iemOpCommonXchgGRegRax, X86_GREG_xDI);
}
10375
10376
/**
 * Opcode 0x98 - CBW / CWDE / CDQE.
 *
 * Sign-extends the lower half of rAX into the full (effective-size)
 * register: AL->AX, AX->EAX or EAX->RAX.  Implemented by testing the sign
 * bit of the source half and OR'ing/AND'ing the upper half accordingly.
 */
FNIEMOP_DEF(iemOp_cbw)
{
    IEMOP_HLP_NO_LOCK_PREFIX();
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            IEMOP_MNEMONIC("cbw");
            IEM_MC_BEGIN(0, 1);
            /* Bit 7 of AL is the sign; fill AH with it. */
            IEM_MC_IF_GREG_BIT_SET(X86_GREG_xAX, 7) {
                IEM_MC_OR_GREG_U16(X86_GREG_xAX, UINT16_C(0xff00));
            } IEM_MC_ELSE() {
                IEM_MC_AND_GREG_U16(X86_GREG_xAX, UINT16_C(0x00ff));
            } IEM_MC_ENDIF();
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_32BIT:
            IEMOP_MNEMONIC("cwde");
            IEM_MC_BEGIN(0, 1);
            /* Bit 15 of AX is the sign; fill the upper word of EAX with it. */
            IEM_MC_IF_GREG_BIT_SET(X86_GREG_xAX, 15) {
                IEM_MC_OR_GREG_U32(X86_GREG_xAX, UINT32_C(0xffff0000));
            } IEM_MC_ELSE() {
                IEM_MC_AND_GREG_U32(X86_GREG_xAX, UINT32_C(0x0000ffff));
            } IEM_MC_ENDIF();
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_64BIT:
            IEMOP_MNEMONIC("cdqe");
            IEM_MC_BEGIN(0, 1);
            /* Bit 31 of EAX is the sign; fill the upper dword of RAX with it. */
            IEM_MC_IF_GREG_BIT_SET(X86_GREG_xAX, 31) {
                IEM_MC_OR_GREG_U64(X86_GREG_xAX, UINT64_C(0xffffffff00000000));
            } IEM_MC_ELSE() {
                IEM_MC_AND_GREG_U64(X86_GREG_xAX, UINT64_C(0x00000000ffffffff));
            } IEM_MC_ENDIF();
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
10422
10423
/**
 * Opcode 0x99 - CWD / CDQ / CQO.
 *
 * Sign-extends rAX into rDX:rAX by filling rDX with copies of the sign bit
 * of the effective-size rAX (all-ones if negative, zero otherwise).
 */
FNIEMOP_DEF(iemOp_cwd)
{
    IEMOP_HLP_NO_LOCK_PREFIX();
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            IEMOP_MNEMONIC("cwd");
            IEM_MC_BEGIN(0, 1);
            IEM_MC_IF_GREG_BIT_SET(X86_GREG_xAX, 15) {
                IEM_MC_STORE_GREG_U16_CONST(X86_GREG_xDX, UINT16_C(0xffff));
            } IEM_MC_ELSE() {
                IEM_MC_STORE_GREG_U16_CONST(X86_GREG_xDX, 0);
            } IEM_MC_ENDIF();
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_32BIT:
            IEMOP_MNEMONIC("cdq");
            IEM_MC_BEGIN(0, 1);
            IEM_MC_IF_GREG_BIT_SET(X86_GREG_xAX, 31) {
                IEM_MC_STORE_GREG_U32_CONST(X86_GREG_xDX, UINT32_C(0xffffffff));
            } IEM_MC_ELSE() {
                IEM_MC_STORE_GREG_U32_CONST(X86_GREG_xDX, 0);
            } IEM_MC_ENDIF();
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_64BIT:
            IEMOP_MNEMONIC("cqo");
            IEM_MC_BEGIN(0, 1);
            IEM_MC_IF_GREG_BIT_SET(X86_GREG_xAX, 63) {
                IEM_MC_STORE_GREG_U64_CONST(X86_GREG_xDX, UINT64_C(0xffffffffffffffff));
            } IEM_MC_ELSE() {
                IEM_MC_STORE_GREG_U64_CONST(X86_GREG_xDX, 0);
            } IEM_MC_ENDIF();
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
10469
10470
/**
 * Opcode 0x9a - CALL Ap (far call with immediate selector:offset).
 *
 * Invalid in 64-bit mode (\#UD).  Decodes a 16- or 32-bit offset (per the
 * effective operand size) followed by a 16-bit selector, then defers the
 * actual far call to iemCImpl_callf.
 */
FNIEMOP_DEF(iemOp_call_Ap)
{
    IEMOP_MNEMONIC("call Ap");
    IEMOP_HLP_NO_64BIT();

    /* Decode the far pointer address and pass it on to the far call C implementation. */
    uint32_t offSeg;
    if (pIemCpu->enmEffOpSize != IEMMODE_16BIT)
        IEM_OPCODE_GET_NEXT_U32(&offSeg);
    else
        IEM_OPCODE_GET_NEXT_U16_ZX_U32(&offSeg);
    uint16_t uSel;  IEM_OPCODE_GET_NEXT_U16(&uSel);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_3(iemCImpl_callf, uSel, offSeg, pIemCpu->enmEffOpSize);
}
10487
10488
/**
 * Opcode 0x9b - WAIT (aka FWAIT).
 *
 * Checks for pending x87 exceptions (and device-not-available conditions)
 * and otherwise does nothing.
 */
FNIEMOP_DEF(iemOp_wait)
{
    IEMOP_MNEMONIC("wait");
    IEMOP_HLP_NO_LOCK_PREFIX();

    IEM_MC_BEGIN(0, 0);
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
10502
10503
/**
 * Opcode 0x9c - PUSHF/PUSHFD/PUSHFQ.
 *
 * Defaults to 64-bit operand size in long mode; the flag-pushing details
 * are deferred to iemCImpl_pushf.
 */
FNIEMOP_DEF(iemOp_pushf_Fv)
{
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_pushf, pIemCpu->enmEffOpSize);
}
10511
10512
/**
 * Opcode 0x9d - POPF/POPFD/POPFQ.
 *
 * Defaults to 64-bit operand size in long mode; the privilege-sensitive
 * flag-restoring details are deferred to iemCImpl_popf.
 */
FNIEMOP_DEF(iemOp_popf_Fv)
{
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_popf, pIemCpu->enmEffOpSize);
}
10520
10521
/**
 * Opcode 0x9e - SAHF.
 *
 * Loads SF, ZF, AF, PF and CF from AH into EFLAGS; the other flag bits are
 * preserved and the fixed bit 1 is forced set.  In 64-bit mode this is only
 * valid if the CPU reports the LAHF/SAHF CPUID feature bit.
 */
FNIEMOP_DEF(iemOp_sahf)
{
    IEMOP_MNEMONIC("sahf");
    IEMOP_HLP_NO_LOCK_PREFIX();
    if (   pIemCpu->enmCpuMode == IEMMODE_64BIT
        && !IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX(X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF))
        return IEMOP_RAISE_INVALID_OPCODE();
    IEM_MC_BEGIN(0, 2);
    IEM_MC_LOCAL(uint32_t, u32Flags);
    IEM_MC_LOCAL(uint32_t, EFlags);
    IEM_MC_FETCH_EFLAGS(EFlags);
    IEM_MC_FETCH_GREG_U8_ZX_U32(u32Flags, X86_GREG_xSP/*=AH*/);
    /* Mask AH down to the five flags SAHF may set, then merge them into the
       low byte of EFLAGS while keeping all higher bits unchanged. */
    IEM_MC_AND_LOCAL_U32(u32Flags, X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF);
    IEM_MC_AND_LOCAL_U32(EFlags, UINT32_C(0xffffff00));
    IEM_MC_OR_LOCAL_U32(u32Flags, X86_EFL_1); /* bit 1 is always set */
    IEM_MC_OR_2LOCS_U32(EFlags, u32Flags);
    IEM_MC_COMMIT_EFLAGS(EFlags);
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
10544
10545
/**
 * Opcode 0x9f - LAHF.
 *
 * Stores the low byte of EFLAGS in AH.  In 64-bit mode this is only valid
 * if the CPU reports the LAHF/SAHF CPUID feature bit.
 */
FNIEMOP_DEF(iemOp_lahf)
{
    IEMOP_MNEMONIC("lahf");
    IEMOP_HLP_NO_LOCK_PREFIX();
    if (   pIemCpu->enmCpuMode == IEMMODE_64BIT
        && !IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX(X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF))
        return IEMOP_RAISE_INVALID_OPCODE();
    IEM_MC_BEGIN(0, 1);
    IEM_MC_LOCAL(uint8_t, u8Flags);
    IEM_MC_FETCH_EFLAGS_U8(u8Flags);
    IEM_MC_STORE_GREG_U8(X86_GREG_xSP/*=AH*/, u8Flags); /* greg 4 with no REX = AH */
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
10562
10563
10564/**
10565 * Macro used by iemOp_mov_Al_Ob, iemOp_mov_rAX_Ov, iemOp_mov_Ob_AL and
10566 * iemOp_mov_Ov_rAX to fetch the moffsXX bit of the opcode and fend of lock
10567 * prefixes. Will return on failures.
10568 * @param a_GCPtrMemOff The variable to store the offset in.
10569 */
10570#define IEMOP_FETCH_MOFFS_XX(a_GCPtrMemOff) \
10571 do \
10572 { \
10573 switch (pIemCpu->enmEffAddrMode) \
10574 { \
10575 case IEMMODE_16BIT: \
10576 IEM_OPCODE_GET_NEXT_U16_ZX_U64(&(a_GCPtrMemOff)); \
10577 break; \
10578 case IEMMODE_32BIT: \
10579 IEM_OPCODE_GET_NEXT_U32_ZX_U64(&(a_GCPtrMemOff)); \
10580 break; \
10581 case IEMMODE_64BIT: \
10582 IEM_OPCODE_GET_NEXT_U64(&(a_GCPtrMemOff)); \
10583 break; \
10584 IEM_NOT_REACHED_DEFAULT_CASE_RET(); \
10585 } \
10586 IEMOP_HLP_NO_LOCK_PREFIX(); \
10587 } while (0)
10588
10589/** Opcode 0xa0. */
10590FNIEMOP_DEF(iemOp_mov_Al_Ob)
10591{
10592 /*
10593 * Get the offset and fend of lock prefixes.
10594 */
10595 RTGCPTR GCPtrMemOff;
10596 IEMOP_FETCH_MOFFS_XX(GCPtrMemOff);
10597
10598 /*
10599 * Fetch AL.
10600 */
10601 IEM_MC_BEGIN(0,1);
10602 IEM_MC_LOCAL(uint8_t, u8Tmp);
10603 IEM_MC_FETCH_MEM_U8(u8Tmp, pIemCpu->iEffSeg, GCPtrMemOff);
10604 IEM_MC_STORE_GREG_U8(X86_GREG_xAX, u8Tmp);
10605 IEM_MC_ADVANCE_RIP();
10606 IEM_MC_END();
10607 return VINF_SUCCESS;
10608}
10609
10610
/**
 * Opcode 0xa1 - MOV rAX,Ov.
 *
 * Loads AX/EAX/RAX (per the effective operand size) from the moffs
 * immediate address.
 */
FNIEMOP_DEF(iemOp_mov_rAX_Ov)
{
    /*
     * Get the offset and fend of lock prefixes.
     */
    IEMOP_MNEMONIC("mov rAX,Ov");
    RTGCPTR GCPtrMemOff;
    IEMOP_FETCH_MOFFS_XX(GCPtrMemOff);

    /*
     * Fetch rAX.
     */
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            IEM_MC_BEGIN(0,1);
            IEM_MC_LOCAL(uint16_t, u16Tmp);
            IEM_MC_FETCH_MEM_U16(u16Tmp, pIemCpu->iEffSeg, GCPtrMemOff);
            IEM_MC_STORE_GREG_U16(X86_GREG_xAX, u16Tmp);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_32BIT:
            IEM_MC_BEGIN(0,1);
            IEM_MC_LOCAL(uint32_t, u32Tmp);
            IEM_MC_FETCH_MEM_U32(u32Tmp, pIemCpu->iEffSeg, GCPtrMemOff);
            IEM_MC_STORE_GREG_U32(X86_GREG_xAX, u32Tmp);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_64BIT:
            IEM_MC_BEGIN(0,1);
            IEM_MC_LOCAL(uint64_t, u64Tmp);
            IEM_MC_FETCH_MEM_U64(u64Tmp, pIemCpu->iEffSeg, GCPtrMemOff);
            IEM_MC_STORE_GREG_U64(X86_GREG_xAX, u64Tmp);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
10656
10657
10658/** Opcode 0xa2. */
10659FNIEMOP_DEF(iemOp_mov_Ob_AL)
10660{
10661 /*
10662 * Get the offset and fend of lock prefixes.
10663 */
10664 RTGCPTR GCPtrMemOff;
10665 IEMOP_FETCH_MOFFS_XX(GCPtrMemOff);
10666
10667 /*
10668 * Store AL.
10669 */
10670 IEM_MC_BEGIN(0,1);
10671 IEM_MC_LOCAL(uint8_t, u8Tmp);
10672 IEM_MC_FETCH_GREG_U8(u8Tmp, X86_GREG_xAX);
10673 IEM_MC_STORE_MEM_U8(pIemCpu->iEffSeg, GCPtrMemOff, u8Tmp);
10674 IEM_MC_ADVANCE_RIP();
10675 IEM_MC_END();
10676 return VINF_SUCCESS;
10677}
10678
10679
10680/** Opcode 0xa3. */
10681FNIEMOP_DEF(iemOp_mov_Ov_rAX)
10682{
10683 /*
10684 * Get the offset and fend of lock prefixes.
10685 */
10686 RTGCPTR GCPtrMemOff;
10687 IEMOP_FETCH_MOFFS_XX(GCPtrMemOff);
10688
10689 /*
10690 * Store rAX.
10691 */
10692 switch (pIemCpu->enmEffOpSize)
10693 {
10694 case IEMMODE_16BIT:
10695 IEM_MC_BEGIN(0,1);
10696 IEM_MC_LOCAL(uint16_t, u16Tmp);
10697 IEM_MC_FETCH_GREG_U16(u16Tmp, X86_GREG_xAX);
10698 IEM_MC_STORE_MEM_U16(pIemCpu->iEffSeg, GCPtrMemOff, u16Tmp);
10699 IEM_MC_ADVANCE_RIP();
10700 IEM_MC_END();
10701 return VINF_SUCCESS;
10702
10703 case IEMMODE_32BIT:
10704 IEM_MC_BEGIN(0,1);
10705 IEM_MC_LOCAL(uint32_t, u32Tmp);
10706 IEM_MC_FETCH_GREG_U32(u32Tmp, X86_GREG_xAX);
10707 IEM_MC_STORE_MEM_U32(pIemCpu->iEffSeg, GCPtrMemOff, u32Tmp);
10708 IEM_MC_ADVANCE_RIP();
10709 IEM_MC_END();
10710 return VINF_SUCCESS;
10711
10712 case IEMMODE_64BIT:
10713 IEM_MC_BEGIN(0,1);
10714 IEM_MC_LOCAL(uint64_t, u64Tmp);
10715 IEM_MC_FETCH_GREG_U64(u64Tmp, X86_GREG_xAX);
10716 IEM_MC_STORE_MEM_U64(pIemCpu->iEffSeg, GCPtrMemOff, u64Tmp);
10717 IEM_MC_ADVANCE_RIP();
10718 IEM_MC_END();
10719 return VINF_SUCCESS;
10720
10721 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10722 }
10723}
10724
/**
 * Macro used by iemOp_movsb_Xb_Yb and iemOp_movswd_Xv_Yv.
 *
 * Emits one non-rep MOVS step: load ValBits from [seg:rSI], store to
 * [ES:rDI], then advance (or retreat, if EFLAGS.DF is set) both index
 * registers by ValBits/8 at the given address width.
 */
#define IEM_MOVS_CASE(ValBits, AddrBits) \
        IEM_MC_BEGIN(0, 2); \
        IEM_MC_LOCAL(uint##ValBits##_t, uValue); \
        IEM_MC_LOCAL(RTGCPTR,           uAddr); \
        IEM_MC_FETCH_GREG_U##AddrBits##_ZX_U64(uAddr, X86_GREG_xSI); \
        IEM_MC_FETCH_MEM_U##ValBits(uValue, pIemCpu->iEffSeg, uAddr); \
        IEM_MC_FETCH_GREG_U##AddrBits##_ZX_U64(uAddr, X86_GREG_xDI); \
        IEM_MC_STORE_MEM_U##ValBits(X86_SREG_ES, uAddr, uValue); \
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_DF) { \
            IEM_MC_SUB_GREG_U##AddrBits(X86_GREG_xDI, ValBits / 8); \
            IEM_MC_SUB_GREG_U##AddrBits(X86_GREG_xSI, ValBits / 8); \
        } IEM_MC_ELSE() { \
            IEM_MC_ADD_GREG_U##AddrBits(X86_GREG_xDI, ValBits / 8); \
            IEM_MC_ADD_GREG_U##AddrBits(X86_GREG_xSI, ValBits / 8); \
        } IEM_MC_ENDIF(); \
        IEM_MC_ADVANCE_RIP(); \
        IEM_MC_END();
10743
/**
 * Opcode 0xa4 - MOVSB.
 *
 * Copies a byte from [seg:rSI] to [ES:rDI].  With a REP/REPNE prefix
 * (both are treated as REP here) the whole loop is handed to the C
 * implementations; the single-step form uses IEM_MOVS_CASE.
 */
FNIEMOP_DEF(iemOp_movsb_Xb_Yb)
{
    IEMOP_HLP_NO_LOCK_PREFIX();

    /*
     * Use the C implementation if a repeat prefix is encountered.
     */
    if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
    {
        IEMOP_MNEMONIC("rep movsb Xb,Yb");
        switch (pIemCpu->enmEffAddrMode)
        {
            case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op8_addr16, pIemCpu->iEffSeg);
            case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op8_addr32, pIemCpu->iEffSeg);
            case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op8_addr64, pIemCpu->iEffSeg);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    IEMOP_MNEMONIC("movsb Xb,Yb");

    /*
     * Sharing case implementation with movs[wdq] below.
     */
    switch (pIemCpu->enmEffAddrMode)
    {
        case IEMMODE_16BIT: IEM_MOVS_CASE(8, 16); break;
        case IEMMODE_32BIT: IEM_MOVS_CASE(8, 32); break;
        case IEMMODE_64BIT: IEM_MOVS_CASE(8, 64); break;
        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
    return VINF_SUCCESS;
}
10777
10778
/**
 * Opcode 0xa5 - MOVSW/MOVSD/MOVSQ.
 *
 * Copies a word/dword/qword from [seg:rSI] to [ES:rDI].  With a REP/REPNE
 * prefix (both treated as REP) the loop is handed to the C implementations,
 * selected by operand size x address size; the single-step form uses
 * IEM_MOVS_CASE.  64-bit operand with 16-bit addressing is not encodable.
 */
FNIEMOP_DEF(iemOp_movswd_Xv_Yv)
{
    IEMOP_HLP_NO_LOCK_PREFIX();

    /*
     * Use the C implementation if a repeat prefix is encountered.
     */
    if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
    {
        IEMOP_MNEMONIC("rep movs Xv,Yv");
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op16_addr16, pIemCpu->iEffSeg);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op16_addr32, pIemCpu->iEffSeg);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op16_addr64, pIemCpu->iEffSeg);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                break;
            case IEMMODE_32BIT:
                /* All inner cases return, so the fall-through into the 64-bit
                   case below is unreachable. */
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op32_addr16, pIemCpu->iEffSeg);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op32_addr32, pIemCpu->iEffSeg);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op32_addr64, pIemCpu->iEffSeg);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
            case IEMMODE_64BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_3); /* cannot be encoded */
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op64_addr32, pIemCpu->iEffSeg);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op64_addr64, pIemCpu->iEffSeg);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    IEMOP_MNEMONIC("movs Xv,Yv");

    /*
     * Annoying double switch here.
     * Using ugly macro for implementing the cases, sharing it with movsb.
     */
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            switch (pIemCpu->enmEffAddrMode)
            {
                case IEMMODE_16BIT: IEM_MOVS_CASE(16, 16); break;
                case IEMMODE_32BIT: IEM_MOVS_CASE(16, 32); break;
                case IEMMODE_64BIT: IEM_MOVS_CASE(16, 64); break;
                IEM_NOT_REACHED_DEFAULT_CASE_RET();
            }
            break;

        case IEMMODE_32BIT:
            switch (pIemCpu->enmEffAddrMode)
            {
                case IEMMODE_16BIT: IEM_MOVS_CASE(32, 16); break;
                case IEMMODE_32BIT: IEM_MOVS_CASE(32, 32); break;
                case IEMMODE_64BIT: IEM_MOVS_CASE(32, 64); break;
                IEM_NOT_REACHED_DEFAULT_CASE_RET();
            }
            break;

        case IEMMODE_64BIT:
            switch (pIemCpu->enmEffAddrMode)
            {
                case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_4); /* cannot be encoded */ break;
                case IEMMODE_32BIT: IEM_MOVS_CASE(64, 32); break;
                case IEMMODE_64BIT: IEM_MOVS_CASE(64, 64); break;
                IEM_NOT_REACHED_DEFAULT_CASE_RET();
            }
            break;
        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
    return VINF_SUCCESS;
}
10861
10862#undef IEM_MOVS_CASE
10863
/**
 * Macro used by iemOp_cmpsb_Xb_Yb and iemOp_cmpswd_Xv_Yv.
 *
 * Emits one non-rep CMPS step: load ValBits from [seg:rSI] and from
 * [ES:rDI], compare them via iemAImpl_cmp_uNN (updating EFLAGS only),
 * then advance (or retreat, if EFLAGS.DF is set) both index registers by
 * ValBits/8 at the given address width.
 */
#define IEM_CMPS_CASE(ValBits, AddrBits) \
        IEM_MC_BEGIN(3, 3); \
        IEM_MC_ARG(uint##ValBits##_t *, puValue1, 0); \
        IEM_MC_ARG(uint##ValBits##_t,   uValue2,  1); \
        IEM_MC_ARG(uint32_t *,          pEFlags,  2); \
        IEM_MC_LOCAL(uint##ValBits##_t, uValue1); \
        IEM_MC_LOCAL(RTGCPTR,           uAddr); \
        \
        IEM_MC_FETCH_GREG_U##AddrBits##_ZX_U64(uAddr, X86_GREG_xSI); \
        IEM_MC_FETCH_MEM_U##ValBits(uValue1, pIemCpu->iEffSeg, uAddr); \
        IEM_MC_FETCH_GREG_U##AddrBits##_ZX_U64(uAddr, X86_GREG_xDI); \
        IEM_MC_FETCH_MEM_U##ValBits(uValue2, X86_SREG_ES, uAddr); \
        IEM_MC_REF_LOCAL(puValue1, uValue1); \
        IEM_MC_REF_EFLAGS(pEFlags); \
        IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_cmp_u##ValBits, puValue1, uValue2, pEFlags); \
        \
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_DF) { \
            IEM_MC_SUB_GREG_U##AddrBits(X86_GREG_xDI, ValBits / 8); \
            IEM_MC_SUB_GREG_U##AddrBits(X86_GREG_xSI, ValBits / 8); \
        } IEM_MC_ELSE() { \
            IEM_MC_ADD_GREG_U##AddrBits(X86_GREG_xDI, ValBits / 8); \
            IEM_MC_ADD_GREG_U##AddrBits(X86_GREG_xSI, ValBits / 8); \
        } IEM_MC_ENDIF(); \
        IEM_MC_ADVANCE_RIP(); \
        IEM_MC_END(); \
10890
10891/** Opcode 0xa6. */
10892FNIEMOP_DEF(iemOp_cmpsb_Xb_Yb)
10893{
10894 IEMOP_HLP_NO_LOCK_PREFIX();
10895
10896 /*
10897 * Use the C implementation if a repeat prefix is encountered.
10898 */
10899 if (pIemCpu->fPrefixes & IEM_OP_PRF_REPZ)
10900 {
10901 IEMOP_MNEMONIC("repe cmps Xb,Yb");
10902 switch (pIemCpu->enmEffAddrMode)
10903 {
10904 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op8_addr16, pIemCpu->iEffSeg);
10905 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op8_addr32, pIemCpu->iEffSeg);
10906 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op8_addr64, pIemCpu->iEffSeg);
10907 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10908 }
10909 }
10910 if (pIemCpu->fPrefixes & IEM_OP_PRF_REPNZ)
10911 {
10912 IEMOP_MNEMONIC("repe cmps Xb,Yb");
10913 switch (pIemCpu->enmEffAddrMode)
10914 {
10915 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op8_addr16, pIemCpu->iEffSeg);
10916 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op8_addr32, pIemCpu->iEffSeg);
10917 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op8_addr64, pIemCpu->iEffSeg);
10918 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10919 }
10920 }
10921 IEMOP_MNEMONIC("cmps Xb,Yb");
10922
10923 /*
10924 * Sharing case implementation with cmps[wdq] below.
10925 */
10926 switch (pIemCpu->enmEffAddrMode)
10927 {
10928 case IEMMODE_16BIT: IEM_CMPS_CASE(8, 16); break;
10929 case IEMMODE_32BIT: IEM_CMPS_CASE(8, 32); break;
10930 case IEMMODE_64BIT: IEM_CMPS_CASE(8, 64); break;
10931 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10932 }
10933 return VINF_SUCCESS;
10934
10935}
10936
10937
/**
 * Opcode 0xa7 - CMPSW/CMPSD/CMPSQ.
 *
 * Compares a word/dword/qword at [seg:rSI] with one at [ES:rDI], updating
 * EFLAGS.  REPE and REPNE use separate C implementations selected by
 * operand size x address size; the single-step form uses IEM_CMPS_CASE.
 * 64-bit operand with 16-bit addressing is not encodable.
 */
FNIEMOP_DEF(iemOp_cmpswd_Xv_Yv)
{
    IEMOP_HLP_NO_LOCK_PREFIX();

    /*
     * Use the C implementation if a repeat prefix is encountered.
     */
    if (pIemCpu->fPrefixes & IEM_OP_PRF_REPZ)
    {
        IEMOP_MNEMONIC("repe cmps Xv,Yv");
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op16_addr16, pIemCpu->iEffSeg);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op16_addr32, pIemCpu->iEffSeg);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op16_addr64, pIemCpu->iEffSeg);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                break;
            case IEMMODE_32BIT:
                /* All inner cases return, so the fall-through into the 64-bit
                   case below is unreachable. */
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op32_addr16, pIemCpu->iEffSeg);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op32_addr32, pIemCpu->iEffSeg);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op32_addr64, pIemCpu->iEffSeg);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
            case IEMMODE_64BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_3); /* cannot be encoded */
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op64_addr32, pIemCpu->iEffSeg);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op64_addr64, pIemCpu->iEffSeg);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }

    if (pIemCpu->fPrefixes & IEM_OP_PRF_REPNZ)
    {
        IEMOP_MNEMONIC("repne cmps Xv,Yv");
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op16_addr16, pIemCpu->iEffSeg);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op16_addr32, pIemCpu->iEffSeg);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op16_addr64, pIemCpu->iEffSeg);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                break;
            case IEMMODE_32BIT:
                /* Same unreachable fall-through as in the REPE block above. */
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op32_addr16, pIemCpu->iEffSeg);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op32_addr32, pIemCpu->iEffSeg);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op32_addr64, pIemCpu->iEffSeg);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
            case IEMMODE_64BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_3); /* cannot be encoded */
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op64_addr32, pIemCpu->iEffSeg);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op64_addr64, pIemCpu->iEffSeg);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }

    IEMOP_MNEMONIC("cmps Xv,Yv");

    /*
     * Annoying double switch here.
     * Using ugly macro for implementing the cases, sharing it with cmpsb.
     */
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            switch (pIemCpu->enmEffAddrMode)
            {
                case IEMMODE_16BIT: IEM_CMPS_CASE(16, 16); break;
                case IEMMODE_32BIT: IEM_CMPS_CASE(16, 32); break;
                case IEMMODE_64BIT: IEM_CMPS_CASE(16, 64); break;
                IEM_NOT_REACHED_DEFAULT_CASE_RET();
            }
            break;

        case IEMMODE_32BIT:
            switch (pIemCpu->enmEffAddrMode)
            {
                case IEMMODE_16BIT: IEM_CMPS_CASE(32, 16); break;
                case IEMMODE_32BIT: IEM_CMPS_CASE(32, 32); break;
                case IEMMODE_64BIT: IEM_CMPS_CASE(32, 64); break;
                IEM_NOT_REACHED_DEFAULT_CASE_RET();
            }
            break;

        case IEMMODE_64BIT:
            switch (pIemCpu->enmEffAddrMode)
            {
                case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_4); /* cannot be encoded */ break;
                case IEMMODE_32BIT: IEM_CMPS_CASE(64, 32); break;
                case IEMMODE_64BIT: IEM_CMPS_CASE(64, 64); break;
                IEM_NOT_REACHED_DEFAULT_CASE_RET();
            }
            break;
        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
    return VINF_SUCCESS;

}
11056
11057#undef IEM_CMPS_CASE
11058
/** Opcode 0xa8. */
FNIEMOP_DEF(iemOp_test_AL_Ib)
{
    IEMOP_MNEMONIC("test al,Ib");
    /* TEST leaves AF undefined; tell the exec verifier not to compare it. */
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    /* Reuses the generic AL,imm8 binary-operator decoder with the TEST worker table. */
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_AL_Ib, &g_iemAImpl_test);
}
11066
11067
/** Opcode 0xa9. */
FNIEMOP_DEF(iemOp_test_eAX_Iz)
{
    IEMOP_MNEMONIC("test rAX,Iz");
    /* TEST leaves AF undefined; tell the exec verifier not to compare it. */
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    /* Reuses the generic rAX,immZ binary-operator decoder with the TEST worker table. */
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rAX_Iz, &g_iemAImpl_test);
}
11075
11076
/** Macro used by iemOp_stosb_Yb_AL and iemOp_stoswd_Yv_eAX.
 *
 * Emits the micro-op sequence for one non-repeated STOS iteration: fetch
 * rAX (ValBits wide), store it at ES:xDI, then step xDI by the element
 * size in the direction selected by EFLAGS.DF.
 *
 * Fix: dropped the stray trailing line-continuation after IEM_MC_END();
 * it pulled the following blank line into the macro body, unlike the
 * sibling IEM_LODS_CASE / IEM_SCAS_CASE macros.
 *
 * @param   ValBits     Operand width in bits (8, 16, 32 or 64).
 * @param   AddrBits    Effective address width in bits (16, 32 or 64). */
#define IEM_STOS_CASE(ValBits, AddrBits) \
    IEM_MC_BEGIN(0, 2); \
    IEM_MC_LOCAL(uint##ValBits##_t, uValue); \
    IEM_MC_LOCAL(RTGCPTR, uAddr); \
    IEM_MC_FETCH_GREG_U##ValBits(uValue, X86_GREG_xAX); \
    IEM_MC_FETCH_GREG_U##AddrBits##_ZX_U64(uAddr, X86_GREG_xDI); \
    IEM_MC_STORE_MEM_U##ValBits(X86_SREG_ES, uAddr, uValue); \
    IEM_MC_IF_EFL_BIT_SET(X86_EFL_DF) { \
        IEM_MC_SUB_GREG_U##AddrBits(X86_GREG_xDI, ValBits / 8); \
    } IEM_MC_ELSE() { \
        IEM_MC_ADD_GREG_U##AddrBits(X86_GREG_xDI, ValBits / 8); \
    } IEM_MC_ENDIF(); \
    IEM_MC_ADVANCE_RIP(); \
    IEM_MC_END();
11093/** Opcode 0xaa. */
/** Opcode 0xaa. */
FNIEMOP_DEF(iemOp_stosb_Yb_AL)
{
    IEMOP_HLP_NO_LOCK_PREFIX();

    /*
     * Use the C implementation if a repeat prefix is encountered.
     * (REP and REPNE are treated alike here - STOS only repeats, it does
     * not terminate on ZF, so both prefixes defer to the same workers.)
     */
    if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
    {
        IEMOP_MNEMONIC("rep stos Yb,al");
        switch (pIemCpu->enmEffAddrMode)
        {
            case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_al_m16);
            case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_al_m32);
            case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_al_m64);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    IEMOP_MNEMONIC("stos Yb,al");

    /*
     * Sharing case implementation with stos[wdq] below.
     */
    switch (pIemCpu->enmEffAddrMode)
    {
        case IEMMODE_16BIT: IEM_STOS_CASE(8, 16); break;
        case IEMMODE_32BIT: IEM_STOS_CASE(8, 32); break;
        case IEMMODE_64BIT: IEM_STOS_CASE(8, 64); break;
        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
    return VINF_SUCCESS;
}
11126
11127
11128/** Opcode 0xab. */
11129FNIEMOP_DEF(iemOp_stoswd_Yv_eAX)
11130{
11131 IEMOP_HLP_NO_LOCK_PREFIX();
11132
11133 /*
11134 * Use the C implementation if a repeat prefix is encountered.
11135 */
11136 if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
11137 {
11138 IEMOP_MNEMONIC("rep stos Yv,rAX");
11139 switch (pIemCpu->enmEffOpSize)
11140 {
11141 case IEMMODE_16BIT:
11142 switch (pIemCpu->enmEffAddrMode)
11143 {
11144 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_ax_m16);
11145 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_ax_m32);
11146 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_ax_m64);
11147 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11148 }
11149 break;
11150 case IEMMODE_32BIT:
11151 switch (pIemCpu->enmEffAddrMode)
11152 {
11153 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_eax_m16);
11154 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_eax_m32);
11155 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_eax_m64);
11156 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11157 }
11158 case IEMMODE_64BIT:
11159 switch (pIemCpu->enmEffAddrMode)
11160 {
11161 case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_3);
11162 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_rax_m32);
11163 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_rax_m64);
11164 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11165 }
11166 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11167 }
11168 }
11169 IEMOP_MNEMONIC("stos Yv,rAX");
11170
11171 /*
11172 * Annoying double switch here.
11173 * Using ugly macro for implementing the cases, sharing it with stosb.
11174 */
11175 switch (pIemCpu->enmEffOpSize)
11176 {
11177 case IEMMODE_16BIT:
11178 switch (pIemCpu->enmEffAddrMode)
11179 {
11180 case IEMMODE_16BIT: IEM_STOS_CASE(16, 16); break;
11181 case IEMMODE_32BIT: IEM_STOS_CASE(16, 32); break;
11182 case IEMMODE_64BIT: IEM_STOS_CASE(16, 64); break;
11183 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11184 }
11185 break;
11186
11187 case IEMMODE_32BIT:
11188 switch (pIemCpu->enmEffAddrMode)
11189 {
11190 case IEMMODE_16BIT: IEM_STOS_CASE(32, 16); break;
11191 case IEMMODE_32BIT: IEM_STOS_CASE(32, 32); break;
11192 case IEMMODE_64BIT: IEM_STOS_CASE(32, 64); break;
11193 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11194 }
11195 break;
11196
11197 case IEMMODE_64BIT:
11198 switch (pIemCpu->enmEffAddrMode)
11199 {
11200 case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_4); /* cannot be encoded */ break;
11201 case IEMMODE_32BIT: IEM_STOS_CASE(64, 32); break;
11202 case IEMMODE_64BIT: IEM_STOS_CASE(64, 64); break;
11203 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11204 }
11205 break;
11206 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11207 }
11208 return VINF_SUCCESS;
11209}
11210
11211#undef IEM_STOS_CASE
11212
/** Macro used by iemOp_lodsb_AL_Xb and iemOp_lodswd_eAX_Xv.
 *
 * Emits the micro-op sequence for one non-repeated LODS iteration: load a
 * ValBits-wide value from iEffSeg:xSI into rAX, then step xSI by the
 * element size in the direction selected by EFLAGS.DF.
 *
 * @param   ValBits     Operand width in bits (8, 16, 32 or 64).
 * @param   AddrBits    Effective address width in bits (16, 32 or 64). */
#define IEM_LODS_CASE(ValBits, AddrBits) \
    IEM_MC_BEGIN(0, 2); \
    IEM_MC_LOCAL(uint##ValBits##_t, uValue); \
    IEM_MC_LOCAL(RTGCPTR, uAddr); \
    IEM_MC_FETCH_GREG_U##AddrBits##_ZX_U64(uAddr, X86_GREG_xSI); \
    IEM_MC_FETCH_MEM_U##ValBits(uValue, pIemCpu->iEffSeg, uAddr); \
    IEM_MC_STORE_GREG_U##ValBits(X86_GREG_xAX, uValue); \
    IEM_MC_IF_EFL_BIT_SET(X86_EFL_DF) { \
        IEM_MC_SUB_GREG_U##AddrBits(X86_GREG_xSI, ValBits / 8); \
    } IEM_MC_ELSE() { \
        IEM_MC_ADD_GREG_U##AddrBits(X86_GREG_xSI, ValBits / 8); \
    } IEM_MC_ENDIF(); \
    IEM_MC_ADVANCE_RIP(); \
    IEM_MC_END();
11228
/** Opcode 0xac. */
FNIEMOP_DEF(iemOp_lodsb_AL_Xb)
{
    IEMOP_HLP_NO_LOCK_PREFIX();

    /*
     * Use the C implementation if a repeat prefix is encountered.
     * (REP and REPNE are treated alike - LODS does not terminate on ZF.)
     */
    if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
    {
        IEMOP_MNEMONIC("rep lodsb al,Xb");
        switch (pIemCpu->enmEffAddrMode)
        {
            case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_al_m16, pIemCpu->iEffSeg);
            case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_al_m32, pIemCpu->iEffSeg);
            case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_al_m64, pIemCpu->iEffSeg);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    IEMOP_MNEMONIC("lodsb al,Xb");

    /*
     * Sharing case implementation with lods[wdq] below (IEM_LODS_CASE).
     */
    switch (pIemCpu->enmEffAddrMode)
    {
        case IEMMODE_16BIT: IEM_LODS_CASE(8, 16); break;
        case IEMMODE_32BIT: IEM_LODS_CASE(8, 32); break;
        case IEMMODE_64BIT: IEM_LODS_CASE(8, 64); break;
        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
    return VINF_SUCCESS;
}
11262
11263
11264/** Opcode 0xad. */
11265FNIEMOP_DEF(iemOp_lodswd_eAX_Xv)
11266{
11267 IEMOP_HLP_NO_LOCK_PREFIX();
11268
11269 /*
11270 * Use the C implementation if a repeat prefix is encountered.
11271 */
11272 if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
11273 {
11274 IEMOP_MNEMONIC("rep lods rAX,Xv");
11275 switch (pIemCpu->enmEffOpSize)
11276 {
11277 case IEMMODE_16BIT:
11278 switch (pIemCpu->enmEffAddrMode)
11279 {
11280 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_ax_m16, pIemCpu->iEffSeg);
11281 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_ax_m32, pIemCpu->iEffSeg);
11282 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_ax_m64, pIemCpu->iEffSeg);
11283 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11284 }
11285 break;
11286 case IEMMODE_32BIT:
11287 switch (pIemCpu->enmEffAddrMode)
11288 {
11289 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_eax_m16, pIemCpu->iEffSeg);
11290 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_eax_m32, pIemCpu->iEffSeg);
11291 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_eax_m64, pIemCpu->iEffSeg);
11292 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11293 }
11294 case IEMMODE_64BIT:
11295 switch (pIemCpu->enmEffAddrMode)
11296 {
11297 case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_3);
11298 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_rax_m32, pIemCpu->iEffSeg);
11299 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_rax_m64, pIemCpu->iEffSeg);
11300 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11301 }
11302 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11303 }
11304 }
11305 IEMOP_MNEMONIC("lods rAX,Xv");
11306
11307 /*
11308 * Annoying double switch here.
11309 * Using ugly macro for implementing the cases, sharing it with lodsb.
11310 */
11311 switch (pIemCpu->enmEffOpSize)
11312 {
11313 case IEMMODE_16BIT:
11314 switch (pIemCpu->enmEffAddrMode)
11315 {
11316 case IEMMODE_16BIT: IEM_LODS_CASE(16, 16); break;
11317 case IEMMODE_32BIT: IEM_LODS_CASE(16, 32); break;
11318 case IEMMODE_64BIT: IEM_LODS_CASE(16, 64); break;
11319 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11320 }
11321 break;
11322
11323 case IEMMODE_32BIT:
11324 switch (pIemCpu->enmEffAddrMode)
11325 {
11326 case IEMMODE_16BIT: IEM_LODS_CASE(32, 16); break;
11327 case IEMMODE_32BIT: IEM_LODS_CASE(32, 32); break;
11328 case IEMMODE_64BIT: IEM_LODS_CASE(32, 64); break;
11329 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11330 }
11331 break;
11332
11333 case IEMMODE_64BIT:
11334 switch (pIemCpu->enmEffAddrMode)
11335 {
11336 case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_4); /* cannot be encoded */ break;
11337 case IEMMODE_32BIT: IEM_LODS_CASE(64, 32); break;
11338 case IEMMODE_64BIT: IEM_LODS_CASE(64, 64); break;
11339 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11340 }
11341 break;
11342 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11343 }
11344 return VINF_SUCCESS;
11345}
11346
11347#undef IEM_LODS_CASE
11348
/** Macro used by iemOp_scasb_AL_Xb and iemOp_scaswd_eAX_Xv.
 *
 * Emits the micro-op sequence for one non-repeated SCAS iteration: compare
 * rAX (ValBits wide) against the value at ES:xDI using the CMP worker (only
 * EFLAGS are written, no memory or rAX update), then step xDI by the
 * element size in the direction selected by EFLAGS.DF.
 *
 * @param   ValBits     Operand width in bits (8, 16, 32 or 64).
 * @param   AddrBits    Effective address width in bits (16, 32 or 64). */
#define IEM_SCAS_CASE(ValBits, AddrBits) \
    IEM_MC_BEGIN(3, 2); \
    IEM_MC_ARG(uint##ValBits##_t *, puRax, 0); \
    IEM_MC_ARG(uint##ValBits##_t, uValue, 1); \
    IEM_MC_ARG(uint32_t *, pEFlags, 2); \
    IEM_MC_LOCAL(RTGCPTR, uAddr); \
    \
    IEM_MC_FETCH_GREG_U##AddrBits##_ZX_U64(uAddr, X86_GREG_xDI); \
    IEM_MC_FETCH_MEM_U##ValBits(uValue, X86_SREG_ES, uAddr); \
    IEM_MC_REF_GREG_U##ValBits(puRax, X86_GREG_xAX); \
    IEM_MC_REF_EFLAGS(pEFlags); \
    IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_cmp_u##ValBits, puRax, uValue, pEFlags); \
    \
    IEM_MC_IF_EFL_BIT_SET(X86_EFL_DF) { \
        IEM_MC_SUB_GREG_U##AddrBits(X86_GREG_xDI, ValBits / 8); \
    } IEM_MC_ELSE() { \
        IEM_MC_ADD_GREG_U##AddrBits(X86_GREG_xDI, ValBits / 8); \
    } IEM_MC_ENDIF(); \
    IEM_MC_ADVANCE_RIP(); \
    IEM_MC_END();
11370
/** Opcode 0xae. */
FNIEMOP_DEF(iemOp_scasb_AL_Xb)
{
    IEMOP_HLP_NO_LOCK_PREFIX();

    /*
     * Use the C implementation if a repeat prefix is encountered.
     * Unlike STOS/LODS, SCAS distinguishes REPE (terminate when ZF clears)
     * from REPNE (terminate when ZF sets), so each gets its own workers.
     */
    if (pIemCpu->fPrefixes & IEM_OP_PRF_REPZ)
    {
        IEMOP_MNEMONIC("repe scasb al,Xb");
        switch (pIemCpu->enmEffAddrMode)
        {
            case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_al_m16);
            case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_al_m32);
            case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_al_m64);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    if (pIemCpu->fPrefixes & IEM_OP_PRF_REPNZ)
    {
        IEMOP_MNEMONIC("repne scasb al,Xb");
        switch (pIemCpu->enmEffAddrMode)
        {
            case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_al_m16);
            case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_al_m32);
            case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_al_m64);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    IEMOP_MNEMONIC("scasb al,Xb");

    /*
     * Sharing case implementation with scas[wdq] below (IEM_SCAS_CASE).
     */
    switch (pIemCpu->enmEffAddrMode)
    {
        case IEMMODE_16BIT: IEM_SCAS_CASE(8, 16); break;
        case IEMMODE_32BIT: IEM_SCAS_CASE(8, 32); break;
        case IEMMODE_64BIT: IEM_SCAS_CASE(8, 64); break;
        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
    return VINF_SUCCESS;
}
11415
11416
11417/** Opcode 0xaf. */
11418FNIEMOP_DEF(iemOp_scaswd_eAX_Xv)
11419{
11420 IEMOP_HLP_NO_LOCK_PREFIX();
11421
11422 /*
11423 * Use the C implementation if a repeat prefix is encountered.
11424 */
11425 if (pIemCpu->fPrefixes & IEM_OP_PRF_REPZ)
11426 {
11427 IEMOP_MNEMONIC("repe scas rAX,Xv");
11428 switch (pIemCpu->enmEffOpSize)
11429 {
11430 case IEMMODE_16BIT:
11431 switch (pIemCpu->enmEffAddrMode)
11432 {
11433 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_ax_m16);
11434 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_ax_m32);
11435 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_ax_m64);
11436 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11437 }
11438 break;
11439 case IEMMODE_32BIT:
11440 switch (pIemCpu->enmEffAddrMode)
11441 {
11442 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_eax_m16);
11443 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_eax_m32);
11444 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_eax_m64);
11445 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11446 }
11447 case IEMMODE_64BIT:
11448 switch (pIemCpu->enmEffAddrMode)
11449 {
11450 case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_3); /** @todo It's this wrong, we can do 16-bit addressing in 64-bit mode, but not 32-bit. right? */
11451 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_rax_m32);
11452 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_rax_m64);
11453 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11454 }
11455 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11456 }
11457 }
11458 if (pIemCpu->fPrefixes & IEM_OP_PRF_REPNZ)
11459 {
11460 IEMOP_MNEMONIC("repne scas rAX,Xv");
11461 switch (pIemCpu->enmEffOpSize)
11462 {
11463 case IEMMODE_16BIT:
11464 switch (pIemCpu->enmEffAddrMode)
11465 {
11466 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_ax_m16);
11467 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_ax_m32);
11468 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_ax_m64);
11469 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11470 }
11471 break;
11472 case IEMMODE_32BIT:
11473 switch (pIemCpu->enmEffAddrMode)
11474 {
11475 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_eax_m16);
11476 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_eax_m32);
11477 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_eax_m64);
11478 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11479 }
11480 case IEMMODE_64BIT:
11481 switch (pIemCpu->enmEffAddrMode)
11482 {
11483 case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_3);
11484 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_rax_m32);
11485 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_rax_m64);
11486 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11487 }
11488 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11489 }
11490 }
11491 IEMOP_MNEMONIC("scas rAX,Xv");
11492
11493 /*
11494 * Annoying double switch here.
11495 * Using ugly macro for implementing the cases, sharing it with scasb.
11496 */
11497 switch (pIemCpu->enmEffOpSize)
11498 {
11499 case IEMMODE_16BIT:
11500 switch (pIemCpu->enmEffAddrMode)
11501 {
11502 case IEMMODE_16BIT: IEM_SCAS_CASE(16, 16); break;
11503 case IEMMODE_32BIT: IEM_SCAS_CASE(16, 32); break;
11504 case IEMMODE_64BIT: IEM_SCAS_CASE(16, 64); break;
11505 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11506 }
11507 break;
11508
11509 case IEMMODE_32BIT:
11510 switch (pIemCpu->enmEffAddrMode)
11511 {
11512 case IEMMODE_16BIT: IEM_SCAS_CASE(32, 16); break;
11513 case IEMMODE_32BIT: IEM_SCAS_CASE(32, 32); break;
11514 case IEMMODE_64BIT: IEM_SCAS_CASE(32, 64); break;
11515 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11516 }
11517 break;
11518
11519 case IEMMODE_64BIT:
11520 switch (pIemCpu->enmEffAddrMode)
11521 {
11522 case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_4); /* cannot be encoded */ break;
11523 case IEMMODE_32BIT: IEM_SCAS_CASE(64, 32); break;
11524 case IEMMODE_64BIT: IEM_SCAS_CASE(64, 64); break;
11525 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11526 }
11527 break;
11528 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11529 }
11530 return VINF_SUCCESS;
11531}
11532
11533#undef IEM_SCAS_CASE
11534
11535/**
11536 * Common 'mov r8, imm8' helper.
11537 */
11538FNIEMOP_DEF_1(iemOpCommonMov_r8_Ib, uint8_t, iReg)
11539{
11540 uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
11541 IEMOP_HLP_NO_LOCK_PREFIX();
11542
11543 IEM_MC_BEGIN(0, 1);
11544 IEM_MC_LOCAL_CONST(uint8_t, u8Value,/*=*/ u8Imm);
11545 IEM_MC_STORE_GREG_U8(iReg, u8Value);
11546 IEM_MC_ADVANCE_RIP();
11547 IEM_MC_END();
11548
11549 return VINF_SUCCESS;
11550}
11551
11552
/** Opcode 0xb0. */
FNIEMOP_DEF(iemOp_mov_AL_Ib)
{
    IEMOP_MNEMONIC("mov AL,Ib");
    /* REX.B extends 0xb0 to address R8L. */
    return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xAX | pIemCpu->uRexB);
}
11559
11560
/** Opcode 0xb1. */
/* NOTE(review): name lacks the mov_ prefix used by iemOp_mov_AL_Ib; kept as-is
 * since the opcode dispatch table references it. */
FNIEMOP_DEF(iemOp_CL_Ib)
{
    IEMOP_MNEMONIC("mov CL,Ib");
    return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xCX | pIemCpu->uRexB);
}
11567
11568
/** Opcode 0xb2. */
FNIEMOP_DEF(iemOp_DL_Ib)
{
    IEMOP_MNEMONIC("mov DL,Ib");
    return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xDX | pIemCpu->uRexB);
}
11575
11576
/** Opcode 0xb3. */
FNIEMOP_DEF(iemOp_BL_Ib)
{
    IEMOP_MNEMONIC("mov BL,Ib");
    return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xBX | pIemCpu->uRexB);
}
11583
11584
/** Opcode 0xb4. */
FNIEMOP_DEF(iemOp_mov_AH_Ib)
{
    IEMOP_MNEMONIC("mov AH,Ib");
    /* Register index 4 means AH without REX, SPL with a REX prefix - the
       byte-register accessors presumably resolve this; confirm in the
       GREG_U8 store path. */
    return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xSP | pIemCpu->uRexB);
}
11591
11592
/** Opcode 0xb5. */
FNIEMOP_DEF(iemOp_CH_Ib)
{
    IEMOP_MNEMONIC("mov CH,Ib");
    /* Index 5: CH without REX, BPL with REX. */
    return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xBP | pIemCpu->uRexB);
}
11599
11600
/** Opcode 0xb6. */
FNIEMOP_DEF(iemOp_DH_Ib)
{
    IEMOP_MNEMONIC("mov DH,Ib");
    /* Index 6: DH without REX, SIL with REX. */
    return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xSI | pIemCpu->uRexB);
}
11607
11608
/** Opcode 0xb7. */
FNIEMOP_DEF(iemOp_BH_Ib)
{
    IEMOP_MNEMONIC("mov BH,Ib");
    /* Index 7: BH without REX, DIL with REX. */
    return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xDI | pIemCpu->uRexB);
}
11615
11616
11617/**
11618 * Common 'mov regX,immX' helper.
11619 */
11620FNIEMOP_DEF_1(iemOpCommonMov_Rv_Iv, uint8_t, iReg)
11621{
11622 switch (pIemCpu->enmEffOpSize)
11623 {
11624 case IEMMODE_16BIT:
11625 {
11626 uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
11627 IEMOP_HLP_NO_LOCK_PREFIX();
11628
11629 IEM_MC_BEGIN(0, 1);
11630 IEM_MC_LOCAL_CONST(uint16_t, u16Value,/*=*/ u16Imm);
11631 IEM_MC_STORE_GREG_U16(iReg, u16Value);
11632 IEM_MC_ADVANCE_RIP();
11633 IEM_MC_END();
11634 break;
11635 }
11636
11637 case IEMMODE_32BIT:
11638 {
11639 uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
11640 IEMOP_HLP_NO_LOCK_PREFIX();
11641
11642 IEM_MC_BEGIN(0, 1);
11643 IEM_MC_LOCAL_CONST(uint32_t, u32Value,/*=*/ u32Imm);
11644 IEM_MC_STORE_GREG_U32(iReg, u32Value);
11645 IEM_MC_ADVANCE_RIP();
11646 IEM_MC_END();
11647 break;
11648 }
11649 case IEMMODE_64BIT:
11650 {
11651 uint64_t u64Imm; IEM_OPCODE_GET_NEXT_U64(&u64Imm); /* 64-bit immediate! */
11652 IEMOP_HLP_NO_LOCK_PREFIX();
11653
11654 IEM_MC_BEGIN(0, 1);
11655 IEM_MC_LOCAL_CONST(uint64_t, u64Value,/*=*/ u64Imm);
11656 IEM_MC_STORE_GREG_U64(iReg, u64Value);
11657 IEM_MC_ADVANCE_RIP();
11658 IEM_MC_END();
11659 break;
11660 }
11661 }
11662
11663 return VINF_SUCCESS;
11664}
11665
11666
/** Opcode 0xb8. */
FNIEMOP_DEF(iemOp_eAX_Iv)
{
    IEMOP_MNEMONIC("mov rAX,IV");
    return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xAX | pIemCpu->uRexB);
}
11673
11674
/** Opcode 0xb9. */
FNIEMOP_DEF(iemOp_eCX_Iv)
{
    IEMOP_MNEMONIC("mov rCX,IV");
    return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xCX | pIemCpu->uRexB);
}
11681
11682
/** Opcode 0xba. */
FNIEMOP_DEF(iemOp_eDX_Iv)
{
    IEMOP_MNEMONIC("mov rDX,IV");
    return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xDX | pIemCpu->uRexB);
}
11689
11690
/** Opcode 0xbb. */
FNIEMOP_DEF(iemOp_eBX_Iv)
{
    IEMOP_MNEMONIC("mov rBX,IV");
    return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xBX | pIemCpu->uRexB);
}
11697
11698
/** Opcode 0xbc. */
FNIEMOP_DEF(iemOp_eSP_Iv)
{
    IEMOP_MNEMONIC("mov rSP,IV");
    return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xSP | pIemCpu->uRexB);
}
11705
11706
/** Opcode 0xbd. */
FNIEMOP_DEF(iemOp_eBP_Iv)
{
    IEMOP_MNEMONIC("mov rBP,IV");
    return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xBP | pIemCpu->uRexB);
}
11713
11714
/** Opcode 0xbe. */
FNIEMOP_DEF(iemOp_eSI_Iv)
{
    IEMOP_MNEMONIC("mov rSI,IV");
    return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xSI | pIemCpu->uRexB);
}
11721
11722
/** Opcode 0xbf. */
FNIEMOP_DEF(iemOp_eDI_Iv)
{
    IEMOP_MNEMONIC("mov rDI,IV");
    return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xDI | pIemCpu->uRexB);
}
11729
11730
/** Opcode 0xc0.
 * Group 2 byte shifts/rotates with an imm8 count: rol/ror/rcl/rcr/shl/shr/sar Eb,Ib.
 * The ModR/M reg field selects the operation; /6 is undefined and #UDs. */
FNIEMOP_DEF(iemOp_Grp2_Eb_Ib)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    PCIEMOPSHIFTSIZES pImpl;
    switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
    {
        case 0: pImpl = &g_iemAImpl_rol; IEMOP_MNEMONIC("rol Eb,Ib"); break;
        case 1: pImpl = &g_iemAImpl_ror; IEMOP_MNEMONIC("ror Eb,Ib"); break;
        case 2: pImpl = &g_iemAImpl_rcl; IEMOP_MNEMONIC("rcl Eb,Ib"); break;
        case 3: pImpl = &g_iemAImpl_rcr; IEMOP_MNEMONIC("rcr Eb,Ib"); break;
        case 4: pImpl = &g_iemAImpl_shl; IEMOP_MNEMONIC("shl Eb,Ib"); break;
        case 5: pImpl = &g_iemAImpl_shr; IEMOP_MNEMONIC("shr Eb,Ib"); break;
        case 7: pImpl = &g_iemAImpl_sar; IEMOP_MNEMONIC("sar Eb,Ib"); break;
        case 6: return IEMOP_RAISE_INVALID_OPCODE();
        IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* gcc maybe stupid */
    }
    /* Shifts/rotates leave OF (for counts != 1) and AF undefined. */
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_AF);

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register */
        uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift);
        IEMOP_HLP_NO_LOCK_PREFIX();
        IEM_MC_BEGIN(3, 0);
        IEM_MC_ARG(uint8_t *, pu8Dst, 0);
        IEM_MC_ARG_CONST(uint8_t, cShiftArg, cShift, 1);
        IEM_MC_ARG(uint32_t *, pEFlags, 2);
        IEM_MC_REF_GREG_U8(pu8Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_REF_EFLAGS(pEFlags);
        IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, cShiftArg, pEFlags);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory; note the imm8 count is fetched after the effective address
           (it follows the displacement in the instruction stream). */
        IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
        IEM_MC_BEGIN(3, 2);
        IEM_MC_ARG(uint8_t *, pu8Dst, 0);
        IEM_MC_ARG(uint8_t, cShiftArg, 1);
        IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1);
        uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift);
        IEM_MC_ASSIGN(cShiftArg, cShift);
        IEM_MC_MEM_MAP(pu8Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
        IEM_MC_FETCH_EFLAGS(EFlags);
        IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, cShiftArg, pEFlags);

        IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Dst, IEM_ACCESS_DATA_RW);
        IEM_MC_COMMIT_EFLAGS(EFlags);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
11789
11790
/** Opcode 0xc1.
 * Group 2 word/dword/qword shifts/rotates with an imm8 count:
 * rol/ror/rcl/rcr/shl/shr/sar Ev,Ib. /6 is undefined and #UDs. */
FNIEMOP_DEF(iemOp_Grp2_Ev_Ib)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    PCIEMOPSHIFTSIZES pImpl;
    switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
    {
        case 0: pImpl = &g_iemAImpl_rol; IEMOP_MNEMONIC("rol Ev,Ib"); break;
        case 1: pImpl = &g_iemAImpl_ror; IEMOP_MNEMONIC("ror Ev,Ib"); break;
        case 2: pImpl = &g_iemAImpl_rcl; IEMOP_MNEMONIC("rcl Ev,Ib"); break;
        case 3: pImpl = &g_iemAImpl_rcr; IEMOP_MNEMONIC("rcr Ev,Ib"); break;
        case 4: pImpl = &g_iemAImpl_shl; IEMOP_MNEMONIC("shl Ev,Ib"); break;
        case 5: pImpl = &g_iemAImpl_shr; IEMOP_MNEMONIC("shr Ev,Ib"); break;
        case 7: pImpl = &g_iemAImpl_sar; IEMOP_MNEMONIC("sar Ev,Ib"); break;
        case 6: return IEMOP_RAISE_INVALID_OPCODE();
        IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* gcc maybe stupid */
    }
    /* Shifts/rotates leave OF (for counts != 1) and AF undefined. */
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_AF);

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register */
        uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift);
        IEMOP_HLP_NO_LOCK_PREFIX();
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint16_t *, pu16Dst, 0);
                IEM_MC_ARG_CONST(uint8_t, cShiftArg, cShift, 1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, cShiftArg, pEFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint32_t *, pu32Dst, 0);
                IEM_MC_ARG_CONST(uint8_t, cShiftArg, cShift, 1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, cShiftArg, pEFlags);
                /* 32-bit register writes zero the high dword of the 64-bit register. */
                IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint64_t *, pu64Dst, 0);
                IEM_MC_ARG_CONST(uint8_t, cShiftArg, cShift, 1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, cShiftArg, pEFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /* memory; the imm8 count is fetched after the effective address
           (it follows the displacement in the instruction stream). */
        IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint16_t *, pu16Dst, 0);
                IEM_MC_ARG(uint8_t, cShiftArg, 1);
                IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1);
                uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift);
                IEM_MC_ASSIGN(cShiftArg, cShift);
                IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_EFLAGS(EFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, cShiftArg, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint32_t *, pu32Dst, 0);
                IEM_MC_ARG(uint8_t, cShiftArg, 1);
                IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1);
                uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift);
                IEM_MC_ASSIGN(cShiftArg, cShift);
                IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_EFLAGS(EFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, cShiftArg, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint64_t *, pu64Dst, 0);
                IEM_MC_ARG(uint8_t, cShiftArg, 1);
                IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1);
                uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift);
                IEM_MC_ASSIGN(cShiftArg, cShift);
                IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_EFLAGS(EFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, cShiftArg, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, IEM_ACCESS_DATA_RW);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
11927
11928
/** Opcode 0xc2.
 * Near return popping Iw extra bytes off the stack. Defers to the common
 * retn C implementation with the imm16 pop count. */
FNIEMOP_DEF(iemOp_retn_Iw)
{
    IEMOP_MNEMONIC("retn Iw");
    uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    /* Near branches default to 64-bit operand size in long mode. */
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_retn, pIemCpu->enmEffOpSize, u16Imm);
}
11938
11939
/** Opcode 0xc3.
 * Plain near return; same worker as 0xc2 with a zero pop count. */
FNIEMOP_DEF(iemOp_retn)
{
    IEMOP_MNEMONIC("retn");
    /* Near branches default to 64-bit operand size in long mode. */
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_retn, pIemCpu->enmEffOpSize, 0);
}
11948
11949
/** Opcode 0xc4. */
FNIEMOP_DEF(iemOp_les_Gv_Mp)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    if (   pIemCpu->enmCpuMode == IEMMODE_64BIT
        || (bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        IEMOP_MNEMONIC("2-byte-vex");
        /* The LES instruction is invalid in 64-bit mode. In legacy and
           compatibility mode it is invalid with MOD=3.
           The use as a VEX prefix is made possible by assigning the inverted
           REX.R to the top MOD bit, and the top bit in the inverted register
           specifier to the bottom MOD bit, thereby effectively limiting 32-bit
           VEX to accessing registers 0..7 in this form. */
        /** @todo VEX: Just use new tables for it. */
        return IEMOP_RAISE_INVALID_OPCODE();
    }
    IEMOP_MNEMONIC("les Gv,Mp");
    /* Load the far pointer at Mp into ES:Gv via the common worker. */
    return FNIEMOP_CALL_2(iemOpCommonLoadSRegAndGreg, X86_SREG_ES, bRm);
}
11970
11971
/** Opcode 0xc5. */
FNIEMOP_DEF(iemOp_lds_Gv_Mp)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    if (   pIemCpu->enmCpuMode == IEMMODE_64BIT
        || (bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        IEMOP_MNEMONIC("3-byte-vex");
        /* The LDS instruction is invalid 64-bit mode. In legacy and
           compatibility mode it is invalid with MOD=3.
           The use as a VEX prefix is made possible by assigning the inverted
           REX.R and REX.X to the two MOD bits, since the REX bits are ignored
           outside of 64-bit mode. */
        /** @todo VEX: Just use new tables for it. */
        return IEMOP_RAISE_INVALID_OPCODE();
    }
    IEMOP_MNEMONIC("lds Gv,Mp");
    /* Load DS:Gv from the far pointer at Mp via the shared worker. */
    return FNIEMOP_CALL_2(iemOpCommonLoadSRegAndGreg, X86_SREG_DS, bRm);
}
11991
11992
/** Opcode 0xc6. */
FNIEMOP_DEF(iemOp_Grp11_Eb_Ib)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
    if ((bRm & X86_MODRM_REG_MASK) != (0 << X86_MODRM_REG_SHIFT)) /* only mov Eb,Ib in this group. */
        return IEMOP_RAISE_INVALID_OPCODE();
    IEMOP_MNEMONIC("mov Eb,Ib");

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register access */
        uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
        IEM_MC_BEGIN(0, 0);
        IEM_MC_STORE_GREG_U8((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u8Imm);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory access. */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        /* The trailing '1' tells the effective-address calculation that one
           more opcode byte (the imm8) follows, so RIP-relative addressing
           and instruction length come out right. */
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1);
        uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
        IEM_MC_STORE_MEM_U8(pIemCpu->iEffSeg, GCPtrEffDst, u8Imm);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
12024
12025
/** Opcode 0xc7. */
FNIEMOP_DEF(iemOp_Grp11_Ev_Iz)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
    if ((bRm & X86_MODRM_REG_MASK) != (0 << X86_MODRM_REG_SHIFT)) /* only mov Ev,Iz in this group. */
        return IEMOP_RAISE_INVALID_OPCODE();
    IEMOP_MNEMONIC("mov Ev,Iz");

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register access */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(0, 0);
                uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
                IEM_MC_STORE_GREG_U16((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u16Imm);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(0, 0);
                uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
                IEM_MC_STORE_GREG_U32((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u32Imm);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(0, 0);
                /* 64-bit mov uses a 32-bit immediate sign-extended to 64 bits. */
                uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm);
                IEM_MC_STORE_GREG_U64((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u64Imm);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /* memory access. The immediate size (2 or 4 bytes) is passed to the
           effective-address calculation so instruction length is correct. */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 2);
                uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
                IEM_MC_STORE_MEM_U16(pIemCpu->iEffSeg, GCPtrEffDst, u16Imm);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 4);
                uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
                IEM_MC_STORE_MEM_U32(pIemCpu->iEffSeg, GCPtrEffDst, u32Imm);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 4); /* imm is still 4 bytes (sign-extended). */
                uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm);
                IEM_MC_STORE_MEM_U64(pIemCpu->iEffSeg, GCPtrEffDst, u64Imm);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
12106
12107
12108
12109
/** Opcode 0xc8. */
FNIEMOP_DEF(iemOp_enter_Iw_Ib)
{
    IEMOP_MNEMONIC("enter Iw,Ib");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); /* stack-frame op defaults to 64-bit operand size in long mode */
    IEMOP_HLP_NO_LOCK_PREFIX();
    /* Iw = frame size, Ib = nesting level; both handled by the C implementation. */
    uint16_t cbFrame;        IEM_OPCODE_GET_NEXT_U16(&cbFrame);
    uint8_t  u8NestingLevel; IEM_OPCODE_GET_NEXT_U8(&u8NestingLevel);
    return IEM_MC_DEFER_TO_CIMPL_3(iemCImpl_enter, pIemCpu->enmEffOpSize, cbFrame, u8NestingLevel);
}
12120
12121
12122/** Opcode 0xc9. */
12123FNIEMOP_DEF(iemOp_leave)
12124{
12125 IEMOP_MNEMONIC("retn");
12126 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
12127 IEMOP_HLP_NO_LOCK_PREFIX();
12128 return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_leave, pIemCpu->enmEffOpSize);
12129}
12130
12131
/** Opcode 0xca. */
FNIEMOP_DEF(iemOp_retf_Iw)
{
    IEMOP_MNEMONIC("retf Iw");
    /* Iw = number of bytes to pop off the stack after the far return. */
    uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_retf, pIemCpu->enmEffOpSize, u16Imm);
}
12141
12142
/** Opcode 0xcb. */
FNIEMOP_DEF(iemOp_retf)
{
    IEMOP_MNEMONIC("retf");
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    /* Same C implementation as 'retf Iw', with zero bytes to pop. */
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_retf, pIemCpu->enmEffOpSize, 0);
}
12151
12152
/** Opcode 0xcc. */
FNIEMOP_DEF(iemOp_int_3)
{
    /* INT3 breakpoint: raise #BP, flagged as originating from the dedicated
       one-byte breakpoint instruction (affects #DB/#BP delivery details). */
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_int, X86_XCPT_BP, true /*fIsBpInstr*/);
}
12158
12159
/** Opcode 0xcd. */
FNIEMOP_DEF(iemOp_int_Ib)
{
    /* Software interrupt with an arbitrary vector from the imm8 operand. */
    uint8_t u8Int; IEM_OPCODE_GET_NEXT_U8(&u8Int);
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_int, u8Int, false /*fIsBpInstr*/);
}
12166
12167
12168/** Opcode 0xce. */
12169FNIEMOP_DEF(iemOp_into)
12170{
12171 IEM_MC_BEGIN(2, 0);
12172 IEM_MC_ARG_CONST(uint8_t, u8Int, /*=*/ X86_XCPT_OF, 0);
12173 IEM_MC_ARG_CONST(bool, fIsBpInstr, /*=*/ false, 1);
12174 IEM_MC_CALL_CIMPL_2(iemCImpl_int, u8Int, fIsBpInstr);
12175 IEM_MC_END();
12176 return VINF_SUCCESS;
12177}
12178
12179
/** Opcode 0xcf. */
FNIEMOP_DEF(iemOp_iret)
{
    IEMOP_MNEMONIC("iret");
    IEMOP_HLP_NO_LOCK_PREFIX();
    /* Interrupt return is far too involved for microcode; defer to C. */
    return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_iret, pIemCpu->enmEffOpSize);
}
12187
12188
/** Opcode 0xd0. */
FNIEMOP_DEF(iemOp_Grp2_Eb_1)
{
    /* Group 2 shift/rotate on a byte operand with an implicit count of 1;
       the reg field of ModR/M selects the operation. */
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    PCIEMOPSHIFTSIZES pImpl;
    switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
    {
        case 0: pImpl = &g_iemAImpl_rol; IEMOP_MNEMONIC("rol Eb,1"); break;
        case 1: pImpl = &g_iemAImpl_ror; IEMOP_MNEMONIC("ror Eb,1"); break;
        case 2: pImpl = &g_iemAImpl_rcl; IEMOP_MNEMONIC("rcl Eb,1"); break;
        case 3: pImpl = &g_iemAImpl_rcr; IEMOP_MNEMONIC("rcr Eb,1"); break;
        case 4: pImpl = &g_iemAImpl_shl; IEMOP_MNEMONIC("shl Eb,1"); break;
        case 5: pImpl = &g_iemAImpl_shr; IEMOP_MNEMONIC("shr Eb,1"); break;
        case 7: pImpl = &g_iemAImpl_sar; IEMOP_MNEMONIC("sar Eb,1"); break;
        case 6: return IEMOP_RAISE_INVALID_OPCODE(); /* /6 is undefined in group 2 */
        IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* gcc maybe, well... */
    }
    /* OF and AF are architecturally undefined for some of these ops. */
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_AF);

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register */
        IEMOP_HLP_NO_LOCK_PREFIX();
        IEM_MC_BEGIN(3, 0);
        IEM_MC_ARG(uint8_t *, pu8Dst, 0);
        IEM_MC_ARG_CONST(uint8_t, cShiftArg,/*=*/1, 1);
        IEM_MC_ARG(uint32_t *, pEFlags, 2);
        IEM_MC_REF_GREG_U8(pu8Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_REF_EFLAGS(pEFlags);
        IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, cShiftArg, pEFlags);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory: map the byte read-write, run the op, then commit. */
        IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
        IEM_MC_BEGIN(3, 2);
        IEM_MC_ARG(uint8_t *, pu8Dst, 0);
        IEM_MC_ARG_CONST(uint8_t, cShiftArg,/*=*/1, 1);
        IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEM_MC_MEM_MAP(pu8Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
        IEM_MC_FETCH_EFLAGS(EFlags);
        IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, cShiftArg, pEFlags);

        IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Dst, IEM_ACCESS_DATA_RW);
        IEM_MC_COMMIT_EFLAGS(EFlags);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
12244
12245
12246
/** Opcode 0xd1. */
FNIEMOP_DEF(iemOp_Grp2_Ev_1)
{
    /* Group 2 shift/rotate on a word/dword/qword operand with an implicit
       count of 1; the reg field of ModR/M selects the operation. */
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    PCIEMOPSHIFTSIZES pImpl;
    switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
    {
        case 0: pImpl = &g_iemAImpl_rol; IEMOP_MNEMONIC("rol Ev,1"); break;
        case 1: pImpl = &g_iemAImpl_ror; IEMOP_MNEMONIC("ror Ev,1"); break;
        case 2: pImpl = &g_iemAImpl_rcl; IEMOP_MNEMONIC("rcl Ev,1"); break;
        case 3: pImpl = &g_iemAImpl_rcr; IEMOP_MNEMONIC("rcr Ev,1"); break;
        case 4: pImpl = &g_iemAImpl_shl; IEMOP_MNEMONIC("shl Ev,1"); break;
        case 5: pImpl = &g_iemAImpl_shr; IEMOP_MNEMONIC("shr Ev,1"); break;
        case 7: pImpl = &g_iemAImpl_sar; IEMOP_MNEMONIC("sar Ev,1"); break;
        case 6: return IEMOP_RAISE_INVALID_OPCODE(); /* /6 is undefined in group 2 */
        IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* gcc maybe, well... */
    }
    /* OF and AF are architecturally undefined for some of these ops. */
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_AF);

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register */
        IEMOP_HLP_NO_LOCK_PREFIX();
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint16_t *, pu16Dst, 0);
                IEM_MC_ARG_CONST(uint8_t, cShiftArg,/*=1*/1, 1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, cShiftArg, pEFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint32_t *, pu32Dst, 0);
                IEM_MC_ARG_CONST(uint8_t, cShiftArg,/*=1*/1, 1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, cShiftArg, pEFlags);
                /* 32-bit writes zero the upper half of the 64-bit register. */
                IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint64_t *, pu64Dst, 0);
                IEM_MC_ARG_CONST(uint8_t, cShiftArg,/*=1*/1, 1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, cShiftArg, pEFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /* memory: map the operand read-write, run the op, then commit. */
        IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint16_t *, pu16Dst, 0);
                IEM_MC_ARG_CONST(uint8_t, cShiftArg,/*=1*/1, 1);
                IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_EFLAGS(EFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, cShiftArg, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint32_t *, pu32Dst, 0);
                IEM_MC_ARG_CONST(uint8_t, cShiftArg,/*=1*/1, 1);
                IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_EFLAGS(EFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, cShiftArg, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint64_t *, pu64Dst, 0);
                IEM_MC_ARG_CONST(uint8_t, cShiftArg,/*=1*/1, 1);
                IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_EFLAGS(EFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, cShiftArg, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, IEM_ACCESS_DATA_RW);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
12376
12377
/** Opcode 0xd2. */
FNIEMOP_DEF(iemOp_Grp2_Eb_CL)
{
    /* Group 2 shift/rotate on a byte operand, count taken from CL;
       the reg field of ModR/M selects the operation. */
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    PCIEMOPSHIFTSIZES pImpl;
    switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
    {
        case 0: pImpl = &g_iemAImpl_rol; IEMOP_MNEMONIC("rol Eb,CL"); break;
        case 1: pImpl = &g_iemAImpl_ror; IEMOP_MNEMONIC("ror Eb,CL"); break;
        case 2: pImpl = &g_iemAImpl_rcl; IEMOP_MNEMONIC("rcl Eb,CL"); break;
        case 3: pImpl = &g_iemAImpl_rcr; IEMOP_MNEMONIC("rcr Eb,CL"); break;
        case 4: pImpl = &g_iemAImpl_shl; IEMOP_MNEMONIC("shl Eb,CL"); break;
        case 5: pImpl = &g_iemAImpl_shr; IEMOP_MNEMONIC("shr Eb,CL"); break;
        case 7: pImpl = &g_iemAImpl_sar; IEMOP_MNEMONIC("sar Eb,CL"); break;
        case 6: return IEMOP_RAISE_INVALID_OPCODE(); /* /6 is undefined in group 2 */
        IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* gcc, grr. */
    }
    /* OF and AF are architecturally undefined for some of these ops. */
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_AF);

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register */
        IEMOP_HLP_NO_LOCK_PREFIX();
        IEM_MC_BEGIN(3, 0);
        IEM_MC_ARG(uint8_t *, pu8Dst, 0);
        IEM_MC_ARG(uint8_t, cShiftArg, 1);
        IEM_MC_ARG(uint32_t *, pEFlags, 2);
        IEM_MC_REF_GREG_U8(pu8Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX); /* shift count from CL */
        IEM_MC_REF_EFLAGS(pEFlags);
        IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, cShiftArg, pEFlags);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory: map the byte read-write, run the op, then commit. */
        IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
        IEM_MC_BEGIN(3, 2);
        IEM_MC_ARG(uint8_t *, pu8Dst, 0);
        IEM_MC_ARG(uint8_t, cShiftArg, 1);
        IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX); /* shift count from CL */
        IEM_MC_MEM_MAP(pu8Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
        IEM_MC_FETCH_EFLAGS(EFlags);
        IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, cShiftArg, pEFlags);

        IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Dst, IEM_ACCESS_DATA_RW);
        IEM_MC_COMMIT_EFLAGS(EFlags);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
12435
12436
/** Opcode 0xd3. */
FNIEMOP_DEF(iemOp_Grp2_Ev_CL)
{
    /* Group 2 shift/rotate on a word/dword/qword operand, count taken from
       CL; the reg field of ModR/M selects the operation. */
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    PCIEMOPSHIFTSIZES pImpl;
    switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
    {
        case 0: pImpl = &g_iemAImpl_rol; IEMOP_MNEMONIC("rol Ev,CL"); break;
        case 1: pImpl = &g_iemAImpl_ror; IEMOP_MNEMONIC("ror Ev,CL"); break;
        case 2: pImpl = &g_iemAImpl_rcl; IEMOP_MNEMONIC("rcl Ev,CL"); break;
        case 3: pImpl = &g_iemAImpl_rcr; IEMOP_MNEMONIC("rcr Ev,CL"); break;
        case 4: pImpl = &g_iemAImpl_shl; IEMOP_MNEMONIC("shl Ev,CL"); break;
        case 5: pImpl = &g_iemAImpl_shr; IEMOP_MNEMONIC("shr Ev,CL"); break;
        case 7: pImpl = &g_iemAImpl_sar; IEMOP_MNEMONIC("sar Ev,CL"); break;
        case 6: return IEMOP_RAISE_INVALID_OPCODE(); /* /6 is undefined in group 2 */
        IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* gcc maybe stupid */
    }
    /* OF and AF are architecturally undefined for some of these ops. */
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_AF);

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register */
        IEMOP_HLP_NO_LOCK_PREFIX();
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint16_t *, pu16Dst, 0);
                IEM_MC_ARG(uint8_t, cShiftArg, 1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX); /* shift count from CL */
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, cShiftArg, pEFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint32_t *, pu32Dst, 0);
                IEM_MC_ARG(uint8_t, cShiftArg, 1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX); /* shift count from CL */
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, cShiftArg, pEFlags);
                /* 32-bit writes zero the upper half of the 64-bit register. */
                IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint64_t *, pu64Dst, 0);
                IEM_MC_ARG(uint8_t, cShiftArg, 1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX); /* shift count from CL */
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, cShiftArg, pEFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /* memory: map the operand read-write, run the op, then commit. */
        IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint16_t *, pu16Dst, 0);
                IEM_MC_ARG(uint8_t, cShiftArg, 1);
                IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX); /* shift count from CL */
                IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_EFLAGS(EFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, cShiftArg, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint32_t *, pu32Dst, 0);
                IEM_MC_ARG(uint8_t, cShiftArg, 1);
                IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX); /* shift count from CL */
                IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_EFLAGS(EFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, cShiftArg, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint64_t *, pu64Dst, 0);
                IEM_MC_ARG(uint8_t, cShiftArg, 1);
                IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX); /* shift count from CL */
                IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_EFLAGS(EFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, cShiftArg, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, IEM_ACCESS_DATA_RW);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
12572
/** Opcode 0xd4. */
FNIEMOP_DEF(iemOp_aam_Ib)
{
    IEMOP_MNEMONIC("aam Ib");
    uint8_t bImm; IEM_OPCODE_GET_NEXT_U8(&bImm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_NO_64BIT(); /* AAM is invalid in 64-bit mode */
    /* AAM divides AL by the immediate; a zero divisor raises #DE. */
    if (!bImm)
        return IEMOP_RAISE_DIVIDE_ERROR();
    return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_aam, bImm);
}
12584
12585
/** Opcode 0xd5. */
FNIEMOP_DEF(iemOp_aad_Ib)
{
    IEMOP_MNEMONIC("aad Ib");
    uint8_t bImm; IEM_OPCODE_GET_NEXT_U8(&bImm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_NO_64BIT(); /* AAD is invalid in 64-bit mode */
    /* No divide here (AAD multiplies), so a zero immediate is fine. */
    return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_aad, bImm);
}
12595
12596
/** Opcode 0xd7. */
FNIEMOP_DEF(iemOp_xlat)
{
    IEMOP_MNEMONIC("xlat");
    IEMOP_HLP_NO_LOCK_PREFIX();
    /* AL = [xBX + zero-extended AL], using the effective address size to pick
       the register width for the table base and index. */
    switch (pIemCpu->enmEffAddrMode)
    {
        case IEMMODE_16BIT:
            IEM_MC_BEGIN(2, 0);
            IEM_MC_LOCAL(uint8_t, u8Tmp);
            IEM_MC_LOCAL(uint16_t, u16Addr);
            IEM_MC_FETCH_GREG_U8_ZX_U16(u16Addr, X86_GREG_xAX);
            IEM_MC_ADD_GREG_U16_TO_LOCAL(u16Addr, X86_GREG_xBX);
            /* MEM16 variant: the 16-bit address wraps within the segment. */
            IEM_MC_FETCH_MEM16_U8(u8Tmp, pIemCpu->iEffSeg, u16Addr);
            IEM_MC_STORE_GREG_U8(X86_GREG_xAX, u8Tmp);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_32BIT:
            IEM_MC_BEGIN(2, 0);
            IEM_MC_LOCAL(uint8_t, u8Tmp);
            IEM_MC_LOCAL(uint32_t, u32Addr);
            IEM_MC_FETCH_GREG_U8_ZX_U32(u32Addr, X86_GREG_xAX);
            IEM_MC_ADD_GREG_U32_TO_LOCAL(u32Addr, X86_GREG_xBX);
            IEM_MC_FETCH_MEM32_U8(u8Tmp, pIemCpu->iEffSeg, u32Addr);
            IEM_MC_STORE_GREG_U8(X86_GREG_xAX, u8Tmp);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_64BIT:
            IEM_MC_BEGIN(2, 0);
            IEM_MC_LOCAL(uint8_t, u8Tmp);
            IEM_MC_LOCAL(uint64_t, u64Addr);
            IEM_MC_FETCH_GREG_U8_ZX_U64(u64Addr, X86_GREG_xAX);
            IEM_MC_ADD_GREG_U64_TO_LOCAL(u64Addr, X86_GREG_xBX);
            IEM_MC_FETCH_MEM_U8(u8Tmp, pIemCpu->iEffSeg, u64Addr);
            IEM_MC_STORE_GREG_U8(X86_GREG_xAX, u8Tmp);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
12643
12644
12645/**
12646 * Common worker for FPU instructions working on ST0 and STn, and storing the
12647 * result in ST0.
12648 *
12649 * @param pfnAImpl Pointer to the instruction implementation (assembly).
12650 */
12651FNIEMOP_DEF_2(iemOpHlpFpu_st0_stN, uint8_t, bRm, PFNIEMAIMPLFPUR80, pfnAImpl)
12652{
12653 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
12654
12655 IEM_MC_BEGIN(3, 1);
12656 IEM_MC_LOCAL(IEMFPURESULT, FpuRes);
12657 IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes, FpuRes, 0);
12658 IEM_MC_ARG(PCRTFLOAT80U, pr80Value1, 1);
12659 IEM_MC_ARG(PCRTFLOAT80U, pr80Value2, 2);
12660
12661 IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
12662 IEM_MC_MAYBE_RAISE_FPU_XCPT();
12663 IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(pr80Value1, 0, pr80Value2, bRm & X86_MODRM_RM_MASK)
12664 IEM_MC_CALL_FPU_AIMPL_3(pfnAImpl, pFpuRes, pr80Value1, pr80Value2);
12665 IEM_MC_STORE_FPU_RESULT(FpuRes, 0);
12666 IEM_MC_ELSE()
12667 IEM_MC_FPU_STACK_UNDERFLOW(0);
12668 IEM_MC_ENDIF();
12669 IEM_MC_USED_FPU();
12670 IEM_MC_ADVANCE_RIP();
12671
12672 IEM_MC_END();
12673 return VINF_SUCCESS;
12674}
12675
12676
12677/**
12678 * Common worker for FPU instructions working on ST0 and STn, and only affecting
12679 * flags.
12680 *
12681 * @param pfnAImpl Pointer to the instruction implementation (assembly).
12682 */
12683FNIEMOP_DEF_2(iemOpHlpFpuNoStore_st0_stN, uint8_t, bRm, PFNIEMAIMPLFPUR80FSW, pfnAImpl)
12684{
12685 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
12686
12687 IEM_MC_BEGIN(3, 1);
12688 IEM_MC_LOCAL(uint16_t, u16Fsw);
12689 IEM_MC_ARG_LOCAL_REF(uint16_t *, pu16Fsw, u16Fsw, 0);
12690 IEM_MC_ARG(PCRTFLOAT80U, pr80Value1, 1);
12691 IEM_MC_ARG(PCRTFLOAT80U, pr80Value2, 2);
12692
12693 IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
12694 IEM_MC_MAYBE_RAISE_FPU_XCPT();
12695 IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(pr80Value1, 0, pr80Value2, bRm & X86_MODRM_RM_MASK)
12696 IEM_MC_CALL_FPU_AIMPL_3(pfnAImpl, pu16Fsw, pr80Value1, pr80Value2);
12697 IEM_MC_UPDATE_FSW(u16Fsw);
12698 IEM_MC_ELSE()
12699 IEM_MC_FPU_STACK_UNDERFLOW(UINT8_MAX);
12700 IEM_MC_ENDIF();
12701 IEM_MC_USED_FPU();
12702 IEM_MC_ADVANCE_RIP();
12703
12704 IEM_MC_END();
12705 return VINF_SUCCESS;
12706}
12707
12708
12709/**
12710 * Common worker for FPU instructions working on ST0 and STn, only affecting
12711 * flags, and popping when done.
12712 *
12713 * @param pfnAImpl Pointer to the instruction implementation (assembly).
12714 */
12715FNIEMOP_DEF_2(iemOpHlpFpuNoStore_st0_stN_pop, uint8_t, bRm, PFNIEMAIMPLFPUR80FSW, pfnAImpl)
12716{
12717 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
12718
12719 IEM_MC_BEGIN(3, 1);
12720 IEM_MC_LOCAL(uint16_t, u16Fsw);
12721 IEM_MC_ARG_LOCAL_REF(uint16_t *, pu16Fsw, u16Fsw, 0);
12722 IEM_MC_ARG(PCRTFLOAT80U, pr80Value1, 1);
12723 IEM_MC_ARG(PCRTFLOAT80U, pr80Value2, 2);
12724
12725 IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
12726 IEM_MC_MAYBE_RAISE_FPU_XCPT();
12727 IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(pr80Value1, 0, pr80Value2, bRm & X86_MODRM_RM_MASK)
12728 IEM_MC_CALL_FPU_AIMPL_3(pfnAImpl, pu16Fsw, pr80Value1, pr80Value2);
12729 IEM_MC_UPDATE_FSW_THEN_POP(u16Fsw);
12730 IEM_MC_ELSE()
12731 IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(UINT8_MAX);
12732 IEM_MC_ENDIF();
12733 IEM_MC_USED_FPU();
12734 IEM_MC_ADVANCE_RIP();
12735
12736 IEM_MC_END();
12737 return VINF_SUCCESS;
12738}
12739
12740
/** Opcode 0xd8 11/0. FADD ST0,STn - dispatches to the st0/stN worker. */
FNIEMOP_DEF_1(iemOp_fadd_stN, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fadd st0,stN");
    return FNIEMOP_CALL_2(iemOpHlpFpu_st0_stN, bRm, iemAImpl_fadd_r80_by_r80);
}
12747
12748
/** Opcode 0xd8 11/1. FMUL ST0,STn - dispatches to the st0/stN worker. */
FNIEMOP_DEF_1(iemOp_fmul_stN, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fmul st0,stN");
    return FNIEMOP_CALL_2(iemOpHlpFpu_st0_stN, bRm, iemAImpl_fmul_r80_by_r80);
}
12755
12756
/** Opcode 0xd8 11/2. FCOM ST0,STn - flags-only compare, no store. */
FNIEMOP_DEF_1(iemOp_fcom_stN, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fcom st0,stN");
    return FNIEMOP_CALL_2(iemOpHlpFpuNoStore_st0_stN, bRm, iemAImpl_fcom_r80_by_r80);
}
12763
12764
/** Opcode 0xd8 11/3. FCOMP ST0,STn - flags-only compare, then pop.
 *  Reuses the FCOM assembly worker; the pop is done by the helper. */
FNIEMOP_DEF_1(iemOp_fcomp_stN, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fcomp st0,stN");
    return FNIEMOP_CALL_2(iemOpHlpFpuNoStore_st0_stN_pop, bRm, iemAImpl_fcom_r80_by_r80);
}
12771
12772
/** Opcode 0xd8 11/4. FSUB ST0,STn - dispatches to the st0/stN worker. */
FNIEMOP_DEF_1(iemOp_fsub_stN, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fsub st0,stN");
    return FNIEMOP_CALL_2(iemOpHlpFpu_st0_stN, bRm, iemAImpl_fsub_r80_by_r80);
}
12779
12780
/** Opcode 0xd8 11/5. FSUBR ST0,STn - reversed subtract, same worker. */
FNIEMOP_DEF_1(iemOp_fsubr_stN, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fsubr st0,stN");
    return FNIEMOP_CALL_2(iemOpHlpFpu_st0_stN, bRm, iemAImpl_fsubr_r80_by_r80);
}
12787
12788
/** Opcode 0xd8 11/6. FDIV ST0,STn - dispatches to the st0/stN worker. */
FNIEMOP_DEF_1(iemOp_fdiv_stN, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fdiv st0,stN");
    return FNIEMOP_CALL_2(iemOpHlpFpu_st0_stN, bRm, iemAImpl_fdiv_r80_by_r80);
}
12795
12796
/** Opcode 0xd8 11/7. FDIVR ST0,STn - reversed divide, same worker. */
FNIEMOP_DEF_1(iemOp_fdivr_stN, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fdivr st0,stN");
    return FNIEMOP_CALL_2(iemOpHlpFpu_st0_stN, bRm, iemAImpl_fdivr_r80_by_r80);
}
12803
12804
12805/**
12806 * Common worker for FPU instructions working on ST0 and an m32r, and storing
12807 * the result in ST0.
12808 *
12809 * @param pfnAImpl Pointer to the instruction implementation (assembly).
12810 */
12811FNIEMOP_DEF_2(iemOpHlpFpu_st0_m32r, uint8_t, bRm, PFNIEMAIMPLFPUR32, pfnAImpl)
12812{
12813 IEM_MC_BEGIN(3, 3);
12814 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
12815 IEM_MC_LOCAL(IEMFPURESULT, FpuRes);
12816 IEM_MC_LOCAL(RTFLOAT32U, r32Val2);
12817 IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes, FpuRes, 0);
12818 IEM_MC_ARG(PCRTFLOAT80U, pr80Value1, 1);
12819 IEM_MC_ARG_LOCAL_REF(PCRTFLOAT32U, pr32Val2, r32Val2, 2);
12820
12821 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
12822 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
12823
12824 IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
12825 IEM_MC_MAYBE_RAISE_FPU_XCPT();
12826 IEM_MC_FETCH_MEM_R32(r32Val2, pIemCpu->iEffSeg, GCPtrEffSrc);
12827
12828 IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value1, 0)
12829 IEM_MC_CALL_FPU_AIMPL_3(pfnAImpl, pFpuRes, pr80Value1, pr32Val2);
12830 IEM_MC_STORE_FPU_RESULT(FpuRes, 0);
12831 IEM_MC_ELSE()
12832 IEM_MC_FPU_STACK_UNDERFLOW(0);
12833 IEM_MC_ENDIF();
12834 IEM_MC_USED_FPU();
12835 IEM_MC_ADVANCE_RIP();
12836
12837 IEM_MC_END();
12838 return VINF_SUCCESS;
12839}
12840
12841
/** Opcode 0xd8 !11/0. FADD ST0,m32r - dispatches to the st0/m32r worker. */
FNIEMOP_DEF_1(iemOp_fadd_m32r, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fadd st0,m32r");
    return FNIEMOP_CALL_2(iemOpHlpFpu_st0_m32r, bRm, iemAImpl_fadd_r80_by_r32);
}
12848
12849
/** Opcode 0xd8 !11/1. FMUL ST0,m32r - dispatches to the st0/m32r worker. */
FNIEMOP_DEF_1(iemOp_fmul_m32r, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fmul st0,m32r");
    return FNIEMOP_CALL_2(iemOpHlpFpu_st0_m32r, bRm, iemAImpl_fmul_r80_by_r32);
}
12856
12857
/** Opcode 0xd8 !11/2. FCOM ST0,m32r - flags-only compare against a 32-bit
 *  real memory operand; updates FSW with the memory operand recorded for
 *  FPU exception state (FDP/FDS). */
FNIEMOP_DEF_1(iemOp_fcom_m32r, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fcom st0,m32r");

    IEM_MC_BEGIN(3, 3);
    IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
    IEM_MC_LOCAL(uint16_t, u16Fsw);
    IEM_MC_LOCAL(RTFLOAT32U, r32Val2);
    IEM_MC_ARG_LOCAL_REF(uint16_t *, pu16Fsw, u16Fsw, 0);
    IEM_MC_ARG(PCRTFLOAT80U, pr80Value1, 1);
    IEM_MC_ARG_LOCAL_REF(PCRTFLOAT32U, pr32Val2, r32Val2, 2);

    /* Finish decoding (effective address) before raising anything. */
    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();
    IEM_MC_FETCH_MEM_R32(r32Val2, pIemCpu->iEffSeg, GCPtrEffSrc);

    IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value1, 0)
        IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fcom_r80_by_r32, pu16Fsw, pr80Value1, pr32Val2);
        IEM_MC_UPDATE_FSW_WITH_MEM_OP(u16Fsw, pIemCpu->iEffSeg, GCPtrEffSrc);
    IEM_MC_ELSE()
        IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(UINT8_MAX, pIemCpu->iEffSeg, GCPtrEffSrc);
    IEM_MC_ENDIF();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
12890
12891
/** Opcode 0xd8 !11/3.
 * FCOMP ST(0),m32real - like FCOM m32r, but pops the register stack
 * afterwards (see the _THEN_POP variants below). */
FNIEMOP_DEF_1(iemOp_fcomp_m32r, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fcomp st0,m32r");

    IEM_MC_BEGIN(3, 3);
    IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
    IEM_MC_LOCAL(uint16_t, u16Fsw);
    IEM_MC_LOCAL(RTFLOAT32U, r32Val2);
    IEM_MC_ARG_LOCAL_REF(uint16_t *, pu16Fsw, u16Fsw, 0);
    IEM_MC_ARG(PCRTFLOAT80U, pr80Value1, 1);
    IEM_MC_ARG_LOCAL_REF(PCRTFLOAT32U, pr32Val2, r32Val2, 2);

    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();
    IEM_MC_FETCH_MEM_R32(r32Val2, pIemCpu->iEffSeg, GCPtrEffSrc);

    IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value1, 0)
        /* Same comparison worker as FCOM; only the FSW update differs (pop). */
        IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fcom_r80_by_r32, pu16Fsw, pr80Value1, pr32Val2);
        IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(u16Fsw, pIemCpu->iEffSeg, GCPtrEffSrc);
    IEM_MC_ELSE()
        IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(UINT8_MAX, pIemCpu->iEffSeg, GCPtrEffSrc);
    IEM_MC_ENDIF();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
12924
12925
/** Opcode 0xd8 !11/4.
 * FSUB ST(0),m32real - subtracts a 32-bit real memory operand from ST(0). */
FNIEMOP_DEF_1(iemOp_fsub_m32r, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fsub st0,m32r");
    return FNIEMOP_CALL_2(iemOpHlpFpu_st0_m32r, bRm, iemAImpl_fsub_r80_by_r32);
}
12932
12933
/** Opcode 0xd8 !11/5.
 * FSUBR ST(0),m32real - reversed subtract: ST(0) = m32real - ST(0). */
FNIEMOP_DEF_1(iemOp_fsubr_m32r, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fsubr st0,m32r");
    return FNIEMOP_CALL_2(iemOpHlpFpu_st0_m32r, bRm, iemAImpl_fsubr_r80_by_r32);
}
12940
12941
/** Opcode 0xd8 !11/6.
 * FDIV ST(0),m32real - divides ST(0) by a 32-bit real memory operand. */
FNIEMOP_DEF_1(iemOp_fdiv_m32r, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fdiv st0,m32r");
    return FNIEMOP_CALL_2(iemOpHlpFpu_st0_m32r, bRm, iemAImpl_fdiv_r80_by_r32);
}
12948
12949
/** Opcode 0xd8 !11/7.
 * FDIVR ST(0),m32real - reversed divide: ST(0) = m32real / ST(0). */
FNIEMOP_DEF_1(iemOp_fdivr_m32r, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fdivr st0,m32r");
    return FNIEMOP_CALL_2(iemOpHlpFpu_st0_m32r, bRm, iemAImpl_fdivr_r80_by_r32);
}
12956
12957
/** Opcode 0xd8.
 * First x87 escape byte.  Dispatches on the ModRM reg field: register forms
 * (mod == 3) operate on ST(0) and ST(i), memory forms operate on ST(0) and a
 * 32-bit real memory operand. */
FNIEMOP_DEF(iemOp_EscF0)
{
    /* Record where the escape opcode byte is, for FPU opcode (FOP) reporting. */
    pIemCpu->offFpuOpcode = pIemCpu->offOpcode - 1;
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
        {
            case 0: return FNIEMOP_CALL_1(iemOp_fadd_stN, bRm);
            case 1: return FNIEMOP_CALL_1(iemOp_fmul_stN, bRm);
            case 2: return FNIEMOP_CALL_1(iemOp_fcom_stN, bRm);
            case 3: return FNIEMOP_CALL_1(iemOp_fcomp_stN, bRm);
            case 4: return FNIEMOP_CALL_1(iemOp_fsub_stN, bRm);
            case 5: return FNIEMOP_CALL_1(iemOp_fsubr_stN, bRm);
            case 6: return FNIEMOP_CALL_1(iemOp_fdiv_stN, bRm);
            case 7: return FNIEMOP_CALL_1(iemOp_fdivr_stN, bRm);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
        {
            case 0: return FNIEMOP_CALL_1(iemOp_fadd_m32r, bRm);
            case 1: return FNIEMOP_CALL_1(iemOp_fmul_m32r, bRm);
            case 2: return FNIEMOP_CALL_1(iemOp_fcom_m32r, bRm);
            case 3: return FNIEMOP_CALL_1(iemOp_fcomp_m32r, bRm);
            case 4: return FNIEMOP_CALL_1(iemOp_fsub_m32r, bRm);
            case 5: return FNIEMOP_CALL_1(iemOp_fsubr_m32r, bRm);
            case 6: return FNIEMOP_CALL_1(iemOp_fdiv_m32r, bRm);
            case 7: return FNIEMOP_CALL_1(iemOp_fdivr_m32r, bRm);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
12995
12996
/** Opcode 0xd9 /0 mem32real.
 * FLD m32real - converts a 32-bit real memory operand to 80-bit and pushes
 * it onto the FPU register stack.
 * @sa iemOp_fld_m64r */
FNIEMOP_DEF_1(iemOp_fld_m32r, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fld m32r");

    IEM_MC_BEGIN(2, 3);
    IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
    IEM_MC_LOCAL(IEMFPURESULT, FpuRes);
    IEM_MC_LOCAL(RTFLOAT32U, r32Val);
    IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes, FpuRes, 0);
    IEM_MC_ARG_LOCAL_REF(PCRTFLOAT32U, pr32Val, r32Val, 1);

    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();
    IEM_MC_FETCH_MEM_R32(r32Val, pIemCpu->iEffSeg, GCPtrEffSrc);

    /* A push requires ST(7) - the register that will become ST(0) - to be free. */
    IEM_MC_IF_FPUREG_IS_EMPTY(7)
        IEM_MC_CALL_FPU_AIMPL_2(iemAImpl_fld_r32_to_r80, pFpuRes, pr32Val);
        IEM_MC_PUSH_FPU_RESULT_MEM_OP(FpuRes, pIemCpu->iEffSeg, GCPtrEffSrc);
    IEM_MC_ELSE()
        IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(pIemCpu->iEffSeg, GCPtrEffSrc);
    IEM_MC_ENDIF();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
13029
13030
/** Opcode 0xd9 !11/2 mem32real.
 * FST m32real - stores ST(0) to memory as a 32-bit real; the stack is left
 * unchanged.  The destination is mapped for write so that the rounded value
 * (or a QNaN on masked underflow) can be committed in place. */
FNIEMOP_DEF_1(iemOp_fst_m32r, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fst m32r");
    IEM_MC_BEGIN(3, 2);
    IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
    IEM_MC_LOCAL(uint16_t, u16Fsw);
    IEM_MC_ARG_LOCAL_REF(uint16_t *, pu16Fsw, u16Fsw, 0);
    IEM_MC_ARG(PRTFLOAT32U, pr32Dst, 1);
    IEM_MC_ARG(PCRTFLOAT80U, pr80Value, 2);

    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();

    IEM_MC_MEM_MAP(pr32Dst, IEM_ACCESS_DATA_W, pIemCpu->iEffSeg, GCPtrEffDst, 1 /*arg*/);
    IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value, 0)
        IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fst_r80_to_r32, pu16Fsw, pr32Dst, pr80Value);
        /* Only commit the store if the FSW from the conversion permits it. */
        IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(pr32Dst, IEM_ACCESS_DATA_W, u16Fsw);
        IEM_MC_UPDATE_FSW_WITH_MEM_OP(u16Fsw, pIemCpu->iEffSeg, GCPtrEffDst);
    IEM_MC_ELSE()
        /* ST(0) empty: with IM masked, store the negative QNaN indefinite value. */
        IEM_MC_IF_FCW_IM()
            IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(pr32Dst);
            IEM_MC_MEM_COMMIT_AND_UNMAP(pr32Dst, IEM_ACCESS_DATA_W);
        IEM_MC_ENDIF();
        IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(UINT8_MAX, pIemCpu->iEffSeg, GCPtrEffDst);
    IEM_MC_ENDIF();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
13065
13066
/** Opcode 0xd9 !11/3.
 * FSTP m32real - like FST m32r, but pops the register stack after the
 * store (note the _THEN_POP FSW/underflow variants). */
FNIEMOP_DEF_1(iemOp_fstp_m32r, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fstp m32r");
    IEM_MC_BEGIN(3, 2);
    IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
    IEM_MC_LOCAL(uint16_t, u16Fsw);
    IEM_MC_ARG_LOCAL_REF(uint16_t *, pu16Fsw, u16Fsw, 0);
    IEM_MC_ARG(PRTFLOAT32U, pr32Dst, 1);
    IEM_MC_ARG(PCRTFLOAT80U, pr80Value, 2);

    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();

    IEM_MC_MEM_MAP(pr32Dst, IEM_ACCESS_DATA_W, pIemCpu->iEffSeg, GCPtrEffDst, 1 /*arg*/);
    IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value, 0)
        IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fst_r80_to_r32, pu16Fsw, pr32Dst, pr80Value);
        IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(pr32Dst, IEM_ACCESS_DATA_W, u16Fsw);
        IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(u16Fsw, pIemCpu->iEffSeg, GCPtrEffDst);
    IEM_MC_ELSE()
        /* ST(0) empty: with IM masked, store the negative QNaN indefinite value. */
        IEM_MC_IF_FCW_IM()
            IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(pr32Dst);
            IEM_MC_MEM_COMMIT_AND_UNMAP(pr32Dst, IEM_ACCESS_DATA_W);
        IEM_MC_ENDIF();
        IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(UINT8_MAX, pIemCpu->iEffSeg, GCPtrEffDst);
    IEM_MC_ENDIF();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
13101
13102
/** Opcode 0xd9 !11/4.
 * FLDENV m14/28byte - loads the FPU environment; the heavy lifting is
 * deferred to the C implementation (iemCImpl_fldenv).  The image size
 * depends on the effective operand size, hence enmEffOpSize is passed. */
FNIEMOP_DEF_1(iemOp_fldenv, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fldenv m14/28byte");
    IEM_MC_BEGIN(3, 0);
    IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize, /*=*/ pIemCpu->enmEffOpSize, 0);
    IEM_MC_ARG_CONST(uint8_t, iEffSeg, /*=*/ pIemCpu->iEffSeg, 1);
    IEM_MC_ARG(RTGCPTR, GCPtrEffSrc, 2);
    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_CALL_CIMPL_3(iemCImpl_fldenv, enmEffOpSize, iEffSeg, GCPtrEffSrc);
    IEM_MC_END();
    return VINF_SUCCESS;
}
13118
13119
/** Opcode 0xd9 !11/5.
 * FLDCW m2byte - loads the FPU control word from memory and hands it to
 * the C implementation for validation/merging.
 * NOTE(review): the argument is named u16Fsw but actually carries the
 * control word (FCW) being loaded - consider renaming to u16Fcw. */
FNIEMOP_DEF_1(iemOp_fldcw, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fldcw m2byte");
    IEM_MC_BEGIN(1, 1);
    IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
    IEM_MC_ARG(uint16_t, u16Fsw, 0);
    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_FETCH_MEM_U16(u16Fsw, pIemCpu->iEffSeg, GCPtrEffSrc);
    IEM_MC_CALL_CIMPL_1(iemCImpl_fldcw, u16Fsw);
    IEM_MC_END();
    return VINF_SUCCESS;
}
13135
13136
/** Opcode 0xd9 !11/6.
 * FNSTENV m14/28byte - stores the FPU environment without checking for
 * pending exceptions first; implemented by iemCImpl_fnstenv. */
FNIEMOP_DEF_1(iemOp_fnstenv, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fstenv m14/m28byte");
    IEM_MC_BEGIN(3, 0);
    IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize, /*=*/ pIemCpu->enmEffOpSize, 0);
    IEM_MC_ARG_CONST(uint8_t, iEffSeg, /*=*/ pIemCpu->iEffSeg, 1);
    IEM_MC_ARG(RTGCPTR, GCPtrEffDst, 2);
    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_CALL_CIMPL_3(iemCImpl_fnstenv, enmEffOpSize, iEffSeg, GCPtrEffDst);
    IEM_MC_END();
    return VINF_SUCCESS;
}
13152
13153
/** Opcode 0xd9 !11/7.
 * FNSTCW m2byte - stores the FPU control word to memory; no exception
 * check, no FSW update. */
FNIEMOP_DEF_1(iemOp_fnstcw, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fnstcw m2byte");
    IEM_MC_BEGIN(2, 0);
    IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
    IEM_MC_LOCAL(uint16_t, u16Fcw);
    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_FETCH_FCW(u16Fcw);
    IEM_MC_STORE_MEM_U16(pIemCpu->iEffSeg, GCPtrEffDst, u16Fcw);
    IEM_MC_ADVANCE_RIP(); /* C0-C3 are documented as undefined, we leave them unmodified. */
    IEM_MC_END();
    return VINF_SUCCESS;
}
13170
13171
/** Opcode 0xd9 0xc9, 0xd9 0xd8-0xdf, ++?.
 * FNOP - does nothing except updating the FPU opcode/instruction pointer
 * bookkeeping and the usual #NM/#MF checks. */
FNIEMOP_DEF(iemOp_fnop)
{
    IEMOP_MNEMONIC("fnop");
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    IEM_MC_BEGIN(0, 0);
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();
    /** @todo Testcase: looks like FNOP leaves FOP alone but updates FPUIP. Could be
     *        intel optimizations. Investigate. */
    IEM_MC_UPDATE_FPU_OPCODE_IP();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP(); /* C0-C3 are documented as undefined, we leave them unmodified. */
    IEM_MC_END();
    return VINF_SUCCESS;
}
13189
13190
/** Opcode 0xd9 11/0 stN.
 * FLD ST(i) - pushes a copy of ST(i) onto the register stack. */
FNIEMOP_DEF_1(iemOp_fld_stN, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fld stN");
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    /** @todo Testcase: Check if this raises \#MF?  Intel mentioned it not.  AMD
     *        indicates that it does. */
    IEM_MC_BEGIN(0, 2);
    IEM_MC_LOCAL(PCRTFLOAT80U, pr80Value);
    IEM_MC_LOCAL(IEMFPURESULT, FpuRes);
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();
    /* Source register index comes from the ModRM r/m field. */
    IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value, bRm & X86_MODRM_RM_MASK)
        IEM_MC_SET_FPU_RESULT(FpuRes, 0 /*FSW*/, pr80Value);
        IEM_MC_PUSH_FPU_RESULT(FpuRes);
    IEM_MC_ELSE()
        IEM_MC_FPU_STACK_PUSH_UNDERFLOW();
    IEM_MC_ENDIF();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();

    return VINF_SUCCESS;
}
13216
13217
/** Opcode 0xd9 11/3 stN.
 * FXCH ST(i) - exchanges ST(0) and ST(i).  The underflow path (either
 * register empty) is handled by a C implementation. */
FNIEMOP_DEF_1(iemOp_fxch_stN, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fxch stN");
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    /** @todo Testcase: Check if this raises \#MF?  Intel mentioned it not.  AMD
     *        indicates that it does. */
    IEM_MC_BEGIN(1, 3);
    IEM_MC_LOCAL(PCRTFLOAT80U, pr80Value1);
    IEM_MC_LOCAL(PCRTFLOAT80U, pr80Value2);
    IEM_MC_LOCAL(IEMFPURESULT, FpuRes);
    IEM_MC_ARG_CONST(uint8_t, iStReg, /*=*/ bRm & X86_MODRM_RM_MASK, 0);
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();
    IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(pr80Value1, 0, pr80Value2, bRm & X86_MODRM_RM_MASK)
        /* NOTE(review): the result FSW sets C1 here while the SDM documents
           C1=0 after a successful FXCH - verify intent against the macros. */
        IEM_MC_SET_FPU_RESULT(FpuRes, X86_FSW_C1, pr80Value2);
        IEM_MC_STORE_FPUREG_R80_SRC_REF(bRm & X86_MODRM_RM_MASK, pr80Value1);
        IEM_MC_STORE_FPU_RESULT(FpuRes, 0);
    IEM_MC_ELSE()
        IEM_MC_CALL_CIMPL_1(iemCImpl_fxch_underflow, iStReg);
    IEM_MC_ENDIF();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();

    return VINF_SUCCESS;
}
13246
13247
/** Opcode 0xd9 11/4, 0xdd 11/2.
 * FSTP ST(i) - copies ST(0) to ST(i) and pops the register stack.  The
 * ST(0),ST(0) form degenerates to a plain pop and is special-cased. */
FNIEMOP_DEF_1(iemOp_fstp_stN, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fstp st0,stN");
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    /* fstp st0, st0 is frequently used as an official 'ffreep st0' sequence. */
    uint8_t const iDstReg = bRm & X86_MODRM_RM_MASK;
    if (!iDstReg)
    {
        /* Destination is ST(0) itself: just pop (or record underflow). */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL_CONST(uint16_t, u16Fsw, /*=*/ 0);
        IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
        IEM_MC_MAYBE_RAISE_FPU_XCPT();
        IEM_MC_IF_FPUREG_NOT_EMPTY(0)
            IEM_MC_UPDATE_FSW_THEN_POP(u16Fsw);
        IEM_MC_ELSE()
            IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(0);
        IEM_MC_ENDIF();
        IEM_MC_USED_FPU();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        IEM_MC_BEGIN(0, 2);
        IEM_MC_LOCAL(PCRTFLOAT80U, pr80Value);
        IEM_MC_LOCAL(IEMFPURESULT, FpuRes);
        IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
        IEM_MC_MAYBE_RAISE_FPU_XCPT();
        IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value, 0)
            IEM_MC_SET_FPU_RESULT(FpuRes, 0 /*FSW*/, pr80Value);
            IEM_MC_STORE_FPU_RESULT_THEN_POP(FpuRes, iDstReg);
        IEM_MC_ELSE()
            IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(iDstReg);
        IEM_MC_ENDIF();
        IEM_MC_USED_FPU();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
13290
13291
13292/**
13293 * Common worker for FPU instructions working on ST0 and replaces it with the
13294 * result, i.e. unary operators.
13295 *
13296 * @param pfnAImpl Pointer to the instruction implementation (assembly).
13297 */
13298FNIEMOP_DEF_1(iemOpHlpFpu_st0, PFNIEMAIMPLFPUR80UNARY, pfnAImpl)
13299{
13300 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
13301
13302 IEM_MC_BEGIN(2, 1);
13303 IEM_MC_LOCAL(IEMFPURESULT, FpuRes);
13304 IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes, FpuRes, 0);
13305 IEM_MC_ARG(PCRTFLOAT80U, pr80Value, 1);
13306
13307 IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
13308 IEM_MC_MAYBE_RAISE_FPU_XCPT();
13309 IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value, 0)
13310 IEM_MC_CALL_FPU_AIMPL_2(pfnAImpl, pFpuRes, pr80Value);
13311 IEM_MC_STORE_FPU_RESULT(FpuRes, 0);
13312 IEM_MC_ELSE()
13313 IEM_MC_FPU_STACK_UNDERFLOW(0);
13314 IEM_MC_ENDIF();
13315 IEM_MC_USED_FPU();
13316 IEM_MC_ADVANCE_RIP();
13317
13318 IEM_MC_END();
13319 return VINF_SUCCESS;
13320}
13321
13322
/** Opcode 0xd9 0xe0.
 * FCHS - flips the sign of ST(0). */
FNIEMOP_DEF(iemOp_fchs)
{
    IEMOP_MNEMONIC("fchs st0");
    return FNIEMOP_CALL_1(iemOpHlpFpu_st0, iemAImpl_fchs_r80);
}
13329
13330
/** Opcode 0xd9 0xe1.
 * FABS - clears the sign of ST(0). */
FNIEMOP_DEF(iemOp_fabs)
{
    IEMOP_MNEMONIC("fabs st0");
    return FNIEMOP_CALL_1(iemOpHlpFpu_st0, iemAImpl_fabs_r80);
}
13337
13338
13339/**
13340 * Common worker for FPU instructions working on ST0 and only returns FSW.
13341 *
13342 * @param pfnAImpl Pointer to the instruction implementation (assembly).
13343 */
13344FNIEMOP_DEF_1(iemOpHlpFpuNoStore_st0, PFNIEMAIMPLFPUR80UNARYFSW, pfnAImpl)
13345{
13346 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
13347
13348 IEM_MC_BEGIN(2, 1);
13349 IEM_MC_LOCAL(uint16_t, u16Fsw);
13350 IEM_MC_ARG_LOCAL_REF(uint16_t *, pu16Fsw, u16Fsw, 0);
13351 IEM_MC_ARG(PCRTFLOAT80U, pr80Value, 1);
13352
13353 IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
13354 IEM_MC_MAYBE_RAISE_FPU_XCPT();
13355 IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value, 0)
13356 IEM_MC_CALL_FPU_AIMPL_2(pfnAImpl, pu16Fsw, pr80Value);
13357 IEM_MC_UPDATE_FSW(u16Fsw);
13358 IEM_MC_ELSE()
13359 IEM_MC_FPU_STACK_UNDERFLOW(UINT8_MAX);
13360 IEM_MC_ENDIF();
13361 IEM_MC_USED_FPU();
13362 IEM_MC_ADVANCE_RIP();
13363
13364 IEM_MC_END();
13365 return VINF_SUCCESS;
13366}
13367
13368
/** Opcode 0xd9 0xe4.
 * FTST - compares ST(0) with 0.0, setting condition codes only. */
FNIEMOP_DEF(iemOp_ftst)
{
    IEMOP_MNEMONIC("ftst st0");
    return FNIEMOP_CALL_1(iemOpHlpFpuNoStore_st0, iemAImpl_ftst_r80);
}
13375
13376
/** Opcode 0xd9 0xe5.
 * FXAM - classifies the value in ST(0) via the condition codes. */
FNIEMOP_DEF(iemOp_fxam)
{
    IEMOP_MNEMONIC("fxam st0");
    return FNIEMOP_CALL_1(iemOpHlpFpuNoStore_st0, iemAImpl_fxam_r80);
}
13383
13384
13385/**
13386 * Common worker for FPU instructions pushing a constant onto the FPU stack.
13387 *
13388 * @param pfnAImpl Pointer to the instruction implementation (assembly).
13389 */
13390FNIEMOP_DEF_1(iemOpHlpFpuPushConstant, PFNIEMAIMPLFPUR80LDCONST, pfnAImpl)
13391{
13392 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
13393
13394 IEM_MC_BEGIN(1, 1);
13395 IEM_MC_LOCAL(IEMFPURESULT, FpuRes);
13396 IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes, FpuRes, 0);
13397
13398 IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
13399 IEM_MC_MAYBE_RAISE_FPU_XCPT();
13400 IEM_MC_IF_FPUREG_IS_EMPTY(7)
13401 IEM_MC_CALL_FPU_AIMPL_1(pfnAImpl, pFpuRes);
13402 IEM_MC_PUSH_FPU_RESULT(FpuRes);
13403 IEM_MC_ELSE()
13404 IEM_MC_FPU_STACK_PUSH_OVERFLOW();
13405 IEM_MC_ENDIF();
13406 IEM_MC_USED_FPU();
13407 IEM_MC_ADVANCE_RIP();
13408
13409 IEM_MC_END();
13410 return VINF_SUCCESS;
13411}
13412
13413
/** Opcode 0xd9 0xe8.
 * FLD1 - pushes +1.0. */
FNIEMOP_DEF(iemOp_fld1)
{
    IEMOP_MNEMONIC("fld1");
    return FNIEMOP_CALL_1(iemOpHlpFpuPushConstant, iemAImpl_fld1);
}
13420
13421
/** Opcode 0xd9 0xe9.
 * FLDL2T - pushes log2(10). */
FNIEMOP_DEF(iemOp_fldl2t)
{
    IEMOP_MNEMONIC("fldl2t");
    return FNIEMOP_CALL_1(iemOpHlpFpuPushConstant, iemAImpl_fldl2t);
}
13428
13429
/** Opcode 0xd9 0xea.
 * FLDL2E - pushes log2(e). */
FNIEMOP_DEF(iemOp_fldl2e)
{
    IEMOP_MNEMONIC("fldl2e");
    return FNIEMOP_CALL_1(iemOpHlpFpuPushConstant, iemAImpl_fldl2e);
}
13436
/** Opcode 0xd9 0xeb.
 * FLDPI - pushes pi. */
FNIEMOP_DEF(iemOp_fldpi)
{
    IEMOP_MNEMONIC("fldpi");
    return FNIEMOP_CALL_1(iemOpHlpFpuPushConstant, iemAImpl_fldpi);
}
13443
13444
/** Opcode 0xd9 0xec.
 * FLDLG2 - pushes log10(2). */
FNIEMOP_DEF(iemOp_fldlg2)
{
    IEMOP_MNEMONIC("fldlg2");
    return FNIEMOP_CALL_1(iemOpHlpFpuPushConstant, iemAImpl_fldlg2);
}
13451
/** Opcode 0xd9 0xed.
 * FLDLN2 - pushes ln(2). */
FNIEMOP_DEF(iemOp_fldln2)
{
    IEMOP_MNEMONIC("fldln2");
    return FNIEMOP_CALL_1(iemOpHlpFpuPushConstant, iemAImpl_fldln2);
}
13458
13459
/** Opcode 0xd9 0xee.
 * FLDZ - pushes +0.0. */
FNIEMOP_DEF(iemOp_fldz)
{
    IEMOP_MNEMONIC("fldz");
    return FNIEMOP_CALL_1(iemOpHlpFpuPushConstant, iemAImpl_fldz);
}
13466
13467
/** Opcode 0xd9 0xf0.
 * F2XM1 - replaces ST(0) with 2^ST(0) - 1. */
FNIEMOP_DEF(iemOp_f2xm1)
{
    IEMOP_MNEMONIC("f2xm1 st0");
    return FNIEMOP_CALL_1(iemOpHlpFpu_st0, iemAImpl_f2xm1_r80);
}
13474
13475
13476/** Opcode 0xd9 0xf1. */
13477FNIEMOP_DEF(iemOp_fylx2)
13478{
13479 IEMOP_MNEMONIC("fylx2 st0");
13480 return FNIEMOP_CALL_1(iemOpHlpFpu_st0, iemAImpl_fyl2x_r80);
13481}
13482
13483
13484/**
13485 * Common worker for FPU instructions working on ST0 and having two outputs, one
13486 * replacing ST0 and one pushed onto the stack.
13487 *
13488 * @param pfnAImpl Pointer to the instruction implementation (assembly).
13489 */
13490FNIEMOP_DEF_1(iemOpHlpFpuReplace_st0_push, PFNIEMAIMPLFPUR80UNARYTWO, pfnAImpl)
13491{
13492 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
13493
13494 IEM_MC_BEGIN(2, 1);
13495 IEM_MC_LOCAL(IEMFPURESULTTWO, FpuResTwo);
13496 IEM_MC_ARG_LOCAL_REF(PIEMFPURESULTTWO, pFpuResTwo, FpuResTwo, 0);
13497 IEM_MC_ARG(PCRTFLOAT80U, pr80Value, 1);
13498
13499 IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
13500 IEM_MC_MAYBE_RAISE_FPU_XCPT();
13501 IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value, 0)
13502 IEM_MC_CALL_FPU_AIMPL_2(pfnAImpl, pFpuResTwo, pr80Value);
13503 IEM_MC_PUSH_FPU_RESULT_TWO(FpuResTwo);
13504 IEM_MC_ELSE()
13505 IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO();
13506 IEM_MC_ENDIF();
13507 IEM_MC_USED_FPU();
13508 IEM_MC_ADVANCE_RIP();
13509
13510 IEM_MC_END();
13511 return VINF_SUCCESS;
13512}
13513
13514
/** Opcode 0xd9 0xf2.
 * FPTAN - replaces ST(0) with its partial tangent and pushes a second
 * result (1.0 per the SDM when in range) onto the stack. */
FNIEMOP_DEF(iemOp_fptan)
{
    IEMOP_MNEMONIC("fptan st0");
    return FNIEMOP_CALL_1(iemOpHlpFpuReplace_st0_push, iemAImpl_fptan_r80_r80);
}
13521
13522
13523/**
13524 * Common worker for FPU instructions working on STn and ST0, storing the result
13525 * in STn, and popping the stack unless IE, DE or ZE was raised.
13526 *
13527 * @param pfnAImpl Pointer to the instruction implementation (assembly).
13528 */
13529FNIEMOP_DEF_2(iemOpHlpFpu_stN_st0_pop, uint8_t, bRm, PFNIEMAIMPLFPUR80, pfnAImpl)
13530{
13531 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
13532
13533 IEM_MC_BEGIN(3, 1);
13534 IEM_MC_LOCAL(IEMFPURESULT, FpuRes);
13535 IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes, FpuRes, 0);
13536 IEM_MC_ARG(PCRTFLOAT80U, pr80Value1, 1);
13537 IEM_MC_ARG(PCRTFLOAT80U, pr80Value2, 2);
13538
13539 IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
13540 IEM_MC_MAYBE_RAISE_FPU_XCPT();
13541
13542 IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(pr80Value1, bRm & X86_MODRM_RM_MASK, pr80Value2, 0)
13543 IEM_MC_CALL_FPU_AIMPL_3(pfnAImpl, pFpuRes, pr80Value1, pr80Value2);
13544 IEM_MC_STORE_FPU_RESULT_THEN_POP(FpuRes, bRm & X86_MODRM_RM_MASK);
13545 IEM_MC_ELSE()
13546 IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(bRm & X86_MODRM_RM_MASK);
13547 IEM_MC_ENDIF();
13548 IEM_MC_USED_FPU();
13549 IEM_MC_ADVANCE_RIP();
13550
13551 IEM_MC_END();
13552 return VINF_SUCCESS;
13553}
13554
13555
/** Opcode 0xd9 0xf3.
 * FPATAN - ST(1) = arctan(ST(1)/ST(0)), then pop. */
FNIEMOP_DEF(iemOp_fpatan)
{
    IEMOP_MNEMONIC("fpatan st1,st0");
    return FNIEMOP_CALL_2(iemOpHlpFpu_stN_st0_pop, 1, iemAImpl_fpatan_r80_by_r80);
}
13562
13563
/** Opcode 0xd9 0xf4.
 * FXTRACT - splits ST(0) into exponent (replaces ST(0)) and pushed significand. */
FNIEMOP_DEF(iemOp_fxtract)
{
    IEMOP_MNEMONIC("fxtract st0");
    return FNIEMOP_CALL_1(iemOpHlpFpuReplace_st0_push, iemAImpl_fxtract_r80_r80);
}
13570
13571
/** Opcode 0xd9 0xf5.
 * FPREM1 - IEEE partial remainder of ST(0) by ST(1), result in ST(0). */
FNIEMOP_DEF(iemOp_fprem1)
{
    IEMOP_MNEMONIC("fprem1 st0, st1");
    return FNIEMOP_CALL_2(iemOpHlpFpu_st0_stN, 1, iemAImpl_fprem1_r80_by_r80);
}
13578
13579
/** Opcode 0xd9 0xf6.
 * FDECSTP - decrements the FPU stack top pointer (TOP), rotating the stack.
 * No register contents or tag bits change. */
FNIEMOP_DEF(iemOp_fdecstp)
{
    IEMOP_MNEMONIC("fdecstp");
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    /* Note! C0, C2 and C3 are documented as undefined, we clear them. */
    /** @todo Testcase: Check whether FOP, FPUIP and FPUCS are affected by
     *        FINCSTP and FDECSTP. */

    IEM_MC_BEGIN(0,0);

    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();

    IEM_MC_FPU_STACK_DEC_TOP();
    IEM_MC_UPDATE_FSW_CONST(0);

    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
13602
13603
/** Opcode 0xd9 0xf7.
 * FINCSTP - increments the FPU stack top pointer (TOP), rotating the stack.
 * No register contents or tag bits change. */
FNIEMOP_DEF(iemOp_fincstp)
{
    IEMOP_MNEMONIC("fincstp");
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    /* Note! C0, C2 and C3 are documented as undefined, we clear them. */
    /** @todo Testcase: Check whether FOP, FPUIP and FPUCS are affected by
     *        FINCSTP and FDECSTP. */

    IEM_MC_BEGIN(0,0);

    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();

    IEM_MC_FPU_STACK_INC_TOP();
    IEM_MC_UPDATE_FSW_CONST(0);

    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
13626
13627
/** Opcode 0xd9 0xf8.
 * FPREM - partial remainder (truncating) of ST(0) by ST(1), result in ST(0). */
FNIEMOP_DEF(iemOp_fprem)
{
    IEMOP_MNEMONIC("fprem st0, st1");
    return FNIEMOP_CALL_2(iemOpHlpFpu_st0_stN, 1, iemAImpl_fprem_r80_by_r80);
}
13634
13635
/** Opcode 0xd9 0xf9.
 * FYL2XP1 - ST(1) = ST(1) * log2(ST(0) + 1.0), then pop. */
FNIEMOP_DEF(iemOp_fyl2xp1)
{
    IEMOP_MNEMONIC("fyl2xp1 st1,st0");
    return FNIEMOP_CALL_2(iemOpHlpFpu_stN_st0_pop, 1, iemAImpl_fyl2xp1_r80_by_r80);
}
13642
13643
/** Opcode 0xd9 0xfa.
 * FSQRT - replaces ST(0) with its square root. */
FNIEMOP_DEF(iemOp_fsqrt)
{
    IEMOP_MNEMONIC("fsqrt st0");
    return FNIEMOP_CALL_1(iemOpHlpFpu_st0, iemAImpl_fsqrt_r80);
}
13650
13651
/** Opcode 0xd9 0xfb.
 * FSINCOS - replaces ST(0) with sin(ST(0)) and pushes cos(ST(0)). */
FNIEMOP_DEF(iemOp_fsincos)
{
    IEMOP_MNEMONIC("fsincos st0");
    return FNIEMOP_CALL_1(iemOpHlpFpuReplace_st0_push, iemAImpl_fsincos_r80_r80);
}
13658
13659
/** Opcode 0xd9 0xfc.
 * FRNDINT - rounds ST(0) to an integer per the current rounding mode. */
FNIEMOP_DEF(iemOp_frndint)
{
    IEMOP_MNEMONIC("frndint st0");
    return FNIEMOP_CALL_1(iemOpHlpFpu_st0, iemAImpl_frndint_r80);
}
13666
13667
/** Opcode 0xd9 0xfd.
 * FSCALE - scales ST(0) by 2^trunc(ST(1)). */
FNIEMOP_DEF(iemOp_fscale)
{
    IEMOP_MNEMONIC("fscale st0, st1");
    return FNIEMOP_CALL_2(iemOpHlpFpu_st0_stN, 1, iemAImpl_fscale_r80_by_r80);
}
13674
13675
/** Opcode 0xd9 0xfe.
 * FSIN - replaces ST(0) with its sine. */
FNIEMOP_DEF(iemOp_fsin)
{
    IEMOP_MNEMONIC("fsin st0");
    return FNIEMOP_CALL_1(iemOpHlpFpu_st0, iemAImpl_fsin_r80);
}
13682
13683
/** Opcode 0xd9 0xff.
 * FCOS - replaces ST(0) with its cosine. */
FNIEMOP_DEF(iemOp_fcos)
{
    IEMOP_MNEMONIC("fcos st0");
    return FNIEMOP_CALL_1(iemOpHlpFpu_st0, iemAImpl_fcos_r80);
}
13690
13691
/** Used by iemOp_EscF1.
 * Dispatch table for the register-form 0xd9 opcodes 0xe0 thru 0xff; invalid
 * encodings map to iemOp_Invalid.
 * NOTE(review): the 0xf1 entry identifier spells 'fylx2'; the instruction it
 * decodes is FYL2X. */
static const PFNIEMOP g_apfnEscF1_E0toFF[32] =
{
    /* 0xe0 */ iemOp_fchs,
    /* 0xe1 */ iemOp_fabs,
    /* 0xe2 */ iemOp_Invalid,
    /* 0xe3 */ iemOp_Invalid,
    /* 0xe4 */ iemOp_ftst,
    /* 0xe5 */ iemOp_fxam,
    /* 0xe6 */ iemOp_Invalid,
    /* 0xe7 */ iemOp_Invalid,
    /* 0xe8 */ iemOp_fld1,
    /* 0xe9 */ iemOp_fldl2t,
    /* 0xea */ iemOp_fldl2e,
    /* 0xeb */ iemOp_fldpi,
    /* 0xec */ iemOp_fldlg2,
    /* 0xed */ iemOp_fldln2,
    /* 0xee */ iemOp_fldz,
    /* 0xef */ iemOp_Invalid,
    /* 0xf0 */ iemOp_f2xm1,
    /* 0xf1 */ iemOp_fylx2,
    /* 0xf2 */ iemOp_fptan,
    /* 0xf3 */ iemOp_fpatan,
    /* 0xf4 */ iemOp_fxtract,
    /* 0xf5 */ iemOp_fprem1,
    /* 0xf6 */ iemOp_fdecstp,
    /* 0xf7 */ iemOp_fincstp,
    /* 0xf8 */ iemOp_fprem,
    /* 0xf9 */ iemOp_fyl2xp1,
    /* 0xfa */ iemOp_fsqrt,
    /* 0xfb */ iemOp_fsincos,
    /* 0xfc */ iemOp_frndint,
    /* 0xfd */ iemOp_fscale,
    /* 0xfe */ iemOp_fsin,
    /* 0xff */ iemOp_fcos
};
13728
13729
/** Opcode 0xd9.
 * Second x87 escape byte.  Register forms 0xc0-0xdf dispatch on the reg
 * field; 0xe0-0xff use the g_apfnEscF1_E0toFF table.  Memory forms handle
 * FLD/FST/FSTP m32r and environment/control word load/store. */
FNIEMOP_DEF(iemOp_EscF1)
{
    /* Record where the escape opcode byte is, for FPU opcode (FOP) reporting. */
    pIemCpu->offFpuOpcode = pIemCpu->offOpcode - 1;
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
        {
            case 0: return FNIEMOP_CALL_1(iemOp_fld_stN, bRm);
            case 1: return FNIEMOP_CALL_1(iemOp_fxch_stN, bRm);
            case 2:
                /* Only 0xd9 0xc9 (FNOP) is valid in this group. */
                if (bRm == 0xc9)
                    return FNIEMOP_CALL(iemOp_fnop);
                return IEMOP_RAISE_INVALID_OPCODE();
            case 3: return FNIEMOP_CALL_1(iemOp_fstp_stN, bRm); /* Reserved. Intel behavior seems to be FSTP ST(i) though. */
            case 4:
            case 5:
            case 6:
            case 7:
                Assert((unsigned)bRm - 0xe0U < RT_ELEMENTS(g_apfnEscF1_E0toFF));
                return FNIEMOP_CALL(g_apfnEscF1_E0toFF[bRm - 0xe0]);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
        {
            case 0: return FNIEMOP_CALL_1(iemOp_fld_m32r, bRm);
            case 1: return IEMOP_RAISE_INVALID_OPCODE();
            case 2: return FNIEMOP_CALL_1(iemOp_fst_m32r, bRm);
            case 3: return FNIEMOP_CALL_1(iemOp_fstp_m32r, bRm);
            case 4: return FNIEMOP_CALL_1(iemOp_fldenv, bRm);
            case 5: return FNIEMOP_CALL_1(iemOp_fldcw, bRm);
            case 6: return FNIEMOP_CALL_1(iemOp_fnstenv, bRm);
            case 7: return FNIEMOP_CALL_1(iemOp_fnstcw, bRm);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
13771
13772
/** Opcode 0xda 11/0.
 * FCMOVB ST(0),ST(i) - copies ST(i) to ST(0) if CF is set (below). */
FNIEMOP_DEF_1(iemOp_fcmovb_stN, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fcmovb st0,stN");
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    IEM_MC_BEGIN(0, 1);
    IEM_MC_LOCAL(PCRTFLOAT80U, pr80ValueN);

    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();

    /* Both ST(i) and ST(0) must be non-empty; otherwise stack underflow. */
    IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(pr80ValueN, bRm & X86_MODRM_RM_MASK, 0)
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF)
            IEM_MC_STORE_FPUREG_R80_SRC_REF(0, pr80ValueN);
        IEM_MC_ENDIF();
        IEM_MC_UPDATE_FPU_OPCODE_IP();
    IEM_MC_ELSE()
        IEM_MC_FPU_STACK_UNDERFLOW(0);
    IEM_MC_ENDIF();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
13799
13800
/** Opcode 0xda 11/1.
 * FCMOVE ST(0),ST(i) - copies ST(i) to ST(0) if ZF is set (equal). */
FNIEMOP_DEF_1(iemOp_fcmove_stN, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fcmove st0,stN");
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    IEM_MC_BEGIN(0, 1);
    IEM_MC_LOCAL(PCRTFLOAT80U, pr80ValueN);

    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();

    IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(pr80ValueN, bRm & X86_MODRM_RM_MASK, 0)
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF)
            IEM_MC_STORE_FPUREG_R80_SRC_REF(0, pr80ValueN);
        IEM_MC_ENDIF();
        IEM_MC_UPDATE_FPU_OPCODE_IP();
    IEM_MC_ELSE()
        IEM_MC_FPU_STACK_UNDERFLOW(0);
    IEM_MC_ENDIF();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
13827
13828
/** Opcode 0xda 11/2.
 * FCMOVBE ST(0),ST(i) - copies ST(i) to ST(0) if CF or ZF is set (below or equal). */
FNIEMOP_DEF_1(iemOp_fcmovbe_stN, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fcmovbe st0,stN");
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    IEM_MC_BEGIN(0, 1);
    IEM_MC_LOCAL(PCRTFLOAT80U, pr80ValueN);

    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();

    IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(pr80ValueN, bRm & X86_MODRM_RM_MASK, 0)
        IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF)
            IEM_MC_STORE_FPUREG_R80_SRC_REF(0, pr80ValueN);
        IEM_MC_ENDIF();
        IEM_MC_UPDATE_FPU_OPCODE_IP();
    IEM_MC_ELSE()
        IEM_MC_FPU_STACK_UNDERFLOW(0);
    IEM_MC_ENDIF();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
13855
13856
/** Opcode 0xda 11/3.
 * FCMOVU ST(0),ST(i) - copies ST(i) to ST(0) if PF is set (unordered). */
FNIEMOP_DEF_1(iemOp_fcmovu_stN, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fcmovu st0,stN");
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    IEM_MC_BEGIN(0, 1);
    IEM_MC_LOCAL(PCRTFLOAT80U, pr80ValueN);

    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();

    IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(pr80ValueN, bRm & X86_MODRM_RM_MASK, 0)
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF)
            IEM_MC_STORE_FPUREG_R80_SRC_REF(0, pr80ValueN);
        IEM_MC_ENDIF();
        IEM_MC_UPDATE_FPU_OPCODE_IP();
    IEM_MC_ELSE()
        IEM_MC_FPU_STACK_UNDERFLOW(0);
    IEM_MC_ENDIF();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
13883
13884
13885/**
13886 * Common worker for FPU instructions working on ST0 and STn, only affecting
13887 * flags, and popping twice when done.
13888 *
13889 * @param pfnAImpl Pointer to the instruction implementation (assembly).
13890 */
13891FNIEMOP_DEF_1(iemOpHlpFpuNoStore_st0_stN_pop_pop, PFNIEMAIMPLFPUR80FSW, pfnAImpl)
13892{
13893 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
13894
13895 IEM_MC_BEGIN(3, 1);
13896 IEM_MC_LOCAL(uint16_t, u16Fsw);
13897 IEM_MC_ARG_LOCAL_REF(uint16_t *, pu16Fsw, u16Fsw, 0);
13898 IEM_MC_ARG(PCRTFLOAT80U, pr80Value1, 1);
13899 IEM_MC_ARG(PCRTFLOAT80U, pr80Value2, 2);
13900
13901 IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
13902 IEM_MC_MAYBE_RAISE_FPU_XCPT();
13903 IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(pr80Value1, 0, pr80Value2, 1)
13904 IEM_MC_CALL_FPU_AIMPL_3(pfnAImpl, pu16Fsw, pr80Value1, pr80Value2);
13905 IEM_MC_UPDATE_FSW_THEN_POP_POP(u16Fsw);
13906 IEM_MC_ELSE()
13907 IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP();
13908 IEM_MC_ENDIF();
13909 IEM_MC_USED_FPU();
13910 IEM_MC_ADVANCE_RIP();
13911
13912 IEM_MC_END();
13913 return VINF_SUCCESS;
13914}
13915
13916
/** Opcode 0xda 0xe9. FUCOMPP: unordered compare ST0 with ST1, pop twice. */
FNIEMOP_DEF(iemOp_fucompp)
{
    IEMOP_MNEMONIC("fucompp st0,stN");
    return FNIEMOP_CALL_1(iemOpHlpFpuNoStore_st0_stN_pop_pop, iemAImpl_fucom_r80_by_r80);
}
13923
13924
13925/**
13926 * Common worker for FPU instructions working on ST0 and an m32i, and storing
13927 * the result in ST0.
13928 *
13929 * @param pfnAImpl Pointer to the instruction implementation (assembly).
13930 */
13931FNIEMOP_DEF_2(iemOpHlpFpu_st0_m32i, uint8_t, bRm, PFNIEMAIMPLFPUI32, pfnAImpl)
13932{
13933 IEM_MC_BEGIN(3, 3);
13934 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
13935 IEM_MC_LOCAL(IEMFPURESULT, FpuRes);
13936 IEM_MC_LOCAL(int32_t, i32Val2);
13937 IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes, FpuRes, 0);
13938 IEM_MC_ARG(PCRTFLOAT80U, pr80Value1, 1);
13939 IEM_MC_ARG_LOCAL_REF(int32_t const *, pi32Val2, i32Val2, 2);
13940
13941 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
13942 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
13943
13944 IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
13945 IEM_MC_MAYBE_RAISE_FPU_XCPT();
13946 IEM_MC_FETCH_MEM_I32(i32Val2, pIemCpu->iEffSeg, GCPtrEffSrc);
13947
13948 IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value1, 0)
13949 IEM_MC_CALL_FPU_AIMPL_3(pfnAImpl, pFpuRes, pr80Value1, pi32Val2);
13950 IEM_MC_STORE_FPU_RESULT(FpuRes, 0);
13951 IEM_MC_ELSE()
13952 IEM_MC_FPU_STACK_UNDERFLOW(0);
13953 IEM_MC_ENDIF();
13954 IEM_MC_USED_FPU();
13955 IEM_MC_ADVANCE_RIP();
13956
13957 IEM_MC_END();
13958 return VINF_SUCCESS;
13959}
13960
13961
/** Opcode 0xda !11/0. FIADD m32i: ST0 += (int32) [mem]. */
FNIEMOP_DEF_1(iemOp_fiadd_m32i, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fiadd m32i");
    return FNIEMOP_CALL_2(iemOpHlpFpu_st0_m32i, bRm, iemAImpl_fiadd_r80_by_i32);
}
13968
13969
/** Opcode 0xda !11/1. FIMUL m32i: ST0 *= (int32) [mem]. */
FNIEMOP_DEF_1(iemOp_fimul_m32i, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fimul m32i");
    return FNIEMOP_CALL_2(iemOpHlpFpu_st0_m32i, bRm, iemAImpl_fimul_r80_by_i32);
}
13976
13977
/** Opcode 0xda !11/2. FICOM m32i: compare ST0 with (int32) [mem], set FSW only. */
FNIEMOP_DEF_1(iemOp_ficom_m32i, uint8_t, bRm)
{
    IEMOP_MNEMONIC("ficom st0,m32i");

    IEM_MC_BEGIN(3, 3);
    IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
    IEM_MC_LOCAL(uint16_t, u16Fsw);
    IEM_MC_LOCAL(int32_t, i32Val2);
    IEM_MC_ARG_LOCAL_REF(uint16_t *, pu16Fsw, u16Fsw, 0);
    IEM_MC_ARG(PCRTFLOAT80U, pr80Value1, 1);
    IEM_MC_ARG_LOCAL_REF(int32_t const *, pi32Val2, i32Val2, 2);

    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();
    IEM_MC_FETCH_MEM_I32(i32Val2, pIemCpu->iEffSeg, GCPtrEffSrc);

    /* No register is written; only FSW (condition codes) is updated.
       UINT8_MAX = no destination register for the underflow bookkeeping. */
    IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value1, 0)
        IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_ficom_r80_by_i32, pu16Fsw, pr80Value1, pi32Val2);
        IEM_MC_UPDATE_FSW_WITH_MEM_OP(u16Fsw, pIemCpu->iEffSeg, GCPtrEffSrc);
    IEM_MC_ELSE()
        IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(UINT8_MAX, pIemCpu->iEffSeg, GCPtrEffSrc);
    IEM_MC_ENDIF();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
14010
14011
/** Opcode 0xda !11/3. FICOMP m32i: like FICOM but pops ST0 afterwards. */
FNIEMOP_DEF_1(iemOp_ficomp_m32i, uint8_t, bRm)
{
    IEMOP_MNEMONIC("ficomp st0,m32i");

    IEM_MC_BEGIN(3, 3);
    IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
    IEM_MC_LOCAL(uint16_t, u16Fsw);
    IEM_MC_LOCAL(int32_t, i32Val2);
    IEM_MC_ARG_LOCAL_REF(uint16_t *, pu16Fsw, u16Fsw, 0);
    IEM_MC_ARG(PCRTFLOAT80U, pr80Value1, 1);
    IEM_MC_ARG_LOCAL_REF(int32_t const *, pi32Val2, i32Val2, 2);

    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();
    IEM_MC_FETCH_MEM_I32(i32Val2, pIemCpu->iEffSeg, GCPtrEffSrc);

    /* Same as FICOM except the FSW update / underflow path also pops the stack. */
    IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value1, 0)
        IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_ficom_r80_by_i32, pu16Fsw, pr80Value1, pi32Val2);
        IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(u16Fsw, pIemCpu->iEffSeg, GCPtrEffSrc);
    IEM_MC_ELSE()
        IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(UINT8_MAX, pIemCpu->iEffSeg, GCPtrEffSrc);
    IEM_MC_ENDIF();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
14044
14045
/** Opcode 0xda !11/4. FISUB m32i: ST0 -= (int32) [mem]. */
FNIEMOP_DEF_1(iemOp_fisub_m32i, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fisub m32i");
    return FNIEMOP_CALL_2(iemOpHlpFpu_st0_m32i, bRm, iemAImpl_fisub_r80_by_i32);
}
14052
14053
/** Opcode 0xda !11/5. FISUBR m32i: ST0 = (int32) [mem] - ST0 (reversed operands). */
FNIEMOP_DEF_1(iemOp_fisubr_m32i, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fisubr m32i");
    return FNIEMOP_CALL_2(iemOpHlpFpu_st0_m32i, bRm, iemAImpl_fisubr_r80_by_i32);
}
14060
14061
/** Opcode 0xda !11/6. FIDIV m32i: ST0 /= (int32) [mem]. */
FNIEMOP_DEF_1(iemOp_fidiv_m32i, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fidiv m32i");
    return FNIEMOP_CALL_2(iemOpHlpFpu_st0_m32i, bRm, iemAImpl_fidiv_r80_by_i32);
}
14068
14069
/** Opcode 0xda !11/7. FIDIVR m32i: ST0 = (int32) [mem] / ST0 (reversed operands). */
FNIEMOP_DEF_1(iemOp_fidivr_m32i, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fidivr m32i");
    return FNIEMOP_CALL_2(iemOpHlpFpu_st0_m32i, bRm, iemAImpl_fidivr_r80_by_i32);
}
14076
14077
/** Opcode 0xda. Escape group F2: dispatches on ModR/M mod and reg fields. */
FNIEMOP_DEF(iemOp_EscF2)
{
    /* Remember the FPU opcode byte position for FOP tracking. */
    pIemCpu->offFpuOpcode = pIemCpu->offOpcode - 1;
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* Register form: FCMOVcc and FUCOMPP (0xe9 only). */
        switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
        {
            case 0: return FNIEMOP_CALL_1(iemOp_fcmovb_stN,  bRm);
            case 1: return FNIEMOP_CALL_1(iemOp_fcmove_stN,  bRm);
            case 2: return FNIEMOP_CALL_1(iemOp_fcmovbe_stN, bRm);
            case 3: return FNIEMOP_CALL_1(iemOp_fcmovu_stN,  bRm);
            case 4: return IEMOP_RAISE_INVALID_OPCODE();
            case 5:
                if (bRm == 0xe9)
                    return FNIEMOP_CALL(iemOp_fucompp);
                return IEMOP_RAISE_INVALID_OPCODE();
            case 6: return IEMOP_RAISE_INVALID_OPCODE();
            case 7: return IEMOP_RAISE_INVALID_OPCODE();
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /* Memory form: m32i integer arithmetic/compare. */
        switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
        {
            case 0: return FNIEMOP_CALL_1(iemOp_fiadd_m32i,  bRm);
            case 1: return FNIEMOP_CALL_1(iemOp_fimul_m32i,  bRm);
            case 2: return FNIEMOP_CALL_1(iemOp_ficom_m32i,  bRm);
            case 3: return FNIEMOP_CALL_1(iemOp_ficomp_m32i, bRm);
            case 4: return FNIEMOP_CALL_1(iemOp_fisub_m32i,  bRm);
            case 5: return FNIEMOP_CALL_1(iemOp_fisubr_m32i, bRm);
            case 6: return FNIEMOP_CALL_1(iemOp_fidiv_m32i,  bRm);
            case 7: return FNIEMOP_CALL_1(iemOp_fidivr_m32i, bRm);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
14117
14118
/** Opcode 0xdb !11/0. FILD m32i: push (int32) [mem] converted to R80 onto the stack. */
FNIEMOP_DEF_1(iemOp_fild_m32i, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fild m32i");

    IEM_MC_BEGIN(2, 3);
    IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
    IEM_MC_LOCAL(IEMFPURESULT, FpuRes);
    IEM_MC_LOCAL(int32_t, i32Val);
    IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes, FpuRes, 0);
    IEM_MC_ARG_LOCAL_REF(int32_t const *, pi32Val, i32Val, 1);

    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();
    IEM_MC_FETCH_MEM_I32(i32Val, pIemCpu->iEffSeg, GCPtrEffSrc);

    /* The push target is ST7 before the push (TOP-1 after); it must be empty. */
    IEM_MC_IF_FPUREG_IS_EMPTY(7)
        IEM_MC_CALL_FPU_AIMPL_2(iemAImpl_fild_i32_to_r80, pFpuRes, pi32Val);
        IEM_MC_PUSH_FPU_RESULT_MEM_OP(FpuRes, pIemCpu->iEffSeg, GCPtrEffSrc);
    IEM_MC_ELSE()
        IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(pIemCpu->iEffSeg, GCPtrEffSrc);
    IEM_MC_ENDIF();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
14150
14151
/** Opcode 0xdb !11/1. FISTTP m32i: store ST0 as int32 with truncation, then pop. */
FNIEMOP_DEF_1(iemOp_fisttp_m32i, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fisttp m32i");
    IEM_MC_BEGIN(3, 2);
    IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
    IEM_MC_LOCAL(uint16_t, u16Fsw);
    IEM_MC_ARG_LOCAL_REF(uint16_t *, pu16Fsw, u16Fsw, 0);
    IEM_MC_ARG(int32_t *, pi32Dst, 1);
    IEM_MC_ARG(PCRTFLOAT80U, pr80Value, 2);

    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();

    /* Map the destination for writing; commit is conditional on the FSW result. */
    IEM_MC_MEM_MAP(pi32Dst, IEM_ACCESS_DATA_W, pIemCpu->iEffSeg, GCPtrEffDst, 1 /*arg*/);
    IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value, 0)
        IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fistt_r80_to_i32, pu16Fsw, pi32Dst, pr80Value);
        IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(pi32Dst, IEM_ACCESS_DATA_W, u16Fsw);
        IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(u16Fsw, pIemCpu->iEffSeg, GCPtrEffDst);
    IEM_MC_ELSE()
        /* Empty ST0: if the invalid-op exception is masked, store the
           integer-indefinite value; either way record underflow and pop. */
        IEM_MC_IF_FCW_IM()
            IEM_MC_STORE_MEM_I32_CONST_BY_REF(pi32Dst, INT32_MIN /* (integer indefinite) */);
            IEM_MC_MEM_COMMIT_AND_UNMAP(pi32Dst, IEM_ACCESS_DATA_W);
        IEM_MC_ENDIF();
        IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(UINT8_MAX, pIemCpu->iEffSeg, GCPtrEffDst);
    IEM_MC_ENDIF();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
14186
14187
/** Opcode 0xdb !11/2. FIST m32i: store ST0 as int32 (current rounding), no pop. */
FNIEMOP_DEF_1(iemOp_fist_m32i, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fist m32i");
    IEM_MC_BEGIN(3, 2);
    IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
    IEM_MC_LOCAL(uint16_t, u16Fsw);
    IEM_MC_ARG_LOCAL_REF(uint16_t *, pu16Fsw, u16Fsw, 0);
    IEM_MC_ARG(int32_t *, pi32Dst, 1);
    IEM_MC_ARG(PCRTFLOAT80U, pr80Value, 2);

    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();

    IEM_MC_MEM_MAP(pi32Dst, IEM_ACCESS_DATA_W, pIemCpu->iEffSeg, GCPtrEffDst, 1 /*arg*/);
    IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value, 0)
        IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fist_r80_to_i32, pu16Fsw, pi32Dst, pr80Value);
        IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(pi32Dst, IEM_ACCESS_DATA_W, u16Fsw);
        IEM_MC_UPDATE_FSW_WITH_MEM_OP(u16Fsw, pIemCpu->iEffSeg, GCPtrEffDst);
    IEM_MC_ELSE()
        /* Empty ST0 with IM masked: store integer indefinite; no pop for FIST. */
        IEM_MC_IF_FCW_IM()
            IEM_MC_STORE_MEM_I32_CONST_BY_REF(pi32Dst, INT32_MIN /* (integer indefinite) */);
            IEM_MC_MEM_COMMIT_AND_UNMAP(pi32Dst, IEM_ACCESS_DATA_W);
        IEM_MC_ENDIF();
        IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(UINT8_MAX, pIemCpu->iEffSeg, GCPtrEffDst);
    IEM_MC_ENDIF();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
14222
14223
14224/** Opcode 0xdb !11/3. */
14225FNIEMOP_DEF_1(iemOp_fistp_m32i, uint8_t, bRm)
14226{
14227 IEMOP_MNEMONIC("fisttp m32i");
14228 IEM_MC_BEGIN(3, 2);
14229 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
14230 IEM_MC_LOCAL(uint16_t, u16Fsw);
14231 IEM_MC_ARG_LOCAL_REF(uint16_t *, pu16Fsw, u16Fsw, 0);
14232 IEM_MC_ARG(int32_t *, pi32Dst, 1);
14233 IEM_MC_ARG(PCRTFLOAT80U, pr80Value, 2);
14234
14235 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
14236 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
14237 IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
14238 IEM_MC_MAYBE_RAISE_FPU_XCPT();
14239
14240 IEM_MC_MEM_MAP(pi32Dst, IEM_ACCESS_DATA_W, pIemCpu->iEffSeg, GCPtrEffDst, 1 /*arg*/);
14241 IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value, 0)
14242 IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fist_r80_to_i32, pu16Fsw, pi32Dst, pr80Value);
14243 IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(pi32Dst, IEM_ACCESS_DATA_W, u16Fsw);
14244 IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(u16Fsw, pIemCpu->iEffSeg, GCPtrEffDst);
14245 IEM_MC_ELSE()
14246 IEM_MC_IF_FCW_IM()
14247 IEM_MC_STORE_MEM_I32_CONST_BY_REF(pi32Dst, INT32_MIN /* (integer indefinite) */);
14248 IEM_MC_MEM_COMMIT_AND_UNMAP(pi32Dst, IEM_ACCESS_DATA_W);
14249 IEM_MC_ENDIF();
14250 IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(UINT8_MAX, pIemCpu->iEffSeg, GCPtrEffDst);
14251 IEM_MC_ENDIF();
14252 IEM_MC_USED_FPU();
14253 IEM_MC_ADVANCE_RIP();
14254
14255 IEM_MC_END();
14256 return VINF_SUCCESS;
14257}
14258
14259
/** Opcode 0xdb !11/5. FLD m80r: push an 80-bit real from memory onto the stack. */
FNIEMOP_DEF_1(iemOp_fld_m80r, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fld m80r");

    IEM_MC_BEGIN(2, 3);
    IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
    IEM_MC_LOCAL(IEMFPURESULT, FpuRes);
    IEM_MC_LOCAL(RTFLOAT80U, r80Val);
    IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes, FpuRes, 0);
    IEM_MC_ARG_LOCAL_REF(PCRTFLOAT80U, pr80Val, r80Val, 1);

    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();
    IEM_MC_FETCH_MEM_R80(r80Val, pIemCpu->iEffSeg, GCPtrEffSrc);

    /* The push target (ST7 before TOP decrement) must be empty, else overflow. */
    IEM_MC_IF_FPUREG_IS_EMPTY(7)
        IEM_MC_CALL_FPU_AIMPL_2(iemAImpl_fld_r80_from_r80, pFpuRes, pr80Val);
        IEM_MC_PUSH_FPU_RESULT_MEM_OP(FpuRes, pIemCpu->iEffSeg, GCPtrEffSrc);
    IEM_MC_ELSE()
        IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(pIemCpu->iEffSeg, GCPtrEffSrc);
    IEM_MC_ENDIF();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
14291
14292
/** Opcode 0xdb !11/7. FSTP m80r: store ST0 to an 80-bit real in memory, then pop. */
FNIEMOP_DEF_1(iemOp_fstp_m80r, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fstp m80r");
    IEM_MC_BEGIN(3, 2);
    IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
    IEM_MC_LOCAL(uint16_t, u16Fsw);
    IEM_MC_ARG_LOCAL_REF(uint16_t *, pu16Fsw, u16Fsw, 0);
    IEM_MC_ARG(PRTFLOAT80U, pr80Dst, 1);
    IEM_MC_ARG(PCRTFLOAT80U, pr80Value, 2);

    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();

    IEM_MC_MEM_MAP(pr80Dst, IEM_ACCESS_DATA_W, pIemCpu->iEffSeg, GCPtrEffDst, 1 /*arg*/);
    IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value, 0)
        IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fst_r80_to_r80, pu16Fsw, pr80Dst, pr80Value);
        IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(pr80Dst, IEM_ACCESS_DATA_W, u16Fsw);
        IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(u16Fsw, pIemCpu->iEffSeg, GCPtrEffDst);
    IEM_MC_ELSE()
        /* Empty ST0 with IM masked: store the negative QNaN (real indefinite). */
        IEM_MC_IF_FCW_IM()
            IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(pr80Dst);
            IEM_MC_MEM_COMMIT_AND_UNMAP(pr80Dst, IEM_ACCESS_DATA_W);
        IEM_MC_ENDIF();
        IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(UINT8_MAX, pIemCpu->iEffSeg, GCPtrEffDst);
    IEM_MC_ENDIF();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
14327
14328
/** Opcode 0xdb 11/0. FCMOVNB ST0,ST(i): conditional move when CF is clear. */
FNIEMOP_DEF_1(iemOp_fcmovnb_stN, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fcmovnb st0,stN");
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    IEM_MC_BEGIN(0, 1);
    IEM_MC_LOCAL(PCRTFLOAT80U, pr80ValueN);

    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();

    /* Both ST0 and ST(i) must be non-empty, else stack underflow on ST0. */
    IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(pr80ValueN, bRm & X86_MODRM_RM_MASK, 0)
        IEM_MC_IF_EFL_BIT_NOT_SET(X86_EFL_CF)
            IEM_MC_STORE_FPUREG_R80_SRC_REF(0, pr80ValueN);
        IEM_MC_ENDIF();
        IEM_MC_UPDATE_FPU_OPCODE_IP();
    IEM_MC_ELSE()
        IEM_MC_FPU_STACK_UNDERFLOW(0);
    IEM_MC_ENDIF();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
14355
14356
/** Opcode 0xdb 11/1. FCMOVNE ST0,ST(i): conditional move when ZF is clear. */
FNIEMOP_DEF_1(iemOp_fcmovne_stN, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fcmovne st0,stN");
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    IEM_MC_BEGIN(0, 1);
    IEM_MC_LOCAL(PCRTFLOAT80U, pr80ValueN);

    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();

    /* Both ST0 and ST(i) must be non-empty, else stack underflow on ST0. */
    IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(pr80ValueN, bRm & X86_MODRM_RM_MASK, 0)
        IEM_MC_IF_EFL_BIT_NOT_SET(X86_EFL_ZF)
            IEM_MC_STORE_FPUREG_R80_SRC_REF(0, pr80ValueN);
        IEM_MC_ENDIF();
        IEM_MC_UPDATE_FPU_OPCODE_IP();
    IEM_MC_ELSE()
        IEM_MC_FPU_STACK_UNDERFLOW(0);
    IEM_MC_ENDIF();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
14383
14384
/** Opcode 0xdb 11/2. FCMOVNBE ST0,ST(i): conditional move when both CF and ZF are clear. */
FNIEMOP_DEF_1(iemOp_fcmovnbe_stN, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fcmovnbe st0,stN");
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    IEM_MC_BEGIN(0, 1);
    IEM_MC_LOCAL(PCRTFLOAT80U, pr80ValueN);

    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();

    /* Both ST0 and ST(i) must be non-empty, else stack underflow on ST0. */
    IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(pr80ValueN, bRm & X86_MODRM_RM_MASK, 0)
        IEM_MC_IF_EFL_NO_BITS_SET(X86_EFL_CF | X86_EFL_ZF)
            IEM_MC_STORE_FPUREG_R80_SRC_REF(0, pr80ValueN);
        IEM_MC_ENDIF();
        IEM_MC_UPDATE_FPU_OPCODE_IP();
    IEM_MC_ELSE()
        IEM_MC_FPU_STACK_UNDERFLOW(0);
    IEM_MC_ENDIF();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
14411
14412
/** Opcode 0xdb 11/3. FCMOVNU ST0,ST(i): conditional move when PF is clear (not unordered). */
FNIEMOP_DEF_1(iemOp_fcmovnnu_stN, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fcmovnnu st0,stN");
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    IEM_MC_BEGIN(0, 1);
    IEM_MC_LOCAL(PCRTFLOAT80U, pr80ValueN);

    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();

    /* Both ST0 and ST(i) must be non-empty, else stack underflow on ST0. */
    IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(pr80ValueN, bRm & X86_MODRM_RM_MASK, 0)
        IEM_MC_IF_EFL_BIT_NOT_SET(X86_EFL_PF)
            IEM_MC_STORE_FPUREG_R80_SRC_REF(0, pr80ValueN);
        IEM_MC_ENDIF();
        IEM_MC_UPDATE_FPU_OPCODE_IP();
    IEM_MC_ELSE()
        IEM_MC_FPU_STACK_UNDERFLOW(0);
    IEM_MC_ENDIF();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
14439
14440
/** Opcode 0xdb 0xe0. FNENI: 8087-only interrupt enable; a no-op on later FPUs. */
FNIEMOP_DEF(iemOp_fneni)
{
    IEMOP_MNEMONIC("fneni (8087/ign)");
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    IEM_MC_BEGIN(0,0);
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
14452
14453
/** Opcode 0xdb 0xe1. FNDISI: 8087-only interrupt disable; a no-op on later FPUs. */
FNIEMOP_DEF(iemOp_fndisi)
{
    IEMOP_MNEMONIC("fndisi (8087/ign)");
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    IEM_MC_BEGIN(0,0);
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
14465
14466
/** Opcode 0xdb 0xe2. FNCLEX: clear FPU exception flags without checking pending ones. */
FNIEMOP_DEF(iemOp_fnclex)
{
    IEMOP_MNEMONIC("fnclex");
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    IEM_MC_BEGIN(0,0);
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_CLEAR_FSW_EX();
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
14480
14481
/** Opcode 0xdb 0xe3. FNINIT: reinitialize the FPU without checking pending exceptions. */
FNIEMOP_DEF(iemOp_fninit)
{
    IEMOP_MNEMONIC("fninit");
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    /* Deferred to the C implementation; fCheckXcpts=false as this is FNINIT, not FINIT. */
    return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_finit, false /*fCheckXcpts*/);
}
14489
14490
/** Opcode 0xdb 0xe4. FNSETPM: 80287-only 'set protected mode'; ignored on later FPUs. */
FNIEMOP_DEF(iemOp_fnsetpm)
{
    IEMOP_MNEMONIC("fnsetpm (80287/ign)");   /* set protected mode on fpu. */
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    IEM_MC_BEGIN(0,0);
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
14502
14503
/** Opcode 0xdb 0xe5. FRSTPM: 80287XL-only 'reset protected mode'; #UD on newer CPUs. */
FNIEMOP_DEF(iemOp_frstpm)
{
    IEMOP_MNEMONIC("frstpm (80287XL/ign)"); /* reset pm, back to real mode. */
#if 0 /* #UDs on newer CPUs */
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    IEM_MC_BEGIN(0,0);
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
#else
    return IEMOP_RAISE_INVALID_OPCODE();
#endif
}
14519
14520
/** Opcode 0xdb 11/5. FUCOMI ST0,ST(i): unordered compare setting EFLAGS, no pop. */
FNIEMOP_DEF_1(iemOp_fucomi_stN, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fucomi st0,stN");
    return IEM_MC_DEFER_TO_CIMPL_3(iemCImpl_fcomi_fucomi, bRm & X86_MODRM_RM_MASK, iemAImpl_fucomi_r80_by_r80, false /*fPop*/);
}
14527
14528
/** Opcode 0xdb 11/6. FCOMI ST0,ST(i): ordered compare setting EFLAGS, no pop. */
FNIEMOP_DEF_1(iemOp_fcomi_stN, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fcomi st0,stN");
    return IEM_MC_DEFER_TO_CIMPL_3(iemCImpl_fcomi_fucomi, bRm & X86_MODRM_RM_MASK, iemAImpl_fcomi_r80_by_r80, false /*fPop*/);
}
14535
14536
14537/** Opcode 0xdb. */
14538FNIEMOP_DEF(iemOp_EscF3)
14539{
14540 pIemCpu->offFpuOpcode = pIemCpu->offOpcode - 1;
14541 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
14542 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
14543 {
14544 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
14545 {
14546 case 0: FNIEMOP_CALL_1(iemOp_fcmovnb_stN, bRm);
14547 case 1: FNIEMOP_CALL_1(iemOp_fcmovne_stN, bRm);
14548 case 2: FNIEMOP_CALL_1(iemOp_fcmovnbe_stN, bRm);
14549 case 3: FNIEMOP_CALL_1(iemOp_fcmovnnu_stN, bRm);
14550 case 4:
14551 switch (bRm)
14552 {
14553 case 0xe0: return FNIEMOP_CALL(iemOp_fneni);
14554 case 0xe1: return FNIEMOP_CALL(iemOp_fndisi);
14555 case 0xe2: return FNIEMOP_CALL(iemOp_fnclex);
14556 case 0xe3: return FNIEMOP_CALL(iemOp_fninit);
14557 case 0xe4: return FNIEMOP_CALL(iemOp_fnsetpm);
14558 case 0xe5: return FNIEMOP_CALL(iemOp_frstpm);
14559 case 0xe6: return IEMOP_RAISE_INVALID_OPCODE();
14560 case 0xe7: return IEMOP_RAISE_INVALID_OPCODE();
14561 IEM_NOT_REACHED_DEFAULT_CASE_RET();
14562 }
14563 break;
14564 case 5: return FNIEMOP_CALL_1(iemOp_fucomi_stN, bRm);
14565 case 6: return FNIEMOP_CALL_1(iemOp_fcomi_stN, bRm);
14566 case 7: return IEMOP_RAISE_INVALID_OPCODE();
14567 IEM_NOT_REACHED_DEFAULT_CASE_RET();
14568 }
14569 }
14570 else
14571 {
14572 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
14573 {
14574 case 0: return FNIEMOP_CALL_1(iemOp_fild_m32i, bRm);
14575 case 1: return FNIEMOP_CALL_1(iemOp_fisttp_m32i,bRm);
14576 case 2: return FNIEMOP_CALL_1(iemOp_fist_m32i, bRm);
14577 case 3: return FNIEMOP_CALL_1(iemOp_fistp_m32i, bRm);
14578 case 4: return IEMOP_RAISE_INVALID_OPCODE();
14579 case 5: return FNIEMOP_CALL_1(iemOp_fld_m80r, bRm);
14580 case 6: return IEMOP_RAISE_INVALID_OPCODE();
14581 case 7: return FNIEMOP_CALL_1(iemOp_fstp_m80r, bRm);
14582 IEM_NOT_REACHED_DEFAULT_CASE_RET();
14583 }
14584 }
14585}
14586
14587
14588/**
14589 * Common worker for FPU instructions working on STn and ST0, and storing the
14590 * result in STn unless IE, DE or ZE was raised.
14591 *
14592 * @param pfnAImpl Pointer to the instruction implementation (assembly).
14593 */
14594FNIEMOP_DEF_2(iemOpHlpFpu_stN_st0, uint8_t, bRm, PFNIEMAIMPLFPUR80, pfnAImpl)
14595{
14596 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
14597
14598 IEM_MC_BEGIN(3, 1);
14599 IEM_MC_LOCAL(IEMFPURESULT, FpuRes);
14600 IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes, FpuRes, 0);
14601 IEM_MC_ARG(PCRTFLOAT80U, pr80Value1, 1);
14602 IEM_MC_ARG(PCRTFLOAT80U, pr80Value2, 2);
14603
14604 IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
14605 IEM_MC_MAYBE_RAISE_FPU_XCPT();
14606
14607 IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(pr80Value1, bRm & X86_MODRM_RM_MASK, pr80Value2, 0)
14608 IEM_MC_CALL_FPU_AIMPL_3(pfnAImpl, pFpuRes, pr80Value1, pr80Value2);
14609 IEM_MC_STORE_FPU_RESULT(FpuRes, bRm & X86_MODRM_RM_MASK);
14610 IEM_MC_ELSE()
14611 IEM_MC_FPU_STACK_UNDERFLOW(bRm & X86_MODRM_RM_MASK);
14612 IEM_MC_ENDIF();
14613 IEM_MC_USED_FPU();
14614 IEM_MC_ADVANCE_RIP();
14615
14616 IEM_MC_END();
14617 return VINF_SUCCESS;
14618}
14619
14620
/** Opcode 0xdc 11/0. FADD ST(i),ST0. */
FNIEMOP_DEF_1(iemOp_fadd_stN_st0, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fadd stN,st0");
    return FNIEMOP_CALL_2(iemOpHlpFpu_stN_st0, bRm, iemAImpl_fadd_r80_by_r80);
}
14627
14628
/** Opcode 0xdc 11/1. FMUL ST(i),ST0. */
FNIEMOP_DEF_1(iemOp_fmul_stN_st0, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fmul stN,st0");
    return FNIEMOP_CALL_2(iemOpHlpFpu_stN_st0, bRm, iemAImpl_fmul_r80_by_r80);
}
14635
14636
/** Opcode 0xdc 11/4. FSUBR ST(i),ST0. */
FNIEMOP_DEF_1(iemOp_fsubr_stN_st0, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fsubr stN,st0");
    return FNIEMOP_CALL_2(iemOpHlpFpu_stN_st0, bRm, iemAImpl_fsubr_r80_by_r80);
}
14643
14644
/** Opcode 0xdc 11/5. FSUB ST(i),ST0. */
FNIEMOP_DEF_1(iemOp_fsub_stN_st0, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fsub stN,st0");
    return FNIEMOP_CALL_2(iemOpHlpFpu_stN_st0, bRm, iemAImpl_fsub_r80_by_r80);
}
14651
14652
/** Opcode 0xdc 11/6. FDIVR ST(i),ST0. */
FNIEMOP_DEF_1(iemOp_fdivr_stN_st0, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fdivr stN,st0");
    return FNIEMOP_CALL_2(iemOpHlpFpu_stN_st0, bRm, iemAImpl_fdivr_r80_by_r80);
}
14659
14660
/** Opcode 0xdc 11/7. FDIV ST(i),ST0. */
FNIEMOP_DEF_1(iemOp_fdiv_stN_st0, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fdiv stN,st0");
    return FNIEMOP_CALL_2(iemOpHlpFpu_stN_st0, bRm, iemAImpl_fdiv_r80_by_r80);
}
14667
14668
14669/**
14670 * Common worker for FPU instructions working on ST0 and a 64-bit floating point
14671 * memory operand, and storing the result in ST0.
14672 *
14673 * @param pfnAImpl Pointer to the instruction implementation (assembly).
14674 */
14675FNIEMOP_DEF_2(iemOpHlpFpu_ST0_m64r, uint8_t, bRm, PFNIEMAIMPLFPUR64, pfnImpl)
14676{
14677 IEM_MC_BEGIN(3, 3);
14678 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
14679 IEM_MC_LOCAL(IEMFPURESULT, FpuRes);
14680 IEM_MC_LOCAL(RTFLOAT64U, r64Factor2);
14681 IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes, FpuRes, 0);
14682 IEM_MC_ARG(PCRTFLOAT80U, pr80Factor1, 1);
14683 IEM_MC_ARG_LOCAL_REF(PRTFLOAT64U, pr64Factor2, r64Factor2, 2);
14684
14685 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
14686 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
14687 IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
14688 IEM_MC_MAYBE_RAISE_FPU_XCPT();
14689
14690 IEM_MC_FETCH_MEM_R64(r64Factor2, pIemCpu->iEffSeg, GCPtrEffSrc);
14691 IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Factor1, 0)
14692 IEM_MC_CALL_FPU_AIMPL_3(pfnImpl, pFpuRes, pr80Factor1, pr64Factor2);
14693 IEM_MC_STORE_FPU_RESULT_MEM_OP(FpuRes, 0, pIemCpu->iEffSeg, GCPtrEffSrc);
14694 IEM_MC_ELSE()
14695 IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(0, pIemCpu->iEffSeg, GCPtrEffSrc);
14696 IEM_MC_ENDIF();
14697 IEM_MC_USED_FPU();
14698 IEM_MC_ADVANCE_RIP();
14699
14700 IEM_MC_END();
14701 return VINF_SUCCESS;
14702}
14703
14704
/** Opcode 0xdc !11/0. FADD m64r: ST0 += (double) [mem]. */
FNIEMOP_DEF_1(iemOp_fadd_m64r, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fadd m64r");
    return FNIEMOP_CALL_2(iemOpHlpFpu_ST0_m64r, bRm, iemAImpl_fadd_r80_by_r64);
}
14711
14712
/** Opcode 0xdc !11/1. FMUL m64r: ST0 *= (double) [mem]. */
FNIEMOP_DEF_1(iemOp_fmul_m64r, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fmul m64r");
    return FNIEMOP_CALL_2(iemOpHlpFpu_ST0_m64r, bRm, iemAImpl_fmul_r80_by_r64);
}
14719
14720
/** Opcode 0xdc !11/2. FCOM m64r: compare ST0 with (double) [mem], set FSW only. */
FNIEMOP_DEF_1(iemOp_fcom_m64r, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fcom st0,m64r");

    IEM_MC_BEGIN(3, 3);
    IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
    IEM_MC_LOCAL(uint16_t, u16Fsw);
    IEM_MC_LOCAL(RTFLOAT64U, r64Val2);
    IEM_MC_ARG_LOCAL_REF(uint16_t *, pu16Fsw, u16Fsw, 0);
    IEM_MC_ARG(PCRTFLOAT80U, pr80Value1, 1);
    IEM_MC_ARG_LOCAL_REF(PCRTFLOAT64U, pr64Val2, r64Val2, 2);

    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();
    IEM_MC_FETCH_MEM_R64(r64Val2, pIemCpu->iEffSeg, GCPtrEffSrc);

    /* No register written; only FSW condition codes. UINT8_MAX = no dest reg. */
    IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value1, 0)
        IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fcom_r80_by_r64, pu16Fsw, pr80Value1, pr64Val2);
        IEM_MC_UPDATE_FSW_WITH_MEM_OP(u16Fsw, pIemCpu->iEffSeg, GCPtrEffSrc);
    IEM_MC_ELSE()
        IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(UINT8_MAX, pIemCpu->iEffSeg, GCPtrEffSrc);
    IEM_MC_ENDIF();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
14753
14754
/** Opcode 0xdc !11/3. FCOMP m64r: like FCOM m64r but pops ST0 afterwards. */
FNIEMOP_DEF_1(iemOp_fcomp_m64r, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fcomp st0,m64r");

    IEM_MC_BEGIN(3, 3);
    IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
    IEM_MC_LOCAL(uint16_t, u16Fsw);
    IEM_MC_LOCAL(RTFLOAT64U, r64Val2);
    IEM_MC_ARG_LOCAL_REF(uint16_t *, pu16Fsw, u16Fsw, 0);
    IEM_MC_ARG(PCRTFLOAT80U, pr80Value1, 1);
    IEM_MC_ARG_LOCAL_REF(PCRTFLOAT64U, pr64Val2, r64Val2, 2);

    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();
    IEM_MC_FETCH_MEM_R64(r64Val2, pIemCpu->iEffSeg, GCPtrEffSrc);

    /* Same as FCOM except the FSW update / underflow path also pops the stack. */
    IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value1, 0)
        IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fcom_r80_by_r64, pu16Fsw, pr80Value1, pr64Val2);
        IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(u16Fsw, pIemCpu->iEffSeg, GCPtrEffSrc);
    IEM_MC_ELSE()
        IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(UINT8_MAX, pIemCpu->iEffSeg, GCPtrEffSrc);
    IEM_MC_ENDIF();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
14787
14788
/** Opcode 0xdc !11/4. FSUB m64r: ST0 -= (double) [mem]. */
FNIEMOP_DEF_1(iemOp_fsub_m64r, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fsub m64r");
    return FNIEMOP_CALL_2(iemOpHlpFpu_ST0_m64r, bRm, iemAImpl_fsub_r80_by_r64);
}
14795
14796
/** Opcode 0xdc !11/5. FSUBR m64r: ST0 = (double) [mem] - ST0 (reversed operands). */
FNIEMOP_DEF_1(iemOp_fsubr_m64r, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fsubr m64r");
    return FNIEMOP_CALL_2(iemOpHlpFpu_ST0_m64r, bRm, iemAImpl_fsubr_r80_by_r64);
}
14803
14804
/** Opcode 0xdc !11/6. FDIV m64r: ST0 /= (double) [mem]. */
FNIEMOP_DEF_1(iemOp_fdiv_m64r, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fdiv m64r");
    return FNIEMOP_CALL_2(iemOpHlpFpu_ST0_m64r, bRm, iemAImpl_fdiv_r80_by_r64);
}
14811
14812
/** Opcode 0xdc !11/7. FDIVR m64r: ST0 = (double) [mem] / ST0 (reversed operands). */
FNIEMOP_DEF_1(iemOp_fdivr_m64r, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fdivr m64r");
    return FNIEMOP_CALL_2(iemOpHlpFpu_ST0_m64r, bRm, iemAImpl_fdivr_r80_by_r64);
}
14819
14820
/** Opcode 0xdc. Escape group F4: dispatches on ModR/M mod and reg fields. */
FNIEMOP_DEF(iemOp_EscF4)
{
    /* Remember the FPU opcode byte position for FOP tracking. */
    pIemCpu->offFpuOpcode = pIemCpu->offOpcode - 1;
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* Register form: ST(i) is the destination. */
        switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
        {
            case 0: return FNIEMOP_CALL_1(iemOp_fadd_stN_st0,  bRm);
            case 1: return FNIEMOP_CALL_1(iemOp_fmul_stN_st0,  bRm);
            case 2: return FNIEMOP_CALL_1(iemOp_fcom_stN,      bRm); /* Marked reserved, intel behavior is that of FCOM ST(i). */
            case 3: return FNIEMOP_CALL_1(iemOp_fcomp_stN,     bRm); /* Marked reserved, intel behavior is that of FCOMP ST(i). */
            case 4: return FNIEMOP_CALL_1(iemOp_fsubr_stN_st0, bRm);
            case 5: return FNIEMOP_CALL_1(iemOp_fsub_stN_st0,  bRm);
            case 6: return FNIEMOP_CALL_1(iemOp_fdivr_stN_st0, bRm);
            case 7: return FNIEMOP_CALL_1(iemOp_fdiv_stN_st0,  bRm);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /* Memory form: m64r operand with ST0 as destination. */
        switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
        {
            case 0: return FNIEMOP_CALL_1(iemOp_fadd_m64r,  bRm);
            case 1: return FNIEMOP_CALL_1(iemOp_fmul_m64r,  bRm);
            case 2: return FNIEMOP_CALL_1(iemOp_fcom_m64r,  bRm);
            case 3: return FNIEMOP_CALL_1(iemOp_fcomp_m64r, bRm);
            case 4: return FNIEMOP_CALL_1(iemOp_fsub_m64r,  bRm);
            case 5: return FNIEMOP_CALL_1(iemOp_fsubr_m64r, bRm);
            case 6: return FNIEMOP_CALL_1(iemOp_fdiv_m64r,  bRm);
            case 7: return FNIEMOP_CALL_1(iemOp_fdivr_m64r, bRm);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
14857
14858
/** Opcode 0xdd !11/0. FLD m64real — convert the 64-bit real to 80-bit and push.
 * @sa iemOp_fld_m32r */
FNIEMOP_DEF_1(iemOp_fld_m64r, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fld m64r");

    IEM_MC_BEGIN(2, 3);
    IEM_MC_LOCAL(RTGCPTR,                   GCPtrEffSrc);
    IEM_MC_LOCAL(IEMFPURESULT,              FpuRes);
    IEM_MC_LOCAL(RTFLOAT64U,                r64Val);
    IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT,     pFpuRes,    FpuRes, 0);
    IEM_MC_ARG_LOCAL_REF(PCRTFLOAT64U,      pr64Val,    r64Val, 1);

    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();

    IEM_MC_FETCH_MEM_R64(r64Val, pIemCpu->iEffSeg, GCPtrEffSrc);
    /* A push requires the register that will become the new top, ST(7), to be free. */
    IEM_MC_IF_FPUREG_IS_EMPTY(7)
        IEM_MC_CALL_FPU_AIMPL_2(iemAImpl_fld_r64_to_r80, pFpuRes, pr64Val);
        IEM_MC_PUSH_FPU_RESULT_MEM_OP(FpuRes, pIemCpu->iEffSeg, GCPtrEffSrc);
    IEM_MC_ELSE()
        IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(pIemCpu->iEffSeg, GCPtrEffSrc);
    IEM_MC_ENDIF();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
14890
14891
/** Opcode 0xdd !11/1. FISTTP m64int — store ST(0) as int64 with truncation, then pop. */
FNIEMOP_DEF_1(iemOp_fisttp_m64i, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fisttp m64i");
    IEM_MC_BEGIN(3, 2);
    IEM_MC_LOCAL(RTGCPTR,               GCPtrEffDst);
    IEM_MC_LOCAL(uint16_t,              u16Fsw);
    IEM_MC_ARG_LOCAL_REF(uint16_t *,    pu16Fsw,    u16Fsw, 0);
    IEM_MC_ARG(int64_t *,               pi64Dst,            1);
    IEM_MC_ARG(PCRTFLOAT80U,            pr80Value,          2);

    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();

    /* Map the destination for writing before inspecting ST(0). */
    IEM_MC_MEM_MAP(pi64Dst, IEM_ACCESS_DATA_W, pIemCpu->iEffSeg, GCPtrEffDst, 1 /*arg*/);
    IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value, 0)
        IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fistt_r80_to_i64, pu16Fsw, pi64Dst, pr80Value);
        IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(pi64Dst, IEM_ACCESS_DATA_W, u16Fsw);
        IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(u16Fsw, pIemCpu->iEffSeg, GCPtrEffDst);
    IEM_MC_ELSE()
        /* Stack underflow: store the integer indefinite only when #IA is masked. */
        IEM_MC_IF_FCW_IM()
            IEM_MC_STORE_MEM_I64_CONST_BY_REF(pi64Dst, INT64_MIN /* (integer indefinite) */);
            IEM_MC_MEM_COMMIT_AND_UNMAP(pi64Dst, IEM_ACCESS_DATA_W);
        IEM_MC_ENDIF();
        IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(UINT8_MAX, pIemCpu->iEffSeg, GCPtrEffDst);
    IEM_MC_ENDIF();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
14926
14927
/** Opcode 0xdd !11/2. FST m64real — store ST(0) as 64-bit real (no pop). */
FNIEMOP_DEF_1(iemOp_fst_m64r, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fst m64r");
    IEM_MC_BEGIN(3, 2);
    IEM_MC_LOCAL(RTGCPTR,               GCPtrEffDst);
    IEM_MC_LOCAL(uint16_t,              u16Fsw);
    IEM_MC_ARG_LOCAL_REF(uint16_t *,    pu16Fsw,    u16Fsw, 0);
    IEM_MC_ARG(PRTFLOAT64U,             pr64Dst,            1);
    IEM_MC_ARG(PCRTFLOAT80U,            pr80Value,          2);

    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();

    /* Map the destination for writing before inspecting ST(0). */
    IEM_MC_MEM_MAP(pr64Dst, IEM_ACCESS_DATA_W, pIemCpu->iEffSeg, GCPtrEffDst, 1 /*arg*/);
    IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value, 0)
        IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fst_r80_to_r64, pu16Fsw, pr64Dst, pr80Value);
        IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(pr64Dst, IEM_ACCESS_DATA_W, u16Fsw);
        IEM_MC_UPDATE_FSW_WITH_MEM_OP(u16Fsw, pIemCpu->iEffSeg, GCPtrEffDst);
    IEM_MC_ELSE()
        /* Stack underflow: store negative QNaN only when #IA is masked. */
        IEM_MC_IF_FCW_IM()
            IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(pr64Dst);
            IEM_MC_MEM_COMMIT_AND_UNMAP(pr64Dst, IEM_ACCESS_DATA_W);
        IEM_MC_ENDIF();
        IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(UINT8_MAX, pIemCpu->iEffSeg, GCPtrEffDst);
    IEM_MC_ENDIF();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
14962
14963
14964
14965
/** Opcode 0xdd !11/3. FSTP m64real — store ST(0) as 64-bit real, then pop. */
FNIEMOP_DEF_1(iemOp_fstp_m64r, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fstp m64r");
    IEM_MC_BEGIN(3, 2);
    IEM_MC_LOCAL(RTGCPTR,               GCPtrEffDst);
    IEM_MC_LOCAL(uint16_t,              u16Fsw);
    IEM_MC_ARG_LOCAL_REF(uint16_t *,    pu16Fsw,    u16Fsw, 0);
    IEM_MC_ARG(PRTFLOAT64U,             pr64Dst,            1);
    IEM_MC_ARG(PCRTFLOAT80U,            pr80Value,          2);

    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();

    /* Map the destination for writing before inspecting ST(0). */
    IEM_MC_MEM_MAP(pr64Dst, IEM_ACCESS_DATA_W, pIemCpu->iEffSeg, GCPtrEffDst, 1 /*arg*/);
    IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value, 0)
        IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fst_r80_to_r64, pu16Fsw, pr64Dst, pr80Value);
        IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(pr64Dst, IEM_ACCESS_DATA_W, u16Fsw);
        IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(u16Fsw, pIemCpu->iEffSeg, GCPtrEffDst);
    IEM_MC_ELSE()
        /* Stack underflow: store negative QNaN only when #IA is masked. */
        IEM_MC_IF_FCW_IM()
            IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(pr64Dst);
            IEM_MC_MEM_COMMIT_AND_UNMAP(pr64Dst, IEM_ACCESS_DATA_W);
        IEM_MC_ENDIF();
        IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(UINT8_MAX, pIemCpu->iEffSeg, GCPtrEffDst);
    IEM_MC_ENDIF();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
15000
15001
15002/** Opcode 0xdd !11/0. */
15003FNIEMOP_DEF_1(iemOp_frstor, uint8_t, bRm)
15004{
15005 IEMOP_MNEMONIC("fxrstor m94/108byte");
15006 IEM_MC_BEGIN(3, 0);
15007 IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize, /*=*/ pIemCpu->enmEffOpSize, 0);
15008 IEM_MC_ARG_CONST(uint8_t, iEffSeg, /*=*/ pIemCpu->iEffSeg, 1);
15009 IEM_MC_ARG(RTGCPTR, GCPtrEffSrc, 2);
15010 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
15011 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
15012 IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
15013 IEM_MC_CALL_CIMPL_3(iemCImpl_frstor, enmEffOpSize, iEffSeg, GCPtrEffSrc);
15014 IEM_MC_END();
15015 return VINF_SUCCESS;
15016}
15017
15018
/** Opcode 0xdd !11/6. FNSAVE m94/108byte — save the full x87 state image (no wait). */
FNIEMOP_DEF_1(iemOp_fnsave, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fnsave m94/108byte");
    IEM_MC_BEGIN(3, 0);
    IEM_MC_ARG_CONST(IEMMODE,           enmEffOpSize, /*=*/ pIemCpu->enmEffOpSize,  0);
    IEM_MC_ARG_CONST(uint8_t,           iEffSeg,      /*=*/ pIemCpu->iEffSeg,       1);
    IEM_MC_ARG(RTGCPTR,                 GCPtrEffDst,                                2);
    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    /* Image layout and the implied FNINIT are handled by the C implementation. */
    IEM_MC_CALL_CIMPL_3(iemCImpl_fnsave, enmEffOpSize, iEffSeg, GCPtrEffDst);
    IEM_MC_END();
    return VINF_SUCCESS;
}
15035
/** Opcode 0xdd !11/7. FNSTSW m16 — store the FPU status word to memory (no wait). */
FNIEMOP_DEF_1(iemOp_fnstsw, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fnstsw m16");

    IEM_MC_BEGIN(0, 2);
    IEM_MC_LOCAL(uint16_t, u16Tmp);
    IEM_MC_LOCAL(RTGCPTR,  GCPtrEffDst);

    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();

    IEM_MC_FETCH_FSW(u16Tmp);
    IEM_MC_STORE_MEM_U16(pIemCpu->iEffSeg, GCPtrEffDst, u16Tmp);
    IEM_MC_ADVANCE_RIP();

/** @todo Debug / drop a hint to the verifier that things may differ
 * from REM. Seen 0x4020 (iem) vs 0x4000 (rem) at 0008:801c6b88 booting
 * NT4SP1. (X86_FSW_PE) */
    IEM_MC_END();
    return VINF_SUCCESS;
}
15059
15060
/** Opcode 0xdd 11/0. FFREE ST(i) — mark the register as empty (tag = empty). */
FNIEMOP_DEF_1(iemOp_ffree_stN, uint8_t, bRm)
{
    IEMOP_MNEMONIC("ffree stN");
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    /* Note! C0, C1, C2 and C3 are documented as undefined, we leave them
             unmodified. */

    IEM_MC_BEGIN(0, 0);

    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();

    /* The low three ModR/M bits select ST(i). */
    IEM_MC_FPU_STACK_FREE(bRm & X86_MODRM_RM_MASK);
    IEM_MC_UPDATE_FPU_OPCODE_IP();

    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
15082
15083
/** Opcode 0xdd 11/2. FST ST(i) — copy ST(0) to ST(i) without popping. */
FNIEMOP_DEF_1(iemOp_fst_stN, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fst st0,stN");
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    IEM_MC_BEGIN(0, 2);
    IEM_MC_LOCAL(PCRTFLOAT80U,          pr80Value);
    IEM_MC_LOCAL(IEMFPURESULT,          FpuRes);
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();
    IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value, 0)
        /* Wrap the ST(0) value in a result (FSW=0) and store it into ST(i). */
        IEM_MC_SET_FPU_RESULT(FpuRes, 0 /*FSW*/, pr80Value);
        IEM_MC_STORE_FPU_RESULT(FpuRes, bRm & X86_MODRM_RM_MASK);
    IEM_MC_ELSE()
        IEM_MC_FPU_STACK_UNDERFLOW(bRm & X86_MODRM_RM_MASK);
    IEM_MC_ENDIF();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
15106
15107
15108/** Opcode 0xdd 11/3. */
15109FNIEMOP_DEF_1(iemOp_fucom_stN_st0, uint8_t, bRm)
15110{
15111 IEMOP_MNEMONIC("fcom st0,stN");
15112 return FNIEMOP_CALL_2(iemOpHlpFpuNoStore_st0_stN, bRm, iemAImpl_fucom_r80_by_r80);
15113}
15114
15115
15116/** Opcode 0xdd 11/4. */
15117FNIEMOP_DEF_1(iemOp_fucomp_stN, uint8_t, bRm)
15118{
15119 IEMOP_MNEMONIC("fcomp st0,stN");
15120 return FNIEMOP_CALL_2(iemOpHlpFpuNoStore_st0_stN_pop, bRm, iemAImpl_fucom_r80_by_r80);
15121}
15122
15123
/** Opcode 0xdd. FPU escape byte 5 decoder: dispatches on ModR/M mod and reg fields. */
FNIEMOP_DEF(iemOp_EscF5)
{
    /* Remember where the 0xdd escape byte sits (offOpcode already advanced past it). */
    pIemCpu->offFpuOpcode = pIemCpu->offOpcode - 1;
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* Register form. */
        switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
        {
            case 0: return FNIEMOP_CALL_1(iemOp_ffree_stN,   bRm);
            case 1: return FNIEMOP_CALL_1(iemOp_fxch_stN,    bRm); /* Reserved, intel behavior is that of XCHG ST(i). */
            case 2: return FNIEMOP_CALL_1(iemOp_fst_stN,     bRm);
            case 3: return FNIEMOP_CALL_1(iemOp_fstp_stN,    bRm);
            case 4: return FNIEMOP_CALL_1(iemOp_fucom_stN_st0,bRm);
            case 5: return FNIEMOP_CALL_1(iemOp_fucomp_stN,  bRm);
            case 6: return IEMOP_RAISE_INVALID_OPCODE();
            case 7: return IEMOP_RAISE_INVALID_OPCODE();
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /* Memory form. */
        switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
        {
            case 0: return FNIEMOP_CALL_1(iemOp_fld_m64r,    bRm);
            case 1: return FNIEMOP_CALL_1(iemOp_fisttp_m64i, bRm);
            case 2: return FNIEMOP_CALL_1(iemOp_fst_m64r,    bRm);
            case 3: return FNIEMOP_CALL_1(iemOp_fstp_m64r,   bRm);
            case 4: return FNIEMOP_CALL_1(iemOp_frstor,      bRm);
            case 5: return IEMOP_RAISE_INVALID_OPCODE();
            case 6: return FNIEMOP_CALL_1(iemOp_fnsave,      bRm);
            case 7: return FNIEMOP_CALL_1(iemOp_fnstsw,      bRm);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
15160
15161
/** Opcode 0xde 11/0. FADDP ST(i),ST(0) — add and pop. */
FNIEMOP_DEF_1(iemOp_faddp_stN_st0, uint8_t, bRm)
{
    IEMOP_MNEMONIC("faddp stN,st0");
    /* Common ST(i) <- ST(i) op ST(0), pop afterwards. */
    return FNIEMOP_CALL_2(iemOpHlpFpu_stN_st0_pop, bRm, iemAImpl_fadd_r80_by_r80);
}
15168
15169
/** Opcode 0xde 11/1. FMULP ST(i),ST(0) — multiply and pop. */
FNIEMOP_DEF_1(iemOp_fmulp_stN_st0, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fmulp stN,st0");
    /* Common ST(i) <- ST(i) op ST(0), pop afterwards. */
    return FNIEMOP_CALL_2(iemOpHlpFpu_stN_st0_pop, bRm, iemAImpl_fmul_r80_by_r80);
}
15176
15177
15178/** Opcode 0xde 0xd9. */
15179FNIEMOP_DEF(iemOp_fcompp)
15180{
15181 IEMOP_MNEMONIC("fucompp st0,stN");
15182 return FNIEMOP_CALL_1(iemOpHlpFpuNoStore_st0_stN_pop_pop, iemAImpl_fcom_r80_by_r80);
15183}
15184
15185
/** Opcode 0xde 11/4. FSUBRP ST(i),ST(0) — reversed subtract and pop. */
FNIEMOP_DEF_1(iemOp_fsubrp_stN_st0, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fsubrp stN,st0");
    return FNIEMOP_CALL_2(iemOpHlpFpu_stN_st0_pop, bRm, iemAImpl_fsubr_r80_by_r80);
}
15192
15193
/** Opcode 0xde 11/5. FSUBP ST(i),ST(0) — subtract and pop. */
FNIEMOP_DEF_1(iemOp_fsubp_stN_st0, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fsubp stN,st0");
    return FNIEMOP_CALL_2(iemOpHlpFpu_stN_st0_pop, bRm, iemAImpl_fsub_r80_by_r80);
}
15200
15201
/** Opcode 0xde 11/6. FDIVRP ST(i),ST(0) — reversed divide and pop. */
FNIEMOP_DEF_1(iemOp_fdivrp_stN_st0, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fdivrp stN,st0");
    return FNIEMOP_CALL_2(iemOpHlpFpu_stN_st0_pop, bRm, iemAImpl_fdivr_r80_by_r80);
}
15208
15209
/** Opcode 0xde 11/7. FDIVP ST(i),ST(0) — divide and pop. */
FNIEMOP_DEF_1(iemOp_fdivp_stN_st0, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fdivp stN,st0");
    return FNIEMOP_CALL_2(iemOpHlpFpu_stN_st0_pop, bRm, iemAImpl_fdiv_r80_by_r80);
}
15216
15217
/**
 * Common worker for FPU instructions working on ST0 and an m16i, and storing
 * the result in ST0.
 *
 * @param   bRm         The ModR/M byte (memory form) used for effective
 *                      address calculation.
 * @param   pfnAImpl    Pointer to the instruction implementation (assembly).
 */
FNIEMOP_DEF_2(iemOpHlpFpu_st0_m16i, uint8_t, bRm, PFNIEMAIMPLFPUI16, pfnAImpl)
{
    IEM_MC_BEGIN(3, 3);
    IEM_MC_LOCAL(RTGCPTR,               GCPtrEffSrc);
    IEM_MC_LOCAL(IEMFPURESULT,          FpuRes);
    IEM_MC_LOCAL(int16_t,               i16Val2);
    IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes,     FpuRes,  0);
    IEM_MC_ARG(PCRTFLOAT80U,            pr80Value1,           1);
    IEM_MC_ARG_LOCAL_REF(int16_t const *, pi16Val2,  i16Val2, 2);

    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();
    /* Fetch the 16-bit integer operand before touching the FPU stack. */
    IEM_MC_FETCH_MEM_I16(i16Val2, pIemCpu->iEffSeg, GCPtrEffSrc);

    IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value1, 0)
        IEM_MC_CALL_FPU_AIMPL_3(pfnAImpl, pFpuRes, pr80Value1, pi16Val2);
        IEM_MC_STORE_FPU_RESULT(FpuRes, 0);
    IEM_MC_ELSE()
        IEM_MC_FPU_STACK_UNDERFLOW(0);
    IEM_MC_ENDIF();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
15253
15254
/** Opcode 0xde !11/0. FIADD m16int — ST(0) += (int16) memory operand. */
FNIEMOP_DEF_1(iemOp_fiadd_m16i, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fiadd m16i");
    return FNIEMOP_CALL_2(iemOpHlpFpu_st0_m16i, bRm, iemAImpl_fiadd_r80_by_i16);
}
15261
15262
/** Opcode 0xde !11/1. FIMUL m16int — ST(0) *= (int16) memory operand. */
FNIEMOP_DEF_1(iemOp_fimul_m16i, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fimul m16i");
    return FNIEMOP_CALL_2(iemOpHlpFpu_st0_m16i, bRm, iemAImpl_fimul_r80_by_i16);
}
15269
15270
/** Opcode 0xde !11/2. FICOM m16int — compare ST(0) with (int16) memory operand. */
FNIEMOP_DEF_1(iemOp_ficom_m16i, uint8_t, bRm)
{
    IEMOP_MNEMONIC("ficom st0,m16i");

    IEM_MC_BEGIN(3, 3);
    IEM_MC_LOCAL(RTGCPTR,               GCPtrEffSrc);
    IEM_MC_LOCAL(uint16_t,              u16Fsw);
    IEM_MC_LOCAL(int16_t,               i16Val2);
    IEM_MC_ARG_LOCAL_REF(uint16_t *,    pu16Fsw,    u16Fsw,  0);
    IEM_MC_ARG(PCRTFLOAT80U,            pr80Value1,          1);
    IEM_MC_ARG_LOCAL_REF(int16_t const *, pi16Val2, i16Val2, 2);

    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();
    IEM_MC_FETCH_MEM_I16(i16Val2, pIemCpu->iEffSeg, GCPtrEffSrc);

    IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value1, 0)
        /* Comparison only updates the status word; no result register. */
        IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_ficom_r80_by_i16, pu16Fsw, pr80Value1, pi16Val2);
        IEM_MC_UPDATE_FSW_WITH_MEM_OP(u16Fsw, pIemCpu->iEffSeg, GCPtrEffSrc);
    IEM_MC_ELSE()
        IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(UINT8_MAX, pIemCpu->iEffSeg, GCPtrEffSrc);
    IEM_MC_ENDIF();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
15303
15304
/** Opcode 0xde !11/3. FICOMP m16int — compare ST(0) with (int16) operand, then pop. */
FNIEMOP_DEF_1(iemOp_ficomp_m16i, uint8_t, bRm)
{
    IEMOP_MNEMONIC("ficomp st0,m16i");

    IEM_MC_BEGIN(3, 3);
    IEM_MC_LOCAL(RTGCPTR,               GCPtrEffSrc);
    IEM_MC_LOCAL(uint16_t,              u16Fsw);
    IEM_MC_LOCAL(int16_t,               i16Val2);
    IEM_MC_ARG_LOCAL_REF(uint16_t *,    pu16Fsw,    u16Fsw,  0);
    IEM_MC_ARG(PCRTFLOAT80U,            pr80Value1,          1);
    IEM_MC_ARG_LOCAL_REF(int16_t const *, pi16Val2, i16Val2, 2);

    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();
    IEM_MC_FETCH_MEM_I16(i16Val2, pIemCpu->iEffSeg, GCPtrEffSrc);

    IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value1, 0)
        /* Same as FICOM but with a pop on both the normal and underflow paths. */
        IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_ficom_r80_by_i16, pu16Fsw, pr80Value1, pi16Val2);
        IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(u16Fsw, pIemCpu->iEffSeg, GCPtrEffSrc);
    IEM_MC_ELSE()
        IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(UINT8_MAX, pIemCpu->iEffSeg, GCPtrEffSrc);
    IEM_MC_ENDIF();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
15337
15338
/** Opcode 0xde !11/4. FISUB m16int — ST(0) -= (int16) memory operand. */
FNIEMOP_DEF_1(iemOp_fisub_m16i, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fisub m16i");
    return FNIEMOP_CALL_2(iemOpHlpFpu_st0_m16i, bRm, iemAImpl_fisub_r80_by_i16);
}
15345
15346
/** Opcode 0xde !11/5. FISUBR m16int — ST(0) = (int16) operand - ST(0). */
FNIEMOP_DEF_1(iemOp_fisubr_m16i, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fisubr m16i");
    return FNIEMOP_CALL_2(iemOpHlpFpu_st0_m16i, bRm, iemAImpl_fisubr_r80_by_i16);
}
15353
15354
15355/** Opcode 0xde !11/6. */
15356FNIEMOP_DEF_1(iemOp_fidiv_m16i, uint8_t, bRm)
15357{
15358 IEMOP_MNEMONIC("fiadd m16i");
15359 return FNIEMOP_CALL_2(iemOpHlpFpu_st0_m16i, bRm, iemAImpl_fidiv_r80_by_i16);
15360}
15361
15362
15363/** Opcode 0xde !11/7. */
15364FNIEMOP_DEF_1(iemOp_fidivr_m16i, uint8_t, bRm)
15365{
15366 IEMOP_MNEMONIC("fiadd m16i");
15367 return FNIEMOP_CALL_2(iemOpHlpFpu_st0_m16i, bRm, iemAImpl_fidivr_r80_by_i16);
15368}
15369
15370
/** Opcode 0xde. FPU escape byte 6 decoder: dispatches on ModR/M mod and reg fields. */
FNIEMOP_DEF(iemOp_EscF6)
{
    /* Remember where the 0xde escape byte sits (offOpcode already advanced past it). */
    pIemCpu->offFpuOpcode = pIemCpu->offOpcode - 1;
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* Register form: mostly "op and pop" variants. */
        switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
        {
            case 0: return FNIEMOP_CALL_1(iemOp_faddp_stN_st0, bRm);
            case 1: return FNIEMOP_CALL_1(iemOp_fmulp_stN_st0, bRm);
            case 2: return FNIEMOP_CALL_1(iemOp_fcomp_stN, bRm);
            case 3: if (bRm == 0xd9)
                        return FNIEMOP_CALL(iemOp_fcompp);
                    return IEMOP_RAISE_INVALID_OPCODE();
            case 4: return FNIEMOP_CALL_1(iemOp_fsubrp_stN_st0, bRm);
            case 5: return FNIEMOP_CALL_1(iemOp_fsubp_stN_st0, bRm);
            case 6: return FNIEMOP_CALL_1(iemOp_fdivrp_stN_st0, bRm);
            case 7: return FNIEMOP_CALL_1(iemOp_fdivp_stN_st0, bRm);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /* Memory form: m16int operand variants. */
        switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
        {
            case 0: return FNIEMOP_CALL_1(iemOp_fiadd_m16i,  bRm);
            case 1: return FNIEMOP_CALL_1(iemOp_fimul_m16i,  bRm);
            case 2: return FNIEMOP_CALL_1(iemOp_ficom_m16i,  bRm);
            case 3: return FNIEMOP_CALL_1(iemOp_ficomp_m16i, bRm);
            case 4: return FNIEMOP_CALL_1(iemOp_fisub_m16i,  bRm);
            case 5: return FNIEMOP_CALL_1(iemOp_fisubr_m16i, bRm);
            case 6: return FNIEMOP_CALL_1(iemOp_fidiv_m16i,  bRm);
            case 7: return FNIEMOP_CALL_1(iemOp_fidivr_m16i, bRm);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
15409
15410
/** Opcode 0xdf 11/0. FFREEP ST(i).
 * Undocumented instruction, assumed to work like ffree + fincstp. */
FNIEMOP_DEF_1(iemOp_ffreep_stN, uint8_t, bRm)
{
    IEMOP_MNEMONIC("ffreep stN");
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    IEM_MC_BEGIN(0, 0);

    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();

    /* Free ST(i), then increment TOP — i.e. pop without storing. */
    IEM_MC_FPU_STACK_FREE(bRm & X86_MODRM_RM_MASK);
    IEM_MC_FPU_STACK_INC_TOP();
    IEM_MC_UPDATE_FPU_OPCODE_IP();

    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
15432
15433
/** Opcode 0xdf 0xe0. FNSTSW AX — store the FPU status word into AX (no wait). */
FNIEMOP_DEF(iemOp_fnstsw_ax)
{
    IEMOP_MNEMONIC("fnstsw ax");
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    IEM_MC_BEGIN(0, 1);
    IEM_MC_LOCAL(uint16_t, u16Tmp);
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_FETCH_FSW(u16Tmp);
    IEM_MC_STORE_GREG_U16(X86_GREG_xAX, u16Tmp);
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
15449
15450
15451/** Opcode 0xdf 11/5. */
15452FNIEMOP_DEF_1(iemOp_fucomip_st0_stN, uint8_t, bRm)
15453{
15454 IEMOP_MNEMONIC("fcomip st0,stN");
15455 return IEM_MC_DEFER_TO_CIMPL_3(iemCImpl_fcomi_fucomi, bRm & X86_MODRM_RM_MASK, iemAImpl_fcomi_r80_by_r80, true /*fPop*/);
15456}
15457
15458
/** Opcode 0xdf 11/6. FCOMIP ST(0),ST(i) — compare into EFLAGS, then pop. */
FNIEMOP_DEF_1(iemOp_fcomip_st0_stN, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fcomip st0,stN");
    return IEM_MC_DEFER_TO_CIMPL_3(iemCImpl_fcomi_fucomi, bRm & X86_MODRM_RM_MASK, iemAImpl_fcomi_r80_by_r80, true /*fPop*/);
}
15465
15466
/** Opcode 0xdf !11/0. FILD m16int — not yet implemented (stub). */
FNIEMOP_STUB_1(iemOp_fild_m16i, uint8_t, bRm);
15469
15470
/** Opcode 0xdf !11/1. FISTTP m16int — store ST(0) as int16 with truncation, then pop. */
FNIEMOP_DEF_1(iemOp_fisttp_m16i, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fisttp m16i");
    IEM_MC_BEGIN(3, 2);
    IEM_MC_LOCAL(RTGCPTR,               GCPtrEffDst);
    IEM_MC_LOCAL(uint16_t,              u16Fsw);
    IEM_MC_ARG_LOCAL_REF(uint16_t *,    pu16Fsw,    u16Fsw, 0);
    IEM_MC_ARG(int16_t *,               pi16Dst,            1);
    IEM_MC_ARG(PCRTFLOAT80U,            pr80Value,          2);

    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();

    /* Map the destination for writing before inspecting ST(0). */
    IEM_MC_MEM_MAP(pi16Dst, IEM_ACCESS_DATA_W, pIemCpu->iEffSeg, GCPtrEffDst, 1 /*arg*/);
    IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value, 0)
        IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fistt_r80_to_i16, pu16Fsw, pi16Dst, pr80Value);
        IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(pi16Dst, IEM_ACCESS_DATA_W, u16Fsw);
        IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(u16Fsw, pIemCpu->iEffSeg, GCPtrEffDst);
    IEM_MC_ELSE()
        /* Stack underflow: store the integer indefinite only when #IA is masked. */
        IEM_MC_IF_FCW_IM()
            IEM_MC_STORE_MEM_I16_CONST_BY_REF(pi16Dst, INT16_MIN /* (integer indefinite) */);
            IEM_MC_MEM_COMMIT_AND_UNMAP(pi16Dst, IEM_ACCESS_DATA_W);
        IEM_MC_ENDIF();
        IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(UINT8_MAX, pIemCpu->iEffSeg, GCPtrEffDst);
    IEM_MC_ENDIF();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
15505
15506
15507/** Opcode 0xdf !11/2. */
15508FNIEMOP_DEF_1(iemOp_fist_m16i, uint8_t, bRm)
15509{
15510 IEMOP_MNEMONIC("fistp m16i");
15511 IEM_MC_BEGIN(3, 2);
15512 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
15513 IEM_MC_LOCAL(uint16_t, u16Fsw);
15514 IEM_MC_ARG_LOCAL_REF(uint16_t *, pu16Fsw, u16Fsw, 0);
15515 IEM_MC_ARG(int16_t *, pi16Dst, 1);
15516 IEM_MC_ARG(PCRTFLOAT80U, pr80Value, 2);
15517
15518 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
15519 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
15520 IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
15521 IEM_MC_MAYBE_RAISE_FPU_XCPT();
15522
15523 IEM_MC_MEM_MAP(pi16Dst, IEM_ACCESS_DATA_W, pIemCpu->iEffSeg, GCPtrEffDst, 1 /*arg*/);
15524 IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value, 0)
15525 IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fist_r80_to_i16, pu16Fsw, pi16Dst, pr80Value);
15526 IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(pi16Dst, IEM_ACCESS_DATA_W, u16Fsw);
15527 IEM_MC_UPDATE_FSW_WITH_MEM_OP(u16Fsw, pIemCpu->iEffSeg, GCPtrEffDst);
15528 IEM_MC_ELSE()
15529 IEM_MC_IF_FCW_IM()
15530 IEM_MC_STORE_MEM_I16_CONST_BY_REF(pi16Dst, INT16_MIN /* (integer indefinite) */);
15531 IEM_MC_MEM_COMMIT_AND_UNMAP(pi16Dst, IEM_ACCESS_DATA_W);
15532 IEM_MC_ENDIF();
15533 IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(UINT8_MAX, pIemCpu->iEffSeg, GCPtrEffDst);
15534 IEM_MC_ENDIF();
15535 IEM_MC_USED_FPU();
15536 IEM_MC_ADVANCE_RIP();
15537
15538 IEM_MC_END();
15539 return VINF_SUCCESS;
15540}
15541
15542
/** Opcode 0xdf !11/3. FISTP m16int — store ST(0) as int16, then pop. */
FNIEMOP_DEF_1(iemOp_fistp_m16i, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fistp m16i");
    IEM_MC_BEGIN(3, 2);
    IEM_MC_LOCAL(RTGCPTR,               GCPtrEffDst);
    IEM_MC_LOCAL(uint16_t,              u16Fsw);
    IEM_MC_ARG_LOCAL_REF(uint16_t *,    pu16Fsw,    u16Fsw, 0);
    IEM_MC_ARG(int16_t *,               pi16Dst,            1);
    IEM_MC_ARG(PCRTFLOAT80U,            pr80Value,          2);

    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();

    /* Map the destination for writing before inspecting ST(0). */
    IEM_MC_MEM_MAP(pi16Dst, IEM_ACCESS_DATA_W, pIemCpu->iEffSeg, GCPtrEffDst, 1 /*arg*/);
    IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value, 0)
        IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fist_r80_to_i16, pu16Fsw, pi16Dst, pr80Value);
        IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(pi16Dst, IEM_ACCESS_DATA_W, u16Fsw);
        IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(u16Fsw, pIemCpu->iEffSeg, GCPtrEffDst);
    IEM_MC_ELSE()
        /* Stack underflow: store the integer indefinite only when #IA is masked. */
        IEM_MC_IF_FCW_IM()
            IEM_MC_STORE_MEM_I16_CONST_BY_REF(pi16Dst, INT16_MIN /* (integer indefinite) */);
            IEM_MC_MEM_COMMIT_AND_UNMAP(pi16Dst, IEM_ACCESS_DATA_W);
        IEM_MC_ENDIF();
        IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(UINT8_MAX, pIemCpu->iEffSeg, GCPtrEffDst);
    IEM_MC_ENDIF();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
15577
15578
/** Opcode 0xdf !11/4. FBLD m80bcd — not yet implemented (stub). */
FNIEMOP_STUB_1(iemOp_fbld_m80d, uint8_t, bRm);
15581
/** Opcode 0xdf !11/5. FILD m64int — not yet implemented (stub). */
FNIEMOP_STUB_1(iemOp_fild_m64i, uint8_t, bRm);
15584
/** Opcode 0xdf !11/6. FBSTP m80bcd — not yet implemented (stub). */
FNIEMOP_STUB_1(iemOp_fbstp_m80d, uint8_t, bRm);
15587
15588
/** Opcode 0xdf !11/7. FISTP m64int — store ST(0) as int64, then pop. */
FNIEMOP_DEF_1(iemOp_fistp_m64i, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fistp m64i");
    IEM_MC_BEGIN(3, 2);
    IEM_MC_LOCAL(RTGCPTR,               GCPtrEffDst);
    IEM_MC_LOCAL(uint16_t,              u16Fsw);
    IEM_MC_ARG_LOCAL_REF(uint16_t *,    pu16Fsw,    u16Fsw, 0);
    IEM_MC_ARG(int64_t *,               pi64Dst,            1);
    IEM_MC_ARG(PCRTFLOAT80U,            pr80Value,          2);

    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();

    /* Map the destination for writing before inspecting ST(0). */
    IEM_MC_MEM_MAP(pi64Dst, IEM_ACCESS_DATA_W, pIemCpu->iEffSeg, GCPtrEffDst, 1 /*arg*/);
    IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value, 0)
        IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fist_r80_to_i64, pu16Fsw, pi64Dst, pr80Value);
        IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(pi64Dst, IEM_ACCESS_DATA_W, u16Fsw);
        IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(u16Fsw, pIemCpu->iEffSeg, GCPtrEffDst);
    IEM_MC_ELSE()
        /* Stack underflow: store the integer indefinite only when #IA is masked. */
        IEM_MC_IF_FCW_IM()
            IEM_MC_STORE_MEM_I64_CONST_BY_REF(pi64Dst, INT64_MIN /* (integer indefinite) */);
            IEM_MC_MEM_COMMIT_AND_UNMAP(pi64Dst, IEM_ACCESS_DATA_W);
        IEM_MC_ENDIF();
        IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(UINT8_MAX, pIemCpu->iEffSeg, GCPtrEffDst);
    IEM_MC_ENDIF();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
15623
15624
15625/** Opcode 0xdf. */
15626FNIEMOP_DEF(iemOp_EscF7)
15627{
15628 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
15629 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
15630 {
15631 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
15632 {
15633 case 0: return FNIEMOP_CALL_1(iemOp_ffreep_stN, bRm); /* ffree + pop afterwards, since forever according to AMD. */
15634 case 1: return FNIEMOP_CALL_1(iemOp_fxch_stN, bRm); /* Reserved, behaves like FXCH ST(i) on intel. */
15635 case 2: return FNIEMOP_CALL_1(iemOp_fstp_stN, bRm); /* Reserved, behaves like FSTP ST(i) on intel. */
15636 case 3: return FNIEMOP_CALL_1(iemOp_fstp_stN, bRm); /* Reserved, behaves like FSTP ST(i) on intel. */
15637 case 4: if (bRm == 0xe0)
15638 return FNIEMOP_CALL(iemOp_fnstsw_ax);
15639 return IEMOP_RAISE_INVALID_OPCODE();
15640 case 5: return FNIEMOP_CALL_1(iemOp_fucomip_st0_stN, bRm);
15641 case 6: return FNIEMOP_CALL_1(iemOp_fcomip_st0_stN, bRm);
15642 case 7: return IEMOP_RAISE_INVALID_OPCODE();
15643 IEM_NOT_REACHED_DEFAULT_CASE_RET();
15644 }
15645 }
15646 else
15647 {
15648 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
15649 {
15650 case 0: return FNIEMOP_CALL_1(iemOp_fild_m16i, bRm);
15651 case 1: return FNIEMOP_CALL_1(iemOp_fisttp_m16i, bRm);
15652 case 2: return FNIEMOP_CALL_1(iemOp_fist_m16i, bRm);
15653 case 3: return FNIEMOP_CALL_1(iemOp_fistp_m16i, bRm);
15654 case 4: return FNIEMOP_CALL_1(iemOp_fbld_m80d, bRm);
15655 case 5: return FNIEMOP_CALL_1(iemOp_fild_m64i, bRm);
15656 case 6: return FNIEMOP_CALL_1(iemOp_fbstp_m80d, bRm);
15657 case 7: return FNIEMOP_CALL_1(iemOp_fistp_m64i, bRm);
15658 IEM_NOT_REACHED_DEFAULT_CASE_RET();
15659 }
15660 }
15661}
15662
15663
/** Opcode 0xe0. LOOPNE rel8 — decrement CX/ECX/RCX and jump while it is
 * non-zero and ZF is clear.  Counter width follows the effective address size. */
FNIEMOP_DEF(iemOp_loopne_Jb)
{
    IEMOP_MNEMONIC("loopne Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    switch (pIemCpu->enmEffAddrMode)
    {
        case IEMMODE_16BIT:
            IEM_MC_BEGIN(0,0);
            IEM_MC_SUB_GREG_U16(X86_GREG_xCX, 1);
            IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(X86_EFL_ZF) {
                IEM_MC_REL_JMP_S8(i8Imm);
            } IEM_MC_ELSE() {
                IEM_MC_ADVANCE_RIP();
            } IEM_MC_ENDIF();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_32BIT:
            IEM_MC_BEGIN(0,0);
            IEM_MC_SUB_GREG_U32(X86_GREG_xCX, 1);
            IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(X86_EFL_ZF) {
                IEM_MC_REL_JMP_S8(i8Imm);
            } IEM_MC_ELSE() {
                IEM_MC_ADVANCE_RIP();
            } IEM_MC_ENDIF();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_64BIT:
            IEM_MC_BEGIN(0,0);
            IEM_MC_SUB_GREG_U64(X86_GREG_xCX, 1);
            IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(X86_EFL_ZF) {
                IEM_MC_REL_JMP_S8(i8Imm);
            } IEM_MC_ELSE() {
                IEM_MC_ADVANCE_RIP();
            } IEM_MC_ENDIF();
            IEM_MC_END();
            return VINF_SUCCESS;

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
15710
15711
/** Opcode 0xe1. LOOPE rel8 — decrement CX/ECX/RCX and jump while it is
 * non-zero and ZF is set.  Counter width follows the effective address size. */
FNIEMOP_DEF(iemOp_loope_Jb)
{
    IEMOP_MNEMONIC("loope Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    switch (pIemCpu->enmEffAddrMode)
    {
        case IEMMODE_16BIT:
            IEM_MC_BEGIN(0,0);
            IEM_MC_SUB_GREG_U16(X86_GREG_xCX, 1);
            IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(X86_EFL_ZF) {
                IEM_MC_REL_JMP_S8(i8Imm);
            } IEM_MC_ELSE() {
                IEM_MC_ADVANCE_RIP();
            } IEM_MC_ENDIF();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_32BIT:
            IEM_MC_BEGIN(0,0);
            IEM_MC_SUB_GREG_U32(X86_GREG_xCX, 1);
            IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(X86_EFL_ZF) {
                IEM_MC_REL_JMP_S8(i8Imm);
            } IEM_MC_ELSE() {
                IEM_MC_ADVANCE_RIP();
            } IEM_MC_ENDIF();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_64BIT:
            IEM_MC_BEGIN(0,0);
            IEM_MC_SUB_GREG_U64(X86_GREG_xCX, 1);
            IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(X86_EFL_ZF) {
                IEM_MC_REL_JMP_S8(i8Imm);
            } IEM_MC_ELSE() {
                IEM_MC_ADVANCE_RIP();
            } IEM_MC_ENDIF();
            IEM_MC_END();
            return VINF_SUCCESS;

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
15758
15759
/** Opcode 0xe2. LOOP rel8 — decrement CX/ECX/RCX and jump while non-zero.
 * A self-referencing "loop $" (target == start of this instruction, i.e.
 * i8Imm == -instruction length) is short-circuited by zeroing the counter
 * and falling through instead of iterating. */
FNIEMOP_DEF(iemOp_loop_Jb)
{
    IEMOP_MNEMONIC("loop Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    /** @todo Check out the #GP case if EIP < CS.Base or EIP > CS.Limit when
     * using the 32-bit operand size override.  How can that be restarted?  See
     * weird pseudo code in intel manual. */
    switch (pIemCpu->enmEffAddrMode)
    {
        case IEMMODE_16BIT:
            IEM_MC_BEGIN(0,0);
            /* offOpcode equals the instruction length here; -offOpcode == i8Imm
               means the branch targets this very instruction ("loop $"). */
            if (-(int8_t)pIemCpu->offOpcode != i8Imm)
            {
                IEM_MC_SUB_GREG_U16(X86_GREG_xCX, 1);
                IEM_MC_IF_CX_IS_NZ() {
                    IEM_MC_REL_JMP_S8(i8Imm);
                } IEM_MC_ELSE() {
                    IEM_MC_ADVANCE_RIP();
                } IEM_MC_ENDIF();
            }
            else
            {
                /* Tight self-loop: exhaust the counter in one go. */
                IEM_MC_STORE_GREG_U16_CONST(X86_GREG_xCX, 0);
                IEM_MC_ADVANCE_RIP();
            }
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_32BIT:
            IEM_MC_BEGIN(0,0);
            if (-(int8_t)pIemCpu->offOpcode != i8Imm)
            {
                IEM_MC_SUB_GREG_U32(X86_GREG_xCX, 1);
                IEM_MC_IF_ECX_IS_NZ() {
                    IEM_MC_REL_JMP_S8(i8Imm);
                } IEM_MC_ELSE() {
                    IEM_MC_ADVANCE_RIP();
                } IEM_MC_ENDIF();
            }
            else
            {
                IEM_MC_STORE_GREG_U32_CONST(X86_GREG_xCX, 0);
                IEM_MC_ADVANCE_RIP();
            }
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_64BIT:
            IEM_MC_BEGIN(0,0);
            if (-(int8_t)pIemCpu->offOpcode != i8Imm)
            {
                IEM_MC_SUB_GREG_U64(X86_GREG_xCX, 1);
                IEM_MC_IF_RCX_IS_NZ() {
                    IEM_MC_REL_JMP_S8(i8Imm);
                } IEM_MC_ELSE() {
                    IEM_MC_ADVANCE_RIP();
                } IEM_MC_ENDIF();
            }
            else
            {
                IEM_MC_STORE_GREG_U64_CONST(X86_GREG_xCX, 0);
                IEM_MC_ADVANCE_RIP();
            }
            IEM_MC_END();
            return VINF_SUCCESS;

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
15833
15834
/**
 * Opcode 0xe3 - JCXZ/JECXZ/JRCXZ Jb.
 *
 * Takes the relative 8-bit jump when the counter register (CX/ECX/RCX per
 * effective address size) is zero; the counter is only tested, never modified.
 */
FNIEMOP_DEF(iemOp_jecxz_Jb)
{
    IEMOP_MNEMONIC("jecxz Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    switch (pIemCpu->enmEffAddrMode)
    {
        case IEMMODE_16BIT:
            IEM_MC_BEGIN(0,0);
            /* Note the inverted branch sense: non-zero falls through. */
            IEM_MC_IF_CX_IS_NZ() {
                IEM_MC_ADVANCE_RIP();
            } IEM_MC_ELSE() {
                IEM_MC_REL_JMP_S8(i8Imm);
            } IEM_MC_ENDIF();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_32BIT:
            IEM_MC_BEGIN(0,0);
            IEM_MC_IF_ECX_IS_NZ() {
                IEM_MC_ADVANCE_RIP();
            } IEM_MC_ELSE() {
                IEM_MC_REL_JMP_S8(i8Imm);
            } IEM_MC_ENDIF();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_64BIT:
            IEM_MC_BEGIN(0,0);
            IEM_MC_IF_RCX_IS_NZ() {
                IEM_MC_ADVANCE_RIP();
            } IEM_MC_ELSE() {
                IEM_MC_REL_JMP_S8(i8Imm);
            } IEM_MC_ENDIF();
            IEM_MC_END();
            return VINF_SUCCESS;

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
15878
15879
15880/** Opcode 0xe4 */
15881FNIEMOP_DEF(iemOp_in_AL_Ib)
15882{
15883 IEMOP_MNEMONIC("in eAX,Ib");
15884 uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
15885 IEMOP_HLP_NO_LOCK_PREFIX();
15886 return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_in, u8Imm, 1);
15887}
15888
15889
/**
 * Opcode 0xe5 - IN eAX, Ib.
 *
 * Reads a word or dword (per effective operand size) from the
 * immediate-addressed I/O port into AX/EAX; deferred to iemCImpl_in.
 */
FNIEMOP_DEF(iemOp_in_eAX_Ib)
{
    IEMOP_MNEMONIC("in eAX,Ib");
    uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    /* Access width: 2 bytes for 16-bit operand size, otherwise 4. */
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_in, u8Imm, pIemCpu->enmEffOpSize == IEMMODE_16BIT ? 2 : 4);
}
15898
15899
/**
 * Opcode 0xe6 - OUT Ib, AL.
 *
 * Writes AL to the immediate-addressed I/O port; deferred to iemCImpl_out.
 */
FNIEMOP_DEF(iemOp_out_Ib_AL)
{
    IEMOP_MNEMONIC("out Ib,AL");
    uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_out, u8Imm, 1);
}
15908
15909
/**
 * Opcode 0xe7 - OUT Ib, eAX.
 *
 * Writes AX/EAX (per effective operand size) to the immediate-addressed I/O
 * port; deferred to iemCImpl_out.
 */
FNIEMOP_DEF(iemOp_out_Ib_eAX)
{
    IEMOP_MNEMONIC("out Ib,eAX");
    uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    /* Access width: 2 bytes for 16-bit operand size, otherwise 4. */
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_out, u8Imm, pIemCpu->enmEffOpSize == IEMMODE_16BIT ? 2 : 4);
}
15918
15919
/**
 * Opcode 0xe8 - CALL rel16/rel32 (near, relative).
 *
 * Fetches the relative displacement sized by the effective operand size and
 * defers the push/branch work to the matching iemCImpl_call_rel_NN helper.
 */
FNIEMOP_DEF(iemOp_call_Jv)
{
    IEMOP_MNEMONIC("call Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
        {
            uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
            return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_call_rel_16, (int16_t)u16Imm);
        }

        case IEMMODE_32BIT:
        {
            uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
            return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_call_rel_32, (int32_t)u32Imm);
        }

        case IEMMODE_64BIT:
        {
            /* 64-bit mode still encodes a 32-bit displacement, sign-extended to 64 bits. */
            uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm);
            return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_call_rel_64, u64Imm);
        }

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
15948
15949
/**
 * Opcode 0xe9 - JMP rel16/rel32 (near, relative).
 *
 * Fetches the signed displacement per effective operand size and performs the
 * relative jump.  64-bit mode shares the 32-bit path since the encoding uses
 * a 32-bit displacement there as well.
 */
FNIEMOP_DEF(iemOp_jmp_Jv)
{
    IEMOP_MNEMONIC("jmp Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
        {
            int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
            IEM_MC_BEGIN(0, 0);
            IEM_MC_REL_JMP_S16(i16Imm);
            IEM_MC_END();
            return VINF_SUCCESS;
        }

        case IEMMODE_64BIT:   /* deliberate fall-through: 64-bit uses a 32-bit displacement */
        case IEMMODE_32BIT:
        {
            int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
            IEM_MC_BEGIN(0, 0);
            IEM_MC_REL_JMP_S32(i32Imm);
            IEM_MC_END();
            return VINF_SUCCESS;
        }

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
15979
15980
/**
 * Opcode 0xea - JMP ptr16:16/ptr16:32 (far, absolute).
 *
 * Rejected in 64-bit mode (IEMOP_HLP_NO_64BIT).  Decodes the offset (16 or
 * 32 bits per effective operand size, zero-extended to 32) followed by the
 * selector, then defers to the far-jump C implementation.
 */
FNIEMOP_DEF(iemOp_jmp_Ap)
{
    IEMOP_MNEMONIC("jmp Ap");
    IEMOP_HLP_NO_64BIT();

    /* Decode the far pointer address and pass it on to the far call C implementation. */
    uint32_t offSeg;
    if (pIemCpu->enmEffOpSize != IEMMODE_16BIT)
        IEM_OPCODE_GET_NEXT_U32(&offSeg);
    else
        IEM_OPCODE_GET_NEXT_U16_ZX_U32(&offSeg);
    uint16_t uSel;  IEM_OPCODE_GET_NEXT_U16(&uSel);
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_3(iemCImpl_FarJmp, uSel, offSeg, pIemCpu->enmEffOpSize);
}
15997
15998
/**
 * Opcode 0xeb - JMP rel8 (near, relative, short form).
 *
 * Unconditional relative jump with a signed 8-bit displacement.
 */
FNIEMOP_DEF(iemOp_jmp_Jb)
{
    IEMOP_MNEMONIC("jmp Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    IEM_MC_BEGIN(0, 0);
    IEM_MC_REL_JMP_S8(i8Imm);
    IEM_MC_END();
    return VINF_SUCCESS;
}
16012
16013
/**
 * Opcode 0xec - IN AL, DX.
 *
 * Reads one byte from the I/O port addressed by DX into AL; deferred to
 * iemCImpl_in_eAX_DX.
 */
FNIEMOP_DEF(iemOp_in_AL_DX)
{
    IEMOP_MNEMONIC("in AL,DX");
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_in_eAX_DX, 1);
}
16021
16022
/**
 * Opcode 0xed - IN eAX, DX.
 *
 * Reads a word or dword (per effective operand size) from the I/O port
 * addressed by DX into AX/EAX; deferred to iemCImpl_in_eAX_DX.
 *
 * NOTE(review): the function name is missing the "in_" part
 * (iemOp_in_eAX_DX would match the neighbours); left unchanged since the
 * opcode dispatch table references this symbol.
 */
FNIEMOP_DEF(iemOp_eAX_DX)
{
    IEMOP_MNEMONIC("in eAX,DX");
    IEMOP_HLP_NO_LOCK_PREFIX();
    /* Access width: 2 bytes for 16-bit operand size, otherwise 4. */
    return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_in_eAX_DX, pIemCpu->enmEffOpSize == IEMMODE_16BIT ? 2 : 4);
}
16030
16031
/**
 * Opcode 0xee - OUT DX, AL.
 *
 * Writes AL to the I/O port addressed by DX; deferred to iemCImpl_out_DX_eAX.
 */
FNIEMOP_DEF(iemOp_out_DX_AL)
{
    IEMOP_MNEMONIC("out DX,AL");
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_out_DX_eAX, 1);
}
16039
16040
/**
 * Opcode 0xef - OUT DX, eAX.
 *
 * Writes AX/EAX (per effective operand size) to the I/O port addressed by DX;
 * deferred to iemCImpl_out_DX_eAX.
 */
FNIEMOP_DEF(iemOp_out_DX_eAX)
{
    IEMOP_MNEMONIC("out DX,eAX");
    IEMOP_HLP_NO_LOCK_PREFIX();
    /* Access width: 2 bytes for 16-bit operand size, otherwise 4. */
    return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_out_DX_eAX, pIemCpu->enmEffOpSize == IEMMODE_16BIT ? 2 : 4);
}
16048
16049
/**
 * Opcode 0xf0 - LOCK prefix.
 *
 * Records the LOCK prefix in fPrefixes, then fetches and dispatches the next
 * opcode byte through the one-byte opcode table.
 */
FNIEMOP_DEF(iemOp_lock)
{
    IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("lock");
    pIemCpu->fPrefixes |= IEM_OP_PRF_LOCK;

    uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
    return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}
16059
16060
/**
 * Opcode 0xf2 - REPNE/REPNZ prefix.
 *
 * Clears any earlier REPE bit (the last of the two prefixes wins), records
 * REPNZ in fPrefixes, then dispatches the next opcode byte.
 */
FNIEMOP_DEF(iemOp_repne)
{
    /* This overrides any previous REPE prefix. */
    pIemCpu->fPrefixes &= ~IEM_OP_PRF_REPZ;
    IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("repne");
    pIemCpu->fPrefixes |= IEM_OP_PRF_REPNZ;

    uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
    return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}
16072
16073
/**
 * Opcode 0xf3 - REPE/REPZ prefix.
 *
 * Clears any earlier REPNE bit (the last of the two prefixes wins), records
 * REPZ in fPrefixes, then dispatches the next opcode byte.
 */
FNIEMOP_DEF(iemOp_repe)
{
    /* This overrides any previous REPNE prefix. */
    pIemCpu->fPrefixes &= ~IEM_OP_PRF_REPNZ;
    IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("repe");
    pIemCpu->fPrefixes |= IEM_OP_PRF_REPZ;

    uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
    return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}
16085
16086
16087/** Opcode 0xf4. */
16088FNIEMOP_DEF(iemOp_hlt)
16089{
16090 IEMOP_HLP_NO_LOCK_PREFIX();
16091 return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_hlt);
16092}
16093
16094
/**
 * Opcode 0xf5 - CMC.
 *
 * Complements (flips) the carry flag in EFLAGS.
 */
FNIEMOP_DEF(iemOp_cmc)
{
    IEMOP_MNEMONIC("cmc");
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEM_MC_BEGIN(0, 0);
    IEM_MC_FLIP_EFL_BIT(X86_EFL_CF);
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
16106
16107
16108/**
16109 * Common implementation of 'inc/dec/not/neg Eb'.
16110 *
16111 * @param bRm The RM byte.
16112 * @param pImpl The instruction implementation.
16113 */
16114FNIEMOP_DEF_2(iemOpCommonUnaryEb, uint8_t, bRm, PCIEMOPUNARYSIZES, pImpl)
16115{
16116 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
16117 {
16118 /* register access */
16119 IEM_MC_BEGIN(2, 0);
16120 IEM_MC_ARG(uint8_t *, pu8Dst, 0);
16121 IEM_MC_ARG(uint32_t *, pEFlags, 1);
16122 IEM_MC_REF_GREG_U8(pu8Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
16123 IEM_MC_REF_EFLAGS(pEFlags);
16124 IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnNormalU8, pu8Dst, pEFlags);
16125 IEM_MC_ADVANCE_RIP();
16126 IEM_MC_END();
16127 }
16128 else
16129 {
16130 /* memory access. */
16131 IEM_MC_BEGIN(2, 2);
16132 IEM_MC_ARG(uint8_t *, pu8Dst, 0);
16133 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 1);
16134 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
16135
16136 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
16137 IEM_MC_MEM_MAP(pu8Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
16138 IEM_MC_FETCH_EFLAGS(EFlags);
16139 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
16140 IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnNormalU8, pu8Dst, pEFlags);
16141 else
16142 IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnLockedU8, pu8Dst, pEFlags);
16143
16144 IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Dst, IEM_ACCESS_DATA_RW);
16145 IEM_MC_COMMIT_EFLAGS(EFlags);
16146 IEM_MC_ADVANCE_RIP();
16147 IEM_MC_END();
16148 }
16149 return VINF_SUCCESS;
16150}
16151
16152
16153/**
16154 * Common implementation of 'inc/dec/not/neg Ev'.
16155 *
16156 * @param bRm The RM byte.
16157 * @param pImpl The instruction implementation.
16158 */
16159FNIEMOP_DEF_2(iemOpCommonUnaryEv, uint8_t, bRm, PCIEMOPUNARYSIZES, pImpl)
16160{
16161 /* Registers are handled by a common worker. */
16162 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
16163 return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, pImpl, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
16164
16165 /* Memory we do here. */
16166 switch (pIemCpu->enmEffOpSize)
16167 {
16168 case IEMMODE_16BIT:
16169 IEM_MC_BEGIN(2, 2);
16170 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
16171 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 1);
16172 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
16173
16174 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
16175 IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
16176 IEM_MC_FETCH_EFLAGS(EFlags);
16177 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
16178 IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnNormalU16, pu16Dst, pEFlags);
16179 else
16180 IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnLockedU16, pu16Dst, pEFlags);
16181
16182 IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
16183 IEM_MC_COMMIT_EFLAGS(EFlags);
16184 IEM_MC_ADVANCE_RIP();
16185 IEM_MC_END();
16186 return VINF_SUCCESS;
16187
16188 case IEMMODE_32BIT:
16189 IEM_MC_BEGIN(2, 2);
16190 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
16191 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 1);
16192 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
16193
16194 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
16195 IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
16196 IEM_MC_FETCH_EFLAGS(EFlags);
16197 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
16198 IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnNormalU32, pu32Dst, pEFlags);
16199 else
16200 IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnLockedU32, pu32Dst, pEFlags);
16201
16202 IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
16203 IEM_MC_COMMIT_EFLAGS(EFlags);
16204 IEM_MC_ADVANCE_RIP();
16205 IEM_MC_END();
16206 return VINF_SUCCESS;
16207
16208 case IEMMODE_64BIT:
16209 IEM_MC_BEGIN(2, 2);
16210 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
16211 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 1);
16212 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
16213
16214 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
16215 IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
16216 IEM_MC_FETCH_EFLAGS(EFlags);
16217 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
16218 IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnNormalU64, pu64Dst, pEFlags);
16219 else
16220 IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnLockedU64, pu64Dst, pEFlags);
16221
16222 IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, IEM_ACCESS_DATA_RW);
16223 IEM_MC_COMMIT_EFLAGS(EFlags);
16224 IEM_MC_ADVANCE_RIP();
16225 IEM_MC_END();
16226 return VINF_SUCCESS;
16227
16228 IEM_NOT_REACHED_DEFAULT_CASE_RET();
16229 }
16230}
16231
16232
/**
 * Opcode 0xf6 /0 - TEST Eb, Ib.
 *
 * AND of destination and immediate for flags only; the destination is never
 * written, so the memory operand is mapped read-only (IEM_ACCESS_DATA_R).
 * AF is undefined per the instruction definition.
 */
FNIEMOP_DEF_1(iemOp_grp3_test_Eb, uint8_t, bRm)
{
    IEMOP_MNEMONIC("test Eb,Ib");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register access */
        uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(3, 0);
        IEM_MC_ARG(uint8_t *,       pu8Dst,             0);
        IEM_MC_ARG_CONST(uint8_t,   u8Src,/*=*/u8Imm,   1);
        IEM_MC_ARG(uint32_t *,      pEFlags,            2);
        IEM_MC_REF_GREG_U8(pu8Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_REF_EFLAGS(pEFlags);
        IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_test_u8, pu8Dst, u8Src, pEFlags);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory access. */
        IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

        IEM_MC_BEGIN(3, 2);
        IEM_MC_ARG(uint8_t *,       pu8Dst,          0);
        IEM_MC_ARG(uint8_t,         u8Src,           1);
        IEM_MC_ARG_LOCAL_EFLAGS(    pEFlags, EFlags, 2);
        IEM_MC_LOCAL(RTGCPTR,       GCPtrEffDst);

        /* The trailing '1' tells the addressing code one immediate byte follows the ModR/M bytes. */
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1);
        uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
        IEM_MC_ASSIGN(u8Src, u8Imm);
        IEM_MC_MEM_MAP(pu8Dst, IEM_ACCESS_DATA_R, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
        IEM_MC_FETCH_EFLAGS(EFlags);
        IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_test_u8, pu8Dst, u8Src, pEFlags);

        IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Dst, IEM_ACCESS_DATA_R);
        IEM_MC_COMMIT_EFLAGS(EFlags);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
16280
16281
/**
 * Opcode 0xf7 /0 - TEST Ev, Iv.
 *
 * Flags-only AND of destination and immediate, per effective operand size.
 * The destination is never written, so memory operands are mapped read-only
 * and the 32-bit register case does not clear the high dword.  AF is
 * undefined per the instruction definition.
 */
FNIEMOP_DEF_1(iemOp_grp3_test_Ev, uint8_t, bRm)
{
    IEMOP_MNEMONIC("test Ev,Iv");
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register access */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
            {
                uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint16_t *,      pu16Dst,                0);
                IEM_MC_ARG_CONST(uint16_t,  u16Src,/*=*/u16Imm,     1);
                IEM_MC_ARG(uint32_t *,      pEFlags,                2);
                IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_test_u16, pu16Dst, u16Src, pEFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;
            }

            case IEMMODE_32BIT:
            {
                uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint32_t *,      pu32Dst,                0);
                IEM_MC_ARG_CONST(uint32_t,  u32Src,/*=*/u32Imm,     1);
                IEM_MC_ARG(uint32_t *,      pEFlags,                2);
                IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_test_u32, pu32Dst, u32Src, pEFlags);
                /* No clearing the high dword here - test doesn't write back the result. */
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;
            }

            case IEMMODE_64BIT:
            {
                /* 64-bit immediate is a sign-extended 32-bit value. */
                uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm);
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint64_t *,      pu64Dst,                0);
                IEM_MC_ARG_CONST(uint64_t,  u64Src,/*=*/u64Imm,     1);
                IEM_MC_ARG(uint32_t *,      pEFlags,                2);
                IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_test_u64, pu64Dst, u64Src, pEFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;
            }

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /* memory access. */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
            {
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint16_t *,      pu16Dst,         0);
                IEM_MC_ARG(uint16_t,        u16Src,          1);
                IEM_MC_ARG_LOCAL_EFLAGS(    pEFlags, EFlags, 2);
                IEM_MC_LOCAL(RTGCPTR,       GCPtrEffDst);

                /* '2' = number of immediate bytes following ModR/M, needed for RIP-relative addressing. */
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 2);
                uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
                IEM_MC_ASSIGN(u16Src, u16Imm);
                IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_R, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_EFLAGS(EFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_test_u16, pu16Dst, u16Src, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_R);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;
            }

            case IEMMODE_32BIT:
            {
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint32_t *,      pu32Dst,         0);
                IEM_MC_ARG(uint32_t,        u32Src,          1);
                IEM_MC_ARG_LOCAL_EFLAGS(    pEFlags, EFlags, 2);
                IEM_MC_LOCAL(RTGCPTR,       GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 4);
                uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
                IEM_MC_ASSIGN(u32Src, u32Imm);
                IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_R, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_EFLAGS(EFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_test_u32, pu32Dst, u32Src, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_R);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;
            }

            case IEMMODE_64BIT:
            {
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint64_t *,      pu64Dst,         0);
                IEM_MC_ARG(uint64_t,        u64Src,          1);
                IEM_MC_ARG_LOCAL_EFLAGS(    pEFlags, EFlags, 2);
                IEM_MC_LOCAL(RTGCPTR,       GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 4);
                uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm);
                IEM_MC_ASSIGN(u64Src, u64Imm);
                IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_R, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_EFLAGS(EFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_test_u64, pu64Dst, u64Src, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, IEM_ACCESS_DATA_R);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;
            }

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
16418
16419
/**
 * Opcode 0xf6 /4, /5, /6 and /7 - common worker for MUL/IMUL/DIV/IDIV Eb.
 *
 * Fetches the 8-bit operand (register or memory), invokes the supplied
 * assembly worker on AX, and raises \#DE when the worker returns non-zero.
 *
 * @param   bRm     The RM byte.
 * @param   pfnU8   The 8-bit mul/div assembly worker to invoke.
 */
FNIEMOP_DEF_2(iemOpCommonGrp3MulDivEb, uint8_t, bRm, PFNIEMAIMPLMULDIVU8, pfnU8)
{
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register access */
        IEMOP_HLP_NO_LOCK_PREFIX(); /* NOTE(review): redundant - already done above; harmless. */
        IEM_MC_BEGIN(3, 1);
        IEM_MC_ARG(uint16_t *,      pu16AX,     0);
        IEM_MC_ARG(uint8_t,         u8Value,    1);
        IEM_MC_ARG(uint32_t *,      pEFlags,    2);
        IEM_MC_LOCAL(int32_t,       rc);

        IEM_MC_FETCH_GREG_U8(u8Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_REF_GREG_U16(pu16AX, X86_GREG_xAX);
        IEM_MC_REF_EFLAGS(pEFlags);
        IEM_MC_CALL_AIMPL_3(rc, pfnU8, pu16AX, u8Value, pEFlags);
        /* Non-zero rc from the worker signals a divide error (#DE). */
        IEM_MC_IF_LOCAL_IS_Z(rc) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_RAISE_DIVIDE_ERROR();
        } IEM_MC_ENDIF();

        IEM_MC_END();
    }
    else
    {
        /* memory access. */
        IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

        IEM_MC_BEGIN(3, 2);
        IEM_MC_ARG(uint16_t *,      pu16AX,     0);
        IEM_MC_ARG(uint8_t,         u8Value,    1);
        IEM_MC_ARG(uint32_t *,      pEFlags,    2);
        IEM_MC_LOCAL(RTGCPTR,       GCPtrEffDst);
        IEM_MC_LOCAL(int32_t,       rc);

        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEM_MC_FETCH_MEM_U8(u8Value, pIemCpu->iEffSeg, GCPtrEffDst);
        IEM_MC_REF_GREG_U16(pu16AX, X86_GREG_xAX);
        IEM_MC_REF_EFLAGS(pEFlags);
        IEM_MC_CALL_AIMPL_3(rc, pfnU8, pu16AX, u8Value, pEFlags);
        IEM_MC_IF_LOCAL_IS_Z(rc) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_RAISE_DIVIDE_ERROR();
        } IEM_MC_ENDIF();

        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
16474
16475
/**
 * Opcode 0xf7 /4, /5, /6 and /7 - common worker for MUL/IMUL/DIV/IDIV Ev.
 *
 * Fetches the operand (register or memory) per effective operand size,
 * invokes the size-matching worker from pImpl on the xAX:xDX register pair,
 * and raises \#DE when the worker returns non-zero.  In the 32-bit cases the
 * high dwords of RAX/RDX are cleared explicitly on success since the workers
 * write through 32-bit references.
 *
 * @param   bRm     The RM byte.
 * @param   pImpl   The mul/div implementation table (per-size workers).
 */
FNIEMOP_DEF_2(iemOpCommonGrp3MulDivEv, uint8_t, bRm, PCIEMOPMULDIVSIZES, pImpl)
{
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register access */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
            {
                IEMOP_HLP_NO_LOCK_PREFIX();
                IEM_MC_BEGIN(4, 1);
                IEM_MC_ARG(uint16_t *,      pu16AX,     0);
                IEM_MC_ARG(uint16_t *,      pu16DX,     1);
                IEM_MC_ARG(uint16_t,        u16Value,   2);
                IEM_MC_ARG(uint32_t *,      pEFlags,    3);
                IEM_MC_LOCAL(int32_t,       rc);

                IEM_MC_FETCH_GREG_U16(u16Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_GREG_U16(pu16AX, X86_GREG_xAX);
                IEM_MC_REF_GREG_U16(pu16DX, X86_GREG_xDX);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_AIMPL_4(rc, pImpl->pfnU16, pu16AX, pu16DX, u16Value, pEFlags);
                /* Non-zero rc from the worker signals a divide error (#DE). */
                IEM_MC_IF_LOCAL_IS_Z(rc) {
                    IEM_MC_ADVANCE_RIP();
                } IEM_MC_ELSE() {
                    IEM_MC_RAISE_DIVIDE_ERROR();
                } IEM_MC_ENDIF();

                IEM_MC_END();
                return VINF_SUCCESS;
            }

            case IEMMODE_32BIT:
            {
                IEMOP_HLP_NO_LOCK_PREFIX();
                IEM_MC_BEGIN(4, 1);
                IEM_MC_ARG(uint32_t *,      pu32AX,     0);
                IEM_MC_ARG(uint32_t *,      pu32DX,     1);
                IEM_MC_ARG(uint32_t,        u32Value,   2);
                IEM_MC_ARG(uint32_t *,      pEFlags,    3);
                IEM_MC_LOCAL(int32_t,       rc);

                IEM_MC_FETCH_GREG_U32(u32Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_GREG_U32(pu32AX, X86_GREG_xAX);
                IEM_MC_REF_GREG_U32(pu32DX, X86_GREG_xDX);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_AIMPL_4(rc, pImpl->pfnU32, pu32AX, pu32DX, u32Value, pEFlags);
                IEM_MC_IF_LOCAL_IS_Z(rc) {
                    /* 32-bit register writes must zero the upper halves of RAX/RDX. */
                    IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32AX);
                    IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32DX);
                    IEM_MC_ADVANCE_RIP();
                } IEM_MC_ELSE() {
                    IEM_MC_RAISE_DIVIDE_ERROR();
                } IEM_MC_ENDIF();

                IEM_MC_END();
                return VINF_SUCCESS;
            }

            case IEMMODE_64BIT:
            {
                IEMOP_HLP_NO_LOCK_PREFIX();
                IEM_MC_BEGIN(4, 1);
                IEM_MC_ARG(uint64_t *,      pu64AX,     0);
                IEM_MC_ARG(uint64_t *,      pu64DX,     1);
                IEM_MC_ARG(uint64_t,        u64Value,   2);
                IEM_MC_ARG(uint32_t *,      pEFlags,    3);
                IEM_MC_LOCAL(int32_t,       rc);

                IEM_MC_FETCH_GREG_U64(u64Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_GREG_U64(pu64AX, X86_GREG_xAX);
                IEM_MC_REF_GREG_U64(pu64DX, X86_GREG_xDX);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_AIMPL_4(rc, pImpl->pfnU64, pu64AX, pu64DX, u64Value, pEFlags);
                IEM_MC_IF_LOCAL_IS_Z(rc) {
                    IEM_MC_ADVANCE_RIP();
                } IEM_MC_ELSE() {
                    IEM_MC_RAISE_DIVIDE_ERROR();
                } IEM_MC_ENDIF();

                IEM_MC_END();
                return VINF_SUCCESS;
            }

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /* memory access. */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
            {
                IEMOP_HLP_NO_LOCK_PREFIX();
                IEM_MC_BEGIN(4, 2);
                IEM_MC_ARG(uint16_t *,      pu16AX,     0);
                IEM_MC_ARG(uint16_t *,      pu16DX,     1);
                IEM_MC_ARG(uint16_t,        u16Value,   2);
                IEM_MC_ARG(uint32_t *,      pEFlags,    3);
                IEM_MC_LOCAL(RTGCPTR,       GCPtrEffDst);
                IEM_MC_LOCAL(int32_t,       rc);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_FETCH_MEM_U16(u16Value, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_REF_GREG_U16(pu16AX, X86_GREG_xAX);
                IEM_MC_REF_GREG_U16(pu16DX, X86_GREG_xDX);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_AIMPL_4(rc, pImpl->pfnU16, pu16AX, pu16DX, u16Value, pEFlags);
                IEM_MC_IF_LOCAL_IS_Z(rc) {
                    IEM_MC_ADVANCE_RIP();
                } IEM_MC_ELSE() {
                    IEM_MC_RAISE_DIVIDE_ERROR();
                } IEM_MC_ENDIF();

                IEM_MC_END();
                return VINF_SUCCESS;
            }

            case IEMMODE_32BIT:
            {
                IEMOP_HLP_NO_LOCK_PREFIX();
                IEM_MC_BEGIN(4, 2);
                IEM_MC_ARG(uint32_t *,      pu32AX,     0);
                IEM_MC_ARG(uint32_t *,      pu32DX,     1);
                IEM_MC_ARG(uint32_t,        u32Value,   2);
                IEM_MC_ARG(uint32_t *,      pEFlags,    3);
                IEM_MC_LOCAL(RTGCPTR,       GCPtrEffDst);
                IEM_MC_LOCAL(int32_t,       rc);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_FETCH_MEM_U32(u32Value, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_REF_GREG_U32(pu32AX, X86_GREG_xAX);
                IEM_MC_REF_GREG_U32(pu32DX, X86_GREG_xDX);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_AIMPL_4(rc, pImpl->pfnU32, pu32AX, pu32DX, u32Value, pEFlags);
                IEM_MC_IF_LOCAL_IS_Z(rc) {
                    IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32AX);
                    IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32DX);
                    IEM_MC_ADVANCE_RIP();
                } IEM_MC_ELSE() {
                    IEM_MC_RAISE_DIVIDE_ERROR();
                } IEM_MC_ENDIF();

                IEM_MC_END();
                return VINF_SUCCESS;
            }

            case IEMMODE_64BIT:
            {
                IEMOP_HLP_NO_LOCK_PREFIX();
                IEM_MC_BEGIN(4, 2);
                IEM_MC_ARG(uint64_t *,      pu64AX,     0);
                IEM_MC_ARG(uint64_t *,      pu64DX,     1);
                IEM_MC_ARG(uint64_t,        u64Value,   2);
                IEM_MC_ARG(uint32_t *,      pEFlags,    3);
                IEM_MC_LOCAL(RTGCPTR,       GCPtrEffDst);
                IEM_MC_LOCAL(int32_t,       rc);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_FETCH_MEM_U64(u64Value, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_REF_GREG_U64(pu64AX, X86_GREG_xAX);
                IEM_MC_REF_GREG_U64(pu64DX, X86_GREG_xDX);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_AIMPL_4(rc, pImpl->pfnU64, pu64AX, pu64DX, u64Value, pEFlags);
                IEM_MC_IF_LOCAL_IS_Z(rc) {
                    IEM_MC_ADVANCE_RIP();
                } IEM_MC_ELSE() {
                    IEM_MC_RAISE_DIVIDE_ERROR();
                } IEM_MC_ENDIF();

                IEM_MC_END();
                return VINF_SUCCESS;
            }

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
16659
/**
 * Opcode 0xf6 - Group 3 byte operations, dispatched on ModR/M.reg:
 * /0 TEST, /1 invalid, /2 NOT, /3 NEG, /4 MUL, /5 IMUL, /6 DIV, /7 IDIV.
 */
FNIEMOP_DEF(iemOp_Grp3_Eb)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
    {
        case 0:
            return FNIEMOP_CALL_1(iemOp_grp3_test_Eb, bRm);
        case 1:
            return IEMOP_RAISE_INVALID_OPCODE();
        case 2:
            IEMOP_MNEMONIC("not Eb");
            return FNIEMOP_CALL_2(iemOpCommonUnaryEb, bRm, &g_iemAImpl_not);
        case 3:
            IEMOP_MNEMONIC("neg Eb");
            return FNIEMOP_CALL_2(iemOpCommonUnaryEb, bRm, &g_iemAImpl_neg);
        case 4:
            IEMOP_MNEMONIC("mul Eb");
            IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);
            return FNIEMOP_CALL_2(iemOpCommonGrp3MulDivEb, bRm, iemAImpl_mul_u8);
        case 5:
            IEMOP_MNEMONIC("imul Eb");
            IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);
            return FNIEMOP_CALL_2(iemOpCommonGrp3MulDivEb, bRm, iemAImpl_imul_u8);
        case 6:
            IEMOP_MNEMONIC("div Eb");
            IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_OF | X86_EFL_CF);
            return FNIEMOP_CALL_2(iemOpCommonGrp3MulDivEb, bRm, iemAImpl_div_u8);
        case 7:
            IEMOP_MNEMONIC("idiv Eb");
            IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_OF | X86_EFL_CF);
            return FNIEMOP_CALL_2(iemOpCommonGrp3MulDivEb, bRm, iemAImpl_idiv_u8);
        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
16695
16696
/**
 * Opcode 0xf7 - Group 3 word/dword/qword operations, dispatched on ModR/M.reg:
 * /0 TEST, /1 invalid, /2 NOT, /3 NEG, /4 MUL, /5 IMUL, /6 DIV, /7 IDIV.
 */
FNIEMOP_DEF(iemOp_Grp3_Ev)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
    {
        case 0:
            return FNIEMOP_CALL_1(iemOp_grp3_test_Ev, bRm);
        case 1:
            return IEMOP_RAISE_INVALID_OPCODE();
        case 2:
            IEMOP_MNEMONIC("not Ev");
            return FNIEMOP_CALL_2(iemOpCommonUnaryEv, bRm, &g_iemAImpl_not);
        case 3:
            IEMOP_MNEMONIC("neg Ev");
            return FNIEMOP_CALL_2(iemOpCommonUnaryEv, bRm, &g_iemAImpl_neg);
        case 4:
            IEMOP_MNEMONIC("mul Ev");
            IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);
            return FNIEMOP_CALL_2(iemOpCommonGrp3MulDivEv, bRm, &g_iemAImpl_mul);
        case 5:
            IEMOP_MNEMONIC("imul Ev");
            IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);
            return FNIEMOP_CALL_2(iemOpCommonGrp3MulDivEv, bRm, &g_iemAImpl_imul);
        case 6:
            IEMOP_MNEMONIC("div Ev");
            IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_OF | X86_EFL_CF);
            return FNIEMOP_CALL_2(iemOpCommonGrp3MulDivEv, bRm, &g_iemAImpl_div);
        case 7:
            IEMOP_MNEMONIC("idiv Ev");
            IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_OF | X86_EFL_CF);
            return FNIEMOP_CALL_2(iemOpCommonGrp3MulDivEv, bRm, &g_iemAImpl_idiv);
        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
16732
16733
/**
 * Opcode 0xf8 - CLC.
 *
 * Clears the carry flag in EFLAGS.
 */
FNIEMOP_DEF(iemOp_clc)
{
    IEMOP_MNEMONIC("clc");
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEM_MC_BEGIN(0, 0);
    IEM_MC_CLEAR_EFL_BIT(X86_EFL_CF);
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
16745
16746
/**
 * Opcode 0xf9 - STC.
 *
 * Sets the carry flag in EFLAGS.
 */
FNIEMOP_DEF(iemOp_stc)
{
    IEMOP_MNEMONIC("stc");
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEM_MC_BEGIN(0, 0);
    IEM_MC_SET_EFL_BIT(X86_EFL_CF);
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
16758
16759
/**
 * Opcode 0xfa - CLI.
 *
 * Deferred to the C implementation (iemCImpl_cli).
 */
FNIEMOP_DEF(iemOp_cli)
{
    IEMOP_MNEMONIC("cli");
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_cli);
}
16767
16768
/**
 * Opcode 0xfb - STI.  (Doxygen header was missing; 0xfa precedes and 0xfc
 * follows, so this is the 0xfb slot.)
 *
 * Deferred to the C implementation (iemCImpl_sti).
 */
FNIEMOP_DEF(iemOp_sti)
{
    IEMOP_MNEMONIC("sti");
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_sti);
}
16775
16776
/**
 * Opcode 0xfc - CLD.
 *
 * Clears the direction flag in EFLAGS.
 */
FNIEMOP_DEF(iemOp_cld)
{
    IEMOP_MNEMONIC("cld");
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEM_MC_BEGIN(0, 0);
    IEM_MC_CLEAR_EFL_BIT(X86_EFL_DF);
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
16788
16789
/**
 * Opcode 0xfd - STD.
 *
 * Sets the direction flag in EFLAGS.
 */
FNIEMOP_DEF(iemOp_std)
{
    IEMOP_MNEMONIC("std");
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEM_MC_BEGIN(0, 0);
    IEM_MC_SET_EFL_BIT(X86_EFL_DF);
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
16801
16802
16803/** Opcode 0xfe. */
16804FNIEMOP_DEF(iemOp_Grp4)
16805{
16806 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
16807 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
16808 {
16809 case 0:
16810 IEMOP_MNEMONIC("inc Ev");
16811 return FNIEMOP_CALL_2(iemOpCommonUnaryEb, bRm, &g_iemAImpl_inc);
16812 case 1:
16813 IEMOP_MNEMONIC("dec Ev");
16814 return FNIEMOP_CALL_2(iemOpCommonUnaryEb, bRm, &g_iemAImpl_dec);
16815 default:
16816 IEMOP_MNEMONIC("grp4-ud");
16817 return IEMOP_RAISE_INVALID_OPCODE();
16818 }
16819}
16820
16821
/**
 * Opcode 0xff /2.  Near indirect call (register or memory operand).
 * @param   bRm     The RM byte.
 */
FNIEMOP_DEF_1(iemOp_Grp5_calln_Ev, uint8_t, bRm)
{
    IEMOP_MNEMONIC("calln Ev");
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo Too early? */
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* The new RIP is taken from a register. */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(1, 0);
                IEM_MC_ARG(uint16_t, u16Target, 0);
                IEM_MC_FETCH_GREG_U16(u16Target, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_CALL_CIMPL_1(iemCImpl_call_16, u16Target);
                IEM_MC_END()
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(1, 0);
                IEM_MC_ARG(uint32_t, u32Target, 0);
                IEM_MC_FETCH_GREG_U32(u32Target, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_CALL_CIMPL_1(iemCImpl_call_32, u32Target);
                IEM_MC_END()
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(1, 0);
                IEM_MC_ARG(uint64_t, u64Target, 0);
                IEM_MC_FETCH_GREG_U64(u64Target, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_CALL_CIMPL_1(iemCImpl_call_64, u64Target);
                IEM_MC_END()
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /* The new RIP is taken from a memory location.  (The original comment
           here said "register", which was a copy/paste slip - this branch
           calculates an effective address and fetches from memory.) */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(1, 1);
                IEM_MC_ARG(uint16_t, u16Target, 0);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
                IEM_MC_FETCH_MEM_U16(u16Target, pIemCpu->iEffSeg, GCPtrEffSrc);
                IEM_MC_CALL_CIMPL_1(iemCImpl_call_16, u16Target);
                IEM_MC_END()
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(1, 1);
                IEM_MC_ARG(uint32_t, u32Target, 0);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
                IEM_MC_FETCH_MEM_U32(u32Target, pIemCpu->iEffSeg, GCPtrEffSrc);
                IEM_MC_CALL_CIMPL_1(iemCImpl_call_32, u32Target);
                IEM_MC_END()
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(1, 1);
                IEM_MC_ARG(uint64_t, u64Target, 0);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
                IEM_MC_FETCH_MEM_U64(u64Target, pIemCpu->iEffSeg, GCPtrEffSrc);
                IEM_MC_CALL_CIMPL_1(iemCImpl_call_64, u64Target);
                IEM_MC_END()
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
16903
16904typedef IEM_CIMPL_DECL_TYPE_3(FNIEMCIMPLFARBRANCH, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmOpSize);
16905
16906FNIEMOP_DEF_2(iemOpHlp_Grp5_far_Ep, uint8_t, bRm, FNIEMCIMPLFARBRANCH *, pfnCImpl)
16907{
16908 /* Registers? How?? */
16909 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
16910 return IEMOP_RAISE_INVALID_OPCODE(); /* callf eax is not legal */
16911
16912 /* Far pointer loaded from memory. */
16913 switch (pIemCpu->enmEffOpSize)
16914 {
16915 case IEMMODE_16BIT:
16916 IEM_MC_BEGIN(3, 1);
16917 IEM_MC_ARG(uint16_t, u16Sel, 0);
16918 IEM_MC_ARG(uint16_t, offSeg, 1);
16919 IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize, IEMMODE_16BIT, 2);
16920 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
16921 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
16922 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
16923 IEM_MC_FETCH_MEM_U16(offSeg, pIemCpu->iEffSeg, GCPtrEffSrc);
16924 IEM_MC_FETCH_MEM_U16_DISP(u16Sel, pIemCpu->iEffSeg, GCPtrEffSrc, 2);
16925 IEM_MC_CALL_CIMPL_3(pfnCImpl, u16Sel, offSeg, enmEffOpSize);
16926 IEM_MC_END();
16927 return VINF_SUCCESS;
16928
16929 case IEMMODE_64BIT:
16930 /** @todo testcase: AMD does not seem to believe in the case (see bs-cpu-xcpt-1)
16931 * and will apparently ignore REX.W, at least for the jmp far qword [rsp]
16932 * and call far qword [rsp] encodings. */
16933 if (!IEM_IS_GUEST_CPU_AMD(pIemCpu))
16934 {
16935 IEM_MC_BEGIN(3, 1);
16936 IEM_MC_ARG(uint16_t, u16Sel, 0);
16937 IEM_MC_ARG(uint64_t, offSeg, 1);
16938 IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize, IEMMODE_16BIT, 2);
16939 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
16940 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
16941 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
16942 IEM_MC_FETCH_MEM_U64(offSeg, pIemCpu->iEffSeg, GCPtrEffSrc);
16943 IEM_MC_FETCH_MEM_U16_DISP(u16Sel, pIemCpu->iEffSeg, GCPtrEffSrc, 8);
16944 IEM_MC_CALL_CIMPL_3(pfnCImpl, u16Sel, offSeg, enmEffOpSize);
16945 IEM_MC_END();
16946 return VINF_SUCCESS;
16947 }
16948 /* AMD falls thru. */
16949
16950 case IEMMODE_32BIT:
16951 IEM_MC_BEGIN(3, 1);
16952 IEM_MC_ARG(uint16_t, u16Sel, 0);
16953 IEM_MC_ARG(uint32_t, offSeg, 1);
16954 IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize, IEMMODE_32BIT, 2);
16955 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
16956 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
16957 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
16958 IEM_MC_FETCH_MEM_U32(offSeg, pIemCpu->iEffSeg, GCPtrEffSrc);
16959 IEM_MC_FETCH_MEM_U16_DISP(u16Sel, pIemCpu->iEffSeg, GCPtrEffSrc, 4);
16960 IEM_MC_CALL_CIMPL_3(pfnCImpl, u16Sel, offSeg, enmEffOpSize);
16961 IEM_MC_END();
16962 return VINF_SUCCESS;
16963
16964 IEM_NOT_REACHED_DEFAULT_CASE_RET();
16965 }
16966}
16967
16968
/**
 * Opcode 0xff /3.  Far indirect call via a memory far pointer; delegates to
 * the common far-Ep worker with iemCImpl_callf.
 * @param   bRm     The RM byte.
 */
FNIEMOP_DEF_1(iemOp_Grp5_callf_Ep, uint8_t, bRm)
{
    IEMOP_MNEMONIC("callf Ep");
    return FNIEMOP_CALL_2(iemOpHlp_Grp5_far_Ep, bRm, iemCImpl_callf);
}
16978
16979
/**
 * Opcode 0xff /4.  Near indirect jump (register or memory operand).
 * @param   bRm     The RM byte.
 */
FNIEMOP_DEF_1(iemOp_Grp5_jmpn_Ev, uint8_t, bRm)
{
    IEMOP_MNEMONIC("jmpn Ev");
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo Too early? */
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* The new RIP is taken from a register. */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint16_t, u16Target);
                IEM_MC_FETCH_GREG_U16(u16Target, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_SET_RIP_U16(u16Target);
                IEM_MC_END()
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint32_t, u32Target);
                IEM_MC_FETCH_GREG_U32(u32Target, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_SET_RIP_U32(u32Target);
                IEM_MC_END()
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint64_t, u64Target);
                IEM_MC_FETCH_GREG_U64(u64Target, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_SET_RIP_U64(u64Target);
                IEM_MC_END()
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /* The new RIP is taken from a memory location. */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint16_t, u16Target);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
                IEM_MC_FETCH_MEM_U16(u16Target, pIemCpu->iEffSeg, GCPtrEffSrc);
                IEM_MC_SET_RIP_U16(u16Target);
                IEM_MC_END()
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint32_t, u32Target);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
                IEM_MC_FETCH_MEM_U32(u32Target, pIemCpu->iEffSeg, GCPtrEffSrc);
                IEM_MC_SET_RIP_U32(u32Target);
                IEM_MC_END()
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint64_t, u64Target);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
                IEM_MC_FETCH_MEM_U64(u64Target, pIemCpu->iEffSeg, GCPtrEffSrc);
                IEM_MC_SET_RIP_U64(u64Target);
                IEM_MC_END()
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
17061
17062
/**
 * Opcode 0xff /5.  Far indirect jump via a memory far pointer; delegates to
 * the common far-Ep worker with iemCImpl_FarJmp.
 * @param   bRm     The RM byte.
 */
FNIEMOP_DEF_1(iemOp_Grp5_jmpf_Ep, uint8_t, bRm)
{
    IEMOP_MNEMONIC("jmpf Ep");
    return FNIEMOP_CALL_2(iemOpHlp_Grp5_far_Ep, bRm, iemCImpl_FarJmp);
}
17072
17073
/**
 * Opcode 0xff /6.  PUSH with a register or memory operand.
 * @param   bRm     The RM byte.
 */
FNIEMOP_DEF_1(iemOp_Grp5_push_Ev, uint8_t, bRm)
{
    IEMOP_MNEMONIC("push Ev");
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo Too early? */

    /* Registers are handled by a common worker. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
        return FNIEMOP_CALL_1(iemOpCommonPushGReg, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);

    /* Memory we do here: fetch the operand, then push it. */
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            IEM_MC_BEGIN(0, 2);
            IEM_MC_LOCAL(uint16_t, u16Src);
            IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
            IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
            IEM_MC_FETCH_MEM_U16(u16Src, pIemCpu->iEffSeg, GCPtrEffSrc);
            IEM_MC_PUSH_U16(u16Src);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_32BIT:
            IEM_MC_BEGIN(0, 2);
            IEM_MC_LOCAL(uint32_t, u32Src);
            IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
            IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
            IEM_MC_FETCH_MEM_U32(u32Src, pIemCpu->iEffSeg, GCPtrEffSrc);
            IEM_MC_PUSH_U32(u32Src);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_64BIT:
            IEM_MC_BEGIN(0, 2);
            IEM_MC_LOCAL(uint64_t, u64Src);
            IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
            IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
            IEM_MC_FETCH_MEM_U64(u64Src, pIemCpu->iEffSeg, GCPtrEffSrc);
            IEM_MC_PUSH_U64(u64Src);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
17127
17128
/** Opcode 0xff.  Group 5: dispatches on the ModRM reg field (/0../7). */
FNIEMOP_DEF(iemOp_Grp5)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
    {
        case 0:
            IEMOP_MNEMONIC("inc Ev");
            return FNIEMOP_CALL_2(iemOpCommonUnaryEv, bRm, &g_iemAImpl_inc);
        case 1:
            IEMOP_MNEMONIC("dec Ev");
            return FNIEMOP_CALL_2(iemOpCommonUnaryEv, bRm, &g_iemAImpl_dec);
        case 2:
            return FNIEMOP_CALL_1(iemOp_Grp5_calln_Ev, bRm);
        case 3:
            return FNIEMOP_CALL_1(iemOp_Grp5_callf_Ep, bRm);
        case 4:
            return FNIEMOP_CALL_1(iemOp_Grp5_jmpn_Ev, bRm);
        case 5:
            return FNIEMOP_CALL_1(iemOp_Grp5_jmpf_Ep, bRm);
        case 6:
            return FNIEMOP_CALL_1(iemOp_Grp5_push_Ev, bRm);
        case 7:
            IEMOP_MNEMONIC("grp5-ud");
            return IEMOP_RAISE_INVALID_OPCODE();
    }
    /* All eight 3-bit reg values are handled above. */
    AssertFailedReturn(VERR_INTERNAL_ERROR_2);
}
17157
17158
17159
/**
 * The one-byte opcode decoder map (opcodes 0x00 thru 0xff), indexed by the
 * first opcode byte.  Declared extern at the top of this file.
 */
const PFNIEMOP g_apfnOneByteMap[256] =
{
    /* 0x00 */  iemOp_add_Eb_Gb,        iemOp_add_Ev_Gv,        iemOp_add_Gb_Eb,        iemOp_add_Gv_Ev,
    /* 0x04 */  iemOp_add_Al_Ib,        iemOp_add_eAX_Iz,       iemOp_push_ES,          iemOp_pop_ES,
    /* 0x08 */  iemOp_or_Eb_Gb,         iemOp_or_Ev_Gv,         iemOp_or_Gb_Eb,         iemOp_or_Gv_Ev,
    /* 0x0c */  iemOp_or_Al_Ib,         iemOp_or_eAX_Iz,        iemOp_push_CS,          iemOp_2byteEscape,
    /* 0x10 */  iemOp_adc_Eb_Gb,        iemOp_adc_Ev_Gv,        iemOp_adc_Gb_Eb,        iemOp_adc_Gv_Ev,
    /* 0x14 */  iemOp_adc_Al_Ib,        iemOp_adc_eAX_Iz,       iemOp_push_SS,          iemOp_pop_SS,
    /* 0x18 */  iemOp_sbb_Eb_Gb,        iemOp_sbb_Ev_Gv,        iemOp_sbb_Gb_Eb,        iemOp_sbb_Gv_Ev,
    /* 0x1c */  iemOp_sbb_Al_Ib,        iemOp_sbb_eAX_Iz,       iemOp_push_DS,          iemOp_pop_DS,
    /* 0x20 */  iemOp_and_Eb_Gb,        iemOp_and_Ev_Gv,        iemOp_and_Gb_Eb,        iemOp_and_Gv_Ev,
    /* 0x24 */  iemOp_and_Al_Ib,        iemOp_and_eAX_Iz,       iemOp_seg_ES,           iemOp_daa,
    /* 0x28 */  iemOp_sub_Eb_Gb,        iemOp_sub_Ev_Gv,        iemOp_sub_Gb_Eb,        iemOp_sub_Gv_Ev,
    /* 0x2c */  iemOp_sub_Al_Ib,        iemOp_sub_eAX_Iz,       iemOp_seg_CS,           iemOp_das,
    /* 0x30 */  iemOp_xor_Eb_Gb,        iemOp_xor_Ev_Gv,        iemOp_xor_Gb_Eb,        iemOp_xor_Gv_Ev,
    /* 0x34 */  iemOp_xor_Al_Ib,        iemOp_xor_eAX_Iz,       iemOp_seg_SS,           iemOp_aaa,
    /* 0x38 */  iemOp_cmp_Eb_Gb,        iemOp_cmp_Ev_Gv,        iemOp_cmp_Gb_Eb,        iemOp_cmp_Gv_Ev,
    /* 0x3c */  iemOp_cmp_Al_Ib,        iemOp_cmp_eAX_Iz,       iemOp_seg_DS,           iemOp_aas,
    /* 0x40 */  iemOp_inc_eAX,          iemOp_inc_eCX,          iemOp_inc_eDX,          iemOp_inc_eBX,
    /* 0x44 */  iemOp_inc_eSP,          iemOp_inc_eBP,          iemOp_inc_eSI,          iemOp_inc_eDI,
    /* 0x48 */  iemOp_dec_eAX,          iemOp_dec_eCX,          iemOp_dec_eDX,          iemOp_dec_eBX,
    /* 0x4c */  iemOp_dec_eSP,          iemOp_dec_eBP,          iemOp_dec_eSI,          iemOp_dec_eDI,
    /* 0x50 */  iemOp_push_eAX,         iemOp_push_eCX,         iemOp_push_eDX,         iemOp_push_eBX,
    /* 0x54 */  iemOp_push_eSP,         iemOp_push_eBP,         iemOp_push_eSI,         iemOp_push_eDI,
    /* 0x58 */  iemOp_pop_eAX,          iemOp_pop_eCX,          iemOp_pop_eDX,          iemOp_pop_eBX,
    /* 0x5c */  iemOp_pop_eSP,          iemOp_pop_eBP,          iemOp_pop_eSI,          iemOp_pop_eDI,
    /* 0x60 */  iemOp_pusha,            iemOp_popa,             iemOp_bound_Gv_Ma,      iemOp_arpl_Ew_Gw_movsx_Gv_Ev,
    /* 0x64 */  iemOp_seg_FS,           iemOp_seg_GS,           iemOp_op_size,          iemOp_addr_size,
    /* 0x68 */  iemOp_push_Iz,          iemOp_imul_Gv_Ev_Iz,    iemOp_push_Ib,          iemOp_imul_Gv_Ev_Ib,
    /* 0x6c */  iemOp_insb_Yb_DX,       iemOp_inswd_Yv_DX,      iemOp_outsb_Yb_DX,      iemOp_outswd_Yv_DX,
    /* 0x70 */  iemOp_jo_Jb,            iemOp_jno_Jb,           iemOp_jc_Jb,            iemOp_jnc_Jb,
    /* 0x74 */  iemOp_je_Jb,            iemOp_jne_Jb,           iemOp_jbe_Jb,           iemOp_jnbe_Jb,
    /* 0x78 */  iemOp_js_Jb,            iemOp_jns_Jb,           iemOp_jp_Jb,            iemOp_jnp_Jb,
    /* 0x7c */  iemOp_jl_Jb,            iemOp_jnl_Jb,           iemOp_jle_Jb,           iemOp_jnle_Jb,
    /* 0x80 */  iemOp_Grp1_Eb_Ib_80,    iemOp_Grp1_Ev_Iz,       iemOp_Grp1_Eb_Ib_82,    iemOp_Grp1_Ev_Ib,
    /* 0x84 */  iemOp_test_Eb_Gb,       iemOp_test_Ev_Gv,       iemOp_xchg_Eb_Gb,       iemOp_xchg_Ev_Gv,
    /* 0x88 */  iemOp_mov_Eb_Gb,        iemOp_mov_Ev_Gv,        iemOp_mov_Gb_Eb,        iemOp_mov_Gv_Ev,
    /* 0x8c */  iemOp_mov_Ev_Sw,        iemOp_lea_Gv_M,         iemOp_mov_Sw_Ev,        iemOp_Grp1A,
    /* 0x90 */  iemOp_nop,              iemOp_xchg_eCX_eAX,     iemOp_xchg_eDX_eAX,     iemOp_xchg_eBX_eAX,
    /* 0x94 */  iemOp_xchg_eSP_eAX,     iemOp_xchg_eBP_eAX,     iemOp_xchg_eSI_eAX,     iemOp_xchg_eDI_eAX,
    /* 0x98 */  iemOp_cbw,              iemOp_cwd,              iemOp_call_Ap,          iemOp_wait,
    /* 0x9c */  iemOp_pushf_Fv,         iemOp_popf_Fv,          iemOp_sahf,             iemOp_lahf,
    /* 0xa0 */  iemOp_mov_Al_Ob,        iemOp_mov_rAX_Ov,       iemOp_mov_Ob_AL,        iemOp_mov_Ov_rAX,
    /* 0xa4 */  iemOp_movsb_Xb_Yb,      iemOp_movswd_Xv_Yv,     iemOp_cmpsb_Xb_Yb,      iemOp_cmpswd_Xv_Yv,
    /* 0xa8 */  iemOp_test_AL_Ib,       iemOp_test_eAX_Iz,      iemOp_stosb_Yb_AL,      iemOp_stoswd_Yv_eAX,
    /* 0xac */  iemOp_lodsb_AL_Xb,      iemOp_lodswd_eAX_Xv,    iemOp_scasb_AL_Xb,      iemOp_scaswd_eAX_Xv,
    /* 0xb0 */  iemOp_mov_AL_Ib,        iemOp_CL_Ib,            iemOp_DL_Ib,            iemOp_BL_Ib,
    /* 0xb4 */  iemOp_mov_AH_Ib,        iemOp_CH_Ib,            iemOp_DH_Ib,            iemOp_BH_Ib,
    /* 0xb8 */  iemOp_eAX_Iv,           iemOp_eCX_Iv,           iemOp_eDX_Iv,           iemOp_eBX_Iv,
    /* 0xbc */  iemOp_eSP_Iv,           iemOp_eBP_Iv,           iemOp_eSI_Iv,           iemOp_eDI_Iv,
    /* 0xc0 */  iemOp_Grp2_Eb_Ib,       iemOp_Grp2_Ev_Ib,       iemOp_retn_Iw,          iemOp_retn,
    /* 0xc4 */  iemOp_les_Gv_Mp,        iemOp_lds_Gv_Mp,        iemOp_Grp11_Eb_Ib,      iemOp_Grp11_Ev_Iz,
    /* 0xc8 */  iemOp_enter_Iw_Ib,      iemOp_leave,            iemOp_retf_Iw,          iemOp_retf,
    /* 0xcc */  iemOp_int_3,            iemOp_int_Ib,           iemOp_into,             iemOp_iret,
    /* 0xd0 */  iemOp_Grp2_Eb_1,        iemOp_Grp2_Ev_1,        iemOp_Grp2_Eb_CL,       iemOp_Grp2_Ev_CL,
    /* 0xd4 */  iemOp_aam_Ib,           iemOp_aad_Ib,           iemOp_Invalid,          iemOp_xlat,
    /* 0xd8 */  iemOp_EscF0,            iemOp_EscF1,            iemOp_EscF2,            iemOp_EscF3,
    /* 0xdc */  iemOp_EscF4,            iemOp_EscF5,            iemOp_EscF6,            iemOp_EscF7,
    /* 0xe0 */  iemOp_loopne_Jb,        iemOp_loope_Jb,         iemOp_loop_Jb,          iemOp_jecxz_Jb,
    /* 0xe4 */  iemOp_in_AL_Ib,         iemOp_in_eAX_Ib,        iemOp_out_Ib_AL,        iemOp_out_Ib_eAX,
    /* 0xe8 */  iemOp_call_Jv,          iemOp_jmp_Jv,           iemOp_jmp_Ap,           iemOp_jmp_Jb,
    /* 0xec */  iemOp_in_AL_DX,         iemOp_eAX_DX,           iemOp_out_DX_AL,        iemOp_out_DX_eAX,
    /* 0xf0 */  iemOp_lock,             iemOp_Invalid,          iemOp_repne,            iemOp_repe, /** @todo 0xf1 is INT1 / ICEBP. */
    /* 0xf4 */  iemOp_hlt,              iemOp_cmc,              iemOp_Grp3_Eb,          iemOp_Grp3_Ev,
    /* 0xf8 */  iemOp_clc,              iemOp_stc,              iemOp_cli,              iemOp_sti,
    /* 0xfc */  iemOp_cld,              iemOp_std,              iemOp_Grp4,             iemOp_Grp5,
};
17227
17228
17229/** @} */
17230
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette