VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp@107044

Last change on this file since 107044 was 106362, checked in by vboxsync, 6 weeks ago

VMM/DBGF: Prepare DBGF to support ARMv8/A64 style breakpoints for the VMM debugger. This converts the x86 centric int3 naming to software breakpoint, bugref:10393

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 398.9 KB
1/* $Id: IEMAllCImpl.cpp 106362 2024-10-16 13:08:09Z vboxsync $ */
2/** @file
3 * IEM - Instruction Implementation in C/C++ (code include).
4 */
5
6/*
7 * Copyright (C) 2011-2024 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_IEM
33#define VMCPU_INCL_CPUM_GST_CTX
34#define IEM_WITH_OPAQUE_DECODER_STATE
35#include <VBox/vmm/iem.h>
36#include <VBox/vmm/cpum.h>
37#include <VBox/vmm/apic.h>
38#include <VBox/vmm/pdm.h>
39#include <VBox/vmm/pgm.h>
40#include <VBox/vmm/iom.h>
41#include <VBox/vmm/em.h>
42#include <VBox/vmm/hm.h>
43#include <VBox/vmm/nem.h>
44#include <VBox/vmm/gim.h>
45#include <VBox/vmm/gcm.h>
46#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
47# include <VBox/vmm/em.h>
48# include <VBox/vmm/hm_svm.h>
49#endif
50#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
51# include <VBox/vmm/hmvmxinline.h>
52#endif
53#ifndef VBOX_WITHOUT_CPUID_HOST_CALL
54# include <VBox/vmm/cpuidcall.h>
55#endif
56#include <VBox/vmm/tm.h>
57#include <VBox/vmm/dbgf.h>
58#include <VBox/vmm/dbgftrace.h>
59#include "IEMInternal.h"
60#include <VBox/vmm/vmcc.h>
61#include <VBox/log.h>
62#include <VBox/err.h>
63#include <VBox/param.h>
64#include <VBox/dis.h>
65#include <iprt/asm-math.h>
66#include <iprt/assert.h>
67#include <iprt/string.h>
68#include <iprt/x86.h>
69
70#include "IEMInline.h"
71
72
73/*********************************************************************************************************************************
74* Defined Constants And Macros *
75*********************************************************************************************************************************/
76/**
77 * Flushes the prefetch buffer, light version.
78 * @todo The \#if conditions here must match the ones in iemOpcodeFlushLight().
79 */
80#ifndef IEM_WITH_CODE_TLB
81# define IEM_FLUSH_PREFETCH_LIGHT(a_pVCpu, a_cbInstr) iemOpcodeFlushLight(a_pVCpu, a_cbInstr)
82#else
83# define IEM_FLUSH_PREFETCH_LIGHT(a_pVCpu, a_cbInstr) do { } while (0)
84#endif
85
86/**
87 * Flushes the prefetch buffer, heavy version.
88 * @todo The \#if conditions here must match the ones in iemOpcodeFlushHeavy().
89 */
90#if !defined(IEM_WITH_CODE_TLB) || 1
91# define IEM_FLUSH_PREFETCH_HEAVY(a_pVCpu, a_cbInstr) iemOpcodeFlushHeavy(a_pVCpu, a_cbInstr)
92#else
93# define IEM_FLUSH_PREFETCH_HEAVY(a_pVCpu, a_cbInstr) do { } while (0)
94#endif
95
96
97
98/** @name Misc Helpers
99 * @{
100 */
101
102
103/**
104 * Worker function for iemHlpCheckPortIOPermission, don't call directly.
105 *
106 * @returns Strict VBox status code.
107 *
108 * @param pVCpu The cross context virtual CPU structure of the calling thread.
109 * @param u16Port The port number.
110 * @param cbOperand The operand size.
111 */
112static VBOXSTRICTRC iemHlpCheckPortIOPermissionBitmap(PVMCPUCC pVCpu, uint16_t u16Port, uint8_t cbOperand)
113{
114 /* The TSS bits we're interested in are the same on 386 and AMD64. */
115 AssertCompile(AMD64_SEL_TYPE_SYS_TSS_BUSY == X86_SEL_TYPE_SYS_386_TSS_BUSY);
116 AssertCompile(AMD64_SEL_TYPE_SYS_TSS_AVAIL == X86_SEL_TYPE_SYS_386_TSS_AVAIL);
117 AssertCompileMembersAtSameOffset(X86TSS32, offIoBitmap, X86TSS64, offIoBitmap);
118 AssertCompile(sizeof(X86TSS32) == sizeof(X86TSS64));
119
120 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR);
121
122 /*
123 * Check the TSS type; 16-bit TSSes don't have any I/O permission bitmap.
124 */
125 Assert(!pVCpu->cpum.GstCtx.tr.Attr.n.u1DescType);
126 if (RT_UNLIKELY( pVCpu->cpum.GstCtx.tr.Attr.n.u4Type != AMD64_SEL_TYPE_SYS_TSS_BUSY
127 && pVCpu->cpum.GstCtx.tr.Attr.n.u4Type != AMD64_SEL_TYPE_SYS_TSS_AVAIL))
128 {
129 Log(("iemHlpCheckPortIOPermissionBitmap: Port=%#x cb=%d - TSS type %#x (attr=%#x) has no I/O bitmap -> #GP(0)\n",
130 u16Port, cbOperand, pVCpu->cpum.GstCtx.tr.Attr.n.u4Type, pVCpu->cpum.GstCtx.tr.Attr.u));
131 return iemRaiseGeneralProtectionFault0(pVCpu);
132 }
133
134 /*
135 * Read the bitmap offset (may #PF).
136 */
137 uint16_t offBitmap;
138 VBOXSTRICTRC rcStrict = iemMemFetchSysU16(pVCpu, &offBitmap, UINT8_MAX,
139 pVCpu->cpum.GstCtx.tr.u64Base + RT_UOFFSETOF(X86TSS64, offIoBitmap));
140 if (rcStrict != VINF_SUCCESS)
141 {
142 Log(("iemHlpCheckPortIOPermissionBitmap: Error reading offIoBitmap (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
143 return rcStrict;
144 }
145
146 /*
147 * We need the bit range from u16Port to (u16Port + cbOperand - 1). However,
148 * Intel describes the CPU as actually reading two bytes regardless of whether
149 * the bit range crosses a byte boundary. Thus the + 1 in the test below.
150 */
151 uint32_t offFirstBit = (uint32_t)u16Port / 8 + offBitmap;
152 /** @todo check if real CPUs ensure that offBitmap has a minimum value of,
153 * for instance, sizeof(X86TSS32). */
154 if (offFirstBit + 1 > pVCpu->cpum.GstCtx.tr.u32Limit) /* the limit is inclusive */
155 {
156 Log(("iemHlpCheckPortIOPermissionBitmap: offFirstBit=%#x + 1 is beyond u32Limit=%#x -> #GP(0)\n",
157 offFirstBit, pVCpu->cpum.GstCtx.tr.u32Limit));
158 return iemRaiseGeneralProtectionFault0(pVCpu);
159 }
160
161 /*
162 * Read the necessary bits.
163 */
164 /** @todo Test the assertion in the Intel manual that the CPU reads two
165 * bytes. The question is how this works with respect to \#PF and \#GP on
166 * the 2nd byte when it's not required. */
167 uint16_t bmBytes = UINT16_MAX;
168 rcStrict = iemMemFetchSysU16(pVCpu, &bmBytes, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + offFirstBit);
169 if (rcStrict != VINF_SUCCESS)
170 {
171 Log(("iemHlpCheckPortIOPermissionBitmap: Error reading I/O bitmap @%#x (%Rrc)\n", offFirstBit, VBOXSTRICTRC_VAL(rcStrict)));
172 return rcStrict;
173 }
174
175 /*
176 * Perform the check.
177 */
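/* fPortMask has cbOperand low bits set (1, 3 or 15 for 1, 2 or 4 byte accesses),
   and shifting bmBytes right by (u16Port & 7) brings the first port's permission
   bit into bit 0.  Any set bit under the mask means access is denied. */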
178 uint16_t fPortMask = (1 << cbOperand) - 1;
179 bmBytes >>= (u16Port & 7);
180 if (bmBytes & fPortMask)
181 {
182 Log(("iemHlpCheckPortIOPermissionBitmap: u16Port=%#x LB %u - access denied (bm=%#x mask=%#x) -> #GP(0)\n",
183 u16Port, cbOperand, bmBytes, fPortMask));
184 return iemRaiseGeneralProtectionFault0(pVCpu);
185 }
186
187 return VINF_SUCCESS;
188}
189
190
191/**
192 * Checks if we are allowed to access the given I/O port, raising the
193 * appropriate exceptions if we aren't (or if the I/O bitmap is not
194 * accessible).
195 *
196 * @returns Strict VBox status code.
197 *
198 * @param pVCpu The cross context virtual CPU structure of the calling thread.
199 * @param u16Port The port number.
200 * @param cbOperand The operand size.
201 */
202DECLINLINE(VBOXSTRICTRC) iemHlpCheckPortIOPermission(PVMCPUCC pVCpu, uint16_t u16Port, uint8_t cbOperand)
203{
204 X86EFLAGS Efl;
205 Efl.u = IEMMISC_GET_EFL(pVCpu);
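/* No I/O permission checks are done in real mode (CR0.PE clear).  In protected
   mode the TSS bitmap is consulted when CPL > IOPL, and always in V8086 mode. */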
206 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE)
207 && ( IEM_GET_CPL(pVCpu) > Efl.Bits.u2IOPL
208 || Efl.Bits.u1VM) )
209 return iemHlpCheckPortIOPermissionBitmap(pVCpu, u16Port, cbOperand);
210 return VINF_SUCCESS;
211}
212
213
214#if 0
215/**
216 * Calculates the parity bit.
217 *
218 * @returns true if the bit is set, false if not.
219 * @param u8Result The least significant byte of the result.
220 */
221static bool iemHlpCalcParityFlag(uint8_t u8Result)
222{
223 /*
224 * Parity is set if the number of bits in the least significant byte of
225 * the result is even.
226 */
227 uint8_t cBits;
228 cBits = u8Result & 1; /* 0 */
229 u8Result >>= 1;
230 cBits += u8Result & 1;
231 u8Result >>= 1;
232 cBits += u8Result & 1;
233 u8Result >>= 1;
234 cBits += u8Result & 1;
235 u8Result >>= 1;
236 cBits += u8Result & 1; /* 4 */
237 u8Result >>= 1;
238 cBits += u8Result & 1;
239 u8Result >>= 1;
240 cBits += u8Result & 1;
241 u8Result >>= 1;
242 cBits += u8Result & 1;
243 return !(cBits & 1);
244}
245#endif /* not used */
246
247
248/**
249 * Updates the specified flags according to an 8-bit result.
250 *
251 * @param pVCpu The cross context virtual CPU structure of the calling thread.
252 * @param u8Result The result to set the flags according to.
253 * @param fToUpdate The flags to update.
254 * @param fUndefined The flags that are specified as undefined.
255 */
256static void iemHlpUpdateArithEFlagsU8(PVMCPUCC pVCpu, uint8_t u8Result, uint32_t fToUpdate, uint32_t fUndefined)
257{
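/* TEST the result against itself: this yields ZF, SF and PF for u8Result (and
   clears OF and CF) without modifying the value; only the bits selected by
   fToUpdate/fUndefined are then merged into the guest EFLAGS below. */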
258 uint32_t fEFlags = iemAImpl_test_u8(pVCpu->cpum.GstCtx.eflags.u, &u8Result, u8Result);
259 pVCpu->cpum.GstCtx.eflags.u &= ~(fToUpdate | fUndefined);
260 pVCpu->cpum.GstCtx.eflags.u |= (fToUpdate | fUndefined) & fEFlags;
261}
262
263
264/**
265 * Updates the specified flags according to a 16-bit result.
266 *
267 * @param pVCpu The cross context virtual CPU structure of the calling thread.
268 * @param u16Result The result to set the flags according to.
269 * @param fToUpdate The flags to update.
270 * @param fUndefined The flags that are specified as undefined.
271 */
272static void iemHlpUpdateArithEFlagsU16(PVMCPUCC pVCpu, uint16_t u16Result, uint32_t fToUpdate, uint32_t fUndefined)
273{
274 uint32_t fEFlags = iemAImpl_test_u16(pVCpu->cpum.GstCtx.eflags.u, &u16Result, u16Result);
275 pVCpu->cpum.GstCtx.eflags.u &= ~(fToUpdate | fUndefined);
276 pVCpu->cpum.GstCtx.eflags.u |= (fToUpdate | fUndefined) & fEFlags;
277}
278
279
280/**
281 * Helper used by iret.
282 *
283 * @param pVCpu The cross context virtual CPU structure of the calling thread.
284 * @param uCpl The new CPL.
285 * @param pSReg Pointer to the segment register.
286 */
287static void iemHlpAdjustSelectorForNewCpl(PVMCPUCC pVCpu, uint8_t uCpl, PCPUMSELREG pSReg)
288{
289 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
290 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_MASK);
291
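/* When returning to a less privileged level, data segments and non-conforming
   code segments that the new CPL is not allowed to access must be replaced by
   a NULL selector; conforming code and system segments are left untouched. */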
292 if ( uCpl > pSReg->Attr.n.u2Dpl
293 && pSReg->Attr.n.u1DescType /* code or data, not system */
294 && (pSReg->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
295 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF)) /* not conforming code */
296 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, 0);
297}
298
299
300/**
301 * Indicates that we have modified the FPU state.
302 *
303 * @param pVCpu The cross context virtual CPU structure of the calling thread.
304 */
305DECLINLINE(void) iemHlpUsedFpu(PVMCPUCC pVCpu)
306{
307 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
308}
309
310/** @} */
311
312/** @name C Implementations
313 * @{
314 */
315
316
317/**
318 * Implements a pop [mem16].
319 */
320IEM_CIMPL_DEF_2(iemCImpl_pop_mem16, uint16_t, iEffSeg, RTGCPTR, GCPtrEffDst)
321{
322 uint16_t u16Value;
323 RTUINT64U TmpRsp;
324 TmpRsp.u = pVCpu->cpum.GstCtx.rsp;
325 VBOXSTRICTRC rcStrict = iemMemStackPopU16Ex(pVCpu, &u16Value, &TmpRsp);
326 if (rcStrict == VINF_SUCCESS)
327 {
328 rcStrict = iemMemStoreDataU16(pVCpu, iEffSeg, GCPtrEffDst, u16Value);
329 if (rcStrict == VINF_SUCCESS)
330 {
331 pVCpu->cpum.GstCtx.rsp = TmpRsp.u;
332 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
333 }
334 }
335 return rcStrict;
336
337}
338
339
340/**
341 * Implements a pop [mem32].
342 */
343IEM_CIMPL_DEF_2(iemCImpl_pop_mem32, uint16_t, iEffSeg, RTGCPTR, GCPtrEffDst)
344{
345 uint32_t u32Value;
346 RTUINT64U TmpRsp;
347 TmpRsp.u = pVCpu->cpum.GstCtx.rsp;
348 VBOXSTRICTRC rcStrict = iemMemStackPopU32Ex(pVCpu, &u32Value, &TmpRsp);
349 if (rcStrict == VINF_SUCCESS)
350 {
351 rcStrict = iemMemStoreDataU32(pVCpu, iEffSeg, GCPtrEffDst, u32Value);
352 if (rcStrict == VINF_SUCCESS)
353 {
354 pVCpu->cpum.GstCtx.rsp = TmpRsp.u;
355 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
356 }
357 }
358 return rcStrict;
359
360}
361
362
363/**
364 * Implements a pop [mem64].
365 */
366IEM_CIMPL_DEF_2(iemCImpl_pop_mem64, uint16_t, iEffSeg, RTGCPTR, GCPtrEffDst)
367{
368 uint64_t u64Value;
369 RTUINT64U TmpRsp;
370 TmpRsp.u = pVCpu->cpum.GstCtx.rsp;
371 VBOXSTRICTRC rcStrict = iemMemStackPopU64Ex(pVCpu, &u64Value, &TmpRsp);
372 if (rcStrict == VINF_SUCCESS)
373 {
374 rcStrict = iemMemStoreDataU64(pVCpu, iEffSeg, GCPtrEffDst, u64Value);
375 if (rcStrict == VINF_SUCCESS)
376 {
377 pVCpu->cpum.GstCtx.rsp = TmpRsp.u;
378 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
379 }
380 }
381 return rcStrict;
382
383}
384
385
386/**
387 * Implements a 16-bit popa.
388 */
389IEM_CIMPL_DEF_0(iemCImpl_popa_16)
390{
391 RTGCPTR GCPtrStart = iemRegGetEffRsp(pVCpu);
392 RTGCPTR GCPtrLast = GCPtrStart + 15;
393 VBOXSTRICTRC rcStrict;
394
395 /*
396 * The docs are a bit hard to comprehend here, but it looks like we wrap
397 * around in real mode as long as none of the individual pops crosses the
398 * end of the stack segment. In protected mode we check the whole access
399 * in one go. For efficiency, only do the word-by-word thing if we're in
400 * danger of wrapping around.
401 */
402 /** @todo do popa boundary / wrap-around checks. */
403 if (RT_UNLIKELY( IEM_IS_REAL_OR_V86_MODE(pVCpu)
404 && (pVCpu->cpum.GstCtx.cs.u32Limit < GCPtrLast)) ) /* ASSUMES 64-bit RTGCPTR */
405 {
406 /* word-by-word */
407 RTUINT64U TmpRsp;
408 TmpRsp.u = pVCpu->cpum.GstCtx.rsp;
409 rcStrict = iemMemStackPopU16Ex(pVCpu, &pVCpu->cpum.GstCtx.di, &TmpRsp);
410 if (rcStrict == VINF_SUCCESS)
411 rcStrict = iemMemStackPopU16Ex(pVCpu, &pVCpu->cpum.GstCtx.si, &TmpRsp);
412 if (rcStrict == VINF_SUCCESS)
413 rcStrict = iemMemStackPopU16Ex(pVCpu, &pVCpu->cpum.GstCtx.bp, &TmpRsp);
414 if (rcStrict == VINF_SUCCESS)
415 {
416 iemRegAddToRspEx(pVCpu, &TmpRsp, 2); /* sp */
417 rcStrict = iemMemStackPopU16Ex(pVCpu, &pVCpu->cpum.GstCtx.bx, &TmpRsp);
418 }
419 if (rcStrict == VINF_SUCCESS)
420 rcStrict = iemMemStackPopU16Ex(pVCpu, &pVCpu->cpum.GstCtx.dx, &TmpRsp);
421 if (rcStrict == VINF_SUCCESS)
422 rcStrict = iemMemStackPopU16Ex(pVCpu, &pVCpu->cpum.GstCtx.cx, &TmpRsp);
423 if (rcStrict == VINF_SUCCESS)
424 rcStrict = iemMemStackPopU16Ex(pVCpu, &pVCpu->cpum.GstCtx.ax, &TmpRsp);
425 if (rcStrict == VINF_SUCCESS)
426 {
427 pVCpu->cpum.GstCtx.rsp = TmpRsp.u;
428 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
429 }
430 }
431 else
432 {
433 uint8_t bUnmapInfo;
434 uint16_t const *pau16Mem = NULL;
435 rcStrict = iemMemMap(pVCpu, (void **)&pau16Mem, &bUnmapInfo, 16, X86_SREG_SS, GCPtrStart,
436 IEM_ACCESS_STACK_R, sizeof(*pau16Mem) - 1);
437 if (rcStrict == VINF_SUCCESS)
438 {
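/* PUSHA stored AX,CX,DX,BX,SP,BP,SI,DI from high to low addresses, so register
   iReg is found at index 7 - iReg in the mapped block. */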
439 pVCpu->cpum.GstCtx.di = pau16Mem[7 - X86_GREG_xDI];
440 pVCpu->cpum.GstCtx.si = pau16Mem[7 - X86_GREG_xSI];
441 pVCpu->cpum.GstCtx.bp = pau16Mem[7 - X86_GREG_xBP];
442 /* skip sp */
443 pVCpu->cpum.GstCtx.bx = pau16Mem[7 - X86_GREG_xBX];
444 pVCpu->cpum.GstCtx.dx = pau16Mem[7 - X86_GREG_xDX];
445 pVCpu->cpum.GstCtx.cx = pau16Mem[7 - X86_GREG_xCX];
446 pVCpu->cpum.GstCtx.ax = pau16Mem[7 - X86_GREG_xAX];
447 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
448 if (rcStrict == VINF_SUCCESS)
449 {
450 iemRegAddToRsp(pVCpu, 16);
451 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
452 }
453 }
454 }
455 return rcStrict;
456}
457
458
459/**
460 * Implements a 32-bit popa.
461 */
462IEM_CIMPL_DEF_0(iemCImpl_popa_32)
463{
464 RTGCPTR GCPtrStart = iemRegGetEffRsp(pVCpu);
465 RTGCPTR GCPtrLast = GCPtrStart + 31;
466 VBOXSTRICTRC rcStrict;
467
468 /*
469 * The docs are a bit hard to comprehend here, but it looks like we wrap
470 * around in real mode as long as none of the individual pops crosses the
471 * end of the stack segment. In protected mode we check the whole access
472 * in one go. For efficiency, only do the word-by-word thing if we're in
473 * danger of wrapping around.
474 */
475 /** @todo do popa boundary / wrap-around checks. */
476 if (RT_UNLIKELY( IEM_IS_REAL_OR_V86_MODE(pVCpu)
477 && (pVCpu->cpum.GstCtx.cs.u32Limit < GCPtrLast)) ) /* ASSUMES 64-bit RTGCPTR */
478 {
479 /* word-by-word */
480 RTUINT64U TmpRsp;
481 TmpRsp.u = pVCpu->cpum.GstCtx.rsp;
482 rcStrict = iemMemStackPopU32Ex(pVCpu, &pVCpu->cpum.GstCtx.edi, &TmpRsp);
483 if (rcStrict == VINF_SUCCESS)
484 rcStrict = iemMemStackPopU32Ex(pVCpu, &pVCpu->cpum.GstCtx.esi, &TmpRsp);
485 if (rcStrict == VINF_SUCCESS)
486 rcStrict = iemMemStackPopU32Ex(pVCpu, &pVCpu->cpum.GstCtx.ebp, &TmpRsp);
487 if (rcStrict == VINF_SUCCESS)
488 {
489 iemRegAddToRspEx(pVCpu, &TmpRsp, 2); /* sp */
490 rcStrict = iemMemStackPopU32Ex(pVCpu, &pVCpu->cpum.GstCtx.ebx, &TmpRsp);
491 }
492 if (rcStrict == VINF_SUCCESS)
493 rcStrict = iemMemStackPopU32Ex(pVCpu, &pVCpu->cpum.GstCtx.edx, &TmpRsp);
494 if (rcStrict == VINF_SUCCESS)
495 rcStrict = iemMemStackPopU32Ex(pVCpu, &pVCpu->cpum.GstCtx.ecx, &TmpRsp);
496 if (rcStrict == VINF_SUCCESS)
497 rcStrict = iemMemStackPopU32Ex(pVCpu, &pVCpu->cpum.GstCtx.eax, &TmpRsp);
498 if (rcStrict == VINF_SUCCESS)
499 {
500#if 1 /** @todo what actually happens with the high bits when we're in 16-bit mode? */
501 pVCpu->cpum.GstCtx.rdi &= UINT32_MAX;
502 pVCpu->cpum.GstCtx.rsi &= UINT32_MAX;
503 pVCpu->cpum.GstCtx.rbp &= UINT32_MAX;
504 pVCpu->cpum.GstCtx.rbx &= UINT32_MAX;
505 pVCpu->cpum.GstCtx.rdx &= UINT32_MAX;
506 pVCpu->cpum.GstCtx.rcx &= UINT32_MAX;
507 pVCpu->cpum.GstCtx.rax &= UINT32_MAX;
508#endif
509 pVCpu->cpum.GstCtx.rsp = TmpRsp.u;
510 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
511 }
512 }
513 else
514 {
515 uint8_t bUnmapInfo;
516 uint32_t const *pau32Mem;
517 rcStrict = iemMemMap(pVCpu, (void **)&pau32Mem, &bUnmapInfo, 32, X86_SREG_SS, GCPtrStart,
518 IEM_ACCESS_STACK_R, sizeof(*pau32Mem) - 1);
519 if (rcStrict == VINF_SUCCESS)
520 {
521 pVCpu->cpum.GstCtx.rdi = pau32Mem[7 - X86_GREG_xDI];
522 pVCpu->cpum.GstCtx.rsi = pau32Mem[7 - X86_GREG_xSI];
523 pVCpu->cpum.GstCtx.rbp = pau32Mem[7 - X86_GREG_xBP];
524 /* skip esp */
525 pVCpu->cpum.GstCtx.rbx = pau32Mem[7 - X86_GREG_xBX];
526 pVCpu->cpum.GstCtx.rdx = pau32Mem[7 - X86_GREG_xDX];
527 pVCpu->cpum.GstCtx.rcx = pau32Mem[7 - X86_GREG_xCX];
528 pVCpu->cpum.GstCtx.rax = pau32Mem[7 - X86_GREG_xAX];
529 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
530 if (rcStrict == VINF_SUCCESS)
531 {
532 iemRegAddToRsp(pVCpu, 32);
533 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
534 }
535 }
536 }
537 return rcStrict;
538}
539
540
541/**
542 * Implements a 16-bit pusha.
543 */
544IEM_CIMPL_DEF_0(iemCImpl_pusha_16)
545{
546 RTGCPTR GCPtrTop = iemRegGetEffRsp(pVCpu);
547 RTGCPTR GCPtrBottom = GCPtrTop - 15;
548 VBOXSTRICTRC rcStrict;
549
550 /*
551 * The docs are a bit hard to comprehend here, but it looks like we wrap
552 * around in real mode as long as none of the individual pushes crosses the
553 * end of the stack segment. In protected mode we check the whole access
554 * in one go. For efficiency, only do the word-by-word thing if we're in
555 * danger of wrapping around.
556 */
557 /** @todo do pusha boundary / wrap-around checks. */
558 if (RT_UNLIKELY( GCPtrBottom > GCPtrTop
559 && IEM_IS_REAL_OR_V86_MODE(pVCpu) ) )
560 {
561 /* word-by-word */
562 RTUINT64U TmpRsp;
563 TmpRsp.u = pVCpu->cpum.GstCtx.rsp;
564 rcStrict = iemMemStackPushU16Ex(pVCpu, pVCpu->cpum.GstCtx.ax, &TmpRsp);
565 if (rcStrict == VINF_SUCCESS)
566 rcStrict = iemMemStackPushU16Ex(pVCpu, pVCpu->cpum.GstCtx.cx, &TmpRsp);
567 if (rcStrict == VINF_SUCCESS)
568 rcStrict = iemMemStackPushU16Ex(pVCpu, pVCpu->cpum.GstCtx.dx, &TmpRsp);
569 if (rcStrict == VINF_SUCCESS)
570 rcStrict = iemMemStackPushU16Ex(pVCpu, pVCpu->cpum.GstCtx.bx, &TmpRsp);
571 if (rcStrict == VINF_SUCCESS)
572 rcStrict = iemMemStackPushU16Ex(pVCpu, pVCpu->cpum.GstCtx.sp, &TmpRsp);
573 if (rcStrict == VINF_SUCCESS)
574 rcStrict = iemMemStackPushU16Ex(pVCpu, pVCpu->cpum.GstCtx.bp, &TmpRsp);
575 if (rcStrict == VINF_SUCCESS)
576 rcStrict = iemMemStackPushU16Ex(pVCpu, pVCpu->cpum.GstCtx.si, &TmpRsp);
577 if (rcStrict == VINF_SUCCESS)
578 rcStrict = iemMemStackPushU16Ex(pVCpu, pVCpu->cpum.GstCtx.di, &TmpRsp);
579 if (rcStrict == VINF_SUCCESS)
580 {
581 pVCpu->cpum.GstCtx.rsp = TmpRsp.u;
582 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
583 }
584 }
585 else
586 {
587 GCPtrBottom--;
588 uint8_t bUnmapInfo;
589 uint16_t *pau16Mem = NULL;
590 rcStrict = iemMemMap(pVCpu, (void **)&pau16Mem, &bUnmapInfo, 16, X86_SREG_SS, GCPtrBottom,
591 IEM_ACCESS_STACK_W, sizeof(*pau16Mem) - 1);
592 if (rcStrict == VINF_SUCCESS)
593 {
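/* Build the frame in the layout POPA expects: register iReg goes into slot
   7 - iReg, and the SP slot receives the value SP had before the PUSHA. */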
594 pau16Mem[7 - X86_GREG_xDI] = pVCpu->cpum.GstCtx.di;
595 pau16Mem[7 - X86_GREG_xSI] = pVCpu->cpum.GstCtx.si;
596 pau16Mem[7 - X86_GREG_xBP] = pVCpu->cpum.GstCtx.bp;
597 pau16Mem[7 - X86_GREG_xSP] = pVCpu->cpum.GstCtx.sp;
598 pau16Mem[7 - X86_GREG_xBX] = pVCpu->cpum.GstCtx.bx;
599 pau16Mem[7 - X86_GREG_xDX] = pVCpu->cpum.GstCtx.dx;
600 pau16Mem[7 - X86_GREG_xCX] = pVCpu->cpum.GstCtx.cx;
601 pau16Mem[7 - X86_GREG_xAX] = pVCpu->cpum.GstCtx.ax;
602 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
603 if (rcStrict == VINF_SUCCESS)
604 {
605 iemRegSubFromRsp(pVCpu, 16);
606 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
607 }
608 }
609 }
610 return rcStrict;
611}
612
613
614/**
615 * Implements a 32-bit pusha.
616 */
617IEM_CIMPL_DEF_0(iemCImpl_pusha_32)
618{
619 RTGCPTR GCPtrTop = iemRegGetEffRsp(pVCpu);
620 RTGCPTR GCPtrBottom = GCPtrTop - 31;
621 VBOXSTRICTRC rcStrict;
622
623 /*
624 * The docs are a bit hard to comprehend here, but it looks like we wrap
625 * around in real mode as long as none of the individual pushes crosses the
626 * end of the stack segment. In protected mode we check the whole access
627 * in one go. For efficiency, only do the word-by-word thing if we're in
628 * danger of wrapping around.
629 */
630 /** @todo do pusha boundary / wrap-around checks. */
631 if (RT_UNLIKELY( GCPtrBottom > GCPtrTop
632 && IEM_IS_REAL_OR_V86_MODE(pVCpu) ) )
633 {
634 /* word-by-word */
635 RTUINT64U TmpRsp;
636 TmpRsp.u = pVCpu->cpum.GstCtx.rsp;
637 rcStrict = iemMemStackPushU32Ex(pVCpu, pVCpu->cpum.GstCtx.eax, &TmpRsp);
638 if (rcStrict == VINF_SUCCESS)
639 rcStrict = iemMemStackPushU32Ex(pVCpu, pVCpu->cpum.GstCtx.ecx, &TmpRsp);
640 if (rcStrict == VINF_SUCCESS)
641 rcStrict = iemMemStackPushU32Ex(pVCpu, pVCpu->cpum.GstCtx.edx, &TmpRsp);
642 if (rcStrict == VINF_SUCCESS)
643 rcStrict = iemMemStackPushU32Ex(pVCpu, pVCpu->cpum.GstCtx.ebx, &TmpRsp);
644 if (rcStrict == VINF_SUCCESS)
645 rcStrict = iemMemStackPushU32Ex(pVCpu, pVCpu->cpum.GstCtx.esp, &TmpRsp);
646 if (rcStrict == VINF_SUCCESS)
647 rcStrict = iemMemStackPushU32Ex(pVCpu, pVCpu->cpum.GstCtx.ebp, &TmpRsp);
648 if (rcStrict == VINF_SUCCESS)
649 rcStrict = iemMemStackPushU32Ex(pVCpu, pVCpu->cpum.GstCtx.esi, &TmpRsp);
650 if (rcStrict == VINF_SUCCESS)
651 rcStrict = iemMemStackPushU32Ex(pVCpu, pVCpu->cpum.GstCtx.edi, &TmpRsp);
652 if (rcStrict == VINF_SUCCESS)
653 {
654 pVCpu->cpum.GstCtx.rsp = TmpRsp.u;
655 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
656 }
657 }
658 else
659 {
660 GCPtrBottom--;
661 uint8_t bUnmapInfo;
662 uint32_t *pau32Mem;
663 rcStrict = iemMemMap(pVCpu, (void **)&pau32Mem, &bUnmapInfo, 32, X86_SREG_SS, GCPtrBottom,
664 IEM_ACCESS_STACK_W, sizeof(*pau32Mem) - 1);
665 if (rcStrict == VINF_SUCCESS)
666 {
667 pau32Mem[7 - X86_GREG_xDI] = pVCpu->cpum.GstCtx.edi;
668 pau32Mem[7 - X86_GREG_xSI] = pVCpu->cpum.GstCtx.esi;
669 pau32Mem[7 - X86_GREG_xBP] = pVCpu->cpum.GstCtx.ebp;
670 pau32Mem[7 - X86_GREG_xSP] = pVCpu->cpum.GstCtx.esp;
671 pau32Mem[7 - X86_GREG_xBX] = pVCpu->cpum.GstCtx.ebx;
672 pau32Mem[7 - X86_GREG_xDX] = pVCpu->cpum.GstCtx.edx;
673 pau32Mem[7 - X86_GREG_xCX] = pVCpu->cpum.GstCtx.ecx;
674 pau32Mem[7 - X86_GREG_xAX] = pVCpu->cpum.GstCtx.eax;
675 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
676 if (rcStrict == VINF_SUCCESS)
677 {
678 iemRegSubFromRsp(pVCpu, 32);
679 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
680 }
681 }
682 }
683 return rcStrict;
684}
685
686
687/**
688 * Implements pushf.
689 *
690 *
691 * @param enmEffOpSize The effective operand size.
692 */
693IEM_CIMPL_DEF_1(iemCImpl_pushf, IEMMODE, enmEffOpSize)
694{
695 VBOXSTRICTRC rcStrict;
696
697 if (!IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_PUSHF))
698 { /* probable */ }
699 else
700 {
701 Log2(("pushf: Guest intercept -> #VMEXIT\n"));
702 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
703 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_PUSHF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
704 }
705
706 /*
707 * If we're in V8086 mode some care is required (which is why we're
708 * doing this in a C implementation).
709 */
710 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
711 if ( (fEfl & X86_EFL_VM)
712 && X86_EFL_GET_IOPL(fEfl) != 3 )
713 {
714 Assert(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE);
715 if ( enmEffOpSize != IEMMODE_16BIT
716 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_VME))
717 return iemRaiseGeneralProtectionFault0(pVCpu);
718 fEfl &= ~X86_EFL_IF; /* (RF and VM are out of range) */
719 fEfl |= (fEfl & X86_EFL_VIF) >> (19 - 9);
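/* VIF is bit 19 and IF is bit 9, so the shift above copies the virtual
   interrupt flag into the IF position of the 16-bit image we push. */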
720 rcStrict = iemMemStackPushU16(pVCpu, (uint16_t)fEfl);
721 }
722 else
723 {
724
725 /*
726 * Ok, clear RF and VM, adjust for ancient CPUs, and push the flags.
727 */
728 fEfl &= ~(X86_EFL_RF | X86_EFL_VM);
729
730 switch (enmEffOpSize)
731 {
732 case IEMMODE_16BIT:
733 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
734 if (IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_186)
735 fEfl |= UINT16_C(0xf000);
736 rcStrict = iemMemStackPushU16(pVCpu, (uint16_t)fEfl);
737 break;
738 case IEMMODE_32BIT:
739 rcStrict = iemMemStackPushU32(pVCpu, fEfl);
740 break;
741 case IEMMODE_64BIT:
742 rcStrict = iemMemStackPushU64(pVCpu, fEfl);
743 break;
744 IEM_NOT_REACHED_DEFAULT_CASE_RET();
745 }
746 }
747
748 if (rcStrict == VINF_SUCCESS)
749 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
750 return rcStrict;
751}
752
753
754/**
755 * Implements popf.
756 *
757 * @param enmEffOpSize The effective operand size.
758 */
759IEM_CIMPL_DEF_1(iemCImpl_popf, IEMMODE, enmEffOpSize)
760{
761 uint32_t const fEflOld = IEMMISC_GET_EFL(pVCpu);
762 VBOXSTRICTRC rcStrict;
763 uint32_t fEflNew;
764
765 if (!IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_POPF))
766 { /* probable */ }
767 else
768 {
769 Log2(("popf: Guest intercept -> #VMEXIT\n"));
770 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
771 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_POPF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
772 }
773
774 /*
775 * V8086 is special as usual.
776 */
777 if (fEflOld & X86_EFL_VM)
778 {
779 /*
780 * Almost anything goes if IOPL is 3.
781 */
782 if (X86_EFL_GET_IOPL(fEflOld) == 3)
783 {
784 switch (enmEffOpSize)
785 {
786 case IEMMODE_16BIT:
787 {
788 uint16_t u16Value;
789 rcStrict = iemMemStackPopU16(pVCpu, &u16Value);
790 if (rcStrict != VINF_SUCCESS)
791 return rcStrict;
792 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000));
793 break;
794 }
795 case IEMMODE_32BIT:
796 rcStrict = iemMemStackPopU32(pVCpu, &fEflNew);
797 if (rcStrict != VINF_SUCCESS)
798 return rcStrict;
799 break;
800 IEM_NOT_REACHED_DEFAULT_CASE_RET();
801 }
802
803 const uint32_t fPopfBits = pVCpu->CTX_SUFF(pVM)->cpum.ro.GuestFeatures.enmMicroarch != kCpumMicroarch_Intel_80386
804 ? X86_EFL_POPF_BITS : X86_EFL_POPF_BITS_386;
805 fEflNew &= fPopfBits & ~(X86_EFL_IOPL);
806 fEflNew |= ~(fPopfBits & ~(X86_EFL_IOPL)) & fEflOld;
807 }
808 /*
809 * Interrupt flag virtualization with CR4.VME=1.
810 */
811 else if ( enmEffOpSize == IEMMODE_16BIT
812 && (pVCpu->cpum.GstCtx.cr4 & X86_CR4_VME) )
813 {
814 uint16_t u16Value;
815 RTUINT64U TmpRsp;
816 TmpRsp.u = pVCpu->cpum.GstCtx.rsp;
817 rcStrict = iemMemStackPopU16Ex(pVCpu, &u16Value, &TmpRsp);
818 if (rcStrict != VINF_SUCCESS)
819 return rcStrict;
820
821 if ( ( (u16Value & X86_EFL_IF)
822 && (fEflOld & X86_EFL_VIP))
823 || (u16Value & X86_EFL_TF) )
824 return iemRaiseGeneralProtectionFault0(pVCpu);
825
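/* Under VME the IF bit from the popped image is stored in VIF instead, while
   the real IF, IOPL and the upper 16 bits are taken from the old flags. */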
826 fEflNew = X86_EFL_RA1_MASK
827 | (u16Value & ~(X86_EFL_IF | X86_EFL_IOPL | X86_EFL_RAZ_MASK))
828 | (fEflOld & (UINT32_C(0xffff0000) | X86_EFL_IF | X86_EFL_IOPL) & ~(X86_EFL_VIF | X86_EFL_RF))
829 | ((uint32_t)(u16Value & X86_EFL_IF) << (X86_EFL_VIF_BIT - X86_EFL_IF_BIT));
830
831 pVCpu->cpum.GstCtx.rsp = TmpRsp.u;
832 }
833 else
834 return iemRaiseGeneralProtectionFault0(pVCpu);
835
836 }
837 /*
838 * Not in V8086 mode.
839 */
840 else
841 {
842 /* Pop the flags. */
843 switch (enmEffOpSize)
844 {
845 case IEMMODE_16BIT:
846 {
847 uint16_t u16Value;
848 rcStrict = iemMemStackPopU16(pVCpu, &u16Value);
849 if (rcStrict != VINF_SUCCESS)
850 return rcStrict;
851 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000));
852
853 /*
854 * Ancient CPU adjustments:
855 * - 8086, 80186, V20/30:
856 * The fixed bits 15:12 are not kept correctly internally, mostly for
857 * practical reasons (masking below). We add them when pushing flags.
858 * - 80286:
859 * The NT and IOPL flags cannot be popped from real mode and are
860 * therefore always zero (since a 286 can never exit from PM and
861 * their initial value is zero). This changed on a 386 and can
862 * therefore be used to tell a 286 from a 386 CPU in real mode.
863 */
864 if ( IEM_GET_TARGET_CPU(pVCpu) == IEMTARGETCPU_286
865 && !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
866 fEflNew &= ~(X86_EFL_NT | X86_EFL_IOPL);
867 break;
868 }
869 case IEMMODE_32BIT:
870 rcStrict = iemMemStackPopU32(pVCpu, &fEflNew);
871 if (rcStrict != VINF_SUCCESS)
872 return rcStrict;
873 break;
874 case IEMMODE_64BIT:
875 {
876 uint64_t u64Value;
877 rcStrict = iemMemStackPopU64(pVCpu, &u64Value);
878 if (rcStrict != VINF_SUCCESS)
879 return rcStrict;
880 fEflNew = u64Value; /** @todo testcase: Check exactly what happens if high bits are set. */
881 break;
882 }
883 IEM_NOT_REACHED_DEFAULT_CASE_RET();
884 }
885
886 /* Merge them with the current flags. */
887 const uint32_t fPopfBits = pVCpu->CTX_SUFF(pVM)->cpum.ro.GuestFeatures.enmMicroarch != kCpumMicroarch_Intel_80386
888 ? X86_EFL_POPF_BITS : X86_EFL_POPF_BITS_386;
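/* Merge rules: at CPL 0 (or when neither IOPL nor IF changes) all POPF-modifiable
   bits are taken from the stack; at CPL <= IOPL everything but IOPL may change;
   at lower privilege neither IOPL nor IF may be changed. */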
889 if ( (fEflNew & (X86_EFL_IOPL | X86_EFL_IF)) == (fEflOld & (X86_EFL_IOPL | X86_EFL_IF))
890 || IEM_GET_CPL(pVCpu) == 0)
891 {
892 fEflNew &= fPopfBits;
893 fEflNew |= ~fPopfBits & fEflOld;
894 }
895 else if (IEM_GET_CPL(pVCpu) <= X86_EFL_GET_IOPL(fEflOld))
896 {
897 fEflNew &= fPopfBits & ~(X86_EFL_IOPL);
898 fEflNew |= ~(fPopfBits & ~(X86_EFL_IOPL)) & fEflOld;
899 }
900 else
901 {
902 fEflNew &= fPopfBits & ~(X86_EFL_IOPL | X86_EFL_IF);
903 fEflNew |= ~(fPopfBits & ~(X86_EFL_IOPL | X86_EFL_IF)) & fEflOld;
904 }
905 }
906
907 /*
908 * Commit the flags.
909 */
910 Assert(fEflNew & RT_BIT_32(1));
911 IEMMISC_SET_EFL(pVCpu, fEflNew);
912 pVCpu->iem.s.fExec = (pVCpu->iem.s.fExec & ~IEM_F_X86_AC) | iemCalcExecAcFlag(pVCpu);
913 return iemRegAddToRipAndFinishingClearingRfEx(pVCpu, cbInstr, fEflOld);
914}
915
916
917/**
918 * Implements far jumps and calls thru task segments (TSS).
919 *
920 * @returns VBox strict status code.
921 * @param pVCpu The cross context virtual CPU structure of the
922 * calling thread.
923 * @param cbInstr The current instruction length.
924 * @param uSel The selector.
925 * @param enmBranch The kind of branching we're performing.
926 * @param enmEffOpSize The effective operand size.
927 * @param pDesc The descriptor corresponding to @a uSel. The type is
928 * task gate.
929 */
930static VBOXSTRICTRC iemCImpl_BranchTaskSegment(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uSel, IEMBRANCH enmBranch,
931 IEMMODE enmEffOpSize, PIEMSELDESC pDesc)
932{
933#ifndef IEM_IMPLEMENTS_TASKSWITCH
934 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
935#else
936 Assert(enmBranch == IEMBRANCH_JUMP || enmBranch == IEMBRANCH_CALL);
937 Assert( pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
938 || pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL);
939 RT_NOREF_PV(enmEffOpSize);
940 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
941
942 if ( pDesc->Legacy.Gate.u2Dpl < IEM_GET_CPL(pVCpu)
943 || pDesc->Legacy.Gate.u2Dpl < (uSel & X86_SEL_RPL))
944 {
945 Log(("BranchTaskSegment invalid priv. uSel=%04x TSS DPL=%d CPL=%u Sel RPL=%u -> #GP\n", uSel, pDesc->Legacy.Gate.u2Dpl,
946 IEM_GET_CPL(pVCpu), (uSel & X86_SEL_RPL)));
947 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
948 }
949
950 /** @todo This is checked earlier for far jumps (see iemCImpl_FarJmp) but not
951 * far calls (see iemCImpl_callf). Most likely in both cases it should be
952 * checked here, need testcases. */
953 if (!pDesc->Legacy.Gen.u1Present)
954 {
955 Log(("BranchTaskSegment TSS not present uSel=%04x -> #NP\n", uSel));
956 return iemRaiseSelectorNotPresentBySelector(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
957 }
958
959 uint32_t uNextEip = pVCpu->cpum.GstCtx.eip + cbInstr;
960 return iemTaskSwitch(pVCpu, enmBranch == IEMBRANCH_JUMP ? IEMTASKSWITCH_JUMP : IEMTASKSWITCH_CALL,
961 uNextEip, 0 /* fFlags */, 0 /* uErr */, 0 /* uCr2 */, uSel, pDesc);
962#endif
963}
964
965
966/**
967 * Implements far jumps and calls thru task gates.
968 *
969 * @returns VBox strict status code.
970 * @param pVCpu The cross context virtual CPU structure of the
971 * calling thread.
972 * @param cbInstr The current instruction length.
973 * @param uSel The selector.
974 * @param enmBranch The kind of branching we're performing.
975 * @param enmEffOpSize The effective operand size.
976 * @param pDesc The descriptor corresponding to @a uSel. The type is
977 * task gate.
978 */
979static VBOXSTRICTRC iemCImpl_BranchTaskGate(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uSel, IEMBRANCH enmBranch,
980 IEMMODE enmEffOpSize, PIEMSELDESC pDesc)
981{
982#ifndef IEM_IMPLEMENTS_TASKSWITCH
983 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
984#else
985 Assert(enmBranch == IEMBRANCH_JUMP || enmBranch == IEMBRANCH_CALL);
986 RT_NOREF_PV(enmEffOpSize);
987 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
988
989 if ( pDesc->Legacy.Gate.u2Dpl < IEM_GET_CPL(pVCpu)
990 || pDesc->Legacy.Gate.u2Dpl < (uSel & X86_SEL_RPL))
991 {
992 Log(("BranchTaskGate invalid priv. uSel=%04x TSS DPL=%d CPL=%u Sel RPL=%u -> #GP\n", uSel, pDesc->Legacy.Gate.u2Dpl,
993 IEM_GET_CPL(pVCpu), (uSel & X86_SEL_RPL)));
994 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
995 }
996
997 /** @todo This is checked earlier for far jumps (see iemCImpl_FarJmp) but not
998 * far calls (see iemCImpl_callf). Most likely in both cases it should be
999 * checked here, need testcases. */
1000 if (!pDesc->Legacy.Gen.u1Present)
1001 {
1002 Log(("BranchTaskSegment segment not present uSel=%04x -> #NP\n", uSel));
1003 return iemRaiseSelectorNotPresentBySelector(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
1004 }
1005
1006 /*
1007 * Fetch the new TSS descriptor from the GDT.
1008 */
1009 RTSEL uSelTss = pDesc->Legacy.Gate.u16Sel;
1010 if (uSelTss & X86_SEL_LDT)
1011 {
1012 Log(("BranchTaskGate TSS is in LDT. uSel=%04x uSelTss=%04x -> #GP\n", uSel, uSelTss));
1013 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
1014 }
1015
1016 IEMSELDESC TssDesc;
1017 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &TssDesc, uSelTss, X86_XCPT_GP);
1018 if (rcStrict != VINF_SUCCESS)
1019 return rcStrict;
1020
1021 if (TssDesc.Legacy.Gate.u4Type & X86_SEL_TYPE_SYS_TSS_BUSY_MASK)
1022 {
1023 Log(("BranchTaskGate TSS is busy. uSel=%04x uSelTss=%04x DescType=%#x -> #GP\n", uSel, uSelTss,
1024 TssDesc.Legacy.Gate.u4Type));
1025 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
1026 }
1027
1028 if (!TssDesc.Legacy.Gate.u1Present)
1029 {
1030 Log(("BranchTaskGate TSS is not present. uSel=%04x uSelTss=%04x -> #NP\n", uSel, uSelTss));
1031 return iemRaiseSelectorNotPresentBySelector(pVCpu, uSelTss & X86_SEL_MASK_OFF_RPL);
1032 }
1033
1034 uint32_t uNextEip = pVCpu->cpum.GstCtx.eip + cbInstr;
1035 return iemTaskSwitch(pVCpu, enmBranch == IEMBRANCH_JUMP ? IEMTASKSWITCH_JUMP : IEMTASKSWITCH_CALL,
1036 uNextEip, 0 /* fFlags */, 0 /* uErr */, 0 /* uCr2 */, uSelTss, &TssDesc);
1037#endif
1038}
1039
1040
1041/**
1042 * Implements far jumps and calls thru call gates.
1043 *
1044 * @returns VBox strict status code.
1045 * @param pVCpu The cross context virtual CPU structure of the
1046 * calling thread.
1047 * @param cbInstr The current instruction length.
1048 * @param uSel The selector.
1049 * @param enmBranch The kind of branching we're performing.
1050 * @param enmEffOpSize The effective operand size.
1051 * @param pDesc The descriptor corresponding to @a uSel. The type is
1052 * call gate.
1053 */
1054static VBOXSTRICTRC iemCImpl_BranchCallGate(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uSel, IEMBRANCH enmBranch,
1055 IEMMODE enmEffOpSize, PIEMSELDESC pDesc)
1056{
1057#define IEM_IMPLEMENTS_CALLGATE
1058#ifndef IEM_IMPLEMENTS_CALLGATE
1059 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
1060#else
1061 RT_NOREF_PV(enmEffOpSize);
1062 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
1063
1064 /* NB: Far jumps can only do intra-privilege transfers. Far calls support
1065 * inter-privilege calls and are much more complex.
1066 *
1067 * NB: 64-bit call gate has the same type as a 32-bit call gate! If
1068 * EFER.LMA=1, the gate must be 64-bit. Conversely if EFER.LMA=0, the gate
1069 * must be 16-bit or 32-bit.
1070 */
1071 /** @todo effective operand size is probably irrelevant here, only the
1072 * call gate bitness matters??
1073 */
1074 VBOXSTRICTRC rcStrict;
1075 RTPTRUNION uPtrRet;
1076 uint64_t uNewRsp;
1077 uint64_t uNewRip;
1078 uint64_t u64Base;
1079 uint32_t cbLimit;
1080 RTSEL uNewCS;
1081 IEMSELDESC DescCS;
1082
1083 AssertCompile(X86_SEL_TYPE_SYS_386_CALL_GATE == AMD64_SEL_TYPE_SYS_CALL_GATE);
1084 Assert(enmBranch == IEMBRANCH_JUMP || enmBranch == IEMBRANCH_CALL);
1085 Assert( pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE
1086 || pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE);
1087
1088 /* Determine the new instruction pointer from the gate descriptor. */
1089 uNewRip = pDesc->Legacy.Gate.u16OffsetLow
1090 | ((uint32_t)pDesc->Legacy.Gate.u16OffsetHigh << 16)
1091 | ((uint64_t)pDesc->Long.Gate.u32OffsetTop << 32);
1092
1093 /* Perform DPL checks on the gate descriptor. */
1094 if ( pDesc->Legacy.Gate.u2Dpl < IEM_GET_CPL(pVCpu)
1095 || pDesc->Legacy.Gate.u2Dpl < (uSel & X86_SEL_RPL))
1096 {
1097 Log(("BranchCallGate invalid priv. uSel=%04x Gate DPL=%d CPL=%u Sel RPL=%u -> #GP\n", uSel, pDesc->Legacy.Gate.u2Dpl,
1098 IEM_GET_CPL(pVCpu), (uSel & X86_SEL_RPL)));
1099 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1100 }
1101
1102 /** @todo does this catch NULL selectors, too? */
1103 if (!pDesc->Legacy.Gen.u1Present)
1104 {
1105 Log(("BranchCallGate Gate not present uSel=%04x -> #NP\n", uSel));
1106 return iemRaiseSelectorNotPresentBySelector(pVCpu, uSel);
1107 }
1108
1109 /*
1110 * Fetch the target CS descriptor from the GDT or LDT.
1111 */
1112 uNewCS = pDesc->Legacy.Gate.u16Sel;
1113 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_GP);
1114 if (rcStrict != VINF_SUCCESS)
1115 return rcStrict;
1116
1117 /* Target CS must be a code selector. */
1118 if ( !DescCS.Legacy.Gen.u1DescType
1119 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE) )
1120 {
1121 Log(("BranchCallGate %04x:%08RX64 -> not a code selector (u1DescType=%u u4Type=%#x).\n",
1122 uNewCS, uNewRip, DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
1123 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCS);
1124 }
1125
1126 /* Privilege checks on target CS. */
1127 if (enmBranch == IEMBRANCH_JUMP)
1128 {
1129 if (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
1130 {
1131 if (DescCS.Legacy.Gen.u2Dpl > IEM_GET_CPL(pVCpu))
1132 {
1133 Log(("BranchCallGate jump (conforming) bad DPL uNewCS=%04x Gate DPL=%d CPL=%u -> #GP\n",
1134 uNewCS, DescCS.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
1135 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCS);
1136 }
1137 }
1138 else
1139 {
1140 if (DescCS.Legacy.Gen.u2Dpl != IEM_GET_CPL(pVCpu))
1141 {
1142 Log(("BranchCallGate jump (non-conforming) bad DPL uNewCS=%04x Gate DPL=%d CPL=%u -> #GP\n",
1143 uNewCS, DescCS.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
1144 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCS);
1145 }
1146 }
1147 }
1148 else
1149 {
1150 Assert(enmBranch == IEMBRANCH_CALL);
1151 if (DescCS.Legacy.Gen.u2Dpl > IEM_GET_CPL(pVCpu))
1152 {
1153 Log(("BranchCallGate call invalid priv. uNewCS=%04x Gate DPL=%d CPL=%u -> #GP\n",
1154 uNewCS, DescCS.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
1155 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
1156 }
1157 }
1158
1159 /* Additional long mode checks. */
1160 if (IEM_IS_LONG_MODE(pVCpu))
1161 {
1162 if (!DescCS.Legacy.Gen.u1Long)
1163 {
1164 Log(("BranchCallGate uNewCS %04x -> not a 64-bit code segment.\n", uNewCS));
1165 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCS);
1166 }
1167
1168 /* L vs D. */
1169 if ( DescCS.Legacy.Gen.u1Long
1170 && DescCS.Legacy.Gen.u1DefBig)
1171 {
1172 Log(("BranchCallGate uNewCS %04x -> both L and D are set.\n", uNewCS));
1173 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCS);
1174 }
1175 }
1176
1177 if (!DescCS.Legacy.Gate.u1Present)
1178 {
1179 Log(("BranchCallGate target CS is not present. uSel=%04x uNewCS=%04x -> #NP(CS)\n", uSel, uNewCS));
1180 return iemRaiseSelectorNotPresentBySelector(pVCpu, uNewCS);
1181 }
1182
1183 if (enmBranch == IEMBRANCH_JUMP)
1184 {
1185 /** @todo This is very similar to regular far jumps; merge! */
1186 /* Jumps are fairly simple... */
1187
1188 /* Chop the high bits off if 16-bit gate (Intel says so). */
1189 if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE)
1190 uNewRip = (uint16_t)uNewRip;
1191
1192 /* Limit check for non-long segments. */
1193 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
1194 if (DescCS.Legacy.Gen.u1Long)
1195 u64Base = 0;
1196 else
1197 {
1198 if (uNewRip > cbLimit)
1199 {
1200 Log(("BranchCallGate jump %04x:%08RX64 -> out of bounds (%#x) -> #GP(0)\n", uNewCS, uNewRip, cbLimit));
1201 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, 0);
1202 }
1203 u64Base = X86DESC_BASE(&DescCS.Legacy);
1204 }
1205
1206 /* Canonical address check. */
1207 if (!IEM_IS_CANONICAL(uNewRip))
1208 {
1209 Log(("BranchCallGate jump %04x:%016RX64 - not canonical -> #GP\n", uNewCS, uNewRip));
1210 return iemRaiseNotCanonical(pVCpu);
1211 }
1212
1213 /*
1214 * Ok, everything checked out fine. Now set the accessed bit before
1215 * committing the result into CS, CSHID and RIP.
1216 */
1217 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1218 {
1219 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
1220 if (rcStrict != VINF_SUCCESS)
1221 return rcStrict;
1222 /** @todo check what VT-x and AMD-V does. */
1223 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1224 }
1225
1226 /* commit */
1227 pVCpu->cpum.GstCtx.rip = uNewRip;
1228 pVCpu->cpum.GstCtx.cs.Sel = uNewCS & X86_SEL_MASK_OFF_RPL;
1229 pVCpu->cpum.GstCtx.cs.Sel |= IEM_GET_CPL(pVCpu); /** @todo is this right for conforming segs? or in general? */
1230 pVCpu->cpum.GstCtx.cs.ValidSel = pVCpu->cpum.GstCtx.cs.Sel;
1231 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
1232 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
1233 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
1234 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
1235 }
1236 else
1237 {
1238 Assert(enmBranch == IEMBRANCH_CALL);
1239 /* Calls are much more complicated. */
1240
1241 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF) && (DescCS.Legacy.Gen.u2Dpl < IEM_GET_CPL(pVCpu)))
1242 {
1243 /* More privilege. This is the fun part. */
1244 Assert(!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)); /* Filtered out above. */
1245
1246 /*
1247 * Determine new SS:rSP from the TSS.
1248 */
1249 Assert(!pVCpu->cpum.GstCtx.tr.Attr.n.u1DescType);
1250
1251 /* Figure out where the new stack pointer is stored in the TSS. */
1252 uint8_t const uNewCSDpl = DescCS.Legacy.Gen.u2Dpl;
1253 uint16_t offNewStack; /* Offset of new stack in TSS. */
1254 uint16_t cbNewStack; /* Number of bytes the stack information takes up in TSS. */
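/* The TSS holds one {stack pointer, SS} pair per privilege level: the pairs are
   8 bytes apart in a 32-bit TSS and 4 bytes apart in a 16-bit TSS, while the
   64-bit TSS has a single 8-byte RSP per level (SS is a NULL selector there). */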
1255 if (!IEM_IS_LONG_MODE(pVCpu))
1256 {
1257 if (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_BUSY)
1258 {
1259 offNewStack = RT_UOFFSETOF(X86TSS32, esp0) + uNewCSDpl * 8;
1260 cbNewStack = RT_SIZEOFMEMB(X86TSS32, esp0) + RT_SIZEOFMEMB(X86TSS32, ss0);
1261 }
1262 else
1263 {
1264 Assert(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_286_TSS_BUSY);
1265 offNewStack = RT_UOFFSETOF(X86TSS16, sp0) + uNewCSDpl * 4;
1266 cbNewStack = RT_SIZEOFMEMB(X86TSS16, sp0) + RT_SIZEOFMEMB(X86TSS16, ss0);
1267 }
1268 }
1269 else
1270 {
1271 Assert(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY);
1272 offNewStack = RT_UOFFSETOF(X86TSS64, rsp0) + uNewCSDpl * RT_SIZEOFMEMB(X86TSS64, rsp0);
1273 cbNewStack = RT_SIZEOFMEMB(X86TSS64, rsp0);
1274 }
1275
1276 /* Check against TSS limit. */
1277 if ((uint16_t)(offNewStack + cbNewStack - 1) > pVCpu->cpum.GstCtx.tr.u32Limit)
1278 {
1279 Log(("BranchCallGate inner stack past TSS limit - %u > %u -> #TS(TSS)\n", offNewStack + cbNewStack - 1, pVCpu->cpum.GstCtx.tr.u32Limit));
1280 return iemRaiseTaskSwitchFaultBySelector(pVCpu, pVCpu->cpum.GstCtx.tr.Sel);
1281 }
1282
1283 uint8_t bUnmapInfo;
1284 RTPTRUNION uPtrTss;
1285 RTGCPTR GCPtrTss = pVCpu->cpum.GstCtx.tr.u64Base + offNewStack;
1286 rcStrict = iemMemMap(pVCpu, &uPtrTss.pv, &bUnmapInfo, cbNewStack, UINT8_MAX, GCPtrTss, IEM_ACCESS_SYS_R, 0);
1287 if (rcStrict != VINF_SUCCESS)
1288 {
1289 Log(("BranchCallGate: TSS mapping failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1290 return rcStrict;
1291 }
1292
1293 RTSEL uNewSS;
1294 if (!IEM_IS_LONG_MODE(pVCpu))
1295 {
1296 if (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_BUSY)
1297 {
1298 uNewRsp = uPtrTss.pu32[0];
1299 uNewSS = uPtrTss.pu16[2];
1300 }
1301 else
1302 {
1303 Assert(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_286_TSS_BUSY);
1304 uNewRsp = uPtrTss.pu16[0];
1305 uNewSS = uPtrTss.pu16[1];
1306 }
1307 }
1308 else
1309 {
1310 Assert(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY);
1311 /* SS will be a NULL selector, but that's valid. */
1312 uNewRsp = uPtrTss.pu64[0];
1313 uNewSS = uNewCSDpl;
1314 }
1315
1316 /* Done with the TSS now. */
1317 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
1318 if (rcStrict != VINF_SUCCESS)
1319 {
1320 Log(("BranchCallGate: TSS unmapping failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1321 return rcStrict;
1322 }
1323
1324 /* Only used outside of long mode. */
1325 uint8_t const cbWords = pDesc->Legacy.Gate.u5ParmCount;
1326
1327 /* If EFER.LMA is 0, there's extra work to do. */
1328 IEMSELDESC DescSS;
1329 if (!IEM_IS_LONG_MODE(pVCpu))
1330 {
1331 if ((uNewSS & X86_SEL_MASK_OFF_RPL) == 0)
1332 {
1333 Log(("BranchCallGate new SS NULL -> #TS(NewSS)\n"));
1334 return iemRaiseTaskSwitchFaultBySelector(pVCpu, uNewSS);
1335 }
1336
1337 /* Grab the new SS descriptor. */
1338 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_SS);
1339 if (rcStrict != VINF_SUCCESS)
1340 return rcStrict;
1341
1342 /* Ensure that CS.DPL == SS.RPL == SS.DPL. */
1343 if ( (DescCS.Legacy.Gen.u2Dpl != (uNewSS & X86_SEL_RPL))
1344 || (DescCS.Legacy.Gen.u2Dpl != DescSS.Legacy.Gen.u2Dpl))
1345 {
1346 Log(("BranchCallGate call bad RPL/DPL uNewSS=%04x SS DPL=%d CS DPL=%u -> #TS(NewSS)\n",
1347 uNewSS, DescSS.Legacy.Gen.u2Dpl, DescCS.Legacy.Gen.u2Dpl));
1348 return iemRaiseTaskSwitchFaultBySelector(pVCpu, uNewSS);
1349 }
1350
1351 /* Ensure new SS is a writable data segment. */
1352 if ((DescSS.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE)
1353 {
1354 Log(("BranchCallGate call new SS -> not a writable data selector (u4Type=%#x)\n", DescSS.Legacy.Gen.u4Type));
1355 return iemRaiseTaskSwitchFaultBySelector(pVCpu, uNewSS);
1356 }
1357
1358 if (!DescSS.Legacy.Gen.u1Present)
1359 {
1360 Log(("BranchCallGate New stack not present uSel=%04x -> #SS(NewSS)\n", uNewSS));
1361 return iemRaiseStackSelectorNotPresentBySelector(pVCpu, uNewSS);
1362 }
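/* The new frame holds four entries (return CS and IP plus the old SS and SP) in
   addition to the copied parameters; entries are dwords for a 386 gate and words
   for a 286 gate. */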
1363 if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE)
1364 cbNewStack = (uint16_t)sizeof(uint32_t) * (4 + cbWords);
1365 else
1366 cbNewStack = (uint16_t)sizeof(uint16_t) * (4 + cbWords);
1367 }
1368 else
1369 {
1370 /* Just grab the new (NULL) SS descriptor. */
1371 /** @todo testcase: Check whether the zero GDT entry is actually loaded here
1372 * like we do... */
1373 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_SS);
1374 if (rcStrict != VINF_SUCCESS)
1375 return rcStrict;
1376
1377 cbNewStack = sizeof(uint64_t) * 4;
1378 }
1379
1380 /** @todo According to Intel, new stack is checked for enough space first,
1381 * then switched. According to AMD, the stack is switched first and
1382 * then pushes might fault!
1383 * NB: OS/2 Warp 3/4 actively relies on the fact that possible
1384 * incoming stack \#PF happens before actual stack switch. AMD is
1385 * either lying or implicitly assumes that new state is committed
1386 * only if and when an instruction doesn't fault.
1387 */
1388
1389 /** @todo According to AMD, CS is loaded first, then SS.
1390 * According to Intel, it's the other way around!?
1391 */
1392
1393 /** @todo Intel and AMD disagree on when exactly the CPL changes! */
1394
1395 /* Set the accessed bit before committing new SS. */
1396 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1397 {
1398 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
1399 if (rcStrict != VINF_SUCCESS)
1400 return rcStrict;
1401 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1402 }
1403
1404 /* Remember the old SS:rSP and their linear address. */
1405 RTSEL const uOldSS = pVCpu->cpum.GstCtx.ss.Sel;
1406 uint64_t const uOldRsp = pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig ? pVCpu->cpum.GstCtx.rsp : pVCpu->cpum.GstCtx.sp;
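/* Only the full stack pointer is valid when the old SS is a big (32-bit) stack
   segment; with a 16-bit stack segment just SP is used. */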
1407
1408 RTGCPTR const GCPtrParmWds = pVCpu->cpum.GstCtx.ss.u64Base + uOldRsp;
1409
1410 /* HACK ALERT! Probe if the write to the new stack will succeed. This may raise
1411 #SS(NewSS) or #PF; the former is not implemented in this workaround. */
1412 /** @todo Properly fix call gate target stack exceptions. */
1413 /** @todo testcase: Cover callgates with partially or fully inaccessible
1414 * target stacks. */
1415 void *pvNewFrame;
1416 RTGCPTR GCPtrNewStack = X86DESC_BASE(&DescSS.Legacy) + uNewRsp - cbNewStack;
1417 rcStrict = iemMemMap(pVCpu, &pvNewFrame, &bUnmapInfo, cbNewStack, UINT8_MAX, GCPtrNewStack, IEM_ACCESS_SYS_RW, 0);
1418 if (rcStrict != VINF_SUCCESS)
1419 {
1420 Log(("BranchCallGate: Incoming stack (%04x:%08RX64) not accessible, rc=%Rrc\n", uNewSS, uNewRsp, VBOXSTRICTRC_VAL(rcStrict)));
1421 return rcStrict;
1422 }
1423 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
1424 if (rcStrict != VINF_SUCCESS)
1425 {
1426 Log(("BranchCallGate: New stack probe unmapping failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1427 return rcStrict;
1428 }
1429
1430 /* Commit new SS:rSP. */
1431 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
1432 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
1433 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
1434 pVCpu->cpum.GstCtx.ss.u32Limit = X86DESC_LIMIT_G(&DescSS.Legacy);
1435 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
1436 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
1437 pVCpu->cpum.GstCtx.rsp = uNewRsp;
1438 IEM_SET_CPL(pVCpu, uNewCSDpl); /** @todo Are the parameter words accessed using the new CPL or the old CPL? */
1439 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1440 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
1441
1442 /* At this point the stack access must not fail because new state was already committed. */
1443 /** @todo this can still fail because SS.LIMIT is not checked. */
1444 uint8_t bUnmapInfoRet;
1445 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbNewStack,
1446 IEM_IS_LONG_MODE(pVCpu) ? 7
1447 : pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE ? 3 : 1,
1448 &uPtrRet.pv, &bUnmapInfoRet, &uNewRsp);
1449 AssertMsgReturn(rcStrict == VINF_SUCCESS, ("BranchCallGate: New stack mapping failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)),
1450 VERR_INTERNAL_ERROR_5);
1451
1452 if (!IEM_IS_LONG_MODE(pVCpu))
1453 {
1454 if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE)
1455 {
1456 if (cbWords)
1457 {
1458 /* Map the relevant chunk of the old stack. */
1459 RTPTRUNION uPtrParmWds;
1460 rcStrict = iemMemMap(pVCpu, &uPtrParmWds.pv, &bUnmapInfo, cbWords * 4, UINT8_MAX, GCPtrParmWds,
1461 IEM_ACCESS_DATA_R, 0 /** @todo Can uNewCSDpl == 3? Then we need alignment mask here! */);
1462 if (rcStrict != VINF_SUCCESS)
1463 {
1464 Log(("BranchCallGate: Old stack mapping (32-bit) failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1465 return rcStrict;
1466 }
1467
1468 /* Copy the parameter (d)words. */
1469 for (int i = 0; i < cbWords; ++i)
1470 uPtrRet.pu32[2 + i] = uPtrParmWds.pu32[i];
1471
1472 /* Unmap the old stack. */
1473 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
1474 if (rcStrict != VINF_SUCCESS)
1475 {
1476 Log(("BranchCallGate: Old stack unmapping (32-bit) failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1477 return rcStrict;
1478 }
1479 }
1480
1481 /* Push the old CS:rIP. */
1482 uPtrRet.pu32[0] = pVCpu->cpum.GstCtx.eip + cbInstr;
1483 uPtrRet.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel; /** @todo Testcase: What is written to the high word when pushing CS? */
1484
1485 /* Push the old SS:rSP. */
1486 uPtrRet.pu32[2 + cbWords + 0] = uOldRsp;
1487 uPtrRet.pu32[2 + cbWords + 1] = uOldSS;
1488 }
1489 else
1490 {
1491 Assert(pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE);
1492
1493 if (cbWords)
1494 {
1495 /* Map the relevant chunk of the old stack. */
1496 RTPTRUNION uPtrParmWds;
1497 rcStrict = iemMemMap(pVCpu, &uPtrParmWds.pv, &bUnmapInfo, cbWords * 2, UINT8_MAX, GCPtrParmWds,
1498 IEM_ACCESS_DATA_R, 0 /** @todo Can uNewCSDpl == 3? Then we need alignment mask here! */);
1499 if (rcStrict != VINF_SUCCESS)
1500 {
1501 Log(("BranchCallGate: Old stack mapping (16-bit) failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1502 return rcStrict;
1503 }
1504
1505 /* Copy the parameter words. */
1506 for (int i = 0; i < cbWords; ++i)
1507 uPtrRet.pu16[2 + i] = uPtrParmWds.pu16[i];
1508
1509 /* Unmap the old stack. */
1510 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
1511 if (rcStrict != VINF_SUCCESS)
1512 {
1513 Log(("BranchCallGate: Old stack unmapping (32-bit) failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1514 return rcStrict;
1515 }
1516 }
1517
1518 /* Push the old CS:rIP. */
1519 uPtrRet.pu16[0] = pVCpu->cpum.GstCtx.ip + cbInstr;
1520 uPtrRet.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
1521
1522 /* Push the old SS:rSP. */
1523 uPtrRet.pu16[2 + cbWords + 0] = uOldRsp;
1524 uPtrRet.pu16[2 + cbWords + 1] = uOldSS;
1525 }
1526 }
1527 else
1528 {
1529 Assert(pDesc->Legacy.Gate.u4Type == AMD64_SEL_TYPE_SYS_CALL_GATE);
1530
1531 /* For 64-bit gates, no parameters are copied. Just push old SS:rSP and CS:rIP. */
1532 uPtrRet.pu64[0] = pVCpu->cpum.GstCtx.rip + cbInstr;
1533 uPtrRet.pu64[1] = pVCpu->cpum.GstCtx.cs.Sel; /** @todo Testcase: What is written to the high words when pushing CS? */
1534 uPtrRet.pu64[2] = uOldRsp;
1535 uPtrRet.pu64[3] = uOldSS; /** @todo Testcase: What is written to the high words when pushing SS? */
1536 }
1537
1538 rcStrict = iemMemStackPushCommitSpecial(pVCpu, bUnmapInfoRet, uNewRsp);
1539 if (rcStrict != VINF_SUCCESS)
1540 {
1541 Log(("BranchCallGate: New stack unmapping failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1542 return rcStrict;
1543 }
1544
1545 /* Chop the high bits off if 16-bit gate (Intel says so). */
1546 if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE)
1547 uNewRip = (uint16_t)uNewRip;
1548
1549 /* Limit / canonical check. */
1550 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
1551 if (!IEM_IS_LONG_MODE(pVCpu))
1552 {
1553 if (uNewRip > cbLimit)
1554 {
1555 Log(("BranchCallGate %04x:%08RX64 -> out of bounds (%#x)\n", uNewCS, uNewRip, cbLimit));
1556 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, 0);
1557 }
1558 u64Base = X86DESC_BASE(&DescCS.Legacy);
1559 }
1560 else
1561 {
1562 Assert(pDesc->Legacy.Gate.u4Type == AMD64_SEL_TYPE_SYS_CALL_GATE);
1563 if (!IEM_IS_CANONICAL(uNewRip))
1564 {
1565 Log(("BranchCallGate call %04x:%016RX64 - not canonical -> #GP\n", uNewCS, uNewRip));
1566 return iemRaiseNotCanonical(pVCpu);
1567 }
1568 u64Base = 0;
1569 }
1570
1571 /*
1572 * Now set the accessed bit before
1573 * writing the return address to the stack and committing the result into
1574 * CS, CSHID and RIP.
1575 */
1576 /** @todo Testcase: Need to check WHEN exactly the accessed bit is set. */
1577 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1578 {
1579 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
1580 if (rcStrict != VINF_SUCCESS)
1581 return rcStrict;
1582 /** @todo check what VT-x and AMD-V does. */
1583 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1584 }
1585
1586 /* Commit new CS:rIP. */
1587 pVCpu->cpum.GstCtx.rip = uNewRip;
1588 pVCpu->cpum.GstCtx.cs.Sel = uNewCS & X86_SEL_MASK_OFF_RPL;
1589 pVCpu->cpum.GstCtx.cs.Sel |= IEM_GET_CPL(pVCpu);
1590 pVCpu->cpum.GstCtx.cs.ValidSel = pVCpu->cpum.GstCtx.cs.Sel;
1591 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
1592 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
1593 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
1594 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
1595 }
1596 else
1597 {
1598 /* Same privilege. */
1599 /** @todo This is very similar to regular far calls; merge! */
1600
1601 /* Check stack first - may #SS(0). */
1602 /** @todo check how gate size affects pushing of CS! Does callf 16:32 in
1603 * 16-bit code cause a two or four byte CS to be pushed? */
1604 uint8_t bUnmapInfoRet;
1605 rcStrict = iemMemStackPushBeginSpecial(pVCpu,
1606 IEM_IS_LONG_MODE(pVCpu) ? 8+8
1607 : pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE ? 4+4 : 2+2,
1608 IEM_IS_LONG_MODE(pVCpu) ? 7
1609 : pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE ? 3 : 1,
1610 &uPtrRet.pv, &bUnmapInfoRet, &uNewRsp);
1611 if (rcStrict != VINF_SUCCESS)
1612 return rcStrict;
1613
1614 /* Chop the high bits off if 16-bit gate (Intel says so). */
1615 if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE)
1616 uNewRip = (uint16_t)uNewRip;
1617
1618 /* Limit / canonical check. */
1619 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
1620 if (!IEM_IS_LONG_MODE(pVCpu))
1621 {
1622 if (uNewRip > cbLimit)
1623 {
1624 Log(("BranchCallGate %04x:%08RX64 -> out of bounds (%#x)\n", uNewCS, uNewRip, cbLimit));
1625 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, 0);
1626 }
1627 u64Base = X86DESC_BASE(&DescCS.Legacy);
1628 }
1629 else
1630 {
1631 if (!IEM_IS_CANONICAL(uNewRip))
1632 {
1633 Log(("BranchCallGate call %04x:%016RX64 - not canonical -> #GP\n", uNewCS, uNewRip));
1634 return iemRaiseNotCanonical(pVCpu);
1635 }
1636 u64Base = 0;
1637 }
1638
1639 /*
1640 * Now set the accessed bit before
1641 * writing the return address to the stack and committing the result into
1642 * CS, CSHID and RIP.
1643 */
1644 /** @todo Testcase: Need to check WHEN exactly the accessed bit is set. */
1645 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1646 {
1647 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
1648 if (rcStrict != VINF_SUCCESS)
1649 return rcStrict;
1650 /** @todo check what VT-x and AMD-V does. */
1651 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1652 }
1653
1654 /* stack */
1655 if (!IEM_IS_LONG_MODE(pVCpu))
1656 {
1657 if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE)
1658 {
1659 uPtrRet.pu32[0] = pVCpu->cpum.GstCtx.eip + cbInstr;
1660 uPtrRet.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel; /** @todo Testcase: What is written to the high word when pushing CS? */
1661 }
1662 else
1663 {
1664 Assert(pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE);
1665 uPtrRet.pu16[0] = pVCpu->cpum.GstCtx.ip + cbInstr;
1666 uPtrRet.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
1667 }
1668 }
1669 else
1670 {
1671 Assert(pDesc->Legacy.Gate.u4Type == AMD64_SEL_TYPE_SYS_CALL_GATE);
1672 uPtrRet.pu64[0] = pVCpu->cpum.GstCtx.rip + cbInstr;
1673 uPtrRet.pu64[1] = pVCpu->cpum.GstCtx.cs.Sel; /** @todo Testcase: What is written to the high words when pushing CS? */
1674 }
1675
1676 rcStrict = iemMemStackPushCommitSpecial(pVCpu, bUnmapInfoRet, uNewRsp);
1677 if (rcStrict != VINF_SUCCESS)
1678 return rcStrict;
1679
1680 /* commit */
1681 pVCpu->cpum.GstCtx.rip = uNewRip;
1682 pVCpu->cpum.GstCtx.cs.Sel = uNewCS & X86_SEL_MASK_OFF_RPL;
1683 pVCpu->cpum.GstCtx.cs.Sel |= IEM_GET_CPL(pVCpu);
1684 pVCpu->cpum.GstCtx.cs.ValidSel = pVCpu->cpum.GstCtx.cs.Sel;
1685 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
1686 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
1687 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
1688 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
1689 }
1690 }
1691 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
1692
1693 iemRecalcExecModeAndCplAndAcFlags(pVCpu);
1694
1695/** @todo single stepping */
1696
1697 /* Flush the prefetch buffer. */
1698 IEM_FLUSH_PREFETCH_HEAVY(pVCpu, cbInstr);
1699 return VINF_SUCCESS;
1700#endif /* IEM_IMPLEMENTS_CALLGATE */
1701}
1702
1703
1704/**
1705 * Implements far jumps and calls thru system selectors.
1706 *
1707 * @returns VBox strict status code.
1708 * @param pVCpu The cross context virtual CPU structure of the
1709 * calling thread.
1710 * @param cbInstr The current instruction length.
1711 * @param uSel The selector.
1712 * @param enmBranch The kind of branching we're performing.
1713 * @param enmEffOpSize The effective operand size.
1714 * @param pDesc The descriptor corresponding to @a uSel.
1715 */
1716static VBOXSTRICTRC iemCImpl_BranchSysSel(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uSel, IEMBRANCH enmBranch,
1717 IEMMODE enmEffOpSize, PIEMSELDESC pDesc)
1718{
1719 Assert(enmBranch == IEMBRANCH_JUMP || enmBranch == IEMBRANCH_CALL);
1720 Assert((uSel & X86_SEL_MASK_OFF_RPL));
1721 IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
1722
1723 if (IEM_IS_LONG_MODE(pVCpu))
1724 switch (pDesc->Legacy.Gen.u4Type)
1725 {
1726 case AMD64_SEL_TYPE_SYS_CALL_GATE:
1727 return iemCImpl_BranchCallGate(pVCpu, cbInstr, uSel, enmBranch, enmEffOpSize, pDesc);
1728
1729 default:
1730 case AMD64_SEL_TYPE_SYS_LDT:
1731 case AMD64_SEL_TYPE_SYS_TSS_BUSY:
1732 case AMD64_SEL_TYPE_SYS_TSS_AVAIL:
1733 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
1734 case AMD64_SEL_TYPE_SYS_INT_GATE:
1735 Log(("branch %04x -> wrong sys selector (64-bit): %d\n", uSel, pDesc->Legacy.Gen.u4Type));
1736 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1737 }
1738
1739 switch (pDesc->Legacy.Gen.u4Type)
1740 {
1741 case X86_SEL_TYPE_SYS_286_CALL_GATE:
1742 case X86_SEL_TYPE_SYS_386_CALL_GATE:
1743 return iemCImpl_BranchCallGate(pVCpu, cbInstr, uSel, enmBranch, enmEffOpSize, pDesc);
1744
1745 case X86_SEL_TYPE_SYS_TASK_GATE:
1746 return iemCImpl_BranchTaskGate(pVCpu, cbInstr, uSel, enmBranch, enmEffOpSize, pDesc);
1747
1748 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
1749 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
1750 return iemCImpl_BranchTaskSegment(pVCpu, cbInstr, uSel, enmBranch, enmEffOpSize, pDesc);
1751
1752 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
1753 Log(("branch %04x -> busy 286 TSS\n", uSel));
1754 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1755
1756 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
1757 Log(("branch %04x -> busy 386 TSS\n", uSel));
1758 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1759
1760 default:
1761 case X86_SEL_TYPE_SYS_LDT:
1762 case X86_SEL_TYPE_SYS_286_INT_GATE:
1763 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
1764 case X86_SEL_TYPE_SYS_386_INT_GATE:
1765 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
1766 Log(("branch %04x -> wrong sys selector: %d\n", uSel, pDesc->Legacy.Gen.u4Type));
1767 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1768 }
1769}
1770
1771
1772/**
1773 * Implements far jumps.
1774 *
1775 * @param uSel The selector.
1776 * @param offSeg The segment offset.
1777 * @param enmEffOpSize The effective operand size.
1778 */
1779IEM_CIMPL_DEF_3(iemCImpl_FarJmp, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmEffOpSize)
1780{
1781 NOREF(cbInstr);
1782 Assert(offSeg <= UINT32_MAX || (!IEM_IS_GUEST_CPU_AMD(pVCpu) && IEM_IS_64BIT_CODE(pVCpu)));
1783
1784 /*
1785 * Real mode and V8086 mode are easy. The only snag seems to be that
1786 * CS.limit doesn't change and the limit check is done against the current
1787 * limit.
1788 */
1789 /** @todo Robert Collins claims (The Segment Descriptor Cache, DDJ August
1790 * 1998) that up to and including the Intel 486, far control
1791 * transfers in real mode set default CS attributes (0x93) and also
1792 * set a 64K segment limit. Starting with the Pentium, the
1793 * attributes and limit are left alone but the access rights are
1794 * ignored. We only implement the Pentium+ behavior.
1795 * */
1796 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
1797 {
1798 Assert(enmEffOpSize == IEMMODE_16BIT || enmEffOpSize == IEMMODE_32BIT);
1799 if (offSeg > pVCpu->cpum.GstCtx.cs.u32Limit)
1800 {
1801 Log(("iemCImpl_FarJmp: 16-bit limit\n"));
1802 return iemRaiseGeneralProtectionFault0(pVCpu);
1803 }
1804
1805 if (enmEffOpSize == IEMMODE_16BIT) /** @todo WRONG, must pass this. */
1806 pVCpu->cpum.GstCtx.rip = offSeg;
1807 else
1808 pVCpu->cpum.GstCtx.rip = offSeg & UINT16_MAX;
1809 pVCpu->cpum.GstCtx.cs.Sel = uSel;
1810 pVCpu->cpum.GstCtx.cs.ValidSel = uSel;
1811 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
1812 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)uSel << 4;
1813
1814 /* Update the FLAT 32-bit mode flag, if we're in 32-bit unreal mode (unlikely): */
1815 if (RT_LIKELY(!IEM_IS_32BIT_CODE(pVCpu)))
1816 { /* likely */ }
1817 else if (uSel != 0)
1818 pVCpu->iem.s.fExec &= ~IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK;
1819 else
1820 pVCpu->iem.s.fExec = (pVCpu->iem.s.fExec & ~IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK)
1821 | iemCalc32BitFlatIndicator(pVCpu);
1822
1823 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
1824 }
1825
1826 /*
1827 * Protected mode. Need to parse the specified descriptor...
1828 */
1829 if (!(uSel & X86_SEL_MASK_OFF_RPL))
1830 {
1831 Log(("jmpf %04x:%08RX64 -> invalid selector, #GP(0)\n", uSel, offSeg));
1832 return iemRaiseGeneralProtectionFault0(pVCpu);
1833 }
1834
1835 /* Fetch the descriptor. */
1836 IEMSELDESC Desc;
1837 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_GP);
1838 if (rcStrict != VINF_SUCCESS)
1839 return rcStrict;
1840
1841 /* Is it there? */
1842 if (!Desc.Legacy.Gen.u1Present) /** @todo this is probably checked too early. Testcase! */
1843 {
1844 Log(("jmpf %04x:%08RX64 -> segment not present\n", uSel, offSeg));
1845 return iemRaiseSelectorNotPresentBySelector(pVCpu, uSel);
1846 }
1847
1848 /*
1849 * Deal with it according to its type. We do the standard code selectors
1850 * here and dispatch the system selectors to worker functions.
1851 */
1852 if (!Desc.Legacy.Gen.u1DescType)
1853 return iemCImpl_BranchSysSel(pVCpu, cbInstr, uSel, IEMBRANCH_JUMP, enmEffOpSize, &Desc);
1854
1855 /* Only code segments. */
1856 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
1857 {
1858 Log(("jmpf %04x:%08RX64 -> not a code selector (u4Type=%#x).\n", uSel, offSeg, Desc.Legacy.Gen.u4Type));
1859 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1860 }
1861
1862 /* L vs D. */
1863 if ( Desc.Legacy.Gen.u1Long
1864 && Desc.Legacy.Gen.u1DefBig
1865 && IEM_IS_LONG_MODE(pVCpu))
1866 {
1867 Log(("jmpf %04x:%08RX64 -> both L and D are set.\n", uSel, offSeg));
1868 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1869 }
1870
1871 /* DPL/RPL/CPL check, where conforming segments make a difference. */
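/* (Conforming code: DPL <= CPL suffices and RPL is ignored. Non-conforming
 * code: DPL must equal CPL and RPL must not exceed CPL.) */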
1872 if (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
1873 {
1874 if (IEM_GET_CPL(pVCpu) < Desc.Legacy.Gen.u2Dpl)
1875 {
1876 Log(("jmpf %04x:%08RX64 -> DPL violation (conforming); DPL=%d CPL=%u\n",
1877 uSel, offSeg, Desc.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
1878 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1879 }
1880 }
1881 else
1882 {
1883 if (IEM_GET_CPL(pVCpu) != Desc.Legacy.Gen.u2Dpl)
1884 {
1885 Log(("jmpf %04x:%08RX64 -> CPL != DPL; DPL=%d CPL=%u\n", uSel, offSeg, Desc.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
1886 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1887 }
1888 if ((uSel & X86_SEL_RPL) > IEM_GET_CPL(pVCpu))
1889 {
1890 Log(("jmpf %04x:%08RX64 -> RPL > CPL; RPL=%d CPL=%u\n", uSel, offSeg, (uSel & X86_SEL_RPL), IEM_GET_CPL(pVCpu)));
1891 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1892 }
1893 }
1894
1895 /* Chop the high bits if 16-bit (Intel says so). */
1896 if (enmEffOpSize == IEMMODE_16BIT)
1897 offSeg &= UINT16_MAX;
1898
1899 /* Limit check and get the base. */
1900 uint64_t u64Base;
1901 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
1902 if ( !Desc.Legacy.Gen.u1Long
1903 || !IEM_IS_LONG_MODE(pVCpu))
1904 {
1905 if (RT_LIKELY(offSeg <= cbLimit))
1906 u64Base = X86DESC_BASE(&Desc.Legacy);
1907 else
1908 {
1909 Log(("jmpf %04x:%08RX64 -> out of bounds (%#x)\n", uSel, offSeg, cbLimit));
1910 /** @todo Intel says this is \#GP(0)! */
1911 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1912 }
1913 }
1914 else
1915 u64Base = 0;
1916
1917 /*
1918 * Ok, everything checked out fine. Now set the accessed bit before
1919 * committing the result into CS, CSHID and RIP.
1920 */
1921 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1922 {
1923 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
1924 if (rcStrict != VINF_SUCCESS)
1925 return rcStrict;
1926 /** @todo check what VT-x and AMD-V does. */
1927 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1928 }
1929
1930 /* commit */
1931 pVCpu->cpum.GstCtx.rip = offSeg;
1932 pVCpu->cpum.GstCtx.cs.Sel = uSel & X86_SEL_MASK_OFF_RPL;
1933 pVCpu->cpum.GstCtx.cs.Sel |= IEM_GET_CPL(pVCpu); /** @todo is this right for conforming segs? or in general? */
1934 pVCpu->cpum.GstCtx.cs.ValidSel = pVCpu->cpum.GstCtx.cs.Sel;
1935 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
1936 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
1937 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
1938 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
1939
1940 /** @todo check if the hidden bits are loaded correctly for 64-bit
1941 * mode. */
1942
1943 iemRecalcExecModeAndCplAndAcFlags(pVCpu);
1944
1945 /* Flush the prefetch buffer. */
1946 IEM_FLUSH_PREFETCH_HEAVY(pVCpu, cbInstr);
1947
1948 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
1949}
1950
1951
1952/**
1953 * Implements far calls.
1954 *
1955 * This is very similar to iemCImpl_FarJmp.
1956 *
1957 * @param uSel The selector.
1958 * @param offSeg The segment offset.
1959 * @param enmEffOpSize The operand size (in case we need it).
1960 */
1961IEM_CIMPL_DEF_3(iemCImpl_callf, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmEffOpSize)
1962{
1963 VBOXSTRICTRC rcStrict;
1964 uint64_t uNewRsp;
1965 RTPTRUNION uPtrRet;
1966 uint8_t bUnmapInfo;
1967
1968 /*
1969 * Real mode and V8086 mode are easy. The only snag seems to be that
1970 * CS.limit doesn't change and the limit check is done against the current
1971 * limit.
1972 */
1973 /** @todo See comment for similar code in iemCImpl_FarJmp */
1974 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
1975 {
1976 Assert(enmEffOpSize == IEMMODE_16BIT || enmEffOpSize == IEMMODE_32BIT);
1977
1978 /* Check stack first - may #SS(0). */
1979 rcStrict = iemMemStackPushBeginSpecial(pVCpu, enmEffOpSize == IEMMODE_32BIT ? 4+4 : 2+2,
1980 enmEffOpSize == IEMMODE_32BIT ? 3 : 1,
1981 &uPtrRet.pv, &bUnmapInfo, &uNewRsp);
1982 if (rcStrict != VINF_SUCCESS)
1983 return rcStrict;
1984
1985 /* Check the target address range. */
1986/** @todo this must be wrong! Write unreal mode tests! */
1987 if (offSeg > UINT32_MAX)
1988 return iemRaiseGeneralProtectionFault0(pVCpu);
1989
1990 /* Everything is fine, push the return address. */
1991 if (enmEffOpSize == IEMMODE_16BIT)
1992 {
1993 uPtrRet.pu16[0] = pVCpu->cpum.GstCtx.ip + cbInstr;
1994 uPtrRet.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
1995 }
1996 else
1997 {
1998 uPtrRet.pu32[0] = pVCpu->cpum.GstCtx.eip + cbInstr;
1999 uPtrRet.pu16[2] = pVCpu->cpum.GstCtx.cs.Sel;
2000 }
2001 rcStrict = iemMemStackPushCommitSpecial(pVCpu, bUnmapInfo, uNewRsp);
2002 if (rcStrict != VINF_SUCCESS)
2003 return rcStrict;
2004
2005 /* Branch. */
2006 pVCpu->cpum.GstCtx.rip = offSeg;
2007 pVCpu->cpum.GstCtx.cs.Sel = uSel;
2008 pVCpu->cpum.GstCtx.cs.ValidSel = uSel;
2009 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2010 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)uSel << 4;
2011
2012 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
2013 }
2014
2015 /*
2016 * Protected mode. Need to parse the specified descriptor...
2017 */
2018 if (!(uSel & X86_SEL_MASK_OFF_RPL))
2019 {
2020 Log(("callf %04x:%08RX64 -> invalid selector, #GP(0)\n", uSel, offSeg));
2021 return iemRaiseGeneralProtectionFault0(pVCpu);
2022 }
2023
2024 /* Fetch the descriptor. */
2025 IEMSELDESC Desc;
2026 rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_GP);
2027 if (rcStrict != VINF_SUCCESS)
2028 return rcStrict;
2029
2030 /*
2031 * Deal with it according to its type. We do the standard code selectors
2032 * here and dispatch the system selectors to worker functions.
2033 */
2034 if (!Desc.Legacy.Gen.u1DescType)
2035 return iemCImpl_BranchSysSel(pVCpu, cbInstr, uSel, IEMBRANCH_CALL, enmEffOpSize, &Desc);
2036
2037 /* Only code segments. */
2038 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
2039 {
2040 Log(("callf %04x:%08RX64 -> not a code selector (u4Type=%#x).\n", uSel, offSeg, Desc.Legacy.Gen.u4Type));
2041 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
2042 }
2043
2044 /* L vs D. */
2045 if ( Desc.Legacy.Gen.u1Long
2046 && Desc.Legacy.Gen.u1DefBig
2047 && IEM_IS_LONG_MODE(pVCpu))
2048 {
2049 Log(("callf %04x:%08RX64 -> both L and D are set.\n", uSel, offSeg));
2050 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
2051 }
2052
2053 /* DPL/RPL/CPL check, where conforming segments make a difference. */
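/* (Same rules as for far jumps: conforming code needs DPL <= CPL with RPL
 * ignored; non-conforming code needs DPL == CPL and RPL <= CPL.) */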
2054 if (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2055 {
2056 if (IEM_GET_CPL(pVCpu) < Desc.Legacy.Gen.u2Dpl)
2057 {
2058 Log(("callf %04x:%08RX64 -> DPL violation (conforming); DPL=%d CPL=%u\n",
2059 uSel, offSeg, Desc.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
2060 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
2061 }
2062 }
2063 else
2064 {
2065 if (IEM_GET_CPL(pVCpu) != Desc.Legacy.Gen.u2Dpl)
2066 {
2067 Log(("callf %04x:%08RX64 -> CPL != DPL; DPL=%d CPL=%u\n", uSel, offSeg, Desc.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
2068 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
2069 }
2070 if ((uSel & X86_SEL_RPL) > IEM_GET_CPL(pVCpu))
2071 {
2072 Log(("callf %04x:%08RX64 -> RPL > CPL; RPL=%d CPL=%u\n", uSel, offSeg, (uSel & X86_SEL_RPL), IEM_GET_CPL(pVCpu)));
2073 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
2074 }
2075 }
2076
2077 /* Is it there? */
2078 if (!Desc.Legacy.Gen.u1Present)
2079 {
2080 Log(("callf %04x:%08RX64 -> segment not present\n", uSel, offSeg));
2081 return iemRaiseSelectorNotPresentBySelector(pVCpu, uSel);
2082 }
2083
2084 /* Check stack first - may #SS(0). */
2085 /** @todo check how operand prefix affects pushing of CS! Does callf 16:32 in
2086 * 16-bit code cause a two or four byte CS to be pushed? */
2087 rcStrict = iemMemStackPushBeginSpecial(pVCpu,
2088 enmEffOpSize == IEMMODE_64BIT ? 8+8 : enmEffOpSize == IEMMODE_32BIT ? 4+4 : 2+2,
2089 enmEffOpSize == IEMMODE_64BIT ? 7 : enmEffOpSize == IEMMODE_32BIT ? 3 : 1,
2090 &uPtrRet.pv, &bUnmapInfo, &uNewRsp);
2091 if (rcStrict != VINF_SUCCESS)
2092 return rcStrict;
2093
2094 /* Chop the high bits if 16-bit (Intel says so). */
2095 if (enmEffOpSize == IEMMODE_16BIT)
2096 offSeg &= UINT16_MAX;
2097
2098 /* Limit / canonical check. */
2099 uint64_t u64Base;
2100 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
2101 if ( !Desc.Legacy.Gen.u1Long
2102 || !IEM_IS_LONG_MODE(pVCpu))
2103 {
2104 if (RT_LIKELY(offSeg <= cbLimit))
2105 u64Base = X86DESC_BASE(&Desc.Legacy);
2106 else
2107 {
2108 Log(("callf %04x:%08RX64 -> out of bounds (%#x)\n", uSel, offSeg, cbLimit));
2109 /** @todo Intel says this is \#GP(0)! */
2110 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
2111 }
2112 }
2113 else if (IEM_IS_CANONICAL(offSeg))
2114 u64Base = 0;
2115 else
2116 {
2117 Log(("callf %04x:%016RX64 - not canonical -> #GP\n", uSel, offSeg));
2118 return iemRaiseNotCanonical(pVCpu);
2119 }
2120
2121 /*
2122 * Now set the accessed bit before
2123 * writing the return address to the stack and committing the result into
2124 * CS, CSHID and RIP.
2125 */
2126 /** @todo Testcase: Need to check WHEN exactly the accessed bit is set. */
2127 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2128 {
2129 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
2130 if (rcStrict != VINF_SUCCESS)
2131 return rcStrict;
2132 /** @todo check what VT-x and AMD-V does. */
2133 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2134 }
2135
2136 /* stack */
2137 if (enmEffOpSize == IEMMODE_16BIT)
2138 {
2139 uPtrRet.pu16[0] = pVCpu->cpum.GstCtx.ip + cbInstr;
2140 uPtrRet.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
2141 }
2142 else if (enmEffOpSize == IEMMODE_32BIT)
2143 {
2144 uPtrRet.pu32[0] = pVCpu->cpum.GstCtx.eip + cbInstr;
2145 uPtrRet.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel; /** @todo Testcase: What is written to the high word when callf is pushing CS? */
2146 }
2147 else
2148 {
2149 uPtrRet.pu64[0] = pVCpu->cpum.GstCtx.rip + cbInstr;
2150 uPtrRet.pu64[1] = pVCpu->cpum.GstCtx.cs.Sel; /** @todo Testcase: What is written to the high words when callf is pushing CS? */
2151 }
2152 rcStrict = iemMemStackPushCommitSpecial(pVCpu, bUnmapInfo, uNewRsp);
2153 if (rcStrict != VINF_SUCCESS)
2154 return rcStrict;
2155
2156 /* commit */
2157 pVCpu->cpum.GstCtx.rip = offSeg;
2158 pVCpu->cpum.GstCtx.cs.Sel = uSel & X86_SEL_MASK_OFF_RPL;
2159 pVCpu->cpum.GstCtx.cs.Sel |= IEM_GET_CPL(pVCpu);
2160 pVCpu->cpum.GstCtx.cs.ValidSel = pVCpu->cpum.GstCtx.cs.Sel;
2161 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2162 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2163 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
2164 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
2165
2166 /** @todo check if the hidden bits are loaded correctly for 64-bit
2167 * mode. */
2168
2169 iemRecalcExecModeAndCplAndAcFlags(pVCpu);
2170
2171 /* Flush the prefetch buffer. */
2172 IEM_FLUSH_PREFETCH_HEAVY(pVCpu, cbInstr);
2173
2174 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
2175}
2176
2177
2178/**
2179 * Implements retf.
2180 *
2181 * @param enmEffOpSize The effective operand size.
2182 * @param cbPop The amount of arguments to pop from the stack
2183 * (bytes).
2184 */
2185IEM_CIMPL_DEF_2(iemCImpl_retf, IEMMODE, enmEffOpSize, uint16_t, cbPop)
2186{
2187 NOREF(cbInstr);
2188
2189 /*
2190 * Read the stack values first.
2191 */
2192 RTUINT64U NewRsp;
2193 uint8_t bUnmapInfo;
2194 RTCPTRUNION uPtrFrame;
2195 uint32_t cbRetPtr = enmEffOpSize == IEMMODE_16BIT ? 2+2
2196 : enmEffOpSize == IEMMODE_32BIT ? 4+4 : 8+8;
2197 VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pVCpu, cbRetPtr,
2198 enmEffOpSize == IEMMODE_16BIT ? 1 : enmEffOpSize == IEMMODE_32BIT ? 3 : 7,
2199 &uPtrFrame.pv, &bUnmapInfo, &NewRsp.u);
2200 if (rcStrict != VINF_SUCCESS)
2201 return rcStrict;
2202
2203 uint64_t uNewRip;
2204 uint16_t uNewCs;
2205 if (enmEffOpSize == IEMMODE_16BIT)
2206 {
2207 uNewRip = uPtrFrame.pu16[0];
2208 uNewCs = uPtrFrame.pu16[1];
2209 }
2210 else if (enmEffOpSize == IEMMODE_32BIT)
2211 {
2212 uNewRip = uPtrFrame.pu32[0];
2213 uNewCs = uPtrFrame.pu16[2];
2214 }
2215 else
2216 {
2217 uNewRip = uPtrFrame.pu64[0];
2218 uNewCs = uPtrFrame.pu16[4];
2219 }
2220
2221 rcStrict = iemMemStackPopDoneSpecial(pVCpu, bUnmapInfo);
2222 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
2223 { /* extremely likely */ }
2224 else
2225 return rcStrict;
2226
2227 /*
2228 * Real mode and V8086 mode are easy.
2229 */
2230 /** @todo See comment for similar code in iemCImpl_FarJmp */
2231 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
2232 {
2233 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
2234 /** @todo check how this is supposed to work if sp=0xfffe. */
2235
2236 /* Check the limit of the new EIP. */
2237 /** @todo Intel pseudo code only does the limit check for 16-bit
2238 * operands, AMD does not make any distinction. What is right? */
2239 if (uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit)
2240 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
2241
2242 /* commit the operation. */
2243 if (cbPop)
2244 iemRegAddToRspEx(pVCpu, &NewRsp, cbPop);
2245 pVCpu->cpum.GstCtx.rsp = NewRsp.u;
2246 pVCpu->cpum.GstCtx.rip = uNewRip;
2247 pVCpu->cpum.GstCtx.cs.Sel = uNewCs;
2248 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCs;
2249 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2250 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)uNewCs << 4;
2251 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
2252 }
2253
2254 /*
2255 * Protected mode is complicated, of course.
2256 */
2257 if (!(uNewCs & X86_SEL_MASK_OFF_RPL))
2258 {
2259 Log(("retf %04x:%08RX64 -> invalid selector, #GP(0)\n", uNewCs, uNewRip));
2260 return iemRaiseGeneralProtectionFault0(pVCpu);
2261 }
2262
2263 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
2264
2265 /* Fetch the descriptor. */
2266 IEMSELDESC DescCs;
2267 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCs, uNewCs, X86_XCPT_GP);
2268 if (rcStrict != VINF_SUCCESS)
2269 return rcStrict;
2270
2271 /* Can only return to a code selector. */
2272 if ( !DescCs.Legacy.Gen.u1DescType
2273 || !(DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE) )
2274 {
2275 Log(("retf %04x:%08RX64 -> not a code selector (u1DescType=%u u4Type=%#x).\n",
2276 uNewCs, uNewRip, DescCs.Legacy.Gen.u1DescType, DescCs.Legacy.Gen.u4Type));
2277 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
2278 }
2279
2280 /* L vs D. */
2281 if ( DescCs.Legacy.Gen.u1Long /** @todo Testcase: far return to a selector with both L and D set. */
2282 && DescCs.Legacy.Gen.u1DefBig
2283 && IEM_IS_LONG_MODE(pVCpu))
2284 {
2285 Log(("retf %04x:%08RX64 -> both L & D set.\n", uNewCs, uNewRip));
2286 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
2287 }
2288
2289 /* DPL/RPL/CPL checks. */
2290 if ((uNewCs & X86_SEL_RPL) < IEM_GET_CPL(pVCpu))
2291 {
2292 Log(("retf %04x:%08RX64 -> RPL < CPL(%d).\n", uNewCs, uNewRip, IEM_GET_CPL(pVCpu)));
2293 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
2294 }
2295
2296 if (DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2297 {
2298 if ((uNewCs & X86_SEL_RPL) < DescCs.Legacy.Gen.u2Dpl)
2299 {
2300 Log(("retf %04x:%08RX64 -> DPL violation (conforming); DPL=%u RPL=%u\n",
2301 uNewCs, uNewRip, DescCs.Legacy.Gen.u2Dpl, (uNewCs & X86_SEL_RPL)));
2302 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
2303 }
2304 }
2305 else
2306 {
2307 if ((uNewCs & X86_SEL_RPL) != DescCs.Legacy.Gen.u2Dpl)
2308 {
2309 Log(("retf %04x:%08RX64 -> RPL != DPL; DPL=%u RPL=%u\n",
2310 uNewCs, uNewRip, DescCs.Legacy.Gen.u2Dpl, (uNewCs & X86_SEL_RPL)));
2311 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
2312 }
2313 }
2314
2315 /* Is it there? */
2316 if (!DescCs.Legacy.Gen.u1Present)
2317 {
2318 Log(("retf %04x:%08RX64 -> segment not present\n", uNewCs, uNewRip));
2319 return iemRaiseSelectorNotPresentBySelector(pVCpu, uNewCs);
2320 }
2321
2322 /*
2323 * Return to outer privilege? (We'll typically have entered via a call gate.)
2324 */
2325 if ((uNewCs & X86_SEL_RPL) != IEM_GET_CPL(pVCpu))
2326 {
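/* The frame on the inner stack at this point is: [RIP][CS], then cbPop bytes
 * of parameters, then the outer [RSP][SS] - hence the cbPop offset used below. */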
2327 /* Read the outer stack pointer stored *after* the parameters. */
2328 rcStrict = iemMemStackPopContinueSpecial(pVCpu, cbPop /*off*/, cbRetPtr, &uPtrFrame.pv, &bUnmapInfo, NewRsp.u);
2329 if (rcStrict != VINF_SUCCESS)
2330 return rcStrict;
2331
2332 uint16_t uNewOuterSs;
2333 RTUINT64U NewOuterRsp;
2334 if (enmEffOpSize == IEMMODE_16BIT)
2335 {
2336 NewOuterRsp.u = uPtrFrame.pu16[0];
2337 uNewOuterSs = uPtrFrame.pu16[1];
2338 }
2339 else if (enmEffOpSize == IEMMODE_32BIT)
2340 {
2341 NewOuterRsp.u = uPtrFrame.pu32[0];
2342 uNewOuterSs = uPtrFrame.pu16[2];
2343 }
2344 else
2345 {
2346 NewOuterRsp.u = uPtrFrame.pu64[0];
2347 uNewOuterSs = uPtrFrame.pu16[4];
2348 }
2349 rcStrict = iemMemStackPopDoneSpecial(pVCpu, bUnmapInfo);
2350 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
2351 { /* extremely likely */ }
2352 else
2353 return rcStrict;
2354
2355 /* Check for NULL stack selector (invalid in ring-3 and non-long mode)
2356 and read the selector. */
2357 IEMSELDESC DescSs;
2358 if (!(uNewOuterSs & X86_SEL_MASK_OFF_RPL))
2359 {
2360 if ( !DescCs.Legacy.Gen.u1Long
2361 || (uNewOuterSs & X86_SEL_RPL) == 3)
2362 {
2363 Log(("retf %04x:%08RX64 %04x:%08RX64 -> invalid stack selector, #GP\n",
2364 uNewCs, uNewRip, uNewOuterSs, NewOuterRsp.u));
2365 return iemRaiseGeneralProtectionFault0(pVCpu);
2366 }
2367 /** @todo Testcase: Return far to ring-1 or ring-2 with SS=0. */
2368 iemMemFakeStackSelDesc(&DescSs, (uNewOuterSs & X86_SEL_RPL));
2369 }
2370 else
2371 {
2372 /* Fetch the descriptor for the new stack segment. */
2373 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSs, uNewOuterSs, X86_XCPT_GP);
2374 if (rcStrict != VINF_SUCCESS)
2375 return rcStrict;
2376 }
2377
2378 /* Check that RPL of stack and code selectors match. */
2379 if ((uNewCs & X86_SEL_RPL) != (uNewOuterSs & X86_SEL_RPL))
2380 {
2381 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS.RPL != CS.RPL -> #GP(SS)\n", uNewCs, uNewRip, uNewOuterSs, NewOuterRsp.u));
2382 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewOuterSs);
2383 }
2384
2385 /* Must be a writable data segment. */
2386 if ( !DescSs.Legacy.Gen.u1DescType
2387 || (DescSs.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2388 || !(DescSs.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
2389 {
2390 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS not a writable data segment (u1DescType=%u u4Type=%#x) -> #GP(SS).\n",
2391 uNewCs, uNewRip, uNewOuterSs, NewOuterRsp.u, DescSs.Legacy.Gen.u1DescType, DescSs.Legacy.Gen.u4Type));
2392 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewOuterSs);
2393 }
2394
2395 /* L vs D. (Not mentioned by intel.) */
2396 if ( DescSs.Legacy.Gen.u1Long /** @todo Testcase: far return to a stack selector with both L and D set. */
2397 && DescSs.Legacy.Gen.u1DefBig
2398 && IEM_IS_LONG_MODE(pVCpu))
2399 {
2400 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS has both L & D set -> #GP(SS).\n",
2401 uNewCs, uNewRip, uNewOuterSs, NewOuterRsp.u));
2402 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewOuterSs);
2403 }
2404
2405 /* DPL/RPL/CPL checks. */
2406 if (DescSs.Legacy.Gen.u2Dpl != (uNewCs & X86_SEL_RPL))
2407 {
2408 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS.DPL(%u) != CS.RPL (%u) -> #GP(SS).\n",
2409 uNewCs, uNewRip, uNewOuterSs, NewOuterRsp.u, DescSs.Legacy.Gen.u2Dpl, uNewCs & X86_SEL_RPL));
2410 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewOuterSs);
2411 }
2412
2413 /* Is it there? */
2414 if (!DescSs.Legacy.Gen.u1Present)
2415 {
2416 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS not present -> #NP(SS).\n", uNewCs, uNewRip, uNewOuterSs, NewOuterRsp.u));
2417 return iemRaiseSelectorNotPresentBySelector(pVCpu, uNewCs);
2418 }
2419
2420 /* Calc SS limit.*/
2421 uint64_t u64BaseSs;
2422 uint32_t cbLimitSs = X86DESC_LIMIT_G(&DescSs.Legacy);
2423
2424 /* Is RIP canonical or within CS.limit? */
2425 uint64_t u64BaseCs;
2426 uint32_t cbLimitCs = X86DESC_LIMIT_G(&DescCs.Legacy);
2427
2428 /** @todo Testcase: Is this correct? */
2429 if ( DescCs.Legacy.Gen.u1Long
2430 && IEM_IS_LONG_MODE(pVCpu) )
2431 {
2432 if (!IEM_IS_CANONICAL(uNewRip))
2433 {
2434 Log(("retf %04x:%08RX64 %04x:%08RX64 - not canonical -> #GP.\n", uNewCs, uNewRip, uNewOuterSs, NewOuterRsp.u));
2435 return iemRaiseNotCanonical(pVCpu);
2436 }
2437 u64BaseCs = 0;
2438 u64BaseSs = 0;
2439 }
2440 else
2441 {
2442 if (uNewRip > cbLimitCs)
2443 {
2444 Log(("retf %04x:%08RX64 %04x:%08RX64 - out of bounds (%#x)-> #GP(CS).\n",
2445 uNewCs, uNewRip, uNewOuterSs, NewOuterRsp.u, cbLimitCs));
2446 /** @todo Intel says this is \#GP(0)! */
2447 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
2448 }
2449 u64BaseCs = X86DESC_BASE(&DescCs.Legacy);
2450 u64BaseSs = X86DESC_BASE(&DescSs.Legacy);
2451 }
2452
2453 /*
2454 * Now set the accessed bit before
2455 * writing the return address to the stack and committing the result into
2456 * CS, CSHID and RIP.
2457 */
2458 /** @todo Testcase: Need to check WHEN exactly the CS accessed bit is set. */
2459 if (!(DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2460 {
2461 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCs);
2462 if (rcStrict != VINF_SUCCESS)
2463 return rcStrict;
2464 /** @todo check what VT-x and AMD-V does. */
2465 DescCs.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2466 }
2467 /** @todo Testcase: Need to check WHEN exactly the SS accessed bit is set. */
2468 if (!(DescSs.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2469 {
2470 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewOuterSs);
2471 if (rcStrict != VINF_SUCCESS)
2472 return rcStrict;
2473 /** @todo check what VT-x and AMD-V does. */
2474 DescSs.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2475 }
2476
2477 /* commit */
2478 if (enmEffOpSize == IEMMODE_16BIT)
2479 pVCpu->cpum.GstCtx.rip = uNewRip & UINT16_MAX; /** @todo Testcase: When exactly does this occur? With call it happens prior to the limit check according to Intel... */
2480 else
2481 pVCpu->cpum.GstCtx.rip = uNewRip;
2482 pVCpu->cpum.GstCtx.cs.Sel = uNewCs;
2483 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCs;
2484 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2485 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCs.Legacy);
2486 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCs;
2487 pVCpu->cpum.GstCtx.cs.u64Base = u64BaseCs;
2488 pVCpu->cpum.GstCtx.ss.Sel = uNewOuterSs;
2489 pVCpu->cpum.GstCtx.ss.ValidSel = uNewOuterSs;
2490 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
2491 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSs.Legacy);
2492 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSs;
2493 pVCpu->cpum.GstCtx.ss.u64Base = u64BaseSs;
2494
2495 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pVCpu->cpum.GstCtx.ds);
2496 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pVCpu->cpum.GstCtx.es);
2497 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pVCpu->cpum.GstCtx.fs);
2498 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pVCpu->cpum.GstCtx.gs);
2499
2500 iemRecalcExecModeAndCplAndAcFlags(pVCpu); /* Affects iemRegAddToRspEx and the setting of RSP/SP below. */
2501
2502 if (cbPop)
2503 iemRegAddToRspEx(pVCpu, &NewOuterRsp, cbPop);
2504 if (IEM_IS_64BIT_CODE(pVCpu))
2505 pVCpu->cpum.GstCtx.rsp = NewOuterRsp.u;
2506 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
2507 pVCpu->cpum.GstCtx.rsp = (uint32_t)NewOuterRsp.u;
2508 else
2509 pVCpu->cpum.GstCtx.sp = (uint16_t)NewOuterRsp.u;
2510
2512
2513 /** @todo check if the hidden bits are loaded correctly for 64-bit
2514 * mode. */
2515 }
2516 /*
2517 * Return to the same privilege level
2518 */
2519 else
2520 {
2521 /* Limit / canonical check. */
2522 uint64_t u64Base;
2523 uint32_t cbLimitCs = X86DESC_LIMIT_G(&DescCs.Legacy);
2524
2525 /** @todo Testcase: Is this correct? */
2526 bool f64BitCs = false;
2527 if ( DescCs.Legacy.Gen.u1Long
2528 && IEM_IS_LONG_MODE(pVCpu) )
2529 {
2530 if (!IEM_IS_CANONICAL(uNewRip))
2531 {
2532 Log(("retf %04x:%08RX64 - not canonical -> #GP\n", uNewCs, uNewRip));
2533 return iemRaiseNotCanonical(pVCpu);
2534 }
2535 u64Base = 0;
2536 f64BitCs = true;
2538 }
2539 else
2540 {
2541 if (uNewRip > cbLimitCs)
2542 {
2543 Log(("retf %04x:%08RX64 -> out of bounds (%#x)\n", uNewCs, uNewRip, cbLimitCs));
2544 /** @todo Intel says this is \#GP(0)! */
2545 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
2546 }
2547 u64Base = X86DESC_BASE(&DescCs.Legacy);
2548 }
2549
2550 /*
2551 * Now set the accessed bit before
2552 * writing the return address to the stack and committing the result into
2553 * CS, CSHID and RIP.
2554 */
2555 /** @todo Testcase: Need to check WHEN exactly the accessed bit is set. */
2556 if (!(DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2557 {
2558 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCs);
2559 if (rcStrict != VINF_SUCCESS)
2560 return rcStrict;
2561 /** @todo check what VT-x and AMD-V does. */
2562 DescCs.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2563 }
2564
2565 /* commit */
2566 if (cbPop)
2567/** @todo This cannot be right. We're using the old CS mode here, and iemRegAddToRspEx checks fExec. */
2568 iemRegAddToRspEx(pVCpu, &NewRsp, cbPop);
2569 if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig || f64BitCs)
2570 pVCpu->cpum.GstCtx.rsp = NewRsp.u;
2571 else
2572 pVCpu->cpum.GstCtx.sp = (uint16_t)NewRsp.u;
2573 if (enmEffOpSize == IEMMODE_16BIT)
2574 pVCpu->cpum.GstCtx.rip = uNewRip & UINT16_MAX; /** @todo Testcase: When exactly does this occur? With call it happens prior to the limit check according to Intel... */
2575 else
2576 pVCpu->cpum.GstCtx.rip = uNewRip;
2577 pVCpu->cpum.GstCtx.cs.Sel = uNewCs;
2578 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCs;
2579 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2580 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCs.Legacy);
2581 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCs;
2582 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
2583 /** @todo check if the hidden bits are loaded correctly for 64-bit
2584 * mode. */
2585
2586 iemRecalcExecModeAndCplAndAcFlags(pVCpu);
2587 }
2588
2589 /* Flush the prefetch buffer. */
2590 IEM_FLUSH_PREFETCH_HEAVY(pVCpu, cbInstr); /** @todo use light flush for same privilege? */
2591
2592 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
2593}
2594
2595
2596/**
2597 * Implements enter.
2598 *
2599 * We're doing this in C because the instruction is insane; even for the
2600 * u8NestingLevel=0 case, dealing with the stack is tedious.
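* (For the common u8NestingLevel=0 case, e.g. "enter 0x20, 0" in 32-bit code,
* the net effect is roughly: push ebp; mov ebp, esp; sub esp, 0x20.)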
2601 *
2602 * @param enmEffOpSize The effective operand size.
2603 * @param cbFrame Frame size.
2604 * @param cParameters Frame parameter count.
2605 */
2606IEM_CIMPL_DEF_3(iemCImpl_enter, IEMMODE, enmEffOpSize, uint16_t, cbFrame, uint8_t, cParameters)
2607{
2608 /* Push RBP, saving the old value in TmpRbp. */
2609 RTUINT64U NewRsp; NewRsp.u = pVCpu->cpum.GstCtx.rsp;
2610 RTUINT64U TmpRbp; TmpRbp.u = pVCpu->cpum.GstCtx.rbp;
2611 RTUINT64U NewRbp;
2612 VBOXSTRICTRC rcStrict;
2613 if (enmEffOpSize == IEMMODE_64BIT)
2614 {
2615 rcStrict = iemMemStackPushU64Ex(pVCpu, TmpRbp.u, &NewRsp);
2616 NewRbp = NewRsp;
2617 }
2618 else if (enmEffOpSize == IEMMODE_32BIT)
2619 {
2620 rcStrict = iemMemStackPushU32Ex(pVCpu, TmpRbp.DWords.dw0, &NewRsp);
2621 NewRbp = NewRsp;
2622 }
2623 else
2624 {
2625 rcStrict = iemMemStackPushU16Ex(pVCpu, TmpRbp.Words.w0, &NewRsp);
2626 NewRbp = TmpRbp;
2627 NewRbp.Words.w0 = NewRsp.Words.w0;
2628 }
2629 if (rcStrict != VINF_SUCCESS)
2630 return rcStrict;
2631
2632 /* Copy the parameters (aka nesting levels by Intel). */
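/* The loop below walks down from the previous frame pointer, copying
 * cParameters stack slots (the "display") onto the new frame; the new frame
 * pointer itself is pushed once the copying is done. */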
2633 cParameters &= 0x1f;
2634 if (cParameters > 0)
2635 {
2636 switch (enmEffOpSize)
2637 {
2638 case IEMMODE_16BIT:
2639 if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
2640 TmpRbp.DWords.dw0 -= 2;
2641 else
2642 TmpRbp.Words.w0 -= 2;
2643 do
2644 {
2645 uint16_t u16Tmp;
2646 rcStrict = iemMemStackPopU16Ex(pVCpu, &u16Tmp, &TmpRbp);
2647 if (rcStrict != VINF_SUCCESS)
2648 break;
2649 rcStrict = iemMemStackPushU16Ex(pVCpu, u16Tmp, &NewRsp);
2650 } while (--cParameters > 0 && rcStrict == VINF_SUCCESS);
2651 break;
2652
2653 case IEMMODE_32BIT:
2654 if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
2655 TmpRbp.DWords.dw0 -= 4;
2656 else
2657 TmpRbp.Words.w0 -= 4;
2658 do
2659 {
2660 uint32_t u32Tmp;
2661 rcStrict = iemMemStackPopU32Ex(pVCpu, &u32Tmp, &TmpRbp);
2662 if (rcStrict != VINF_SUCCESS)
2663 break;
2664 rcStrict = iemMemStackPushU32Ex(pVCpu, u32Tmp, &NewRsp);
2665 } while (--cParameters > 0 && rcStrict == VINF_SUCCESS);
2666 break;
2667
2668 case IEMMODE_64BIT:
2669 TmpRbp.u -= 8;
2670 do
2671 {
2672 uint64_t u64Tmp;
2673 rcStrict = iemMemStackPopU64Ex(pVCpu, &u64Tmp, &TmpRbp);
2674 if (rcStrict != VINF_SUCCESS)
2675 break;
2676 rcStrict = iemMemStackPushU64Ex(pVCpu, u64Tmp, &NewRsp);
2677 } while (--cParameters > 0 && rcStrict == VINF_SUCCESS);
2678 break;
2679
2680 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2681 }
2682 if (rcStrict != VINF_SUCCESS)
2683 return rcStrict;
2684
2685 /* Push the new RBP */
2686 if (enmEffOpSize == IEMMODE_64BIT)
2687 rcStrict = iemMemStackPushU64Ex(pVCpu, NewRbp.u, &NewRsp);
2688 else if (enmEffOpSize == IEMMODE_32BIT)
2689 rcStrict = iemMemStackPushU32Ex(pVCpu, NewRbp.DWords.dw0, &NewRsp);
2690 else
2691 rcStrict = iemMemStackPushU16Ex(pVCpu, NewRbp.Words.w0, &NewRsp);
2692 if (rcStrict != VINF_SUCCESS)
2693 return rcStrict;
2694
2695 }
2696
2697 /* Recalc RSP. */
2698 iemRegSubFromRspEx(pVCpu, &NewRsp, cbFrame);
2699
2700 /** @todo Should probe write access at the new RSP according to AMD. */
2701 /** @todo Should handle accesses to the VMX APIC-access page. */
2702
2703 /* Commit it. */
2704 pVCpu->cpum.GstCtx.rbp = NewRbp.u;
2705 pVCpu->cpum.GstCtx.rsp = NewRsp.u;
2706 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
2707}
2708
2709
2710
2711/**
2712 * Implements leave.
2713 *
2714 * We're doing this in C because messing with the stack registers is annoying
2715 * since they depend on SS attributes.
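* (Conceptually LEAVE is just "mov (e)sp, (e)bp; pop (e)bp", with the sizes
* picked from SS.B / 64-bit mode and the effective operand size.)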
2716 *
2717 * @param enmEffOpSize The effective operand size.
2718 */
2719IEM_CIMPL_DEF_1(iemCImpl_leave, IEMMODE, enmEffOpSize)
2720{
2721 /* Calculate the intermediate RSP from RBP and the stack attributes. */
2722 RTUINT64U NewRsp;
2723 if (IEM_IS_64BIT_CODE(pVCpu))
2724 NewRsp.u = pVCpu->cpum.GstCtx.rbp;
2725 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
2726 NewRsp.u = pVCpu->cpum.GstCtx.ebp;
2727 else
2728 {
2729 /** @todo Check that LEAVE actually preserve the high EBP bits. */
2730 NewRsp.u = pVCpu->cpum.GstCtx.rsp;
2731 NewRsp.Words.w0 = pVCpu->cpum.GstCtx.bp;
2732 }
2733
2734 /* Pop RBP according to the operand size. */
2735 VBOXSTRICTRC rcStrict;
2736 RTUINT64U NewRbp;
2737 switch (enmEffOpSize)
2738 {
2739 case IEMMODE_16BIT:
2740 NewRbp.u = pVCpu->cpum.GstCtx.rbp;
2741 rcStrict = iemMemStackPopU16Ex(pVCpu, &NewRbp.Words.w0, &NewRsp);
2742 break;
2743 case IEMMODE_32BIT:
2744 NewRbp.u = 0;
2745 rcStrict = iemMemStackPopU32Ex(pVCpu, &NewRbp.DWords.dw0, &NewRsp);
2746 break;
2747 case IEMMODE_64BIT:
2748 rcStrict = iemMemStackPopU64Ex(pVCpu, &NewRbp.u, &NewRsp);
2749 break;
2750 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2751 }
2752 if (rcStrict != VINF_SUCCESS)
2753 return rcStrict;
2754
2755
2756 /* Commit it. */
2757 pVCpu->cpum.GstCtx.rbp = NewRbp.u;
2758 pVCpu->cpum.GstCtx.rsp = NewRsp.u;
2759 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
2760}
2761
2762
2763/**
2764 * Implements int3 and int XX.
2765 *
2766 * @param u8Int The interrupt vector number.
2767 * @param enmInt The int instruction type.
2768 */
2769IEM_CIMPL_DEF_2(iemCImpl_int, uint8_t, u8Int, IEMINT, enmInt)
2770{
2771 Assert(pVCpu->iem.s.cXcptRecursions == 0);
2772
2773 /*
2774 * We must check if this INT3 might belong to DBGF before raising a #BP.
2775 */
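/* (DBGF arms its software breakpoints as int3; when its handler claims the
 * trap we pass that status up instead of reflecting a #BP into the guest.) */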
2776 if (u8Int == 3)
2777 {
2778 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2779 if (pVM->dbgf.ro.cEnabledSwBreakpoints == 0)
2780 { /* likely: No vbox debugger breakpoints */ }
2781 else
2782 {
2783 VBOXSTRICTRC rcStrict = DBGFTrap03Handler(pVM, pVCpu, &pVCpu->cpum.GstCtx);
2784 Log(("iemCImpl_int: DBGFTrap03Handler -> %Rrc\n", VBOXSTRICTRC_VAL(rcStrict) ));
2785 if (rcStrict != VINF_EM_RAW_GUEST_TRAP)
2786 return iemSetPassUpStatus(pVCpu, rcStrict);
2787 }
2788 }
2789/** @todo single stepping */
2790 return iemRaiseXcptOrInt(pVCpu,
2791 cbInstr,
2792 u8Int,
2793 IEM_XCPT_FLAGS_T_SOFT_INT | enmInt,
2794 0,
2795 0);
2796}
2797
2798
2799/**
2800 * Implements iret for real mode and V8086 mode.
2801 *
2802 * @param enmEffOpSize The effective operand size.
2803 */
2804IEM_CIMPL_DEF_1(iemCImpl_iret_real_v8086, IEMMODE, enmEffOpSize)
2805{
2806 X86EFLAGS Efl;
2807 Efl.u = IEMMISC_GET_EFL(pVCpu);
2808 NOREF(cbInstr);
2809
2810 /*
2811 * iret throws an exception if VME isn't enabled.
2812 */
2813 if ( Efl.Bits.u1VM
2814 && Efl.Bits.u2IOPL != 3
2815 && !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_VME))
2816 return iemRaiseGeneralProtectionFault0(pVCpu);
2817
2818 /*
2819 * Do the stack bits, but don't commit RSP before everything checks
2820 * out right.
2821 */
2822 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
2823 VBOXSTRICTRC rcStrict;
2824 uint8_t bUnmapInfo;
2825 RTCPTRUNION uFrame;
2826 uint16_t uNewCs;
2827 uint32_t uNewEip;
2828 uint32_t uNewFlags;
2829 uint64_t uNewRsp;
2830 if (enmEffOpSize == IEMMODE_32BIT)
2831 {
2832 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 12, 1, &uFrame.pv, &bUnmapInfo, &uNewRsp);
2833 if (rcStrict != VINF_SUCCESS)
2834 return rcStrict;
2835 uNewEip = uFrame.pu32[0];
2836 if (uNewEip > UINT16_MAX)
2837 return iemRaiseGeneralProtectionFault0(pVCpu);
2838
2839 uNewCs = (uint16_t)uFrame.pu32[1];
2840 uNewFlags = uFrame.pu32[2];
2841 uNewFlags &= X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
2842 | X86_EFL_TF | X86_EFL_IF | X86_EFL_DF | X86_EFL_OF | X86_EFL_IOPL | X86_EFL_NT
2843 | X86_EFL_RF /*| X86_EFL_VM*/ | X86_EFL_AC /*|X86_EFL_VIF*/ /*|X86_EFL_VIP*/
2844 | X86_EFL_ID;
2845 if (IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_386)
2846 uNewFlags &= ~(X86_EFL_AC | X86_EFL_ID | X86_EFL_VIF | X86_EFL_VIP);
2847 uNewFlags |= Efl.u & (X86_EFL_VM | X86_EFL_VIF | X86_EFL_VIP | X86_EFL_1);
2848 }
2849 else
2850 {
2851 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 6, 1, &uFrame.pv, &bUnmapInfo, &uNewRsp);
2852 if (rcStrict != VINF_SUCCESS)
2853 return rcStrict;
2854 uNewEip = uFrame.pu16[0];
2855 uNewCs = uFrame.pu16[1];
2856 uNewFlags = uFrame.pu16[2];
2857 uNewFlags &= X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
2858 | X86_EFL_TF | X86_EFL_IF | X86_EFL_DF | X86_EFL_OF | X86_EFL_IOPL | X86_EFL_NT;
2859 uNewFlags |= Efl.u & ((UINT32_C(0xffff0000) | X86_EFL_1) & ~X86_EFL_RF);
2860 /** @todo The intel pseudo code does not indicate what happens to
2861 * reserved flags. We just ignore them. */
2862 /* Ancient CPU adjustments: See iemCImpl_popf. */
2863 if (IEM_GET_TARGET_CPU(pVCpu) == IEMTARGETCPU_286)
2864 uNewFlags &= ~(X86_EFL_NT | X86_EFL_IOPL);
2865 }
2866 rcStrict = iemMemStackPopDoneSpecial(pVCpu, bUnmapInfo);
2867 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
2868 { /* extremely likely */ }
2869 else
2870 return rcStrict;
2871
2872 /** @todo Check how this is supposed to work if sp=0xfffe. */
2873 Log7(("iemCImpl_iret_real_v8086: uNewCs=%#06x uNewRip=%#010x uNewFlags=%#x uNewRsp=%#18llx\n",
2874 uNewCs, uNewEip, uNewFlags, uNewRsp));
2875
2876 /*
2877 * Check the limit of the new EIP.
2878 */
2879 /** @todo Only the AMD pseudo code checks the limit here, what's
2880 * right? */
2881 if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit)
2882 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
2883
2884 /*
2885 * V8086 checks and flag adjustments
2886 */
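/* With IOPL=3 the IRET proceeds normally and IOPL itself is preserved. We can
 * only get here with IOPL<3 if CR4.VME is set (checked at the top), in which
 * case a 16-bit IRET loads the popped IF into VIF and keeps the real IF/IOPL;
 * it #GPs if the popped TF is set, if the popped IF is set while VIP is set,
 * or if this is a 32-bit IRET. */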
2887 if (Efl.Bits.u1VM)
2888 {
2889 if (Efl.Bits.u2IOPL == 3)
2890 {
2891 /* Preserve IOPL and clear RF. */
2892 uNewFlags &= ~(X86_EFL_IOPL | X86_EFL_RF);
2893 uNewFlags |= Efl.u & (X86_EFL_IOPL);
2894 }
2895 else if ( enmEffOpSize == IEMMODE_16BIT
2896 && ( !(uNewFlags & X86_EFL_IF)
2897 || !Efl.Bits.u1VIP )
2898 && !(uNewFlags & X86_EFL_TF) )
2899 {
2900 /* Move IF to VIF, clear RF and preserve IF and IOPL.*/
2901 uNewFlags &= ~X86_EFL_VIF;
2902 uNewFlags |= (uNewFlags & X86_EFL_IF) << (19 - 9);
2903 uNewFlags &= ~(X86_EFL_IF | X86_EFL_IOPL | X86_EFL_RF);
2904 uNewFlags |= Efl.u & (X86_EFL_IF | X86_EFL_IOPL);
2905 }
2906 else
2907 return iemRaiseGeneralProtectionFault0(pVCpu);
2908 Log7(("iemCImpl_iret_real_v8086: u1VM=1: adjusted uNewFlags=%#x\n", uNewFlags));
2909 }
2910
2911 /*
2912 * Commit the operation.
2913 */
2914 IEMTLBTRACE_IRET(pVCpu, uNewCs, uNewEip, uNewFlags);
2915#ifdef DBGFTRACE_ENABLED
2916 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "iret/rm %04x:%04x -> %04x:%04x %x %04llx",
2917 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, uNewCs, uNewEip, uNewFlags, uNewRsp);
2918#endif
2919 pVCpu->cpum.GstCtx.rsp = uNewRsp;
2920 pVCpu->cpum.GstCtx.rip = uNewEip;
2921 pVCpu->cpum.GstCtx.cs.Sel = uNewCs;
2922 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCs;
2923 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2924 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)uNewCs << 4;
2925 /** @todo do we load attribs and limit as well? */
2926 Assert(uNewFlags & X86_EFL_1);
2927 IEMMISC_SET_EFL(pVCpu, uNewFlags);
2928 pVCpu->iem.s.fExec = (pVCpu->iem.s.fExec & ~IEM_F_X86_AC) | iemCalcExecAcFlag(pVCpu);
2929
2930 /* Flush the prefetch buffer. */
2931 IEM_FLUSH_PREFETCH_HEAVY(pVCpu, cbInstr); /** @todo can do light flush in real mode at least */
2932
2933/** @todo single stepping */
2934 return VINF_SUCCESS;
2935}
2936
2937
2938/**
2939 * Loads a segment register when entering V8086 mode.
2940 *
2941 * @param pSReg The segment register.
2942 * @param uSeg The segment to load.
2943 */
2944static void iemCImplCommonV8086LoadSeg(PCPUMSELREG pSReg, uint16_t uSeg)
2945{
2946 pSReg->Sel = uSeg;
2947 pSReg->ValidSel = uSeg;
2948 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2949 pSReg->u64Base = (uint32_t)uSeg << 4;
2950 pSReg->u32Limit = 0xffff;
2951 pSReg->Attr.u = X86_SEL_TYPE_RW_ACC | RT_BIT(4) /*!sys*/ | RT_BIT(7) /*P*/ | (3 /*DPL*/ << 5); /* VT-x wants 0xf3 */
2952 /** @todo Testcase: Check if VT-x really needs this and what it does itself when
2953 * IRET'ing to V8086. */
2954}
2955
2956
2957/**
2958 * Implements iret for protected mode returning to V8086 mode.
2959 *
2960 * @param uNewEip The new EIP.
2961 * @param uNewCs The new CS.
2962 * @param uNewFlags The new EFLAGS.
2963 * @param uNewRsp The RSP after the initial IRET frame.
2964 *
2965 * @note This can only be a 32-bit iret due to the X86_EFL_VM position.
2966 */
2967IEM_CIMPL_DEF_4(iemCImpl_iret_prot_v8086, uint32_t, uNewEip, uint16_t, uNewCs, uint32_t, uNewFlags, uint64_t, uNewRsp)
2968{
2969 RT_NOREF_PV(cbInstr);
2970 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_MASK);
2971
2972 /*
2973 * Pop the V8086 specific frame bits off the stack.
2974 */
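/* When IRET drops from protected mode back to V8086 mode the frame holds,
 * after EIP/CS/EFLAGS, six more dwords: ESP, SS, ES, DS, FS and GS. */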
2975 uint8_t bUnmapInfo;
2976 RTCPTRUNION uFrame;
2977 VBOXSTRICTRC rcStrict = iemMemStackPopContinueSpecial(pVCpu, 0 /*off*/, 24 /*cbMem*/, &uFrame.pv, &bUnmapInfo, uNewRsp);
2978 if (rcStrict != VINF_SUCCESS)
2979 return rcStrict;
2980 uint32_t uNewEsp = uFrame.pu32[0];
2981 uint16_t uNewSs = uFrame.pu32[1];
2982 uint16_t uNewEs = uFrame.pu32[2];
2983 uint16_t uNewDs = uFrame.pu32[3];
2984 uint16_t uNewFs = uFrame.pu32[4];
2985 uint16_t uNewGs = uFrame.pu32[5];
2986 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo); /* don't use iemMemStackPopCommitSpecial here. */
2987 if (rcStrict != VINF_SUCCESS)
2988 return rcStrict;
2989
2990 /*
2991 * Commit the operation.
2992 */
2993 uNewFlags &= X86_EFL_LIVE_MASK;
2994 uNewFlags |= X86_EFL_RA1_MASK;
2995#ifdef DBGFTRACE_ENABLED
2996 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "iret/p/v %04x:%08x -> %04x:%04x %x %04x:%04x",
2997 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, uNewCs, uNewEip, uNewFlags, uNewSs, uNewEsp);
2998#endif
2999 Log7(("iemCImpl_iret_prot_v8086: %04x:%08x -> %04x:%04x %x %04x:%04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, uNewCs, uNewEip, uNewFlags, uNewSs, uNewEsp));
3000
3001 IEMMISC_SET_EFL(pVCpu, uNewFlags);
3002 iemCImplCommonV8086LoadSeg(&pVCpu->cpum.GstCtx.cs, uNewCs);
3003 iemCImplCommonV8086LoadSeg(&pVCpu->cpum.GstCtx.ss, uNewSs);
3004 iemCImplCommonV8086LoadSeg(&pVCpu->cpum.GstCtx.es, uNewEs);
3005 iemCImplCommonV8086LoadSeg(&pVCpu->cpum.GstCtx.ds, uNewDs);
3006 iemCImplCommonV8086LoadSeg(&pVCpu->cpum.GstCtx.fs, uNewFs);
3007 iemCImplCommonV8086LoadSeg(&pVCpu->cpum.GstCtx.gs, uNewGs);
3008 pVCpu->cpum.GstCtx.rip = (uint16_t)uNewEip;
3009 pVCpu->cpum.GstCtx.rsp = uNewEsp; /** @todo check this out! */
3010 pVCpu->iem.s.fExec = (pVCpu->iem.s.fExec & ~(IEM_F_MODE_MASK | IEM_F_X86_CPL_MASK | IEM_F_X86_AC))
3011 | (3 << IEM_F_X86_CPL_SHIFT)
3012 | IEM_F_MODE_X86_16BIT_PROT_V86
3013 | iemCalcExecAcFlag(pVCpu);
3014
3015 /* Flush the prefetch buffer. */
3016 IEM_FLUSH_PREFETCH_HEAVY(pVCpu, cbInstr);
3017
3018/** @todo single stepping */
3019 return VINF_SUCCESS;
3020}
3021
3022
3023/**
3024 * Implements iret for protected mode returning via a nested task.
3025 *
3026 * @param enmEffOpSize The effective operand size.
3027 */
3028IEM_CIMPL_DEF_1(iemCImpl_iret_prot_NestedTask, IEMMODE, enmEffOpSize)
3029{
3030 Log7(("iemCImpl_iret_prot_NestedTask:\n"));
3031#ifndef IEM_IMPLEMENTS_TASKSWITCH
3032 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
3033#else
3034 RT_NOREF_PV(enmEffOpSize);
3035
3036 /*
3037 * Read the segment selector in the link-field of the current TSS.
3038 */
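/* (The link/backlink field is the first word of both the 16-bit and 32-bit
 * TSS layouts, hence the 16-bit read at offset 0 of TR.base.) */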
3039 RTSEL uSelRet;
3040 VBOXSTRICTRC rcStrict = iemMemFetchSysU16(pVCpu, &uSelRet, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base);
3041 if (rcStrict != VINF_SUCCESS)
3042 return rcStrict;
3043
3044 /*
3045 * Fetch the returning task's TSS descriptor from the GDT.
3046 */
3047 if (uSelRet & X86_SEL_LDT)
3048 {
3049 Log(("iret_prot_NestedTask TSS not in LDT. uSelRet=%04x -> #TS\n", uSelRet));
3050 return iemRaiseTaskSwitchFaultBySelector(pVCpu, uSelRet);
3051 }
3052
3053 IEMSELDESC TssDesc;
3054 rcStrict = iemMemFetchSelDesc(pVCpu, &TssDesc, uSelRet, X86_XCPT_GP);
3055 if (rcStrict != VINF_SUCCESS)
3056 return rcStrict;
3057
3058 if (TssDesc.Legacy.Gate.u1DescType)
3059 {
3060 Log(("iret_prot_NestedTask Invalid TSS type. uSelRet=%04x -> #TS\n", uSelRet));
3061 return iemRaiseTaskSwitchFaultBySelector(pVCpu, uSelRet & X86_SEL_MASK_OFF_RPL);
3062 }
3063
3064 if ( TssDesc.Legacy.Gate.u4Type != X86_SEL_TYPE_SYS_286_TSS_BUSY
3065 && TssDesc.Legacy.Gate.u4Type != X86_SEL_TYPE_SYS_386_TSS_BUSY)
3066 {
3067 Log(("iret_prot_NestedTask TSS is not busy. uSelRet=%04x DescType=%#x -> #TS\n", uSelRet, TssDesc.Legacy.Gate.u4Type));
3068 return iemRaiseTaskSwitchFaultBySelector(pVCpu, uSelRet & X86_SEL_MASK_OFF_RPL);
3069 }
3070
3071 if (!TssDesc.Legacy.Gate.u1Present)
3072 {
3073 Log(("iret_prot_NestedTask TSS is not present. uSelRet=%04x -> #NP\n", uSelRet));
3074 return iemRaiseSelectorNotPresentBySelector(pVCpu, uSelRet & X86_SEL_MASK_OFF_RPL);
3075 }
3076
3077 uint32_t uNextEip = pVCpu->cpum.GstCtx.eip + cbInstr;
3078 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_IRET, uNextEip, 0 /* fFlags */, 0 /* uErr */,
3079 0 /* uCr2 */, uSelRet, &TssDesc);
3080#endif
3081}
3082
3083
3084/**
3085 * Implements iret for protected mode
3086 *
3087 * @param enmEffOpSize The effective operand size.
3088 */
3089IEM_CIMPL_DEF_1(iemCImpl_iret_prot, IEMMODE, enmEffOpSize)
3090{
3091 NOREF(cbInstr);
3092 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
3093
3094 /*
3095 * Nested task return.
3096 */
3097 if (pVCpu->cpum.GstCtx.eflags.Bits.u1NT)
3098 return IEM_CIMPL_CALL_1(iemCImpl_iret_prot_NestedTask, enmEffOpSize);
3099
3100 /*
3101 * Normal return.
3102 *
3103 * Do the stack bits, but don't commit RSP before everything checks
3104 * out right.
3105 */
3106 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
3107 uint8_t bUnmapInfo;
3108 VBOXSTRICTRC rcStrict;
3109 RTCPTRUNION uFrame;
3110 uint16_t uNewCs;
3111 uint32_t uNewEip;
3112 uint32_t uNewFlags;
3113 uint64_t uNewRsp;
3114 if (enmEffOpSize == IEMMODE_32BIT)
3115 {
3116 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 12, 3, &uFrame.pv, &bUnmapInfo, &uNewRsp);
3117 if (rcStrict != VINF_SUCCESS)
3118 return rcStrict;
3119 uNewEip = uFrame.pu32[0];
3120 uNewCs = (uint16_t)uFrame.pu32[1];
3121 uNewFlags = uFrame.pu32[2];
3122 }
3123 else
3124 {
3125 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 6, 1, &uFrame.pv, &bUnmapInfo, &uNewRsp);
3126 if (rcStrict != VINF_SUCCESS)
3127 return rcStrict;
3128 uNewEip = uFrame.pu16[0];
3129 uNewCs = uFrame.pu16[1];
3130 uNewFlags = uFrame.pu16[2];
3131 }
3132 rcStrict = iemMemStackPopDoneSpecial(pVCpu, bUnmapInfo); /* don't use iemMemStackPopCommitSpecial here. */
3133 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3134 { /* extremely likely */ }
3135 else
3136 return rcStrict;
3137 Log7(("iemCImpl_iret_prot: uNewCs=%#06x uNewEip=%#010x uNewFlags=%#x uNewRsp=%#18llx uCpl=%u\n", uNewCs, uNewEip, uNewFlags, uNewRsp, IEM_GET_CPL(pVCpu)));
3138
3139 /*
3140 * We're hopefully not returning to V8086 mode...
3141 */
3142 if ( (uNewFlags & X86_EFL_VM)
3143 && IEM_GET_CPL(pVCpu) == 0)
3144 {
3145 Assert(enmEffOpSize == IEMMODE_32BIT);
3146 return IEM_CIMPL_CALL_4(iemCImpl_iret_prot_v8086, uNewEip, uNewCs, uNewFlags, uNewRsp);
3147 }
3148
3149 /*
3150 * Protected mode.
3151 */
3152 /* Read the CS descriptor. */
3153 if (!(uNewCs & X86_SEL_MASK_OFF_RPL))
3154 {
3155 Log(("iret %04x:%08x -> invalid CS selector, #GP(0)\n", uNewCs, uNewEip));
3156 return iemRaiseGeneralProtectionFault0(pVCpu);
3157 }
3158
3159 IEMSELDESC DescCS;
3160 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCs, X86_XCPT_GP);
3161 if (rcStrict != VINF_SUCCESS)
3162 {
3163 Log(("iret %04x:%08x - rcStrict=%Rrc when fetching CS\n", uNewCs, uNewEip, VBOXSTRICTRC_VAL(rcStrict)));
3164 return rcStrict;
3165 }
3166
3167 /* Must be a code descriptor. */
3168 if (!DescCS.Legacy.Gen.u1DescType)
3169 {
3170 Log(("iret %04x:%08x - CS is system segment (%#x) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u4Type));
3171 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
3172 }
3173 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3174 {
3175 Log(("iret %04x:%08x - not code segment (%#x) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u4Type));
3176 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
3177 }
3178
3179 /* Privilege checks. */
3180 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF))
3181 {
3182 if ((uNewCs & X86_SEL_RPL) != DescCS.Legacy.Gen.u2Dpl)
3183 {
3184 Log(("iret %04x:%08x - RPL != DPL (%d) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u2Dpl));
3185 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
3186 }
3187 }
3188 else if ((uNewCs & X86_SEL_RPL) < DescCS.Legacy.Gen.u2Dpl)
3189 {
3190 Log(("iret %04x:%08x - RPL < DPL (%d) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u2Dpl));
3191 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
3192 }
3193 if ((uNewCs & X86_SEL_RPL) < IEM_GET_CPL(pVCpu))
3194 {
3195 Log(("iret %04x:%08x - RPL < CPL (%d) -> #GP\n", uNewCs, uNewEip, IEM_GET_CPL(pVCpu)));
3196 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
3197 }
3198
3199 /* Present? */
3200 if (!DescCS.Legacy.Gen.u1Present)
3201 {
3202 Log(("iret %04x:%08x - CS not present -> #NP\n", uNewCs, uNewEip));
3203 return iemRaiseSelectorNotPresentBySelector(pVCpu, uNewCs);
3204 }
3205
3206 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3207
3208 /*
3209 * Return to outer level?
3210 */
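    /* Sketch: when returning to an outer privilege level the frame continues
       with the outer stack pointer and SS, popped below as a second step:
            32-bit operand size: ..., ESP (dword), SS (dword, high word ignored)
            16-bit operand size: ..., SP (word),   SS (word)  */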
3211 if ((uNewCs & X86_SEL_RPL) != IEM_GET_CPL(pVCpu))
3212 {
3213 uint16_t uNewSS;
3214 uint32_t uNewESP;
3215 if (enmEffOpSize == IEMMODE_32BIT)
3216 {
3217 rcStrict = iemMemStackPopContinueSpecial(pVCpu, 0/*off*/, 8 /*cbMem*/, &uFrame.pv, &bUnmapInfo, uNewRsp);
3218 if (rcStrict != VINF_SUCCESS)
3219 return rcStrict;
3220/** @todo We might be popping a 32-bit ESP from the IRET frame, but whether
3221 * 16-bit or 32-bit are being loaded into SP depends on the D/B
3222 * bit of the popped SS selector it turns out. */
3223 uNewESP = uFrame.pu32[0];
3224 uNewSS = (uint16_t)uFrame.pu32[1];
3225 }
3226 else
3227 {
3228 rcStrict = iemMemStackPopContinueSpecial(pVCpu, 0 /*off*/, 4 /*cbMem*/, &uFrame.pv, &bUnmapInfo, uNewRsp);
3229 if (rcStrict != VINF_SUCCESS)
3230 return rcStrict;
3231 uNewESP = uFrame.pu16[0];
3232 uNewSS = uFrame.pu16[1];
3233 }
3234 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
3235 if (rcStrict != VINF_SUCCESS)
3236 return rcStrict;
3237 Log7(("iemCImpl_iret_prot: uNewSS=%#06x uNewESP=%#010x\n", uNewSS, uNewESP));
3238
3239 /* Read the SS descriptor. */
3240 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
3241 {
3242 Log(("iret %04x:%08x/%04x:%08x -> invalid SS selector, #GP(0)\n", uNewCs, uNewEip, uNewSS, uNewESP));
3243 return iemRaiseGeneralProtectionFault0(pVCpu);
3244 }
3245
3246 IEMSELDESC DescSS;
3247 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_GP); /** @todo Correct exception? */
3248 if (rcStrict != VINF_SUCCESS)
3249 {
3250 Log(("iret %04x:%08x/%04x:%08x - %Rrc when fetching SS\n",
3251 uNewCs, uNewEip, uNewSS, uNewESP, VBOXSTRICTRC_VAL(rcStrict)));
3252 return rcStrict;
3253 }
3254
3255 /* Privilege checks. */
3256 if ((uNewSS & X86_SEL_RPL) != (uNewCs & X86_SEL_RPL))
3257 {
3258 Log(("iret %04x:%08x/%04x:%08x -> SS.RPL != CS.RPL -> #GP\n", uNewCs, uNewEip, uNewSS, uNewESP));
3259 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewSS);
3260 }
3261 if (DescSS.Legacy.Gen.u2Dpl != (uNewCs & X86_SEL_RPL))
3262 {
3263 Log(("iret %04x:%08x/%04x:%08x -> SS.DPL (%d) != CS.RPL -> #GP\n",
3264 uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u2Dpl));
3265 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewSS);
3266 }
3267
3268 /* Must be a writeable data segment descriptor. */
3269 if (!DescSS.Legacy.Gen.u1DescType)
3270 {
3271 Log(("iret %04x:%08x/%04x:%08x -> SS is system segment (%#x) -> #GP\n",
3272 uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u4Type));
3273 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewSS);
3274 }
3275 if ((DescSS.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE)
3276 {
3277 Log(("iret %04x:%08x/%04x:%08x - not writable data segment (%#x) -> #GP\n",
3278 uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u4Type));
3279 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewSS);
3280 }
3281
3282 /* Present? */
3283 if (!DescSS.Legacy.Gen.u1Present)
3284 {
3285 Log(("iret %04x:%08x/%04x:%08x -> SS not present -> #SS\n", uNewCs, uNewEip, uNewSS, uNewESP));
3286 return iemRaiseStackSelectorNotPresentBySelector(pVCpu, uNewSS);
3287 }
3288
3289 uint32_t cbLimitSs = X86DESC_LIMIT_G(&DescSS.Legacy);
3290
3291 /* Check EIP. */
3292 if (uNewEip > cbLimitCS)
3293 {
3294 Log(("iret %04x:%08x/%04x:%08x -> EIP is out of bounds (%#x) -> #GP(0)\n",
3295 uNewCs, uNewEip, uNewSS, uNewESP, cbLimitCS));
3296 /** @todo Which is it, \#GP(0) or \#GP(sel)? */
3297 return iemRaiseSelectorBoundsBySelector(pVCpu, uNewCs);
3298 }
3299
3300 /*
3301 * Commit the changes, marking CS and SS accessed first since
3302 * that may fail.
3303 */
3304 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3305 {
3306 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCs);
3307 if (rcStrict != VINF_SUCCESS)
3308 return rcStrict;
3309 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3310 }
3311 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3312 {
3313 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
3314 if (rcStrict != VINF_SUCCESS)
3315 return rcStrict;
3316 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3317 }
3318
3319 uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
3320 | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT;
3321 if (enmEffOpSize != IEMMODE_16BIT)
3322 fEFlagsMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID;
3323 if (IEM_GET_CPL(pVCpu) == 0)
3324 fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is 0 */
3325 else if (IEM_GET_CPL(pVCpu) <= pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL)
3326 fEFlagsMask |= X86_EFL_IF;
3327 if (IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_386)
3328 fEFlagsMask &= ~(X86_EFL_AC | X86_EFL_ID | X86_EFL_VIF | X86_EFL_VIP);
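    /* In short (mirroring the if-chain above): the arithmetic flags, TF, DF,
       OF and NT always come from the popped value; RF/AC/ID only with 32-bit
       operand size; IF and IOPL only at CPL 0 (IF also when CPL <= IOPL);
       and AC/ID/VIF/VIP never on 386-class or older CPUs.  VM is zero here. */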
3329 uint32_t fEFlagsNew = IEMMISC_GET_EFL(pVCpu);
3330 fEFlagsNew &= ~fEFlagsMask;
3331 fEFlagsNew |= uNewFlags & fEFlagsMask;
3332 IEMTLBTRACE_IRET(pVCpu, uNewCs, uNewEip, fEFlagsNew);
3333#ifdef DBGFTRACE_ENABLED
3334 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "iret/%up%u %04x:%08x -> %04x:%04x %x %04x:%04x",
3335 IEM_GET_CPL(pVCpu), uNewCs & X86_SEL_RPL, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,
3336 uNewCs, uNewEip, uNewFlags, uNewSS, uNewESP);
3337#endif
3338
3339 IEMMISC_SET_EFL(pVCpu, fEFlagsNew);
3340 pVCpu->cpum.GstCtx.rip = uNewEip;
3341 pVCpu->cpum.GstCtx.cs.Sel = uNewCs;
3342 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCs;
3343 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3344 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3345 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
3346 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3347
3348 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
3349 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
3350 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3351 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3352 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSs;
3353 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3354 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
3355 pVCpu->cpum.GstCtx.sp = (uint16_t)uNewESP;
3356 else
3357 pVCpu->cpum.GstCtx.rsp = uNewESP;
3358
3359 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pVCpu->cpum.GstCtx.ds);
3360 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pVCpu->cpum.GstCtx.es);
3361 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pVCpu->cpum.GstCtx.fs);
3362 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pVCpu->cpum.GstCtx.gs);
3363
3364 iemRecalcExecModeAndCplAndAcFlags(pVCpu);
3365
3366 /* Done! */
3367
3368 }
3369 /*
3370 * Return to the same level.
3371 */
3372 else
3373 {
3374 /* Check EIP. */
3375 if (uNewEip > cbLimitCS)
3376 {
3377 Log(("iret %04x:%08x - EIP is out of bounds (%#x) -> #GP(0)\n", uNewCs, uNewEip, cbLimitCS));
3378 /** @todo Which is it, \#GP(0) or \#GP(sel)? */
3379 return iemRaiseSelectorBoundsBySelector(pVCpu, uNewCs);
3380 }
3381
3382 /*
3383 * Commit the changes, marking CS first since it may fail.
3384 */
3385 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3386 {
3387 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCs);
3388 if (rcStrict != VINF_SUCCESS)
3389 return rcStrict;
3390 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3391 }
3392
3393 X86EFLAGS NewEfl;
3394 NewEfl.u = IEMMISC_GET_EFL(pVCpu);
3395 uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
3396 | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT;
3397 if (enmEffOpSize != IEMMODE_16BIT)
3398 fEFlagsMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID;
3399 if (IEM_GET_CPL(pVCpu) == 0)
3400 fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is 0 */
3401 else if (IEM_GET_CPL(pVCpu) <= NewEfl.Bits.u2IOPL)
3402 fEFlagsMask |= X86_EFL_IF;
3403 if (IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_386)
3404 fEFlagsMask &= ~(X86_EFL_AC | X86_EFL_ID | X86_EFL_VIF | X86_EFL_VIP);
3405 NewEfl.u &= ~fEFlagsMask;
3406 NewEfl.u |= fEFlagsMask & uNewFlags;
3407 IEMTLBTRACE_IRET(pVCpu, uNewCs, uNewEip, NewEfl.u);
3408#ifdef DBGFTRACE_ENABLED
3409 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "iret/%up %04x:%08x -> %04x:%04x %x %04x:%04llx",
3410 IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,
3411 uNewCs, uNewEip, uNewFlags, pVCpu->cpum.GstCtx.ss.Sel, uNewRsp);
3412#endif
3413
3414 IEMMISC_SET_EFL(pVCpu, NewEfl.u);
3415 pVCpu->cpum.GstCtx.rip = uNewEip;
3416 pVCpu->cpum.GstCtx.cs.Sel = uNewCs;
3417 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCs;
3418 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3419 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3420 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
3421 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3422 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
3423 pVCpu->cpum.GstCtx.sp = (uint16_t)uNewRsp;
3424 else
3425 pVCpu->cpum.GstCtx.rsp = uNewRsp;
3426
3427 iemRecalcExecModeAndCplAndAcFlags(pVCpu);
3428
3429 /* Done! */
3430 }
3431
3432 /* Flush the prefetch buffer. */
3433 IEM_FLUSH_PREFETCH_HEAVY(pVCpu, cbInstr); /** @todo may light flush if same ring? */
3434
3435/** @todo single stepping */
3436 return VINF_SUCCESS;
3437}
3438
3439
3440/**
3441 * Implements iret for long mode
3442 *
3443 * @param enmEffOpSize The effective operand size.
3444 */
3445IEM_CIMPL_DEF_1(iemCImpl_iret_64bit, IEMMODE, enmEffOpSize)
3446{
3447 NOREF(cbInstr);
3448
3449 /*
3450 * Nested task return is not supported in long mode.
3451 */
3452 if (pVCpu->cpum.GstCtx.eflags.Bits.u1NT)
3453 {
3454 Log(("iret/64 with NT=1 (eflags=%#x) -> #GP(0)\n", pVCpu->cpum.GstCtx.eflags.u));
3455 return iemRaiseGeneralProtectionFault0(pVCpu);
3456 }
3457
3458 /*
3459 * Normal return.
3460 *
3461 * Do the stack bits, but don't commit RSP before everything checks
3462 * out right.
3463 */
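    /* The long mode IRET frame always has five slots, popped in this order
       (slot width = operand size, i.e. 8, 4 or 2 bytes each):
            RIP, CS, RFLAGS, RSP, SS
       Unlike the protected mode variant, RSP and SS are present even when
       returning to the same privilege level. */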
3464 VBOXSTRICTRC rcStrict;
3465 uint8_t bUnmapInfo;
3466 RTCPTRUNION uFrame;
3467 uint64_t uNewRip;
3468 uint16_t uNewCs;
3469 uint16_t uNewSs;
3470 uint32_t uNewFlags;
3471 uint64_t uNewRsp;
3472 if (enmEffOpSize == IEMMODE_64BIT)
3473 {
3474 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 5*8, 7, &uFrame.pv, &bUnmapInfo, &uNewRsp);
3475 if (rcStrict != VINF_SUCCESS)
3476 return rcStrict;
3477 uNewRip = uFrame.pu64[0];
3478 uNewCs = (uint16_t)uFrame.pu64[1];
3479 uNewFlags = (uint32_t)uFrame.pu64[2];
3480 uNewRsp = uFrame.pu64[3];
3481 uNewSs = (uint16_t)uFrame.pu64[4];
3482 }
3483 else if (enmEffOpSize == IEMMODE_32BIT)
3484 {
3485 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 5*4, 3, &uFrame.pv, &bUnmapInfo, &uNewRsp);
3486 if (rcStrict != VINF_SUCCESS)
3487 return rcStrict;
3488 uNewRip = uFrame.pu32[0];
3489 uNewCs = (uint16_t)uFrame.pu32[1];
3490 uNewFlags = uFrame.pu32[2];
3491 uNewRsp = uFrame.pu32[3];
3492 uNewSs = (uint16_t)uFrame.pu32[4];
3493 }
3494 else
3495 {
3496 Assert(enmEffOpSize == IEMMODE_16BIT);
3497 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 5*2, 1, &uFrame.pv, &bUnmapInfo, &uNewRsp);
3498 if (rcStrict != VINF_SUCCESS)
3499 return rcStrict;
3500 uNewRip = uFrame.pu16[0];
3501 uNewCs = uFrame.pu16[1];
3502 uNewFlags = uFrame.pu16[2];
3503 uNewRsp = uFrame.pu16[3];
3504 uNewSs = uFrame.pu16[4];
3505 }
3506 rcStrict = iemMemStackPopDoneSpecial(pVCpu, bUnmapInfo); /* don't use iemMemStackPopCommitSpecial here. */
3507 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3508    { /* extremely likely */ }
3509 else
3510 return rcStrict;
3511 Log7(("iret/64 stack: cs:rip=%04x:%016RX64 rflags=%016RX64 ss:rsp=%04x:%016RX64\n", uNewCs, uNewRip, uNewFlags, uNewSs, uNewRsp));
3512
3513 /*
3514 * Check stuff.
3515 */
3516 /* Read the CS descriptor. */
3517 if (!(uNewCs & X86_SEL_MASK_OFF_RPL))
3518 {
3519 Log(("iret/64 %04x:%016RX64/%04x:%016RX64 -> invalid CS selector, #GP(0)\n", uNewCs, uNewRip, uNewSs, uNewRsp));
3520 return iemRaiseGeneralProtectionFault0(pVCpu);
3521 }
3522
3523 IEMSELDESC DescCS;
3524 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCs, X86_XCPT_GP);
3525 if (rcStrict != VINF_SUCCESS)
3526 {
3527 Log(("iret/64 %04x:%016RX64/%04x:%016RX64 - rcStrict=%Rrc when fetching CS\n",
3528 uNewCs, uNewRip, uNewSs, uNewRsp, VBOXSTRICTRC_VAL(rcStrict)));
3529 return rcStrict;
3530 }
3531
3532 /* Must be a code descriptor. */
3533 if ( !DescCS.Legacy.Gen.u1DescType
3534 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3535 {
3536        Log(("iret/64 %04x:%016RX64/%04x:%016RX64 - CS is not a code segment u1DescType=%u u4Type=%#x -> #GP\n",
3537 uNewCs, uNewRip, uNewSs, uNewRsp, DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
3538 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
3539 }
3540
3541 /* Privilege checks. */
3542 uint8_t const uNewCpl = uNewCs & X86_SEL_RPL;
3543 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF))
3544 {
3545 if ((uNewCs & X86_SEL_RPL) != DescCS.Legacy.Gen.u2Dpl)
3546 {
3547 Log(("iret/64 %04x:%016RX64 - RPL != DPL (%d) -> #GP\n", uNewCs, uNewRip, DescCS.Legacy.Gen.u2Dpl));
3548 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
3549 }
3550 }
3551 else if ((uNewCs & X86_SEL_RPL) < DescCS.Legacy.Gen.u2Dpl)
3552 {
3553 Log(("iret/64 %04x:%016RX64 - RPL < DPL (%d) -> #GP\n", uNewCs, uNewRip, DescCS.Legacy.Gen.u2Dpl));
3554 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
3555 }
3556 if ((uNewCs & X86_SEL_RPL) < IEM_GET_CPL(pVCpu))
3557 {
3558 Log(("iret/64 %04x:%016RX64 - RPL < CPL (%d) -> #GP\n", uNewCs, uNewRip, IEM_GET_CPL(pVCpu)));
3559 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
3560 }
3561
3562 /* Present? */
3563 if (!DescCS.Legacy.Gen.u1Present)
3564 {
3565 Log(("iret/64 %04x:%016RX64/%04x:%016RX64 - CS not present -> #NP\n", uNewCs, uNewRip, uNewSs, uNewRsp));
3566 return iemRaiseSelectorNotPresentBySelector(pVCpu, uNewCs);
3567 }
3568
3569 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3570
3571 /* Read the SS descriptor. */
3572 IEMSELDESC DescSS;
3573 if (!(uNewSs & X86_SEL_MASK_OFF_RPL))
3574 {
3575 if ( !DescCS.Legacy.Gen.u1Long
3576 || DescCS.Legacy.Gen.u1DefBig /** @todo exactly how does iret (and others) behave with u1Long=1 and u1DefBig=1? \#GP(sel)? */
3577 || uNewCpl > 2) /** @todo verify SS=0 impossible for ring-3. */
3578 {
3579 Log(("iret/64 %04x:%016RX64/%04x:%016RX64 -> invalid SS selector, #GP(0)\n", uNewCs, uNewRip, uNewSs, uNewRsp));
3580 return iemRaiseGeneralProtectionFault0(pVCpu);
3581 }
3582 /* Make sure SS is sensible, marked as accessed etc. */
3583 iemMemFakeStackSelDesc(&DescSS, (uNewSs & X86_SEL_RPL));
3584 }
3585 else
3586 {
3587 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSs, X86_XCPT_GP); /** @todo Correct exception? */
3588 if (rcStrict != VINF_SUCCESS)
3589 {
3590 Log(("iret/64 %04x:%016RX64/%04x:%016RX64 - %Rrc when fetching SS\n",
3591 uNewCs, uNewRip, uNewSs, uNewRsp, VBOXSTRICTRC_VAL(rcStrict)));
3592 return rcStrict;
3593 }
3594 }
3595
3596 /* Privilege checks. */
3597 if ((uNewSs & X86_SEL_RPL) != (uNewCs & X86_SEL_RPL))
3598 {
3599 Log(("iret/64 %04x:%016RX64/%04x:%016RX64 -> SS.RPL != CS.RPL -> #GP\n", uNewCs, uNewRip, uNewSs, uNewRsp));
3600 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewSs);
3601 }
3602
3603 uint32_t cbLimitSs;
3604 if (!(uNewSs & X86_SEL_MASK_OFF_RPL))
3605 cbLimitSs = UINT32_MAX;
3606 else
3607 {
3608 if (DescSS.Legacy.Gen.u2Dpl != (uNewCs & X86_SEL_RPL))
3609 {
3610 Log(("iret/64 %04x:%016RX64/%04x:%016RX64 -> SS.DPL (%d) != CS.RPL -> #GP\n",
3611 uNewCs, uNewRip, uNewSs, uNewRsp, DescSS.Legacy.Gen.u2Dpl));
3612 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewSs);
3613 }
3614
3615 /* Must be a writeable data segment descriptor. */
3616 if (!DescSS.Legacy.Gen.u1DescType)
3617 {
3618 Log(("iret/64 %04x:%016RX64/%04x:%016RX64 -> SS is system segment (%#x) -> #GP\n",
3619 uNewCs, uNewRip, uNewSs, uNewRsp, DescSS.Legacy.Gen.u4Type));
3620 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewSs);
3621 }
3622 if ((DescSS.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE)
3623 {
3624 Log(("iret/64 %04x:%016RX64/%04x:%016RX64 - not writable data segment (%#x) -> #GP\n",
3625 uNewCs, uNewRip, uNewSs, uNewRsp, DescSS.Legacy.Gen.u4Type));
3626 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewSs);
3627 }
3628
3629 /* Present? */
3630 if (!DescSS.Legacy.Gen.u1Present)
3631 {
3632 Log(("iret/64 %04x:%016RX64/%04x:%016RX64 -> SS not present -> #SS\n", uNewCs, uNewRip, uNewSs, uNewRsp));
3633 return iemRaiseStackSelectorNotPresentBySelector(pVCpu, uNewSs);
3634 }
3635 cbLimitSs = X86DESC_LIMIT_G(&DescSS.Legacy);
3636 }
3637
3638 /* Check EIP. */
3639 if (DescCS.Legacy.Gen.u1Long)
3640 {
3641 if (!IEM_IS_CANONICAL(uNewRip))
3642 {
3643 Log(("iret/64 %04x:%016RX64/%04x:%016RX64 -> RIP is not canonical -> #GP(0)\n", uNewCs, uNewRip, uNewSs, uNewRsp));
3644 return iemRaiseNotCanonical(pVCpu);
3645 }
3646/** @todo check the location of this... Testcase. */
3647 if (RT_LIKELY(!DescCS.Legacy.Gen.u1DefBig))
3648 { /* likely */ }
3649 else
3650 {
3651 Log(("iret/64 %04x:%016RX64/%04x:%016RX64 -> both L and D are set -> #GP(0)\n", uNewCs, uNewRip, uNewSs, uNewRsp));
3652 return iemRaiseGeneralProtectionFault0(pVCpu);
3653 }
3654 }
3655 else
3656 {
3657 if (uNewRip > cbLimitCS)
3658 {
3659 Log(("iret/64 %04x:%016RX64/%04x:%016RX64 -> EIP is out of bounds (%#x) -> #GP(0)\n",
3660 uNewCs, uNewRip, uNewSs, uNewRsp, cbLimitCS));
3661 /** @todo Which is it, \#GP(0) or \#GP(sel)? */
3662 return iemRaiseSelectorBoundsBySelector(pVCpu, uNewCs);
3663 }
3664 }
3665
3666 /*
3667 * Commit the changes, marking CS and SS accessed first since
3668 * that may fail.
3669 */
3670 /** @todo where exactly are these actually marked accessed by a real CPU? */
3671 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3672 {
3673 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCs);
3674 if (rcStrict != VINF_SUCCESS)
3675 return rcStrict;
3676 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3677 }
3678 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3679 {
3680 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSs);
3681 if (rcStrict != VINF_SUCCESS)
3682 return rcStrict;
3683 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3684 }
3685
3686 uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
3687 | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT;
3688 if (enmEffOpSize != IEMMODE_16BIT)
3689 fEFlagsMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID;
3690 if (IEM_GET_CPL(pVCpu) == 0)
3691 fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is ignored */
3692 else if (IEM_GET_CPL(pVCpu) <= pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL)
3693 fEFlagsMask |= X86_EFL_IF;
3694 uint32_t fEFlagsNew = IEMMISC_GET_EFL(pVCpu);
3695 fEFlagsNew &= ~fEFlagsMask;
3696 fEFlagsNew |= uNewFlags & fEFlagsMask;
3697 IEMTLBTRACE_IRET(pVCpu, uNewCs, uNewRip, fEFlagsNew);
3698#ifdef DBGFTRACE_ENABLED
3699 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "iret/64/%ul%u %08llx -> %04x:%04llx %llx %04x:%04llx",
3700 IEM_GET_CPL(pVCpu), uNewCpl, pVCpu->cpum.GstCtx.rip, uNewCs, uNewRip, uNewFlags, uNewSs, uNewRsp);
3701#endif
3702
3703 IEMMISC_SET_EFL(pVCpu, fEFlagsNew);
3704 pVCpu->cpum.GstCtx.rip = uNewRip;
3705 pVCpu->cpum.GstCtx.cs.Sel = uNewCs;
3706 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCs;
3707 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3708 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3709 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
3710 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3711 if (pVCpu->cpum.GstCtx.cs.Attr.n.u1Long || pVCpu->cpum.GstCtx.cs.Attr.n.u1DefBig)
3712 pVCpu->cpum.GstCtx.rsp = uNewRsp;
3713 else
3714 pVCpu->cpum.GstCtx.sp = (uint16_t)uNewRsp;
3715 pVCpu->cpum.GstCtx.ss.Sel = uNewSs;
3716 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSs;
3717 if (!(uNewSs & X86_SEL_MASK_OFF_RPL))
3718 {
3719 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3720 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESCATTR_UNUSABLE | (uNewCpl << X86DESCATTR_DPL_SHIFT);
3721 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
3722 pVCpu->cpum.GstCtx.ss.u64Base = 0;
3723 Log2(("iret/64 new SS: NULL\n"));
3724 }
3725 else
3726 {
3727 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3728 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3729 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSs;
3730 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3731 Log2(("iret/64 new SS: base=%#RX64 lim=%#x attr=%#x\n", pVCpu->cpum.GstCtx.ss.u64Base, pVCpu->cpum.GstCtx.ss.u32Limit, pVCpu->cpum.GstCtx.ss.Attr.u));
3732 }
3733
3734 if (IEM_GET_CPL(pVCpu) != uNewCpl)
3735 {
3736 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCpl, &pVCpu->cpum.GstCtx.ds);
3737 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCpl, &pVCpu->cpum.GstCtx.es);
3738 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCpl, &pVCpu->cpum.GstCtx.fs);
3739 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCpl, &pVCpu->cpum.GstCtx.gs);
3740 }
3741
3742 iemRecalcExecModeAndCplAndAcFlags(pVCpu);
3743
3744 /* Flush the prefetch buffer. */
3745 IEM_FLUSH_PREFETCH_HEAVY(pVCpu, cbInstr); /** @todo may light flush if the ring + mode doesn't change */
3746
3747/** @todo single stepping */
3748 return VINF_SUCCESS;
3749}
3750
3751
3752/**
3753 * Implements iret.
3754 *
3755 * @param enmEffOpSize The effective operand size.
3756 */
3757IEM_CIMPL_DEF_1(iemCImpl_iret, IEMMODE, enmEffOpSize)
3758{
3759 bool fBlockingNmi = CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx);
3760
3761 if (!IEM_IS_IN_GUEST(pVCpu))
3762 { /* probable */ }
3763#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3764 else if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3765 {
3766 /*
3767 * Record whether NMI (or virtual-NMI) blocking is in effect during the execution
3768 * of this IRET instruction. We need to provide this information as part of some
3769 * VM-exits.
3770 *
3771 * See Intel spec. 27.2.2 "Information for VM Exits Due to Vectored Events".
3772 */
3773 if (IEM_VMX_IS_PINCTLS_SET(pVCpu, VMX_PIN_CTLS_VIRT_NMI))
3774 pVCpu->cpum.GstCtx.hwvirt.vmx.fNmiUnblockingIret = pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking;
3775 else
3776 pVCpu->cpum.GstCtx.hwvirt.vmx.fNmiUnblockingIret = fBlockingNmi;
3777
3778 /*
3779 * If "NMI exiting" is set, IRET does not affect blocking of NMIs.
3780 * See Intel Spec. 25.3 "Changes To Instruction Behavior In VMX Non-root Operation".
3781 */
3782 if (IEM_VMX_IS_PINCTLS_SET(pVCpu, VMX_PIN_CTLS_NMI_EXIT))
3783 fBlockingNmi = false;
3784
3785 /* Clear virtual-NMI blocking, if any, before causing any further exceptions. */
3786 pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking = false;
3787 }
3788#endif
3789 /*
3790     * The SVM nested-guest intercept for IRET takes priority over all exceptions.
3791 * The NMI is still held pending (which I assume means blocking of further NMIs
3792 * is in effect).
3793 *
3794 * See AMD spec. 15.9 "Instruction Intercepts".
3795 * See AMD spec. 15.21.9 "NMI Support".
3796 */
3797 else if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IRET))
3798 {
3799 Log(("iret: Guest intercept -> #VMEXIT\n"));
3800 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
3801 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_IRET, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3802 }
3803
3804 /*
3805 * Clear NMI blocking, if any, before causing any further exceptions.
3806 * See Intel spec. 6.7.1 "Handling Multiple NMIs".
3807 */
3808 if (fBlockingNmi)
3809 CPUMClearInterruptInhibitingByNmi(&pVCpu->cpum.GstCtx);
3810
3811 /*
3812 * Call a mode specific worker.
3813 */
3814 VBOXSTRICTRC rcStrict;
3815 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
3816 rcStrict = IEM_CIMPL_CALL_1(iemCImpl_iret_real_v8086, enmEffOpSize);
3817 else
3818 {
3819 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
3820 if (IEM_IS_64BIT_CODE(pVCpu))
3821 rcStrict = IEM_CIMPL_CALL_1(iemCImpl_iret_64bit, enmEffOpSize);
3822 else
3823 rcStrict = IEM_CIMPL_CALL_1(iemCImpl_iret_prot, enmEffOpSize);
3824 }
3825
3826#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3827 /*
3828 * Clear NMI unblocking IRET state with the completion of IRET.
3829 */
3830 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3831 pVCpu->cpum.GstCtx.hwvirt.vmx.fNmiUnblockingIret = false;
3832#endif
3833 return rcStrict;
3834}
3835
3836
3837static void iemLoadallSetSelector(PVMCPUCC pVCpu, uint8_t iSegReg, uint16_t uSel)
3838{
3839 PCPUMSELREGHID pHid = iemSRegGetHid(pVCpu, iSegReg);
3840
3841 pHid->Sel = uSel;
3842 pHid->ValidSel = uSel;
3843 pHid->fFlags = CPUMSELREG_FLAGS_VALID;
3844}
3845
3846
3847static void iemLoadall286SetDescCache(PVMCPUCC pVCpu, uint8_t iSegReg, uint8_t const *pbMem)
3848{
3849 PCPUMSELREGHID pHid = iemSRegGetHid(pVCpu, iSegReg);
3850
3851 /* The base is in the first three bytes. */
3852 pHid->u64Base = pbMem[0] + (pbMem[1] << 8) + (pbMem[2] << 16);
3853 /* The attributes are in the fourth byte. */
3854 pHid->Attr.u = pbMem[3];
3855 pHid->Attr.u &= ~(X86DESCATTR_L | X86DESCATTR_D); /* (just to be on the safe side) */
3856 /* The limit is in the last two bytes. */
3857 pHid->u32Limit = pbMem[4] + (pbMem[5] << 8);
3858}
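/* Worked example (values hypothetical): the six bytes 00 80 0B 93 FF FF decode
   to base=0x0B8000, attributes=0x93 (present, DPL 0, writable data, accessed)
   and limit=0xFFFF - i.e. a classic real mode style text screen segment. */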
3859
3860
3861/**
3862 * Implements 286 LOADALL (286 CPUs only).
3863 */
3864IEM_CIMPL_DEF_0(iemCImpl_loadall286)
3865{
3866 NOREF(cbInstr);
3867
3868 /* Data is loaded from a buffer at 800h. No checks are done on the
3869 * validity of loaded state.
3870 *
3871 * LOADALL only loads the internal CPU state, it does not access any
3872 * GDT, LDT, or similar tables.
3873 */
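    /* Rough sketch of the 0x66 byte buffer at 000800h as consumed below (only
       the fields we actually load are listed):
            0x06  MSW                         0x36  ES descriptor cache
            0x16  TR selector                 0x3C  CS descriptor cache
            0x18  FLAGS                       0x42  SS descriptor cache
            0x1A  IP                          0x48  DS descriptor cache
            0x1C  LDT selector                0x4E  GDTR base+limit
            0x1E  DS, SS, CS, ES              0x5A  IDTR base+limit
            0x26  DI, SI, BP, SP, BX, DX, CX, AX
       Each descriptor cache entry is 6 bytes: 24-bit base, attribute byte,
       16-bit limit (see iemLoadall286SetDescCache above). */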
3874
3875 if (IEM_GET_CPL(pVCpu) != 0)
3876 {
3877 Log(("loadall286: CPL must be 0 not %u -> #GP(0)\n", IEM_GET_CPL(pVCpu)));
3878 return iemRaiseGeneralProtectionFault0(pVCpu);
3879 }
3880
3881 uint8_t bUnmapInfo;
3882 uint8_t const *pbMem = NULL;
3883 RTGCPHYS GCPtrStart = 0x800; /* Fixed table location. */
3884 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pbMem, &bUnmapInfo, 0x66, UINT8_MAX, GCPtrStart, IEM_ACCESS_SYS_R, 0);
3885 if (rcStrict != VINF_SUCCESS)
3886 return rcStrict;
3887
3888 /* The MSW is at offset 0x06. */
3889 uint16_t const *pau16Mem = (uint16_t const *)(pbMem + 0x06);
3890 /* Even LOADALL can't clear the MSW.PE bit, though it can set it. */
3891 uint64_t uNewCr0 = pVCpu->cpum.GstCtx.cr0 & ~(X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
3892 uNewCr0 |= *pau16Mem & (X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
3893 uint64_t const uOldCr0 = pVCpu->cpum.GstCtx.cr0;
3894
3895 CPUMSetGuestCR0(pVCpu, uNewCr0);
3896 Assert(pVCpu->cpum.GstCtx.cr0 == uNewCr0);
3897
3898 /* Inform PGM if mode changed. */
3899 if ((uNewCr0 & X86_CR0_PE) != (uOldCr0 & X86_CR0_PE))
3900 {
3901 int rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, true /* global */);
3902 AssertRCReturn(rc, rc);
3903 /* ignore informational status codes */
3904 }
3905 rcStrict = PGMChangeMode(pVCpu, pVCpu->cpum.GstCtx.cr0, pVCpu->cpum.GstCtx.cr4, pVCpu->cpum.GstCtx.msrEFER,
3906 false /* fForce */);
3907
3908 /* TR selector is at offset 0x16. */
3909 pau16Mem = (uint16_t const *)(pbMem + 0x16);
3910 pVCpu->cpum.GstCtx.tr.Sel = pau16Mem[0];
3911 pVCpu->cpum.GstCtx.tr.ValidSel = pau16Mem[0];
3912 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
3913
3914 /* Followed by FLAGS... */
3915 pVCpu->cpum.GstCtx.eflags.u = pau16Mem[1] | X86_EFL_1;
3916 pVCpu->cpum.GstCtx.ip = pau16Mem[2]; /* ...and IP. */
3917
3918 /* LDT is at offset 0x1C. */
3919 pau16Mem = (uint16_t const *)(pbMem + 0x1C);
3920 pVCpu->cpum.GstCtx.ldtr.Sel = pau16Mem[0];
3921 pVCpu->cpum.GstCtx.ldtr.ValidSel = pau16Mem[0];
3922 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
3923
3924 /* Segment registers are at offset 0x1E. */
3925 pau16Mem = (uint16_t const *)(pbMem + 0x1E);
3926 iemLoadallSetSelector(pVCpu, X86_SREG_DS, pau16Mem[0]);
3927 iemLoadallSetSelector(pVCpu, X86_SREG_SS, pau16Mem[1]);
3928 iemLoadallSetSelector(pVCpu, X86_SREG_CS, pau16Mem[2]);
3929 iemLoadallSetSelector(pVCpu, X86_SREG_ES, pau16Mem[3]);
3930
3931 /* GPRs are at offset 0x26. */
3932 pau16Mem = (uint16_t const *)(pbMem + 0x26);
3933 pVCpu->cpum.GstCtx.di = pau16Mem[0];
3934 pVCpu->cpum.GstCtx.si = pau16Mem[1];
3935 pVCpu->cpum.GstCtx.bp = pau16Mem[2];
3936 pVCpu->cpum.GstCtx.sp = pau16Mem[3];
3937 pVCpu->cpum.GstCtx.bx = pau16Mem[4];
3938 pVCpu->cpum.GstCtx.dx = pau16Mem[5];
3939 pVCpu->cpum.GstCtx.cx = pau16Mem[6];
3940 pVCpu->cpum.GstCtx.ax = pau16Mem[7];
3941
3942 /* Descriptor caches are at offset 0x36, 6 bytes per entry. */
3943 iemLoadall286SetDescCache(pVCpu, X86_SREG_ES, pbMem + 0x36);
3944 iemLoadall286SetDescCache(pVCpu, X86_SREG_CS, pbMem + 0x3C);
3945 iemLoadall286SetDescCache(pVCpu, X86_SREG_SS, pbMem + 0x42);
3946 iemLoadall286SetDescCache(pVCpu, X86_SREG_DS, pbMem + 0x48);
3947
3948 /* GDTR contents are at offset 0x4E, 6 bytes. */
3949 uint8_t const *pau8Mem = pbMem + 0x4E;
3950 /* NB: Fourth byte "should be zero"; we are ignoring it. */
3951 RTGCPHYS GCPtrBase = pau8Mem[0] + ((uint32_t)pau8Mem[1] << 8) + ((uint32_t)pau8Mem[2] << 16);
3952 uint16_t cbLimit = pau8Mem[4] + ((uint32_t)pau8Mem[5] << 8);
3953 CPUMSetGuestGDTR(pVCpu, GCPtrBase, cbLimit);
3954
3955 /* IDTR contents are at offset 0x5A, 6 bytes. */
3956 pau8Mem = pbMem + 0x5A;
3957 GCPtrBase = pau8Mem[0] + ((uint32_t)pau8Mem[1] << 8) + ((uint32_t)pau8Mem[2] << 16);
3958 cbLimit = pau8Mem[4] + ((uint32_t)pau8Mem[5] << 8);
3959 CPUMSetGuestIDTR(pVCpu, GCPtrBase, cbLimit);
3960
3961 Log(("LOADALL: GDTR:%08RX64/%04X, IDTR:%08RX64/%04X\n", pVCpu->cpum.GstCtx.gdtr.pGdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, pVCpu->cpum.GstCtx.idtr.pIdt, pVCpu->cpum.GstCtx.idtr.cbIdt));
3962 Log(("LOADALL: CS:%04X, CS base:%08X, limit:%04X, attrs:%02X\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.cs.u64Base, pVCpu->cpum.GstCtx.cs.u32Limit, pVCpu->cpum.GstCtx.cs.Attr.u));
3963 Log(("LOADALL: DS:%04X, DS base:%08X, limit:%04X, attrs:%02X\n", pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.ds.u64Base, pVCpu->cpum.GstCtx.ds.u32Limit, pVCpu->cpum.GstCtx.ds.Attr.u));
3964 Log(("LOADALL: ES:%04X, ES base:%08X, limit:%04X, attrs:%02X\n", pVCpu->cpum.GstCtx.es.Sel, pVCpu->cpum.GstCtx.es.u64Base, pVCpu->cpum.GstCtx.es.u32Limit, pVCpu->cpum.GstCtx.es.Attr.u));
3965 Log(("LOADALL: SS:%04X, SS base:%08X, limit:%04X, attrs:%02X\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ss.u64Base, pVCpu->cpum.GstCtx.ss.u32Limit, pVCpu->cpum.GstCtx.ss.Attr.u));
3966    Log(("LOADALL: SI:%04X, DI:%04X, AX:%04X, BX:%04X, CX:%04X, DX:%04X\n", pVCpu->cpum.GstCtx.si, pVCpu->cpum.GstCtx.di, pVCpu->cpum.GstCtx.ax, pVCpu->cpum.GstCtx.bx, pVCpu->cpum.GstCtx.cx, pVCpu->cpum.GstCtx.dx));
3967
3968 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
3969 if (rcStrict != VINF_SUCCESS)
3970 return rcStrict;
3971
3972 /*
3973     * The CPL may change and protected mode may become enabled.  The CPL is taken
3974     * from the "DPL fields of the SS and CS descriptor caches", but there is no
3975     * word as to what happens if those are not identical (probably bad things).
3976 */
3977 iemRecalcExecModeAndCplAndAcFlags(pVCpu);
3978 Assert(IEM_IS_16BIT_CODE(pVCpu));
3979
3980 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS | CPUM_CHANGED_IDTR | CPUM_CHANGED_GDTR | CPUM_CHANGED_TR | CPUM_CHANGED_LDTR);
3981
3982 /* Flush the prefetch buffer. */
3983 IEM_FLUSH_PREFETCH_HEAVY(pVCpu, cbInstr);
3984
3985/** @todo single stepping */
3986 return rcStrict;
3987}
3988
3989
3990/**
3991 * Implements SYSCALL (AMD and Intel64).
3992 */
3993IEM_CIMPL_DEF_0(iemCImpl_syscall)
3994{
3995
3996
3997 /*
3998 * Check preconditions.
3999 *
4000     * Note that, according to the documentation, CPUs may load slightly different
4001     * values into CS and SS than we allow here.  This has yet to be checked on real
4002 * hardware.
4003 */
4004 if (!(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_SCE))
4005 {
4006 Log(("syscall: Not enabled in EFER -> #UD\n"));
4007 return iemRaiseUndefinedOpcode(pVCpu);
4008 }
4009 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
4010 {
4011 Log(("syscall: Protected mode is required -> #GP(0)\n"));
4012 return iemRaiseGeneralProtectionFault0(pVCpu);
4013 }
4014 if ( IEM_IS_GUEST_CPU_INTEL(pVCpu)
4015 && !IEM_IS_64BIT_CODE(pVCpu)) //&& !CPUMIsGuestInLongModeEx(IEM_GET_CTX(pVCpu)))
4016 {
4017 Log(("syscall: Only available in 64-bit mode on intel -> #UD\n"));
4018 return iemRaiseUndefinedOpcode(pVCpu);
4019 }
4020
4021 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SYSCALL_MSRS);
4022
4023 /** @todo verify RPL ignoring and CS=0xfff8 (i.e. SS == 0). */
4024 /** @todo what about LDT selectors? Shouldn't matter, really. */
4025 uint16_t uNewCs = (pVCpu->cpum.GstCtx.msrSTAR >> MSR_K6_STAR_SYSCALL_CS_SS_SHIFT) & X86_SEL_MASK_OFF_RPL;
4026 uint16_t uNewSs = uNewCs + 8;
4027 if (uNewCs == 0 || uNewSs == 0)
4028 {
4029 /** @todo Neither Intel nor AMD document this check. */
4030 Log(("syscall: msrSTAR.CS = 0 or SS = 0 -> #GP(0)\n"));
4031 return iemRaiseGeneralProtectionFault0(pVCpu);
4032 }
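    /* For reference, MSR_K6_STAR as consumed here and in iemCImpl_sysret:
            bits 63:48  SYSRET CS/SS base selector
            bits 47:32  SYSCALL CS/SS base selector
            bits 31:0   SYSCALL EIP (legacy mode only)
       SS is always derived as CS + 8; the RPL bits are masked off above. */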
4033
4034 /*
4035     * Hack alert! Convert incoming debug events to silent ones on Intel.
4036 * See the dbg+inhibit+ringxfer test in bs3-cpu-weird-1.
4037 */
4038 if ( !(pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK_NONSILENT)
4039 || !IEM_IS_GUEST_CPU_INTEL(pVCpu))
4040 { /* ignore */ }
4041 else
4042 {
4043 Log(("iemCImpl_syscall: Converting pending %#x debug events to a silent one (intel hack)\n",
4044 pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK));
4045 pVCpu->cpum.GstCtx.eflags.uBoth = (pVCpu->cpum.GstCtx.eflags.uBoth & ~CPUMCTX_DBG_HIT_DRX_MASK)
4046 | CPUMCTX_DBG_HIT_DRX_SILENT;
4047 }
4048
4049 /*
4050 * Long mode and legacy mode differs.
4051 */
4052 if (CPUMIsGuestInLongModeEx(IEM_GET_CTX(pVCpu)))
4053 {
4054        uint64_t uNewRip = IEM_IS_64BIT_CODE(pVCpu) ? pVCpu->cpum.GstCtx.msrLSTAR : pVCpu->cpum.GstCtx.msrCSTAR;
4055
4056 /* This test isn't in the docs, but I'm not trusting the guys writing
4057 the MSRs to have validated the values as canonical like they should. */
4058 if (!IEM_IS_CANONICAL(uNewRip))
4059 {
4060 /** @todo Intel claims this can't happen because IA32_LSTAR MSR can't be written with non-canonical address. */
4061 Log(("syscall: New RIP not canonical -> #UD\n"));
4062 return iemRaiseUndefinedOpcode(pVCpu);
4063 }
4064
4065 /*
4066 * Commit it.
4067 */
4068 Log(("syscall: %04x:%016RX64 [efl=%#llx] -> %04x:%016RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags.u, uNewCs, uNewRip));
4069 pVCpu->cpum.GstCtx.rcx = pVCpu->cpum.GstCtx.rip + cbInstr;
4070 pVCpu->cpum.GstCtx.rip = uNewRip;
4071
4072 pVCpu->cpum.GstCtx.rflags.u &= ~X86_EFL_RF;
4073 pVCpu->cpum.GstCtx.r11 = pVCpu->cpum.GstCtx.rflags.u;
4074 pVCpu->cpum.GstCtx.rflags.u &= ~pVCpu->cpum.GstCtx.msrSFMASK;
4075 pVCpu->cpum.GstCtx.rflags.u |= X86_EFL_RA1_MASK;
4076
4077 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_L | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC;
4078 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_DT | X86_SEL_TYPE_RW_ACC;
4079
4080 pVCpu->iem.s.fExec = (pVCpu->iem.s.fExec & ~(IEM_F_MODE_MASK | IEM_F_X86_CPL_MASK | IEM_F_X86_AC))
4081 | IEM_F_MODE_X86_64BIT;
4082 }
4083 else
4084 {
4085 /*
4086 * Commit it.
4087 */
4088 Log(("syscall: %04x:%08RX32 [efl=%#x] -> %04x:%08RX32\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u, uNewCs, (uint32_t)(pVCpu->cpum.GstCtx.msrSTAR & MSR_K6_STAR_SYSCALL_EIP_MASK)));
4089 pVCpu->cpum.GstCtx.rcx = pVCpu->cpum.GstCtx.eip + cbInstr;
4090 pVCpu->cpum.GstCtx.rip = pVCpu->cpum.GstCtx.msrSTAR & MSR_K6_STAR_SYSCALL_EIP_MASK;
4091 pVCpu->cpum.GstCtx.rflags.u &= ~(X86_EFL_VM | X86_EFL_IF | X86_EFL_RF);
4092
4093 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC;
4094 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_DT | X86_SEL_TYPE_RW_ACC;
4095
4096 pVCpu->iem.s.fExec = (pVCpu->iem.s.fExec & ~(IEM_F_MODE_MASK | IEM_F_X86_CPL_MASK | IEM_F_X86_AC))
4097 | IEM_F_MODE_X86_32BIT_PROT
4098 | iemCalc32BitFlatIndicatorEsDs(pVCpu);
4099 }
4100 pVCpu->cpum.GstCtx.cs.Sel = uNewCs;
4101 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCs;
4102 pVCpu->cpum.GstCtx.cs.u64Base = 0;
4103 pVCpu->cpum.GstCtx.cs.u32Limit = UINT32_MAX;
4104 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
4105
4106 pVCpu->cpum.GstCtx.ss.Sel = uNewSs;
4107 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSs;
4108 pVCpu->cpum.GstCtx.ss.u64Base = 0;
4109 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
4110 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
4111
4112 /* Flush the prefetch buffer. */
4113 IEM_FLUSH_PREFETCH_HEAVY(pVCpu, cbInstr);
4114
4115 /*
4116 * Handle debug events.
4117 * If TF isn't masked, we're supposed to raise a single step #DB.
4118 */
4119 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
4120}
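/* For context, a sketch (not taken from any particular guest) of how a 64-bit
   guest kernel typically programs the MSRs consumed above before SYSCALL is
   ever executed; the names are illustrative only:
       wrmsr(MSR_EFER,   rdmsr(MSR_EFER) | EFER_SCE);   // enable SYSCALL/SYSRET
       wrmsr(MSR_STAR,   ((uint64_t)user_cs_base << 48) | ((uint64_t)kernel_cs << 32));
       wrmsr(MSR_LSTAR,  (uint64_t)syscall_entry_64);   // 64-bit entry point
       wrmsr(MSR_CSTAR,  (uint64_t)syscall_entry_compat);
       wrmsr(MSR_SFMASK, X86_EFL_TF | X86_EFL_DF | X86_EFL_IF);
   On entry the return RIP is left in RCX and the old RFLAGS in R11, exactly
   as committed by the long mode branch above. */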
4121
4122
4123/**
4124 * Implements SYSRET (AMD and Intel64).
4125 *
4126 * @param enmEffOpSize The effective operand size.
4127 */
4128IEM_CIMPL_DEF_1(iemCImpl_sysret, IEMMODE, enmEffOpSize)
4129
4130{
4131 RT_NOREF_PV(cbInstr);
4132
4133 /*
4134 * Check preconditions.
4135 *
4136     * Note that, according to the documentation, CPUs may load slightly different
4137     * values into CS and SS than we allow here.  This has yet to be checked on real
4138 * hardware.
4139 */
4140 if (!(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_SCE))
4141 {
4142 Log(("sysret: Not enabled in EFER -> #UD\n"));
4143 return iemRaiseUndefinedOpcode(pVCpu);
4144 }
4145 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !CPUMIsGuestInLongModeEx(IEM_GET_CTX(pVCpu)))
4146 {
4147 Log(("sysret: Only available in long mode on intel -> #UD\n"));
4148 return iemRaiseUndefinedOpcode(pVCpu);
4149 }
4150 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
4151 {
4152 Log(("sysret: Protected mode is required -> #GP(0)\n"));
4153 return iemRaiseGeneralProtectionFault0(pVCpu);
4154 }
4155 if (IEM_GET_CPL(pVCpu) != 0)
4156 {
4157 Log(("sysret: CPL must be 0 not %u -> #GP(0)\n", IEM_GET_CPL(pVCpu)));
4158 return iemRaiseGeneralProtectionFault0(pVCpu);
4159 }
4160
4161 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SYSCALL_MSRS);
4162
4163 /** @todo Does SYSRET verify CS != 0 and SS != 0? Neither is valid in ring-3. */
4164 uint16_t uNewCs = (pVCpu->cpum.GstCtx.msrSTAR >> MSR_K6_STAR_SYSRET_CS_SS_SHIFT) & X86_SEL_MASK_OFF_RPL;
4165 uint16_t uNewSs = uNewCs + 8;
4166 if (enmEffOpSize == IEMMODE_64BIT)
4167 uNewCs += 16;
4168 if (uNewCs == 0 || uNewSs == 0)
4169 {
4170 Log(("sysret: msrSTAR.CS = 0 or SS = 0 -> #GP(0)\n"));
4171 return iemRaiseGeneralProtectionFault0(pVCpu);
4172 }
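    /* I.e. with STAR.SYSRET_CS as the base: 32-bit userland returns to
       CS = base|3 and SS = base+8|3, while 64-bit userland returns to
       CS = base+16|3 with the same SS.  (Just restating the math above.) */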
4173
4174 /*
4175 * Commit it.
4176 */
4177 bool f32Bit = true;
4178 if (CPUMIsGuestInLongModeEx(IEM_GET_CTX(pVCpu)))
4179 {
4180 if (enmEffOpSize == IEMMODE_64BIT)
4181 {
4182 Log(("sysret: %04x:%016RX64 [efl=%#llx] -> %04x:%016RX64 [r11=%#llx]\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags.u, uNewCs, pVCpu->cpum.GstCtx.rcx, pVCpu->cpum.GstCtx.r11));
4183        /* Note! We disregard the Intel manual regarding the RCX canonical
4184           check; ask Intel+Xen why AMD doesn't do it. */
4185 pVCpu->cpum.GstCtx.rip = pVCpu->cpum.GstCtx.rcx;
4186 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_L | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC
4187 | (3 << X86DESCATTR_DPL_SHIFT);
4188 f32Bit = false;
4189 }
4190 else
4191 {
4192 Log(("sysret: %04x:%016RX64 [efl=%#llx] -> %04x:%08RX32 [r11=%#llx]\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags.u, uNewCs, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.r11));
4193 pVCpu->cpum.GstCtx.rip = pVCpu->cpum.GstCtx.ecx;
4194 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC
4195 | (3 << X86DESCATTR_DPL_SHIFT);
4196 }
4197 /** @todo testcase: See what kind of flags we can make SYSRET restore and
4198 * what it really ignores. RF and VM are hinted at being zero, by AMD.
4199 * Intel says: RFLAGS := (R11 & 3C7FD7H) | 2; */
4200 pVCpu->cpum.GstCtx.rflags.u = pVCpu->cpum.GstCtx.r11 & (X86_EFL_POPF_BITS | X86_EFL_VIF | X86_EFL_VIP);
4201 pVCpu->cpum.GstCtx.rflags.u |= X86_EFL_RA1_MASK;
4202 }
4203 else
4204 {
4205 Log(("sysret: %04x:%08RX32 [efl=%#x] -> %04x:%08RX32\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u, uNewCs, pVCpu->cpum.GstCtx.ecx));
4206 pVCpu->cpum.GstCtx.rip = pVCpu->cpum.GstCtx.rcx;
4207 pVCpu->cpum.GstCtx.rflags.u |= X86_EFL_IF;
4208 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC
4209 | (3 << X86DESCATTR_DPL_SHIFT);
4210 }
4211 pVCpu->cpum.GstCtx.cs.Sel = uNewCs | 3;
4212 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCs | 3;
4213 pVCpu->cpum.GstCtx.cs.u64Base = 0;
4214 pVCpu->cpum.GstCtx.cs.u32Limit = UINT32_MAX;
4215 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
4216
4217    /* The SS hidden bits remain unchanged says AMD; we presume they set DPL to 3.
4218       Intel (and presumably VIA) OTOH seems to load valid ring-3 values, see
4219       X86_BUG_SYSRET_SS_ATTRS in Linux 5.3. */
4220 if (IEM_IS_GUEST_CPU_AMD(pVCpu))
4221 {
4222 Log(("sysret: ss:rsp=%04x:%08RX64 attr=%x -> %04x:%08RX64 attr=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.ss.Attr.u, uNewSs | 3, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.ss.Attr.u | (3 << X86DESCATTR_DPL_SHIFT) ));
4223 pVCpu->cpum.GstCtx.ss.Attr.u |= (3 << X86DESCATTR_DPL_SHIFT);
4224 }
4225 else
4226 {
4227 Log(("sysret: ss:rsp=%04x:%08RX64 attr=%x -> %04x:%08RX64 attr=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.ss.Attr.u, uNewSs | 3, pVCpu->cpum.GstCtx.rsp, X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_DT | X86_SEL_TYPE_RW_ACC | (3 << X86DESCATTR_DPL_SHIFT) ));
4228 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_DT | X86_SEL_TYPE_RW_ACC
4229 | (3 << X86DESCATTR_DPL_SHIFT);
4230 pVCpu->cpum.GstCtx.ss.u64Base = 0;
4231 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
4232 }
4233 pVCpu->cpum.GstCtx.ss.Sel = uNewSs | 3;
4234 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSs | 3;
4235 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
4236 /** @todo Testcase: verify that SS.u1Long and SS.u1DefBig are left unchanged
4237 * on sysret on AMD and not on intel. */
4238
4239 if (!f32Bit)
4240 pVCpu->iem.s.fExec = (pVCpu->iem.s.fExec & ~(IEM_F_MODE_MASK | IEM_F_X86_CPL_MASK | IEM_F_X86_AC))
4241 | (3 << IEM_F_X86_CPL_SHIFT)
4242 | IEM_F_MODE_X86_64BIT
4243 | iemCalcExecAcFlag(pVCpu);
4244 else
4245 pVCpu->iem.s.fExec = (pVCpu->iem.s.fExec & ~(IEM_F_MODE_MASK | IEM_F_X86_CPL_MASK | IEM_F_X86_AC))
4246 | (3 << IEM_F_X86_CPL_SHIFT)
4247 | IEM_F_MODE_X86_32BIT_PROT
4248 /** @todo sort out the SS.BASE/LIM/ATTR claim by AMD and maybe we can switch to
4249 * iemCalc32BitFlatIndicatorDsEs and move this up into the above branch. */
4250 | iemCalc32BitFlatIndicator(pVCpu)
4251 | iemCalcExecAcFlag(pVCpu);
4252
4253 /* Flush the prefetch buffer. */
4254 IEM_FLUSH_PREFETCH_HEAVY(pVCpu, cbInstr);
4255
4256/** @todo single step */
4257 return VINF_SUCCESS;
4258}
4259
4260
4261/**
4262 * Implements SYSENTER (Intel, 32-bit AMD).
4263 */
4264IEM_CIMPL_DEF_0(iemCImpl_sysenter)
4265{
4266 RT_NOREF(cbInstr);
4267
4268 /*
4269 * Check preconditions.
4270 *
4271     * Note that, according to the documentation, CPUs may load slightly different
4272     * values into CS and SS than we allow here.  This has yet to be checked on real
4273 * hardware.
4274 */
4275 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSysEnter)
4276 {
4277        Log(("sysenter: not supported -> #UD\n"));
4278 return iemRaiseUndefinedOpcode(pVCpu);
4279 }
4280 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
4281 {
4282 Log(("sysenter: Protected or long mode is required -> #GP(0)\n"));
4283 return iemRaiseGeneralProtectionFault0(pVCpu);
4284 }
4285 bool fIsLongMode = CPUMIsGuestInLongModeEx(IEM_GET_CTX(pVCpu));
4286 if (IEM_IS_GUEST_CPU_AMD(pVCpu) && fIsLongMode)
4287 {
4288 Log(("sysenter: Only available in protected mode on AMD -> #UD\n"));
4289 return iemRaiseUndefinedOpcode(pVCpu);
4290 }
4291 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SYSENTER_MSRS);
4292 uint16_t uNewCs = pVCpu->cpum.GstCtx.SysEnter.cs;
4293 if ((uNewCs & X86_SEL_MASK_OFF_RPL) == 0)
4294 {
4295 Log(("sysenter: SYSENTER_CS = %#x -> #GP(0)\n", uNewCs));
4296 return iemRaiseGeneralProtectionFault0(pVCpu);
4297 }
4298
4299 /* This test isn't in the docs, it's just a safeguard against missing
4300 canonical checks when writing the registers. */
4301 if (RT_LIKELY( !fIsLongMode
4302 || ( IEM_IS_CANONICAL(pVCpu->cpum.GstCtx.SysEnter.eip)
4303 && IEM_IS_CANONICAL(pVCpu->cpum.GstCtx.SysEnter.esp))))
4304 { /* likely */ }
4305 else
4306 {
4307        Log(("sysenter: SYSENTER_EIP = %#RX64 and/or SYSENTER_ESP = %#RX64 not canonical -> #UD\n",
4308 pVCpu->cpum.GstCtx.SysEnter.eip, pVCpu->cpum.GstCtx.SysEnter.esp));
4309 return iemRaiseUndefinedOpcode(pVCpu);
4310 }
4311
4312/** @todo Test: Sysenter from ring-0, ring-1 and ring-2. */
4313
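    /* Summary of what the code below loads: CS = IA32_SYSENTER_CS & 0xfffc with
       SS = CS + 8, RIP/RSP from IA32_SYSENTER_EIP/ESP, flat (or long mode)
       segments, IF/VM/RF cleared, and the CPL becomes 0. */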
4314 /*
4315 * Update registers and commit.
4316 */
4317 if (fIsLongMode)
4318 {
4319 Log(("sysenter: %04x:%016RX64 [efl=%#llx] -> %04x:%016RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
4320 pVCpu->cpum.GstCtx.rflags.u, uNewCs & X86_SEL_MASK_OFF_RPL, pVCpu->cpum.GstCtx.SysEnter.eip));
4321 pVCpu->cpum.GstCtx.rip = pVCpu->cpum.GstCtx.SysEnter.eip;
4322 pVCpu->cpum.GstCtx.rsp = pVCpu->cpum.GstCtx.SysEnter.esp;
4323 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESCATTR_L | X86DESCATTR_G | X86DESCATTR_P | X86DESCATTR_DT
4324 | X86DESCATTR_LIMIT_HIGH | X86_SEL_TYPE_ER_ACC;
4325 pVCpu->iem.s.fExec = (pVCpu->iem.s.fExec & ~(IEM_F_MODE_MASK | IEM_F_X86_CPL_MASK | IEM_F_X86_AC))
4326 | IEM_F_MODE_X86_64BIT;
4327 }
4328 else
4329 {
4330 Log(("sysenter: %04x:%08RX32 [efl=%#llx] -> %04x:%08RX32\n", pVCpu->cpum.GstCtx.cs.Sel, (uint32_t)pVCpu->cpum.GstCtx.rip,
4331 pVCpu->cpum.GstCtx.rflags.u, uNewCs & X86_SEL_MASK_OFF_RPL, (uint32_t)pVCpu->cpum.GstCtx.SysEnter.eip));
4332 pVCpu->cpum.GstCtx.rip = (uint32_t)pVCpu->cpum.GstCtx.SysEnter.eip;
4333 pVCpu->cpum.GstCtx.rsp = (uint32_t)pVCpu->cpum.GstCtx.SysEnter.esp;
4334 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESCATTR_D | X86DESCATTR_G | X86DESCATTR_P | X86DESCATTR_DT
4335 | X86DESCATTR_LIMIT_HIGH | X86_SEL_TYPE_ER_ACC;
4336 pVCpu->iem.s.fExec = (pVCpu->iem.s.fExec & ~(IEM_F_MODE_MASK | IEM_F_X86_CPL_MASK | IEM_F_X86_AC))
4337 | IEM_F_MODE_X86_32BIT_PROT
4338 | iemCalc32BitFlatIndicatorEsDs(pVCpu);
4339 }
4340 pVCpu->cpum.GstCtx.cs.Sel = uNewCs & X86_SEL_MASK_OFF_RPL;
4341 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCs & X86_SEL_MASK_OFF_RPL;
4342 pVCpu->cpum.GstCtx.cs.u64Base = 0;
4343 pVCpu->cpum.GstCtx.cs.u32Limit = UINT32_MAX;
4344 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
4345
4346 pVCpu->cpum.GstCtx.ss.Sel = (uNewCs & X86_SEL_MASK_OFF_RPL) + 8;
4347 pVCpu->cpum.GstCtx.ss.ValidSel = (uNewCs & X86_SEL_MASK_OFF_RPL) + 8;
4348 pVCpu->cpum.GstCtx.ss.u64Base = 0;
4349 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
4350 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESCATTR_D | X86DESCATTR_G | X86DESCATTR_P | X86DESCATTR_DT
4351 | X86DESCATTR_LIMIT_HIGH | X86_SEL_TYPE_RW_ACC;
4352 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
4353
4354 pVCpu->cpum.GstCtx.rflags.Bits.u1IF = 0;
4355 pVCpu->cpum.GstCtx.rflags.Bits.u1VM = 0;
4356 pVCpu->cpum.GstCtx.rflags.Bits.u1RF = 0;
4357
4358 /* Flush the prefetch buffer. */
4359 IEM_FLUSH_PREFETCH_HEAVY(pVCpu, cbInstr);
4360
4361/** @todo single stepping */
4362 return VINF_SUCCESS;
4363}
4364
4365
4366/**
4367 * Implements SYSEXIT (Intel, 32-bit AMD).
4368 *
4369 * @param enmEffOpSize The effective operand size.
4370 */
4371IEM_CIMPL_DEF_1(iemCImpl_sysexit, IEMMODE, enmEffOpSize)
4372{
4373 RT_NOREF(cbInstr);
4374
4375 /*
4376 * Check preconditions.
4377 *
4378     * Note that, according to the documentation, CPUs may load slightly different
4379     * values into CS and SS than we allow here.  This has yet to be checked on real
4380 * hardware.
4381 */
4382 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSysEnter)
4383 {
4384        Log(("sysexit: not supported -> #UD\n"));
4385 return iemRaiseUndefinedOpcode(pVCpu);
4386 }
4387 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
4388 {
4389 Log(("sysexit: Protected or long mode is required -> #GP(0)\n"));
4390 return iemRaiseGeneralProtectionFault0(pVCpu);
4391 }
4392 bool fIsLongMode = CPUMIsGuestInLongModeEx(IEM_GET_CTX(pVCpu));
4393 if (IEM_IS_GUEST_CPU_AMD(pVCpu) && fIsLongMode)
4394 {
4395 Log(("sysexit: Only available in protected mode on AMD -> #UD\n"));
4396 return iemRaiseUndefinedOpcode(pVCpu);
4397 }
4398 if (IEM_GET_CPL(pVCpu) != 0)
4399 {
4400 Log(("sysexit: CPL(=%u) != 0 -> #GP(0)\n", IEM_GET_CPL(pVCpu)));
4401 return iemRaiseGeneralProtectionFault0(pVCpu);
4402 }
4403 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SYSENTER_MSRS);
4404 uint16_t uNewCs = pVCpu->cpum.GstCtx.SysEnter.cs;
4405 if ((uNewCs & X86_SEL_MASK_OFF_RPL) == 0)
4406 {
4407 Log(("sysexit: SYSENTER_CS = %#x -> #GP(0)\n", uNewCs));
4408 return iemRaiseGeneralProtectionFault0(pVCpu);
4409 }
4410
4411 /*
4412 * Update registers and commit.
4413 */
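    /* Selector math below, with uNewCs = IA32_SYSENTER_CS: the 64-bit flavour
       returns to CS = (uNewCs | 3) + 32 and SS = (uNewCs | 3) + 40, the 32-bit
       one to CS = (uNewCs | 3) + 16 and SS = (uNewCs | 3) + 24; RIP comes from
       RDX/EDX and RSP from RCX/ECX, and the CPL becomes 3. */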
4414 if (enmEffOpSize == IEMMODE_64BIT)
4415 {
4416 Log(("sysexit: %04x:%016RX64 [efl=%#llx] -> %04x:%016RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
4417 pVCpu->cpum.GstCtx.rflags.u, (uNewCs | 3) + 32, pVCpu->cpum.GstCtx.rcx));
4418 pVCpu->cpum.GstCtx.rip = pVCpu->cpum.GstCtx.rdx;
4419 pVCpu->cpum.GstCtx.rsp = pVCpu->cpum.GstCtx.rcx;
4420 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESCATTR_L | X86DESCATTR_G | X86DESCATTR_P | X86DESCATTR_DT
4421 | X86DESCATTR_LIMIT_HIGH | X86_SEL_TYPE_ER_ACC | (3 << X86DESCATTR_DPL_SHIFT);
4422 pVCpu->cpum.GstCtx.cs.Sel = (uNewCs | 3) + 32;
4423 pVCpu->cpum.GstCtx.cs.ValidSel = (uNewCs | 3) + 32;
4424 pVCpu->cpum.GstCtx.ss.Sel = (uNewCs | 3) + 40;
4425 pVCpu->cpum.GstCtx.ss.ValidSel = (uNewCs | 3) + 40;
4426
4427 pVCpu->iem.s.fExec = (pVCpu->iem.s.fExec & ~(IEM_F_MODE_MASK | IEM_F_X86_CPL_MASK))
4428 | (3 << IEM_F_X86_CPL_SHIFT)
4429 | IEM_F_MODE_X86_64BIT
4430 | iemCalcExecAcFlag(pVCpu);
4431 }
4432 else
4433 {
4434 Log(("sysexit: %04x:%08RX64 [efl=%#llx] -> %04x:%08RX32\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
4435 pVCpu->cpum.GstCtx.rflags.u, (uNewCs | 3) + 16, (uint32_t)pVCpu->cpum.GstCtx.edx));
4436 pVCpu->cpum.GstCtx.rip = pVCpu->cpum.GstCtx.edx;
4437 pVCpu->cpum.GstCtx.rsp = pVCpu->cpum.GstCtx.ecx;
4438 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESCATTR_D | X86DESCATTR_G | X86DESCATTR_P | X86DESCATTR_DT
4439 | X86DESCATTR_LIMIT_HIGH | X86_SEL_TYPE_ER_ACC | (3 << X86DESCATTR_DPL_SHIFT);
4440 pVCpu->cpum.GstCtx.cs.Sel = (uNewCs | 3) + 16;
4441 pVCpu->cpum.GstCtx.cs.ValidSel = (uNewCs | 3) + 16;
4442 pVCpu->cpum.GstCtx.ss.Sel = (uNewCs | 3) + 24;
4443 pVCpu->cpum.GstCtx.ss.ValidSel = (uNewCs | 3) + 24;
4444
4445 pVCpu->iem.s.fExec = (pVCpu->iem.s.fExec & ~(IEM_F_MODE_MASK | IEM_F_X86_CPL_MASK))
4446 | (3 << IEM_F_X86_CPL_SHIFT)
4447 | IEM_F_MODE_X86_32BIT_PROT
4448 | iemCalc32BitFlatIndicatorEsDs(pVCpu)
4449 | iemCalcExecAcFlag(pVCpu);
4450 }
4451 pVCpu->cpum.GstCtx.cs.u64Base = 0;
4452 pVCpu->cpum.GstCtx.cs.u32Limit = UINT32_MAX;
4453 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
4454
4455 pVCpu->cpum.GstCtx.ss.u64Base = 0;
4456 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
4457 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESCATTR_D | X86DESCATTR_G | X86DESCATTR_P | X86DESCATTR_DT
4458 | X86DESCATTR_LIMIT_HIGH | X86_SEL_TYPE_RW_ACC | (3 << X86DESCATTR_DPL_SHIFT);
4459 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
4460 pVCpu->cpum.GstCtx.rflags.Bits.u1RF = 0;
4461
4462/** @todo single stepping */
4463
4464 /* Flush the prefetch buffer. */
4465 IEM_FLUSH_PREFETCH_HEAVY(pVCpu, cbInstr);
4466
4467 return VINF_SUCCESS;
4468}
4469
4470
4471/**
4472 * Completes a MOV SReg,XXX or POP SReg instruction.
4473 *
4474 * When not modifying SS or when we're already in an interrupt shadow we
4475 * can update RIP and finish the instruction the normal way.
4476 *
4477 * Otherwise, the MOV/POP SS interrupt shadow that we now enable will block
4478 * both TF and DBx events. The TF will be ignored while the DBx ones will
4479 * be delayed till the next instruction boundary. For more details see
4480 * @sdmv3{077,200,6.8.3,Masking Exceptions and Interrupts When Switching Stacks}.
4481 */
4482DECLINLINE(VBOXSTRICTRC) iemCImpl_LoadSRegFinish(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iSegReg)
4483{
4484 if (iSegReg != X86_SREG_SS || CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx))
4485 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
4486
4487 iemRegAddToRip(pVCpu, cbInstr);
4488 pVCpu->cpum.GstCtx.eflags.uBoth &= ~X86_EFL_RF; /* Shadow int isn't set and DRx is delayed, so only clear RF. */
4489 CPUMSetInInterruptShadowSs(&pVCpu->cpum.GstCtx);
4490
4491 return VINF_SUCCESS;
4492}
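/* The classic guest sequence this is guarding is a stack switch done with
   plain moves (sketch):
       mov  ss, ax      ; interrupt shadow starts - interrupts & debug events held off
       mov  esp, ebp    ; completes the switch; pending debug events fire after this
   Hence the special handling of SS above. */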
4493
4494
4495/**
4496 * Common worker for 'pop SReg', 'mov SReg, GReg' and 'lXs GReg, reg/mem'.
4497 *
4498 * @param pVCpu The cross context virtual CPU structure of the calling
4499 * thread.
4500 * @param iSegReg The segment register number (valid).
4501 * @param uSel The new selector value.
4502 */
4503static VBOXSTRICTRC iemCImpl_LoadSRegWorker(PVMCPUCC pVCpu, uint8_t iSegReg, uint16_t uSel)
4504{
4505 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
4506 uint16_t *pSel = iemSRegRef(pVCpu, iSegReg);
4507 PCPUMSELREGHID pHid = iemSRegGetHid(pVCpu, iSegReg);
4508
4509 Assert(iSegReg <= X86_SREG_GS && iSegReg != X86_SREG_CS);
4510
4511 /*
4512 * Real mode and V8086 mode are easy.
4513 */
4514 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
4515 {
4516 *pSel = uSel;
4517 pHid->u64Base = (uint32_t)uSel << 4;
4518 pHid->ValidSel = uSel;
4519 pHid->fFlags = CPUMSELREG_FLAGS_VALID;
4520#if 0 /* AMD Volume 2, chapter 4.1 - "real mode segmentation" - states that limit and attributes are untouched. */
4521 /** @todo Does the CPU actually load limits and attributes in the
4522 * real/V8086 mode segment load case? It doesn't for CS in far
4523 * jumps... Affects unreal mode. */
4524 pHid->u32Limit = 0xffff;
4525 pHid->Attr.u = 0;
4526 pHid->Attr.n.u1Present = 1;
4527 pHid->Attr.n.u1DescType = 1;
4528 pHid->Attr.n.u4Type = iSegReg != X86_SREG_CS
4529 ? X86_SEL_TYPE_RW
4530 : X86_SEL_TYPE_READ | X86_SEL_TYPE_CODE;
4531#endif
4532
4533 /* Update the FLAT 32-bit mode flag, if we're in 32-bit unreal mode (unlikely): */
4534 if (RT_LIKELY(!IEM_IS_32BIT_CODE(pVCpu)))
4535 { /* likely */ }
4536 else if (uSel != 0)
4537 pVCpu->iem.s.fExec &= ~IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK;
4538 else
4539 pVCpu->iem.s.fExec = (pVCpu->iem.s.fExec & ~IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK)
4540 | iemCalc32BitFlatIndicator(pVCpu);
4541 }
4542 /*
4543 * Protected / long mode - null segment.
4544 *
4545 * Check if it's a null segment selector value first, that's OK for DS, ES,
4546 * FS and GS. If not null, then we have to load and parse the descriptor.
4547 */
4548 else if (!(uSel & X86_SEL_MASK_OFF_RPL))
4549 {
4550 Assert(iSegReg != X86_SREG_CS); /** @todo testcase for \#UD on MOV CS, ax! */
4551 if (iSegReg == X86_SREG_SS)
4552 {
4553 /* In 64-bit kernel mode, the stack can be 0 because of the way
4554          interrupts are dispatched. AMD seems to have a slightly more
4555 relaxed relationship to SS.RPL than intel does. */
4556 /** @todo We cannot 'mov ss, 3' in 64-bit kernel mode, can we? There is a testcase (bs-cpu-xcpt-1), but double check this! */
4557 if ( !IEM_IS_64BIT_CODE(pVCpu)
4558 || IEM_GET_CPL(pVCpu) > 2
4559 || ( uSel != IEM_GET_CPL(pVCpu)
4560 && !IEM_IS_GUEST_CPU_AMD(pVCpu)) )
4561 {
4562 Log(("load sreg %#x -> invalid stack selector, #GP(0)\n", uSel));
4563 return iemRaiseGeneralProtectionFault0(pVCpu);
4564 }
4565 }
4566
4567 *pSel = uSel; /* Not RPL, remember :-) */
4568 iemHlpLoadNullDataSelectorProt(pVCpu, pHid, uSel);
4569 if (iSegReg == X86_SREG_SS)
4570 pHid->Attr.u |= IEM_GET_CPL(pVCpu) << X86DESCATTR_DPL_SHIFT;
4571
4572 /* This will affect the FLAT 32-bit mode flag: */
4573 if ( iSegReg < X86_SREG_FS
4574 && IEM_IS_32BIT_CODE(pVCpu))
4575 pVCpu->iem.s.fExec &= ~IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK;
4576 }
4577 /*
4578 * Protected / long mode.
4579 */
4580 else
4581 {
4582 /* Fetch the descriptor. */
4583 IEMSELDESC Desc;
4584 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_GP); /** @todo Correct exception? */
4585 if (rcStrict != VINF_SUCCESS)
4586 return rcStrict;
4587
4588 /* Check GPs first. */
4589 if (!Desc.Legacy.Gen.u1DescType)
4590 {
4591 Log(("load sreg %d (=%#x) - system selector (%#x) -> #GP\n", iSegReg, uSel, Desc.Legacy.Gen.u4Type));
4592 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
4593 }
4594 if (iSegReg == X86_SREG_SS) /* SS gets different treatment */
4595 {
4596 if ( (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
4597 || !(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
4598 {
4599 Log(("load sreg SS, %#x - code or read only (%#x) -> #GP\n", uSel, Desc.Legacy.Gen.u4Type));
4600 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
4601 }
4602 if ((uSel & X86_SEL_RPL) != IEM_GET_CPL(pVCpu))
4603 {
4604 Log(("load sreg SS, %#x - RPL and CPL (%d) differs -> #GP\n", uSel, IEM_GET_CPL(pVCpu)));
4605 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
4606 }
4607 if (Desc.Legacy.Gen.u2Dpl != IEM_GET_CPL(pVCpu))
4608 {
4609 Log(("load sreg SS, %#x - DPL (%d) and CPL (%d) differs -> #GP\n", uSel, Desc.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
4610 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
4611 }
4612 }
4613 else
4614 {
4615 if ((Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
4616 {
4617 Log(("load sreg%u, %#x - execute only segment -> #GP\n", iSegReg, uSel));
4618 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
4619 }
4620 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
4621 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
4622 {
4623#if 0 /* this is what intel says. */
4624 if ( (uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
4625 && IEM_GET_CPL(pVCpu) > Desc.Legacy.Gen.u2Dpl)
4626 {
4627 Log(("load sreg%u, %#x - both RPL (%d) and CPL (%d) are greater than DPL (%d) -> #GP\n",
4628 iSegReg, uSel, (uSel & X86_SEL_RPL), IEM_GET_CPL(pVCpu), Desc.Legacy.Gen.u2Dpl));
4629 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
4630 }
4631#else /* this is what makes more sense. */
4632 if ((unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl)
4633 {
4634 Log(("load sreg%u, %#x - RPL (%d) is greater than DPL (%d) -> #GP\n",
4635 iSegReg, uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl));
4636 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
4637 }
4638 if (IEM_GET_CPL(pVCpu) > Desc.Legacy.Gen.u2Dpl)
4639 {
4640 Log(("load sreg%u, %#x - CPL (%d) is greater than DPL (%d) -> #GP\n",
4641 iSegReg, uSel, IEM_GET_CPL(pVCpu), Desc.Legacy.Gen.u2Dpl));
4642 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
4643 }
4644#endif
4645 }
4646 }
4647
4648 /* Is it there? */
4649 if (!Desc.Legacy.Gen.u1Present)
4650 {
4651 Log(("load sreg%d,%#x - segment not present -> #NP\n", iSegReg, uSel));
4652 return iemRaiseSelectorNotPresentBySelector(pVCpu, uSel);
4653 }
4654
4655 /* The base and limit. */
4656 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
4657 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
4658
4659 /*
4660 * Ok, everything checked out fine. Now set the accessed bit before
4661 * committing the result into the registers.
4662 */
4663 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4664 {
4665 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
4666 if (rcStrict != VINF_SUCCESS)
4667 return rcStrict;
4668 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4669 }
4670
4671 /* commit */
4672 *pSel = uSel;
4673 pHid->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
4674 pHid->u32Limit = cbLimit;
4675 pHid->u64Base = u64Base;
4676 pHid->ValidSel = uSel;
4677 pHid->fFlags = CPUMSELREG_FLAGS_VALID;
4678
4679 /** @todo check if the hidden bits are loaded correctly for 64-bit
4680 * mode. */
4681
4682 /* This will affect the FLAT 32-bit mode flag: */
4683 if ( iSegReg < X86_SREG_FS
4684 && IEM_IS_32BIT_CODE(pVCpu))
4685 pVCpu->iem.s.fExec = (pVCpu->iem.s.fExec & ~IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK)
4686 | iemCalc32BitFlatIndicator(pVCpu);
4687 }
4688
4689 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pHid));
4690 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
4691 return VINF_SUCCESS;
4692}
4693
4694
4695/**
4696 * Implements 'mov SReg, r/m'.
4697 *
4698 * @param iSegReg The segment register number (valid).
4699 * @param uSel The new selector value.
4700 */
4701IEM_CIMPL_DEF_2(iemCImpl_load_SReg, uint8_t, iSegReg, uint16_t, uSel)
4702{
4703 VBOXSTRICTRC rcStrict = iemCImpl_LoadSRegWorker(pVCpu, iSegReg, uSel);
4704 if (rcStrict == VINF_SUCCESS)
4705 rcStrict = iemCImpl_LoadSRegFinish(pVCpu, cbInstr, iSegReg);
4706 return rcStrict;
4707}
4708
4709
4710/**
4711 * Implements 'pop SReg'.
4712 *
4713 * @param iSegReg The segment register number (valid).
4714 * @param   enmEffOpSize    The effective operand size (valid).
4715 */
4716IEM_CIMPL_DEF_2(iemCImpl_pop_Sreg, uint8_t, iSegReg, IEMMODE, enmEffOpSize)
4717{
4718 VBOXSTRICTRC rcStrict;
4719
4720 /*
4721 * Read the selector off the stack and join paths with mov ss, reg.
4722 */
4723 RTUINT64U TmpRsp;
4724 TmpRsp.u = pVCpu->cpum.GstCtx.rsp;
4725 switch (enmEffOpSize)
4726 {
4727 case IEMMODE_16BIT:
4728 {
4729 uint16_t uSel;
4730 rcStrict = iemMemStackPopU16Ex(pVCpu, &uSel, &TmpRsp);
4731 if (rcStrict == VINF_SUCCESS)
4732 rcStrict = iemCImpl_LoadSRegWorker(pVCpu, iSegReg, uSel);
4733 break;
4734 }
4735
4736 case IEMMODE_32BIT:
4737 {
4738            /* Modern Intel CPUs only do a WORD sized access here, as far as both
4739               segmentation and paging are concerned. So, we have to emulate
4740               this to make bs3-cpu-weird-1 happy. */
4741 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
4742 {
4743 /* We don't have flexible enough stack primitives here, so just
4744 do a word pop and add two bytes to SP/RSP on success. */
4745 uint16_t uSel;
4746 rcStrict = iemMemStackPopU16Ex(pVCpu, &uSel, &TmpRsp);
4747 if (rcStrict == VINF_SUCCESS)
4748 {
4749 iemRegAddToRspEx(pVCpu, &TmpRsp, sizeof(uint32_t) - sizeof(uint16_t));
4750 rcStrict = iemCImpl_LoadSRegWorker(pVCpu, iSegReg, uSel);
4751 }
4752 }
4753 else
4754 {
4755 uint32_t u32Value;
4756 rcStrict = iemMemStackPopU32Ex(pVCpu, &u32Value, &TmpRsp);
4757 if (rcStrict == VINF_SUCCESS)
4758 rcStrict = iemCImpl_LoadSRegWorker(pVCpu, iSegReg, (uint16_t)u32Value);
4759 }
4760 break;
4761 }
4762
4763 case IEMMODE_64BIT:
4764 {
4765 /* Like for the 32-bit case above, intel only does a WORD access. */
4766 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
4767 {
4768 uint16_t uSel;
4769 rcStrict = iemMemStackPopU16Ex(pVCpu, &uSel, &TmpRsp);
4770 if (rcStrict == VINF_SUCCESS)
4771 {
4772 iemRegAddToRspEx(pVCpu, &TmpRsp, sizeof(uint64_t) - sizeof(uint16_t));
4773 rcStrict = iemCImpl_LoadSRegWorker(pVCpu, iSegReg, uSel);
4774 }
4775 }
4776 else
4777 {
4778 uint64_t u64Value;
4779 rcStrict = iemMemStackPopU64Ex(pVCpu, &u64Value, &TmpRsp);
4780 if (rcStrict == VINF_SUCCESS)
4781 rcStrict = iemCImpl_LoadSRegWorker(pVCpu, iSegReg, (uint16_t)u64Value);
4782 }
4783 break;
4784 }
4785 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4786 }
4787
4788 /*
4789 * If the load succeeded, commit the stack change and finish the instruction.
4790 */
4791 if (rcStrict == VINF_SUCCESS)
4792 {
4793 pVCpu->cpum.GstCtx.rsp = TmpRsp.u;
4794 rcStrict = iemCImpl_LoadSRegFinish(pVCpu, cbInstr, iSegReg);
4795 }
4796
4797 return rcStrict;
4798}
4799
4800
4801/**
4802 * Implements lgs, lfs, les, lds & lss.
4803 */
4804IEM_CIMPL_DEF_5(iemCImpl_load_SReg_Greg, uint16_t, uSel, uint64_t, offSeg, uint8_t, iSegReg, uint8_t, iGReg, IEMMODE, enmEffOpSize)
4805{
4806 /*
4807 * Use iemCImpl_LoadSRegWorker to do the tricky segment register loading.
4808 */
4809    /** @todo verify and test that mov, pop and lXs perform the segment
4810     *        register loading in the exact same way. */
4811 VBOXSTRICTRC rcStrict = iemCImpl_LoadSRegWorker(pVCpu, iSegReg, uSel);
4812 if (rcStrict == VINF_SUCCESS)
4813 {
4814 switch (enmEffOpSize)
4815 {
4816 case IEMMODE_16BIT:
4817 iemGRegStoreU16(pVCpu, iGReg, offSeg);
4818 break;
4819 case IEMMODE_32BIT:
4820 case IEMMODE_64BIT:
4821 iemGRegStoreU64(pVCpu, iGReg, offSeg);
4822 break;
4823 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4824 }
4825 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
4826 }
4827 return rcStrict;
4828}
4829
4830
4831/**
4832 * Helper for VERR, VERW, LAR, and LSL; fetches the descriptor for the given selector.
4833 *
4834 * @retval VINF_SUCCESS on success.
4835 * @retval VINF_IEM_SELECTOR_NOT_OK if the selector isn't ok.
4836 * @retval iemMemFetchSysU64 return value.
4837 *
4838 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4839 * @param uSel The selector value.
4840 * @param fAllowSysDesc Whether system descriptors are OK or not.
4841 * @param pDesc Where to return the descriptor on success.
4842 */
4843static VBOXSTRICTRC iemCImpl_LoadDescHelper(PVMCPUCC pVCpu, uint16_t uSel, bool fAllowSysDesc, PIEMSELDESC pDesc)
4844{
4845 pDesc->Long.au64[0] = 0;
4846 pDesc->Long.au64[1] = 0;
4847
4848 if (!(uSel & X86_SEL_MASK_OFF_RPL)) /** @todo test this on 64-bit. */
4849 return VINF_IEM_SELECTOR_NOT_OK;
4850
4851 /* Within the table limits? */
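    /* Note: OR-ing the selector with 7 yields the offset of the last byte of the
       8-byte descriptor, so the comparisons below check that the whole entry
       fits within the table limit. */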
4852 RTGCPTR GCPtrBase;
4853 if (uSel & X86_SEL_LDT)
4854 {
4855 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_LDTR);
4856 if ( !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present
4857 || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit )
4858 return VINF_IEM_SELECTOR_NOT_OK;
4859 GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;
4860 }
4861 else
4862 {
4863 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR);
4864 if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)
4865 return VINF_IEM_SELECTOR_NOT_OK;
4866 GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;
4867 }
4868
4869 /* Fetch the descriptor. */
4870 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
4871 if (rcStrict != VINF_SUCCESS)
4872 return rcStrict;
4873 if (!pDesc->Legacy.Gen.u1DescType)
4874 {
4875 if (!fAllowSysDesc)
4876 return VINF_IEM_SELECTOR_NOT_OK;
4877 if (CPUMIsGuestInLongModeEx(IEM_GET_CTX(pVCpu)))
4878 {
4879 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 8);
4880 if (rcStrict != VINF_SUCCESS)
4881 return rcStrict;
4882 }
4883
4884 }
4885
4886 return VINF_SUCCESS;
4887}
4888
4889
4890/**
4891 * Implements verr (fWrite = false) and verw (fWrite = true).
4892 */
4893IEM_CIMPL_DEF_2(iemCImpl_VerX, uint16_t, uSel, bool, fWrite)
4894{
4895 Assert(!IEM_IS_REAL_OR_V86_MODE(pVCpu));
4896
4897    /** @todo figure out whether the accessed bit is set or not. */
4898
4899 bool fAccessible = true;
4900 IEMSELDESC Desc;
4901 VBOXSTRICTRC rcStrict = iemCImpl_LoadDescHelper(pVCpu, uSel, false /*fAllowSysDesc*/, &Desc);
4902 if (rcStrict == VINF_SUCCESS)
4903 {
4904 /* Check the descriptor, order doesn't matter much here. */
4905 if ( !Desc.Legacy.Gen.u1DescType
4906 || !Desc.Legacy.Gen.u1Present)
4907 fAccessible = false;
4908 else
4909 {
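            /* verw requires a writable data segment (code segments are never
               writable); verr accepts any readable segment, i.e. data or
               readable code. */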
4910 if ( fWrite
4911 ? (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE
4912 : (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
4913 fAccessible = false;
4914
4915 /** @todo testcase for the conforming behavior. */
4916 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
4917 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
4918 {
4919 if ((unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl)
4920 fAccessible = false;
4921 else if (IEM_GET_CPL(pVCpu) > Desc.Legacy.Gen.u2Dpl)
4922 fAccessible = false;
4923 }
4924 }
4925
4926 }
4927 else if (rcStrict == VINF_IEM_SELECTOR_NOT_OK)
4928 fAccessible = false;
4929 else
4930 return rcStrict;
4931
4932 /* commit */
4933 pVCpu->cpum.GstCtx.eflags.Bits.u1ZF = fAccessible;
4934
4935 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
4936}
4937
4938
4939/**
4940 * Implements LAR and LSL with 64-bit operand size.
4941 *
4942 * @returns VINF_SUCCESS.
4943 * @param pu64Dst Pointer to the destination register.
4944 * @param uSel The selector to load details for.
4945 * @param fIsLar true = LAR, false = LSL.
4946 */
4947IEM_CIMPL_DEF_3(iemCImpl_LarLsl_u64, uint64_t *, pu64Dst, uint16_t, uSel, bool, fIsLar)
4948{
4949 Assert(!IEM_IS_REAL_OR_V86_MODE(pVCpu));
4950
4951    /** @todo figure out whether the accessed bit is set or not. */
4952
4953 bool fDescOk = true;
4954 IEMSELDESC Desc;
4955 VBOXSTRICTRC rcStrict = iemCImpl_LoadDescHelper(pVCpu, uSel, true /*fAllowSysDesc*/, &Desc);
4956 if (rcStrict == VINF_SUCCESS)
4957 {
4958 /*
4959 * Check the descriptor type.
4960 */
4961 if (!Desc.Legacy.Gen.u1DescType)
4962 {
4963 if (CPUMIsGuestInLongModeEx(IEM_GET_CTX(pVCpu)))
4964 {
4965 if (Desc.Long.Gen.u5Zeros)
4966 fDescOk = false;
4967 else
4968 switch (Desc.Long.Gen.u4Type)
4969 {
4970 /** @todo Intel lists 0 as valid for LSL, verify whether that's correct */
4971 case AMD64_SEL_TYPE_SYS_TSS_AVAIL:
4972 case AMD64_SEL_TYPE_SYS_TSS_BUSY:
4973 case AMD64_SEL_TYPE_SYS_LDT: /** @todo Intel lists this as invalid for LAR, AMD and 32-bit does otherwise. */
4974 break;
4975 case AMD64_SEL_TYPE_SYS_CALL_GATE:
4976 fDescOk = fIsLar;
4977 break;
4978 default:
4979 fDescOk = false;
4980 break;
4981 }
4982 }
4983 else
4984 {
4985 switch (Desc.Long.Gen.u4Type)
4986 {
4987 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
4988 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
4989 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
4990 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
4991 case X86_SEL_TYPE_SYS_LDT:
4992 break;
4993 case X86_SEL_TYPE_SYS_286_CALL_GATE:
4994 case X86_SEL_TYPE_SYS_TASK_GATE:
4995 case X86_SEL_TYPE_SYS_386_CALL_GATE:
4996 fDescOk = fIsLar;
4997 break;
4998 default:
4999 fDescOk = false;
5000 break;
5001 }
5002 }
5003 }
5004 if (fDescOk)
5005 {
5006 /*
5007 * Check the RPL/DPL/CPL interaction..
5008 */
5009 /** @todo testcase for the conforming behavior. */
5010 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF)) != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF)
5011 || !Desc.Legacy.Gen.u1DescType)
5012 {
5013 if ((unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl)
5014 fDescOk = false;
5015 else if (IEM_GET_CPL(pVCpu) > Desc.Legacy.Gen.u2Dpl)
5016 fDescOk = false;
5017 }
5018 }
5019
5020 if (fDescOk)
5021 {
5022 /*
5023 * All fine, start committing the result.
5024 */
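            /* LAR returns the 2nd descriptor dword with only the access-rights
               byte and the limit 19:16 + AVL/L/D/G nibbles kept; LSL returns the
               byte-granular limit (granularity already applied). */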
5025 if (fIsLar)
5026 *pu64Dst = Desc.Legacy.au32[1] & UINT32_C(0x00ffff00);
5027 else
5028 *pu64Dst = X86DESC_LIMIT_G(&Desc.Legacy);
5029 }
5030
5031 }
5032 else if (rcStrict == VINF_IEM_SELECTOR_NOT_OK)
5033 fDescOk = false;
5034 else
5035 return rcStrict;
5036
5037 /* commit flags value and advance rip. */
5038 pVCpu->cpum.GstCtx.eflags.Bits.u1ZF = fDescOk;
5039 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
5040}
5041
5042
5043/**
5044 * Implements LAR and LSL with 16-bit operand size.
5045 *
5046 * @returns VINF_SUCCESS.
5047 * @param pu16Dst Pointer to the destination register.
5048 * @param uSel The selector to load details for.
5049 * @param fIsLar true = LAR, false = LSL.
5050 */
5051IEM_CIMPL_DEF_3(iemCImpl_LarLsl_u16, uint16_t *, pu16Dst, uint16_t, uSel, bool, fIsLar)
5052{
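    /* Run the 64-bit worker on a zero-extended copy and write back only the
       low 16 bits; the worker leaves the value untouched unless the selector
       checks out. */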
5053 uint64_t u64TmpDst = *pu16Dst;
5054 IEM_CIMPL_CALL_3(iemCImpl_LarLsl_u64, &u64TmpDst, uSel, fIsLar);
5055 *pu16Dst = u64TmpDst;
5056 return VINF_SUCCESS;
5057}
5058
5059
5060/**
5061 * Implements lgdt.
5062 *
5063 * @param   iEffSeg         The segment of the new gdtr contents.
5064 * @param GCPtrEffSrc The address of the new gdtr contents.
5065 * @param enmEffOpSize The effective operand size.
5066 */
5067IEM_CIMPL_DEF_3(iemCImpl_lgdt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc, IEMMODE, enmEffOpSize)
5068{
5069 if (IEM_GET_CPL(pVCpu) != 0)
5070 return iemRaiseGeneralProtectionFault0(pVCpu);
5071 Assert(!pVCpu->cpum.GstCtx.eflags.Bits.u1VM);
5072
5073 if (!IEM_IS_IN_GUEST(pVCpu))
5074 { /* probable */ }
5075 else if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
5076 && IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_DESC_TABLE_EXIT))
5077 {
5078 Log(("lgdt: Guest intercept -> VM-exit\n"));
5079 IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(pVCpu, VMX_EXIT_GDTR_IDTR_ACCESS, VMXINSTRID_LGDT, cbInstr);
5080 }
5081 else if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_GDTR_WRITES))
5082 {
5083 Log(("lgdt: Guest intercept -> #VMEXIT\n"));
5084 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
5085 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_GDTR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5086 }
5087
5088 /*
5089 * Fetch the limit and base address.
5090 */
5091 uint16_t cbLimit;
5092 RTGCPTR GCPtrBase;
5093 VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pVCpu, &cbLimit, &GCPtrBase, iEffSeg, GCPtrEffSrc, enmEffOpSize);
5094 if (rcStrict == VINF_SUCCESS)
5095 {
5096 if ( !IEM_IS_64BIT_CODE(pVCpu)
5097 || X86_IS_CANONICAL(GCPtrBase))
5098 {
5099 rcStrict = CPUMSetGuestGDTR(pVCpu, GCPtrBase, cbLimit);
5100 if (rcStrict == VINF_SUCCESS)
5101 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
5102 }
5103 else
5104 {
5105 Log(("iemCImpl_lgdt: Non-canonical base %04x:%RGv\n", cbLimit, GCPtrBase));
5106 return iemRaiseGeneralProtectionFault0(pVCpu);
5107 }
5108 }
5109 return rcStrict;
5110}
5111
5112
5113/**
5114 * Implements sgdt.
5115 *
5116 * @param iEffSeg The segment where to store the gdtr content.
5117 * @param GCPtrEffDst The address where to store the gdtr content.
5118 */
5119IEM_CIMPL_DEF_2(iemCImpl_sgdt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
5120{
5121 /*
5122 * Join paths with sidt.
5123 * Note! No CPL or V8086 checks here, it's a really sad story, ask Intel if
5124 * you really must know.
5125 */
5126 if (!IEM_IS_IN_GUEST(pVCpu))
5127 { /* probable */ }
5128 else if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
5129 && IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_DESC_TABLE_EXIT))
5130 {
5131 Log(("sgdt: Guest intercept -> VM-exit\n"));
5132 IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(pVCpu, VMX_EXIT_GDTR_IDTR_ACCESS, VMXINSTRID_SGDT, cbInstr);
5133 }
5134 else if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_GDTR_READS))
5135 {
5136 Log(("sgdt: Guest intercept -> #VMEXIT\n"));
5137 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
5138 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_GDTR_READ, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5139 }
5140
5141 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR);
5142 VBOXSTRICTRC rcStrict = iemMemStoreDataXdtr(pVCpu, pVCpu->cpum.GstCtx.gdtr.cbGdt, pVCpu->cpum.GstCtx.gdtr.pGdt, iEffSeg, GCPtrEffDst);
5143 if (rcStrict == VINF_SUCCESS)
5144 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
5145 return rcStrict;
5146}
5147
5148
5149/**
5150 * Implements lidt.
5151 *
5152 * @param   iEffSeg         The segment of the new idtr contents.
5153 * @param GCPtrEffSrc The address of the new idtr contents.
5154 * @param enmEffOpSize The effective operand size.
5155 */
5156IEM_CIMPL_DEF_3(iemCImpl_lidt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc, IEMMODE, enmEffOpSize)
5157{
5158 if (IEM_GET_CPL(pVCpu) != 0)
5159 return iemRaiseGeneralProtectionFault0(pVCpu);
5160 Assert(!pVCpu->cpum.GstCtx.eflags.Bits.u1VM);
5161
5162 if (!IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IDTR_WRITES))
5163 { /* probable */ }
5164 else
5165 {
5166 Log(("lidt: Guest intercept -> #VMEXIT\n"));
5167 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
5168 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_IDTR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5169 }
5170
5171 /*
5172 * Fetch the limit and base address.
5173 */
5174 uint16_t cbLimit;
5175 RTGCPTR GCPtrBase;
5176 VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pVCpu, &cbLimit, &GCPtrBase, iEffSeg, GCPtrEffSrc, enmEffOpSize);
5177 if (rcStrict == VINF_SUCCESS)
5178 {
5179 if ( !IEM_IS_64BIT_CODE(pVCpu)
5180 || X86_IS_CANONICAL(GCPtrBase))
5181 {
5182 CPUMSetGuestIDTR(pVCpu, GCPtrBase, cbLimit);
5183 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
5184 }
5185 else
5186 {
5187 Log(("iemCImpl_lidt: Non-canonical base %04x:%RGv\n", cbLimit, GCPtrBase));
5188 return iemRaiseGeneralProtectionFault0(pVCpu);
5189 }
5190 }
5191 return rcStrict;
5192}
5193
5194
5195/**
5196 * Implements sidt.
5197 *
5198 * @param iEffSeg The segment where to store the idtr content.
5199 * @param GCPtrEffDst The address where to store the idtr content.
5200 */
5201IEM_CIMPL_DEF_2(iemCImpl_sidt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
5202{
5203 /*
5204 * Join paths with sgdt.
5205 * Note! No CPL or V8086 checks here, it's a really sad story, ask Intel if
5206 * you really must know.
5207 */
5208 if (!IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IDTR_READS))
5209 { /* probable */ }
5210 else
5211 {
5212 Log(("sidt: Guest intercept -> #VMEXIT\n"));
5213 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
5214 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_IDTR_READ, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5215 }
5216
5217 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_IDTR);
5218 VBOXSTRICTRC rcStrict = iemMemStoreDataXdtr(pVCpu, pVCpu->cpum.GstCtx.idtr.cbIdt, pVCpu->cpum.GstCtx.idtr.pIdt, iEffSeg, GCPtrEffDst);
5219 if (rcStrict == VINF_SUCCESS)
5220 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
5221 return rcStrict;
5222}
5223
5224
5225/**
5226 * Implements lldt.
5227 *
5228 * @param uNewLdt The new LDT selector value.
5229 */
5230IEM_CIMPL_DEF_1(iemCImpl_lldt, uint16_t, uNewLdt)
5231{
5232 /*
5233 * Check preconditions.
5234 */
5235 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
5236 {
5237 Log(("lldt %04x - real or v8086 mode -> #GP(0)\n", uNewLdt));
5238 return iemRaiseUndefinedOpcode(pVCpu);
5239 }
5240 if (IEM_GET_CPL(pVCpu) != 0)
5241 {
5242 Log(("lldt %04x - CPL is %d -> #GP(0)\n", uNewLdt, IEM_GET_CPL(pVCpu)));
5243 return iemRaiseGeneralProtectionFault0(pVCpu);
5244 }
5245
5246 /* Nested-guest VMX intercept (SVM is after all checks). */
5247 /** @todo testcase: exit vs check order. */
5248 if ( !IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
5249 || !IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_DESC_TABLE_EXIT))
5250 { /* probable */ }
5251 else
5252 {
5253 Log(("lldt: Guest intercept -> VM-exit\n"));
5254 IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(pVCpu, VMX_EXIT_LDTR_TR_ACCESS, VMXINSTRID_LLDT, cbInstr);
5255 }
5256
5257 if (uNewLdt & X86_SEL_LDT)
5258 {
5259 Log(("lldt %04x - LDT selector -> #GP\n", uNewLdt));
5260 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewLdt);
5261 }
5262
5263 /*
5264 * Now, loading a NULL selector is easy.
5265 */
5266 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
5267 {
5268 /* Nested-guest SVM intercept. */
5269 if (!IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_LDTR_WRITES))
5270 { /* probable */ }
5271 else
5272 {
5273 Log(("lldt: Guest intercept -> #VMEXIT\n"));
5274 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
5275 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_LDTR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5276 }
5277
5278 Log(("lldt %04x: Loading NULL selector.\n", uNewLdt));
5279 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_LDTR;
5280 CPUMSetGuestLDTR(pVCpu, uNewLdt);
5281 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;
5282 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
5283 if (IEM_IS_GUEST_CPU_AMD(pVCpu))
5284 {
5285 /* AMD-V seems to leave the base and limit alone. */
5286 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESCATTR_UNUSABLE;
5287 }
5288 else
5289 {
5290 /* VT-x (Intel 3960x) seems to be doing the following. */
5291 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D;
5292 pVCpu->cpum.GstCtx.ldtr.u64Base = 0;
5293 pVCpu->cpum.GstCtx.ldtr.u32Limit = UINT32_MAX;
5294 }
5295
5296 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
5297 }
5298
5299 /*
5300 * Read the descriptor.
5301 */
5302 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_LDTR | CPUMCTX_EXTRN_GDTR);
5303 IEMSELDESC Desc;
5304 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uNewLdt, X86_XCPT_GP); /** @todo Correct exception? */
5305 if (rcStrict != VINF_SUCCESS)
5306 return rcStrict;
5307
5308 /* Check GPs first. */
5309 if (Desc.Legacy.Gen.u1DescType)
5310 {
5311 Log(("lldt %#x - not system selector (type %x) -> #GP\n", uNewLdt, Desc.Legacy.Gen.u4Type));
5312 return iemRaiseGeneralProtectionFault(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
5313 }
5314 if (Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
5315 {
5316 Log(("lldt %#x - not LDT selector (type %x) -> #GP\n", uNewLdt, Desc.Legacy.Gen.u4Type));
5317 return iemRaiseGeneralProtectionFault(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
5318 }
5319 uint64_t u64Base;
5320 if (!IEM_IS_LONG_MODE(pVCpu))
5321 u64Base = X86DESC_BASE(&Desc.Legacy);
5322 else
5323 {
5324 if (Desc.Long.Gen.u5Zeros)
5325 {
5326 Log(("lldt %#x - u5Zeros=%#x -> #GP\n", uNewLdt, Desc.Long.Gen.u5Zeros));
5327 return iemRaiseGeneralProtectionFault(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
5328 }
5329
5330 u64Base = X86DESC64_BASE(&Desc.Long);
5331 if (!IEM_IS_CANONICAL(u64Base))
5332 {
5333 Log(("lldt %#x - non-canonical base address %#llx -> #GP\n", uNewLdt, u64Base));
5334 return iemRaiseGeneralProtectionFault(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
5335 }
5336 }
5337
5338 /* NP */
5339 if (!Desc.Legacy.Gen.u1Present)
5340 {
5341 Log(("lldt %#x - segment not present -> #NP\n", uNewLdt));
5342 return iemRaiseSelectorNotPresentBySelector(pVCpu, uNewLdt);
5343 }
5344
5345 /* Nested-guest SVM intercept. */
5346 if (!IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_LDTR_WRITES))
5347 { /* probable */ }
5348 else
5349 {
5350 Log(("lldt: Guest intercept -> #VMEXIT\n"));
5351 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
5352 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_LDTR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5353 }
5354
5355 /*
5356 * It checks out alright, update the registers.
5357 */
5358/** @todo check if the actual value is loaded or if the RPL is dropped */
5359 CPUMSetGuestLDTR(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
5360 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt & X86_SEL_MASK_OFF_RPL;
5361 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
5362 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
5363 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&Desc.Legacy);
5364 pVCpu->cpum.GstCtx.ldtr.u64Base = u64Base;
5365
5366 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
5367}
5368
5369
5370/**
5371 * Implements sldt GReg.
5372 *
5373 * @param   iGReg           The general register to store the LDTR value in.
5374 * @param enmEffOpSize The operand size.
5375 */
5376IEM_CIMPL_DEF_2(iemCImpl_sldt_reg, uint8_t, iGReg, uint8_t, enmEffOpSize)
5377{
5378 if (!IEM_IS_IN_GUEST(pVCpu))
5379 { /* probable */ }
5380 else if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
5381 && IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_DESC_TABLE_EXIT))
5382 {
5383 Log(("sldt: Guest intercept -> VM-exit\n"));
5384 IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(pVCpu, VMX_EXIT_LDTR_TR_ACCESS, VMXINSTRID_SLDT, cbInstr);
5385 }
5386 else
5387 IEM_SVM_CHECK_INSTR_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_LDTR_READS, SVM_EXIT_LDTR_READ, 0, 0, cbInstr);
5388
5389 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_LDTR);
5390 switch (enmEffOpSize)
5391 {
5392 case IEMMODE_16BIT:
5393 iemGRegStoreU16(pVCpu, iGReg, pVCpu->cpum.GstCtx.ldtr.Sel);
5394 break;
5395 case IEMMODE_32BIT:
5396 case IEMMODE_64BIT:
5397 iemGRegStoreU64(pVCpu, iGReg, pVCpu->cpum.GstCtx.ldtr.Sel);
5398 break;
5399 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5400 }
5401 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
5402}
5403
5404
5405/**
5406 * Implements sldt mem.
5407 *
5408 * @param   iEffSeg         The effective segment register to use with @a GCPtrEffDst.
5409 * @param   GCPtrEffDst     Where to store the 16-bit LDTR value.
5410 */
5411IEM_CIMPL_DEF_2(iemCImpl_sldt_mem, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
5412{
5413 IEM_SVM_CHECK_INSTR_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_LDTR_READS, SVM_EXIT_LDTR_READ, 0, 0, cbInstr);
5414
5415 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_LDTR);
5416 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iEffSeg, GCPtrEffDst, pVCpu->cpum.GstCtx.ldtr.Sel);
5417 if (rcStrict == VINF_SUCCESS)
5418 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
5419 return rcStrict;
5420}
5421
5422
5423/**
5424 * Implements ltr.
5425 *
5426 * @param uNewTr The new TSS selector value.
5427 */
5428IEM_CIMPL_DEF_1(iemCImpl_ltr, uint16_t, uNewTr)
5429{
5430 /*
5431 * Check preconditions.
5432 */
5433 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
5434 {
5435 Log(("ltr %04x - real or v8086 mode -> #GP(0)\n", uNewTr));
5436 return iemRaiseUndefinedOpcode(pVCpu);
5437 }
5438 if (IEM_GET_CPL(pVCpu) != 0)
5439 {
5440 Log(("ltr %04x - CPL is %d -> #GP(0)\n", uNewTr, IEM_GET_CPL(pVCpu)));
5441 return iemRaiseGeneralProtectionFault0(pVCpu);
5442 }
5443 if ( !IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
5444 || !IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_DESC_TABLE_EXIT))
5445 { /* probable */ }
5446 else
5447 {
5448 Log(("ltr: Guest intercept -> VM-exit\n"));
5449 IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(pVCpu, VMX_EXIT_LDTR_TR_ACCESS, VMXINSTRID_LTR, cbInstr);
5450 }
5451 if (uNewTr & X86_SEL_LDT)
5452 {
5453 Log(("ltr %04x - LDT selector -> #GP\n", uNewTr));
5454 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewTr);
5455 }
5456 if (!(uNewTr & X86_SEL_MASK_OFF_RPL))
5457 {
5458 Log(("ltr %04x - NULL selector -> #GP(0)\n", uNewTr));
5459 return iemRaiseGeneralProtectionFault0(pVCpu);
5460 }
5461 if (!IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TR_WRITES))
5462 { /* probable */ }
5463 else
5464 {
5465 Log(("ltr: Guest intercept -> #VMEXIT\n"));
5466 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
5467 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5468 }
5469
5470 /*
5471 * Read the descriptor.
5472 */
5473 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_LDTR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_TR);
5474 IEMSELDESC Desc;
5475 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uNewTr, X86_XCPT_GP); /** @todo Correct exception? */
5476 if (rcStrict != VINF_SUCCESS)
5477 return rcStrict;
5478
5479 /* Check GPs first. */
5480 if (Desc.Legacy.Gen.u1DescType)
5481 {
5482 Log(("ltr %#x - not system selector (type %x) -> #GP\n", uNewTr, Desc.Legacy.Gen.u4Type));
5483 return iemRaiseGeneralProtectionFault(pVCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
5484 }
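    /* Only an available TSS may be loaded: a 32-bit/64-bit one (type 9) always,
       a 16-bit 286 one (type 1) only outside long mode. */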
5485 if ( Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL /* same as AMD64_SEL_TYPE_SYS_TSS_AVAIL */
5486 && ( Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
5487 || IEM_IS_LONG_MODE(pVCpu)) )
5488 {
5489 Log(("ltr %#x - not an available TSS selector (type %x) -> #GP\n", uNewTr, Desc.Legacy.Gen.u4Type));
5490 return iemRaiseGeneralProtectionFault(pVCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
5491 }
5492 uint64_t u64Base;
5493 if (!IEM_IS_LONG_MODE(pVCpu))
5494 u64Base = X86DESC_BASE(&Desc.Legacy);
5495 else
5496 {
5497 if (Desc.Long.Gen.u5Zeros)
5498 {
5499 Log(("ltr %#x - u5Zeros=%#x -> #GP\n", uNewTr, Desc.Long.Gen.u5Zeros));
5500 return iemRaiseGeneralProtectionFault(pVCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
5501 }
5502
5503 u64Base = X86DESC64_BASE(&Desc.Long);
5504 if (!IEM_IS_CANONICAL(u64Base))
5505 {
5506 Log(("ltr %#x - non-canonical base address %#llx -> #GP\n", uNewTr, u64Base));
5507 return iemRaiseGeneralProtectionFault(pVCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
5508 }
5509 }
5510
5511 /* NP */
5512 if (!Desc.Legacy.Gen.u1Present)
5513 {
5514 Log(("ltr %#x - segment not present -> #NP\n", uNewTr));
5515 return iemRaiseSelectorNotPresentBySelector(pVCpu, uNewTr);
5516 }
5517
5518 /*
5519 * Set it busy.
5520 * Note! Intel says this should lock down the whole descriptor, but we'll
5521     *       restrict ourselves to 32-bit for now due to lack of inline
5522 * assembly and such.
5523 */
5524 uint8_t bUnmapInfo;
5525 void *pvDesc;
5526 rcStrict = iemMemMap(pVCpu, &pvDesc, &bUnmapInfo, 8, UINT8_MAX,
5527 pVCpu->cpum.GstCtx.gdtr.pGdt + (uNewTr & X86_SEL_MASK_OFF_RPL), IEM_ACCESS_DATA_RW, 0);
5528 if (rcStrict != VINF_SUCCESS)
5529 return rcStrict;
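    /* The busy flag is bit 41 of the descriptor (bit 1 of the type field).  The
       switch below compensates for the mapping's alignment so that
       ASMAtomicBitSet always gets a 32-bit aligned base address. */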
5530 switch ((uintptr_t)pvDesc & 3)
5531 {
5532 case 0: ASMAtomicBitSet(pvDesc, 40 + 1); break;
5533 case 1: ASMAtomicBitSet((uint8_t *)pvDesc + 3, 40 + 1 - 24); break;
5534 case 2: ASMAtomicBitSet((uint8_t *)pvDesc + 2, 40 + 1 - 16); break;
5535 case 3: ASMAtomicBitSet((uint8_t *)pvDesc + 1, 40 + 1 - 8); break;
5536 }
5537 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
5538 if (rcStrict != VINF_SUCCESS)
5539 return rcStrict;
5540 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
5541
5542 /*
5543 * It checks out alright, update the registers.
5544 */
5545/** @todo check if the actual value is loaded or if the RPL is dropped */
5546 CPUMSetGuestTR(pVCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
5547 pVCpu->cpum.GstCtx.tr.ValidSel = uNewTr & X86_SEL_MASK_OFF_RPL;
5548 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
5549 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
5550 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&Desc.Legacy);
5551 pVCpu->cpum.GstCtx.tr.u64Base = u64Base;
5552
5553 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
5554}
5555
5556
5557/**
5558 * Implements str GReg.
5559 *
5560 * @param   iGReg           The general register to store the TR value in.
5561 * @param enmEffOpSize The operand size.
5562 */
5563IEM_CIMPL_DEF_2(iemCImpl_str_reg, uint8_t, iGReg, uint8_t, enmEffOpSize)
5564{
5565 if (!IEM_IS_IN_GUEST(pVCpu))
5566 { /* probable */ }
5567 else if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
5568 && IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_DESC_TABLE_EXIT))
5569 {
5570 Log(("str_reg: Guest intercept -> VM-exit\n"));
5571 IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(pVCpu, VMX_EXIT_LDTR_TR_ACCESS, VMXINSTRID_STR, cbInstr);
5572 }
5573 else
5574 IEM_SVM_CHECK_INSTR_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_TR_READS, SVM_EXIT_TR_READ, 0, 0, cbInstr);
5575
5576 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR);
5577 switch (enmEffOpSize)
5578 {
5579 case IEMMODE_16BIT:
5580 iemGRegStoreU16(pVCpu, iGReg, pVCpu->cpum.GstCtx.tr.Sel);
5581 break;
5582 case IEMMODE_32BIT:
5583 case IEMMODE_64BIT:
5584 iemGRegStoreU64(pVCpu, iGReg, pVCpu->cpum.GstCtx.tr.Sel);
5585 break;
5586 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5587 }
5588 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
5589}
5590
5591
5592/**
5593 * Implements str mem.
5594 *
5595 * @param   iEffSeg         The effective segment register to use with @a GCPtrEffDst.
5596 * @param   GCPtrEffDst     Where to store the 16-bit TR value.
5597 */
5598IEM_CIMPL_DEF_2(iemCImpl_str_mem, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
5599{
5600 if (!IEM_IS_IN_GUEST(pVCpu))
5601 { /* probable */ }
5602 else if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
5603 && IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_DESC_TABLE_EXIT))
5604 {
5605 Log(("str_mem: Guest intercept -> VM-exit\n"));
5606 IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(pVCpu, VMX_EXIT_LDTR_TR_ACCESS, VMXINSTRID_STR, cbInstr);
5607 }
5608 else
5609 IEM_SVM_CHECK_INSTR_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_TR_READS, SVM_EXIT_TR_READ, 0, 0, cbInstr);
5610
5611 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR);
5612 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iEffSeg, GCPtrEffDst, pVCpu->cpum.GstCtx.tr.Sel);
5613 if (rcStrict == VINF_SUCCESS)
5614 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
5615 return rcStrict;
5616}
5617
5618
5619/**
5620 * Implements mov GReg,CRx.
5621 *
5622 * @param iGReg The general register to store the CRx value in.
5623 * @param iCrReg The CRx register to read (valid).
5624 */
5625IEM_CIMPL_DEF_2(iemCImpl_mov_Rd_Cd, uint8_t, iGReg, uint8_t, iCrReg)
5626{
5627 if (IEM_GET_CPL(pVCpu) != 0)
5628 return iemRaiseGeneralProtectionFault0(pVCpu);
5629 Assert(!pVCpu->cpum.GstCtx.eflags.Bits.u1VM);
5630
5631 if (!IEM_SVM_IS_READ_CR_INTERCEPT_SET(pVCpu, iCrReg))
5632 { /* probable */ }
5633 else
5634 {
5635 Log(("iemCImpl_mov_Rd_Cd: Guest intercept CR%u -> #VMEXIT\n", iCrReg));
5636 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
5637 IEM_SVM_CRX_VMEXIT_RET(pVCpu, SVM_EXIT_READ_CR0 + iCrReg, IEMACCESSCRX_MOV_CRX, iGReg);
5638 }
5639
5640 /* Read it. */
5641 uint64_t crX;
5642 switch (iCrReg)
5643 {
5644 case 0:
5645 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
5646 crX = pVCpu->cpum.GstCtx.cr0;
5647 if (IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_386)
5648 crX |= UINT32_C(0x7fffffe0); /* All reserved CR0 flags are set on a 386, just like MSW on 286. */
5649 break;
5650 case 2:
5651 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_CR2);
5652 crX = pVCpu->cpum.GstCtx.cr2;
5653 break;
5654 case 3:
5655 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR3);
5656 crX = pVCpu->cpum.GstCtx.cr3;
5657 break;
5658 case 4:
5659 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
5660 crX = pVCpu->cpum.GstCtx.cr4;
5661 break;
5662 case 8:
5663 {
5664 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_APIC_TPR);
5665 if (!IEM_IS_IN_GUEST(pVCpu))
5666 { /* probable */ }
5667#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5668 else if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
5669 {
5670 VBOXSTRICTRC rcStrict = iemVmxVmexitInstrMovFromCr8(pVCpu, iGReg, cbInstr);
5671 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
5672 return rcStrict;
5673
5674 /*
5675                 * If the Mov-from-CR8 doesn't cause a VM-exit, bits 7:4 of the VTPR are copied
5676 * to bits 0:3 of the destination operand. Bits 63:4 of the destination operand
5677 * are cleared.
5678 *
5679 * See Intel Spec. 29.3 "Virtualizing CR8-based TPR Accesses"
5680 */
5681 if (IEM_VMX_IS_PROCCTLS_SET(pVCpu, VMX_PROC_CTLS_USE_TPR_SHADOW))
5682 {
5683 uint32_t const uTpr = iemVmxVirtApicReadRaw32(pVCpu, XAPIC_OFF_TPR);
5684 crX = (uTpr >> 4) & 0xf;
5685 break;
5686 }
5687 }
5688#endif
5689#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
5690 else if (pVCpu->iem.s.fExec & IEM_F_X86_CTX_SVM)
5691 {
5692 PCSVMVMCBCTRL pVmcbCtrl = &pVCpu->cpum.GstCtx.hwvirt.svm.Vmcb.ctrl;
5693 if (CPUMIsGuestSvmVirtIntrMasking(pVCpu, IEM_GET_CTX(pVCpu)))
5694 {
5695 crX = pVmcbCtrl->IntCtrl.n.u8VTPR & 0xf;
5696 break;
5697 }
5698 }
5699#endif
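            /* Plain CR8 reads return the task-priority class, i.e. bits 7:4 of
               the local APIC TPR, hence the shift below. */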
5700 uint8_t uTpr;
5701 int rc = APICGetTpr(pVCpu, &uTpr, NULL, NULL);
5702 if (RT_SUCCESS(rc))
5703 crX = uTpr >> 4;
5704 else
5705 crX = 0;
5706 break;
5707 }
5708 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
5709 }
5710
5711#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5712 if (!IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
5713 { /* probable */ }
5714 else
5715 switch (iCrReg)
5716 {
5717 /* CR0/CR4 reads are subject to masking when in VMX non-root mode. */
5718 case 0: crX = CPUMGetGuestVmxMaskedCr0(&pVCpu->cpum.GstCtx, pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64Cr0Mask.u); break;
5719 case 4: crX = CPUMGetGuestVmxMaskedCr4(&pVCpu->cpum.GstCtx, pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64Cr4Mask.u); break;
5720 case 3:
5721 {
5722 VBOXSTRICTRC rcStrict = iemVmxVmexitInstrMovFromCr3(pVCpu, iGReg, cbInstr);
5723 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
5724 return rcStrict;
5725 break;
5726 }
5727 }
5728#endif
5729
5730 /* Store it. */
5731 if (IEM_IS_64BIT_CODE(pVCpu))
5732 iemGRegStoreU64(pVCpu, iGReg, crX);
5733 else
5734 iemGRegStoreU64(pVCpu, iGReg, (uint32_t)crX);
5735
5736 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
5737}
5738
5739
5740/**
5741 * Implements smsw GReg.
5742 *
5743 * @param iGReg The general register to store the CRx value in.
5744 * @param enmEffOpSize The operand size.
5745 */
5746IEM_CIMPL_DEF_2(iemCImpl_smsw_reg, uint8_t, iGReg, uint8_t, enmEffOpSize)
5747{
5748 IEM_SVM_CHECK_READ_CR0_INTERCEPT(pVCpu, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */, cbInstr);
5749
5750#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5751 uint64_t u64MaskedCr0;
5752 if (!IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
5753 u64MaskedCr0 = pVCpu->cpum.GstCtx.cr0;
5754 else
5755 u64MaskedCr0 = CPUMGetGuestVmxMaskedCr0(&pVCpu->cpum.GstCtx, pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64Cr0Mask.u);
5756 uint64_t const u64GuestCr0 = u64MaskedCr0;
5757#else
5758 uint64_t const u64GuestCr0 = pVCpu->cpum.GstCtx.cr0;
5759#endif
5760
5761 switch (enmEffOpSize)
5762 {
5763 case IEMMODE_16BIT:
5764 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_386)
5765 iemGRegStoreU16(pVCpu, iGReg, (uint16_t)u64GuestCr0);
5766            /* Unused bits are set on 386 and older CPUs: */
5767 else if (IEM_GET_TARGET_CPU(pVCpu) >= IEMTARGETCPU_386)
5768 iemGRegStoreU16(pVCpu, iGReg, (uint16_t)u64GuestCr0 | 0xffe0);
5769 else
5770 iemGRegStoreU16(pVCpu, iGReg, (uint16_t)u64GuestCr0 | 0xfff0);
5771 break;
5772
5773/** @todo testcase for bits 31:16. We're not doing that correctly. */
5774
5775 case IEMMODE_32BIT:
5776 if (IEM_GET_TARGET_CPU(pVCpu) >= IEMTARGETCPU_386)
5777 iemGRegStoreU32(pVCpu, iGReg, (uint32_t)u64GuestCr0);
5778 else /** @todo test this! */
5779 iemGRegStoreU32(pVCpu, iGReg, (uint32_t)u64GuestCr0 | UINT32_C(0x7fffffe0)); /* Unused bits are set on 386. */
5780 break;
5781
5782 case IEMMODE_64BIT:
5783 iemGRegStoreU64(pVCpu, iGReg, u64GuestCr0);
5784 break;
5785
5786 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5787 }
5788
5789 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
5790}
5791
5792
5793/**
5794 * Implements smsw mem.
5795 *
5796 * @param   iEffSeg         The effective segment register to use with @a GCPtrEffDst.
5797 * @param GCPtrEffDst Where to store the 16-bit CR0 value.
5798 */
5799IEM_CIMPL_DEF_2(iemCImpl_smsw_mem, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
5800{
5801 uint64_t u64GuestCr0 = pVCpu->cpum.GstCtx.cr0;
5802 if (!IEM_IS_IN_GUEST(pVCpu))
5803 { /* probable */ }
5804 else if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
5805 u64GuestCr0 = CPUMGetGuestVmxMaskedCr0(&pVCpu->cpum.GstCtx, pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64Cr0Mask.u);
5806 else
5807 IEM_SVM_CHECK_READ_CR0_INTERCEPT(pVCpu, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */, cbInstr);
5808
5809 uint16_t u16Value;
5810 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_386)
5811 u16Value = (uint16_t)u64GuestCr0;
5812 else if (IEM_GET_TARGET_CPU(pVCpu) >= IEMTARGETCPU_386)
5813 u16Value = (uint16_t)u64GuestCr0 | 0xffe0;
5814 else
5815 u16Value = (uint16_t)u64GuestCr0 | 0xfff0;
5816
5817 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iEffSeg, GCPtrEffDst, u16Value);
5818 if (rcStrict == VINF_SUCCESS)
5819 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
5820 return rcStrict;
5821}
5822
5823
5824/**
5825 * Helper for mapping CR3 and PAE PDPEs for 'mov CRx,GReg'.
5826 */
5827#define IEM_MAP_PAE_PDPES_AT_CR3_RET(a_pVCpu, a_iCrReg, a_uCr3) \
5828 do \
5829 { \
5830 int const rcX = PGMGstMapPaePdpesAtCr3(a_pVCpu, a_uCr3); \
5831 if (RT_SUCCESS(rcX)) \
5832 { /* likely */ } \
5833 else \
5834 { \
5835 /* Either invalid PDPTEs or CR3 second-level translation failed. Raise #GP(0) either way. */ \
5836 Log(("iemCImpl_load_Cr%#x: Trying to load invalid PAE PDPEs\n", a_iCrReg)); \
5837 return iemRaiseGeneralProtectionFault0(a_pVCpu); \
5838 } \
5839 } while (0)
5840
5841
5842/**
5843 * Used to implement 'mov CRx,GReg' and 'lmsw r/m16'.
5844 *
5845 * @param iCrReg The CRx register to write (valid).
5846 * @param uNewCrX The new value.
5847 * @param enmAccessCrX The instruction that caused the CrX load.
5848 * @param iGReg The general register in case of a 'mov CRx,GReg'
5849 * instruction.
5850 */
5851IEM_CIMPL_DEF_4(iemCImpl_load_CrX, uint8_t, iCrReg, uint64_t, uNewCrX, IEMACCESSCRX, enmAccessCrX, uint8_t, iGReg)
5852{
5853 VBOXSTRICTRC rcStrict;
5854 int rc;
5855#ifndef VBOX_WITH_NESTED_HWVIRT_SVM
5856 RT_NOREF2(iGReg, enmAccessCrX);
5857#endif
5858
5859 /*
5860     * Try to store it.
5861 * Unfortunately, CPUM only does a tiny bit of the work.
5862 */
5863 switch (iCrReg)
5864 {
5865 case 0:
5866 {
5867 /*
5868 * Perform checks.
5869 */
5870 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
5871
5872 uint64_t const uOldCrX = pVCpu->cpum.GstCtx.cr0;
5873 uint32_t const fValid = CPUMGetGuestCR0ValidMask();
5874
5875 /* ET is hardcoded on 486 and later. */
5876 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_486)
5877 uNewCrX |= X86_CR0_ET;
5878 /* The 386 and 486 didn't #GP(0) on attempting to set reserved CR0 bits. ET was settable on 386. */
5879 else if (IEM_GET_TARGET_CPU(pVCpu) == IEMTARGETCPU_486)
5880 {
5881 uNewCrX &= fValid;
5882 uNewCrX |= X86_CR0_ET;
5883 }
5884 else
5885 uNewCrX &= X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS | X86_CR0_PG | X86_CR0_ET;
5886
5887 /* Check for reserved bits. */
5888 if (uNewCrX & ~(uint64_t)fValid)
5889 {
5890 Log(("Trying to set reserved CR0 bits: NewCR0=%#llx InvalidBits=%#llx\n", uNewCrX, uNewCrX & ~(uint64_t)fValid));
5891 return iemRaiseGeneralProtectionFault0(pVCpu);
5892 }
5893
5894 /* Check for invalid combinations. */
5895 if ( (uNewCrX & X86_CR0_PG)
5896 && !(uNewCrX & X86_CR0_PE) )
5897 {
5898 Log(("Trying to set CR0.PG without CR0.PE\n"));
5899 return iemRaiseGeneralProtectionFault0(pVCpu);
5900 }
5901
5902 if ( !(uNewCrX & X86_CR0_CD)
5903 && (uNewCrX & X86_CR0_NW) )
5904 {
5905 Log(("Trying to clear CR0.CD while leaving CR0.NW set\n"));
5906 return iemRaiseGeneralProtectionFault0(pVCpu);
5907 }
5908
5909 if ( !(uNewCrX & X86_CR0_PG)
5910 && (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PCIDE))
5911 {
5912 Log(("Trying to clear CR0.PG while leaving CR4.PCID set\n"));
5913 return iemRaiseGeneralProtectionFault0(pVCpu);
5914 }
5915
5916 /* Long mode consistency checks. */
5917 if ( (uNewCrX & X86_CR0_PG)
5918 && !(uOldCrX & X86_CR0_PG)
5919 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LME) )
5920 {
5921 if (!(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE))
5922 {
5923                    Log(("Trying to enable long mode paging without CR4.PAE set\n"));
5924 return iemRaiseGeneralProtectionFault0(pVCpu);
5925 }
5926 if (pVCpu->cpum.GstCtx.cs.Attr.n.u1Long)
5927 {
5928                    Log(("Trying to enable long mode paging with a long CS descriptor loaded.\n"));
5929 return iemRaiseGeneralProtectionFault0(pVCpu);
5930 }
5931 }
5932
5933 /** @todo testcase: what happens if we disable paging while in 64-bit code? */
5934
5935 if (!IEM_IS_IN_GUEST(pVCpu))
5936 { /* probable */ }
5937#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5938 /* Check for bits that must remain set or cleared in VMX operation,
5939 see Intel spec. 23.8 "Restrictions on VMX operation". */
5940 else if (IEM_VMX_IS_ROOT_MODE(pVCpu))
5941 {
5942 uint64_t const uCr0Fixed0 = iemVmxGetCr0Fixed0(pVCpu, IEM_VMX_IS_NON_ROOT_MODE(pVCpu));
5943 if ((uNewCrX & uCr0Fixed0) != uCr0Fixed0)
5944 {
5945 Log(("Trying to clear reserved CR0 bits in VMX operation: NewCr0=%#llx MB1=%#llx\n", uNewCrX, uCr0Fixed0));
5946 return iemRaiseGeneralProtectionFault0(pVCpu);
5947 }
5948
5949 uint64_t const uCr0Fixed1 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr0Fixed1;
5950 if (uNewCrX & ~uCr0Fixed1)
5951 {
5952 Log(("Trying to set reserved CR0 bits in VMX operation: NewCr0=%#llx MB0=%#llx\n", uNewCrX, uCr0Fixed1));
5953 return iemRaiseGeneralProtectionFault0(pVCpu);
5954 }
5955 }
5956#endif
5957 /*
5958 * SVM nested-guest CR0 write intercepts.
5959 */
5960 else if (IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(pVCpu, iCrReg))
5961 {
5962 Log(("iemCImpl_load_Cr%#x: Guest intercept -> #VMEXIT\n", iCrReg));
5963 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
5964 IEM_SVM_CRX_VMEXIT_RET(pVCpu, SVM_EXIT_WRITE_CR0, enmAccessCrX, iGReg);
5965 }
5966 else if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_CR0_SEL_WRITE))
5967 {
5968 /* 'lmsw' intercepts regardless of whether the TS/MP bits are actually toggled. */
5969 if ( enmAccessCrX == IEMACCESSCRX_LMSW
5970 || (uNewCrX & ~(X86_CR0_TS | X86_CR0_MP)) != (uOldCrX & ~(X86_CR0_TS | X86_CR0_MP)))
5971 {
5972 Assert(enmAccessCrX != IEMACCESSCRX_CLTS);
5973 Log(("iemCImpl_load_Cr%#x: lmsw or bits other than TS/MP changed: Guest intercept -> #VMEXIT\n", iCrReg));
5974 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
5975 IEM_SVM_CRX_VMEXIT_RET(pVCpu, SVM_EXIT_CR0_SEL_WRITE, enmAccessCrX, iGReg);
5976 }
5977 }
5978
5979 /*
5980 * Change EFER.LMA if entering or leaving long mode.
5981 */
5982 uint64_t NewEFER = pVCpu->cpum.GstCtx.msrEFER;
5983 if ( (uNewCrX & X86_CR0_PG) != (uOldCrX & X86_CR0_PG)
5984 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LME) )
5985 {
5986 if (uNewCrX & X86_CR0_PG)
5987 NewEFER |= MSR_K6_EFER_LMA;
5988 else
5989 NewEFER &= ~MSR_K6_EFER_LMA;
5990
5991 CPUMSetGuestEFER(pVCpu, NewEFER);
5992 Assert(pVCpu->cpum.GstCtx.msrEFER == NewEFER);
5993 }
5994
5995 IEMTLBTRACE_LOAD_CR0(pVCpu, uNewCrX, uOldCrX);
5996
5997 /*
5998 * Inform PGM.
5999 */
6000 if ( (uNewCrX & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE | X86_CR0_CD | X86_CR0_NW))
6001 != (uOldCrX & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE | X86_CR0_CD | X86_CR0_NW)) )
6002 {
6003 if ( enmAccessCrX != IEMACCESSCRX_MOV_CRX
6004 || !CPUMIsPaePagingEnabled(uNewCrX, pVCpu->cpum.GstCtx.cr4, NewEFER)
6005 || CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
6006 { /* likely */ }
6007 else
6008 IEM_MAP_PAE_PDPES_AT_CR3_RET(pVCpu, iCrReg, pVCpu->cpum.GstCtx.cr3);
6009 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, true /* global */);
6010 AssertRCReturn(rc, rc);
6011 /* ignore informational status codes */
6012 }
6013
6014 /*
6015 * Change CR0.
6016 */
6017 CPUMSetGuestCR0(pVCpu, uNewCrX);
6018 Assert(pVCpu->cpum.GstCtx.cr0 == uNewCrX);
6019
6020 /* Update the fExec flags if PE changed. */
6021 if ((uNewCrX ^ uOldCrX) & X86_CR0_PE)
6022 iemRecalcExecModeAndCplAndAcFlags(pVCpu);
6023
6024 /*
6025 * Inform PGM some more...
6026 */
6027 rcStrict = PGMChangeMode(pVCpu, pVCpu->cpum.GstCtx.cr0, pVCpu->cpum.GstCtx.cr4, pVCpu->cpum.GstCtx.msrEFER,
6028 false /* fForce */);
6029 break;
6030 }
6031
6032 /*
6033 * CR2 can be changed without any restrictions.
6034 */
6035 case 2:
6036 {
6037 if (!IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(pVCpu, /*cr*/ 2))
6038 { /* probable */ }
6039 else
6040 {
6041 Log(("iemCImpl_load_Cr%#x: Guest intercept -> #VMEXIT\n", iCrReg));
6042 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
6043 IEM_SVM_CRX_VMEXIT_RET(pVCpu, SVM_EXIT_WRITE_CR2, enmAccessCrX, iGReg);
6044 }
6045 pVCpu->cpum.GstCtx.cr2 = uNewCrX;
6046 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_CR2;
6047 rcStrict = VINF_SUCCESS;
6048 break;
6049 }
6050
6051 /*
6052 * CR3 is relatively simple, although AMD and Intel have different
6053         * accounts of how setting reserved bits is handled. We take Intel's
6054 * word for the lower bits and AMD's for the high bits (63:52). The
6055 * lower reserved bits are ignored and left alone; OpenBSD 5.8 relies
6056 * on this.
6057 */
6058 /** @todo Testcase: Setting reserved bits in CR3, especially before
6059 * enabling paging. */
6060 case 3:
6061 {
6062 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR3);
6063
6064 /* Bit 63 being clear in the source operand with PCIDE indicates no invalidations are required. */
6065 if ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PCIDE)
6066 && (uNewCrX & RT_BIT_64(63)))
6067 {
6068 /** @todo r=ramshankar: avoiding a TLB flush altogether here causes Windows 10
6069 * SMP(w/o nested-paging) to hang during bootup on Skylake systems, see
6070 * Intel spec. 4.10.4.1 "Operations that Invalidate TLBs and
6071 * Paging-Structure Caches". */
6072 uNewCrX &= ~RT_BIT_64(63);
6073 }
6074
6075 /* Check / mask the value. */
6076#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6077 /* See Intel spec. 27.2.2 "EPT Translation Mechanism" footnote. */
6078 uint64_t const fInvPhysMask = !CPUMIsGuestVmxEptPagingEnabledEx(IEM_GET_CTX(pVCpu))
6079 ? (UINT64_MAX << IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cMaxPhysAddrWidth)
6080 : (~X86_CR3_EPT_PAGE_MASK & X86_PAGE_4K_BASE_MASK);
6081#else
6082 uint64_t const fInvPhysMask = UINT64_C(0xfff0000000000000);
6083#endif
6084 if (uNewCrX & fInvPhysMask)
6085 {
6086 /** @todo Should we raise this only for 64-bit mode like Intel claims? AMD is
6087 * very vague in this area. As mentioned above, need testcase on real
6088 * hardware... Sigh. */
6089 Log(("Trying to load CR3 with invalid high bits set: %#llx\n", uNewCrX));
6090 return iemRaiseGeneralProtectionFault0(pVCpu);
6091 }
6092
6093 uint64_t fValid;
6094 if ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
6095 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LME))
6096 {
6097 /** @todo Redundant? This value has already been validated above. */
6098 fValid = UINT64_C(0x000fffffffffffff);
6099 }
6100 else
6101 fValid = UINT64_C(0xffffffff);
6102 if (uNewCrX & ~fValid)
6103 {
6104 Log(("Automatically clearing reserved MBZ bits in CR3 load: NewCR3=%#llx ClearedBits=%#llx\n",
6105 uNewCrX, uNewCrX & ~fValid));
6106 uNewCrX &= fValid;
6107 }
6108
6109 if (!IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(pVCpu, /*cr*/ 3))
6110 { /* probable */ }
6111 else
6112 {
6113 Log(("iemCImpl_load_Cr%#x: Guest intercept -> #VMEXIT\n", iCrReg));
6114 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
6115 IEM_SVM_CRX_VMEXIT_RET(pVCpu, SVM_EXIT_WRITE_CR3, enmAccessCrX, iGReg);
6116 }
6117
6118 IEMTLBTRACE_LOAD_CR3(pVCpu, uNewCrX, pVCpu->cpum.GstCtx.cr3);
6119
6120 /* Inform PGM. */
6121 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG)
6122 {
6123 if ( !CPUMIsGuestInPAEModeEx(IEM_GET_CTX(pVCpu))
6124 || CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
6125 { /* likely */ }
6126 else
6127 {
6128 Assert(enmAccessCrX == IEMACCESSCRX_MOV_CRX);
6129 IEM_MAP_PAE_PDPES_AT_CR3_RET(pVCpu, iCrReg, uNewCrX);
6130 }
6131 rc = PGMFlushTLB(pVCpu, uNewCrX, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));
6132 AssertRCReturn(rc, rc);
6133 /* ignore informational status codes */
6134 }
6135
6136 /* Make the change. */
6137 rc = CPUMSetGuestCR3(pVCpu, uNewCrX);
6138 AssertRCSuccessReturn(rc, rc);
6139
6140 rcStrict = VINF_SUCCESS;
6141 break;
6142 }
6143
6144 /*
6145 * CR4 is a bit more tedious as there are bits which cannot be cleared
6146 * under some circumstances and such.
6147 */
6148 case 4:
6149 {
6150 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
6151 uint64_t const uOldCrX = pVCpu->cpum.GstCtx.cr4;
6152
6153 /* Reserved bits. */
6154 uint32_t const fValid = CPUMGetGuestCR4ValidMask(pVCpu->CTX_SUFF(pVM));
6155 if (uNewCrX & ~(uint64_t)fValid)
6156 {
6157 Log(("Trying to set reserved CR4 bits: NewCR4=%#llx InvalidBits=%#llx\n", uNewCrX, uNewCrX & ~(uint64_t)fValid));
6158 return iemRaiseGeneralProtectionFault0(pVCpu);
6159 }
6160
6161 bool const fPcide = !(uOldCrX & X86_CR4_PCIDE) && (uNewCrX & X86_CR4_PCIDE);
6162 bool const fLongMode = CPUMIsGuestInLongModeEx(IEM_GET_CTX(pVCpu));
6163
6164 /* PCIDE check. */
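            /* CR4.PCIDE may only be set in long mode and only while CR3[11:0]
               is zero. */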
6165 if ( fPcide
6166 && ( !fLongMode
6167 || (pVCpu->cpum.GstCtx.cr3 & UINT64_C(0xfff))))
6168 {
6169 Log(("Trying to set PCIDE with invalid PCID or outside long mode. Pcid=%#x\n", (pVCpu->cpum.GstCtx.cr3 & UINT64_C(0xfff))));
6170 return iemRaiseGeneralProtectionFault0(pVCpu);
6171 }
6172
6173 /* PAE check. */
6174 if ( fLongMode
6175 && (uOldCrX & X86_CR4_PAE)
6176 && !(uNewCrX & X86_CR4_PAE))
6177 {
6178                Log(("Trying to clear CR4.PAE while long mode is active\n"));
6179 return iemRaiseGeneralProtectionFault0(pVCpu);
6180 }
6181
6182 if (!IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(pVCpu, /*cr*/ 4))
6183 { /* probable */ }
6184 else
6185 {
6186 Log(("iemCImpl_load_Cr%#x: Guest intercept -> #VMEXIT\n", iCrReg));
6187 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
6188 IEM_SVM_CRX_VMEXIT_RET(pVCpu, SVM_EXIT_WRITE_CR4, enmAccessCrX, iGReg);
6189 }
6190
6191 /* Check for bits that must remain set or cleared in VMX operation,
6192 see Intel spec. 23.8 "Restrictions on VMX operation". */
6193 if (!IEM_VMX_IS_ROOT_MODE(pVCpu))
6194 { /* probable */ }
6195 else
6196 {
6197 uint64_t const uCr4Fixed0 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr4Fixed0;
6198 if ((uNewCrX & uCr4Fixed0) != uCr4Fixed0)
6199 {
6200 Log(("Trying to clear reserved CR4 bits in VMX operation: NewCr4=%#llx MB1=%#llx\n", uNewCrX, uCr4Fixed0));
6201 return iemRaiseGeneralProtectionFault0(pVCpu);
6202 }
6203
6204 uint64_t const uCr4Fixed1 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr4Fixed1;
6205 if (uNewCrX & ~uCr4Fixed1)
6206 {
6207 Log(("Trying to set reserved CR4 bits in VMX operation: NewCr4=%#llx MB0=%#llx\n", uNewCrX, uCr4Fixed1));
6208 return iemRaiseGeneralProtectionFault0(pVCpu);
6209 }
6210 }
6211
6212 IEMTLBTRACE_LOAD_CR4(pVCpu, uNewCrX, uOldCrX);
6213
6214 /*
6215 * Notify PGM.
6216 */
6217 if ((uNewCrX ^ uOldCrX) & (X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE | X86_CR4_PCIDE /* | X86_CR4_SMEP */))
6218 {
6219 if ( !CPUMIsPaePagingEnabled(pVCpu->cpum.GstCtx.cr0, uNewCrX, pVCpu->cpum.GstCtx.msrEFER)
6220 || CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
6221 { /* likely */ }
6222 else
6223 {
6224 Assert(enmAccessCrX == IEMACCESSCRX_MOV_CRX);
6225 IEM_MAP_PAE_PDPES_AT_CR3_RET(pVCpu, iCrReg, pVCpu->cpum.GstCtx.cr3);
6226 }
6227 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, true /* global */);
6228 AssertRCReturn(rc, rc);
6229 /* ignore informational status codes */
6230 }
6231
6232 /*
6233 * Change it.
6234 */
6235 rc = CPUMSetGuestCR4(pVCpu, uNewCrX);
6236 AssertRCSuccessReturn(rc, rc);
6237 Assert(pVCpu->cpum.GstCtx.cr4 == uNewCrX);
6238
6239 rcStrict = PGMChangeMode(pVCpu, pVCpu->cpum.GstCtx.cr0, pVCpu->cpum.GstCtx.cr4, pVCpu->cpum.GstCtx.msrEFER,
6240 false /* fForce */);
6241 break;
6242 }
6243
6244 /*
6245 * CR8 maps to the APIC TPR.
6246 */
6247 case 8:
6248 {
6249 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_APIC_TPR);
6250 if (uNewCrX & ~(uint64_t)0xf)
6251 {
6252 Log(("Trying to set reserved CR8 bits (%#RX64)\n", uNewCrX));
6253 return iemRaiseGeneralProtectionFault0(pVCpu);
6254 }
6255
6256 if (!IEM_IS_IN_GUEST(pVCpu))
6257 { /* probable */ }
6258#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6259 else if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
6260 && IEM_VMX_IS_PROCCTLS_SET(pVCpu, VMX_PROC_CTLS_USE_TPR_SHADOW))
6261 {
6262 /*
6263 * If the Mov-to-CR8 doesn't cause a VM-exit, bits 0:3 of the source operand
6264 * is copied to bits 7:4 of the VTPR. Bits 0:3 and bits 31:8 of the VTPR are
6265 * cleared. Following this the processor performs TPR virtualization.
6266 *
6267 * However, we should not perform TPR virtualization immediately here but
6268 * after this instruction has completed.
6269 *
6270 * See Intel spec. 29.3 "Virtualizing CR8-based TPR Accesses"
6271 * See Intel spec. 27.1 "Architectural State Before A VM-exit"
6272 */
6273 uint32_t const uTpr = (uNewCrX & 0xf) << 4;
6274 Log(("iemCImpl_load_Cr%#x: Virtualizing TPR (%#x) write\n", iCrReg, uTpr));
6275 iemVmxVirtApicWriteRaw32(pVCpu, XAPIC_OFF_TPR, uTpr);
6276 iemVmxVirtApicSetPendingWrite(pVCpu, XAPIC_OFF_TPR);
6277 rcStrict = VINF_SUCCESS;
6278 break;
6279 }
6280#endif
6281#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
6282 else if (pVCpu->iem.s.fExec & IEM_F_X86_CTX_SVM)
6283 {
6284 if (IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(pVCpu, /*cr*/ 8))
6285 {
6286 Log(("iemCImpl_load_Cr%#x: Guest intercept -> #VMEXIT\n", iCrReg));
6287 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
6288 IEM_SVM_CRX_VMEXIT_RET(pVCpu, SVM_EXIT_WRITE_CR8, enmAccessCrX, iGReg);
6289 }
6290
6291 pVCpu->cpum.GstCtx.hwvirt.svm.Vmcb.ctrl.IntCtrl.n.u8VTPR = uNewCrX;
6292 if (CPUMIsGuestSvmVirtIntrMasking(pVCpu, IEM_GET_CTX(pVCpu)))
6293 {
6294 rcStrict = VINF_SUCCESS;
6295 break;
6296 }
6297 }
6298#endif
6299 uint8_t const u8Tpr = (uint8_t)uNewCrX << 4;
6300 APICSetTpr(pVCpu, u8Tpr);
6301 rcStrict = VINF_SUCCESS;
6302 break;
6303 }
6304
6305 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
6306 }
6307
6308 /*
6309 * Advance the RIP on success.
6310 */
6311 if (RT_SUCCESS(rcStrict))
6312 {
6313 if (rcStrict != VINF_SUCCESS)
6314 iemSetPassUpStatus(pVCpu, rcStrict);
6315 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
6316 }
6317
6318 return rcStrict;
6319}
6320
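/*
 * Illustrative sketch (not used by the emulation): how the CR8 case above
 * derives the 8-bit APIC TPR value from a MOV CR8 source operand.  Only the
 * low four bits of the source are significant and they end up in TPR[7:4];
 * anything above bit 3 is rejected with #GP(0) before this point.  The helper
 * name is made up for this example and it only assumes <stdint.h> types.
 */
static uint8_t iemExampleCr8ToTpr(uint64_t uNewCr8)
{
    /* CR8 holds a 4-bit priority class; shift it into the TPR's class field. */
    return (uint8_t)((uNewCr8 & 0xf) << 4);
}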
6321
6322/**
6323 * Implements mov CRx,GReg.
6324 *
6325 * @param iCrReg The CRx register to write (valid).
6326 * @param iGReg The general register to load the CRx value from.
6327 */
6328IEM_CIMPL_DEF_2(iemCImpl_mov_Cd_Rd, uint8_t, iCrReg, uint8_t, iGReg)
6329{
6330 if (IEM_GET_CPL(pVCpu) != 0)
6331 return iemRaiseGeneralProtectionFault0(pVCpu);
6332 Assert(!pVCpu->cpum.GstCtx.eflags.Bits.u1VM);
6333
6334 /*
6335 * Read the new value from the source register and call common worker.
6336 */
6337 uint64_t uNewCrX;
6338 if (IEM_IS_64BIT_CODE(pVCpu))
6339 uNewCrX = iemGRegFetchU64(pVCpu, iGReg);
6340 else
6341 uNewCrX = iemGRegFetchU32(pVCpu, iGReg);
6342
6343#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6344 if (!IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
6345 { /* probable */ }
6346 else
6347 {
6348 VBOXSTRICTRC rcStrict = VINF_VMX_INTERCEPT_NOT_ACTIVE;
6349 switch (iCrReg)
6350 {
6351 case 0:
6352 case 4: rcStrict = iemVmxVmexitInstrMovToCr0Cr4(pVCpu, iCrReg, &uNewCrX, iGReg, cbInstr); break;
6353 case 3: rcStrict = iemVmxVmexitInstrMovToCr3(pVCpu, uNewCrX, iGReg, cbInstr); break;
6354 case 8: rcStrict = iemVmxVmexitInstrMovToCr8(pVCpu, iGReg, cbInstr); break;
6355 }
6356 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
6357 return rcStrict;
6358 }
6359#endif
6360
6361 return IEM_CIMPL_CALL_4(iemCImpl_load_CrX, iCrReg, uNewCrX, IEMACCESSCRX_MOV_CRX, iGReg);
6362}
6363
6364
6365/**
6366 * Implements 'LMSW r/m16'
6367 *
6368 * @param u16NewMsw The new value.
6369 * @param GCPtrEffDst The guest-linear address of the source operand in case
6370 * of a memory operand. For register operand, pass
6371 * NIL_RTGCPTR.
6372 */
6373IEM_CIMPL_DEF_2(iemCImpl_lmsw, uint16_t, u16NewMsw, RTGCPTR, GCPtrEffDst)
6374{
6375 if (IEM_GET_CPL(pVCpu) != 0)
6376 return iemRaiseGeneralProtectionFault0(pVCpu);
6377 Assert(!pVCpu->cpum.GstCtx.eflags.Bits.u1VM);
6378 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
6379
6380#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6381 /* Check nested-guest VMX intercept and get updated MSW if there's no VM-exit. */
6382 if (!IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
6383 { /* probable */ }
6384 else
6385 {
6386 VBOXSTRICTRC rcStrict = iemVmxVmexitInstrLmsw(pVCpu, pVCpu->cpum.GstCtx.cr0, &u16NewMsw, GCPtrEffDst, cbInstr);
6387 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
6388 return rcStrict;
6389 }
6390#else
6391 RT_NOREF_PV(GCPtrEffDst);
6392#endif
6393
6394 /*
6395 * Compose the new CR0 value and call common worker.
6396 */
6397 uint64_t uNewCr0 = pVCpu->cpum.GstCtx.cr0 & ~(X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
6398 uNewCr0 |= u16NewMsw & (X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
6399 return IEM_CIMPL_CALL_4(iemCImpl_load_CrX, /*cr*/ 0, uNewCr0, IEMACCESSCRX_LMSW, UINT8_MAX /* iGReg */);
6400}
6401
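/*
 * Illustrative sketch (not part of the emulation): the MSW merge performed by
 * iemCImpl_lmsw above.  Note how PE is deliberately not masked out of the old
 * CR0, so LMSW can set CR0.PE but never clear it, matching the architecture.
 * The helper name and the plain bit constants (mirroring X86_CR0_PE/MP/EM/TS)
 * are spelled out here only for this example.
 */
static uint64_t iemExampleLmswComposeCr0(uint64_t uOldCr0, uint16_t u16Msw)
{
    uint64_t const fPe = UINT64_C(0x00000001); /* CR0.PE */
    uint64_t const fMp = UINT64_C(0x00000002); /* CR0.MP */
    uint64_t const fEm = UINT64_C(0x00000004); /* CR0.EM */
    uint64_t const fTs = UINT64_C(0x00000008); /* CR0.TS */
    uint64_t uNewCr0 = uOldCr0 & ~(fMp | fEm | fTs);    /* PE is kept as-is. */
    uNewCr0 |= u16Msw & (fPe | fMp | fEm | fTs);        /* Only the low 4 MSW bits matter. */
    return uNewCr0;
}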
6402
6403/**
6404 * Implements 'CLTS'.
6405 */
6406IEM_CIMPL_DEF_0(iemCImpl_clts)
6407{
6408 if (IEM_GET_CPL(pVCpu) != 0)
6409 return iemRaiseGeneralProtectionFault0(pVCpu);
6410
6411 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
6412 uint64_t uNewCr0 = pVCpu->cpum.GstCtx.cr0;
6413 uNewCr0 &= ~X86_CR0_TS;
6414
6415#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6416 if (!IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
6417 { /* probable */ }
6418 else
6419 {
6420 VBOXSTRICTRC rcStrict = iemVmxVmexitInstrClts(pVCpu, cbInstr);
6421 if (rcStrict == VINF_VMX_MODIFIES_BEHAVIOR)
6422 uNewCr0 |= (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS);
6423 else if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
6424 return rcStrict;
6425 }
6426#endif
6427
6428 return IEM_CIMPL_CALL_4(iemCImpl_load_CrX, /*cr*/ 0, uNewCr0, IEMACCESSCRX_CLTS, UINT8_MAX /* iGReg */);
6429}
6430
6431
6432/**
6433 * Implements mov GReg,DRx.
6434 *
6435 * @param iGReg The general register to store the DRx value in.
6436 * @param iDrReg The DRx register to read (0-7).
6437 */
6438IEM_CIMPL_DEF_2(iemCImpl_mov_Rd_Dd, uint8_t, iGReg, uint8_t, iDrReg)
6439{
6440#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6441 /*
6442 * Check nested-guest VMX intercept.
6443     * Unlike most other intercepts, the Mov DRx intercept takes precedence
6444 * over CPL and CR4.DE and even DR4/DR5 checks.
6445 *
6446 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
6447 */
6448 if (!IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
6449 { /* probable */ }
6450 else
6451 {
6452 VBOXSTRICTRC rcStrict = iemVmxVmexitInstrMovDrX(pVCpu, VMXINSTRID_MOV_FROM_DRX, iDrReg, iGReg, cbInstr);
6453 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
6454 return rcStrict;
6455 }
6456#endif
6457
6458 /*
6459 * Check preconditions.
6460 */
6461 /* Raise GPs. */
6462 if (IEM_GET_CPL(pVCpu) != 0)
6463 return iemRaiseGeneralProtectionFault0(pVCpu);
6464 Assert(!pVCpu->cpum.GstCtx.eflags.Bits.u1VM);
6465 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
6466
6467    /** @todo \#UD outside ring-0 too? */
6468 if (iDrReg == 4 || iDrReg == 5)
6469 {
6470 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_CR4);
6471 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_DE)
6472 {
6473 Log(("mov r%u,dr%u: CR4.DE=1 -> #GP(0)\n", iGReg, iDrReg));
6474 return iemRaiseGeneralProtectionFault0(pVCpu);
6475 }
6476 iDrReg += 2;
6477 }
6478
6479 /* Raise #DB if general access detect is enabled. */
6480 if (pVCpu->cpum.GstCtx.dr[7] & X86_DR7_GD)
6481 {
6482 Log(("mov r%u,dr%u: DR7.GD=1 -> #DB\n", iGReg, iDrReg));
6483 return iemRaiseDebugException(pVCpu);
6484 }
6485
6486 /*
6487 * Read the debug register and store it in the specified general register.
6488 */
6489 uint64_t drX;
6490 switch (iDrReg)
6491 {
6492 case 0:
6493 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
6494 drX = pVCpu->cpum.GstCtx.dr[0];
6495 break;
6496 case 1:
6497 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
6498 drX = pVCpu->cpum.GstCtx.dr[1];
6499 break;
6500 case 2:
6501 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
6502 drX = pVCpu->cpum.GstCtx.dr[2];
6503 break;
6504 case 3:
6505 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
6506 drX = pVCpu->cpum.GstCtx.dr[3];
6507 break;
6508 case 6:
6509 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR6);
6510 drX = pVCpu->cpum.GstCtx.dr[6];
6511 drX |= X86_DR6_RA1_MASK;
6512 drX &= ~X86_DR6_RAZ_MASK;
6513 break;
6514 case 7:
6515 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
6516 drX = pVCpu->cpum.GstCtx.dr[7];
6517            drX |= X86_DR7_RA1_MASK;
6518 drX &= ~X86_DR7_RAZ_MASK;
6519 break;
6520 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* caller checks */
6521 }
6522
6523 /** @todo SVM nested-guest intercept for DR8-DR15? */
6524 /*
6525 * Check for any SVM nested-guest intercepts for the DRx read.
6526 */
6527 if (!IEM_SVM_IS_READ_DR_INTERCEPT_SET(pVCpu, iDrReg))
6528 { /* probable */ }
6529 else
6530 {
6531 Log(("mov r%u,dr%u: Guest intercept -> #VMEXIT\n", iGReg, iDrReg));
6532 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
6533 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_READ_DR0 + (iDrReg & 0xf),
6534 IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmDecodeAssists ? (iGReg & 7) : 0, 0 /* uExitInfo2 */);
6535 }
6536
6537 if (IEM_IS_64BIT_CODE(pVCpu))
6538 iemGRegStoreU64(pVCpu, iGReg, drX);
6539 else
6540 iemGRegStoreU32(pVCpu, iGReg, (uint32_t)drX);
6541
6542 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
6543}
6544
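/*
 * Illustrative sketch (not used above): the fix-up applied when DR6 is read.
 * Architecturally reserved bits read back as fixed values, so the raw register
 * value is OR'ed with a must-be-one mask and stripped of the read-as-zero
 * bits.  The mask values below are the classic DR6 ones (reset value
 * 0xffff0ff0, bit 12 reads as zero) and are given here purely for
 * illustration; the emulation uses the X86_DR6_RA1_MASK/RAZ_MASK constants.
 */
static uint64_t iemExampleFixupDr6Read(uint64_t uRawDr6)
{
    uint64_t const fRa1Mask = UINT64_C(0x00000000ffff0ff0); /* reads as 1 */
    uint64_t const fRazMask = UINT64_C(0x0000000000001000); /* reads as 0 */
    return (uRawDr6 | fRa1Mask) & ~fRazMask;
}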
6545
6546/**
6547 * Implements mov DRx,GReg.
6548 *
6549 * @param iDrReg The DRx register to write (valid).
6550 * @param iGReg The general register to load the DRx value from.
6551 */
6552IEM_CIMPL_DEF_2(iemCImpl_mov_Dd_Rd, uint8_t, iDrReg, uint8_t, iGReg)
6553{
6554#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6555 /*
6556 * Check nested-guest VMX intercept.
6557     * Unlike most other intercepts, the Mov DRx intercept takes precedence
6558 * over CPL and CR4.DE and even DR4/DR5 checks.
6559 *
6560 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
6561 */
6562 if (!IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
6563 { /* probable */ }
6564 else
6565 {
6566 VBOXSTRICTRC rcStrict = iemVmxVmexitInstrMovDrX(pVCpu, VMXINSTRID_MOV_TO_DRX, iDrReg, iGReg, cbInstr);
6567 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
6568 return rcStrict;
6569 }
6570#endif
6571
6572 /*
6573 * Check preconditions.
6574 */
6575 if (IEM_GET_CPL(pVCpu) != 0)
6576 return iemRaiseGeneralProtectionFault0(pVCpu);
6577 Assert(!pVCpu->cpum.GstCtx.eflags.Bits.u1VM);
6578 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
6579
6580 if (iDrReg == 4 || iDrReg == 5)
6581 {
6582 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_CR4);
6583 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_DE)
6584 {
6585 Log(("mov dr%u,r%u: CR4.DE=1 -> #GP(0)\n", iDrReg, iGReg));
6586 return iemRaiseGeneralProtectionFault0(pVCpu);
6587 }
6588 iDrReg += 2;
6589 }
6590
6591 /* Raise #DB if general access detect is enabled. */
6592    /** @todo is \#DB (DR7.GD) raised before the \#GP for reserved high bits
6593     *        in DR7/DR6? */
6594 if (pVCpu->cpum.GstCtx.dr[7] & X86_DR7_GD)
6595 {
6596 Log(("mov dr%u,r%u: DR7.GD=1 -> #DB\n", iDrReg, iGReg));
6597 return iemRaiseDebugException(pVCpu);
6598 }
6599
6600 /*
6601 * Read the new value from the source register.
6602 */
6603 uint64_t uNewDrX;
6604 if (IEM_IS_64BIT_CODE(pVCpu))
6605 uNewDrX = iemGRegFetchU64(pVCpu, iGReg);
6606 else
6607 uNewDrX = iemGRegFetchU32(pVCpu, iGReg);
6608
6609 /*
6610 * Adjust it.
6611 */
6612 switch (iDrReg)
6613 {
6614 case 0:
6615 case 1:
6616 case 2:
6617 case 3:
6618 /* nothing to adjust */
6619 break;
6620
6621 case 6:
6622 if (uNewDrX & X86_DR6_MBZ_MASK)
6623 {
6624 Log(("mov dr%u,%#llx: DR6 high bits are not zero -> #GP(0)\n", iDrReg, uNewDrX));
6625 return iemRaiseGeneralProtectionFault0(pVCpu);
6626 }
6627 uNewDrX |= X86_DR6_RA1_MASK;
6628 uNewDrX &= ~X86_DR6_RAZ_MASK;
6629 break;
6630
6631 case 7:
6632 if (uNewDrX & X86_DR7_MBZ_MASK)
6633 {
6634 Log(("mov dr%u,%#llx: DR7 high bits are not zero -> #GP(0)\n", iDrReg, uNewDrX));
6635 return iemRaiseGeneralProtectionFault0(pVCpu);
6636 }
6637 uNewDrX |= X86_DR7_RA1_MASK;
6638 uNewDrX &= ~X86_DR7_RAZ_MASK;
6639 break;
6640
6641 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6642 }
6643
6644 /** @todo SVM nested-guest intercept for DR8-DR15? */
6645 /*
6646 * Check for any SVM nested-guest intercepts for the DRx write.
6647 */
6648 if (!IEM_SVM_IS_WRITE_DR_INTERCEPT_SET(pVCpu, iDrReg))
6649 { /* probable */ }
6650 else
6651 {
6652 Log2(("mov dr%u,r%u: Guest intercept -> #VMEXIT\n", iDrReg, iGReg));
6653 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
6654 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_WRITE_DR0 + (iDrReg & 0xf),
6655 IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmDecodeAssists ? (iGReg & 7) : 0, 0 /* uExitInfo2 */);
6656 }
6657
6658 /*
6659 * Do the actual setting.
6660 */
6661 if (iDrReg < 4)
6662 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
6663 else if (iDrReg == 6)
6664 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR6);
6665
6666 int rc = CPUMSetGuestDRx(pVCpu, iDrReg, uNewDrX);
6667 AssertRCSuccessReturn(rc, RT_SUCCESS_NP(rc) ? VERR_IEM_IPE_1 : rc);
6668
6669 /*
6670 * Re-init hardware breakpoint summary if it was DR7 that got changed.
6671 *
6672 * We also do this when an active data breakpoint is updated so that the
6673 * TLB entry can be correctly invalidated.
6674 */
6675 if ( iDrReg == 7
6676#ifdef IEM_WITH_DATA_TLB
6677 || ( iDrReg <= 3
6678 && (X86_DR7_L_G(iDrReg) & pVCpu->cpum.GstCtx.dr[7])
6679 && X86_DR7_IS_W_CFG(pVCpu->cpum.GstCtx.dr[7], iDrReg) )
6680#endif
6681 )
6682 iemRecalcExecDbgFlags(pVCpu);
6683
6684 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
6685}
6686
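/*
 * Illustrative sketch (not used above): the DR4/DR5 aliasing rule applied by
 * both MOV DRx handlers.  With CR4.DE clear, DR4 and DR5 are aliases for DR6
 * and DR7; with CR4.DE set, accessing them raises #GP(0).  Returns the
 * effective register number, or UINT8_MAX to signal that the caller should
 * raise #GP(0).  The helper name and return convention are made up for this
 * example.
 */
static uint8_t iemExampleResolveDrAlias(uint8_t iDrReg, int fCr4De)
{
    if (iDrReg == 4 || iDrReg == 5)
    {
        if (fCr4De)
            return UINT8_MAX;   /* caller raises #GP(0) */
        iDrReg += 2;            /* DR4 -> DR6, DR5 -> DR7 */
    }
    return iDrReg;
}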
6687
6688/**
6689 * Implements mov GReg,TRx.
6690 *
6691 * @param iGReg The general register to store the
6692 * TRx value in.
6693 * @param iTrReg The TRx register to read (6/7).
6694 */
6695IEM_CIMPL_DEF_2(iemCImpl_mov_Rd_Td, uint8_t, iGReg, uint8_t, iTrReg)
6696{
6697 /*
6698 * Check preconditions. NB: This instruction is 386/486 only.
6699 */
6700
6701 /* Raise GPs. */
6702 if (IEM_GET_CPL(pVCpu) != 0)
6703 return iemRaiseGeneralProtectionFault0(pVCpu);
6704 Assert(!pVCpu->cpum.GstCtx.eflags.Bits.u1VM);
6705
6706 if (iTrReg < 6 || iTrReg > 7)
6707 {
6708 /** @todo Do Intel CPUs reject this or are the TRs aliased? */
6709 Log(("mov r%u,tr%u: invalid register -> #GP(0)\n", iGReg, iTrReg));
6710 return iemRaiseGeneralProtectionFault0(pVCpu);
6711 }
6712
6713 /*
6714 * Read the test register and store it in the specified general register.
6715 * This is currently a dummy implementation that only exists to satisfy
6716 * old debuggers like WDEB386 or OS/2 KDB which unconditionally read the
6717 * TR6/TR7 registers. Software which actually depends on the TR values
6718 * (different on 386/486) is exceedingly rare.
6719 */
6720 uint32_t trX;
6721 switch (iTrReg)
6722 {
6723 case 6:
6724 trX = 0; /* Currently a dummy. */
6725 break;
6726 case 7:
6727 trX = 0; /* Currently a dummy. */
6728 break;
6729 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
6730 }
6731
6732 iemGRegStoreU32(pVCpu, iGReg, trX);
6733
6734 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
6735}
6736
6737
6738/**
6739 * Implements mov TRx,GReg.
6740 *
6741 * @param iTrReg The TRx register to write (valid).
6742 * @param iGReg The general register to load the TRx
6743 * value from.
6744 */
6745IEM_CIMPL_DEF_2(iemCImpl_mov_Td_Rd, uint8_t, iTrReg, uint8_t, iGReg)
6746{
6747 /*
6748 * Check preconditions. NB: This instruction is 386/486 only.
6749 */
6750
6751 /* Raise GPs. */
6752 if (IEM_GET_CPL(pVCpu) != 0)
6753 return iemRaiseGeneralProtectionFault0(pVCpu);
6754 Assert(!pVCpu->cpum.GstCtx.eflags.Bits.u1VM);
6755
6756 if (iTrReg < 6 || iTrReg > 7)
6757 {
6758 /** @todo Do Intel CPUs reject this or are the TRs aliased? */
6759        Log(("mov tr%u,r%u: invalid register -> #GP(0)\n", iTrReg, iGReg));
6760 return iemRaiseGeneralProtectionFault0(pVCpu);
6761 }
6762
6763 /*
6764 * Read the new value from the source register.
6765 */
6766 uint32_t uNewTrX = iemGRegFetchU32(pVCpu, iGReg);
6767
6768 /*
6769 * Here we would do the actual setting if this weren't a dummy implementation.
6770 * This is currently a dummy implementation that only exists to prevent
6771 * old debuggers like WDEB386 or OS/2 KDB from crashing.
6772 */
6773 RT_NOREF(uNewTrX);
6774
6775 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
6776}
6777
6778
6779/**
6780 * Implements 'INVLPG m'.
6781 *
6782 * @param GCPtrPage The effective address of the page to invalidate.
6783 * @remarks Updates the RIP.
6784 */
6785IEM_CIMPL_DEF_1(iemCImpl_invlpg, RTGCPTR, GCPtrPage)
6786{
6787 /* ring-0 only. */
6788 if (IEM_GET_CPL(pVCpu) != 0)
6789 return iemRaiseGeneralProtectionFault0(pVCpu);
6790 Assert(!pVCpu->cpum.GstCtx.eflags.Bits.u1VM);
6791 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_EFER);
6792
6793 if (!IEM_IS_IN_GUEST(pVCpu))
6794 { /* probable */ }
6795#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6796 else if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
6797 && IEM_VMX_IS_PROCCTLS_SET(pVCpu, VMX_PROC_CTLS_INVLPG_EXIT))
6798 {
6799 Log(("invlpg: Guest intercept (%RGp) -> VM-exit\n", GCPtrPage));
6800 return iemVmxVmexitInstrInvlpg(pVCpu, GCPtrPage, cbInstr);
6801 }
6802#endif
6803 else if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_INVLPG))
6804 {
6805 Log(("invlpg: Guest intercept (%RGp) -> #VMEXIT\n", GCPtrPage));
6806 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
6807 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_INVLPG,
6808 IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmDecodeAssists ? GCPtrPage : 0, 0 /* uExitInfo2 */);
6809 }
6810
6811 int rc = PGMInvalidatePage(pVCpu, GCPtrPage);
6812 if (rc == VINF_SUCCESS)
6813 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
6814 if (rc == VINF_PGM_SYNC_CR3)
6815 {
6816 iemSetPassUpStatus(pVCpu, rc);
6817 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
6818 }
6819
6820 AssertMsg(RT_FAILURE_NP(rc), ("%Rrc\n", rc));
6821 Log(("PGMInvalidatePage(%RGv) -> %Rrc\n", GCPtrPage, rc));
6822 return rc;
6823}
6824
6825
6826/**
6827 * Implements INVPCID.
6828 *
6829 * @param iEffSeg The segment of the invpcid descriptor.
6830 * @param GCPtrInvpcidDesc The address of invpcid descriptor.
6831 * @param uInvpcidType The invalidation type.
6832 * @remarks Updates the RIP.
6833 */
6834IEM_CIMPL_DEF_3(iemCImpl_invpcid, uint8_t, iEffSeg, RTGCPTR, GCPtrInvpcidDesc, uint64_t, uInvpcidType)
6835{
6836 /*
6837 * Check preconditions.
6838 */
6839 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fInvpcid)
6840 return iemRaiseUndefinedOpcode(pVCpu);
6841
6842 /* When in VMX non-root mode and INVPCID is not enabled, it results in #UD. */
6843 if (RT_LIKELY( !IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
6844 || IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_INVPCID)))
6845 { /* likely */ }
6846 else
6847 {
6848 Log(("invpcid: Not enabled for nested-guest execution -> #UD\n"));
6849 return iemRaiseUndefinedOpcode(pVCpu);
6850 }
6851
6852 if (IEM_GET_CPL(pVCpu) != 0)
6853 {
6854 Log(("invpcid: CPL != 0 -> #GP(0)\n"));
6855 return iemRaiseGeneralProtectionFault0(pVCpu);
6856 }
6857
6858 if (IEM_IS_V86_MODE(pVCpu))
6859 {
6860 Log(("invpcid: v8086 mode -> #GP(0)\n"));
6861 return iemRaiseGeneralProtectionFault0(pVCpu);
6862 }
6863
6864 /*
6865 * Check nested-guest intercept.
6866 *
6867 * INVPCID causes a VM-exit if "enable INVPCID" and "INVLPG exiting" are
6868 * both set. We have already checked the former earlier in this function.
6869 *
6870 * CPL and virtual-8086 mode checks take priority over this VM-exit.
6871 * See Intel spec. "25.1.1 Relative Priority of Faults and VM Exits".
6872 */
6873 if ( !IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
6874 || !IEM_VMX_IS_PROCCTLS_SET(pVCpu, VMX_PROC_CTLS_INVLPG_EXIT))
6875 { /* probable */ }
6876 else
6877 {
6878 Log(("invpcid: Guest intercept -> #VM-exit\n"));
6879 IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(pVCpu, VMX_EXIT_INVPCID, VMXINSTRID_NONE, cbInstr);
6880 }
6881
6882 if (uInvpcidType > X86_INVPCID_TYPE_MAX_VALID)
6883 {
6884 Log(("invpcid: invalid/unrecognized invpcid type %#RX64 -> #GP(0)\n", uInvpcidType));
6885 return iemRaiseGeneralProtectionFault0(pVCpu);
6886 }
6887 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_EFER);
6888
6889 /*
6890 * Fetch the invpcid descriptor from guest memory.
6891 */
6892/** @todo Check if the entire 128 bits are always read for all types. Check for invalid types as well. */
6893 RTUINT128U uDesc;
6894 VBOXSTRICTRC rcStrict = iemMemFetchDataU128(pVCpu, &uDesc, iEffSeg, GCPtrInvpcidDesc);
6895 if (rcStrict == VINF_SUCCESS)
6896 {
6897 /*
6898 * Validate the descriptor.
6899 */
6900 if (uDesc.s.Lo > 0xfff)
6901 {
6902 Log(("invpcid: reserved bits set in invpcid descriptor %#RX64 -> #GP(0)\n", uDesc.s.Lo));
6903 return iemRaiseGeneralProtectionFault0(pVCpu);
6904 }
6905
6906 RTGCUINTPTR64 const GCPtrInvAddr = uDesc.s.Hi;
6907 uint8_t const uPcid = uDesc.s.Lo & UINT64_C(0xfff);
6908 uint32_t const uCr4 = pVCpu->cpum.GstCtx.cr4;
6909 uint64_t const uCr3 = pVCpu->cpum.GstCtx.cr3;
6910 switch (uInvpcidType)
6911 {
6912 case X86_INVPCID_TYPE_INDV_ADDR:
6913 {
6914 if (!IEM_IS_CANONICAL(GCPtrInvAddr))
6915 {
6916 Log(("invpcid: invalidation address %#RGP is not canonical -> #GP(0)\n", GCPtrInvAddr));
6917 return iemRaiseGeneralProtectionFault0(pVCpu);
6918 }
6919 if ( !(uCr4 & X86_CR4_PCIDE)
6920 && uPcid != 0)
6921 {
6922 Log(("invpcid: invalid pcid %#x\n", uPcid));
6923 return iemRaiseGeneralProtectionFault0(pVCpu);
6924 }
6925
6926 /* Invalidate mappings for the linear address tagged with PCID except global translations. */
6927/** @todo PGMFlushTLB is overkill for X86_INVPCID_TYPE_INDV_ADDR. Add a fGlobal parameter
6928 * to PGMInvalidatePage or add a new function to support this variation of invlpg. */
6929 PGMFlushTLB(pVCpu, uCr3, false /* fGlobal */);
6930 break;
6931 }
6932
6933 case X86_INVPCID_TYPE_SINGLE_CONTEXT:
6934 {
6935 if ( !(uCr4 & X86_CR4_PCIDE)
6936 && uPcid != 0)
6937 {
6938 Log(("invpcid: invalid pcid %#x\n", uPcid));
6939 return iemRaiseGeneralProtectionFault0(pVCpu);
6940 }
6941 /* Invalidate all mappings associated with PCID except global translations. */
6942 PGMFlushTLB(pVCpu, uCr3, false /* fGlobal */);
6943 break;
6944 }
6945
6946 case X86_INVPCID_TYPE_ALL_CONTEXT_INCL_GLOBAL:
6947 {
6948 PGMFlushTLB(pVCpu, uCr3, true /* fGlobal */);
6949 break;
6950 }
6951
6952 case X86_INVPCID_TYPE_ALL_CONTEXT_EXCL_GLOBAL:
6953 {
6954 PGMFlushTLB(pVCpu, uCr3, false /* fGlobal */);
6955 break;
6956 }
6957 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6958 }
6959 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
6960 }
6961 return rcStrict;
6962}
6963
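/*
 * Illustrative sketch (not used above): unpacking the 128-bit INVPCID
 * descriptor the way the handler above does.  The low 12 bits of the first
 * qword hold the PCID (bits 63:12 must be zero or the instruction raises
 * #GP(0)), while the second qword holds the linear address used by the
 * individual-address invalidation type.  The struct and helper names are made
 * up for this example.
 */
typedef struct IEMEXAMPLEINVPCIDDESC
{
    uint16_t uPcid;         /* Bits 11:0 of the first qword. */
    uint64_t GCPtrInvAddr;  /* The second qword. */
} IEMEXAMPLEINVPCIDDESC;

static int iemExampleParseInvpcidDesc(uint64_t uDescLo, uint64_t uDescHi, IEMEXAMPLEINVPCIDDESC *pDesc)
{
    if (uDescLo > 0xfff)    /* Any bit above the PCID field set -> reject. */
        return -1;
    pDesc->uPcid        = (uint16_t)(uDescLo & UINT64_C(0xfff));
    pDesc->GCPtrInvAddr = uDescHi;
    return 0;
}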
6964
6965/**
6966 * Implements INVD.
6967 */
6968IEM_CIMPL_DEF_0(iemCImpl_invd)
6969{
6970 if (IEM_GET_CPL(pVCpu) != 0)
6971 {
6972 Log(("invd: CPL != 0 -> #GP(0)\n"));
6973 return iemRaiseGeneralProtectionFault0(pVCpu);
6974 }
6975
6976 if (!IEM_IS_IN_GUEST(pVCpu))
6977 { /* probable */ }
6978 else if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
6979 IEM_VMX_VMEXIT_INSTR_RET(pVCpu, VMX_EXIT_INVD, cbInstr);
6980 else
6981 IEM_SVM_CHECK_INSTR_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_INVD, SVM_EXIT_INVD, 0, 0, cbInstr);
6982
6983 /* We currently take no action here. */
6984 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
6985}
6986
6987
6988/**
6989 * Implements WBINVD.
6990 */
6991IEM_CIMPL_DEF_0(iemCImpl_wbinvd)
6992{
6993 if (IEM_GET_CPL(pVCpu) != 0)
6994 {
6995 Log(("wbinvd: CPL != 0 -> #GP(0)\n"));
6996 return iemRaiseGeneralProtectionFault0(pVCpu);
6997 }
6998
6999 if (!IEM_IS_IN_GUEST(pVCpu))
7000 { /* probable */ }
7001 else if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
7002 IEM_VMX_VMEXIT_INSTR_RET(pVCpu, VMX_EXIT_WBINVD, cbInstr);
7003 else
7004 IEM_SVM_CHECK_INSTR_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_WBINVD, SVM_EXIT_WBINVD, 0, 0, cbInstr);
7005
7006 /* We currently take no action here. */
7007 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
7008}
7009
7010
7011/** Opcode 0x0f 0xaa. */
7012IEM_CIMPL_DEF_0(iemCImpl_rsm)
7013{
7014 IEM_SVM_CHECK_INSTR_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_RSM, SVM_EXIT_RSM, 0, 0, cbInstr);
7015 NOREF(cbInstr);
7016 return iemRaiseUndefinedOpcode(pVCpu);
7017}
7018
7019
7020/**
7021 * Implements RDTSC.
7022 */
7023IEM_CIMPL_DEF_0(iemCImpl_rdtsc)
7024{
7025 /*
7026 * Check preconditions.
7027 */
7028 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fTsc)
7029 return iemRaiseUndefinedOpcode(pVCpu);
7030
7031 if (IEM_GET_CPL(pVCpu) != 0)
7032 {
7033 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
7034 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_TSD)
7035 {
7036 Log(("rdtsc: CR4.TSD and CPL=%u -> #GP(0)\n", IEM_GET_CPL(pVCpu)));
7037 return iemRaiseGeneralProtectionFault0(pVCpu);
7038 }
7039 }
7040
7041 if (!IEM_IS_IN_GUEST(pVCpu))
7042 { /* probable */ }
7043 else if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
7044 && IEM_VMX_IS_PROCCTLS_SET(pVCpu, VMX_PROC_CTLS_RDTSC_EXIT))
7045 {
7046 Log(("rdtsc: Guest intercept -> VM-exit\n"));
7047 IEM_VMX_VMEXIT_INSTR_RET(pVCpu, VMX_EXIT_RDTSC, cbInstr);
7048 }
7049 else if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_RDTSC))
7050 {
7051 Log(("rdtsc: Guest intercept -> #VMEXIT\n"));
7052 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
7053 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_RDTSC, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
7054 }
7055
7056 /*
7057 * Do the job.
7058 */
7059 uint64_t uTicks = TMCpuTickGet(pVCpu);
7060#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
7061 uTicks = CPUMApplyNestedGuestTscOffset(pVCpu, uTicks);
7062#endif
7063 pVCpu->cpum.GstCtx.rax = RT_LO_U32(uTicks);
7064 pVCpu->cpum.GstCtx.rdx = RT_HI_U32(uTicks);
7065 pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX); /* For IEMExecDecodedRdtsc. */
7066 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
7067}
7068
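/*
 * Illustrative sketch (not used above): RDTSC (and RDTSCP below) return the
 * 64-bit tick count split across EDX:EAX, which is what the RT_LO_U32 /
 * RT_HI_U32 pair accomplishes above.  The helper name and output parameters
 * are made up for this example.
 */
static void iemExampleSplitTsc(uint64_t uTicks, uint32_t *puEax, uint32_t *puEdx)
{
    *puEax = (uint32_t)uTicks;          /* low dword  -> EAX */
    *puEdx = (uint32_t)(uTicks >> 32);  /* high dword -> EDX */
}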
7069
7070/**
7071 * Implements RDTSCP.
7072 */
7073IEM_CIMPL_DEF_0(iemCImpl_rdtscp)
7074{
7075 /*
7076 * Check preconditions.
7077 */
7078 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fRdTscP)
7079 return iemRaiseUndefinedOpcode(pVCpu);
7080
7081 if (RT_LIKELY( !IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
7082 || IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_RDTSCP)))
7083 { /* likely */ }
7084 else
7085 {
7086 Log(("rdtscp: Not enabled for VMX non-root mode -> #UD\n"));
7087 return iemRaiseUndefinedOpcode(pVCpu);
7088 }
7089
7090 if (IEM_GET_CPL(pVCpu) != 0)
7091 {
7092 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
7093 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_TSD)
7094 {
7095 Log(("rdtscp: CR4.TSD and CPL=%u -> #GP(0)\n", IEM_GET_CPL(pVCpu)));
7096 return iemRaiseGeneralProtectionFault0(pVCpu);
7097 }
7098 }
7099
7100 if (!IEM_IS_IN_GUEST(pVCpu))
7101 { /* probable */ }
7102 else if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
7103 && IEM_VMX_IS_PROCCTLS_SET(pVCpu, VMX_PROC_CTLS_RDTSC_EXIT))
7104 {
7105 Log(("rdtscp: Guest intercept -> VM-exit\n"));
7106 IEM_VMX_VMEXIT_INSTR_RET(pVCpu, VMX_EXIT_RDTSCP, cbInstr);
7107 }
7108 else if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_RDTSCP))
7109 {
7110 Log(("rdtscp: Guest intercept -> #VMEXIT\n"));
7111 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
7112 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_RDTSCP, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
7113 }
7114
7115 /*
7116 * Do the job.
7117 * Query the MSR first in case of trips to ring-3.
7118 */
7119 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TSC_AUX);
7120 VBOXSTRICTRC rcStrict = CPUMQueryGuestMsr(pVCpu, MSR_K8_TSC_AUX, &pVCpu->cpum.GstCtx.rcx);
7121 if (rcStrict == VINF_SUCCESS)
7122 {
7123 /* Low dword of the TSC_AUX msr only. */
7124 pVCpu->cpum.GstCtx.rcx &= UINT32_C(0xffffffff);
7125
7126 uint64_t uTicks = TMCpuTickGet(pVCpu);
7127#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
7128 uTicks = CPUMApplyNestedGuestTscOffset(pVCpu, uTicks);
7129#endif
7130 pVCpu->cpum.GstCtx.rax = RT_LO_U32(uTicks);
7131 pVCpu->cpum.GstCtx.rdx = RT_HI_U32(uTicks);
7132 pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RCX); /* For IEMExecDecodedRdtscp. */
7133 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
7134 }
7135 return rcStrict;
7136}
7137
7138
7139/**
7140 * Implements RDPMC.
7141 */
7142IEM_CIMPL_DEF_0(iemCImpl_rdpmc)
7143{
7144 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
7145
7146 if ( IEM_GET_CPL(pVCpu) != 0
7147 && !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PCE))
7148 return iemRaiseGeneralProtectionFault0(pVCpu);
7149
7150 if (!IEM_IS_IN_GUEST(pVCpu))
7151 { /* probable */ }
7152 else if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
7153 && IEM_VMX_IS_PROCCTLS_SET(pVCpu, VMX_PROC_CTLS_RDPMC_EXIT))
7154 {
7155 Log(("rdpmc: Guest intercept -> VM-exit\n"));
7156 IEM_VMX_VMEXIT_INSTR_RET(pVCpu, VMX_EXIT_RDPMC, cbInstr);
7157 }
7158 else if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_RDPMC))
7159 {
7160 Log(("rdpmc: Guest intercept -> #VMEXIT\n"));
7161 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
7162 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_RDPMC, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
7163 }
7164
7165 /** @todo Emulate performance counters, for now just return 0. */
7166 pVCpu->cpum.GstCtx.rax = 0;
7167 pVCpu->cpum.GstCtx.rdx = 0;
7168 pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX);
7169 /** @todo We should trigger a \#GP here if the CPU doesn't support the index in
7170 * ecx but see @bugref{3472}! */
7171
7172 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
7173}
7174
7175
7176/**
7177 * Implements RDMSR.
7178 */
7179IEM_CIMPL_DEF_0(iemCImpl_rdmsr)
7180{
7181 /*
7182 * Check preconditions.
7183 */
7184 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMsr)
7185 return iemRaiseUndefinedOpcode(pVCpu);
7186 if (IEM_GET_CPL(pVCpu) != 0)
7187 return iemRaiseGeneralProtectionFault0(pVCpu);
7188
7189 /*
7190 * Check nested-guest intercepts.
7191 */
7192 if (!IEM_IS_IN_GUEST(pVCpu))
7193 { /* probable */ }
7194#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
7195 else if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
7196 {
7197 if (iemVmxIsRdmsrWrmsrInterceptSet(pVCpu, VMX_EXIT_RDMSR, pVCpu->cpum.GstCtx.ecx))
7198 IEM_VMX_VMEXIT_INSTR_RET(pVCpu, VMX_EXIT_RDMSR, cbInstr);
7199 }
7200#endif
7201#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
7202 else if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_MSR_PROT))
7203 {
7204 VBOXSTRICTRC rcStrict = iemSvmHandleMsrIntercept(pVCpu, pVCpu->cpum.GstCtx.ecx, false /* fWrite */, cbInstr);
7205 if (rcStrict == VINF_SVM_VMEXIT)
7206 return VINF_SUCCESS;
7207 if (rcStrict != VINF_SVM_INTERCEPT_NOT_ACTIVE)
7208 {
7209 Log(("IEM: SVM intercepted rdmsr(%#x) failed. rc=%Rrc\n", pVCpu->cpum.GstCtx.ecx, VBOXSTRICTRC_VAL(rcStrict)));
7210 return rcStrict;
7211 }
7212 }
7213#endif
7214
7215 /*
7216 * Do the job.
7217 */
7218 RTUINT64U uValue;
7219 /** @todo make CPUMAllMsrs.cpp import the necessary MSR state. */
7220 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_ALL_MSRS);
7221
7222 VBOXSTRICTRC rcStrict = CPUMQueryGuestMsr(pVCpu, pVCpu->cpum.GstCtx.ecx, &uValue.u);
7223 if (rcStrict == VINF_SUCCESS)
7224 {
7225 pVCpu->cpum.GstCtx.rax = uValue.s.Lo;
7226 pVCpu->cpum.GstCtx.rdx = uValue.s.Hi;
7227 pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX);
7228
7229 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
7230 }
7231
7232#ifndef IN_RING3
7233 /* Deferred to ring-3. */
7234 if (rcStrict == VINF_CPUM_R3_MSR_READ)
7235 {
7236 Log(("IEM: rdmsr(%#x) -> ring-3\n", pVCpu->cpum.GstCtx.ecx));
7237 return rcStrict;
7238 }
7239#endif
7240
7241    /* Often an unimplemented MSR or MSR bit, so worth logging. */
7242 if (pVCpu->iem.s.cLogRelRdMsr < 32)
7243 {
7244 pVCpu->iem.s.cLogRelRdMsr++;
7245 LogRel(("IEM: rdmsr(%#x) -> #GP(0)\n", pVCpu->cpum.GstCtx.ecx));
7246 }
7247 else
7248 Log(( "IEM: rdmsr(%#x) -> #GP(0)\n", pVCpu->cpum.GstCtx.ecx));
7249 AssertMsgReturn(rcStrict == VERR_CPUM_RAISE_GP_0, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)), VERR_IPE_UNEXPECTED_STATUS);
7250 return iemRaiseGeneralProtectionFault0(pVCpu);
7251}
7252
7253
7254/**
7255 * Implements WRMSR.
7256 */
7257IEM_CIMPL_DEF_0(iemCImpl_wrmsr)
7258{
7259 /*
7260 * Check preconditions.
7261 */
7262 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMsr)
7263 return iemRaiseUndefinedOpcode(pVCpu);
7264 if (IEM_GET_CPL(pVCpu) != 0)
7265 return iemRaiseGeneralProtectionFault0(pVCpu);
7266
7267 RTUINT64U uValue;
7268 uValue.s.Lo = pVCpu->cpum.GstCtx.eax;
7269 uValue.s.Hi = pVCpu->cpum.GstCtx.edx;
7270
7271 uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx;
7272
7273 /** @todo make CPUMAllMsrs.cpp import the necessary MSR state. */
7274 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_ALL_MSRS);
7275
7276 /*
7277 * Check nested-guest intercepts.
7278 */
7279 if (!IEM_IS_IN_GUEST(pVCpu))
7280 { /* probable */ }
7281#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
7282 else if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
7283 {
7284 if (iemVmxIsRdmsrWrmsrInterceptSet(pVCpu, VMX_EXIT_WRMSR, idMsr))
7285 IEM_VMX_VMEXIT_INSTR_RET(pVCpu, VMX_EXIT_WRMSR, cbInstr);
7286 }
7287#endif
7288#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
7289 else if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_MSR_PROT))
7290 {
7291 VBOXSTRICTRC rcStrict = iemSvmHandleMsrIntercept(pVCpu, idMsr, true /* fWrite */, cbInstr);
7292 if (rcStrict == VINF_SVM_VMEXIT)
7293 return VINF_SUCCESS;
7294 if (rcStrict != VINF_SVM_INTERCEPT_NOT_ACTIVE)
7295 {
7296            Log(("IEM: SVM intercepted wrmsr(%#x) failed. rc=%Rrc\n", idMsr, VBOXSTRICTRC_VAL(rcStrict)));
7297 return rcStrict;
7298 }
7299 }
7300#endif
7301
7302 if (idMsr == MSR_K6_EFER)
7303 IEMTLBTRACE_LOAD_EFER(pVCpu, uValue.u, pVCpu->cpum.GstCtx.msrEFER);
7304
7305 /*
7306 * Do the job.
7307 */
7308 VBOXSTRICTRC rcStrict = CPUMSetGuestMsr(pVCpu, idMsr, uValue.u);
7309 if (rcStrict == VINF_SUCCESS)
7310 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
7311
7312#ifndef IN_RING3
7313 /* Deferred to ring-3. */
7314 if (rcStrict == VINF_CPUM_R3_MSR_WRITE)
7315 {
7316 Log(("IEM: wrmsr(%#x) -> ring-3\n", idMsr));
7317 return rcStrict;
7318 }
7319#endif
7320
7321    /* Often an unimplemented MSR or MSR bit, so worth logging. */
7322 if (pVCpu->iem.s.cLogRelWrMsr < 32)
7323 {
7324 pVCpu->iem.s.cLogRelWrMsr++;
7325 LogRel(("IEM: wrmsr(%#x,%#x`%08x) -> #GP(0)\n", idMsr, uValue.s.Hi, uValue.s.Lo));
7326 }
7327 else
7328 Log(( "IEM: wrmsr(%#x,%#x`%08x) -> #GP(0)\n", idMsr, uValue.s.Hi, uValue.s.Lo));
7329 AssertMsgReturn(rcStrict == VERR_CPUM_RAISE_GP_0, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)), VERR_IPE_UNEXPECTED_STATUS);
7330 return iemRaiseGeneralProtectionFault0(pVCpu);
7331}
7332
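/*
 * Illustrative sketch (not used above): WRMSR takes its 64-bit operand from
 * the EDX:EAX register pair, which is what the RTUINT64U union assembles in
 * the handler above (and what RDMSR splits apart again).  The helper name is
 * made up for this example.
 */
static uint64_t iemExampleWrmsrOperand(uint32_t uEax, uint32_t uEdx)
{
    return ((uint64_t)uEdx << 32) | uEax;
}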
7333
7334/**
7335 * Implements 'IN eAX, port'.
7336 *
7337 * @param u16Port The source port.
7338 * @param cbReg The register size.
7339 * @param   bImmAndEffAddrMode  Bit 7: Set if the port came from an immediate
7340 *                              operand, clear for the implicit DX register.
7341 *                              Bits 3-0: Effective address mode.
7342 */
7343IEM_CIMPL_DEF_3(iemCImpl_in, uint16_t, u16Port, uint8_t, cbReg, uint8_t, bImmAndEffAddrMode)
7344{
7345 /*
7346 * GCM intercept.
7347 *
7348 * This must be placed before the IOPL check as the mesa driver intercept
7349 * would otherwise trigger a #GP(0).
7350 */
7351 if (!IEM_IS_IN_GUEST(pVCpu) && GCMIsInterceptingIOPortRead(pVCpu, u16Port, cbReg))
7352 {
7353 VBOXSTRICTRC rcStrict = GCMInterceptedIOPortRead(pVCpu, &pVCpu->cpum.GstCtx, u16Port, cbReg);
7354 if (rcStrict == VINF_GCM_HANDLED_ADVANCE_RIP || rcStrict == VINF_GCM_HANDLED)
7355 {
7356 Log(("iemCImpl_in: u16Port=%#x cbReg=%d was handled by GCMIOPortRead (%d)\n", u16Port, cbReg, VBOXSTRICTRC_VAL(rcStrict)));
7357 if (rcStrict == VINF_GCM_HANDLED_ADVANCE_RIP)
7358 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
7359 else
7360 rcStrict = VINF_SUCCESS;
7361 return rcStrict;
7362 }
7363 Assert(rcStrict == VERR_GCM_NOT_HANDLED);
7364 }
7365
7366 /*
7367 * CPL check
7368 */
7369 VBOXSTRICTRC rcStrict = iemHlpCheckPortIOPermission(pVCpu, u16Port, cbReg);
7370 if (rcStrict != VINF_SUCCESS)
7371 return rcStrict;
7372
7373 if (!IEM_IS_IN_GUEST(pVCpu))
7374 { /* probable */ }
7375
7376 /*
7377 * Check VMX nested-guest IO intercept.
7378 */
7379#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
7380 else if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
7381 {
7382 rcStrict = iemVmxVmexitInstrIo(pVCpu, VMXINSTRID_IO_IN, u16Port, RT_BOOL(bImmAndEffAddrMode & 0x80), cbReg, cbInstr);
7383 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
7384 return rcStrict;
7385 }
7386#endif
7387
7388 /*
7389 * Check SVM nested-guest IO intercept.
7390 */
7391#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
7392 else if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IOIO_PROT))
7393 {
7394 uint8_t cAddrSizeBits;
7395 switch (bImmAndEffAddrMode & 0xf)
7396 {
7397 case IEMMODE_16BIT: cAddrSizeBits = 16; break;
7398 case IEMMODE_32BIT: cAddrSizeBits = 32; break;
7399 case IEMMODE_64BIT: cAddrSizeBits = 64; break;
7400 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7401 }
7402 rcStrict = iemSvmHandleIOIntercept(pVCpu, u16Port, SVMIOIOTYPE_IN, cbReg, cAddrSizeBits, 0 /* N/A - iEffSeg */,
7403 false /* fRep */, false /* fStrIo */, cbInstr);
7404 if (rcStrict == VINF_SVM_VMEXIT)
7405 return VINF_SUCCESS;
7406 if (rcStrict != VINF_SVM_INTERCEPT_NOT_ACTIVE)
7407 {
7408 Log(("iemCImpl_in: iemSvmHandleIOIntercept failed (u16Port=%#x, cbReg=%u) rc=%Rrc\n", u16Port, cbReg,
7409 VBOXSTRICTRC_VAL(rcStrict)));
7410 return rcStrict;
7411 }
7412 }
7413#endif
7414#if !defined(VBOX_WITH_NESTED_HWVIRT_VMX) && !defined(VBOX_WITH_NESTED_HWVIRT_SVM)
7415 RT_NOREF(bImmAndEffAddrMode);
7416#endif
7417
7418 /*
7419 * Perform the I/O.
7420 */
7421 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
7422 uint32_t u32Value = 0;
7423 rcStrict = IOMIOPortRead(pVM, pVCpu, u16Port, &u32Value, cbReg);
7424 if (IOM_SUCCESS(rcStrict))
7425 {
7426 switch (cbReg)
7427 {
7428 case 1: pVCpu->cpum.GstCtx.al = (uint8_t)u32Value; break;
7429 case 2: pVCpu->cpum.GstCtx.ax = (uint16_t)u32Value; break;
7430 case 4: pVCpu->cpum.GstCtx.rax = u32Value; break;
7431 default: AssertFailedReturn(VERR_IEM_IPE_3);
7432 }
7433
7434 pVCpu->iem.s.cPotentialExits++;
7435 if (rcStrict != VINF_SUCCESS)
7436 iemSetPassUpStatus(pVCpu, rcStrict);
7437
7438 /*
7439 * Check for I/O breakpoints before we complete the instruction.
7440 */
7441 uint32_t const fDr7 = pVCpu->cpum.GstCtx.dr[7];
7442 if (RT_UNLIKELY( ( ( (fDr7 & X86_DR7_ENABLED_MASK)
7443 && X86_DR7_ANY_RW_IO(fDr7)
7444 && (pVCpu->cpum.GstCtx.cr4 & X86_CR4_DE))
7445 || pVM->dbgf.ro.cEnabledHwIoBreakpoints > 0)
7446 && rcStrict == VINF_SUCCESS))
7447 {
7448 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR0_DR3 | CPUMCTX_EXTRN_DR6);
7449 pVCpu->cpum.GstCtx.eflags.uBoth |= DBGFBpCheckIo2(pVM, pVCpu, u16Port, cbReg);
7450 }
7451
7452 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
7453 }
7454
7455 return rcStrict;
7456}
7457
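/*
 * Illustrative sketch (not used above): the bImmAndEffAddrMode parameter
 * packing used by the IN/OUT handlers.  Bit 7 is set when the port was given
 * as an immediate operand (clear for the implicit DX form) and bits 3:0 carry
 * the IEMMODE effective address mode.  The helper names are made up for this
 * example.
 */
static uint8_t iemExamplePackIoArgs(int fImmediatePort, uint8_t bEffAddrMode)
{
    return (uint8_t)((fImmediatePort ? 0x80 : 0x00) | (bEffAddrMode & 0x0f));
}

static void iemExampleUnpackIoArgs(uint8_t bPacked, int *pfImmediatePort, uint8_t *pbEffAddrMode)
{
    *pfImmediatePort = (bPacked & 0x80) != 0;
    *pbEffAddrMode   = bPacked & 0x0f;
}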
7458
7459/**
7460 * Implements 'IN eAX, DX'.
7461 *
7462 * @param cbReg The register size.
7463 * @param enmEffAddrMode Effective address mode.
7464 */
7465IEM_CIMPL_DEF_2(iemCImpl_in_eAX_DX, uint8_t, cbReg, IEMMODE, enmEffAddrMode)
7466{
7467 return IEM_CIMPL_CALL_3(iemCImpl_in, pVCpu->cpum.GstCtx.dx, cbReg, 0 /* fImm */ | enmEffAddrMode);
7468}
7469
7470
7471/**
7472 * Implements 'OUT port, eAX'.
7473 *
7474 * @param u16Port The destination port.
7475 * @param cbReg The register size.
7476 * @param   bImmAndEffAddrMode  Bit 7: Set if the port came from an immediate
7477 *                              operand, clear for the implicit DX register.
7478 *                              Bits 3-0: Effective address mode.
7479 */
7480IEM_CIMPL_DEF_3(iemCImpl_out, uint16_t, u16Port, uint8_t, cbReg, uint8_t, bImmAndEffAddrMode)
7481{
7482 /*
7483 * CPL check
7484 */
7485 VBOXSTRICTRC rcStrict = iemHlpCheckPortIOPermission(pVCpu, u16Port, cbReg);
7486 if (rcStrict != VINF_SUCCESS)
7487 return rcStrict;
7488
7489 if (!IEM_IS_IN_GUEST(pVCpu))
7490 { /* probable */ }
7491
7492 /*
7493 * Check VMX nested-guest I/O intercept.
7494 */
7495#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
7496 else if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
7497 {
7498 rcStrict = iemVmxVmexitInstrIo(pVCpu, VMXINSTRID_IO_OUT, u16Port, RT_BOOL(bImmAndEffAddrMode & 0x80), cbReg, cbInstr);
7499 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
7500 return rcStrict;
7501 }
7502#endif
7503
7504 /*
7505 * Check SVM nested-guest I/O intercept.
7506 */
7507#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
7508 else if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IOIO_PROT))
7509 {
7510 uint8_t cAddrSizeBits;
7511 switch (bImmAndEffAddrMode & 0xf)
7512 {
7513 case IEMMODE_16BIT: cAddrSizeBits = 16; break;
7514 case IEMMODE_32BIT: cAddrSizeBits = 32; break;
7515 case IEMMODE_64BIT: cAddrSizeBits = 64; break;
7516 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7517 }
7518 rcStrict = iemSvmHandleIOIntercept(pVCpu, u16Port, SVMIOIOTYPE_OUT, cbReg, cAddrSizeBits, 0 /* N/A - iEffSeg */,
7519 false /* fRep */, false /* fStrIo */, cbInstr);
7520 if (rcStrict == VINF_SVM_VMEXIT)
7521 return VINF_SUCCESS;
7522 if (rcStrict != VINF_SVM_INTERCEPT_NOT_ACTIVE)
7523 {
7524 Log(("iemCImpl_out: iemSvmHandleIOIntercept failed (u16Port=%#x, cbReg=%u) rc=%Rrc\n", u16Port, cbReg,
7525 VBOXSTRICTRC_VAL(rcStrict)));
7526 return rcStrict;
7527 }
7528 }
7529#endif
7530#if !defined(VBOX_WITH_NESTED_HWVIRT_VMX) && !defined(VBOX_WITH_NESTED_HWVIRT_SVM)
7531 RT_NOREF(bImmAndEffAddrMode);
7532#endif
7533
7534 /*
7535 * Perform the I/O.
7536 */
7537 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
7538 uint32_t u32Value;
7539 switch (cbReg)
7540 {
7541 case 1: u32Value = pVCpu->cpum.GstCtx.al; break;
7542 case 2: u32Value = pVCpu->cpum.GstCtx.ax; break;
7543 case 4: u32Value = pVCpu->cpum.GstCtx.eax; break;
7544 default: AssertFailedReturn(VERR_IEM_IPE_4);
7545 }
7546 rcStrict = IOMIOPortWrite(pVM, pVCpu, u16Port, u32Value, cbReg);
7547 if (IOM_SUCCESS(rcStrict))
7548 {
7549 pVCpu->iem.s.cPotentialExits++;
7550 if (rcStrict != VINF_SUCCESS)
7551 iemSetPassUpStatus(pVCpu, rcStrict);
7552
7553 /*
7554 * Check for I/O breakpoints before we complete the instruction.
7555 */
7556 uint32_t const fDr7 = pVCpu->cpum.GstCtx.dr[7];
7557 if (RT_UNLIKELY( ( ( (fDr7 & X86_DR7_ENABLED_MASK)
7558 && X86_DR7_ANY_RW_IO(fDr7)
7559 && (pVCpu->cpum.GstCtx.cr4 & X86_CR4_DE))
7560 || pVM->dbgf.ro.cEnabledHwIoBreakpoints > 0)
7561 && rcStrict == VINF_SUCCESS))
7562 {
7563 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR0_DR3 | CPUMCTX_EXTRN_DR6);
7564 pVCpu->cpum.GstCtx.eflags.uBoth |= DBGFBpCheckIo2(pVM, pVCpu, u16Port, cbReg);
7565 }
7566
7567 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
7568 }
7569 return rcStrict;
7570}
7571
7572
7573/**
7574 * Implements 'OUT DX, eAX'.
7575 *
7576 * @param cbReg The register size.
7577 * @param enmEffAddrMode Effective address mode.
7578 */
7579IEM_CIMPL_DEF_2(iemCImpl_out_DX_eAX, uint8_t, cbReg, IEMMODE, enmEffAddrMode)
7580{
7581 return IEM_CIMPL_CALL_3(iemCImpl_out, pVCpu->cpum.GstCtx.dx, cbReg, 0 /* fImm */ | enmEffAddrMode);
7582}
7583
7584
7585/**
7586 * Implements 'CLI'.
7587 */
7588IEM_CIMPL_DEF_0(iemCImpl_cli)
7589{
7590 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
7591#ifdef LOG_ENABLED
7592 uint32_t const fEflOld = fEfl;
7593#endif
7594
7595 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR4);
7596 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE)
7597 {
7598 uint8_t const uIopl = X86_EFL_GET_IOPL(fEfl);
7599 if (!(fEfl & X86_EFL_VM))
7600 {
7601 if (IEM_GET_CPL(pVCpu) <= uIopl)
7602 fEfl &= ~X86_EFL_IF;
7603 else if ( IEM_GET_CPL(pVCpu) == 3
7604 && (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PVI) )
7605 fEfl &= ~X86_EFL_VIF;
7606 else
7607 return iemRaiseGeneralProtectionFault0(pVCpu);
7608 }
7609 /* V8086 */
7610 else if (uIopl == 3)
7611 fEfl &= ~X86_EFL_IF;
7612 else if ( uIopl < 3
7613 && (pVCpu->cpum.GstCtx.cr4 & X86_CR4_VME) )
7614 fEfl &= ~X86_EFL_VIF;
7615 else
7616 return iemRaiseGeneralProtectionFault0(pVCpu);
7617 }
7618 /* real mode */
7619 else
7620 fEfl &= ~X86_EFL_IF;
7621
7622 /* Commit. */
7623 IEMMISC_SET_EFL(pVCpu, fEfl);
7624 VBOXSTRICTRC const rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
7625 Log2(("CLI: %#x -> %#x\n", fEflOld, fEfl));
7626 return rcStrict;
7627}
7628
7629
7630/**
7631 * Implements 'STI'.
7632 */
7633IEM_CIMPL_DEF_0(iemCImpl_sti)
7634{
7635 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
7636 uint32_t const fEflOld = fEfl;
7637
7638 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR4);
7639 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE)
7640 {
7641 uint8_t const uIopl = X86_EFL_GET_IOPL(fEfl);
7642 if (!(fEfl & X86_EFL_VM))
7643 {
7644 if (IEM_GET_CPL(pVCpu) <= uIopl)
7645 fEfl |= X86_EFL_IF;
7646 else if ( IEM_GET_CPL(pVCpu) == 3
7647 && (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PVI)
7648 && !(fEfl & X86_EFL_VIP) )
7649 fEfl |= X86_EFL_VIF;
7650 else
7651 return iemRaiseGeneralProtectionFault0(pVCpu);
7652 }
7653 /* V8086 */
7654 else if (uIopl == 3)
7655 fEfl |= X86_EFL_IF;
7656 else if ( uIopl < 3
7657 && (pVCpu->cpum.GstCtx.cr4 & X86_CR4_VME)
7658 && !(fEfl & X86_EFL_VIP) )
7659 fEfl |= X86_EFL_VIF;
7660 else
7661 return iemRaiseGeneralProtectionFault0(pVCpu);
7662 }
7663 /* real mode */
7664 else
7665 fEfl |= X86_EFL_IF;
7666
7667 /*
7668 * Commit.
7669 *
7670 * Note! Setting the shadow interrupt flag must be done after RIP updating.
7671 */
7672 IEMMISC_SET_EFL(pVCpu, fEfl);
7673 VBOXSTRICTRC const rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
7674 if (!(fEflOld & X86_EFL_IF) && (fEfl & X86_EFL_IF))
7675 {
7676        /** @todo only set the shadow flag if it was clear before? */
7677 CPUMSetInInterruptShadowSti(&pVCpu->cpum.GstCtx);
7678 }
7679 pVCpu->iem.s.fTbCurInstrIsSti = true;
7680 Log2(("STI: %#x -> %#x\n", fEflOld, fEfl));
7681 return rcStrict;
7682}
7683
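/*
 * Illustrative sketch (not used above): the privilege decision shared by the
 * CLI/STI handlers, reduced to which flag, if any, the instruction may
 * toggle.  Returns 1 for EFLAGS.IF, 2 for EFLAGS.VIF (the CR4.PVI/VME
 * virtual-interrupt cases) and 0 for #GP(0).  The parameters and the return
 * convention are made up for this example; the STI-only VIP check is left to
 * the caller.
 */
static int iemExampleCliStiTarget(int fProtMode, int fV86Mode, unsigned uCpl, unsigned uIopl,
                                  int fCr4Pvi, int fCr4Vme)
{
    if (!fProtMode)
        return 1;                       /* real mode: always IF */
    if (!fV86Mode)
    {
        if (uCpl <= uIopl)
            return 1;                   /* privileged enough to touch IF */
        if (uCpl == 3 && fCr4Pvi)
            return 2;                   /* protected-mode virtual interrupts */
        return 0;                       /* #GP(0) */
    }
    if (uIopl == 3)
        return 1;                       /* V8086 with IOPL=3: IF */
    if (fCr4Vme)
        return 2;                       /* V8086 mode extensions: VIF */
    return 0;                           /* #GP(0) */
}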
7684
7685/**
7686 * Implements 'HLT'.
7687 */
7688IEM_CIMPL_DEF_0(iemCImpl_hlt)
7689{
7690 if (IEM_GET_CPL(pVCpu) != 0)
7691 return iemRaiseGeneralProtectionFault0(pVCpu);
7692
7693 if (!IEM_IS_IN_GUEST(pVCpu))
7694 { /* probable */ }
7695 else if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
7696 && IEM_VMX_IS_PROCCTLS_SET(pVCpu, VMX_PROC_CTLS_HLT_EXIT))
7697 {
7698 Log2(("hlt: Guest intercept -> VM-exit\n"));
7699 IEM_VMX_VMEXIT_INSTR_RET(pVCpu, VMX_EXIT_HLT, cbInstr);
7700 }
7701 else if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_HLT))
7702 {
7703 Log2(("hlt: Guest intercept -> #VMEXIT\n"));
7704 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
7705 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_HLT, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
7706 }
7707
7708 /** @todo finish: This ASSUMES that iemRegAddToRipAndFinishingClearingRF won't
7709 * be returning any status codes relating to non-guest events being raised, as
7710 * we'll mess up the guest HALT otherwise. */
7711 VBOXSTRICTRC rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
7712 if (rcStrict == VINF_SUCCESS)
7713 rcStrict = VINF_EM_HALT;
7714 return rcStrict;
7715}
7716
7717
7718/**
7719 * Implements 'MONITOR'.
7720 */
7721IEM_CIMPL_DEF_1(iemCImpl_monitor, uint8_t, iEffSeg)
7722{
7723 /*
7724 * Permission checks.
7725 */
7726 if (IEM_GET_CPL(pVCpu) != 0)
7727 {
7728 Log2(("monitor: CPL != 0\n"));
7729 return iemRaiseUndefinedOpcode(pVCpu); /** @todo MSR[0xC0010015].MonMwaitUserEn if we care. */
7730 }
7731 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMonitorMWait)
7732 {
7733 Log2(("monitor: Not in CPUID\n"));
7734 return iemRaiseUndefinedOpcode(pVCpu);
7735 }
7736
7737 /*
7738 * Check VMX guest-intercept.
7739 * This should be considered a fault-like VM-exit.
7740 * See Intel spec. 25.1.1 "Relative Priority of Faults and VM Exits".
7741 */
7742 if ( !IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
7743 || !IEM_VMX_IS_PROCCTLS_SET(pVCpu, VMX_PROC_CTLS_MONITOR_EXIT))
7744 { /* probable */ }
7745 else
7746 {
7747 Log2(("monitor: Guest intercept -> #VMEXIT\n"));
7748 IEM_VMX_VMEXIT_INSTR_RET(pVCpu, VMX_EXIT_MONITOR, cbInstr);
7749 }
7750
7751 /*
7752 * Gather the operands and validate them.
7753 */
7754 RTGCPTR GCPtrMem = IEM_IS_64BIT_CODE(pVCpu) ? pVCpu->cpum.GstCtx.rax : pVCpu->cpum.GstCtx.eax;
7755 uint32_t uEcx = pVCpu->cpum.GstCtx.ecx;
7756 uint32_t uEdx = pVCpu->cpum.GstCtx.edx;
7757/** @todo Test whether EAX or ECX is processed first, i.e. do we get \#PF or
7758 * \#GP first. */
7759 if (uEcx != 0)
7760 {
7761 Log2(("monitor rax=%RX64, ecx=%RX32, edx=%RX32; ECX != 0 -> #GP(0)\n", GCPtrMem, uEcx, uEdx)); NOREF(uEdx);
7762 return iemRaiseGeneralProtectionFault0(pVCpu);
7763 }
7764
7765 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_DATA, iEffSeg, 1, &GCPtrMem);
7766 if (rcStrict != VINF_SUCCESS)
7767 return rcStrict;
7768
7769 RTGCPHYS GCPhysMem;
7770 /** @todo access size */
7771 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, 1, IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_DATA, &GCPhysMem);
7772 if (rcStrict != VINF_SUCCESS)
7773 return rcStrict;
7774
7775 if (!IEM_IS_IN_GUEST(pVCpu))
7776 { /* probable */ }
7777#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
7778 else if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
7779 && IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_VIRT_APIC_ACCESS))
7780 {
7781 /*
7782 * MONITOR does not access the memory, just monitors the address. However,
7783 * if the address falls in the APIC-access page, the address monitored must
7784 * instead be the corresponding address in the virtual-APIC page.
7785 *
7786 * See Intel spec. 29.4.4 "Instruction-Specific Considerations".
7787 */
7788 rcStrict = iemVmxVirtApicAccessUnused(pVCpu, &GCPhysMem, 1, IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_DATA);
7789 if ( rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE
7790 && rcStrict != VINF_VMX_MODIFIES_BEHAVIOR)
7791 return rcStrict;
7792 }
7793#endif
7794 else if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_MONITOR))
7795 {
7796 Log2(("monitor: Guest intercept -> #VMEXIT\n"));
7797 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
7798 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_MONITOR, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
7799 }
7800
7801 /*
7802 * Call EM to prepare the monitor/wait.
7803 */
7804 rcStrict = EMMonitorWaitPrepare(pVCpu, pVCpu->cpum.GstCtx.rax, pVCpu->cpum.GstCtx.rcx, pVCpu->cpum.GstCtx.rdx, GCPhysMem);
7805 Assert(rcStrict == VINF_SUCCESS);
7806 if (rcStrict == VINF_SUCCESS)
7807 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
7808 return rcStrict;
7809}
7810
7811
7812/**
7813 * Implements 'MWAIT'.
7814 */
7815IEM_CIMPL_DEF_0(iemCImpl_mwait)
7816{
7817 /*
7818 * Permission checks.
7819 */
7820 if (IEM_GET_CPL(pVCpu) != 0)
7821 {
7822 Log2(("mwait: CPL != 0\n"));
7823 /** @todo MSR[0xC0010015].MonMwaitUserEn if we care. (Remember to check
7824 * EFLAGS.VM then.) */
7825 return iemRaiseUndefinedOpcode(pVCpu);
7826 }
7827 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMonitorMWait)
7828 {
7829 Log2(("mwait: Not in CPUID\n"));
7830 return iemRaiseUndefinedOpcode(pVCpu);
7831 }
7832
7833 /* Check VMX nested-guest intercept. */
7834 if ( !IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
7835 || !IEM_VMX_IS_PROCCTLS_SET(pVCpu, VMX_PROC_CTLS_MWAIT_EXIT))
7836 { /* probable */ }
7837 else
7838 IEM_VMX_VMEXIT_MWAIT_RET(pVCpu, EMMonitorIsArmed(pVCpu), cbInstr);
7839
7840 /*
7841 * Gather the operands and validate them.
7842 */
7843 uint32_t const uEax = pVCpu->cpum.GstCtx.eax;
7844 uint32_t const uEcx = pVCpu->cpum.GstCtx.ecx;
7845 if (uEcx != 0)
7846 {
7847 /* Only supported extension is break on IRQ when IF=0. */
7848 if (uEcx > 1)
7849 {
7850 Log2(("mwait eax=%RX32, ecx=%RX32; ECX > 1 -> #GP(0)\n", uEax, uEcx));
7851 return iemRaiseGeneralProtectionFault0(pVCpu);
7852 }
7853 uint32_t fMWaitFeatures = 0;
7854 uint32_t uIgnore = 0;
7855 CPUMGetGuestCpuId(pVCpu, 5, 0, -1 /*f64BitMode*/, &uIgnore, &uIgnore, &fMWaitFeatures, &uIgnore);
7856 if ( (fMWaitFeatures & (X86_CPUID_MWAIT_ECX_EXT | X86_CPUID_MWAIT_ECX_BREAKIRQIF0))
7857 != (X86_CPUID_MWAIT_ECX_EXT | X86_CPUID_MWAIT_ECX_BREAKIRQIF0))
7858 {
7859 Log2(("mwait eax=%RX32, ecx=%RX32; break-on-IRQ-IF=0 extension not enabled -> #GP(0)\n", uEax, uEcx));
7860 return iemRaiseGeneralProtectionFault0(pVCpu);
7861 }
7862
7863#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
7864 /*
7865         * If the interrupt-window exiting control is set, or a virtual interrupt is pending
7866         * for delivery, and interrupts are disabled, the processor does not enter its
7867         * mwait state but rather passes control to the next instruction.
7868 *
7869 * See Intel spec. 25.3 "Changes to Instruction Behavior In VMX Non-root Operation".
7870 */
7871 if ( !IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
7872 || pVCpu->cpum.GstCtx.eflags.Bits.u1IF)
7873 { /* probable */ }
7874 else if ( IEM_VMX_IS_PROCCTLS_SET(pVCpu, VMX_PROC_CTLS_INT_WINDOW_EXIT)
7875 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST))
7876            /** @todo finish: check this out after we move the int window stuff out of the
7877 * run loop and into the instruction finishing logic here. */
7878 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
7879#endif
7880 }
7881
7882 /*
7883 * Check SVM nested-guest mwait intercepts.
7884 */
7885 if (!IEM_IS_IN_GUEST(pVCpu))
7886 { /* probable */ }
7887 else if ( IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_MWAIT_ARMED)
7888 && EMMonitorIsArmed(pVCpu))
7889 {
7890 Log2(("mwait: Guest intercept (monitor hardware armed) -> #VMEXIT\n"));
7891 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
7892 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_MWAIT_ARMED, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
7893 }
7894 else if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_MWAIT))
7895 {
7896 Log2(("mwait: Guest intercept -> #VMEXIT\n"));
7897 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
7898 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_MWAIT, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
7899 }
7900
7901 /*
7902 * Call EM to prepare the monitor/wait.
7903 *
7904     * This will return VINF_EM_HALT.  If the trap flag is set, we may
7905     * override it when executing iemRegAddToRipAndFinishingClearingRF, ASSUMING
7906     * that it will only return guest-related events.
7907 */
7908 VBOXSTRICTRC rcStrict = EMMonitorWaitPerform(pVCpu, uEax, uEcx);
7909
7910 /** @todo finish: This needs more thinking as we should suppress internal
7911 * debugger events here, or we'll bugger up the guest state even more than we
7912     * already do around VINF_EM_HALT. */
7913 VBOXSTRICTRC rcStrict2 = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
7914 if (rcStrict2 != VINF_SUCCESS)
7915 {
7916 Log2(("mwait: %Rrc (perform) -> %Rrc (finish)!\n", VBOXSTRICTRC_VAL(rcStrict), VBOXSTRICTRC_VAL(rcStrict2) ));
7917 rcStrict = rcStrict2;
7918 }
7919
7920 return rcStrict;
7921}
7922
7923
7924/**
7925 * Implements 'SWAPGS'.
7926 */
7927IEM_CIMPL_DEF_0(iemCImpl_swapgs)
7928{
7929 Assert(IEM_IS_64BIT_CODE(pVCpu)); /* Caller checks this. */
7930
7931 /*
7932 * Permission checks.
7933 */
7934 if (IEM_GET_CPL(pVCpu) != 0)
7935 {
7936 Log2(("swapgs: CPL != 0\n"));
7937 return iemRaiseUndefinedOpcode(pVCpu);
7938 }
7939
7940 /*
7941 * Do the job.
7942 */
7943 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_KERNEL_GS_BASE | CPUMCTX_EXTRN_GS);
7944 uint64_t uOtherGsBase = pVCpu->cpum.GstCtx.msrKERNELGSBASE;
7945 pVCpu->cpum.GstCtx.msrKERNELGSBASE = pVCpu->cpum.GstCtx.gs.u64Base;
7946 pVCpu->cpum.GstCtx.gs.u64Base = uOtherGsBase;
7947
7948 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
7949}
7950
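/*
 * Illustrative sketch (not used above): SWAPGS simply exchanges the current
 * GS base with the KERNEL_GS_BASE MSR value, which is what the two context
 * field assignments above do.  The helper name is made up for this example.
 */
static void iemExampleSwapGs(uint64_t *puGsBase, uint64_t *puKernelGsBase)
{
    uint64_t const uTmp = *puKernelGsBase;
    *puKernelGsBase = *puGsBase;
    *puGsBase       = uTmp;
}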
7951
7952#ifndef VBOX_WITHOUT_CPUID_HOST_CALL
7953/**
7954 * Handles a CPUID call.
7955 */
7956static VBOXSTRICTRC iemCpuIdVBoxCall(PVMCPUCC pVCpu, uint32_t iFunction,
7957 uint32_t *pEax, uint32_t *pEbx, uint32_t *pEcx, uint32_t *pEdx)
7958{
7959 switch (iFunction)
7960 {
7961 case VBOX_CPUID_FN_ID:
7962 LogFlow(("iemCpuIdVBoxCall: VBOX_CPUID_FN_ID\n"));
7963 *pEax = VBOX_CPUID_RESP_ID_EAX;
7964 *pEbx = VBOX_CPUID_RESP_ID_EBX;
7965 *pEcx = VBOX_CPUID_RESP_ID_ECX;
7966 *pEdx = VBOX_CPUID_RESP_ID_EDX;
7967 break;
7968
7969 case VBOX_CPUID_FN_LOG:
7970 {
7971 CPUM_IMPORT_EXTRN_RET(pVCpu, CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RBX | CPUMCTX_EXTRN_RSI
7972 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
7973
7974 /* Validate input. */
7975 uint32_t cchToLog = *pEdx;
7976 if (cchToLog <= _2M)
7977 {
7978 uint32_t const uLogPicker = *pEbx;
7979 if (uLogPicker <= 1)
7980 {
7981 /* Resolve the logger. */
7982 PRTLOGGER const pLogger = !uLogPicker
7983 ? RTLogDefaultInstanceEx(UINT32_MAX) : RTLogRelGetDefaultInstanceEx(UINT32_MAX);
7984 if (pLogger)
7985 {
7986 /* Copy over the data: */
7987 RTGCPTR GCPtrSrc = pVCpu->cpum.GstCtx.rsi;
7988 while (cchToLog > 0)
7989 {
7990 uint32_t cbToMap = GUEST_PAGE_SIZE - (GCPtrSrc & GUEST_PAGE_OFFSET_MASK);
7991 if (cbToMap > cchToLog)
7992 cbToMap = cchToLog;
7993                            /** @todo Extend iemMemMap to allow page-sized accesses and avoid 7
7994                             *        unnecessary calls & iterations per page. */
7995 if (cbToMap > 512)
7996 cbToMap = 512;
7997 uint8_t bUnmapInfo;
7998 void *pvSrc = NULL;
7999 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvSrc, &bUnmapInfo, cbToMap,
8000 UINT8_MAX, GCPtrSrc, IEM_ACCESS_DATA_R, 0);
8001 if (rcStrict == VINF_SUCCESS)
8002 {
8003 RTLogBulkNestedWrite(pLogger, (const char *)pvSrc, cbToMap, "Gst:");
8004 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
8005 AssertRCSuccessReturn(VBOXSTRICTRC_VAL(rcStrict), rcStrict);
8006 }
8007 else
8008 {
8009 Log(("iemCpuIdVBoxCall: %Rrc at %RGp LB %#x\n", VBOXSTRICTRC_VAL(rcStrict), GCPtrSrc, cbToMap));
8010 return rcStrict;
8011 }
8012
8013 /* Advance. */
8014 pVCpu->cpum.GstCtx.rsi = GCPtrSrc += cbToMap;
8015 *pEdx = cchToLog -= cbToMap;
8016 }
8017 *pEax = VINF_SUCCESS;
8018 }
8019 else
8020 *pEax = (uint32_t)VERR_NOT_FOUND;
8021 }
8022 else
8023 *pEax = (uint32_t)VERR_NOT_FOUND;
8024 }
8025 else
8026 *pEax = (uint32_t)VERR_TOO_MUCH_DATA;
8027 *pEdx = VBOX_CPUID_RESP_GEN_EDX;
8028 *pEcx = VBOX_CPUID_RESP_GEN_ECX;
8029 *pEbx = VBOX_CPUID_RESP_GEN_EBX;
8030 break;
8031 }
8032
8033 default:
8034 LogFlow(("iemCpuIdVBoxCall: Invalid function %#x (%#x, %#x)\n", iFunction, *pEbx, *pEdx));
8035 *pEax = (uint32_t)VERR_INVALID_FUNCTION;
8036 *pEbx = (uint32_t)VERR_INVALID_FUNCTION;
8037 *pEcx = (uint32_t)VERR_INVALID_FUNCTION;
8038 *pEdx = (uint32_t)VERR_INVALID_FUNCTION;
8039 break;
8040 }
8041 return VINF_SUCCESS;
8042}
8043#endif /* VBOX_WITHOUT_CPUID_HOST_CALL */
8044
8045/**
8046 * Implements 'CPUID'.
8047 */
8048IEM_CIMPL_DEF_0(iemCImpl_cpuid)
8049{
8050 if (!IEM_IS_IN_GUEST(pVCpu))
8051 { /* probable */ }
8052 else if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
8053 {
8054 Log2(("cpuid: Guest intercept -> VM-exit\n"));
8055 IEM_VMX_VMEXIT_INSTR_RET(pVCpu, VMX_EXIT_CPUID, cbInstr);
8056 }
8057 else if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_CPUID))
8058 {
8059 Log2(("cpuid: Guest intercept -> #VMEXIT\n"));
8060 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
8061 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_CPUID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
8062 }
8063
8064
8065 uint32_t const uEax = pVCpu->cpum.GstCtx.eax;
8066 uint32_t const uEcx = pVCpu->cpum.GstCtx.ecx;
8067
8068#ifndef VBOX_WITHOUT_CPUID_HOST_CALL
8069 /*
8070 * CPUID host call backdoor.
8071 */
8072 if ( uEax == VBOX_CPUID_REQ_EAX_FIXED
8073 && (uEcx & VBOX_CPUID_REQ_ECX_FIXED_MASK) == VBOX_CPUID_REQ_ECX_FIXED
8074 && pVCpu->CTX_SUFF(pVM)->iem.s.fCpuIdHostCall)
8075 {
8076 VBOXSTRICTRC rcStrict = iemCpuIdVBoxCall(pVCpu, uEcx & VBOX_CPUID_REQ_ECX_FN_MASK,
8077 &pVCpu->cpum.GstCtx.eax, &pVCpu->cpum.GstCtx.ebx,
8078 &pVCpu->cpum.GstCtx.ecx, &pVCpu->cpum.GstCtx.edx);
8079 if (rcStrict != VINF_SUCCESS)
8080 return rcStrict;
8081 }
8082 /*
8083 * Regular CPUID.
8084 */
8085 else
8086#endif
8087 CPUMGetGuestCpuId(pVCpu, uEax, uEcx, pVCpu->cpum.GstCtx.cs.Attr.n.u1Long,
8088 &pVCpu->cpum.GstCtx.eax, &pVCpu->cpum.GstCtx.ebx, &pVCpu->cpum.GstCtx.ecx, &pVCpu->cpum.GstCtx.edx);
8089
8090 pVCpu->cpum.GstCtx.rax &= UINT32_C(0xffffffff);
8091 pVCpu->cpum.GstCtx.rbx &= UINT32_C(0xffffffff);
8092 pVCpu->cpum.GstCtx.rcx &= UINT32_C(0xffffffff);
8093 pVCpu->cpum.GstCtx.rdx &= UINT32_C(0xffffffff);
8094 pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RBX);
8095
8096 pVCpu->iem.s.cPotentialExits++;
8097 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8098}
8099
8100
8101/**
8102 * Implements 'AAD'.
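 *
 * AAD converts the two unpacked BCD digits in AH:AL to binary:
 * AL = AL + AH * imm and AH = 0 (imm is 10 for the plain 'AAD' encoding).
 * E.g. AX=0x0205 (digits 2 and 5) with imm 10 yields AX=0x0019 (25).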
8103 *
8104 * @param bImm The immediate operand.
8105 */
8106IEM_CIMPL_DEF_1(iemCImpl_aad, uint8_t, bImm)
8107{
8108 uint16_t const ax = pVCpu->cpum.GstCtx.ax;
8109 uint8_t const al = (uint8_t)ax + (uint8_t)(ax >> 8) * bImm;
8110 pVCpu->cpum.GstCtx.ax = al;
8111 iemHlpUpdateArithEFlagsU8(pVCpu, al,
8112 X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF,
8113 X86_EFL_OF | X86_EFL_AF | X86_EFL_CF);
8114
8115 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8116}
8117
8118
8119/**
8120 * Implements 'AAM'.
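 *
 * AAM splits the binary value in AL into two unpacked BCD digits:
 * AH = AL / imm and AL = AL % imm (imm is 10 for the plain 'AAM' encoding).
 * E.g. AL=0x19 (25) with imm 10 yields AX=0x0205 (digits 2 and 5).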
8121 *
8122 * @param bImm The immediate operand. Cannot be 0.
8123 */
8124IEM_CIMPL_DEF_1(iemCImpl_aam, uint8_t, bImm)
8125{
8126 Assert(bImm != 0); /* #DE on 0 is handled in the decoder. */
8127
8128 uint16_t const ax = pVCpu->cpum.GstCtx.ax;
8129 uint8_t const al = (uint8_t)ax % bImm;
8130 uint8_t const ah = (uint8_t)ax / bImm;
8131 pVCpu->cpum.GstCtx.ax = (ah << 8) + al;
8132 iemHlpUpdateArithEFlagsU8(pVCpu, al,
8133 X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF,
8134 X86_EFL_OF | X86_EFL_AF | X86_EFL_CF);
8135
8136 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8137}
8138
8139
8140/**
8141 * Implements 'DAA'.
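 *
 * DAA adjusts AL after an addition of two packed BCD operands: if the low
 * nibble is above 9 or AF is set, 6 is added to AL (setting AF); if the
 * original AL was above 0x99 or CF was set, 0x60 is added as well (setting
 * CF).  E.g. 0x15 + 0x27 = 0x3C, and DAA turns 0x3C into 0x42, the packed
 * BCD result of 15 + 27.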
8142 */
8143IEM_CIMPL_DEF_0(iemCImpl_daa)
8144{
8145 uint8_t const al = pVCpu->cpum.GstCtx.al;
8146 bool const fCarry = pVCpu->cpum.GstCtx.eflags.Bits.u1CF;
8147
8148 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1AF
8149 || (al & 0xf) >= 10)
8150 {
8151 pVCpu->cpum.GstCtx.al = al + 6;
8152 pVCpu->cpum.GstCtx.eflags.Bits.u1AF = 1;
8153 }
8154 else
8155 pVCpu->cpum.GstCtx.eflags.Bits.u1AF = 0;
8156
8157 if (al >= 0x9a || fCarry)
8158 {
8159 pVCpu->cpum.GstCtx.al += 0x60;
8160 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 1;
8161 }
8162 else
8163 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 0;
8164
8165 iemHlpUpdateArithEFlagsU8(pVCpu, pVCpu->cpum.GstCtx.al, X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF, X86_EFL_OF);
8166 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8167}
8168
8169
8170/**
8171 * Implements 'DAS'.
8172 */
8173IEM_CIMPL_DEF_0(iemCImpl_das)
8174{
8175 uint8_t const uInputAL = pVCpu->cpum.GstCtx.al;
8176 bool const fCarry = pVCpu->cpum.GstCtx.eflags.Bits.u1CF;
8177
8178 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1AF
8179 || (uInputAL & 0xf) >= 10)
8180 {
8181 pVCpu->cpum.GstCtx.eflags.Bits.u1AF = 1;
8182 if (uInputAL < 6)
8183 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 1;
8184 pVCpu->cpum.GstCtx.al = uInputAL - 6;
8185 }
8186 else
8187 {
8188 pVCpu->cpum.GstCtx.eflags.Bits.u1AF = 0;
8189 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 0;
8190 }
8191
8192 if (uInputAL >= 0x9a || fCarry)
8193 {
8194 pVCpu->cpum.GstCtx.al -= 0x60;
8195 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 1;
8196 }
8197
8198 iemHlpUpdateArithEFlagsU8(pVCpu, pVCpu->cpum.GstCtx.al, X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF, X86_EFL_OF);
8199 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8200}
8201
8202
8203/**
8204 * Implements 'AAA'.
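 *
 * AAA adjusts AL after an addition of two unpacked BCD digits: if the low
 * nibble of AL is above 9 or AF is set, 0x106 is added to AX and AF/CF are
 * set, otherwise AF/CF are cleared; the high nibble of AL is then masked off.
 * The AMD and Intel paths below differ only in how the remaining arithmetic
 * flags are derived.  E.g. 8 + 7 gives AL=0x0F, and AAA turns AX=0x000F into
 * AX=0x0105 (digit 5 with a carry into AH).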
8205 */
8206IEM_CIMPL_DEF_0(iemCImpl_aaa)
8207{
8208 if (IEM_IS_GUEST_CPU_AMD(pVCpu))
8209 {
8210 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1AF
8211 || (pVCpu->cpum.GstCtx.ax & 0xf) >= 10)
8212 {
8213 pVCpu->cpum.GstCtx.eflags.uBoth = iemAImpl_add_u16(pVCpu->cpum.GstCtx.eflags.uBoth, &pVCpu->cpum.GstCtx.ax, 0x106);
8214 pVCpu->cpum.GstCtx.eflags.Bits.u1AF = 1;
8215 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 1;
8216 }
8217 else
8218 {
8219 iemHlpUpdateArithEFlagsU16(pVCpu, pVCpu->cpum.GstCtx.ax, X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF, X86_EFL_OF);
8220 pVCpu->cpum.GstCtx.eflags.Bits.u1AF = 0;
8221 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 0;
8222 }
8223 pVCpu->cpum.GstCtx.ax &= UINT16_C(0xff0f);
8224 }
8225 else
8226 {
8227 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1AF
8228 || (pVCpu->cpum.GstCtx.ax & 0xf) >= 10)
8229 {
8230 pVCpu->cpum.GstCtx.ax += UINT16_C(0x106);
8231 pVCpu->cpum.GstCtx.eflags.Bits.u1AF = 1;
8232 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 1;
8233 }
8234 else
8235 {
8236 pVCpu->cpum.GstCtx.eflags.Bits.u1AF = 0;
8237 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 0;
8238 }
8239 pVCpu->cpum.GstCtx.ax &= UINT16_C(0xff0f);
8240 iemHlpUpdateArithEFlagsU8(pVCpu, pVCpu->cpum.GstCtx.al, X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF, X86_EFL_OF);
8241 }
8242
8243 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8244}
8245
8246
8247/**
8248 * Implements 'AAS'.
8249 */
8250IEM_CIMPL_DEF_0(iemCImpl_aas)
8251{
8252 if (IEM_IS_GUEST_CPU_AMD(pVCpu))
8253 {
8254 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1AF
8255 || (pVCpu->cpum.GstCtx.ax & 0xf) >= 10)
8256 {
8257 pVCpu->cpum.GstCtx.eflags.uBoth = iemAImpl_sub_u16(pVCpu->cpum.GstCtx.eflags.uBoth, &pVCpu->cpum.GstCtx.ax, 0x106);
8258 pVCpu->cpum.GstCtx.eflags.Bits.u1AF = 1;
8259 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 1;
8260 }
8261 else
8262 {
8263 iemHlpUpdateArithEFlagsU16(pVCpu, pVCpu->cpum.GstCtx.ax, X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF, X86_EFL_OF);
8264 pVCpu->cpum.GstCtx.eflags.Bits.u1AF = 0;
8265 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 0;
8266 }
8267 pVCpu->cpum.GstCtx.ax &= UINT16_C(0xff0f);
8268 }
8269 else
8270 {
8271 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1AF
8272 || (pVCpu->cpum.GstCtx.ax & 0xf) >= 10)
8273 {
8274 pVCpu->cpum.GstCtx.ax -= UINT16_C(0x106);
8275 pVCpu->cpum.GstCtx.eflags.Bits.u1AF = 1;
8276 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 1;
8277 }
8278 else
8279 {
8280 pVCpu->cpum.GstCtx.eflags.Bits.u1AF = 0;
8281 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 0;
8282 }
8283 pVCpu->cpum.GstCtx.ax &= UINT16_C(0xff0f);
8284 iemHlpUpdateArithEFlagsU8(pVCpu, pVCpu->cpum.GstCtx.al, X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF, X86_EFL_OF);
8285 }
8286
8287 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8288}
8289
8290
8291/**
8292 * Implements the 16-bit version of 'BOUND'.
8293 *
8294 * @note We have separate 16-bit and 32-bit variants of this function due to
8295 *          the decoder using unsigned parameters, whereas we want signed ones to
8296 * do the job. This is significant for a recompiler.
8297 */
8298IEM_CIMPL_DEF_3(iemCImpl_bound_16, int16_t, idxArray, int16_t, idxLowerBound, int16_t, idxUpperBound)
8299{
8300 /*
8301 * Check if the index is inside the bounds, otherwise raise #BR.
8302 */
8303 if ( idxArray >= idxLowerBound
8304 && idxArray <= idxUpperBound)
8305 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8306 return iemRaiseBoundRangeExceeded(pVCpu);
8307}
8308
8309
8310/**
8311 * Implements the 32-bit version of 'BOUND'.
8312 */
8313IEM_CIMPL_DEF_3(iemCImpl_bound_32, int32_t, idxArray, int32_t, idxLowerBound, int32_t, idxUpperBound)
8314{
8315 /*
8316 * Check if the index is inside the bounds, otherwise raise #BR.
8317 */
8318 if ( idxArray >= idxLowerBound
8319 && idxArray <= idxUpperBound)
8320 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8321 return iemRaiseBoundRangeExceeded(pVCpu);
8322}
8323
8324
8325
8326/*
8327 * Instantiate the various string operation combinations.
8328 */
8329#define OP_SIZE 8
8330#define ADDR_SIZE 16
8331#include "IEMAllCImplStrInstr.cpp.h"
8332#define OP_SIZE 8
8333#define ADDR_SIZE 32
8334#include "IEMAllCImplStrInstr.cpp.h"
8335#define OP_SIZE 8
8336#define ADDR_SIZE 64
8337#include "IEMAllCImplStrInstr.cpp.h"
8338
8339#define OP_SIZE 16
8340#define ADDR_SIZE 16
8341#include "IEMAllCImplStrInstr.cpp.h"
8342#define OP_SIZE 16
8343#define ADDR_SIZE 32
8344#include "IEMAllCImplStrInstr.cpp.h"
8345#define OP_SIZE 16
8346#define ADDR_SIZE 64
8347#include "IEMAllCImplStrInstr.cpp.h"
8348
8349#define OP_SIZE 32
8350#define ADDR_SIZE 16
8351#include "IEMAllCImplStrInstr.cpp.h"
8352#define OP_SIZE 32
8353#define ADDR_SIZE 32
8354#include "IEMAllCImplStrInstr.cpp.h"
8355#define OP_SIZE 32
8356#define ADDR_SIZE 64
8357#include "IEMAllCImplStrInstr.cpp.h"
8358
8359#define OP_SIZE 64
8360#define ADDR_SIZE 32
8361#include "IEMAllCImplStrInstr.cpp.h"
8362#define OP_SIZE 64
8363#define ADDR_SIZE 64
8364#include "IEMAllCImplStrInstr.cpp.h"
8365
8366
8367/**
8368 * Implements 'XGETBV'.
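 *
 * XGETBV reads the extended control register selected by ECX (only XCR0 is
 * implemented) into EDX:EAX; CR4.OSXSAVE must be set or the instruction
 * raises \#UD.  Typical guest usage (illustrative only):
 * @code
 *      xor     ecx, ecx        ; select XCR0
 *      xgetbv                  ; EDX:EAX = XCR0, i.e. the enabled XSAVE components
 * @endcode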
8369 */
8370IEM_CIMPL_DEF_0(iemCImpl_xgetbv)
8371{
8372 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
8373 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE)
8374 {
8375 uint32_t uEcx = pVCpu->cpum.GstCtx.ecx;
8376 switch (uEcx)
8377 {
8378 case 0:
8379 break;
8380
8381 case 1: /** @todo Implement XCR1 support. */
8382 default:
8383 Log(("xgetbv ecx=%RX32 -> #GP(0)\n", uEcx));
8384 return iemRaiseGeneralProtectionFault0(pVCpu);
8385
8386 }
8387 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_XCRx);
8388 pVCpu->cpum.GstCtx.rax = RT_LO_U32(pVCpu->cpum.GstCtx.aXcr[uEcx]);
8389 pVCpu->cpum.GstCtx.rdx = RT_HI_U32(pVCpu->cpum.GstCtx.aXcr[uEcx]);
8390
8391 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8392 }
8393 Log(("xgetbv CR4.OSXSAVE=0 -> UD\n"));
8394 return iemRaiseUndefinedOpcode(pVCpu);
8395}
8396
8397
8398/**
8399 * Implements 'XSETBV'.
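 *
 * XSETBV writes EDX:EAX into the extended control register selected by ECX
 * (only XCR0 here).  CR4.OSXSAVE clear yields \#UD, CPL != 0 yields \#GP(0),
 * and an invalid new XCR0 value is rejected by CPUMSetGuestXcr0 with \#GP(0).
 * Note that the SVM intercept is checked before the CPL test, see the @todo
 * below.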
8400 */
8401IEM_CIMPL_DEF_0(iemCImpl_xsetbv)
8402{
8403 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE)
8404 {
8405 /** @todo explain why this happens before the CPL check. */
8406 if (!IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_XSETBV))
8407 { /* probable */ }
8408 else
8409 {
8410 Log2(("xsetbv: Guest intercept -> #VMEXIT\n"));
8411 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
8412 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XSETBV, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
8413 }
8414
8415 if (IEM_GET_CPL(pVCpu) == 0)
8416 {
8417 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_XCRx);
8418
8419 if (!IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
8420 { /* probable */ }
8421 else
8422 IEM_VMX_VMEXIT_INSTR_RET(pVCpu, VMX_EXIT_XSETBV, cbInstr);
8423
8424 uint32_t uEcx = pVCpu->cpum.GstCtx.ecx;
8425 uint64_t uNewValue = RT_MAKE_U64(pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.edx);
8426 switch (uEcx)
8427 {
8428 case 0:
8429 {
8430 int rc = CPUMSetGuestXcr0(pVCpu, uNewValue);
8431 if (rc == VINF_SUCCESS)
8432 break;
8433 Assert(rc == VERR_CPUM_RAISE_GP_0);
8434 Log(("xsetbv ecx=%RX32 (newvalue=%RX64) -> #GP(0)\n", uEcx, uNewValue));
8435 return iemRaiseGeneralProtectionFault0(pVCpu);
8436 }
8437
8438 case 1: /** @todo Implement XCR1 support. */
8439 default:
8440 Log(("xsetbv ecx=%RX32 (newvalue=%RX64) -> #GP(0)\n", uEcx, uNewValue));
8441 return iemRaiseGeneralProtectionFault0(pVCpu);
8442
8443 }
8444
8445 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8446 }
8447
8448 Log(("xsetbv cpl=%u -> GP(0)\n", IEM_GET_CPL(pVCpu)));
8449 return iemRaiseGeneralProtectionFault0(pVCpu);
8450 }
8451 Log(("xsetbv CR4.OSXSAVE=0 -> UD\n"));
8452 return iemRaiseUndefinedOpcode(pVCpu);
8453}
8454
8455#ifndef RT_ARCH_ARM64
8456# ifdef IN_RING3
8457
8458/** Argument package for iemCImpl_cmpxchg16b_fallback_rendezvous_callback. */
8459struct IEMCIMPLCX16ARGS
8460{
8461 PRTUINT128U pu128Dst;
8462 PRTUINT128U pu128RaxRdx;
8463 PRTUINT128U pu128RbxRcx;
8464 uint32_t *pEFlags;
8465# ifdef VBOX_STRICT
8466 uint32_t cCalls;
8467# endif
8468};
8469
8470/**
8471 * @callback_method_impl{FNVMMEMTRENDEZVOUS,
8472 * Worker for iemCImpl_cmpxchg16b_fallback_rendezvous}
8473 */
8474static DECLCALLBACK(VBOXSTRICTRC) iemCImpl_cmpxchg16b_fallback_rendezvous_callback(PVM pVM, PVMCPUCC pVCpu, void *pvUser)
8475{
8476 RT_NOREF(pVM, pVCpu);
8477 struct IEMCIMPLCX16ARGS *pArgs = (struct IEMCIMPLCX16ARGS *)pvUser;
8478# ifdef VBOX_STRICT
8479 Assert(pArgs->cCalls == 0);
8480 pArgs->cCalls++;
8481# endif
8482
8483 iemAImpl_cmpxchg16b_fallback(pArgs->pu128Dst, pArgs->pu128RaxRdx, pArgs->pu128RbxRcx, pArgs->pEFlags);
8484 return VINF_SUCCESS;
8485}
8486
8487# endif /* IN_RING3 */
8488
8489/**
8490 * Implements 'CMPXCHG16B' fallback using rendezvous.
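 *
 * This is used when the host has no native 16-byte compare-and-exchange.  In
 * ring-3 all EMTs are pulled into a VMMR3EmtRendezvous ONCE-type rendezvous so
 * the non-atomic iemAImpl_cmpxchg16b_fallback worker cannot race other vCPUs;
 * outside ring-3 we return VERR_IEM_ASPECT_NOT_IMPLEMENTED to force a trip to
 * ring-3 instead.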
8491 */
8492IEM_CIMPL_DEF_5(iemCImpl_cmpxchg16b_fallback_rendezvous, PRTUINT128U, pu128Dst, PRTUINT128U, pu128RaxRdx,
8493 PRTUINT128U, pu128RbxRcx, uint32_t *, pEFlags, uint8_t, bUnmapInfo)
8494{
8495# ifdef IN_RING3
8496 struct IEMCIMPLCX16ARGS Args;
8497 Args.pu128Dst = pu128Dst;
8498 Args.pu128RaxRdx = pu128RaxRdx;
8499 Args.pu128RbxRcx = pu128RbxRcx;
8500 Args.pEFlags = pEFlags;
8501# ifdef VBOX_STRICT
8502 Args.cCalls = 0;
8503# endif
8504 VBOXSTRICTRC rcStrict = VMMR3EmtRendezvous(pVCpu->CTX_SUFF(pVM), VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE,
8505 iemCImpl_cmpxchg16b_fallback_rendezvous_callback, &Args);
8506 Assert(Args.cCalls == 1);
8507 if (rcStrict == VINF_SUCCESS)
8508 {
8509 /* Duplicated tail code. */
8510 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
8511 if (rcStrict == VINF_SUCCESS)
8512 {
8513 pVCpu->cpum.GstCtx.eflags.u = *pEFlags; /* IEM_MC_COMMIT_EFLAGS */
8514 if (!(*pEFlags & X86_EFL_ZF))
8515 {
8516 pVCpu->cpum.GstCtx.rax = pu128RaxRdx->s.Lo;
8517 pVCpu->cpum.GstCtx.rdx = pu128RaxRdx->s.Hi;
8518 }
8519 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8520 }
8521 }
8522 return rcStrict;
8523# else
8524 RT_NOREF(pVCpu, cbInstr, pu128Dst, pu128RaxRdx, pu128RbxRcx, pEFlags, bUnmapInfo);
8525 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; /* This should get us to ring-3 for now. Should perhaps be replaced later. */
8526# endif
8527}
8528
8529#endif /* RT_ARCH_ARM64 */
8530
8531/**
8532 * Implements 'CLFLUSH' and 'CLFLUSHOPT'.
8533 *
8534 * This is implemented in C because it triggers load-like behaviour without
8535 * actually reading anything. Since that's not so common, it's implemented
8536 * here.
8537 *
8538 * @param iEffSeg The effective segment.
8539 * @param GCPtrEff The address of the image.
8540 */
8541IEM_CIMPL_DEF_2(iemCImpl_clflush_clflushopt, uint8_t, iEffSeg, RTGCPTR, GCPtrEff)
8542{
8543 /*
8544 * Pretend to do a load w/o reading (see also iemCImpl_monitor and iemMemMap).
8545 */
8546 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_DATA, iEffSeg, 1, &GCPtrEff);
8547 if (rcStrict == VINF_SUCCESS)
8548 {
8549 RTGCPHYS GCPhysMem;
8550 /** @todo access size. */
8551 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrEff, 1, IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_DATA, &GCPhysMem);
8552 if (rcStrict == VINF_SUCCESS)
8553 {
8554#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
8555 if ( !IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
8556 || !IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_VIRT_APIC_ACCESS))
8557 { /* probable */ }
8558 else
8559 {
8560 /*
8561 * CLFLUSH/CLFLUSHOPT does not access the memory, but flushes the cache-line
8562 * that contains the address. However, if the address falls in the APIC-access
8563 * page, the address flushed must instead be the corresponding address in the
8564 * virtual-APIC page.
8565 *
8566 * See Intel spec. 29.4.4 "Instruction-Specific Considerations".
8567 */
8568 rcStrict = iemVmxVirtApicAccessUnused(pVCpu, &GCPhysMem, 1, IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_DATA);
8569 if ( rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE
8570 && rcStrict != VINF_VMX_MODIFIES_BEHAVIOR)
8571 return rcStrict;
8572 }
8573#endif
8574 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8575 }
8576 }
8577
8578 return rcStrict;
8579}
8580
8581
8582/**
8583 * Implements 'FINIT' and 'FNINIT'.
8584 *
8585 * @param fCheckXcpts Whether to check for unmasked pending exceptions or
8586 * not.
8587 */
8588IEM_CIMPL_DEF_1(iemCImpl_finit, bool, fCheckXcpts)
8589{
8590 /*
8591 * Exceptions.
8592 */
8593 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
8594 if (pVCpu->cpum.GstCtx.cr0 & (X86_CR0_EM | X86_CR0_TS))
8595 return iemRaiseDeviceNotAvailable(pVCpu);
8596
8597 iemFpuActualizeStateForChange(pVCpu);
8598 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_X87);
8599
8600 /* FINIT: Raise #MF on pending exception(s): */
8601 if (fCheckXcpts && (pVCpu->cpum.GstCtx.XState.x87.FSW & X86_FSW_ES))
8602 return iemRaiseMathFault(pVCpu);
8603
8604 /*
8605 * Reset the state.
8606 */
8607 PX86XSAVEAREA pXState = &pVCpu->cpum.GstCtx.XState;
8608
8609 /* Rotate the stack to account for changed TOS. */
8610 iemFpuRotateStackSetTop(&pXState->x87, 0);
8611
8612 pXState->x87.FCW = 0x37f;
8613 pXState->x87.FSW = 0;
8614 pXState->x87.FTW = 0x00; /* 0 - empty. */
8615 /** @todo Intel says the instruction and data pointers are not cleared on
8616     * 387, presume that the 8087 and 287 don't do so either. */
8617 /** @todo test this stuff. */
8618 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_386)
8619 {
8620 pXState->x87.FPUDP = 0;
8621 pXState->x87.DS = 0; //??
8622 pXState->x87.Rsrvd2 = 0;
8623 pXState->x87.FPUIP = 0;
8624 pXState->x87.CS = 0; //??
8625 pXState->x87.Rsrvd1 = 0;
8626 }
8627 pXState->x87.FOP = 0;
8628
8629 iemHlpUsedFpu(pVCpu);
8630 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8631}
8632
8633
8634/**
8635 * Implements 'FXSAVE'.
8636 *
8637 * @param iEffSeg The effective segment.
8638 * @param GCPtrEff The address of the image.
8639 * @param enmEffOpSize The operand size (only REX.W really matters).
8640 */
8641IEM_CIMPL_DEF_3(iemCImpl_fxsave, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize)
8642{
8643 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX);
8644
8645 /** @todo check out bugref{1529} and AMD behaviour */
8646
8647 /*
8648 * Raise exceptions.
8649 */
8650 if (pVCpu->cpum.GstCtx.cr0 & (X86_CR0_TS | X86_CR0_EM))
8651 return iemRaiseDeviceNotAvailable(pVCpu);
8652
8653 /*
8654 * Access the memory.
8655 */
8656 uint8_t bUnmapInfo;
8657 void *pvMem512;
8658 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvMem512, &bUnmapInfo, 512,
8659 iEffSeg, GCPtrEff, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE,
8660 15 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_GP_OR_AC);
8661 if (rcStrict != VINF_SUCCESS)
8662 return rcStrict;
8663 PX86FXSTATE pDst = (PX86FXSTATE)pvMem512;
8664 PCX86FXSTATE pSrc = &pVCpu->cpum.GstCtx.XState.x87;
8665
8666 /*
8667 * Store the registers.
8668 */
8669    /** @todo CPU/VM detection possible! If CR4.OSFXSR=0 it's
8670     * implementation specific whether MXCSR and XMM0-XMM7 are saved. */
8671
8672 /* common for all formats */
8673 pDst->FCW = pSrc->FCW;
8674 pDst->FSW = pSrc->FSW;
8675 pDst->FTW = pSrc->FTW & UINT16_C(0xff);
8676 pDst->FOP = pSrc->FOP;
8677 pDst->MXCSR = pSrc->MXCSR;
8678 pDst->MXCSR_MASK = CPUMGetGuestMxCsrMask(pVCpu->CTX_SUFF(pVM));
8679 for (uint32_t i = 0; i < RT_ELEMENTS(pDst->aRegs); i++)
8680 {
8681 /** @todo Testcase: What actually happens to the 6 reserved bytes? I'm clearing
8682 * them for now... */
8683 pDst->aRegs[i].au32[0] = pSrc->aRegs[i].au32[0];
8684 pDst->aRegs[i].au32[1] = pSrc->aRegs[i].au32[1];
8685 pDst->aRegs[i].au32[2] = pSrc->aRegs[i].au32[2] & UINT32_C(0xffff);
8686 pDst->aRegs[i].au32[3] = 0;
8687 }
8688
8689 /* FPU IP, CS, DP and DS. */
8690 pDst->FPUIP = pSrc->FPUIP;
8691 pDst->CS = pSrc->CS;
8692 pDst->FPUDP = pSrc->FPUDP;
8693 pDst->DS = pSrc->DS;
8694 if (enmEffOpSize == IEMMODE_64BIT)
8695 {
8696 /* Save upper 16-bits of FPUIP (IP:CS:Rsvd1) and FPUDP (DP:DS:Rsvd2). */
8697 pDst->Rsrvd1 = pSrc->Rsrvd1;
8698 pDst->Rsrvd2 = pSrc->Rsrvd2;
8699 }
8700 else
8701 {
8702 pDst->Rsrvd1 = 0;
8703 pDst->Rsrvd2 = 0;
8704 }
8705
8706 /* XMM registers. Skipped in 64-bit CPL0 if EFER.FFXSR (AMD only) is set. */
8707 if ( !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_FFXSR)
8708 || !IEM_IS_64BIT_CODE(pVCpu)
8709 || IEM_GET_CPL(pVCpu) != 0)
8710 {
8711 uint32_t cXmmRegs = IEM_IS_64BIT_CODE(pVCpu) ? 16 : 8;
8712 for (uint32_t i = 0; i < cXmmRegs; i++)
8713 pDst->aXMM[i] = pSrc->aXMM[i];
8714 /** @todo Testcase: What happens to the reserved XMM registers? Untouched,
8715 * right? */
8716 }
8717
8718 /*
8719 * Commit the memory.
8720 */
8721 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
8722 if (rcStrict != VINF_SUCCESS)
8723 return rcStrict;
8724
8725 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8726}
8727
8728
8729/**
8730 * Implements 'FXRSTOR'.
8731 *
8732 * @param iEffSeg The effective segment register for @a GCPtrEff.
8733 * @param GCPtrEff The address of the image.
8734 * @param enmEffOpSize The operand size (only REX.W really matters).
8735 */
8736IEM_CIMPL_DEF_3(iemCImpl_fxrstor, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize)
8737{
8738 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX);
8739
8740 /** @todo check out bugref{1529} and AMD behaviour */
8741
8742 /*
8743 * Raise exceptions.
8744 */
8745 if (pVCpu->cpum.GstCtx.cr0 & (X86_CR0_TS | X86_CR0_EM))
8746 return iemRaiseDeviceNotAvailable(pVCpu);
8747
8748 /*
8749 * Access the memory.
8750 */
8751 uint8_t bUnmapInfo;
8752 void *pvMem512;
8753 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvMem512, &bUnmapInfo, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_R,
8754 15 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_GP_OR_AC);
8755 if (rcStrict != VINF_SUCCESS)
8756 return rcStrict;
8757 PCX86FXSTATE pSrc = (PCX86FXSTATE)pvMem512;
8758 PX86FXSTATE pDst = &pVCpu->cpum.GstCtx.XState.x87;
8759
8760 /*
8761 * Check the state for stuff which will #GP(0).
8762 */
8763 uint32_t const fMXCSR = pSrc->MXCSR;
8764 uint32_t const fMXCSR_MASK = CPUMGetGuestMxCsrMask(pVCpu->CTX_SUFF(pVM));
8765 if (fMXCSR & ~fMXCSR_MASK)
8766 {
8767 Log(("fxrstor: MXCSR=%#x (MXCSR_MASK=%#x) -> #GP(0)\n", fMXCSR, fMXCSR_MASK));
8768 return iemRaiseGeneralProtectionFault0(pVCpu);
8769 }
8770
8771 /*
8772 * Load the registers.
8773 */
8774    /** @todo CPU/VM detection possible! If CR4.OSFXSR=0 it's
8775 * implementation specific whether MXCSR and XMM0-XMM7 are
8776 * restored according to Intel.
8777 * AMD says MXCSR and XMM registers are never loaded if
8778 * CR4.OSFXSR=0.
8779 */
8780
8781 /* common for all formats */
8782 pDst->FCW = pSrc->FCW;
8783 pDst->FSW = pSrc->FSW;
8784 pDst->FTW = pSrc->FTW & UINT16_C(0xff);
8785 pDst->FOP = pSrc->FOP;
8786 pDst->MXCSR = fMXCSR;
8787 /* (MXCSR_MASK is read-only) */
8788 for (uint32_t i = 0; i < RT_ELEMENTS(pSrc->aRegs); i++)
8789 {
8790 pDst->aRegs[i].au32[0] = pSrc->aRegs[i].au32[0];
8791 pDst->aRegs[i].au32[1] = pSrc->aRegs[i].au32[1];
8792 pDst->aRegs[i].au32[2] = pSrc->aRegs[i].au32[2] & UINT32_C(0xffff);
8793 pDst->aRegs[i].au32[3] = 0;
8794 }
8795
8796 /* FPU IP, CS, DP and DS. */
8797 /** @todo AMD says this is only done if FSW.ES is set after loading. */
8798 if (enmEffOpSize == IEMMODE_64BIT)
8799 {
8800 pDst->FPUIP = pSrc->FPUIP;
8801 pDst->CS = pSrc->CS;
8802 pDst->Rsrvd1 = pSrc->Rsrvd1;
8803 pDst->FPUDP = pSrc->FPUDP;
8804 pDst->DS = pSrc->DS;
8805 pDst->Rsrvd2 = pSrc->Rsrvd2;
8806 }
8807 else
8808 {
8809 pDst->FPUIP = pSrc->FPUIP;
8810 pDst->CS = pSrc->CS;
8811 pDst->Rsrvd1 = 0;
8812 pDst->FPUDP = pSrc->FPUDP;
8813 pDst->DS = pSrc->DS;
8814 pDst->Rsrvd2 = 0;
8815 }
8816
8817 /* XMM registers. Skipped in 64-bit CPL0 if EFER.FFXSR (AMD only) is set.
8818 * Does not affect MXCSR, only registers.
8819 */
8820 if ( !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_FFXSR)
8821 || !IEM_IS_64BIT_CODE(pVCpu)
8822 || IEM_GET_CPL(pVCpu) != 0)
8823 {
8824 uint32_t cXmmRegs = IEM_IS_64BIT_CODE(pVCpu) ? 16 : 8;
8825 for (uint32_t i = 0; i < cXmmRegs; i++)
8826 pDst->aXMM[i] = pSrc->aXMM[i];
8827 }
8828
8829 pDst->FCW &= ~X86_FCW_ZERO_MASK | X86_FCW_IC_MASK; /* Intel 10980xe allows setting the IC bit. Win 3.11 CALC.EXE sets it. */
8830 iemFpuRecalcExceptionStatus(pDst);
8831
8832 if (pDst->FSW & X86_FSW_ES)
8833 Log11(("fxrstor: %04x:%08RX64: loading state with pending FPU exception (FSW=%#x)\n",
8834 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pSrc->FSW));
8835
8836 /*
8837 * Unmap the memory.
8838 */
8839 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
8840 if (rcStrict != VINF_SUCCESS)
8841 return rcStrict;
8842
8843 iemHlpUsedFpu(pVCpu);
8844 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8845}
8846
8847
8848/**
8849 * Implements 'XSAVE'.
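 *
 * The XSAVE area as handled here consists of the legacy 512-byte FXSAVE
 * region, the 64-byte XSAVE header at offset 512 (only bmXState is updated),
 * and the YMM_Hi128 component at the offset given by
 * aoffXState[XSAVE_C_YMM_BIT].  The set of components written is EDX:EAX
 * masked by XCR0; only the x87, SSE and YMM components are implemented.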
8850 *
8851 * @param iEffSeg The effective segment.
8852 * @param GCPtrEff The address of the image.
8853 * @param enmEffOpSize The operand size (only REX.W really matters).
8854 */
8855IEM_CIMPL_DEF_3(iemCImpl_xsave, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize)
8856{
8857 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
8858
8859 /*
8860 * Raise exceptions.
8861 */
8862 if (!(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE))
8863 return iemRaiseUndefinedOpcode(pVCpu);
8864 /* When in VMX non-root mode and XSAVE/XRSTOR is not enabled, it results in #UD. */
8865 if (RT_LIKELY( !IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
8866 || IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_XSAVES_XRSTORS)))
8867 { /* likely */ }
8868 else
8869 {
8870        Log(("xsave: Not enabled for nested-guest execution -> #UD\n"));
8871 return iemRaiseUndefinedOpcode(pVCpu);
8872 }
8873 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS)
8874 return iemRaiseDeviceNotAvailable(pVCpu);
8875
8876 /*
8877 * Calc the requested mask.
8878 */
8879 uint64_t const fReqComponents = RT_MAKE_U64(pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.edx) & pVCpu->cpum.GstCtx.aXcr[0];
8880 AssertLogRelReturn(!(fReqComponents & ~(XSAVE_C_X87 | XSAVE_C_SSE | XSAVE_C_YMM)), VERR_IEM_ASPECT_NOT_IMPLEMENTED);
8881 uint64_t const fXInUse = pVCpu->cpum.GstCtx.aXcr[0];
8882
8883/** @todo figure out the exact protocol for the memory access. Currently we
 8884 * just need this crap to work halfway to make it possible to test
8885 * AVX instructions. */
8886/** @todo figure out the XINUSE and XMODIFIED */
8887
8888 /*
8889 * Access the x87 memory state.
8890 */
8891 /* The x87+SSE state. */
8892 uint8_t bUnmapInfoMem512;
8893 void *pvMem512;
8894 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvMem512, &bUnmapInfoMem512, 512,
8895 iEffSeg, GCPtrEff, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE,
8896 63 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_GP_OR_AC);
8897 if (rcStrict != VINF_SUCCESS)
8898 return rcStrict;
8899 PX86FXSTATE pDst = (PX86FXSTATE)pvMem512;
8900 PCX86FXSTATE pSrc = &pVCpu->cpum.GstCtx.XState.x87;
8901
8902 /* The header. */
8903 uint8_t bUnmapInfoHdr;
8904 PX86XSAVEHDR pHdr;
8905    rcStrict = iemMemMap(pVCpu, (void **)&pHdr, &bUnmapInfoHdr, sizeof(*pHdr),
8906 iEffSeg, GCPtrEff + 512, IEM_ACCESS_DATA_RW, 0 /* checked above */);
8907 if (rcStrict != VINF_SUCCESS)
8908 return rcStrict;
8909
8910 /*
8911 * Store the X87 state.
8912 */
8913 if (fReqComponents & XSAVE_C_X87)
8914 {
8915 /* common for all formats */
8916 pDst->FCW = pSrc->FCW;
8917 pDst->FSW = pSrc->FSW;
8918 pDst->FTW = pSrc->FTW & UINT16_C(0xff);
8919 pDst->FOP = pSrc->FOP;
8920 pDst->FPUIP = pSrc->FPUIP;
8921 pDst->CS = pSrc->CS;
8922 pDst->FPUDP = pSrc->FPUDP;
8923 pDst->DS = pSrc->DS;
8924 if (enmEffOpSize == IEMMODE_64BIT)
8925 {
8926 /* Save upper 16-bits of FPUIP (IP:CS:Rsvd1) and FPUDP (DP:DS:Rsvd2). */
8927 pDst->Rsrvd1 = pSrc->Rsrvd1;
8928 pDst->Rsrvd2 = pSrc->Rsrvd2;
8929 }
8930 else
8931 {
8932 pDst->Rsrvd1 = 0;
8933 pDst->Rsrvd2 = 0;
8934 }
8935 for (uint32_t i = 0; i < RT_ELEMENTS(pDst->aRegs); i++)
8936 {
8937 /** @todo Testcase: What actually happens to the 6 reserved bytes? I'm clearing
8938 * them for now... */
8939 pDst->aRegs[i].au32[0] = pSrc->aRegs[i].au32[0];
8940 pDst->aRegs[i].au32[1] = pSrc->aRegs[i].au32[1];
8941 pDst->aRegs[i].au32[2] = pSrc->aRegs[i].au32[2] & UINT32_C(0xffff);
8942 pDst->aRegs[i].au32[3] = 0;
8943 }
8944
8945 }
8946
8947 if (fReqComponents & (XSAVE_C_SSE | XSAVE_C_YMM))
8948 {
8949 pDst->MXCSR = pSrc->MXCSR;
8950 pDst->MXCSR_MASK = CPUMGetGuestMxCsrMask(pVCpu->CTX_SUFF(pVM));
8951 }
8952
8953 if (fReqComponents & XSAVE_C_SSE)
8954 {
8955 /* XMM registers. */
8956 uint32_t cXmmRegs = enmEffOpSize == IEMMODE_64BIT ? 16 : 8;
8957 for (uint32_t i = 0; i < cXmmRegs; i++)
8958 pDst->aXMM[i] = pSrc->aXMM[i];
8959 /** @todo Testcase: What happens to the reserved XMM registers? Untouched,
8960 * right? */
8961 }
8962
8963 /* Commit the x87 state bits. (probably wrong) */
8964 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoMem512);
8965 if (rcStrict != VINF_SUCCESS)
8966 return rcStrict;
8967
8968 /*
8969 * Store AVX state.
8970 */
8971 if (fReqComponents & XSAVE_C_YMM)
8972 {
8973 /** @todo testcase: xsave64 vs xsave32 wrt XSAVE_C_YMM. */
8974 AssertLogRelReturn(pVCpu->cpum.GstCtx.aoffXState[XSAVE_C_YMM_BIT] != UINT16_MAX, VERR_IEM_IPE_9);
8975 uint8_t bUnmapInfoComp;
8976 PCX86XSAVEYMMHI pCompSrc = CPUMCTX_XSAVE_C_PTR(IEM_GET_CTX(pVCpu), XSAVE_C_YMM_BIT, PCX86XSAVEYMMHI);
8977 PX86XSAVEYMMHI pCompDst;
8978 rcStrict = iemMemMap(pVCpu, (void **)&pCompDst, &bUnmapInfoComp, sizeof(*pCompDst), iEffSeg,
8979 GCPtrEff + pVCpu->cpum.GstCtx.aoffXState[XSAVE_C_YMM_BIT],
8980 IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE, 0 /* checked above */);
8981 if (rcStrict != VINF_SUCCESS)
8982 return rcStrict;
8983
8984 uint32_t cXmmRegs = enmEffOpSize == IEMMODE_64BIT ? 16 : 8;
8985 for (uint32_t i = 0; i < cXmmRegs; i++)
8986 pCompDst->aYmmHi[i] = pCompSrc->aYmmHi[i];
8987
8988 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoComp);
8989 if (rcStrict != VINF_SUCCESS)
8990 return rcStrict;
8991 }
8992
8993 /*
8994 * Update the header.
8995 */
8996 pHdr->bmXState = (pHdr->bmXState & ~fReqComponents)
8997 | (fReqComponents & fXInUse);
8998
8999 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoHdr);
9000 if (rcStrict != VINF_SUCCESS)
9001 return rcStrict;
9002
9003 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
9004}
9005
9006
9007/**
9008 * Implements 'XRSTOR'.
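 *
 * The mirror image of XSAVE above: each component requested in EDX:EAX & XCR0
 * is either loaded from the image when its bit is set in the saved header's
 * bmXState, or reset to its initial configuration (FCW=0x37f, MXCSR=0x1f80,
 * zeroed registers) when that bit is clear.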
9009 *
9010 * @param iEffSeg The effective segment.
9011 * @param GCPtrEff The address of the image.
9012 * @param enmEffOpSize The operand size (only REX.W really matters).
9013 */
9014IEM_CIMPL_DEF_3(iemCImpl_xrstor, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize)
9015{
9016 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
9017
9018 /*
9019 * Raise exceptions.
9020 */
9021 if (!(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE))
9022 return iemRaiseUndefinedOpcode(pVCpu);
9023 /* When in VMX non-root mode and XSAVE/XRSTOR is not enabled, it results in #UD. */
9024 if (RT_LIKELY( !IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
9025 || IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_XSAVES_XRSTORS)))
9026 { /* likely */ }
9027 else
9028 {
9029 Log(("xrstor: Not enabled for nested-guest execution -> #UD\n"));
9030 return iemRaiseUndefinedOpcode(pVCpu);
9031 }
9032 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS)
9033 return iemRaiseDeviceNotAvailable(pVCpu);
9034 if (GCPtrEff & 63)
9035 {
9036        /** @todo CPU/VM detection possible! \#AC might not be signalled for
9037         * all/any misalignment sizes, Intel says it's an implementation detail. */
9038 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_AM)
9039 && pVCpu->cpum.GstCtx.eflags.Bits.u1AC
9040 && IEM_GET_CPL(pVCpu) == 3)
9041 return iemRaiseAlignmentCheckException(pVCpu);
9042 return iemRaiseGeneralProtectionFault0(pVCpu);
9043 }
9044
9045/** @todo figure out the exact protocol for the memory access. Currently we
 9046 * just need this crap to work halfway to make it possible to test
9047 * AVX instructions. */
9048/** @todo figure out the XINUSE and XMODIFIED */
9049
9050 /*
9051 * Access the x87 memory state.
9052 */
9053 /* The x87+SSE state. */
9054 uint8_t bUnmapInfoMem512;
9055 void *pvMem512;
9056 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvMem512, &bUnmapInfoMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_R,
9057 63 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_GP_OR_AC);
9058 if (rcStrict != VINF_SUCCESS)
9059 return rcStrict;
9060 PCX86FXSTATE pSrc = (PCX86FXSTATE)pvMem512;
9061 PX86FXSTATE pDst = &pVCpu->cpum.GstCtx.XState.x87;
9062
9063 /*
9064 * Calc the requested mask
9065 */
9066 uint8_t bUnmapInfoHdr;
9067 PX86XSAVEHDR pHdrDst = &pVCpu->cpum.GstCtx.XState.Hdr;
9068 PCX86XSAVEHDR pHdrSrc;
9069 rcStrict = iemMemMap(pVCpu, (void **)&pHdrSrc, &bUnmapInfoHdr, sizeof(*pHdrSrc), iEffSeg, GCPtrEff + 512,
9070 IEM_ACCESS_DATA_R, 0 /* checked above */);
9071 if (rcStrict != VINF_SUCCESS)
9072 return rcStrict;
9073
9074 uint64_t const fReqComponents = RT_MAKE_U64(pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.edx) & pVCpu->cpum.GstCtx.aXcr[0];
9075 AssertLogRelReturn(!(fReqComponents & ~(XSAVE_C_X87 | XSAVE_C_SSE | XSAVE_C_YMM)), VERR_IEM_ASPECT_NOT_IMPLEMENTED);
9076 //uint64_t const fXInUse = pVCpu->cpum.GstCtx.aXcr[0];
9077 uint64_t const fRstorMask = pHdrSrc->bmXState;
9078 uint64_t const fCompMask = pHdrSrc->bmXComp;
9079
9080 AssertLogRelReturn(!(fCompMask & XSAVE_C_X), VERR_IEM_ASPECT_NOT_IMPLEMENTED);
9081
9082 uint32_t const cXmmRegs = enmEffOpSize == IEMMODE_64BIT ? 16 : 8;
9083
9084 /* We won't need this any longer. */
9085 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoHdr);
9086 if (rcStrict != VINF_SUCCESS)
9087 return rcStrict;
9088
9089 /*
9090 * Load the X87 state.
9091 */
9092 if (fReqComponents & XSAVE_C_X87)
9093 {
9094 if (fRstorMask & XSAVE_C_X87)
9095 {
9096 pDst->FCW = pSrc->FCW;
9097 pDst->FSW = pSrc->FSW;
9098 pDst->FTW = pSrc->FTW & UINT16_C(0xff);
9099 pDst->FOP = pSrc->FOP;
9100 pDst->FPUIP = pSrc->FPUIP;
9101 pDst->CS = pSrc->CS;
9102 pDst->FPUDP = pSrc->FPUDP;
9103 pDst->DS = pSrc->DS;
9104 if (enmEffOpSize == IEMMODE_64BIT)
9105 {
9106 /* Load upper 16-bits of FPUIP (IP:CS:Rsvd1) and FPUDP (DP:DS:Rsvd2). */
9107 pDst->Rsrvd1 = pSrc->Rsrvd1;
9108 pDst->Rsrvd2 = pSrc->Rsrvd2;
9109 }
9110 else
9111 {
9112 pDst->Rsrvd1 = 0;
9113 pDst->Rsrvd2 = 0;
9114 }
9115 for (uint32_t i = 0; i < RT_ELEMENTS(pDst->aRegs); i++)
9116 {
9117 pDst->aRegs[i].au32[0] = pSrc->aRegs[i].au32[0];
9118 pDst->aRegs[i].au32[1] = pSrc->aRegs[i].au32[1];
9119 pDst->aRegs[i].au32[2] = pSrc->aRegs[i].au32[2] & UINT32_C(0xffff);
9120 pDst->aRegs[i].au32[3] = 0;
9121 }
9122
9123 pDst->FCW &= ~X86_FCW_ZERO_MASK | X86_FCW_IC_MASK; /* Intel 10980xe allows setting the IC bit. Win 3.11 CALC.EXE sets it. */
9124 iemFpuRecalcExceptionStatus(pDst);
9125
9126 if (pDst->FSW & X86_FSW_ES)
9127 Log11(("xrstor: %04x:%08RX64: loading state with pending FPU exception (FSW=%#x)\n",
9128 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pSrc->FSW));
9129 }
9130 else
9131 {
9132 pDst->FCW = 0x37f;
9133 pDst->FSW = 0;
9134 pDst->FTW = 0x00; /* 0 - empty. */
9135 pDst->FPUDP = 0;
9136 pDst->DS = 0; //??
9137 pDst->Rsrvd2= 0;
9138 pDst->FPUIP = 0;
9139 pDst->CS = 0; //??
9140 pDst->Rsrvd1= 0;
9141 pDst->FOP = 0;
9142 for (uint32_t i = 0; i < RT_ELEMENTS(pSrc->aRegs); i++)
9143 {
9144 pDst->aRegs[i].au32[0] = 0;
9145 pDst->aRegs[i].au32[1] = 0;
9146 pDst->aRegs[i].au32[2] = 0;
9147 pDst->aRegs[i].au32[3] = 0;
9148 }
9149 }
9150 pHdrDst->bmXState |= XSAVE_C_X87; /* playing safe for now */
9151 }
9152
9153 /* MXCSR */
9154 if (fReqComponents & (XSAVE_C_SSE | XSAVE_C_YMM))
9155 {
9156 if (fRstorMask & (XSAVE_C_SSE | XSAVE_C_YMM))
9157 pDst->MXCSR = pSrc->MXCSR;
9158 else
9159 pDst->MXCSR = 0x1f80;
9160 }
9161
9162 /* XMM registers. */
9163 if (fReqComponents & XSAVE_C_SSE)
9164 {
9165 if (fRstorMask & XSAVE_C_SSE)
9166 {
9167 for (uint32_t i = 0; i < cXmmRegs; i++)
9168 pDst->aXMM[i] = pSrc->aXMM[i];
9169 /** @todo Testcase: What happens to the reserved XMM registers? Untouched,
9170 * right? */
9171 }
9172 else
9173 {
9174 for (uint32_t i = 0; i < cXmmRegs; i++)
9175 {
9176 pDst->aXMM[i].au64[0] = 0;
9177 pDst->aXMM[i].au64[1] = 0;
9178 }
9179 }
9180 pHdrDst->bmXState |= XSAVE_C_SSE; /* playing safe for now */
9181 }
9182
9183    /* Unmap the x87 state bits (so we don't run out of mappings). */
9184 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoMem512);
9185 if (rcStrict != VINF_SUCCESS)
9186 return rcStrict;
9187
9188 /*
9189 * Restore AVX state.
9190 */
9191 if (fReqComponents & XSAVE_C_YMM)
9192 {
9193 AssertLogRelReturn(pVCpu->cpum.GstCtx.aoffXState[XSAVE_C_YMM_BIT] != UINT16_MAX, VERR_IEM_IPE_9);
9194 PX86XSAVEYMMHI pCompDst = CPUMCTX_XSAVE_C_PTR(IEM_GET_CTX(pVCpu), XSAVE_C_YMM_BIT, PX86XSAVEYMMHI);
9195
9196 if (fRstorMask & XSAVE_C_YMM)
9197 {
9198 /** @todo testcase: xsave64 vs xsave32 wrt XSAVE_C_YMM. */
9199 uint8_t bUnmapInfoComp;
9200 PCX86XSAVEYMMHI pCompSrc;
9201 rcStrict = iemMemMap(pVCpu, (void **)&pCompSrc, &bUnmapInfoComp, sizeof(*pCompDst),
9202 iEffSeg, GCPtrEff + pVCpu->cpum.GstCtx.aoffXState[XSAVE_C_YMM_BIT],
9203 IEM_ACCESS_DATA_R, 0 /* checked above */);
9204 if (rcStrict != VINF_SUCCESS)
9205 return rcStrict;
9206
9207 for (uint32_t i = 0; i < cXmmRegs; i++)
9208 {
9209 pCompDst->aYmmHi[i].au64[0] = pCompSrc->aYmmHi[i].au64[0];
9210 pCompDst->aYmmHi[i].au64[1] = pCompSrc->aYmmHi[i].au64[1];
9211 }
9212
9213 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoComp);
9214 if (rcStrict != VINF_SUCCESS)
9215 return rcStrict;
9216 }
9217 else
9218 {
9219 for (uint32_t i = 0; i < cXmmRegs; i++)
9220 {
9221 pCompDst->aYmmHi[i].au64[0] = 0;
9222 pCompDst->aYmmHi[i].au64[1] = 0;
9223 }
9224 }
9225 pHdrDst->bmXState |= XSAVE_C_YMM; /* playing safe for now */
9226 }
9227
9228 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
9229}
9230
9231
9232
9233
9234/**
9235 * Implements 'STMXCSR'.
9236 *
9237 * @param iEffSeg The effective segment register for @a GCPtrEff.
9238 * @param GCPtrEff The address of the image.
9239 */
9240IEM_CIMPL_DEF_2(iemCImpl_stmxcsr, uint8_t, iEffSeg, RTGCPTR, GCPtrEff)
9241{
9242 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX);
9243
9244 /*
9245 * Raise exceptions.
9246 */
9247 if ( !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM)
9248 && (pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR))
9249 {
9250 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS))
9251 {
9252 /*
9253 * Do the job.
9254 */
9255 VBOXSTRICTRC rcStrict = iemMemStoreDataU32(pVCpu, iEffSeg, GCPtrEff, pVCpu->cpum.GstCtx.XState.x87.MXCSR);
9256 if (rcStrict == VINF_SUCCESS)
9257 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
9258 return rcStrict;
9259 }
9260 return iemRaiseDeviceNotAvailable(pVCpu);
9261 }
9262 return iemRaiseUndefinedOpcode(pVCpu);
9263}
9264
9265
9266/**
9267 * Implements 'VSTMXCSR'.
9268 *
9269 * @param iEffSeg The effective segment register for @a GCPtrEff.
9270 * @param GCPtrEff The address of the image.
9271 */
9272IEM_CIMPL_DEF_2(iemCImpl_vstmxcsr, uint8_t, iEffSeg, RTGCPTR, GCPtrEff)
9273{
9274 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_XCRx);
9275
9276 /*
9277 * Raise exceptions.
9278 */
9279 if ( ( !IEM_IS_GUEST_CPU_AMD(pVCpu)
9280 ? (pVCpu->cpum.GstCtx.aXcr[0] & (XSAVE_C_SSE | XSAVE_C_YMM)) == (XSAVE_C_SSE | XSAVE_C_YMM)
9281 : !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM)) /* AMD Jaguar CPU (f0x16,m0,s1) behaviour */
9282 && (pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE))
9283 {
9284 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS))
9285 {
9286 /*
9287 * Do the job.
9288 */
9289 VBOXSTRICTRC rcStrict = iemMemStoreDataU32(pVCpu, iEffSeg, GCPtrEff, pVCpu->cpum.GstCtx.XState.x87.MXCSR);
9290 if (rcStrict == VINF_SUCCESS)
9291 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
9292 return rcStrict;
9293 }
9294 return iemRaiseDeviceNotAvailable(pVCpu);
9295 }
9296 return iemRaiseUndefinedOpcode(pVCpu);
9297}
9298
9299
9300/**
9301 * Implements 'LDMXCSR'.
9302 *
9303 * @param iEffSeg The effective segment register for @a GCPtrEff.
9304 * @param GCPtrEff The address of the image.
9305 */
9306IEM_CIMPL_DEF_2(iemCImpl_ldmxcsr, uint8_t, iEffSeg, RTGCPTR, GCPtrEff)
9307{
9308 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX);
9309
9310 /*
9311 * Raise exceptions.
9312 */
9313 /** @todo testcase - order of LDMXCSR faults. Does \#PF, \#GP and \#SS
9314 * happen after or before \#UD and \#EM? */
9315 if ( !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM)
9316 && (pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR))
9317 {
9318 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS))
9319 {
9320 /*
9321 * Do the job.
9322 */
9323 uint32_t fNewMxCsr;
9324 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, &fNewMxCsr, iEffSeg, GCPtrEff);
9325 if (rcStrict == VINF_SUCCESS)
9326 {
9327 uint32_t const fMxCsrMask = CPUMGetGuestMxCsrMask(pVCpu->CTX_SUFF(pVM));
9328 if (!(fNewMxCsr & ~fMxCsrMask))
9329 {
9330 pVCpu->cpum.GstCtx.XState.x87.MXCSR = fNewMxCsr;
9331 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
9332 }
9333 Log(("ldmxcsr: New MXCSR=%#RX32 & ~MASK=%#RX32 = %#RX32 -> #GP(0)\n",
9334 fNewMxCsr, fMxCsrMask, fNewMxCsr & ~fMxCsrMask));
9335 return iemRaiseGeneralProtectionFault0(pVCpu);
9336 }
9337 return rcStrict;
9338 }
9339 return iemRaiseDeviceNotAvailable(pVCpu);
9340 }
9341 return iemRaiseUndefinedOpcode(pVCpu);
9342}
9343
9344
9345/**
9346 * Implements 'VLDMXCSR'.
9347 *
9348 * @param iEffSeg The effective segment register for @a GCPtrEff.
9349 * @param GCPtrEff The address of the image.
9350 */
9351IEM_CIMPL_DEF_2(iemCImpl_vldmxcsr, uint8_t, iEffSeg, RTGCPTR, GCPtrEff)
9352{
9353 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_XCRx);
9354
9355 /*
9356 * Raise exceptions.
9357 */
9358 if ( ( !IEM_IS_GUEST_CPU_AMD(pVCpu)
9359 ? (pVCpu->cpum.GstCtx.aXcr[0] & (XSAVE_C_SSE | XSAVE_C_YMM)) == (XSAVE_C_SSE | XSAVE_C_YMM)
9360 : !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM))
9361 && (pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE))
9362 {
9363 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS))
9364 {
9365 /*
9366 * Do the job.
9367 */
9368 uint32_t fNewMxCsr;
9369 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, &fNewMxCsr, iEffSeg, GCPtrEff);
9370 if (rcStrict == VINF_SUCCESS)
9371 {
9372 uint32_t const fMxCsrMask = CPUMGetGuestMxCsrMask(pVCpu->CTX_SUFF(pVM));
9373 if (!(fNewMxCsr & ~fMxCsrMask))
9374 {
9375 pVCpu->cpum.GstCtx.XState.x87.MXCSR = fNewMxCsr;
9376 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
9377 }
9378                Log(("vldmxcsr: New MXCSR=%#RX32 & ~MASK=%#RX32 = %#RX32 -> #GP(0)\n",
9379 fNewMxCsr, fMxCsrMask, fNewMxCsr & ~fMxCsrMask));
9380 return iemRaiseGeneralProtectionFault0(pVCpu);
9381 }
9382 return rcStrict;
9383 }
9384 return iemRaiseDeviceNotAvailable(pVCpu);
9385 }
9386 return iemRaiseUndefinedOpcode(pVCpu);
9387}
9388
9389
9390/**
9391 * Common routine for fnstenv and fnsave.
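 *
 * The 16-bit environment image is 14 bytes and the 32-bit one 28 bytes:
 * FCW, FSW, the full FTW, and then the FPU IP/CS (or opcode) and DP/DS words.
 * In real and v86 mode the CPU stores the linear address ((CS << 4) + IP)
 * split between the offset word and the top bits of the opcode word, which is
 * what is reproduced below.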
9392 *
9393 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9394 * @param enmEffOpSize The effective operand size.
9395 * @param uPtr Where to store the state.
9396 */
9397static void iemCImplCommonFpuStoreEnv(PVMCPUCC pVCpu, IEMMODE enmEffOpSize, RTPTRUNION uPtr)
9398{
9399 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87);
9400 PCX86FXSTATE pSrcX87 = &pVCpu->cpum.GstCtx.XState.x87;
9401 if (enmEffOpSize == IEMMODE_16BIT)
9402 {
9403 uPtr.pu16[0] = pSrcX87->FCW;
9404 uPtr.pu16[1] = pSrcX87->FSW;
9405 uPtr.pu16[2] = iemFpuCalcFullFtw(pSrcX87);
9406 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
9407 {
9408 /** @todo Testcase: How does this work when the FPUIP/CS was saved in
9409 * protected mode or long mode and we save it in real mode? And vice
9410             * versa? And with 32-bit operand size? I think the CPU is storing the
9411 * effective address ((CS << 4) + IP) in the offset register and not
9412 * doing any address calculations here. */
9413 uPtr.pu16[3] = (uint16_t)pSrcX87->FPUIP;
9414 uPtr.pu16[4] = ((pSrcX87->FPUIP >> 4) & UINT16_C(0xf000)) | pSrcX87->FOP;
9415 uPtr.pu16[5] = (uint16_t)pSrcX87->FPUDP;
9416 uPtr.pu16[6] = (pSrcX87->FPUDP >> 4) & UINT16_C(0xf000);
9417 }
9418 else
9419 {
9420 uPtr.pu16[3] = pSrcX87->FPUIP;
9421 uPtr.pu16[4] = pSrcX87->CS;
9422 uPtr.pu16[5] = pSrcX87->FPUDP;
9423 uPtr.pu16[6] = pSrcX87->DS;
9424 }
9425 }
9426 else
9427 {
9428 /** @todo Testcase: what is stored in the "gray" areas? (figure 8-9 and 8-10) */
9429 uPtr.pu16[0*2] = pSrcX87->FCW;
9430 uPtr.pu16[0*2+1] = 0xffff; /* (0xffff observed on intel skylake.) */
9431 uPtr.pu16[1*2] = pSrcX87->FSW;
9432 uPtr.pu16[1*2+1] = 0xffff;
9433 uPtr.pu16[2*2] = iemFpuCalcFullFtw(pSrcX87);
9434 uPtr.pu16[2*2+1] = 0xffff;
9435 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
9436 {
9437 uPtr.pu16[3*2] = (uint16_t)pSrcX87->FPUIP;
9438 uPtr.pu32[4] = ((pSrcX87->FPUIP & UINT32_C(0xffff0000)) >> 4) | pSrcX87->FOP;
9439 uPtr.pu16[5*2] = (uint16_t)pSrcX87->FPUDP;
9440 uPtr.pu32[6] = (pSrcX87->FPUDP & UINT32_C(0xffff0000)) >> 4;
9441 }
9442 else
9443 {
9444 uPtr.pu32[3] = pSrcX87->FPUIP;
9445 uPtr.pu16[4*2] = pSrcX87->CS;
9446 uPtr.pu16[4*2+1] = pSrcX87->FOP;
9447 uPtr.pu32[5] = pSrcX87->FPUDP;
9448 uPtr.pu16[6*2] = pSrcX87->DS;
9449 uPtr.pu16[6*2+1] = 0xffff;
9450 }
9451 }
9452}
9453
9454
9455/**
9456 * Common routine for fldenv and frstor.
9457 *
9458 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9459 * @param enmEffOpSize The effective operand size.
9460 * @param uPtr Where to load the state from.
9461 */
9462static void iemCImplCommonFpuRestoreEnv(PVMCPUCC pVCpu, IEMMODE enmEffOpSize, RTCPTRUNION uPtr)
9463{
9464 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87);
9465 PX86FXSTATE pDstX87 = &pVCpu->cpum.GstCtx.XState.x87;
9466 if (enmEffOpSize == IEMMODE_16BIT)
9467 {
9468 pDstX87->FCW = uPtr.pu16[0];
9469 pDstX87->FSW = uPtr.pu16[1];
9470 pDstX87->FTW = uPtr.pu16[2];
9471 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
9472 {
9473 pDstX87->FPUIP = uPtr.pu16[3] | ((uint32_t)(uPtr.pu16[4] & UINT16_C(0xf000)) << 4);
9474 pDstX87->FPUDP = uPtr.pu16[5] | ((uint32_t)(uPtr.pu16[6] & UINT16_C(0xf000)) << 4);
9475 pDstX87->FOP = uPtr.pu16[4] & UINT16_C(0x07ff);
9476 pDstX87->CS = 0;
9477 pDstX87->Rsrvd1= 0;
9478 pDstX87->DS = 0;
9479 pDstX87->Rsrvd2= 0;
9480 }
9481 else
9482 {
9483 pDstX87->FPUIP = uPtr.pu16[3];
9484 pDstX87->CS = uPtr.pu16[4];
9485 pDstX87->Rsrvd1= 0;
9486 pDstX87->FPUDP = uPtr.pu16[5];
9487 pDstX87->DS = uPtr.pu16[6];
9488 pDstX87->Rsrvd2= 0;
9489 /** @todo Testcase: Is FOP cleared when doing 16-bit protected mode fldenv? */
9490 }
9491 }
9492 else
9493 {
9494 pDstX87->FCW = uPtr.pu16[0*2];
9495 pDstX87->FSW = uPtr.pu16[1*2];
9496 pDstX87->FTW = uPtr.pu16[2*2];
9497 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
9498 {
9499 pDstX87->FPUIP = uPtr.pu16[3*2] | ((uPtr.pu32[4] & UINT32_C(0x0ffff000)) << 4);
9500 pDstX87->FOP = uPtr.pu32[4] & UINT16_C(0x07ff);
9501 pDstX87->FPUDP = uPtr.pu16[5*2] | ((uPtr.pu32[6] & UINT32_C(0x0ffff000)) << 4);
9502 pDstX87->CS = 0;
9503 pDstX87->Rsrvd1= 0;
9504 pDstX87->DS = 0;
9505 pDstX87->Rsrvd2= 0;
9506 }
9507 else
9508 {
9509 pDstX87->FPUIP = uPtr.pu32[3];
9510 pDstX87->CS = uPtr.pu16[4*2];
9511 pDstX87->Rsrvd1= 0;
9512 pDstX87->FOP = uPtr.pu16[4*2+1];
9513 pDstX87->FPUDP = uPtr.pu32[5];
9514 pDstX87->DS = uPtr.pu16[6*2];
9515 pDstX87->Rsrvd2= 0;
9516 }
9517 }
9518
9519 /* Make adjustments. */
9520 pDstX87->FTW = iemFpuCompressFtw(pDstX87->FTW);
9521#ifdef LOG_ENABLED
9522 uint16_t const fOldFsw = pDstX87->FSW;
9523#endif
9524 pDstX87->FCW &= ~X86_FCW_ZERO_MASK | X86_FCW_IC_MASK; /* Intel 10980xe allows setting the IC bit. Win 3.11 CALC.EXE sets it. */
9525 iemFpuRecalcExceptionStatus(pDstX87);
9526#ifdef LOG_ENABLED
9527 if ((pDstX87->FSW & X86_FSW_ES) ^ (fOldFsw & X86_FSW_ES))
9528 Log11(("iemCImplCommonFpuRestoreEnv: %04x:%08RX64: %s FPU exception (FCW=%#x FSW=%#x -> %#x)\n",
9529 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fOldFsw & X86_FSW_ES ? "Suppressed" : "Raised",
9530 pDstX87->FCW, fOldFsw, pDstX87->FSW));
9531#endif
9532
9533 /** @todo Testcase: Check if ES and/or B are automatically cleared if no
9534 * exceptions are pending after loading the saved state? */
9535}
9536
9537
9538/**
9539 * Implements 'FNSTENV'.
9540 *
9541 * @param enmEffOpSize The operand size (only REX.W really matters).
9542 * @param iEffSeg The effective segment register for @a GCPtrEffDst.
9543 * @param GCPtrEffDst The address of the image.
9544 */
9545IEM_CIMPL_DEF_3(iemCImpl_fnstenv, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
9546{
9547 uint8_t bUnmapInfo;
9548 RTPTRUNION uPtr;
9549 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &uPtr.pv, &bUnmapInfo, enmEffOpSize == IEMMODE_16BIT ? 14 : 28,
9550 iEffSeg, GCPtrEffDst, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE,
9551 enmEffOpSize == IEMMODE_16BIT ? 1 : 3 /** @todo ? */);
9552 if (rcStrict != VINF_SUCCESS)
9553 return rcStrict;
9554
9555 iemCImplCommonFpuStoreEnv(pVCpu, enmEffOpSize, uPtr);
9556
9557 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
9558 if (rcStrict != VINF_SUCCESS)
9559 return rcStrict;
9560
9561 /* Mask all math exceptions. Any possibly pending exceptions will be cleared. */
9562 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
9563 pFpuCtx->FCW |= X86_FCW_XCPT_MASK;
9564#ifdef LOG_ENABLED
9565 uint16_t fOldFsw = pFpuCtx->FSW;
9566#endif
9567 iemFpuRecalcExceptionStatus(pFpuCtx);
9568#ifdef LOG_ENABLED
9569 if ((pFpuCtx->FSW & X86_FSW_ES) ^ (fOldFsw & X86_FSW_ES))
9570 Log11(("fnstenv: %04x:%08RX64: %s FPU exception (FCW=%#x, FSW %#x -> %#x)\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9571 fOldFsw & X86_FSW_ES ? "Suppressed" : "Raised", pFpuCtx->FCW, fOldFsw, pFpuCtx->FSW));
9572#endif
9573
9574 iemHlpUsedFpu(pVCpu);
9575
9576 /* Note: C0, C1, C2 and C3 are documented as undefined, we leave them untouched! */
9577 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
9578}
9579
9580
9581/**
9582 * Implements 'FNSAVE'.
9583 *
9584 * @param enmEffOpSize The operand size.
9585 * @param iEffSeg The effective segment register for @a GCPtrEffDst.
9586 * @param GCPtrEffDst The address of the image.
9587 */
9588IEM_CIMPL_DEF_3(iemCImpl_fnsave, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
9589{
9590 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87);
9591
9592 uint8_t bUnmapInfo;
9593 RTPTRUNION uPtr;
9594 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &uPtr.pv, &bUnmapInfo, enmEffOpSize == IEMMODE_16BIT ? 94 : 108,
9595 iEffSeg, GCPtrEffDst, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE, 3 /** @todo ? */);
9596 if (rcStrict != VINF_SUCCESS)
9597 return rcStrict;
9598
9599 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
9600 iemCImplCommonFpuStoreEnv(pVCpu, enmEffOpSize, uPtr);
9601 PRTFLOAT80U paRegs = (PRTFLOAT80U)(uPtr.pu8 + (enmEffOpSize == IEMMODE_16BIT ? 14 : 28));
9602 for (uint32_t i = 0; i < RT_ELEMENTS(pFpuCtx->aRegs); i++)
9603 {
9604 paRegs[i].au32[0] = pFpuCtx->aRegs[i].au32[0];
9605 paRegs[i].au32[1] = pFpuCtx->aRegs[i].au32[1];
9606 paRegs[i].au16[4] = pFpuCtx->aRegs[i].au16[4];
9607 }
9608
9609 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
9610 if (rcStrict != VINF_SUCCESS)
9611 return rcStrict;
9612
9613 /* Rotate the stack to account for changed TOS. */
9614 iemFpuRotateStackSetTop(pFpuCtx, 0);
9615
9616 /*
9617 * Re-initialize the FPU context.
9618 */
9619 pFpuCtx->FCW = 0x37f;
9620 pFpuCtx->FSW = 0;
9621 pFpuCtx->FTW = 0x00; /* 0 - empty */
9622 pFpuCtx->FPUDP = 0;
9623 pFpuCtx->DS = 0;
9624 pFpuCtx->Rsrvd2= 0;
9625 pFpuCtx->FPUIP = 0;
9626 pFpuCtx->CS = 0;
9627 pFpuCtx->Rsrvd1= 0;
9628 pFpuCtx->FOP = 0;
9629
9630 iemHlpUsedFpu(pVCpu);
9631 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
9632}
9633
9634
9635
9636/**
9637 * Implements 'FLDENV'.
9638 *
9639 * @param enmEffOpSize The operand size (only REX.W really matters).
9640 * @param iEffSeg The effective segment register for @a GCPtrEffSrc.
9641 * @param GCPtrEffSrc The address of the image.
9642 */
9643IEM_CIMPL_DEF_3(iemCImpl_fldenv, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc)
9644{
9645 uint8_t bUnmapInfo;
9646 RTCPTRUNION uPtr;
9647 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&uPtr.pv, &bUnmapInfo, enmEffOpSize == IEMMODE_16BIT ? 14 : 28,
9648 iEffSeg, GCPtrEffSrc, IEM_ACCESS_DATA_R,
9649 enmEffOpSize == IEMMODE_16BIT ? 1 : 3 /** @todo ?*/);
9650 if (rcStrict != VINF_SUCCESS)
9651 return rcStrict;
9652
9653 iemCImplCommonFpuRestoreEnv(pVCpu, enmEffOpSize, uPtr);
9654
9655 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
9656 if (rcStrict != VINF_SUCCESS)
9657 return rcStrict;
9658
9659 iemHlpUsedFpu(pVCpu);
9660 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
9661}
9662
9663
9664/**
9665 * Implements 'FRSTOR'.
9666 *
9667 * @param enmEffOpSize The operand size.
9668 * @param iEffSeg The effective segment register for @a GCPtrEffSrc.
9669 * @param GCPtrEffSrc The address of the image.
9670 */
9671IEM_CIMPL_DEF_3(iemCImpl_frstor, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc)
9672{
9673 uint8_t bUnmapInfo;
9674 RTCPTRUNION uPtr;
9675 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&uPtr.pv, &bUnmapInfo, enmEffOpSize == IEMMODE_16BIT ? 94 : 108,
9676 iEffSeg, GCPtrEffSrc, IEM_ACCESS_DATA_R, 3 /** @todo ?*/ );
9677 if (rcStrict != VINF_SUCCESS)
9678 return rcStrict;
9679
9680 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
9681 iemCImplCommonFpuRestoreEnv(pVCpu, enmEffOpSize, uPtr);
9682 PCRTFLOAT80U paRegs = (PCRTFLOAT80U)(uPtr.pu8 + (enmEffOpSize == IEMMODE_16BIT ? 14 : 28));
9683 for (uint32_t i = 0; i < RT_ELEMENTS(pFpuCtx->aRegs); i++)
9684 {
9685 pFpuCtx->aRegs[i].au32[0] = paRegs[i].au32[0];
9686 pFpuCtx->aRegs[i].au32[1] = paRegs[i].au32[1];
9687 pFpuCtx->aRegs[i].au32[2] = paRegs[i].au16[4];
9688 pFpuCtx->aRegs[i].au32[3] = 0;
9689 }
9690
9691 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
9692 if (rcStrict != VINF_SUCCESS)
9693 return rcStrict;
9694
9695 iemHlpUsedFpu(pVCpu);
9696 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
9697}
9698
9699
9700/**
9701 * Implements 'FLDCW'.
9702 *
9703 * @param u16Fcw The new FCW.
9704 */
9705IEM_CIMPL_DEF_1(iemCImpl_fldcw, uint16_t, u16Fcw)
9706{
9707 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87);
9708
9709 /** @todo Testcase: Check what happens when trying to load X86_FCW_PC_RSVD. */
9710    /** @todo Testcase: Try to see what happens when trying to set undefined bits
9711     * (other than 6 and 7). Currently ignoring them. */
9712    /** @todo Testcase: Test that it raises and lowers the FPU exception bits
9713 * according to FSW. (This is what is currently implemented.) */
9714 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
9715 pFpuCtx->FCW = u16Fcw & (~X86_FCW_ZERO_MASK | X86_FCW_IC_MASK); /* Intel 10980xe allows setting the IC bit. Win 3.11 CALC.EXE sets it. */
9716#ifdef LOG_ENABLED
9717 uint16_t fOldFsw = pFpuCtx->FSW;
9718#endif
9719 iemFpuRecalcExceptionStatus(pFpuCtx);
9720#ifdef LOG_ENABLED
9721 if ((pFpuCtx->FSW & X86_FSW_ES) ^ (fOldFsw & X86_FSW_ES))
9722 Log11(("fldcw: %04x:%08RX64: %s FPU exception (FCW=%#x, FSW %#x -> %#x)\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9723 fOldFsw & X86_FSW_ES ? "Suppressed" : "Raised", pFpuCtx->FCW, fOldFsw, pFpuCtx->FSW));
9724#endif
9725
9726 /* Note: C0, C1, C2 and C3 are documented as undefined, we leave them untouched! */
9727 iemHlpUsedFpu(pVCpu);
9728 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
9729}
9730
9731
9732
9733/**
9734 * Implements the underflow case of fxch.
9735 *
9736 * @param iStReg The other stack register.
9737 * @param uFpuOpcode The FPU opcode (for simplicity).
9738 */
9739IEM_CIMPL_DEF_2(iemCImpl_fxch_underflow, uint8_t, iStReg, uint16_t, uFpuOpcode)
9740{
9741 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87);
9742
9743 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
9744 unsigned const iReg1 = X86_FSW_TOP_GET(pFpuCtx->FSW);
9745 unsigned const iReg2 = (iReg1 + iStReg) & X86_FSW_TOP_SMASK;
9746 Assert(!(RT_BIT(iReg1) & pFpuCtx->FTW) || !(RT_BIT(iReg2) & pFpuCtx->FTW));
9747
9748 /** @todo Testcase: fxch underflow. We assume that underflowed
9749 * registers are read as QNaN and then exchanged. This could be
9750 * wrong... */
9751 if (pFpuCtx->FCW & X86_FCW_IM)
9752 {
9753 if (RT_BIT(iReg1) & pFpuCtx->FTW)
9754 {
9755 if (RT_BIT(iReg2) & pFpuCtx->FTW)
9756 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
9757 else
9758 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[iStReg].r80;
9759 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
9760 }
9761 else
9762 {
9763 pFpuCtx->aRegs[iStReg].r80 = pFpuCtx->aRegs[0].r80;
9764 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
9765 }
9766 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
9767 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
9768 }
9769 else
9770 {
9771 /* raise underflow exception, don't change anything. */
9772 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_XCPT_MASK);
9773 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
9774 Log11(("fxch: %04x:%08RX64: Underflow exception (FSW=%#x)\n",
9775 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
9776 }
9777
9778 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
9779 iemHlpUsedFpu(pVCpu);
9780 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
9781}
9782
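/*
 * Note on the FTW tests in the FPU helpers above and below: the x87 state in
 * XState.x87 uses the abridged FXSAVE tag format, one bit per physical register
 * (1 = valid, 0 = empty), rather than the architectural two-bit tag word, which
 * is why plain RT_BIT(iRegN) tests suffice. A trivial sketch (the helper is
 * hypothetical, illustrative only):
 *
 * @code
 * static bool hypotheticalIsX87RegEmpty(PCX86FXSTATE pFpuCtx, unsigned iPhysReg)
 * {
 *     return !(pFpuCtx->FTW & RT_BIT(iPhysReg));
 * }
 * @endcode
 */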
9783
9784/**
9785 * Implements 'FCOMI', 'FCOMIP', 'FUCOMI', and 'FUCOMIP'.
9786 *
9787 * @param iStReg The other stack register.
9788 * @param fUCmp true for FUCOMI[P], false for FCOMI[P].
9789 * @param uPopAndFpuOpcode Bits 15-0: The FPU opcode.
9790 * Bit 31: Whether we should pop the stack when
9791 * done or not.
9792 */
9793IEM_CIMPL_DEF_3(iemCImpl_fcomi_fucomi, uint8_t, iStReg, bool, fUCmp, uint32_t, uPopAndFpuOpcode)
9794{
9795 Assert(iStReg < 8);
9796 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87);
9797
9798 /*
9799 * Raise exceptions.
9800 */
9801 if (pVCpu->cpum.GstCtx.cr0 & (X86_CR0_EM | X86_CR0_TS))
9802 return iemRaiseDeviceNotAvailable(pVCpu);
9803
9804 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
9805 uint16_t u16Fsw = pFpuCtx->FSW;
9806 if (u16Fsw & X86_FSW_ES)
9807 return iemRaiseMathFault(pVCpu);
9808
9809 /*
9810 * Check if any of the register accesses causes #SF + #IA.
9811 */
9812 bool fPop = RT_BOOL(uPopAndFpuOpcode & RT_BIT_32(31));
9813 unsigned const iReg1 = X86_FSW_TOP_GET(u16Fsw);
9814 unsigned const iReg2 = (iReg1 + iStReg) & X86_FSW_TOP_SMASK;
9815 if ((pFpuCtx->FTW & (RT_BIT(iReg1) | RT_BIT(iReg2))) == (RT_BIT(iReg1) | RT_BIT(iReg2)))
9816 {
9817 uint32_t u32Eflags;
9818 if (!fUCmp)
9819 u32Eflags = iemAImpl_fcomi_r80_by_r80(pFpuCtx, &u16Fsw, &pFpuCtx->aRegs[0].r80, &pFpuCtx->aRegs[iStReg].r80);
9820 else
9821 u32Eflags = iemAImpl_fucomi_r80_by_r80(pFpuCtx, &u16Fsw, &pFpuCtx->aRegs[0].r80, &pFpuCtx->aRegs[iStReg].r80);
9822
9823 pFpuCtx->FSW &= ~X86_FSW_C1;
9824 pFpuCtx->FSW |= u16Fsw & ~X86_FSW_TOP_MASK;
9825 if ( !(u16Fsw & X86_FSW_IE)
9826 || (pFpuCtx->FCW & X86_FCW_IM) )
9827 {
9828 pVCpu->cpum.GstCtx.eflags.u &= ~(X86_EFL_OF | X86_EFL_SF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF);
9829 pVCpu->cpum.GstCtx.eflags.u |= u32Eflags & (X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF);
9830 }
9831 }
9832 else if (pFpuCtx->FCW & X86_FCW_IM)
9833 {
9834 /* Masked underflow. */
9835 pFpuCtx->FSW &= ~X86_FSW_C1;
9836 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
9837 pVCpu->cpum.GstCtx.eflags.u &= ~(X86_EFL_OF | X86_EFL_SF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF);
9838 pVCpu->cpum.GstCtx.eflags.u |= X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF;
9839 }
9840 else
9841 {
9842 /* Raise underflow - don't touch EFLAGS or TOP. */
9843 pFpuCtx->FSW &= ~X86_FSW_C1;
9844 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
9845 Log11(("fcomi/fucomi: %04x:%08RX64: Raising IE+SF exception (FSW=%#x)\n",
9846 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
9847 fPop = false;
9848 }
9849
9850 /*
9851 * Pop if necessary.
9852 */
9853 if (fPop)
9854 {
9855 pFpuCtx->FTW &= ~RT_BIT(iReg1);
9856 iemFpuStackIncTop(pVCpu);
9857 }
9858
9859 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, (uint16_t)uPopAndFpuOpcode);
9860 iemHlpUsedFpu(pVCpu);
9861 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
9862}
9863
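/*
 * For reference, the EFLAGS result FCOMI/FUCOMI produce when the comparison
 * itself does not fault, written as a minimal scalar sketch. 'long double' and
 * the helper name are for illustration only; the real work is done by the
 * 80-bit assembly helpers called above:
 *
 * @code
 * static uint32_t hypotheticalFcomiEflags(long double lrdSt0, long double lrdStI)
 * {
 *     if (lrdSt0 != lrdSt0 || lrdStI != lrdStI)   // unordered: at least one NaN
 *         return X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF;
 *     if (lrdSt0 > lrdStI)
 *         return 0;                               // ST(0) > ST(i)
 *     if (lrdSt0 < lrdStI)
 *         return X86_EFL_CF;                      // ST(0) < ST(i)
 *     return X86_EFL_ZF;                          // equal
 * }
 * @endcode
 */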
9864
9865/**
9866 * Implements 'RDSEED'.
9867 *
9868 * @returns VINF_SUCCESS.
9869 * @param iReg The register.
9870 * @param enmEffOpSize The operand size.
9871 */
9872IEM_CIMPL_DEF_2(iemCImpl_rdseed, uint8_t, iReg, IEMMODE, enmEffOpSize)
9873{
9874#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9875 /* Nested-guest VMX intercept. */
9876 if ( !IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
9877 || !IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_RDSEED_EXIT))
9878 { /* probable */ }
9879 else
9880 {
9881 Log(("rdseed: Guest intercept -> VM-exit\n"));
9882 IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(pVCpu, VMX_EXIT_RDSEED, VMXINSTRID_RDSEED, cbInstr);
9883 }
9884#endif
9885
9886 uint32_t *pEFlags = &pVCpu->cpum.GstCtx.eflags.uBoth;
9887 switch (enmEffOpSize)
9888 {
9889 case IEMMODE_16BIT:
9890 {
9891 PFNIEMAIMPLRDRANDSEEDU16 pfnImpl = IEM_SELECT_HOST_OR_FALLBACK(fRdSeed,
9892 &iemAImpl_rdseed_u16,
9893 &iemAImpl_rdseed_u16_fallback);
9894 uint16_t *pu16Dst = iemGRegRefU16(pVCpu, iReg);
9895 (pfnImpl)(pu16Dst, pEFlags);
9896 break;
9897 }
9898 case IEMMODE_32BIT:
9899 {
9900 PFNIEMAIMPLRDRANDSEEDU32 pfnImpl = IEM_SELECT_HOST_OR_FALLBACK(fRdSeed,
9901 &iemAImpl_rdseed_u32,
9902 &iemAImpl_rdseed_u32_fallback);
9903 uint32_t *pu32Dst = iemGRegRefU32(pVCpu, iReg);
9904 (pfnImpl)(pu32Dst, pEFlags);
9905 iemGRegStoreU32(pVCpu, iReg, *pu32Dst);
9906 break;
9907 }
9908 case IEMMODE_64BIT:
9909 {
9910 PFNIEMAIMPLRDRANDSEEDU64 pfnImpl = IEM_SELECT_HOST_OR_FALLBACK(fRdSeed,
9911 &iemAImpl_rdseed_u64,
9912 &iemAImpl_rdseed_u64_fallback);
9913 uint64_t *pu64Dst = iemGRegRefU64(pVCpu, iReg);
9914 (pfnImpl)(pu64Dst, pEFlags);
9915 break;
9916 }
9917 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9918 }
9919 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
9920}
9921
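/*
 * Note on the write-back convention here and in the RDRAND implementation
 * below: the 32-bit case goes through iemGRegStoreU32 because a 32-bit GPR
 * write must zero-extend into the full 64-bit register, whereas the 16-bit and
 * 64-bit cases can simply write through the register reference.
 */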
9922
9923/**
9924 * Implements 'RDRAND'.
9925 *
9926 * @returns VINF_SUCCESS.
9927 * @param iReg The register.
9928 * @param enmEffOpSize The operand size.
9929 */
9930IEM_CIMPL_DEF_2(iemCImpl_rdrand, uint8_t, iReg, IEMMODE, enmEffOpSize)
9931{
9932#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9933 /* Nested-guest VMX intercept. */
9934 if ( !IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
9935 || !IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_RDRAND_EXIT))
9936 { /* probable */ }
9937 else
9938 {
9939 Log(("rdrand: Guest intercept -> VM-exit\n"));
9940 IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(pVCpu, VMX_EXIT_RDRAND, VMXINSTRID_RDRAND, cbInstr);
9941 }
9942#endif
9943
9944 uint32_t *pEFlags = &pVCpu->cpum.GstCtx.eflags.uBoth;
9945 switch (enmEffOpSize)
9946 {
9947 case IEMMODE_16BIT:
9948 {
9949 PFNIEMAIMPLRDRANDSEEDU16 pfnImpl = IEM_SELECT_HOST_OR_FALLBACK(fRdRand, &iemAImpl_rdrand_u16,
9950 &iemAImpl_rdrand_u16_fallback);
9951 uint16_t *pu16Dst = iemGRegRefU16(pVCpu, iReg);
9952 (pfnImpl)(pu16Dst, pEFlags);
9953 break;
9954 }
9955 case IEMMODE_32BIT:
9956 {
9957 PFNIEMAIMPLRDRANDSEEDU32 pfnImpl = IEM_SELECT_HOST_OR_FALLBACK(fRdRand, &iemAImpl_rdrand_u32,
9958 &iemAImpl_rdrand_u32_fallback);
9959 uint32_t *pu32Dst = iemGRegRefU32(pVCpu, iReg);
9960 (pfnImpl)(pu32Dst, pEFlags);
9961 iemGRegStoreU32(pVCpu, iReg, *pu32Dst);
9962 break;
9963 }
9964 case IEMMODE_64BIT:
9965 {
9966 PFNIEMAIMPLRDRANDSEEDU64 pfnImpl = IEM_SELECT_HOST_OR_FALLBACK(fRdRand, &iemAImpl_rdrand_u64,
9967 &iemAImpl_rdrand_u64_fallback);
9968 uint64_t *pu64Dst = iemGRegRefU64(pVCpu, iReg);
9969 (pfnImpl)(pu64Dst, pEFlags);
9970 break;
9971 }
9972 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9973 }
9974 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
9975}
9976
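/*
 * For both RDRAND and RDSEED the only architecturally defined success
 * indicator is CF: CF=1 means the destination holds a random value, CF=0 means
 * no entropy was available and the destination is zero (the other arithmetic
 * flags are cleared either way). A typical guest-side retry loop, sketched
 * with the compiler intrinsics from <immintrin.h> (illustrative only, not part
 * of the emulation):
 *
 * @code
 * unsigned long long uValue   = 0;
 * unsigned           cRetries = 10;
 * while (!_rdrand64_step(&uValue) && --cRetries > 0)
 *     _mm_pause();
 * @endcode
 */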
9977
9978/**
9979 * Worker for 'VMASKMOVPS / VPMASKMOVD' 128-bit 32-bit-masked load.
9980 *
9981 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9982 * @param cbInstr The current instruction length.
9983 * @param iXRegDst The destination XMM register index.
9984 * @param iXRegMsk The mask XMM register index.
9985 * @param iEffSeg The effective segment.
9986 * @param GCPtrEffSrc The source memory address.
9987 */
9988static VBOXSTRICTRC iemCImpl_maskmov_load_u128_32_worker(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iXRegDst, uint8_t iXRegMsk, uint8_t iEffSeg, RTGCPTR GCPtrEffSrc)
9989{
9990 uint32_t fAccessed = 0;
9991
9992 PRTUINT128U puDst = (PRTUINT128U)&pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDst];
9993 PCRTUINT128U puMsk = (PCRTUINT128U)&pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegMsk];
9994 PCRTUINT128U puSrc;
9995
9996 for (uint32_t i = 0; i < RT_ELEMENTS(puMsk->au32); i++)
9997 {
9998 fAccessed |= puMsk->au32[i];
9999 }
10000
10001 if (fAccessed & RT_BIT(31)) {
10002 /*
10003 * Access the source memory.
10004 */
10005 uint8_t bUnmapInfo;
10006 void *pvMemSrc;
10007 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvMemSrc, &bUnmapInfo, sizeof(*puSrc),
10008 iEffSeg, GCPtrEffSrc, IEM_ACCESS_DATA_R, 0);
10009 if (rcStrict != VINF_SUCCESS)
10010 return rcStrict;
10011
10012 puSrc = (PCRTUINT128U)pvMemSrc;
10013
10014 for (uint32_t i = 0; i < RT_ELEMENTS(puSrc->au32); i++)
10015 {
10016 puDst->au32[i] = (puMsk->au32[i] & RT_BIT(31)) ? puSrc->au32[i] : 0;
10017 }
10018 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iXRegDst].au64[0] = 0;
10019 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iXRegDst].au64[1] = 0;
10020
10021 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
10022 if (rcStrict != VINF_SUCCESS)
10023 return rcStrict;
10024 }
10025 else
10026 {
10027 puDst->au64[0] = 0;
10028 puDst->au64[1] = 0;
10029 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iXRegDst].au64[0] = 0;
10030 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iXRegDst].au64[1] = 0;
10031 }
10032
10033 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
10034}
10035
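/*
 * Element-wise semantics of the worker above as a minimal scalar sketch (the
 * helper is hypothetical): each dword whose mask MSB is set is loaded, every
 * other destination dword is zeroed. The real code additionally maps the whole
 * 16 bytes in one go and skips the memory access entirely, and thus cannot
 * fault, when no mask MSB is set.
 *
 * @code
 * static void hypotheticalMaskMovLoadU128(uint32_t aDst[4], uint32_t const aMsk[4], uint32_t const aMem[4])
 * {
 *     for (unsigned i = 0; i < 4; i++)
 *         aDst[i] = (aMsk[i] & RT_BIT(31)) ? aMem[i] : 0;
 * }
 * @endcode
 */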
10036
10037
10038/**
10039 * Worker for 'VMASKMOVPS / VPMASKMOVD' 256-bit 32-bit-masked load.
10040 *
10041 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10042 * @param cbInstr The current instruction length.
10043 * @param iYRegDst The destination YMM register index.
10044 * @param iYRegMsk The mask YMM register index.
10045 * @param iEffSeg The effective segment.
10046 * @param GCPtrEffSrc The source memory address.
10047 */
10048static VBOXSTRICTRC iemCImpl_maskmov_load_u256_32_worker(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iYRegDst, uint8_t iYRegMsk, uint8_t iEffSeg, RTGCPTR GCPtrEffSrc)
10049{
10050 uint32_t fAccessed = 0;
10051
10052 PRTUINT128U puDstLo = (PRTUINT128U)&pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDst];
10053 PRTUINT128U puDstHi = (PRTUINT128U)&pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDst];
10054 PCRTUINT128U puMskLo = (PCRTUINT128U)&pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegMsk];
10055 PCRTUINT128U puMskHi = (PCRTUINT128U)&pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegMsk];
10056 PCRTUINT256U puSrc;
10057
10058 for (uint32_t i = 0; i < RT_ELEMENTS(puMskLo->au32); i++)
10059 {
10060 fAccessed |= puMskLo->au32[i] | puMskHi->au32[i];
10061 }
10062
10063 if (fAccessed & RT_BIT(31)) {
10064 /*
10065 * Access the source memory.
10066 */
10067 uint8_t bUnmapInfo;
10068 void *pvMemSrc;
10069 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvMemSrc, &bUnmapInfo, sizeof(*puSrc),
10070 iEffSeg, GCPtrEffSrc, IEM_ACCESS_DATA_R, 0);
10071 if (rcStrict != VINF_SUCCESS)
10072 return rcStrict;
10073
10074 puSrc = (PCRTUINT256U)pvMemSrc;
10075
10076 uint8_t const iHalf = RT_ELEMENTS(puSrc->au32) / 2;
10077
10078 for (uint32_t i = 0; i < iHalf; i++)
10079 {
10080 puDstLo->au32[i] = (puMskLo->au32[i] & RT_BIT(31)) ? puSrc->au32[i] : 0;
10081 }
10082 for (uint32_t i = iHalf; i < RT_ELEMENTS(puSrc->au32); i++)
10083 {
10084 puDstHi->au32[i - iHalf] = (puMskHi->au32[i - iHalf] & RT_BIT(31)) ? puSrc->au32[i] : 0;
10085 }
10086
10087 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
10088 if (rcStrict != VINF_SUCCESS)
10089 return rcStrict;
10090 }
10091 else
10092 {
10093 puDstLo->au64[0] = 0;
10094 puDstLo->au64[1] = 0;
10095 puDstHi->au64[0] = 0;
10096 puDstHi->au64[1] = 0;
10097 }
10098
10099 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
10100}
10101
10102
10103/**
10104 * Worker for 'VMASKMOVPS / VPMASKMOVD' 128-bit 32-bit-masked store.
10105 *
10106 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10107 * @param cbInstr The current instruction length.
10108 * @param iEffSeg The effective segment.
10109 * @param GCPtrEffDst The destination memory address.
10110 * @param iXRegMsk The mask XMM register index.
10111 * @param iXRegSrc The source XMM register index.
10112 */
10113static VBOXSTRICTRC iemCImpl_maskmov_store_u128_32_worker(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrEffDst, uint8_t iXRegMsk, uint8_t iXRegSrc)
10114{
10115 uint32_t fAccessed = 0;
10116
10117 PRTUINT128U puDst;
10118 PCRTUINT128U puMsk = (PCRTUINT128U)&pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegMsk];
10119 PCRTUINT128U puSrc = (PCRTUINT128U)&pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegSrc];
10120
10121 for (uint32_t i = 0; i < RT_ELEMENTS(puMsk->au32); i++)
10122 {
10123 fAccessed |= puMsk->au32[i];
10124 }
10125
10126 if (fAccessed & RT_BIT(31)) {
10127 /*
10128 * Access the destination memory.
10129 */
10130 uint8_t bUnmapInfo;
10131 void *pvMemDst;
10132 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvMemDst, &bUnmapInfo, sizeof(*puDst),
10133 iEffSeg, GCPtrEffDst, IEM_ACCESS_DATA_RW, 0);
10134 if (rcStrict != VINF_SUCCESS)
10135 return rcStrict;
10136
10137 puDst = (PRTUINT128U)pvMemDst;
10138
10139 for (uint32_t i = 0; i < RT_ELEMENTS(puDst->au32); i++)
10140 {
10141 if (puMsk->au32[i] & RT_BIT(31))
10142 puDst->au32[i] = puSrc->au32[i];
10143 }
10144
10145 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
10146 if (rcStrict != VINF_SUCCESS)
10147 return rcStrict;
10148 }
10149
10150 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
10151}
10152
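/*
 * Note on the store workers: the destination is mapped IEM_ACCESS_DATA_RW
 * rather than write-only because elements whose mask MSB is clear must be left
 * untouched in memory, and when no mask MSB is set at all the memory is not
 * accessed and therefore cannot fault. Element-wise, as a scalar sketch
 * (hypothetical helper, illustrative only):
 *
 * @code
 * static void hypotheticalMaskMovStoreU128(uint32_t aMem[4], uint32_t const aMsk[4], uint32_t const aSrc[4])
 * {
 *     for (unsigned i = 0; i < 4; i++)
 *         if (aMsk[i] & RT_BIT(31))
 *             aMem[i] = aSrc[i];
 * }
 * @endcode
 */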
10153
10154
10155/**
10156 * Worker for 'VMASKMOVPS / VPMASKMOVD' 256-bit 32-bit-masked store.
10157 *
10158 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10159 * @param cbInstr The current instruction length.
10160 * @param iEffSeg The effective segment.
10161 * @param GCPtrEffDst The destination memory address.
10162 * @param iYRegMsk The mask YMM register index.
10163 * @param iYRegSrc The source YMM register index.
10164 */
10165static VBOXSTRICTRC iemCImpl_maskmov_store_u256_32_worker(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrEffDst, uint8_t iYRegMsk, uint8_t iYRegSrc)
10166{
10167 uint32_t fAccessed = 0;
10168
10169 PRTUINT256U puDst;
10170 PCRTUINT128U puMskLo = (PCRTUINT128U)&pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegMsk];
10171 PCRTUINT128U puMskHi = (PCRTUINT128U)&pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegMsk];
10172 PCRTUINT128U puSrcLo = (PCRTUINT128U)&pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrc];
10173 PCRTUINT128U puSrcHi = (PCRTUINT128U)&pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegSrc];
10174
10175 for (uint32_t i = 0; i < RT_ELEMENTS(puMskLo->au32); i++)
10176 {
10177 fAccessed |= puMskLo->au32[i] | puMskHi->au32[i];
10178 }
10179
10180 if (fAccessed & RT_BIT(31)) {
10181 /*
10182 * Access the destination memory.
10183 */
10184 uint8_t bUnmapInfo;
10185 void *pvMemDst;
10186 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvMemDst, &bUnmapInfo, sizeof(*puDst),
10187 iEffSeg, GCPtrEffDst, IEM_ACCESS_DATA_RW, 0);
10188 if (rcStrict != VINF_SUCCESS)
10189 return rcStrict;
10190
10191 puDst = (PRTUINT256U)pvMemDst;
10192
10193 uint8_t const iHalf = RT_ELEMENTS(puDst->au32) / 2;
10194
10195 for (uint32_t i = 0; i < iHalf; i++)
10196 {
10197 if (puMskLo->au32[i] & RT_BIT(31))
10198 puDst->au32[i] = puSrcLo->au32[i];
10199 }
10200 for (uint32_t i = iHalf; i < RT_ELEMENTS(puDst->au32); i++)
10201 {
10202 if (puMskHi->au32[i - iHalf] & RT_BIT(31))
10203 puDst->au32[i] = puSrcHi->au32[i - iHalf];
10204 }
10205
10206 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
10207 if (rcStrict != VINF_SUCCESS)
10208 return rcStrict;
10209 }
10210
10211 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
10212}
10213
10214
10215/**
10216 * Worker for 'VMASKMOVPD / VPMASKMOVQ' 128-bit 64-bit-masked load.
10217 *
10218 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10219 * @param cbInstr The current instruction length.
10220 * @param iXRegDst The destination XMM register index.
10221 * @param iXRegMsk The mask XMM register index.
10222 * @param iEffSeg The effective segment.
10223 * @param GCPtrEffSrc The source memory address.
10224 */
10225static VBOXSTRICTRC iemCImpl_maskmov_load_u128_64_worker(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iXRegDst, uint8_t iXRegMsk, uint8_t iEffSeg, RTGCPTR GCPtrEffSrc)
10226{
10227 uint64_t fAccessed = 0;
10228
10229 PRTUINT128U puDst = (PRTUINT128U)&pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDst];
10230 PCRTUINT128U puMsk = (PCRTUINT128U)&pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegMsk];
10231 PCRTUINT128U puSrc;
10232
10233 for (uint32_t i = 0; i < RT_ELEMENTS(puMsk->au64); i++)
10234 {
10235 fAccessed |= puMsk->au64[i];
10236 }
10237
10238 if (fAccessed & RT_BIT_64(63)) {
10239 /*
10240 * Access the source memory.
10241 */
10242 uint8_t bUnmapInfo;
10243 void *pvMemSrc;
10244 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvMemSrc, &bUnmapInfo, sizeof(*puSrc),
10245 iEffSeg, GCPtrEffSrc, IEM_ACCESS_DATA_R, 0);
10246 if (rcStrict != VINF_SUCCESS)
10247 return rcStrict;
10248
10249 puSrc = (PCRTUINT128U)pvMemSrc;
10250
10251 for (uint32_t i = 0; i < RT_ELEMENTS(puSrc->au64); i++)
10252 {
10253 puDst->au64[i] = (puMsk->au64[i] & RT_BIT_64(63)) ? puSrc->au64[i] : 0;
10254 }
10255 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iXRegDst].au64[0] = 0;
10256 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iXRegDst].au64[1] = 0;
10257
10258 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
10259 if (rcStrict != VINF_SUCCESS)
10260 return rcStrict;
10261 }
10262 else
10263 {
10264 puDst->au64[0] = 0;
10265 puDst->au64[1] = 0;
10266 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iXRegDst].au64[0] = 0;
10267 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iXRegDst].au64[1] = 0;
10268 }
10269
10270 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
10271}
10272
10273
10274
10275/**
10276 * Worker for 'VMASKMOVPD / VPMASKMOVQ' 256-bit 64-bit-masked load.
10277 *
10278 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10279 * @param cbInstr The current instruction length.
10280 * @param iYRegDst The destination YMM register index.
10281 * @param iYRegMsk The mask YMM register index.
10282 * @param iEffSeg The effective segment.
10283 * @param GCPtrEffSrc The source memory address.
10284 */
10285static VBOXSTRICTRC iemCImpl_maskmov_load_u256_64_worker(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iYRegDst, uint8_t iYRegMsk, uint8_t iEffSeg, RTGCPTR GCPtrEffSrc)
10286{
10287 uint64_t fAccessed = 0;
10288
10289 PRTUINT128U puDstLo = (PRTUINT128U)&pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDst];
10290 PRTUINT128U puDstHi = (PRTUINT128U)&pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDst];
10291 PCRTUINT128U puMskLo = (PCRTUINT128U)&pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegMsk];
10292 PCRTUINT128U puMskHi = (PCRTUINT128U)&pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegMsk];
10293 PCRTUINT256U puSrc;
10294
10295 for (uint32_t i = 0; i < RT_ELEMENTS(puMskLo->au64); i++)
10296 {
10297 fAccessed |= puMskLo->au64[i] | puMskHi->au64[i];
10298 }
10299
10300 if (fAccessed & RT_BIT_64(63)) {
10301 /*
10302 * Access the source memory.
10303 */
10304 uint8_t bUnmapInfo;
10305 void *pvMemSrc;
10306 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvMemSrc, &bUnmapInfo, sizeof(*puSrc),
10307 iEffSeg, GCPtrEffSrc, IEM_ACCESS_DATA_R, 0);
10308 if (rcStrict != VINF_SUCCESS)
10309 return rcStrict;
10310
10311 puSrc = (PCRTUINT256U)pvMemSrc;
10312
10313 uint8_t const iHalf = RT_ELEMENTS(puSrc->au64) / 2;
10314
10315 for (uint32_t i = 0; i < iHalf; i++)
10316 {
10317 puDstLo->au64[i] = (puMskLo->au64[i] & RT_BIT_64(63)) ? puSrc->au64[i] : 0;
10318 }
10319 for (uint32_t i = iHalf; i < RT_ELEMENTS(puSrc->au64); i++)
10320 {
10321 puDstHi->au64[i - iHalf] = (puMskHi->au64[i - iHalf] & RT_BIT_64(63)) ? puSrc->au64[i] : 0;
10322 }
10323
10324 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
10325 if (rcStrict != VINF_SUCCESS)
10326 return rcStrict;
10327 }
10328 else
10329 {
10330 puDstLo->au64[0] = 0;
10331 puDstLo->au64[1] = 0;
10332 puDstHi->au64[0] = 0;
10333 puDstHi->au64[1] = 0;
10334 }
10335
10336 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
10337}
10338
10339
10340/**
10341 * Worker for 'VMASKMOVPD / VPMASKMOVQ' 128-bit 64-bit-masked store.
10342 *
10343 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10344 * @param cbInstr The current instruction length.
10345 * @param iEffSeg The effective segment.
10346 * @param GCPtrEffDst The destination memory address.
10347 * @param iXRegMsk The mask XMM register index.
10348 * @param iXRegSrc The source XMM register index.
10349 */
10350static VBOXSTRICTRC iemCImpl_maskmov_store_u128_64_worker(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrEffDst, uint8_t iXRegMsk, uint8_t iXRegSrc)
10351{
10352 uint64_t fAccessed = 0;
10353
10354 PRTUINT128U puDst;
10355 PCRTUINT128U puMsk = (PCRTUINT128U)&pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegMsk];
10356 PCRTUINT128U puSrc = (PCRTUINT128U)&pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegSrc];
10357
10358 for (uint32_t i = 0; i < RT_ELEMENTS(puMsk->au64); i++)
10359 {
10360 fAccessed |= puMsk->au64[i];
10361 }
10362
10363 if (fAccessed & RT_BIT_64(63)) {
10364 /*
10365 * Access the destination memory.
10366 */
10367 uint8_t bUnmapInfo;
10368 void *pvMemDst;
10369 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvMemDst, &bUnmapInfo, sizeof(*puDst),
10370 iEffSeg, GCPtrEffDst, IEM_ACCESS_DATA_RW, 0);
10371 if (rcStrict != VINF_SUCCESS)
10372 return rcStrict;
10373
10374 puDst = (PRTUINT128U)pvMemDst;
10375
10376 for (uint32_t i = 0; i < RT_ELEMENTS(puDst->au64); i++)
10377 {
10378 if (puMsk->au64[i] & RT_BIT_64(63))
10379 puDst->au64[i] = puSrc->au64[i];
10380 }
10381
10382 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
10383 if (rcStrict != VINF_SUCCESS)
10384 return rcStrict;
10385 }
10386
10387 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
10388}
10389
10390
10391
10392/**
10393 * Worker for 'VMASKMOVPD / VPMASKMOVQ' 256-bit 64-bit-masked store.
10394 *
10395 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10396 * @param cbInstr The current instruction length.
10397 * @param iEffSeg The effective segment.
10398 * @param GCPtrEffDst The destination memory address.
10399 * @param iYRegMsk The mask YMM register index.
10400 * @param iYRegSrc The source YMM register index.
10401 */
10402static VBOXSTRICTRC iemCImpl_maskmov_store_u256_64_worker(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrEffDst, uint8_t iYRegMsk, uint8_t iYRegSrc)
10403{
10404 uint64_t fAccessed = 0;
10405
10406 PRTUINT256U puDst;
10407 PCRTUINT128U puMskLo = (PCRTUINT128U)&pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegMsk];
10408 PCRTUINT128U puMskHi = (PCRTUINT128U)&pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegMsk];
10409 PCRTUINT128U puSrcLo = (PCRTUINT128U)&pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrc];
10410 PCRTUINT128U puSrcHi = (PCRTUINT128U)&pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegSrc];
10411
10412 for (uint32_t i = 0; i < RT_ELEMENTS(puMskLo->au64); i++)
10413 {
10414 fAccessed |= puMskLo->au64[i] | puMskHi->au64[i];
10415 }
10416
10417 if (fAccessed & RT_BIT_64(63)) {
10418 /*
10419 * Access the destination memory.
10420 */
10421 uint8_t bUnmapInfo;
10422 void *pvMemDst;
10423 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvMemDst, &bUnmapInfo, sizeof(*puDst),
10424 iEffSeg, GCPtrEffDst, IEM_ACCESS_DATA_RW, 0);
10425 if (rcStrict != VINF_SUCCESS)
10426 return rcStrict;
10427
10428 puDst = (PRTUINT256U)pvMemDst;
10429
10430 uint8_t const iHalf = RT_ELEMENTS(puDst->au64) / 2;
10431
10432 for (uint32_t i = 0; i < iHalf; i++)
10433 {
10434 if (puMskLo->au64[i] & RT_BIT_64(63))
10435 puDst->au64[i] = puSrcLo->au64[i];
10436 }
10437 for (uint32_t i = iHalf; i < RT_ELEMENTS(puDst->au64); i++)
10438 {
10439 if (puMskHi->au64[i - iHalf] & RT_BIT_64(63))
10440 puDst->au64[i] = puSrcHi->au64[i - iHalf];
10441 }
10442
10443 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
10444 if (rcStrict != VINF_SUCCESS)
10445 return rcStrict;
10446 }
10447
10448 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
10449}
10450
10451
10452/**
10453 * Implements 'VMASKMOVPS' 128-bit 32-bit-masked load.
10454 *
10455 * @param iXRegDst The destination XMM register index.
10456 * @param iXRegMsk The mask XMM register index.
10457 * @param iEffSeg The effective segment.
10458 * @param GCPtrEffSrc The source memory address.
10459 */
10460IEM_CIMPL_DEF_4(iemCImpl_vmaskmovps_load_u128, uint8_t, iXRegDst, uint8_t, iXRegMsk, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc)
10461{
10462 return iemCImpl_maskmov_load_u128_32_worker(pVCpu, cbInstr, iXRegDst, iXRegMsk, iEffSeg, GCPtrEffSrc);
10463}
10464
10465
10466/**
10467 * Implements 'VMASKMOVPS' 256-bit 32-bit-masked load.
10468 *
10469 * @param iYRegDst The destination YMM register index.
10470 * @param iYRegMsk The mask YMM register index.
10471 * @param iEffSeg The effective segment.
10472 * @param GCPtrEffSrc The source memory address.
10473 */
10474IEM_CIMPL_DEF_4(iemCImpl_vmaskmovps_load_u256, uint8_t, iYRegDst, uint8_t, iYRegMsk, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc)
10475{
10476 return iemCImpl_maskmov_load_u256_32_worker(pVCpu, cbInstr, iYRegDst, iYRegMsk, iEffSeg, GCPtrEffSrc);
10477}
10478
10479
10480/**
10481 * Implements 'VMASKMOVPS' 128-bit 32-bit-masked store.
10482 *
10483 * @param iEffSeg The effective segment.
10484 * @param GCPtrEffDst The destination memory address.
10485 * @param iXRegMsk The mask XMM register index.
10486 * @param iXRegSrc The source XMM register index.
10487 */
10488IEM_CIMPL_DEF_4(iemCImpl_vmaskmovps_store_u128, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst, uint8_t, iXRegMsk, uint8_t, iXRegSrc)
10489{
10490 return iemCImpl_maskmov_store_u128_32_worker(pVCpu, cbInstr, iEffSeg, GCPtrEffDst, iXRegMsk, iXRegSrc);
10491}
10492
10493
10494/**
10495 * Implements 'VMASKMOVPS' 256-bit 32-bit-masked store.
10496 *
10497 * @param iEffSeg The effective segment.
10498 * @param GCPtrEffDst The destination memory address.
10499 * @param iYRegMsk The mask YMM register index.
10500 * @param iYRegSrc The source YMM register index.
10501 */
10502IEM_CIMPL_DEF_4(iemCImpl_vmaskmovps_store_u256, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst, uint8_t, iYRegMsk, uint8_t, iYRegSrc)
10503{
10504 return iemCImpl_maskmov_store_u256_32_worker(pVCpu, cbInstr, iEffSeg, GCPtrEffDst, iYRegMsk, iYRegSrc);
10505}
10506
10507
10508/**
10509 * Implements 'VPMASKMOVD' 128-bit 32-bit-masked load.
10510 *
10511 * @param iXRegDst The destination XMM register index.
10512 * @param iXRegMsk The mask XMM register index.
10513 * @param iEffSeg The effective segment.
10514 * @param GCPtrEffSrc The source memory address.
10515 */
10516IEM_CIMPL_DEF_4(iemCImpl_vpmaskmovd_load_u128, uint8_t, iXRegDst, uint8_t, iXRegMsk, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc)
10517{
10518 return iemCImpl_maskmov_load_u128_32_worker(pVCpu, cbInstr, iXRegDst, iXRegMsk, iEffSeg, GCPtrEffSrc);
10519}
10520
10521
10522/**
10523 * Implements 'VPMASKMOVD' 256-bit 32-bit-masked load.
10524 *
10525 * @param iYRegDst The destination YMM register index.
10526 * @param iYRegMsk The mask YMM register index.
10527 * @param iEffSeg The effective segment.
10528 * @param GCPtrEffSrc The source memory address.
10529 */
10530IEM_CIMPL_DEF_4(iemCImpl_vpmaskmovd_load_u256, uint8_t, iYRegDst, uint8_t, iYRegMsk, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc)
10531{
10532 return iemCImpl_maskmov_load_u256_32_worker(pVCpu, cbInstr, iYRegDst, iYRegMsk, iEffSeg, GCPtrEffSrc);
10533}
10534
10535
10536/**
10537 * Implements 'VPMASKMOVD' 128-bit 32-bit-masked store.
10538 *
10539 * @param iEffSeg The effective segment.
10540 * @param GCPtrEffDst The destination memory address.
10541 * @param iXRegMsk The mask XMM register index.
10542 * @param iXRegSrc The source XMM register index.
10543 */
10544IEM_CIMPL_DEF_4(iemCImpl_vpmaskmovd_store_u128, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst, uint8_t, iXRegMsk, uint8_t, iXRegSrc)
10545{
10546 return iemCImpl_maskmov_store_u128_32_worker(pVCpu, cbInstr, iEffSeg, GCPtrEffDst, iXRegMsk, iXRegSrc);
10547}
10548
10549
10550/**
10551 * Implements 'VPMASKMOVD' 256-bit 32-bit-masked store.
10552 *
10553 * @param iEffSeg The effective segment.
10554 * @param GCPtrEffDst The destination memory address.
10555 * @param iYRegMsk The mask YMM register index.
10556 * @param iYRegSrc The source YMM register index.
10557 */
10558IEM_CIMPL_DEF_4(iemCImpl_vpmaskmovd_store_u256, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst, uint8_t, iYRegMsk, uint8_t, iYRegSrc)
10559{
10560 return iemCImpl_maskmov_store_u256_32_worker(pVCpu, cbInstr, iEffSeg, GCPtrEffDst, iYRegMsk, iYRegSrc);
10561}
10562
10563
10564/**
10565 * Implements 'VMASKMOVPD' 128-bit 64-bit-masked load.
10566 *
10567 * @param iXRegDst The destination XMM register index.
10568 * @param iXRegMsk The mask XMM register index.
10569 * @param iEffSeg The effective segment.
10570 * @param GCPtrEffSrc The source memory address.
10571 */
10572IEM_CIMPL_DEF_4(iemCImpl_vmaskmovpd_load_u128, uint8_t, iXRegDst, uint8_t, iXRegMsk, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc)
10573{
10574 return iemCImpl_maskmov_load_u128_64_worker(pVCpu, cbInstr, iXRegDst, iXRegMsk, iEffSeg, GCPtrEffSrc);
10575}
10576
10577
10578/**
10579 * Implements 'VMASKMOVPD' 256-bit 64-bit-masked load.
10580 *
10581 * @param iYRegDst The destination YMM register index.
10582 * @param iYRegMsk The mask YMM register index.
10583 * @param iEffSeg The effective segment.
10584 * @param GCPtrEffSrc The source memory address.
10585 */
10586IEM_CIMPL_DEF_4(iemCImpl_vmaskmovpd_load_u256, uint8_t, iYRegDst, uint8_t, iYRegMsk, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc)
10587{
10588 return iemCImpl_maskmov_load_u256_64_worker(pVCpu, cbInstr, iYRegDst, iYRegMsk, iEffSeg, GCPtrEffSrc);
10589}
10590
10591
10592/**
10593 * Implements 'VMASKMOVPD' 128-bit 64-bit-masked store.
10594 *
10595 * @param iEffSeg The effective segment.
10596 * @param GCPtrEffDst The destination memory address.
10597 * @param iXRegMsk The mask XMM register index.
10598 * @param iXRegSrc The source XMM register index.
10599 */
10600IEM_CIMPL_DEF_4(iemCImpl_vmaskmovpd_store_u128, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst, uint8_t, iXRegMsk, uint8_t, iXRegSrc)
10601{
10602 return iemCImpl_maskmov_store_u128_64_worker(pVCpu, cbInstr, iEffSeg, GCPtrEffDst, iXRegMsk, iXRegSrc);
10603}
10604
10605
10606/**
10607 * Implements 'VMASKMOVPD' 256-bit 64-bit-masked store.
10608 *
10609 * @param iEffSeg The effective segment.
10610 * @param GCPtrEffDst The destination memory address.
10611 * @param iYRegMsk The mask YMM register index.
10612 * @param iYRegSrc The source YMM register index.
10613 */
10614IEM_CIMPL_DEF_4(iemCImpl_vmaskmovpd_store_u256, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst, uint8_t, iYRegMsk, uint8_t, iYRegSrc)
10615{
10616 return iemCImpl_maskmov_store_u256_64_worker(pVCpu, cbInstr, iEffSeg, GCPtrEffDst, iYRegMsk, iYRegSrc);
10617}
10618
10619
10620/**
10621 * Implements 'VPMASKMOVQ' 128-bit 64-bit-masked load.
10622 *
10623 * @param iXRegDst The destination XMM register index.
10624 * @param iXRegMsk The mask XMM register index.
10625 * @param iEffSeg The effective segment.
10626 * @param GCPtrEffSrc The source memory address.
10627 */
10628IEM_CIMPL_DEF_4(iemCImpl_vpmaskmovq_load_u128, uint8_t, iXRegDst, uint8_t, iXRegMsk, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc)
10629{
10630 return iemCImpl_maskmov_load_u128_64_worker(pVCpu, cbInstr, iXRegDst, iXRegMsk, iEffSeg, GCPtrEffSrc);
10631}
10632
10633
10634/**
10635 * Implements 'VPMASKMOVQ' 256-bit 64-bit-masked load.
10636 *
10637 * @param iYRegDst The destination YMM register index.
10638 * @param iYRegMsk The mask YMM register index.
10639 * @param iEffSeg The effective segment.
10640 * @param GCPtrEffSrc The source memory address.
10641 */
10642IEM_CIMPL_DEF_4(iemCImpl_vpmaskmovq_load_u256, uint8_t, iYRegDst, uint8_t, iYRegMsk, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc)
10643{
10644 return iemCImpl_maskmov_load_u256_64_worker(pVCpu, cbInstr, iYRegDst, iYRegMsk, iEffSeg, GCPtrEffSrc);
10645}
10646
10647
10648/**
10649 * Implements 'VPMASKMOVQ' 128-bit 64-bit-masked store.
10650 *
10651 * @param iEffSeg The effective segment.
10652 * @param GCPtrEffDst The destination memory address.
10653 * @param iXRegMsk The mask XMM register index.
10654 * @param iXRegSrc The source XMM register index.
10655 */
10656IEM_CIMPL_DEF_4(iemCImpl_vpmaskmovq_store_u128, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst, uint8_t, iXRegMsk, uint8_t, iXRegSrc)
10657{
10658 return iemCImpl_maskmov_store_u128_64_worker(pVCpu, cbInstr, iEffSeg, GCPtrEffDst, iXRegMsk, iXRegSrc);
10659}
10660
10661
10662/**
10663 * Implements 'VPMASKMOVQ' 256-bit 64-bit-masked store.
10664 *
10665 * @param iEffSeg The effective segment.
10666 * @param GCPtrEffDst The destination memory address.
10667 * @param iYRegMsk The mask YMM register index.
10668 * @param iYRegSrc The source YMM register index.
10669 */
10670IEM_CIMPL_DEF_4(iemCImpl_vpmaskmovq_store_u256, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst, uint8_t, iYRegMsk, uint8_t, iYRegSrc)
10671{
10672 return iemCImpl_maskmov_store_u256_64_worker(pVCpu, cbInstr, iEffSeg, GCPtrEffDst, iYRegMsk, iYRegSrc);
10673}
10674
10675
10676/**
10677 * Worker for 'VGATHERcxx' / 'VPGATHERxx' masked loads.
10678 *
10679 * @param u32PackedArgs Arguments packed to the tune of IEMGATHERARGS.
10680 * @param u32Disp The address displacement for the indices.
10681 */
10682IEM_CIMPL_DEF_2(iemCImpl_vpgather_worker_xx, uint32_t, u32PackedArgs, uint32_t, u32Disp)
10683{
10684 IEMGATHERARGS const PackedArgs = { u32PackedArgs };
10685 int32_t const offDisp = (int32_t)u32Disp;
10686
10687 if ( PackedArgs.s.iYRegDst == PackedArgs.s.iYRegIdc
10688 || PackedArgs.s.iYRegIdc == PackedArgs.s.iYRegMsk
10689 || PackedArgs.s.iYRegDst == PackedArgs.s.iYRegMsk)
 return iemRaiseUndefinedOpcode(pVCpu);
10690
10691 Assert(PackedArgs.s.enmEffOpSize <= IEMMODE_64BIT);
10692 Assert(PackedArgs.s.enmEffAddrMode <= IEMMODE_64BIT);
10693
10694 uint32_t const cbMaxWidth = PackedArgs.s.fVex256 ? 32 : 16; /* Width of widest XMM / YMM register we will use: 32 or 16 */
10695 uint32_t const cbIdxWidth = PackedArgs.s.fIdxQword ? 8 : 4; /* Width of one index: 4-byte dword or 8-byte qword */
10696 uint32_t const cbValWidth = PackedArgs.s.fValQword ? 8 : 4; /* Width of one value: 4-byte dword or 8-byte qword */
10697 uint32_t const cMasks = cbMaxWidth / cbValWidth; /* Count of masks: 8 or 4 or 2 */
10698 uint32_t const cIndices = cbMaxWidth / cbIdxWidth; /* Count of indices: 8 or 4 or 2 */
10699 uint32_t const cValues = RT_MIN(cMasks, cIndices); /* Count of values to gather: 8 or 4 or 2 */
10700 Assert(cValues == 2 || cValues == 4 || cValues == 8);
10701 uint32_t const cbDstWidth = cValues * cbValWidth; /* Width of the destination & mask XMM / YMM registers: 32 or 16 or 8 */
10702 Assert(cbDstWidth == 8 || cbDstWidth == 16 || cbDstWidth == 32);
10703
10704 /*
10705 * Get the base pointer.
10706 */
10707 uint64_t u64Base = iemGRegFetchU64(pVCpu, PackedArgs.s.iGRegBase);
10708 if (PackedArgs.s.enmEffAddrMode != IEMMODE_64BIT)
10709 u64Base &= (PackedArgs.s.enmEffAddrMode == IEMMODE_16BIT ? UINT16_MAX : UINT32_MAX);
10710
10711 PRTUINT128U const apuDst[2] =
10712 {
10713 &pVCpu->cpum.GstCtx.XState.x87.aXMM[PackedArgs.s.iYRegDst].uXmm,
10714 &pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[PackedArgs.s.iYRegDst].uXmm
10715 };
10716 PCRTUINT128U const apuIdc[2] =
10717 {
10718 &pVCpu->cpum.GstCtx.XState.x87.aXMM[PackedArgs.s.iYRegIdc].uXmm,
10719 &pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[PackedArgs.s.iYRegIdc].uXmm
10720 };
10721 PRTUINT128U const apuMsk[2] =
10722 {
10723 &pVCpu->cpum.GstCtx.XState.x87.aXMM[PackedArgs.s.iYRegMsk].uXmm,
10724 &pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[PackedArgs.s.iYRegMsk].uXmm
10725 };
10726
10727 /*
10728 * Convert the masks to all-0s or all-1s, writing back to the mask
10729 * register so it will have the correct value if subsequent memory
10730 * accesses fault. Note that cMasks can be larger than cValues, in
10731 * the Qword-index, Dword-value instructions `vgatherqps' and
10732 * `vpgatherqd'. Updating as many mask elements as *would* have
10733 * been used if the destination register were wide enough matches
10734 * the observed behavior of a Core i7-10700.
10735 */
10736 if (!PackedArgs.s.fValQword)
10737 for (uint32_t i = 0; i < cMasks; i++)
10738 apuMsk[(i >> 2) & 1]->ai32[i & 3] >>= 31; /* Use arithmetic shift right (SAR/ASR) */
10739 else
10740 for (uint32_t i = 0; i < cMasks; i++)
10741 apuMsk[(i >> 1) & 1]->ai64[i & 1] >>= 63; /* Use arithmetic shift right (SAR/ASR) */
10742
10743 /*
10744 * Zero upper bits of mask if VEX128.
10745 */
10746 if (!PackedArgs.s.fVex256)
10747 {
10748 apuMsk[1]->au64[0] = 0;
10749 apuMsk[1]->au64[1] = 0;
10750 }
10751
10752 /*
10753 * Gather the individual values, as masked.
10754 */
10755 for (uint32_t i = 0; i < cValues; i++)
10756 {
10757 /*
10758 * Consult the mask determined above.
10759 */
10760 if ( !PackedArgs.s.fValQword
10761 ? apuMsk[(i >> 2) & 1]->au32[i & 3] != 0
10762 : apuMsk[(i >> 1) & 1]->au64[i & 1] != 0)
10763 {
10764 /*
10765 * Get the index, scale it, add scaled index + offset to the base pointer.
10766 */
10767 int64_t offIndex;
10768 if (!PackedArgs.s.fIdxQword)
10769 offIndex = apuIdc[(i >> 2) & 1]->ai32[i & 3];
10770 else
10771 offIndex = apuIdc[(i >> 1) & 1]->ai64[i & 1];
10772 offIndex <<= PackedArgs.s.iScale;
10773 offIndex += offDisp;
10774
10775 uint64_t u64Addr = u64Base + offIndex;
10776 if (PackedArgs.s.enmEffAddrMode != IEMMODE_64BIT)
10777 u64Addr &= UINT32_MAX;
10778
10779 /*
10780 * Gather it -- fetch this gather-item from guest memory.
10781 */
10782 VBOXSTRICTRC rcStrict;
10783 if (!PackedArgs.s.fValQword)
10784 rcStrict = iemMemFetchDataU32NoAc(pVCpu, &apuDst[(i >> 2) & 1]->au32[i & 3], PackedArgs.s.iEffSeg, u64Addr);
10785 else
10786 rcStrict = iemMemFetchDataU64NoAc(pVCpu, &apuDst[(i >> 1) & 1]->au64[i & 1], PackedArgs.s.iEffSeg, u64Addr);
10787 if (rcStrict != VINF_SUCCESS)
10788 return rcStrict;
10789
10790 /*
10791 * Now that we *didn't* fault, write all-0s to that part of the mask register.
10792 */
10793 if (!PackedArgs.s.fValQword)
10794 apuMsk[(i >> 2) & 1]->au32[i & 3] = 0;
10795 else
10796 apuMsk[(i >> 1) & 1]->au64[i & 1] = 0;
10797 /** @todo How are data breakpoints handled? The Intel docs kind of hint they
10798 * may be raised here... */
10799 }
10800 }
10801
10802 /*
10803 * Zero upper bits of destination and mask.
10804 */
10805 if (cbDstWidth != 32)
10806 {
10807 apuDst[1]->au64[0] = 0;
10808 apuDst[1]->au64[1] = 0;
10809 apuMsk[1]->au64[0] = 0;
10810 apuMsk[1]->au64[1] = 0;
10811 if (cbDstWidth == 8)
10812 {
10813 apuDst[0]->au64[1] = 0;
10814 apuMsk[0]->au64[1] = 0;
10815 }
10816 }
10817
10818 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
10819}
10820
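/*
 * For reference, the per-element effective address computed by the gather loop
 * above, as a stand-alone sketch (hypothetical helper, illustrative only;
 * address-size truncation follows the code above):
 *
 * @code
 * static uint64_t hypotheticalGatherElemAddr(uint64_t uBase, int64_t iIndex, uint8_t cShiftScale,
 *                                            int32_t offDisp, bool f64BitAddr)
 * {
 *     int64_t const  offIndex = (iIndex << cShiftScale) + offDisp;
 *     uint64_t const uAddr    = uBase + (uint64_t)offIndex;
 *     return f64BitAddr ? uAddr : uAddr & UINT32_MAX;
 * }
 * @endcode
 */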
10821/** @} */
10822