VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp @ 100266

Last change on this file since 100266 was 100266, checked in by vboxsync, 20 months ago

VMM/IEM: Collect opcode bytes while decoding instructions in the recompiler. Started on TB opcode validation prior/during execution (much more work needed). bugref:10369

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 468.9 KB
1/* $Id: IEMAll.cpp 100266 2023-06-23 14:15:10Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/** @page pg_iem IEM - Interpreted Execution Manager
30 *
31 * The interpreted execution manager (IEM) is for executing short guest code
32 * sequences that are causing too many exits / virtualization traps. It will
33 * also be used to interpret single instructions, thus replacing the selective
34 * interpreters in EM and IOM.
35 *
36 * Design goals:
37 * - Relatively small footprint, although we favour speed and correctness
38 * over size.
39 * - Reasonably fast.
40 * - Correctly handle lock prefixed instructions.
41 * - Complete instruction set - eventually.
42 * - Refactorable into a recompiler, maybe.
43 * - Replace EMInterpret*.
44 *
45 * Using the existing disassembler has been considered; however, this is thought
46 * to conflict with speed as the disassembler chews things a bit too much while
47 * leaving us with a somewhat complicated state to interpret afterwards.
48 *
49 *
50 * The current code is very much work in progress. You've been warned!
51 *
52 *
53 * @section sec_iem_fpu_instr FPU Instructions
54 *
55 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
56 * same or equivalent instructions on the host FPU. To make life easy, we also
57 * let the FPU prioritize the unmasked exceptions for us. This, however, only
58 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
59 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
60 * can trigger spurious FPU exceptions.
61 *
62 * The guest FPU state is not loaded into the host CPU and kept there till we
63 * leave IEM because the calling conventions have declared an all year open
64 * season on much of the FPU state. For instance an innocent looking call to
65 * memcpy might end up using a whole bunch of XMM or MM registers if the
66 * particular implementation finds it worthwhile.
67 *
68 *
69 * @section sec_iem_logging Logging
70 *
71 * The IEM code uses the \"IEM\" log group for the main logging. The different
72 * logging levels/flags are generally used for the following purposes:
73 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
74 * - Flow (LogFlow) : Basic enter/exit IEM state info.
75 * - Level 2 (Log2) : ?
76 * - Level 3 (Log3) : More detailed enter/exit IEM state info.
77 * - Level 4 (Log4) : Decoding mnemonics w/ EIP.
78 * - Level 5 (Log5) : Decoding details.
79 * - Level 6 (Log6) : Enables/disables the lockstep comparison with REM.
80 * - Level 7 (Log7) : iret++ execution logging.
81 * - Level 8 (Log8) : Memory writes.
82 * - Level 9 (Log9) : Memory reads.
83 * - Level 10 (Log10): TLBs.
84 * - Level 11 (Log11): Unmasked FPU exceptions.
85 *
86 * The SVM (AMD-V) and VMX (VT-x) code has the following assignments:
87 * - Level 1 (Log) : Errors and other major events.
88 * - Flow (LogFlow) : Misc flow stuff (cleanup?)
89 * - Level 2 (Log2) : VM exits.
90 */
91
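/*
 * Illustrative only (not part of the original file): a hedged sketch of how the
 * log levels documented above are typically exercised from IEM code.  LogFlow,
 * Log4 and Log8 are the regular VBox/IPRT logging macros from <VBox/log.h>; the
 * function below is a hypothetical example, not an IEM API.
 */
#if 0 /* example sketch, not built */
static void iemExampleLogUsageSketch(PVMCPUCC pVCpu)
{
    LogFlow(("iemExample: enter, CPU %u\n", pVCpu->idCpu));              /* Flow   : enter/exit state info. */
    Log4(("iemExample: decoding at %08RX64\n", pVCpu->cpum.GstCtx.rip)); /* Level 4: decoding w/ (R)IP.     */
    Log8(("iemExample: wrote 4 bytes\n"));                               /* Level 8: memory writes.         */
}
#endif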
92/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
93#ifdef _MSC_VER
94# pragma warning(disable:4505)
95#endif
96
97
98/*********************************************************************************************************************************
99* Header Files *
100*********************************************************************************************************************************/
101#define LOG_GROUP LOG_GROUP_IEM
102#define VMCPU_INCL_CPUM_GST_CTX
103#include <VBox/vmm/iem.h>
104#include <VBox/vmm/cpum.h>
105#include <VBox/vmm/apic.h>
106#include <VBox/vmm/pdm.h>
107#include <VBox/vmm/pgm.h>
108#include <VBox/vmm/iom.h>
109#include <VBox/vmm/em.h>
110#include <VBox/vmm/hm.h>
111#include <VBox/vmm/nem.h>
112#include <VBox/vmm/gim.h>
113#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
114# include <VBox/vmm/em.h>
115# include <VBox/vmm/hm_svm.h>
116#endif
117#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
118# include <VBox/vmm/hmvmxinline.h>
119#endif
120#include <VBox/vmm/tm.h>
121#include <VBox/vmm/dbgf.h>
122#include <VBox/vmm/dbgftrace.h>
123#include "IEMInternal.h"
124#include <VBox/vmm/vmcc.h>
125#include <VBox/log.h>
126#include <VBox/err.h>
127#include <VBox/param.h>
128#include <VBox/dis.h>
129#include <iprt/asm-math.h>
130#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
131# include <iprt/asm-amd64-x86.h>
132#elif defined(RT_ARCH_ARM64) || defined(RT_ARCH_ARM32)
133# include <iprt/asm-arm.h>
134#endif
135#include <iprt/assert.h>
136#include <iprt/string.h>
137#include <iprt/x86.h>
138
139#include "IEMInline.h"
140
141
142/*********************************************************************************************************************************
143* Structures and Typedefs *
144*********************************************************************************************************************************/
145/**
146 * CPU exception classes.
147 */
148typedef enum IEMXCPTCLASS
149{
150 IEMXCPTCLASS_BENIGN,
151 IEMXCPTCLASS_CONTRIBUTORY,
152 IEMXCPTCLASS_PAGE_FAULT,
153 IEMXCPTCLASS_DOUBLE_FAULT
154} IEMXCPTCLASS;
155
156
157/*********************************************************************************************************************************
158* Global Variables *
159*********************************************************************************************************************************/
160#if defined(IEM_LOG_MEMORY_WRITES)
161/** What IEM just wrote. */
162uint8_t g_abIemWrote[256];
163/** How much IEM just wrote. */
164size_t g_cbIemWrote;
165#endif
166
167
168/*********************************************************************************************************************************
169* Internal Functions *
170*********************************************************************************************************************************/
171static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
172 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT;
173
174
175/**
176 * Calculates IEM_F_BRK_PENDING_XXX (IEM_F_PENDING_BRK_MASK) flags, slow code
177 * path.
178 *
179 * @returns IEM_F_BRK_PENDING_XXX or zero.
180 * @param pVCpu The cross context virtual CPU structure of the
181 * calling thread.
182 *
183 * @note Don't call directly, use iemCalcExecDbgFlags instead.
184 */
185uint32_t iemCalcExecDbgFlagsSlow(PVMCPUCC pVCpu)
186{
187 uint32_t fExec = 0;
188
189 /*
190 * Process guest breakpoints.
191 */
192#define PROCESS_ONE_BP(a_fDr7, a_iBp) do { \
193 if (a_fDr7 & X86_DR7_L_G(a_iBp)) \
194 { \
195 switch (X86_DR7_GET_RW(a_fDr7, a_iBp)) \
196 { \
197 case X86_DR7_RW_EO: \
198 fExec |= IEM_F_PENDING_BRK_INSTR; \
199 break; \
200 case X86_DR7_RW_WO: \
201 case X86_DR7_RW_RW: \
202 fExec |= IEM_F_PENDING_BRK_DATA; \
203 break; \
204 case X86_DR7_RW_IO: \
205 fExec |= IEM_F_PENDING_BRK_X86_IO; \
206 break; \
207 } \
208 } \
209 } while (0)
210
211 uint32_t const fGstDr7 = (uint32_t)pVCpu->cpum.GstCtx.dr[7];
212 if (fGstDr7 & X86_DR7_ENABLED_MASK)
213 {
214 PROCESS_ONE_BP(fGstDr7, 0);
215 PROCESS_ONE_BP(fGstDr7, 1);
216 PROCESS_ONE_BP(fGstDr7, 2);
217 PROCESS_ONE_BP(fGstDr7, 3);
218 }
219
220 /*
221 * Process hypervisor breakpoints.
222 */
223 uint32_t const fHyperDr7 = DBGFBpGetDR7(pVCpu->CTX_SUFF(pVM));
224 if (fHyperDr7 & X86_DR7_ENABLED_MASK)
225 {
226 PROCESS_ONE_BP(fHyperDr7, 0);
227 PROCESS_ONE_BP(fHyperDr7, 1);
228 PROCESS_ONE_BP(fHyperDr7, 2);
229 PROCESS_ONE_BP(fHyperDr7, 3);
230 }
231
232 return fExec;
233}
234
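/*
 * A minimal standalone sketch (hypothetical helper, not part of IEM) showing the
 * same DR7 decoding as the PROCESS_ONE_BP macro above for a single breakpoint
 * slot, using the X86_DR7_* helpers from <iprt/x86.h>.
 */
#if 0 /* illustration only */
static bool iemExampleIsInstructionBp(uint32_t fDr7, unsigned iBp)
{
    /* The slot must be enabled (locally or globally) and be an execute-only breakpoint. */
    return (fDr7 & X86_DR7_L_G(iBp))
        && X86_DR7_GET_RW(fDr7, iBp) == X86_DR7_RW_EO;
}
#endif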
235
236/**
237 * Initializes the decoder state.
238 *
239 * iemReInitDecoder is mostly a copy of this function.
240 *
241 * @param pVCpu The cross context virtual CPU structure of the
242 * calling thread.
243 * @param fExecOpts Optional execution flags:
244 * - IEM_F_BYPASS_HANDLERS
245 * - IEM_F_X86_DISREGARD_LOCK
246 */
247DECLINLINE(void) iemInitDecoder(PVMCPUCC pVCpu, uint32_t fExecOpts)
248{
249 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
250 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
251 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
252 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
253 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
254 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
255 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
256 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
257 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
258 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
259
260 /* Execution state: */
261 uint32_t fExec;
262 pVCpu->iem.s.fExec = fExec = iemCalcExecFlags(pVCpu) | fExecOpts;
263
264 /* Decoder state: */
265 pVCpu->iem.s.enmDefAddrMode = fExec & IEM_F_MODE_CPUMODE_MASK; /** @todo check if this is correct... */
266 pVCpu->iem.s.enmEffAddrMode = fExec & IEM_F_MODE_CPUMODE_MASK;
267 if ((fExec & IEM_F_MODE_CPUMODE_MASK) != IEMMODE_64BIT)
268 {
269 pVCpu->iem.s.enmDefOpSize = fExec & IEM_F_MODE_CPUMODE_MASK; /** @todo check if this is correct... */
270 pVCpu->iem.s.enmEffOpSize = fExec & IEM_F_MODE_CPUMODE_MASK;
271 }
272 else
273 {
274 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
275 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
276 }
277 pVCpu->iem.s.fPrefixes = 0;
278 pVCpu->iem.s.uRexReg = 0;
279 pVCpu->iem.s.uRexB = 0;
280 pVCpu->iem.s.uRexIndex = 0;
281 pVCpu->iem.s.idxPrefix = 0;
282 pVCpu->iem.s.uVex3rdReg = 0;
283 pVCpu->iem.s.uVexLength = 0;
284 pVCpu->iem.s.fEvexStuff = 0;
285 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
286#ifdef IEM_WITH_CODE_TLB
287 pVCpu->iem.s.pbInstrBuf = NULL;
288 pVCpu->iem.s.offInstrNextByte = 0;
289 pVCpu->iem.s.offCurInstrStart = 0;
290# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
291 pVCpu->iem.s.offOpcode = 0;
292# endif
293# ifdef VBOX_STRICT
294 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
295 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
296 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
297 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
298# endif
299#else
300 pVCpu->iem.s.offOpcode = 0;
301 pVCpu->iem.s.cbOpcode = 0;
302#endif
303 pVCpu->iem.s.offModRm = 0;
304 pVCpu->iem.s.cActiveMappings = 0;
305 pVCpu->iem.s.iNextMapping = 0;
306 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
307
308#ifdef DBGFTRACE_ENABLED
309 switch (IEM_GET_CPU_MODE(pVCpu))
310 {
311 case IEMMODE_64BIT:
312 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.rip);
313 break;
314 case IEMMODE_32BIT:
315 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
316 break;
317 case IEMMODE_16BIT:
318 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
319 break;
320 }
321#endif
322}
323
324
325/**
326 * Reinitializes the decoder state for the 2nd+ loop of IEMExecLots.
327 *
328 * This is mostly a copy of iemInitDecoder.
329 *
330 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
331 */
332DECLINLINE(void) iemReInitDecoder(PVMCPUCC pVCpu)
333{
334 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
335 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
336 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
337 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
338 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
339 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
340 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
341 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
342 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
343
344 /* ASSUMES: Anyone changing CPU state affecting the fExec bits will update them! */
345 AssertMsg((pVCpu->iem.s.fExec & ~IEM_F_USER_OPTS) == iemCalcExecFlags(pVCpu),
346 ("fExec=%#x iemCalcExecModeFlags=%#x\n", pVCpu->iem.s.fExec, iemCalcExecFlags(pVCpu)));
347
348 IEMMODE const enmMode = IEM_GET_CPU_MODE(pVCpu);
349 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
350 pVCpu->iem.s.enmEffAddrMode = enmMode;
351 if (enmMode != IEMMODE_64BIT)
352 {
353 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
354 pVCpu->iem.s.enmEffOpSize = enmMode;
355 }
356 else
357 {
358 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
359 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
360 }
361 pVCpu->iem.s.fPrefixes = 0;
362 pVCpu->iem.s.uRexReg = 0;
363 pVCpu->iem.s.uRexB = 0;
364 pVCpu->iem.s.uRexIndex = 0;
365 pVCpu->iem.s.idxPrefix = 0;
366 pVCpu->iem.s.uVex3rdReg = 0;
367 pVCpu->iem.s.uVexLength = 0;
368 pVCpu->iem.s.fEvexStuff = 0;
369 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
370#ifdef IEM_WITH_CODE_TLB
371 if (pVCpu->iem.s.pbInstrBuf)
372 {
373 uint64_t off = (enmMode == IEMMODE_64BIT
374 ? pVCpu->cpum.GstCtx.rip
375 : pVCpu->cpum.GstCtx.eip + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base)
376 - pVCpu->iem.s.uInstrBufPc;
377 if (off < pVCpu->iem.s.cbInstrBufTotal)
378 {
379 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
380 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
381 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
382 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
383 else
384 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
385 }
386 else
387 {
388 pVCpu->iem.s.pbInstrBuf = NULL;
389 pVCpu->iem.s.offInstrNextByte = 0;
390 pVCpu->iem.s.offCurInstrStart = 0;
391 pVCpu->iem.s.cbInstrBuf = 0;
392 pVCpu->iem.s.cbInstrBufTotal = 0;
393 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
394 }
395 }
396 else
397 {
398 pVCpu->iem.s.offInstrNextByte = 0;
399 pVCpu->iem.s.offCurInstrStart = 0;
400 pVCpu->iem.s.cbInstrBuf = 0;
401 pVCpu->iem.s.cbInstrBufTotal = 0;
402# ifdef VBOX_STRICT
403 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
404# endif
405 }
406# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
407 pVCpu->iem.s.offOpcode = 0;
408# endif
409#else /* !IEM_WITH_CODE_TLB */
410 pVCpu->iem.s.cbOpcode = 0;
411 pVCpu->iem.s.offOpcode = 0;
412#endif /* !IEM_WITH_CODE_TLB */
413 pVCpu->iem.s.offModRm = 0;
414 Assert(pVCpu->iem.s.cActiveMappings == 0);
415 pVCpu->iem.s.iNextMapping = 0;
416 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
417 Assert(!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS));
418
419#ifdef DBGFTRACE_ENABLED
420 switch (enmMode)
421 {
422 case IEMMODE_64BIT:
423 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.rip);
424 break;
425 case IEMMODE_32BIT:
426 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
427 break;
428 case IEMMODE_16BIT:
429 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
430 break;
431 }
432#endif
433}
434
435
436
437/**
438 * Prefetches opcodes the first time execution is started.
439 *
440 * @returns Strict VBox status code.
441 * @param pVCpu The cross context virtual CPU structure of the
442 * calling thread.
443 * @param fExecOpts Optional execution flags:
444 * - IEM_F_BYPASS_HANDLERS
445 * - IEM_F_X86_DISREGARD_LOCK
446 */
447static VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPUCC pVCpu, uint32_t fExecOpts) RT_NOEXCEPT
448{
449 iemInitDecoder(pVCpu, fExecOpts);
450
451#ifndef IEM_WITH_CODE_TLB
452 /*
453 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
454 *
455 * First translate CS:rIP to a physical address.
456 *
457 * Note! The iemOpcodeFetchMoreBytes code depends on this here code to fetch
458 * all relevant bytes from the first page, as it ASSUMES it's only ever
459 * called for dealing with CS.LIM, page crossing and instructions that
460 * are too long.
461 */
462 uint32_t cbToTryRead;
463 RTGCPTR GCPtrPC;
464 if (IEM_IS_64BIT_CODE(pVCpu))
465 {
466 cbToTryRead = GUEST_PAGE_SIZE;
467 GCPtrPC = pVCpu->cpum.GstCtx.rip;
468 if (IEM_IS_CANONICAL(GCPtrPC))
469 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
470 else
471 return iemRaiseGeneralProtectionFault0(pVCpu);
472 }
473 else
474 {
475 uint32_t GCPtrPC32 = pVCpu->cpum.GstCtx.eip;
476 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu), ("%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
477 if (GCPtrPC32 <= pVCpu->cpum.GstCtx.cs.u32Limit)
478 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrPC32 + 1;
479 else
480 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
481 if (cbToTryRead) { /* likely */ }
482 else /* overflowed */
483 {
484 Assert(GCPtrPC32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
485 cbToTryRead = UINT32_MAX;
486 }
487 GCPtrPC = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrPC32;
488 Assert(GCPtrPC <= UINT32_MAX);
489 }
490
491 PGMPTWALK Walk;
492 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &Walk);
493 if (RT_SUCCESS(rc))
494 Assert(Walk.fSucceeded); /* probable. */
495 else
496 {
497 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
498# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
499 if (Walk.fFailed & PGM_WALKFAIL_EPT)
500 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
501# endif
502 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, rc);
503 }
504 if ((Walk.fEffective & X86_PTE_US) || IEM_GET_CPL(pVCpu) != 3) { /* likely */ }
505 else
506 {
507 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
508# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
509 if (Walk.fFailed & PGM_WALKFAIL_EPT)
510 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
511# endif
512 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
513 }
514 if (!(Walk.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
515 else
516 {
517 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
518# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
519 if (Walk.fFailed & PGM_WALKFAIL_EPT)
520 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
521# endif
522 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
523 }
524 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
525 /** @todo Check reserved bits and such stuff. PGM is better at doing
526 * that, so do it when implementing the guest virtual address
527 * TLB... */
528
529 /*
530 * Read the bytes at this address.
531 */
532 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
533 if (cbToTryRead > cbLeftOnPage)
534 cbToTryRead = cbLeftOnPage;
535 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
536 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
537
538 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
539 {
540 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
541 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
542 { /* likely */ }
543 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
544 {
545 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
546 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
547 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
548 }
549 else
550 {
551 Log((RT_SUCCESS(rcStrict)
552 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
553 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
554 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
555 return rcStrict;
556 }
557 }
558 else
559 {
560 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
561 if (RT_SUCCESS(rc))
562 { /* likely */ }
563 else
564 {
565 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
566 GCPtrPC, GCPhys, cbToTryRead, rc));
567 return rc;
568 }
569 }
570 pVCpu->iem.s.cbOpcode = cbToTryRead;
571#endif /* !IEM_WITH_CODE_TLB */
572 return VINF_SUCCESS;
573}
574
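/*
 * Hedged sketch (hypothetical helper, not part of IEM): the 16/32-bit branch
 * above in essence does the following to turn CS:EIP into a linear address and
 * work out how many bytes may be fetched before hitting the CS limit.
 */
#if 0 /* illustration only */
static uint32_t iemExampleCalcCsBytesLeft(PCCPUMCTX pCtx, uint64_t *puGCPtrPC)
{
    uint32_t const uEip = pCtx->eip;
    if (uEip > pCtx->cs.u32Limit)
        return 0;                                           /* IEM raises a selector bounds fault here. */
    *puGCPtrPC = (uint32_t)pCtx->cs.u64Base + uEip;         /* Linear address of the next instruction.  */
    uint32_t const cbLeft = pCtx->cs.u32Limit - uEip + 1;   /* 0 only if eip=0 and limit=0xffffffff ... */
    return cbLeft ? cbLeft : UINT32_MAX;                    /* ... i.e. the computation wrapped around. */
}
#endif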
575
576/**
577 * Invalidates the IEM TLBs.
578 *
579 * This is called internally as well as by PGM when moving GC mappings.
580 *
581 * @param pVCpu The cross context virtual CPU structure of the calling
582 * thread.
583 */
584VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPUCC pVCpu)
585{
586#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
587 Log10(("IEMTlbInvalidateAll\n"));
588# ifdef IEM_WITH_CODE_TLB
589 pVCpu->iem.s.cbInstrBufTotal = 0;
590 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
591 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
592 { /* very likely */ }
593 else
594 {
595 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
596 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
597 while (i-- > 0)
598 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
599 }
600# endif
601
602# ifdef IEM_WITH_DATA_TLB
603 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
604 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
605 { /* very likely */ }
606 else
607 {
608 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
609 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
610 while (i-- > 0)
611 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
612 }
613# endif
614#else
615 RT_NOREF(pVCpu);
616#endif
617}
618
619
620/**
621 * Invalidates a page in the TLBs.
622 *
623 * @param pVCpu The cross context virtual CPU structure of the calling
624 * thread.
625 * @param GCPtr The address of the page to invalidate
626 * @thread EMT(pVCpu)
627 */
628VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCPtr)
629{
630#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
631 Log10(("IEMTlbInvalidatePage: GCPtr=%RGv\n", GCPtr));
632 GCPtr = IEMTLB_CALC_TAG_NO_REV(GCPtr);
633 Assert(!(GCPtr >> (48 - X86_PAGE_SHIFT)));
634 uintptr_t const idx = IEMTLB_TAG_TO_INDEX(GCPtr);
635
636# ifdef IEM_WITH_CODE_TLB
637 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
638 {
639 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
640 if (GCPtr == IEMTLB_CALC_TAG_NO_REV(pVCpu->iem.s.uInstrBufPc))
641 pVCpu->iem.s.cbInstrBufTotal = 0;
642 }
643# endif
644
645# ifdef IEM_WITH_DATA_TLB
646 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
647 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
648# endif
649#else
650 NOREF(pVCpu); NOREF(GCPtr);
651#endif
652}
653
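/*
 * Hedged sketch (hypothetical helper, not part of IEM): how a linear address is
 * matched against a TLB entry, mirroring the lookup in IEMTlbInvalidatePage
 * above.  Because live entries carry the current uTlbRevision in their tag,
 * IEMTlbInvalidateAll can flush everything just by bumping the revision.
 */
#if 0 /* illustration only */
static bool iemExampleIsDataTlbHit(PVMCPUCC pVCpu, RTGCPTR GCPtr)
{
    uint64_t const  uTagNoRev = IEMTLB_CALC_TAG_NO_REV(GCPtr);
    uintptr_t const idx       = IEMTLB_TAG_TO_INDEX(uTagNoRev);
    return pVCpu->iem.s.DataTlb.aEntries[idx].uTag
        == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision);
}
#endif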
654
655#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
656/**
657 * Invalidates both TLBs the slow way following a rollover.
658 *
659 * Worker for IEMTlbInvalidateAllPhysical,
660 * IEMTlbInvalidateAllPhysicalAllCpus, iemOpcodeFetchBytesJmp, iemMemMap,
661 * iemMemMapJmp and others.
662 *
663 * @thread EMT(pVCpu)
664 */
665static void IEMTlbInvalidateAllPhysicalSlow(PVMCPUCC pVCpu)
666{
667 Log10(("IEMTlbInvalidateAllPhysicalSlow\n"));
668 ASMAtomicWriteU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
669 ASMAtomicWriteU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
670
671 unsigned i;
672# ifdef IEM_WITH_CODE_TLB
673 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
674 while (i-- > 0)
675 {
676 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
677 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
678 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
679 }
680# endif
681# ifdef IEM_WITH_DATA_TLB
682 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
683 while (i-- > 0)
684 {
685 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
686 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
687 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
688 }
689# endif
690
691}
692#endif
693
694
695/**
696 * Invalidates the host physical aspects of the IEM TLBs.
697 *
698 * This is called internally as well as by PGM when moving GC mappings.
699 *
700 * @param pVCpu The cross context virtual CPU structure of the calling
701 * thread.
702 * @note Currently not used.
703 */
704VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPUCC pVCpu)
705{
706#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
707 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
708 Log10(("IEMTlbInvalidateAllPhysical\n"));
709
710# ifdef IEM_WITH_CODE_TLB
711 pVCpu->iem.s.cbInstrBufTotal = 0;
712# endif
713 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
714 if (RT_LIKELY(uTlbPhysRev > IEMTLB_PHYS_REV_INCR * 2))
715 {
716 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
717 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
718 }
719 else
720 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
721#else
722 NOREF(pVCpu);
723#endif
724}
725
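/*
 * Hedged sketch (hypothetical helper, not part of IEM): what the physical
 * revision bump above buys us.  An entry's physical-page info (mapping pointer,
 * read/write flags) is only trusted while its embedded revision matches the
 * current uTlbPhysRev; after IEMTlbInvalidateAllPhysical the comparison fails
 * and the info is re-queried from PGM on the next use.
 */
#if 0 /* illustration only */
static bool iemExampleIsTlbePhysInfoCurrent(IEMTLBENTRY const *pTlbe, uint64_t uTlbPhysRev)
{
    return (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == uTlbPhysRev;
}
#endif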
726
727/**
728 * Invalidates the host physical aspects of the IEM TLBs.
729 *
730 * This is called internally as well as by PGM when moving GC mappings.
731 *
732 * @param pVM The cross context VM structure.
733 * @param idCpuCaller The ID of the calling EMT if available to the caller,
734 * otherwise NIL_VMCPUID.
735 *
736 * @remarks Caller holds the PGM lock.
737 */
738VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVMCC pVM, VMCPUID idCpuCaller)
739{
740#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
741 PVMCPUCC const pVCpuCaller = idCpuCaller >= pVM->cCpus ? VMMGetCpu(pVM) : VMMGetCpuById(pVM, idCpuCaller);
742 if (pVCpuCaller)
743 VMCPU_ASSERT_EMT(pVCpuCaller);
744 Log10(("IEMTlbInvalidateAllPhysicalAllCpus\n"));
745
746 VMCC_FOR_EACH_VMCPU(pVM)
747 {
748# ifdef IEM_WITH_CODE_TLB
749 if (pVCpuCaller == pVCpu)
750 pVCpu->iem.s.cbInstrBufTotal = 0;
751# endif
752
753 uint64_t const uTlbPhysRevPrev = ASMAtomicUoReadU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev);
754 uint64_t uTlbPhysRevNew = uTlbPhysRevPrev + IEMTLB_PHYS_REV_INCR;
755 if (RT_LIKELY(uTlbPhysRevNew > IEMTLB_PHYS_REV_INCR * 2))
756 { /* likely */}
757 else if (pVCpuCaller == pVCpu)
758 uTlbPhysRevNew = IEMTLB_PHYS_REV_INCR;
759 else
760 {
761 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
762 continue;
763 }
764 ASMAtomicCmpXchgU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev);
765 ASMAtomicCmpXchgU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev);
766 }
767 VMCC_FOR_EACH_VMCPU_END(pVM);
768
769#else
770 RT_NOREF(pVM, idCpuCaller);
771#endif
772}
773
774
775/**
776 * Flushes the prefetch buffer, light version.
777 */
778void iemOpcodeFlushLight(PVMCPUCC pVCpu, uint8_t cbInstr)
779{
780#ifndef IEM_WITH_CODE_TLB
781 pVCpu->iem.s.cbOpcode = cbInstr;
782#else
783 RT_NOREF(pVCpu, cbInstr);
784#endif
785}
786
787
788/**
789 * Flushes the prefetch buffer, heavy version.
790 */
791void iemOpcodeFlushHeavy(PVMCPUCC pVCpu, uint8_t cbInstr)
792{
793#ifndef IEM_WITH_CODE_TLB
794 pVCpu->iem.s.cbOpcode = cbInstr; /* Note! SVM and VT-x may set this to zero on exit, rather than the instruction length. */
795#elif 1
796 pVCpu->iem.s.pbInstrBuf = NULL;
797 RT_NOREF(cbInstr);
798#else
799 RT_NOREF(pVCpu, cbInstr);
800#endif
801}
802
803
804
805#ifdef IEM_WITH_CODE_TLB
806
807/**
808 * Tries to fetch @a cbDst opcode bytes, raising the appropriate exception and
809 * longjmp'ing on failure.
810 *
811 * We end up here for a number of reasons:
812 * - pbInstrBuf isn't yet initialized.
813 * - Advancing beyond the buffer boundary (e.g. cross page).
814 * - Advancing beyond the CS segment limit.
815 * - Fetching from non-mappable page (e.g. MMIO).
816 *
817 * @param pVCpu The cross context virtual CPU structure of the
818 * calling thread.
819 * @param pvDst Where to return the bytes.
820 * @param cbDst Number of bytes to read.
821 *
822 * @todo Make cbDst = 0 a way of initializing pbInstrBuf?
823 */
824void iemOpcodeFetchBytesJmp(PVMCPUCC pVCpu, size_t cbDst, void *pvDst) IEM_NOEXCEPT_MAY_LONGJMP
825{
826# ifdef IN_RING3
827 for (;;)
828 {
829 Assert(cbDst <= 8);
830 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
831
832 /*
833 * We might have a partial buffer match, deal with that first to make the
834 * rest simpler. This is the first part of the cross page/buffer case.
835 */
836 if (pVCpu->iem.s.pbInstrBuf != NULL)
837 {
838 if (offBuf < pVCpu->iem.s.cbInstrBuf)
839 {
840 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
841 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
842 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
843
844 cbDst -= cbCopy;
845 pvDst = (uint8_t *)pvDst + cbCopy;
846 offBuf += cbCopy;
847 pVCpu->iem.s.offInstrNextByte += cbCopy;
848 }
849 }
850
851 /*
852 * Check segment limit, figuring how much we're allowed to access at this point.
853 *
854 * We will fault immediately if RIP is past the segment limit / in non-canonical
855 * territory. If we do continue, there are one or more bytes to read before we
856 * end up in trouble and we need to do that first before faulting.
857 */
858 RTGCPTR GCPtrFirst;
859 uint32_t cbMaxRead;
860 if (IEM_IS_64BIT_CODE(pVCpu))
861 {
862 GCPtrFirst = pVCpu->cpum.GstCtx.rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
863 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
864 { /* likely */ }
865 else
866 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
867 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
868 }
869 else
870 {
871 GCPtrFirst = pVCpu->cpum.GstCtx.eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
872 /* Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu)); - this is allowed */
873 if (RT_LIKELY((uint32_t)GCPtrFirst <= pVCpu->cpum.GstCtx.cs.u32Limit))
874 { /* likely */ }
875 else /** @todo For CPUs older than the 386, we should not necessarily generate \#GP here but wrap around! */
876 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
877 cbMaxRead = pVCpu->cpum.GstCtx.cs.u32Limit - (uint32_t)GCPtrFirst + 1;
878 if (cbMaxRead != 0)
879 { /* likely */ }
880 else
881 {
882 /* Overflowed because address is 0 and limit is max. */
883 Assert(GCPtrFirst == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
884 cbMaxRead = X86_PAGE_SIZE;
885 }
886 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base;
887 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
888 if (cbMaxRead2 < cbMaxRead)
889 cbMaxRead = cbMaxRead2;
890 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
891 }
892
893 /*
894 * Get the TLB entry for this piece of code.
895 */
896 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.CodeTlb, GCPtrFirst);
897 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.CodeTlb, uTag);
898 if (pTlbe->uTag == uTag)
899 {
900 /* likely when executing lots of code, otherwise unlikely */
901# ifdef VBOX_WITH_STATISTICS
902 pVCpu->iem.s.CodeTlb.cTlbHits++;
903# endif
904 }
905 else
906 {
907 pVCpu->iem.s.CodeTlb.cTlbMisses++;
908 PGMPTWALK Walk;
909 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &Walk);
910 if (RT_FAILURE(rc))
911 {
912#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
913 /** @todo Nested VMX: Need to handle EPT violation/misconfig here? */
914 Assert(!(Walk.fFailed & PGM_WALKFAIL_EPT));
915#endif
916 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
917 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, rc);
918 }
919
920 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
921 Assert(Walk.fSucceeded);
922 pTlbe->uTag = uTag;
923 pTlbe->fFlagsAndPhysRev = (~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A))
924 | (Walk.fEffective >> X86_PTE_PAE_BIT_NX);
925 pTlbe->GCPhys = Walk.GCPhys;
926 pTlbe->pbMappingR3 = NULL;
927 }
928
929 /*
930 * Check TLB page table level access flags.
931 */
932 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
933 {
934 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && IEM_GET_CPL(pVCpu) == 3)
935 {
936 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
937 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
938 }
939 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
940 {
941 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
942 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
943 }
944 }
945
946 /*
947 * Look up the physical page info if necessary.
948 */
949 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
950 { /* not necessary */ }
951 else
952 {
953 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
954 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
955 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
956 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
957 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
958 { /* likely */ }
959 else
960 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
961 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
962 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_UNASSIGNED);
963 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
964 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
965 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
966 }
967
968# if defined(IN_RING3) || defined(IN_RING0) /** @todo fixme */
969 /*
970 * Try do a direct read using the pbMappingR3 pointer.
971 */
972 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
973 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
974 {
975 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
976 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
977 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
978 {
979 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
980 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
981 }
982 else
983 {
984 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
985 if (cbInstr + (uint32_t)cbDst <= 15)
986 {
987 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
988 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
989 }
990 else
991 {
992 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0)\n",
993 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, cbDst));
994 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
995 }
996 }
997 if (cbDst <= cbMaxRead)
998 {
999 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1000 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1001 pVCpu->iem.s.GCPhysInstrBuf = pTlbe->GCPhys;
1002 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1003 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1004 return;
1005 }
1006 pVCpu->iem.s.pbInstrBuf = NULL;
1007
1008 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1009 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1010 }
1011# else
1012# error "refactor as needed"
1013 /*
1014 * There is no special read handling, so we can read a bit more and
1015 * put it in the prefetch buffer.
1016 */
1017 if ( cbDst < cbMaxRead
1018 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1019 {
1020 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1021 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1022 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1023 { /* likely */ }
1024 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1025 {
1026 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1027 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1028 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1029 AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)));
1030 }
1031 else
1032 {
1033 Log((RT_SUCCESS(rcStrict)
1034 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1035 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1036 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1037 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1038 }
1039 }
1040# endif
1041 /*
1042 * Special read handling, so only read exactly what's needed.
1043 * This is a highly unlikely scenario.
1044 */
1045 else
1046 {
1047 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
1048
1049 /* Check instruction length. */
1050 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1051 if (RT_LIKELY(cbInstr + cbDst <= 15))
1052 { /* likely */ }
1053 else
1054 {
1055 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0) [slow]\n",
1056 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, cbDst));
1057 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1058 }
1059
1060 /* Do the reading. */
1061 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1062 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1063 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1064 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1065 { /* likely */ }
1066 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1067 {
1068 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1069 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1070 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1071 AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)));
1072 }
1073 else
1074 {
1075 Log((RT_SUCCESS(rcStrict)
1076 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1077 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1078 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1079 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1080 }
1081 pVCpu->iem.s.offInstrNextByte = offBuf + cbToRead;
1082 if (cbToRead == cbDst)
1083 return;
1084 }
1085
1086 /*
1087 * More to read, loop.
1088 */
1089 cbDst -= cbMaxRead;
1090 pvDst = (uint8_t *)pvDst + cbMaxRead;
1091 }
1092# else /* !IN_RING3 */
1093 RT_NOREF(pvDst, cbDst);
1094 if (pvDst || cbDst)
1095 IEM_DO_LONGJMP(pVCpu, VERR_INTERNAL_ERROR);
1096# endif /* !IN_RING3 */
1097}
1098
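/*
 * Hedged note in code form (hypothetical helper, not part of IEM): the magic 15
 * in the checks above is the architectural maximum x86/AMD64 instruction
 * length; needing to fetch more than that for one instruction means the guest
 * code is malformed and a #GP(0) is raised instead.
 */
#if 0 /* illustration only */
static bool iemExampleIsInstrLengthOk(uint32_t cbFetchedSoFar, size_t cbMore)
{
    return cbFetchedSoFar + cbMore <= 15; /* x86 instructions never exceed 15 bytes. */
}
#endif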
1099#else /* !IEM_WITH_CODE_TLB */
1100
1101/**
1102 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
1103 * exception if it fails.
1104 *
1105 * @returns Strict VBox status code.
1106 * @param pVCpu The cross context virtual CPU structure of the
1107 * calling thread.
1108 * @param cbMin The minimum number of bytes relative to offOpcode
1109 * that must be read.
1110 */
1111VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPUCC pVCpu, size_t cbMin) RT_NOEXCEPT
1112{
1113 /*
1114 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1115 *
1116 * First translate CS:rIP to a physical address.
1117 */
1118 uint8_t const cbOpcode = pVCpu->iem.s.cbOpcode;
1119 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
1120 uint8_t const cbLeft = cbOpcode - offOpcode;
1121 Assert(cbLeft < cbMin);
1122 Assert(cbOpcode <= sizeof(pVCpu->iem.s.abOpcode));
1123
1124 uint32_t cbToTryRead;
1125 RTGCPTR GCPtrNext;
1126 if (IEM_IS_64BIT_CODE(pVCpu))
1127 {
1128 GCPtrNext = pVCpu->cpum.GstCtx.rip + cbOpcode;
1129 if (!IEM_IS_CANONICAL(GCPtrNext))
1130 return iemRaiseGeneralProtectionFault0(pVCpu);
1131 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1132 }
1133 else
1134 {
1135 uint32_t GCPtrNext32 = pVCpu->cpum.GstCtx.eip;
1136 /* Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu)); - this is allowed */
1137 GCPtrNext32 += cbOpcode;
1138 if (GCPtrNext32 > pVCpu->cpum.GstCtx.cs.u32Limit)
1139 /** @todo For CPUs older than the 386, we should not generate \#GP here but wrap around! */
1140 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1141 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrNext32 + 1;
1142 if (!cbToTryRead) /* overflowed */
1143 {
1144 Assert(GCPtrNext32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1145 cbToTryRead = UINT32_MAX;
1146 /** @todo check out wrapping around the code segment. */
1147 }
1148 if (cbToTryRead < cbMin - cbLeft)
1149 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1150 GCPtrNext = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrNext32;
1151
1152 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1153 if (cbToTryRead > cbLeftOnPage)
1154 cbToTryRead = cbLeftOnPage;
1155 }
1156
1157 /* Restrict to opcode buffer space.
1158
1159 We're making ASSUMPTIONS here based on work done previously in
1160 iemInitDecoderAndPrefetchOpcodes, where bytes from the first page will
1161 be fetched in case of an instruction crossing two pages. */
1162 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - cbOpcode)
1163 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - cbOpcode;
1164 if (RT_LIKELY(cbToTryRead + cbLeft >= cbMin))
1165 { /* likely */ }
1166 else
1167 {
1168 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0)\n",
1169 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, offOpcode, cbMin));
1170 return iemRaiseGeneralProtectionFault0(pVCpu);
1171 }
1172
1173 PGMPTWALK Walk;
1174 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &Walk);
1175 if (RT_FAILURE(rc))
1176 {
1177 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1178#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1179 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1180 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
1181#endif
1182 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, rc);
1183 }
1184 if (!(Walk.fEffective & X86_PTE_US) && IEM_GET_CPL(pVCpu) == 3)
1185 {
1186 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
1187#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1188 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1189 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
1190#endif
1191 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1192 }
1193 if ((Walk.fEffective & X86_PTE_PAE_NX) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
1194 {
1195 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
1196#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1197 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1198 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
1199#endif
1200 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1201 }
1202 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1203 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, cbOpcode));
1204 /** @todo Check reserved bits and such stuff. PGM is better at doing
1205 * that, so do it when implementing the guest virtual address
1206 * TLB... */
1207
1208 /*
1209 * Read the bytes at this address.
1210 *
1211 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1212 * and since PATM should only patch the start of an instruction there
1213 * should be no need to check again here.
1214 */
1215 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
1216 {
1217 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[cbOpcode],
1218 cbToTryRead, PGMACCESSORIGIN_IEM);
1219 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1220 { /* likely */ }
1221 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1222 {
1223 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1224 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1225 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1226 }
1227 else
1228 {
1229 Log((RT_SUCCESS(rcStrict)
1230 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1231 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1232 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1233 return rcStrict;
1234 }
1235 }
1236 else
1237 {
1238 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[cbOpcode], GCPhys, cbToTryRead);
1239 if (RT_SUCCESS(rc))
1240 { /* likely */ }
1241 else
1242 {
1243 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
1244 return rc;
1245 }
1246 }
1247 pVCpu->iem.s.cbOpcode = cbOpcode + cbToTryRead;
1248 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
1249
1250 return VINF_SUCCESS;
1251}
1252
1253#endif /* !IEM_WITH_CODE_TLB */
1254#ifndef IEM_WITH_SETJMP
1255
1256/**
1257 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
1258 *
1259 * @returns Strict VBox status code.
1260 * @param pVCpu The cross context virtual CPU structure of the
1261 * calling thread.
1262 * @param pb Where to return the opcode byte.
1263 */
1264VBOXSTRICTRC iemOpcodeGetNextU8Slow(PVMCPUCC pVCpu, uint8_t *pb) RT_NOEXCEPT
1265{
1266 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1267 if (rcStrict == VINF_SUCCESS)
1268 {
1269 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1270 *pb = pVCpu->iem.s.abOpcode[offOpcode];
1271 pVCpu->iem.s.offOpcode = offOpcode + 1;
1272 }
1273 else
1274 *pb = 0;
1275 return rcStrict;
1276}
1277
1278#else /* IEM_WITH_SETJMP */
1279
1280/**
1281 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
1282 *
1283 * @returns The opcode byte.
1284 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1285 */
1286uint8_t iemOpcodeGetNextU8SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1287{
1288# ifdef IEM_WITH_CODE_TLB
1289 uint8_t u8;
1290 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
1291 return u8;
1292# else
1293 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1294 if (rcStrict == VINF_SUCCESS)
1295 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
1296 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1297# endif
1298}
1299
1300#endif /* IEM_WITH_SETJMP */
1301
1302#ifndef IEM_WITH_SETJMP
1303
1304/**
1305 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
1306 *
1307 * @returns Strict VBox status code.
1308 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1309 * @param pu16 Where to return the opcode word.
1310 */
1311VBOXSTRICTRC iemOpcodeGetNextS8SxU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1312{
1313 uint8_t u8;
1314 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1315 if (rcStrict == VINF_SUCCESS)
1316 *pu16 = (int8_t)u8;
1317 return rcStrict;
1318}
1319
1320
1321/**
1322 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
1323 *
1324 * @returns Strict VBox status code.
1325 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1326 * @param pu32 Where to return the opcode dword.
1327 */
1328VBOXSTRICTRC iemOpcodeGetNextS8SxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1329{
1330 uint8_t u8;
1331 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1332 if (rcStrict == VINF_SUCCESS)
1333 *pu32 = (int8_t)u8;
1334 return rcStrict;
1335}
1336
1337
1338/**
1339 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
1340 *
1341 * @returns Strict VBox status code.
1342 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1343 * @param pu64 Where to return the opcode qword.
1344 */
1345VBOXSTRICTRC iemOpcodeGetNextS8SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1346{
1347 uint8_t u8;
1348 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1349 if (rcStrict == VINF_SUCCESS)
1350 *pu64 = (int8_t)u8;
1351 return rcStrict;
1352}
1353
1354#endif /* !IEM_WITH_SETJMP */
1355
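/*
 * Hedged illustration (not from the original file): the Sx ("sign extend")
 * getters above rely on the standard C conversion where an int8_t is widened
 * with its sign bit, e.g. a 0xFF opcode byte becomes 0xFFFF / 0xFFFFFFFF /
 * 0xFFFFFFFFFFFFFFFF when assigned to the wider unsigned destination.
 */
#if 0 /* illustration only */
static uint32_t iemExampleSignExtendU8ToU32(uint8_t u8)
{
    return (uint32_t)(int32_t)(int8_t)u8; /* 0x80..0xFF -> 0xFFFFFF80..0xFFFFFFFF */
}
#endif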
1356
1357#ifndef IEM_WITH_SETJMP
1358
1359/**
1360 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
1361 *
1362 * @returns Strict VBox status code.
1363 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1364 * @param pu16 Where to return the opcode word.
1365 */
1366VBOXSTRICTRC iemOpcodeGetNextU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1367{
1368 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1369 if (rcStrict == VINF_SUCCESS)
1370 {
1371 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1372# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1373 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1374# else
1375 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1376# endif
1377 pVCpu->iem.s.offOpcode = offOpcode + 2;
1378 }
1379 else
1380 *pu16 = 0;
1381 return rcStrict;
1382}
1383
1384#else /* IEM_WITH_SETJMP */
1385
1386/**
1387 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error
1388 *
1389 * @returns The opcode word.
1390 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1391 */
1392uint16_t iemOpcodeGetNextU16SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1393{
1394# ifdef IEM_WITH_CODE_TLB
1395 uint16_t u16;
1396 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
1397 return u16;
1398# else
1399 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1400 if (rcStrict == VINF_SUCCESS)
1401 {
1402 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1403 pVCpu->iem.s.offOpcode += 2;
1404# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1405 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1406# else
1407 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1408# endif
1409 }
1410 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1411# endif
1412}
1413
1414#endif /* IEM_WITH_SETJMP */
1415
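/*
 * Hedged illustration (not from the original file): RT_MAKE_U16 and friends in
 * the getters above assemble the opcode bytes in little-endian order, i.e. the
 * first opcode byte is the least significant one.
 */
#if 0 /* illustration only */
static uint16_t iemExampleMakeU16(uint8_t bLo, uint8_t bHi)
{
    return (uint16_t)(bLo | ((uint16_t)bHi << 8)); /* same result as RT_MAKE_U16(bLo, bHi) */
}
#endif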
1416#ifndef IEM_WITH_SETJMP
1417
1418/**
1419 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
1420 *
1421 * @returns Strict VBox status code.
1422 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1423 * @param pu32 Where to return the opcode double word.
1424 */
1425VBOXSTRICTRC iemOpcodeGetNextU16ZxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1426{
1427 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1428 if (rcStrict == VINF_SUCCESS)
1429 {
1430 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1431 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1432 pVCpu->iem.s.offOpcode = offOpcode + 2;
1433 }
1434 else
1435 *pu32 = 0;
1436 return rcStrict;
1437}
1438
1439
1440/**
1441 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
1442 *
1443 * @returns Strict VBox status code.
1444 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1445 * @param pu64 Where to return the opcode quad word.
1446 */
1447VBOXSTRICTRC iemOpcodeGetNextU16ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1448{
1449 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1450 if (rcStrict == VINF_SUCCESS)
1451 {
1452 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1453 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1454 pVCpu->iem.s.offOpcode = offOpcode + 2;
1455 }
1456 else
1457 *pu64 = 0;
1458 return rcStrict;
1459}
1460
1461#endif /* !IEM_WITH_SETJMP */
1462
1463#ifndef IEM_WITH_SETJMP
1464
1465/**
1466 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
1467 *
1468 * @returns Strict VBox status code.
1469 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1470 * @param pu32 Where to return the opcode dword.
1471 */
1472VBOXSTRICTRC iemOpcodeGetNextU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1473{
1474 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1475 if (rcStrict == VINF_SUCCESS)
1476 {
1477 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1478# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1479 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1480# else
1481 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1482 pVCpu->iem.s.abOpcode[offOpcode + 1],
1483 pVCpu->iem.s.abOpcode[offOpcode + 2],
1484 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1485# endif
1486 pVCpu->iem.s.offOpcode = offOpcode + 4;
1487 }
1488 else
1489 *pu32 = 0;
1490 return rcStrict;
1491}
1492
1493#else /* IEM_WITH_SETJMP */
1494
1495/**
1496 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
1497 *
1498 * @returns The opcode dword.
1499 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1500 */
1501uint32_t iemOpcodeGetNextU32SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1502{
1503# ifdef IEM_WITH_CODE_TLB
1504 uint32_t u32;
1505 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
1506 return u32;
1507# else
1508 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1509 if (rcStrict == VINF_SUCCESS)
1510 {
1511 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1512 pVCpu->iem.s.offOpcode = offOpcode + 4;
1513# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1514 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1515# else
1516 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1517 pVCpu->iem.s.abOpcode[offOpcode + 1],
1518 pVCpu->iem.s.abOpcode[offOpcode + 2],
1519 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1520# endif
1521 }
1522 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1523# endif
1524}
1525
1526#endif /* IEM_WITH_SETJMP */
1527
1528#ifndef IEM_WITH_SETJMP
1529
1530/**
1531 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
1532 *
1533 * @returns Strict VBox status code.
1534 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1535 * @param pu64 Where to return the opcode qword.
1536 */
1537VBOXSTRICTRC iemOpcodeGetNextU32ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1538{
1539 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1540 if (rcStrict == VINF_SUCCESS)
1541 {
1542 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1543 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1544 pVCpu->iem.s.abOpcode[offOpcode + 1],
1545 pVCpu->iem.s.abOpcode[offOpcode + 2],
1546 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1547 pVCpu->iem.s.offOpcode = offOpcode + 4;
1548 }
1549 else
1550 *pu64 = 0;
1551 return rcStrict;
1552}
1553
1554
1555/**
1556 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
1557 *
1558 * @returns Strict VBox status code.
1559 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1560 * @param pu64 Where to return the opcode qword.
1561 */
1562VBOXSTRICTRC iemOpcodeGetNextS32SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1563{
1564 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1565 if (rcStrict == VINF_SUCCESS)
1566 {
1567 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1568 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1569 pVCpu->iem.s.abOpcode[offOpcode + 1],
1570 pVCpu->iem.s.abOpcode[offOpcode + 2],
1571 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1572 pVCpu->iem.s.offOpcode = offOpcode + 4;
1573 }
1574 else
1575 *pu64 = 0;
1576 return rcStrict;
1577}
1578
1579#endif /* !IEM_WITH_SETJMP */
1580
1581#ifndef IEM_WITH_SETJMP
1582
1583/**
1584 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
1585 *
1586 * @returns Strict VBox status code.
1587 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1588 * @param pu64 Where to return the opcode qword.
1589 */
1590VBOXSTRICTRC iemOpcodeGetNextU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1591{
1592 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1593 if (rcStrict == VINF_SUCCESS)
1594 {
1595 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1596# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1597 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1598# else
1599 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1600 pVCpu->iem.s.abOpcode[offOpcode + 1],
1601 pVCpu->iem.s.abOpcode[offOpcode + 2],
1602 pVCpu->iem.s.abOpcode[offOpcode + 3],
1603 pVCpu->iem.s.abOpcode[offOpcode + 4],
1604 pVCpu->iem.s.abOpcode[offOpcode + 5],
1605 pVCpu->iem.s.abOpcode[offOpcode + 6],
1606 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1607# endif
1608 pVCpu->iem.s.offOpcode = offOpcode + 8;
1609 }
1610 else
1611 *pu64 = 0;
1612 return rcStrict;
1613}
1614
1615#else /* IEM_WITH_SETJMP */
1616
1617/**
1618 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
1619 *
1620 * @returns The opcode qword.
1621 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1622 */
1623uint64_t iemOpcodeGetNextU64SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1624{
1625# ifdef IEM_WITH_CODE_TLB
1626 uint64_t u64;
1627 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
1628 return u64;
1629# else
1630 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1631 if (rcStrict == VINF_SUCCESS)
1632 {
1633 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1634 pVCpu->iem.s.offOpcode = offOpcode + 8;
1635# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1636 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1637# else
1638 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1639 pVCpu->iem.s.abOpcode[offOpcode + 1],
1640 pVCpu->iem.s.abOpcode[offOpcode + 2],
1641 pVCpu->iem.s.abOpcode[offOpcode + 3],
1642 pVCpu->iem.s.abOpcode[offOpcode + 4],
1643 pVCpu->iem.s.abOpcode[offOpcode + 5],
1644 pVCpu->iem.s.abOpcode[offOpcode + 6],
1645 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1646# endif
1647 }
1648 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1649# endif
1650}
1651
1652#endif /* IEM_WITH_SETJMP */
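
/*
 * For illustration only: a minimal, standalone sketch of what the two fetch
 * paths above compute.  Both the IEM_USE_UNALIGNED_DATA_ACCESS cast and the
 * RT_MAKE_U32_FROM_U8 / RT_MAKE_U64_FROM_U8 byte assembly yield the same
 * little-endian value from the opcode buffer; the byte assembly merely avoids
 * unaligned loads on hosts that dislike them.  The helper names below are
 * illustrative, not part of the IEM API, and assume only <stdint.h>.
 * @code
 *  #include <stdint.h>
 *
 *  static uint32_t exampleReadU32LittleEndian(uint8_t const *pb)
 *  {
 *      return (uint32_t)pb[0]
 *           | ((uint32_t)pb[1] <<  8)
 *           | ((uint32_t)pb[2] << 16)
 *           | ((uint32_t)pb[3] << 24);
 *  }
 *
 *  static uint64_t exampleReadU64LittleEndian(uint8_t const *pb)
 *  {
 *      return (uint64_t)exampleReadU32LittleEndian(pb)
 *           | ((uint64_t)exampleReadU32LittleEndian(pb + 4) << 32);
 *  }
 * @endcode
 */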
1653
1654
1655
1656/** @name Misc Worker Functions.
1657 * @{
1658 */
1659
1660/**
1661 * Gets the exception class for the specified exception vector.
1662 *
1663 * @returns The class of the specified exception.
1664 * @param uVector The exception vector.
1665 */
1666static IEMXCPTCLASS iemGetXcptClass(uint8_t uVector) RT_NOEXCEPT
1667{
1668 Assert(uVector <= X86_XCPT_LAST);
1669 switch (uVector)
1670 {
1671 case X86_XCPT_DE:
1672 case X86_XCPT_TS:
1673 case X86_XCPT_NP:
1674 case X86_XCPT_SS:
1675 case X86_XCPT_GP:
1676 case X86_XCPT_SX: /* AMD only */
1677 return IEMXCPTCLASS_CONTRIBUTORY;
1678
1679 case X86_XCPT_PF:
1680 case X86_XCPT_VE: /* Intel only */
1681 return IEMXCPTCLASS_PAGE_FAULT;
1682
1683 case X86_XCPT_DF:
1684 return IEMXCPTCLASS_DOUBLE_FAULT;
1685 }
1686 return IEMXCPTCLASS_BENIGN;
1687}
1688
1689
1690/**
1691 * Evaluates how to handle an exception caused during delivery of another event
1692 * (exception / interrupt).
1693 *
1694 * @returns How to handle the recursive exception.
1695 * @param pVCpu The cross context virtual CPU structure of the
1696 * calling thread.
1697 * @param fPrevFlags The flags of the previous event.
1698 * @param uPrevVector The vector of the previous event.
1699 * @param fCurFlags The flags of the current exception.
1700 * @param uCurVector The vector of the current exception.
1701 * @param pfXcptRaiseInfo Where to store additional information about the
1702 * exception condition. Optional.
1703 */
1704VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPUCC pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
1705 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
1706{
1707 /*
1708 * Only CPU exceptions can be raised while delivering other events; software interrupt
1709 * (INTn/INT3/INTO/ICEBP) generated exceptions cannot occur as the current (second) exception.
1710 */
1711 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
1712 Assert(pVCpu); RT_NOREF(pVCpu);
1713 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
1714
1715 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
1716 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
1717 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
1718 {
1719 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
1720 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
1721 {
1722 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
1723 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
1724 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
1725 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
1726 {
1727 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
1728 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
1729 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
1730 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
1731 uCurVector, pVCpu->cpum.GstCtx.cr2));
1732 }
1733 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
1734 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
1735 {
1736 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
1737 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
1738 }
1739 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
1740 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
1741 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
1742 {
1743 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
1744 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
1745 }
1746 }
1747 else
1748 {
1749 if (uPrevVector == X86_XCPT_NMI)
1750 {
1751 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
1752 if (uCurVector == X86_XCPT_PF)
1753 {
1754 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
1755 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
1756 }
1757 }
1758 else if ( uPrevVector == X86_XCPT_AC
1759 && uCurVector == X86_XCPT_AC)
1760 {
1761 enmRaise = IEMXCPTRAISE_CPU_HANG;
1762 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
1763 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
1764 }
1765 }
1766 }
1767 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
1768 {
1769 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
1770 if (uCurVector == X86_XCPT_PF)
1771 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
1772 }
1773 else
1774 {
1775 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
1776 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
1777 }
1778
1779 if (pfXcptRaiseInfo)
1780 *pfXcptRaiseInfo = fRaiseInfo;
1781 return enmRaise;
1782}
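
/*
 * For illustration only: a condensed, standalone sketch of the class matrix
 * applied above when deciding whether a second exception escalates to \#DF
 * (cf. Intel SDM, "Conditions for Generating a Double Fault").  The enum and
 * helper names below are illustrative stand-ins, not the IEM types; the
 * \#DF-followed-by-fault case (triple fault / shutdown) is handled separately
 * in the function above.
 * @code
 *  typedef enum { XCPTCLS_BENIGN, XCPTCLS_CONTRIBUTORY, XCPTCLS_PAGE_FAULT } XCPTCLS;
 *
 *  // Returns 1 if delivering the second exception escalates to a double fault.
 *  static int exampleIsDoubleFault(XCPTCLS enmFirst, XCPTCLS enmSecond)
 *  {
 *      if (enmFirst == XCPTCLS_CONTRIBUTORY)
 *          return enmSecond == XCPTCLS_CONTRIBUTORY;
 *      if (enmFirst == XCPTCLS_PAGE_FAULT)
 *          return enmSecond == XCPTCLS_CONTRIBUTORY || enmSecond == XCPTCLS_PAGE_FAULT;
 *      return 0;   // A benign first event never escalates to a double fault.
 *  }
 * @endcode
 */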
1783
1784
1785/**
1786 * Enters the CPU shutdown state initiated by a triple fault or other
1787 * unrecoverable condition.
1788 *
1789 * @returns Strict VBox status code.
1790 * @param pVCpu The cross context virtual CPU structure of the
1791 * calling thread.
1792 */
1793static VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPUCC pVCpu) RT_NOEXCEPT
1794{
1795 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
1796 IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(pVCpu, VMX_EXIT_TRIPLE_FAULT, 0 /* u64ExitQual */);
1797
1798 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
1799 {
1800 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
1801 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
1802 }
1803
1804 RT_NOREF(pVCpu);
1805 return VINF_EM_TRIPLE_FAULT;
1806}
1807
1808
1809/**
1810 * Validates a new SS segment.
1811 *
1812 * @returns VBox strict status code.
1813 * @param pVCpu The cross context virtual CPU structure of the
1814 * calling thread.
1815 * @param NewSS The new SS selector.
1816 * @param uCpl The CPL to load the stack for.
1817 * @param pDesc Where to return the descriptor.
1818 */
1819static VBOXSTRICTRC iemMiscValidateNewSS(PVMCPUCC pVCpu, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc) RT_NOEXCEPT
1820{
1821 /* Null selectors are not allowed (we're not called for dispatching
1822 interrupts with SS=0 in long mode). */
1823 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
1824 {
1825 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
1826 return iemRaiseTaskSwitchFault0(pVCpu);
1827 }
1828
1829 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
1830 if ((NewSS & X86_SEL_RPL) != uCpl)
1831 {
1832 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
1833 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1834 }
1835
1836 /*
1837 * Read the descriptor.
1838 */
1839 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
1840 if (rcStrict != VINF_SUCCESS)
1841 return rcStrict;
1842
1843 /*
1844 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
1845 */
1846 if (!pDesc->Legacy.Gen.u1DescType)
1847 {
1848 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1849 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1850 }
1851
1852 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
1853 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
1854 {
1855 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1856 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1857 }
1858 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
1859 {
1860 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
1861 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1862 }
1863
1864 /* Is it there? */
1865 /** @todo testcase: Is this checked before the canonical / limit check below? */
1866 if (!pDesc->Legacy.Gen.u1Present)
1867 {
1868 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
1869 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
1870 }
1871
1872 return VINF_SUCCESS;
1873}
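
/*
 * For illustration only: the SS checks above, distilled into a standalone
 * predicate over a simplified descriptor view.  The struct and helper below
 * are illustrative stand-ins, not IEM types; the checks mirror what the Intel
 * SDM documents for LSS, POP SS and MOV SS.
 * @code
 *  #include <stdint.h>
 *
 *  typedef struct { uint8_t fCode, fWritable, fSystem, fPresent, uDpl; } EXAMPLESSDESC;
 *
 *  // Returns 1 if uSel may be loaded into SS at privilege level uCpl.
 *  static int exampleIsValidSS(uint16_t uSel, uint8_t uCpl, EXAMPLESSDESC const *pDesc)
 *  {
 *      if (!(uSel & 0xfff8u))                  // Null selector (GDT, index 0) -> #TS(0).
 *          return 0;
 *      if ((uSel & 0x3) != uCpl)               // RPL must equal the CPL -> #TS(sel).
 *          return 0;
 *      if (pDesc->fSystem)                     // Must be a code/data (non-system) descriptor.
 *          return 0;
 *      if (pDesc->fCode || !pDesc->fWritable)  // Must be a writable data segment.
 *          return 0;
 *      if (pDesc->uDpl != uCpl)                // DPL must equal the CPL.
 *          return 0;
 *      return pDesc->fPresent;                 // Finally, it must be present (else #NP).
 *  }
 * @endcode
 */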
1874
1875/** @} */
1876
1877
1878/** @name Raising Exceptions.
1879 *
1880 * @{
1881 */
1882
1883
1884/**
1885 * Loads the specified stack far pointer from the TSS.
1886 *
1887 * @returns VBox strict status code.
1888 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1889 * @param uCpl The CPL to load the stack for.
1890 * @param pSelSS Where to return the new stack segment.
1891 * @param puEsp Where to return the new stack pointer.
1892 */
1893static VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPUCC pVCpu, uint8_t uCpl, PRTSEL pSelSS, uint32_t *puEsp) RT_NOEXCEPT
1894{
1895 VBOXSTRICTRC rcStrict;
1896 Assert(uCpl < 4);
1897
1898 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
1899 switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
1900 {
1901 /*
1902 * 16-bit TSS (X86TSS16).
1903 */
1904 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
1905 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
1906 {
1907 uint32_t off = uCpl * 4 + 2;
1908 if (off + 4 <= pVCpu->cpum.GstCtx.tr.u32Limit)
1909 {
1910 /** @todo check actual access pattern here. */
1911 uint32_t u32Tmp = 0; /* gcc maybe... */
1912 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
1913 if (rcStrict == VINF_SUCCESS)
1914 {
1915 *puEsp = RT_LOWORD(u32Tmp);
1916 *pSelSS = RT_HIWORD(u32Tmp);
1917 return VINF_SUCCESS;
1918 }
1919 }
1920 else
1921 {
1922 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
1923 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
1924 }
1925 break;
1926 }
1927
1928 /*
1929 * 32-bit TSS (X86TSS32).
1930 */
1931 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
1932 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
1933 {
1934 uint32_t off = uCpl * 8 + 4;
1935 if (off + 7 <= pVCpu->cpum.GstCtx.tr.u32Limit)
1936 {
1937/** @todo check actual access pattern here. */
1938 uint64_t u64Tmp;
1939 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
1940 if (rcStrict == VINF_SUCCESS)
1941 {
1942 *puEsp = u64Tmp & UINT32_MAX;
1943 *pSelSS = (RTSEL)(u64Tmp >> 32);
1944 return VINF_SUCCESS;
1945 }
1946 }
1947 else
1948 {
1949 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
1950 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
1951 }
1952 break;
1953 }
1954
1955 default:
1956 AssertFailed();
1957 rcStrict = VERR_IEM_IPE_4;
1958 break;
1959 }
1960
1961 *puEsp = 0; /* make gcc happy */
1962 *pSelSS = 0; /* make gcc happy */
1963 return rcStrict;
1964}
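
/*
 * For illustration only: where the SS:(E)SP pairs live in the legacy TSS
 * layouts indexed above (Intel SDM 7.2.1).  In a 16-bit TSS the ring-N SS:SP
 * pair is 4 bytes at offset 2 + N * 4; in a 32-bit TSS the ring-N SS:ESP pair
 * is 8 bytes at offset 4 + N * 8.  The helper below is a standalone sketch
 * with an illustrative name, not an IEM API.
 * @code
 *  #include <stdint.h>
 *
 *  // Returns the byte offset of the ring-uCpl stack slot within the TSS.
 *  static uint32_t exampleTssStackSlotOffset(int fIs386Tss, uint8_t uCpl)
 *  {
 *      return fIs386Tss
 *           ? 4 + (uint32_t)uCpl * 8   // ESP0/SS0 at 4, ESP1/SS1 at 12, ESP2/SS2 at 20.
 *           : 2 + (uint32_t)uCpl * 4;  // SP0/SS0 at 2, SP1/SS1 at 6, SP2/SS2 at 10.
 *  }
 * @endcode
 */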
1965
1966
1967/**
1968 * Loads the specified stack pointer from the 64-bit TSS.
1969 *
1970 * @returns VBox strict status code.
1971 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1972 * @param uCpl The CPL to load the stack for.
1973 * @param uIst The interrupt stack table index, or 0 to use the stack indicated by uCpl.
1974 * @param puRsp Where to return the new stack pointer.
1975 */
1976static VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPUCC pVCpu, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp) RT_NOEXCEPT
1977{
1978 Assert(uCpl < 4);
1979 Assert(uIst < 8);
1980 *puRsp = 0; /* make gcc happy */
1981
1982 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
1983 AssertReturn(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
1984
1985 uint32_t off;
1986 if (uIst)
1987 off = (uIst - 1) * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, ist1);
1988 else
1989 off = uCpl * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, rsp0);
1990 if (off + sizeof(uint64_t) > pVCpu->cpum.GstCtx.tr.u32Limit)
1991 {
1992 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pVCpu->cpum.GstCtx.tr.u32Limit));
1993 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
1994 }
1995
1996 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
1997}
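
/*
 * For illustration only: the 64-bit TSS slots addressed above (Intel SDM 7.7,
 * AMD APM vol. 2).  RSP0..RSP2 sit at offsets 4, 12 and 20; IST1..IST7 at
 * offsets 36 through 84.  The helper below mirrors the uIst/uCpl selection
 * done by the function and is a standalone sketch, not an IEM API.
 * @code
 *  #include <stdint.h>
 *
 *  // Returns the byte offset of the stack pointer to load from a 64-bit TSS.
 *  static uint32_t exampleTss64StackSlotOffset(uint8_t uCpl, uint8_t uIst)
 *  {
 *      if (uIst)                                  // IST1..IST7 requested by the IDT gate.
 *          return 36 + (uint32_t)(uIst - 1) * 8;
 *      return 4 + (uint32_t)uCpl * 8;             // Otherwise RSP0/RSP1/RSP2 by target CPL.
 *  }
 * @endcode
 */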
1998
1999
2000/**
2001 * Adjusts the CPU state according to the exception being raised.
2002 *
2003 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2004 * @param u8Vector The exception that has been raised.
2005 */
2006DECLINLINE(void) iemRaiseXcptAdjustState(PVMCPUCC pVCpu, uint8_t u8Vector)
2007{
2008 switch (u8Vector)
2009 {
2010 case X86_XCPT_DB:
2011 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
2012 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
2013 break;
2014 /** @todo Read the AMD and Intel exception reference... */
2015 }
2016}
2017
2018
2019/**
2020 * Implements exceptions and interrupts for real mode.
2021 *
2022 * @returns VBox strict status code.
2023 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2024 * @param cbInstr The number of bytes to offset rIP by in the return
2025 * address.
2026 * @param u8Vector The interrupt / exception vector number.
2027 * @param fFlags The flags.
2028 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2029 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2030 */
2031static VBOXSTRICTRC
2032iemRaiseXcptOrIntInRealMode(PVMCPUCC pVCpu,
2033 uint8_t cbInstr,
2034 uint8_t u8Vector,
2035 uint32_t fFlags,
2036 uint16_t uErr,
2037 uint64_t uCr2) RT_NOEXCEPT
2038{
2039 NOREF(uErr); NOREF(uCr2);
2040 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2041
2042 /*
2043 * Read the IDT entry.
2044 */
2045 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
2046 {
2047 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
2048 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2049 }
2050 RTFAR16 Idte;
2051 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(4) * u8Vector);
2052 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2053 {
2054 Log(("iemRaiseXcptOrIntInRealMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
2055 return rcStrict;
2056 }
2057
2058 /*
2059 * Push the stack frame.
2060 */
2061 uint16_t *pu16Frame;
2062 uint64_t uNewRsp;
2063 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, 3, (void **)&pu16Frame, &uNewRsp);
2064 if (rcStrict != VINF_SUCCESS)
2065 return rcStrict;
2066
2067 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
2068#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
2069 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
2070 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
2071 fEfl |= UINT16_C(0xf000);
2072#endif
2073 pu16Frame[2] = (uint16_t)fEfl;
2074 pu16Frame[1] = (uint16_t)pVCpu->cpum.GstCtx.cs.Sel;
2075 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
2076 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
2077 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2078 return rcStrict;
2079
2080 /*
2081 * Load the vector address into cs:ip and make exception specific state
2082 * adjustments.
2083 */
2084 pVCpu->cpum.GstCtx.cs.Sel = Idte.sel;
2085 pVCpu->cpum.GstCtx.cs.ValidSel = Idte.sel;
2086 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2087 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)Idte.sel << 4;
2088 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
2089 pVCpu->cpum.GstCtx.rip = Idte.off;
2090 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
2091 IEMMISC_SET_EFL(pVCpu, fEfl);
2092
2093 /** @todo do we actually do this in real mode? */
2094 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
2095 iemRaiseXcptAdjustState(pVCpu, u8Vector);
2096
2097 /* The IEM_F_MODE_XXX and IEM_F_X86_CPL_MASK don't really change here,
2098 so best leave them alone in case we're in a weird kind of real mode... */
2099
2100 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2101}
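
/*
 * For illustration only: the real-mode dispatch above in miniature.  Each IVT
 * entry is 4 bytes (IP word, then CS word) at linear address vector * 4; the
 * CPU pushes FLAGS, CS and IP (three words), clears IF, TF and AC, and jumps
 * to the handler.  The types and helper below are standalone stand-ins, not
 * IEM state; the stack writes are omitted and AC (EFLAGS bit 18) falls outside
 * the 16-bit view used here.
 * @code
 *  #include <stdint.h>
 *
 *  typedef struct { uint16_t ip, cs, flags, sp; } EXAMPLERMREGS;
 *
 *  static void exampleRealModeInt(EXAMPLERMREGS *pRegs, uint8_t const *pbIvt, uint8_t bVector)
 *  {
 *      uint32_t const offIdte = (uint32_t)bVector * 4;    // IVT entry: IP word then CS word.
 *      uint16_t const uNewIp  = (uint16_t)(pbIvt[offIdte]     | (pbIvt[offIdte + 1] << 8));
 *      uint16_t const uNewCs  = (uint16_t)(pbIvt[offIdte + 2] | (pbIvt[offIdte + 3] << 8));
 *
 *      pRegs->sp    -= 6;                                 // Room for FLAGS, CS and the return IP.
 *      pRegs->flags &= (uint16_t)~((1 << 9) | (1 << 8));  // Clear IF (bit 9) and TF (bit 8).
 *      pRegs->cs     = uNewCs;
 *      pRegs->ip     = uNewIp;
 *  }
 * @endcode
 */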
2102
2103
2104/**
2105 * Loads a NULL data selector into a segment register when coming from V8086 mode.
2106 *
2107 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2108 * @param pSReg Pointer to the segment register.
2109 */
2110DECLINLINE(void) iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPUCC pVCpu, PCPUMSELREG pSReg)
2111{
2112 pSReg->Sel = 0;
2113 pSReg->ValidSel = 0;
2114 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2115 {
2116 /* VT-x (Intel 3960x) doesn't change the base and limit; it clears and sets the following attributes. */
2117 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
2118 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
2119 }
2120 else
2121 {
2122 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2123 /** @todo check this on AMD-V */
2124 pSReg->u64Base = 0;
2125 pSReg->u32Limit = 0;
2126 }
2127}
2128
2129
2130/**
2131 * Loads a segment selector during a task switch in V8086 mode.
2132 *
2133 * @param pSReg Pointer to the segment register.
2134 * @param uSel The selector value to load.
2135 */
2136DECLINLINE(void) iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
2137{
2138 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
2139 pSReg->Sel = uSel;
2140 pSReg->ValidSel = uSel;
2141 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2142 pSReg->u64Base = uSel << 4;
2143 pSReg->u32Limit = 0xffff;
2144 pSReg->Attr.u = 0xf3;
2145}
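
/*
 * For illustration only: the hidden-register values produced above.  In V8086
 * mode a selector is simply a paragraph number, so the base is uSel << 4, the
 * limit is always 0xffff, and the attribute byte 0xf3 means present, DPL 3,
 * accessed read/write data.  E.g. uSel=0x1234 gives base 0x12340, so
 * 0x1234:0x0010 addresses linear 0x12350.  The helper below is a standalone
 * sketch with an illustrative name.
 * @code
 *  #include <stdint.h>
 *
 *  // Linear address of uSel:uOff under V8086 / real-mode segmentation.
 *  static uint32_t exampleV86LinearAddress(uint16_t uSel, uint16_t uOff)
 *  {
 *      return ((uint32_t)uSel << 4) + uOff;
 *  }
 * @endcode
 */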
2146
2147
2148/**
2149 * Loads a segment selector during a task switch in protected mode.
2150 *
2151 * In this task switch scenario, we would throw \#TS exceptions rather than
2152 * \#GPs.
2153 *
2154 * @returns VBox strict status code.
2155 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2156 * @param pSReg Pointer to the segment register.
2157 * @param uSel The new selector value.
2158 *
2159 * @remarks This does _not_ handle CS or SS.
2160 * @remarks This expects IEM_GET_CPL(pVCpu) to return an up to date value.
2161 */
2162static VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPUCC pVCpu, PCPUMSELREG pSReg, uint16_t uSel) RT_NOEXCEPT
2163{
2164 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2165
2166 /* Null data selector. */
2167 if (!(uSel & X86_SEL_MASK_OFF_RPL))
2168 {
2169 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
2170 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2171 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2172 return VINF_SUCCESS;
2173 }
2174
2175 /* Fetch the descriptor. */
2176 IEMSELDESC Desc;
2177 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
2178 if (rcStrict != VINF_SUCCESS)
2179 {
2180 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
2181 VBOXSTRICTRC_VAL(rcStrict)));
2182 return rcStrict;
2183 }
2184
2185 /* Must be a data segment or readable code segment. */
2186 if ( !Desc.Legacy.Gen.u1DescType
2187 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
2188 {
2189 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
2190 Desc.Legacy.Gen.u4Type));
2191 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2192 }
2193
2194 /* Check privileges for data segments and non-conforming code segments. */
2195 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2196 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2197 {
2198 /* The RPL and the new CPL must be less than or equal to the DPL. */
2199 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
2200 || (IEM_GET_CPL(pVCpu) > Desc.Legacy.Gen.u2Dpl))
2201 {
2202 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
2203 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
2204 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2205 }
2206 }
2207
2208 /* Is it there? */
2209 if (!Desc.Legacy.Gen.u1Present)
2210 {
2211 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
2212 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2213 }
2214
2215 /* The base and limit. */
2216 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
2217 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
2218
2219 /*
2220 * Ok, everything checked out fine. Now set the accessed bit before
2221 * committing the result into the registers.
2222 */
2223 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2224 {
2225 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
2226 if (rcStrict != VINF_SUCCESS)
2227 return rcStrict;
2228 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2229 }
2230
2231 /* Commit */
2232 pSReg->Sel = uSel;
2233 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2234 pSReg->u32Limit = cbLimit;
2235 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
2236 pSReg->ValidSel = uSel;
2237 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2238 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2239 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
2240
2241 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2242 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2243 return VINF_SUCCESS;
2244}
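
/*
 * For illustration only: the privilege rule enforced above for data segments
 * and non-conforming readable code segments - both the selector's RPL and the
 * CPL must be numerically less than or equal to the descriptor DPL, while
 * conforming code segments skip the check.  The helper below is a standalone
 * sketch, not an IEM API.
 * @code
 *  #include <stdint.h>
 *
 *  // Returns 1 if the DPL check passes for a data/non-conforming segment load.
 *  static int exampleDataSegPrivOk(uint16_t uSel, uint8_t uCpl, uint8_t uDpl, int fConformingCode)
 *  {
 *      if (fConformingCode)
 *          return 1;                       // Conforming code: no RPL/CPL vs DPL check here.
 *      uint8_t const uRpl = uSel & 0x3;
 *      return uRpl <= uDpl && uCpl <= uDpl;
 *  }
 * @endcode
 */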
2245
2246
2247/**
2248 * Performs a task switch.
2249 *
2250 * If the task switch is the result of a JMP, CALL or IRET instruction, the
2251 * caller is responsible for performing the necessary checks (like DPL, TSS
2252 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
2253 * reference for JMP, CALL, IRET.
2254 *
2255 * If the task switch is due to a software interrupt or hardware exception,
2256 * the caller is responsible for validating the TSS selector and descriptor. See
2257 * Intel Instruction reference for INT n.
2258 *
2259 * @returns VBox strict status code.
2260 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2261 * @param enmTaskSwitch The cause of the task switch.
2262 * @param uNextEip The EIP effective after the task switch.
2263 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.
2264 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2265 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2266 * @param SelTSS The TSS selector of the new task.
2267 * @param pNewDescTSS Pointer to the new TSS descriptor.
2268 */
2269VBOXSTRICTRC
2270iemTaskSwitch(PVMCPUCC pVCpu,
2271 IEMTASKSWITCH enmTaskSwitch,
2272 uint32_t uNextEip,
2273 uint32_t fFlags,
2274 uint16_t uErr,
2275 uint64_t uCr2,
2276 RTSEL SelTSS,
2277 PIEMSELDESC pNewDescTSS) RT_NOEXCEPT
2278{
2279 Assert(!IEM_IS_REAL_MODE(pVCpu));
2280 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2281 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2282
2283 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
2284 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2285 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2286 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2287 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2288
2289 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2290 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2291
2292 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
2293 fIsNewTSS386, pVCpu->cpum.GstCtx.eip, uNextEip));
2294
2295 /* Update CR2 in case it's a page-fault. */
2296 /** @todo This should probably be done much earlier in IEM/PGM. See
2297 * @bugref{5653#c49}. */
2298 if (fFlags & IEM_XCPT_FLAGS_CR2)
2299 pVCpu->cpum.GstCtx.cr2 = uCr2;
2300
2301 /*
2302 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
2303 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
2304 */
2305 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
2306 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
2307 if (uNewTSSLimit < uNewTSSLimitMin)
2308 {
2309 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
2310 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
2311 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
2312 }
2313
2314 /*
2315 * Task switches in VMX non-root mode always cause a task-switch VM-exit.
2316 * The new TSS must have been read and validated (DPL, limits etc.) before a
2317 * task-switch VM-exit commences.
2318 *
2319 * See Intel spec. 25.4.2 "Treatment of Task Switches".
2320 */
2321 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
2322 {
2323 Log(("iemTaskSwitch: Guest intercept (source=%u, sel=%#x) -> VM-exit.\n", enmTaskSwitch, SelTSS));
2324 IEM_VMX_VMEXIT_TASK_SWITCH_RET(pVCpu, enmTaskSwitch, SelTSS, uNextEip - pVCpu->cpum.GstCtx.eip);
2325 }
2326
2327 /*
2328 * The SVM nested-guest intercept for task-switch takes priority over all exceptions
2329 * after validating the incoming (new) TSS, see AMD spec. 15.14.1 "Task Switch Intercept".
2330 */
2331 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
2332 {
2333 uint32_t const uExitInfo1 = SelTSS;
2334 uint32_t uExitInfo2 = uErr;
2335 switch (enmTaskSwitch)
2336 {
2337 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
2338 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
2339 default: break;
2340 }
2341 if (fFlags & IEM_XCPT_FLAGS_ERR)
2342 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
2343 if (pVCpu->cpum.GstCtx.eflags.Bits.u1RF)
2344 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
2345
2346 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
2347 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
2348 RT_NOREF2(uExitInfo1, uExitInfo2);
2349 }
2350
2351 /*
2352 * Check the current TSS limit. The last write to the current TSS during the
2353 * task switch is 2 bytes at offset 0x5C (32-bit TSS) or 1 byte at offset 0x28 (16-bit TSS).
2354 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2355 *
2356 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
2357 * end up with smaller than "legal" TSS limits.
2358 */
2359 uint32_t const uCurTSSLimit = pVCpu->cpum.GstCtx.tr.u32Limit;
2360 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
2361 if (uCurTSSLimit < uCurTSSLimitMin)
2362 {
2363 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
2364 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
2365 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
2366 }
2367
2368 /*
2369 * Verify that the new TSS can be accessed and map it. Map only the required contents
2370 * and not the entire TSS.
2371 */
2372 void *pvNewTSS;
2373 uint32_t const cbNewTSS = uNewTSSLimitMin + 1;
2374 RTGCPTR const GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
2375 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
2376 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
2377 * not perform correct translation if this happens. See Intel spec. 7.2.1
2378 * "Task-State Segment". */
2379 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW, 0);
2380 if (rcStrict != VINF_SUCCESS)
2381 {
2382 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
2383 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
2384 return rcStrict;
2385 }
2386
2387 /*
2388 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
2389 */
2390 uint32_t fEFlags = pVCpu->cpum.GstCtx.eflags.u;
2391 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
2392 || enmTaskSwitch == IEMTASKSWITCH_IRET)
2393 {
2394 PX86DESC pDescCurTSS;
2395 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
2396 pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
2397 if (rcStrict != VINF_SUCCESS)
2398 {
2399 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2400 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2401 return rcStrict;
2402 }
2403
2404 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2405 rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
2406 if (rcStrict != VINF_SUCCESS)
2407 {
2408 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2409 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2410 return rcStrict;
2411 }
2412
2413 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
2414 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
2415 {
2416 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2417 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2418 fEFlags &= ~X86_EFL_NT;
2419 }
2420 }
2421
2422 /*
2423 * Save the CPU state into the current TSS.
2424 */
2425 RTGCPTR const GCPtrCurTSS = pVCpu->cpum.GstCtx.tr.u64Base;
2426 if (GCPtrNewTSS == GCPtrCurTSS)
2427 {
2428 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
2429 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
2430 pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u, pVCpu->cpum.GstCtx.eax,
2431 pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel,
2432 pVCpu->cpum.GstCtx.ldtr.Sel));
2433 }
2434 if (fIsNewTSS386)
2435 {
2436 /*
2437 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
2438 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2439 */
2440 void *pvCurTSS32;
2441 uint32_t const offCurTSS = RT_UOFFSETOF(X86TSS32, eip);
2442 uint32_t const cbCurTSS = RT_UOFFSETOF(X86TSS32, selLdt) - RT_UOFFSETOF(X86TSS32, eip);
2443 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
2444 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW, 0);
2445 if (rcStrict != VINF_SUCCESS)
2446 {
2447 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
2448 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
2449 return rcStrict;
2450 }
2451
2452 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS + cbCurTSS). */
2453 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
2454 pCurTSS32->eip = uNextEip;
2455 pCurTSS32->eflags = fEFlags;
2456 pCurTSS32->eax = pVCpu->cpum.GstCtx.eax;
2457 pCurTSS32->ecx = pVCpu->cpum.GstCtx.ecx;
2458 pCurTSS32->edx = pVCpu->cpum.GstCtx.edx;
2459 pCurTSS32->ebx = pVCpu->cpum.GstCtx.ebx;
2460 pCurTSS32->esp = pVCpu->cpum.GstCtx.esp;
2461 pCurTSS32->ebp = pVCpu->cpum.GstCtx.ebp;
2462 pCurTSS32->esi = pVCpu->cpum.GstCtx.esi;
2463 pCurTSS32->edi = pVCpu->cpum.GstCtx.edi;
2464 pCurTSS32->es = pVCpu->cpum.GstCtx.es.Sel;
2465 pCurTSS32->cs = pVCpu->cpum.GstCtx.cs.Sel;
2466 pCurTSS32->ss = pVCpu->cpum.GstCtx.ss.Sel;
2467 pCurTSS32->ds = pVCpu->cpum.GstCtx.ds.Sel;
2468 pCurTSS32->fs = pVCpu->cpum.GstCtx.fs.Sel;
2469 pCurTSS32->gs = pVCpu->cpum.GstCtx.gs.Sel;
2470
2471 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
2472 if (rcStrict != VINF_SUCCESS)
2473 {
2474 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2475 VBOXSTRICTRC_VAL(rcStrict)));
2476 return rcStrict;
2477 }
2478 }
2479 else
2480 {
2481 /*
2482 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
2483 */
2484 void *pvCurTSS16;
2485 uint32_t const offCurTSS = RT_UOFFSETOF(X86TSS16, ip);
2486 uint32_t const cbCurTSS = RT_UOFFSETOF(X86TSS16, selLdt) - RT_UOFFSETOF(X86TSS16, ip);
2487 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
2488 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW, 0);
2489 if (rcStrict != VINF_SUCCESS)
2490 {
2491 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
2492 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
2493 return rcStrict;
2494 }
2495
2496 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS + cbCurTSS). */
2497 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
2498 pCurTSS16->ip = uNextEip;
2499 pCurTSS16->flags = (uint16_t)fEFlags;
2500 pCurTSS16->ax = pVCpu->cpum.GstCtx.ax;
2501 pCurTSS16->cx = pVCpu->cpum.GstCtx.cx;
2502 pCurTSS16->dx = pVCpu->cpum.GstCtx.dx;
2503 pCurTSS16->bx = pVCpu->cpum.GstCtx.bx;
2504 pCurTSS16->sp = pVCpu->cpum.GstCtx.sp;
2505 pCurTSS16->bp = pVCpu->cpum.GstCtx.bp;
2506 pCurTSS16->si = pVCpu->cpum.GstCtx.si;
2507 pCurTSS16->di = pVCpu->cpum.GstCtx.di;
2508 pCurTSS16->es = pVCpu->cpum.GstCtx.es.Sel;
2509 pCurTSS16->cs = pVCpu->cpum.GstCtx.cs.Sel;
2510 pCurTSS16->ss = pVCpu->cpum.GstCtx.ss.Sel;
2511 pCurTSS16->ds = pVCpu->cpum.GstCtx.ds.Sel;
2512
2513 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
2514 if (rcStrict != VINF_SUCCESS)
2515 {
2516 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2517 VBOXSTRICTRC_VAL(rcStrict)));
2518 return rcStrict;
2519 }
2520 }
2521
2522 /*
2523 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
2524 */
2525 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2526 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2527 {
2528 /* 16 or 32-bit TSS doesn't matter here; we only access the first, common 16-bit field (selPrev). */
2529 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
2530 pNewTSS->selPrev = pVCpu->cpum.GstCtx.tr.Sel;
2531 }
2532
2533 /*
2534 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky, so
2535 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
2536 */
2537 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
2538 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
2539 bool fNewDebugTrap;
2540 if (fIsNewTSS386)
2541 {
2542 PCX86TSS32 pNewTSS32 = (PCX86TSS32)pvNewTSS;
2543 uNewCr3 = (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
2544 uNewEip = pNewTSS32->eip;
2545 uNewEflags = pNewTSS32->eflags;
2546 uNewEax = pNewTSS32->eax;
2547 uNewEcx = pNewTSS32->ecx;
2548 uNewEdx = pNewTSS32->edx;
2549 uNewEbx = pNewTSS32->ebx;
2550 uNewEsp = pNewTSS32->esp;
2551 uNewEbp = pNewTSS32->ebp;
2552 uNewEsi = pNewTSS32->esi;
2553 uNewEdi = pNewTSS32->edi;
2554 uNewES = pNewTSS32->es;
2555 uNewCS = pNewTSS32->cs;
2556 uNewSS = pNewTSS32->ss;
2557 uNewDS = pNewTSS32->ds;
2558 uNewFS = pNewTSS32->fs;
2559 uNewGS = pNewTSS32->gs;
2560 uNewLdt = pNewTSS32->selLdt;
2561 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
2562 }
2563 else
2564 {
2565 PCX86TSS16 pNewTSS16 = (PCX86TSS16)pvNewTSS;
2566 uNewCr3 = 0;
2567 uNewEip = pNewTSS16->ip;
2568 uNewEflags = pNewTSS16->flags;
2569 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
2570 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
2571 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
2572 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
2573 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
2574 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
2575 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
2576 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
2577 uNewES = pNewTSS16->es;
2578 uNewCS = pNewTSS16->cs;
2579 uNewSS = pNewTSS16->ss;
2580 uNewDS = pNewTSS16->ds;
2581 uNewFS = 0;
2582 uNewGS = 0;
2583 uNewLdt = pNewTSS16->selLdt;
2584 fNewDebugTrap = false;
2585 }
2586
2587 if (GCPtrNewTSS == GCPtrCurTSS)
2588 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
2589 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
2590
2591 /*
2592 * We're done accessing the new TSS.
2593 */
2594 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
2595 if (rcStrict != VINF_SUCCESS)
2596 {
2597 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
2598 return rcStrict;
2599 }
2600
2601 /*
2602 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
2603 */
2604 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
2605 {
2606 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
2607 pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
2608 if (rcStrict != VINF_SUCCESS)
2609 {
2610 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2611 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2612 return rcStrict;
2613 }
2614
2615 /* Check that the descriptor indicates the new TSS is available (not busy). */
2616 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2617 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
2618 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
2619
2620 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2621 rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
2622 if (rcStrict != VINF_SUCCESS)
2623 {
2624 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2625 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2626 return rcStrict;
2627 }
2628 }
2629
2630 /*
2631 * From this point on, we're technically in the new task. We will defer exceptions
2632 * until the completion of the task switch but before executing any instructions in the new task.
2633 */
2634 pVCpu->cpum.GstCtx.tr.Sel = SelTSS;
2635 pVCpu->cpum.GstCtx.tr.ValidSel = SelTSS;
2636 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
2637 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
2638 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
2639 pVCpu->cpum.GstCtx.tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
2640 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
2641
2642 /* Set the busy bit in TR. */
2643 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2644
2645 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
2646 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2647 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2648 {
2649 uNewEflags |= X86_EFL_NT;
2650 }
2651
2652 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
2653 pVCpu->cpum.GstCtx.cr0 |= X86_CR0_TS;
2654 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
2655
2656 pVCpu->cpum.GstCtx.eip = uNewEip;
2657 pVCpu->cpum.GstCtx.eax = uNewEax;
2658 pVCpu->cpum.GstCtx.ecx = uNewEcx;
2659 pVCpu->cpum.GstCtx.edx = uNewEdx;
2660 pVCpu->cpum.GstCtx.ebx = uNewEbx;
2661 pVCpu->cpum.GstCtx.esp = uNewEsp;
2662 pVCpu->cpum.GstCtx.ebp = uNewEbp;
2663 pVCpu->cpum.GstCtx.esi = uNewEsi;
2664 pVCpu->cpum.GstCtx.edi = uNewEdi;
2665
2666 uNewEflags &= X86_EFL_LIVE_MASK;
2667 uNewEflags |= X86_EFL_RA1_MASK;
2668 IEMMISC_SET_EFL(pVCpu, uNewEflags);
2669
2670 /*
2671 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
2672 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
2673 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
2674 */
2675 pVCpu->cpum.GstCtx.es.Sel = uNewES;
2676 pVCpu->cpum.GstCtx.es.Attr.u &= ~X86DESCATTR_P;
2677
2678 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
2679 pVCpu->cpum.GstCtx.cs.Attr.u &= ~X86DESCATTR_P;
2680
2681 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
2682 pVCpu->cpum.GstCtx.ss.Attr.u &= ~X86DESCATTR_P;
2683
2684 pVCpu->cpum.GstCtx.ds.Sel = uNewDS;
2685 pVCpu->cpum.GstCtx.ds.Attr.u &= ~X86DESCATTR_P;
2686
2687 pVCpu->cpum.GstCtx.fs.Sel = uNewFS;
2688 pVCpu->cpum.GstCtx.fs.Attr.u &= ~X86DESCATTR_P;
2689
2690 pVCpu->cpum.GstCtx.gs.Sel = uNewGS;
2691 pVCpu->cpum.GstCtx.gs.Attr.u &= ~X86DESCATTR_P;
2692 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2693
2694 pVCpu->cpum.GstCtx.ldtr.Sel = uNewLdt;
2695 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
2696 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_P;
2697 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
2698
2699 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2700 {
2701 pVCpu->cpum.GstCtx.es.Attr.u |= X86DESCATTR_UNUSABLE;
2702 pVCpu->cpum.GstCtx.cs.Attr.u |= X86DESCATTR_UNUSABLE;
2703 pVCpu->cpum.GstCtx.ss.Attr.u |= X86DESCATTR_UNUSABLE;
2704 pVCpu->cpum.GstCtx.ds.Attr.u |= X86DESCATTR_UNUSABLE;
2705 pVCpu->cpum.GstCtx.fs.Attr.u |= X86DESCATTR_UNUSABLE;
2706 pVCpu->cpum.GstCtx.gs.Attr.u |= X86DESCATTR_UNUSABLE;
2707 pVCpu->cpum.GstCtx.ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
2708 }
2709
2710 /*
2711 * Switch CR3 for the new task.
2712 */
2713 if ( fIsNewTSS386
2714 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))
2715 {
2716 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
2717 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
2718 AssertRCSuccessReturn(rc, rc);
2719
2720 /* Inform PGM. */
2721 /** @todo Should we raise \#GP(0) here when PAE PDPEs are invalid? */
2722 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));
2723 AssertRCReturn(rc, rc);
2724 /* ignore informational status codes */
2725
2726 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
2727 }
2728
2729 /*
2730 * Switch LDTR for the new task.
2731 */
2732 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
2733 iemHlpLoadNullDataSelectorProt(pVCpu, &pVCpu->cpum.GstCtx.ldtr, uNewLdt);
2734 else
2735 {
2736 Assert(!pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
2737
2738 IEMSELDESC DescNewLdt;
2739 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
2740 if (rcStrict != VINF_SUCCESS)
2741 {
2742 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
2743 uNewLdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
2744 return rcStrict;
2745 }
2746 if ( !DescNewLdt.Legacy.Gen.u1Present
2747 || DescNewLdt.Legacy.Gen.u1DescType
2748 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
2749 {
2750 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
2751 uNewLdt, DescNewLdt.Legacy.u));
2752 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
2753 }
2754
2755 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;
2756 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2757 pVCpu->cpum.GstCtx.ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
2758 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
2759 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
2760 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2761 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
2762 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
2763 }
2764
2765 IEMSELDESC DescSS;
2766 if (IEM_IS_V86_MODE(pVCpu))
2767 {
2768 IEM_SET_CPL(pVCpu, 3);
2769 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.es, uNewES);
2770 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.cs, uNewCS);
2771 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ss, uNewSS);
2772 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ds, uNewDS);
2773 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.fs, uNewFS);
2774 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.gs, uNewGS);
2775
2776 /* Quick fix: fake DescSS. */ /** @todo fix the code further down? */
2777 DescSS.Legacy.u = 0;
2778 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u32Limit;
2779 DescSS.Legacy.Gen.u4LimitHigh = pVCpu->cpum.GstCtx.ss.u32Limit >> 16;
2780 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u64Base;
2781 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 16);
2782 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 24);
2783 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
2784 DescSS.Legacy.Gen.u2Dpl = 3;
2785 }
2786 else
2787 {
2788 uint8_t const uNewCpl = (uNewCS & X86_SEL_RPL);
2789
2790 /*
2791 * Load the stack segment for the new task.
2792 */
2793 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
2794 {
2795 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
2796 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2797 }
2798
2799 /* Fetch the descriptor. */
2800 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
2801 if (rcStrict != VINF_SUCCESS)
2802 {
2803 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
2804 VBOXSTRICTRC_VAL(rcStrict)));
2805 return rcStrict;
2806 }
2807
2808 /* SS must be a data segment and writable. */
2809 if ( !DescSS.Legacy.Gen.u1DescType
2810 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2811 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
2812 {
2813 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
2814 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
2815 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2816 }
2817
2818 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
2819 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
2820 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
2821 {
2822 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
2823 uNewCpl));
2824 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2825 }
2826
2827 /* Is it there? */
2828 if (!DescSS.Legacy.Gen.u1Present)
2829 {
2830 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
2831 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2832 }
2833
2834 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
2835 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
2836
2837 /* Set the accessed bit before committing the result into SS. */
2838 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2839 {
2840 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
2841 if (rcStrict != VINF_SUCCESS)
2842 return rcStrict;
2843 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2844 }
2845
2846 /* Commit SS. */
2847 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
2848 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
2849 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
2850 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimit;
2851 pVCpu->cpum.GstCtx.ss.u64Base = u64Base;
2852 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
2853 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
2854
2855 /* CPL has changed, update IEM before loading rest of segments. */
2856 IEM_SET_CPL(pVCpu, uNewCpl);
2857
2858 /*
2859 * Load the data segments for the new task.
2860 */
2861 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.es, uNewES);
2862 if (rcStrict != VINF_SUCCESS)
2863 return rcStrict;
2864 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.ds, uNewDS);
2865 if (rcStrict != VINF_SUCCESS)
2866 return rcStrict;
2867 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.fs, uNewFS);
2868 if (rcStrict != VINF_SUCCESS)
2869 return rcStrict;
2870 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.gs, uNewGS);
2871 if (rcStrict != VINF_SUCCESS)
2872 return rcStrict;
2873
2874 /*
2875 * Load the code segment for the new task.
2876 */
2877 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
2878 {
2879 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
2880 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2881 }
2882
2883 /* Fetch the descriptor. */
2884 IEMSELDESC DescCS;
2885 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
2886 if (rcStrict != VINF_SUCCESS)
2887 {
2888 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
2889 return rcStrict;
2890 }
2891
2892 /* CS must be a code segment. */
2893 if ( !DescCS.Legacy.Gen.u1DescType
2894 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
2895 {
2896 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
2897 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
2898 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2899 }
2900
2901 /* For conforming CS, DPL must be less than or equal to the RPL. */
2902 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2903 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
2904 {
2905 Log(("iemTaskSwitch: confirming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
2906 DescCS.Legacy.Gen.u2Dpl));
2907 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2908 }
2909
2910 /* For non-conforming CS, DPL must match RPL. */
2911 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2912 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
2913 {
2914 Log(("iemTaskSwitch: non-confirming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
2915 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
2916 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2917 }
2918
2919 /* Is it there? */
2920 if (!DescCS.Legacy.Gen.u1Present)
2921 {
2922 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
2923 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2924 }
2925
2926 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
2927 u64Base = X86DESC_BASE(&DescCS.Legacy);
2928
2929 /* Set the accessed bit before committing the result into CS. */
2930 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2931 {
2932 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
2933 if (rcStrict != VINF_SUCCESS)
2934 return rcStrict;
2935 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2936 }
2937
2938 /* Commit CS. */
2939 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
2940 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCS;
2941 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
2942 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
2943 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
2944 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2945 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
2946 }
2947
2948 /* Make sure the CPU mode is correct. */
2949 uint32_t const fExecNew = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS);
2950 if (fExecNew != pVCpu->iem.s.fExec)
2951 Log(("iemTaskSwitch: fExec %#x -> %#x (xor %#x)\n", pVCpu->iem.s.fExec, fExecNew, pVCpu->iem.s.fExec ^ fExecNew));
2952 pVCpu->iem.s.fExec = fExecNew;
2953
2954 /** @todo Debug trap. */
2955 if (fIsNewTSS386 && fNewDebugTrap)
2956 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
2957
2958 /*
2959 * Construct the error code masks based on what caused this task switch.
2960 * See Intel Instruction reference for INT.
2961 */
2962 uint16_t uExt;
2963 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
2964 && ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
2965 || (fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)))
2966 uExt = 1;
2967 else
2968 uExt = 0;
2969
2970 /*
2971 * Push any error code on to the new stack.
2972 */
2973 if (fFlags & IEM_XCPT_FLAGS_ERR)
2974 {
2975 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
2976 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
2977 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
2978
2979 /* Check that there is sufficient space on the stack. */
2980 /** @todo Factor out segment limit checking for normal/expand down segments
2981 * into a separate function. */
2982 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
2983 {
2984 if ( pVCpu->cpum.GstCtx.esp - 1 > cbLimitSS
2985 || pVCpu->cpum.GstCtx.esp < cbStackFrame)
2986 {
2987 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
2988 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
2989 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
2990 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
2991 }
2992 }
2993 else
2994 {
2995 if ( pVCpu->cpum.GstCtx.esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
2996 || pVCpu->cpum.GstCtx.esp - cbStackFrame < cbLimitSS + UINT32_C(1))
2997 {
2998 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
2999 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
3000 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
3001 }
3002 }
3003
3004
3005 if (fIsNewTSS386)
3006 rcStrict = iemMemStackPushU32(pVCpu, uErr);
3007 else
3008 rcStrict = iemMemStackPushU16(pVCpu, uErr);
3009 if (rcStrict != VINF_SUCCESS)
3010 {
3011 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
3012 fIsNewTSS386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
3013 return rcStrict;
3014 }
3015 }
3016
3017 /* Check the new EIP against the new CS limit. */
3018 if (pVCpu->cpum.GstCtx.eip > pVCpu->cpum.GstCtx.cs.u32Limit)
3019 {
3020 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
3021 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.cs.u32Limit));
3022 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3023 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
3024 }
3025
3026 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,
3027 pVCpu->cpum.GstCtx.ss.Sel));
3028 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3029}
3030
3031
3032/**
3033 * Implements exceptions and interrupts for protected mode.
3034 *
3035 * @returns VBox strict status code.
3036 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3037 * @param cbInstr The number of bytes to offset rIP by in the return
3038 * address.
3039 * @param u8Vector The interrupt / exception vector number.
3040 * @param fFlags The flags.
3041 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3042 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3043 */
3044static VBOXSTRICTRC
3045iemRaiseXcptOrIntInProtMode(PVMCPUCC pVCpu,
3046 uint8_t cbInstr,
3047 uint8_t u8Vector,
3048 uint32_t fFlags,
3049 uint16_t uErr,
3050 uint64_t uCr2) RT_NOEXCEPT
3051{
3052 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3053
3054 /*
3055 * Read the IDT entry.
3056 */
3057 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
3058 {
3059 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3060 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3061 }
3062 X86DESC Idte;
3063 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
3064 pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(8) * u8Vector);
3065 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3066 {
3067 Log(("iemRaiseXcptOrIntInProtMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3068 return rcStrict;
3069 }
3070 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
3071 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3072 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
3073
3074 /*
3075 * Check the descriptor type, DPL and such.
3076 * ASSUMES this is done in the same order as described for call-gate calls.
3077 */
3078 if (Idte.Gate.u1DescType)
3079 {
3080 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3081 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3082 }
3083 bool fTaskGate = false;
3084 uint8_t f32BitGate = true;
3085 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3086 switch (Idte.Gate.u4Type)
3087 {
3088 case X86_SEL_TYPE_SYS_UNDEFINED:
3089 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
3090 case X86_SEL_TYPE_SYS_LDT:
3091 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3092 case X86_SEL_TYPE_SYS_286_CALL_GATE:
3093 case X86_SEL_TYPE_SYS_UNDEFINED2:
3094 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
3095 case X86_SEL_TYPE_SYS_UNDEFINED3:
3096 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3097 case X86_SEL_TYPE_SYS_386_CALL_GATE:
3098 case X86_SEL_TYPE_SYS_UNDEFINED4:
3099 {
3100 /** @todo check what actually happens when the type is wrong...
3101 * esp. call gates. */
3102 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3103 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3104 }
3105
3106 case X86_SEL_TYPE_SYS_286_INT_GATE:
3107 f32BitGate = false;
3108 RT_FALL_THRU();
3109 case X86_SEL_TYPE_SYS_386_INT_GATE:
3110 fEflToClear |= X86_EFL_IF;
3111 break;
3112
3113 case X86_SEL_TYPE_SYS_TASK_GATE:
3114 fTaskGate = true;
3115#ifndef IEM_IMPLEMENTS_TASKSWITCH
3116 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
3117#endif
3118 break;
3119
3120 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
3121            f32BitGate = false;
                RT_FALL_THRU();
3122 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
3123 break;
3124
3125 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3126 }
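    /* Summary of the gate handling above: interrupt gates additionally clear
       EFLAGS.IF on delivery while trap gates leave it set; task gates dispatch
       through a TSS (task switch) further down; and the 286 vs. 386 gate types
       select a 16-bit vs. 32-bit stack frame via f32BitGate. */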
3127
3128 /* Check DPL against CPL if applicable. */
3129 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
3130 {
3131 if (IEM_GET_CPL(pVCpu) > Idte.Gate.u2Dpl)
3132 {
3133 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, IEM_GET_CPL(pVCpu), Idte.Gate.u2Dpl));
3134 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3135 }
3136 }
3137
3138 /* Is it there? */
3139 if (!Idte.Gate.u1Present)
3140 {
3141 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
3142 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3143 }
3144
3145 /* Is it a task-gate? */
3146 if (fTaskGate)
3147 {
3148 /*
3149 * Construct the error code masks based on what caused this task switch.
3150 * See Intel Instruction reference for INT.
3151 */
3152 uint16_t const uExt = ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3153 && !(fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)) ? 0 : 1;
3154 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
3155 RTSEL SelTSS = Idte.Gate.u16Sel;
3156
3157 /*
3158 * Fetch the TSS descriptor in the GDT.
3159 */
3160 IEMSELDESC DescTSS;
3161 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
3162 if (rcStrict != VINF_SUCCESS)
3163 {
3164 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
3165 VBOXSTRICTRC_VAL(rcStrict)));
3166 return rcStrict;
3167 }
3168
3169 /* The TSS descriptor must be a system segment and be available (not busy). */
3170 if ( DescTSS.Legacy.Gen.u1DescType
3171 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
3172 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
3173 {
3174 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
3175 u8Vector, SelTSS, DescTSS.Legacy.au64));
3176 return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt);
3177 }
3178
3179 /* The TSS must be present. */
3180 if (!DescTSS.Legacy.Gen.u1Present)
3181 {
3182 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
3183 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt);
3184 }
3185
3186 /* Do the actual task switch. */
3187 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_INT_XCPT,
3188 (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip,
3189 fFlags, uErr, uCr2, SelTSS, &DescTSS);
3190 }
3191
3192 /* A null CS is bad. */
3193 RTSEL NewCS = Idte.Gate.u16Sel;
3194 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3195 {
3196 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3197 return iemRaiseGeneralProtectionFault0(pVCpu);
3198 }
3199
3200 /* Fetch the descriptor for the new CS. */
3201 IEMSELDESC DescCS;
3202 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
3203 if (rcStrict != VINF_SUCCESS)
3204 {
3205 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3206 return rcStrict;
3207 }
3208
3209 /* Must be a code segment. */
3210 if (!DescCS.Legacy.Gen.u1DescType)
3211 {
3212 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3213 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3214 }
3215 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3216 {
3217 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3218 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3219 }
3220
3221 /* Don't allow lowering the privilege level. */
3222 /** @todo Does the lowering of privileges apply to software interrupts
3223 * only? This has bearings on the more-privileged or
3224 * same-privilege stack behavior further down. A testcase would
3225 * be nice. */
3226 if (DescCS.Legacy.Gen.u2Dpl > IEM_GET_CPL(pVCpu))
3227 {
3228 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3229 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
3230 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3231 }
3232
3233 /* Make sure the selector is present. */
3234 if (!DescCS.Legacy.Gen.u1Present)
3235 {
3236 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3237 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
3238 }
3239
3240 /* Check the new EIP against the new CS limit. */
3241 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
3242 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
3243 ? Idte.Gate.u16OffsetLow
3244 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
3245 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3246 if (uNewEip > cbLimitCS)
3247 {
3248 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
3249 u8Vector, uNewEip, cbLimitCS, NewCS));
3250 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3251 }
3252 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
3253
3254 /* Calc the flag image to push. */
3255 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3256 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3257 fEfl &= ~X86_EFL_RF;
3258 else
3259 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3260
3261 /* From V8086 mode only go to CPL 0. */
3262 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3263 ? IEM_GET_CPL(pVCpu) : DescCS.Legacy.Gen.u2Dpl;
3264 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
3265 {
3266 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
3267 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3268 }
3269
3270 /*
3271 * If the privilege level changes, we need to get a new stack from the TSS.
3272 * This in turns means validating the new SS and ESP...
3273 */
3274 if (uNewCpl != IEM_GET_CPL(pVCpu))
3275 {
3276 RTSEL NewSS;
3277 uint32_t uNewEsp;
3278 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, uNewCpl, &NewSS, &uNewEsp);
3279 if (rcStrict != VINF_SUCCESS)
3280 return rcStrict;
3281
3282 IEMSELDESC DescSS;
3283 rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uNewCpl, &DescSS);
3284 if (rcStrict != VINF_SUCCESS)
3285 return rcStrict;
3286 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
3287 if (!DescSS.Legacy.Gen.u1DefBig)
3288 {
3289 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
3290 uNewEsp = (uint16_t)uNewEsp;
3291 }
3292
3293 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3294
3295 /* Check that there is sufficient space for the stack frame. */
3296 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3297 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
3298 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
3299 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
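        /* Frame size breakdown: EIP, CS, EFLAGS, old ESP and old SS are pushed on a
           privilege change (5 entries, +1 for an error code); a V8086 interrupt also
           pushes ES, DS, FS and GS (4 more).  Each entry is 2 bytes for a 286 gate
           and 4 bytes for a 386 gate, hence the shift by f32BitGate. */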
3300
3301 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3302 {
3303 if ( uNewEsp - 1 > cbLimitSS
3304 || uNewEsp < cbStackFrame)
3305 {
3306 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
3307 u8Vector, NewSS, uNewEsp, cbStackFrame));
3308 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3309 }
3310 }
3311 else
3312 {
3313 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
3314 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
3315 {
3316 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
3317 u8Vector, NewSS, uNewEsp, cbStackFrame));
3318 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3319 }
3320 }
3321
3322 /*
3323 * Start making changes.
3324 */
3325
3326 /* Set the new CPL so that stack accesses use it. */
3327 uint8_t const uOldCpl = IEM_GET_CPL(pVCpu);
3328 IEM_SET_CPL(pVCpu, uNewCpl);
3329
3330 /* Create the stack frame. */
3331 RTPTRUNION uStackFrame;
3332 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
3333 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy),
3334 IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
3335 if (rcStrict != VINF_SUCCESS)
3336 return rcStrict;
3337 void * const pvStackFrame = uStackFrame.pv;
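        /* Layout written below, lowest address first: [error code,] EIP, CS, EFLAGS,
           old ESP, old SS - and, when interrupting V8086 code, the old ES, DS, FS and
           GS selectors on top of that. */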
3338 if (f32BitGate)
3339 {
3340 if (fFlags & IEM_XCPT_FLAGS_ERR)
3341 *uStackFrame.pu32++ = uErr;
3342 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3343 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3344 uStackFrame.pu32[2] = fEfl;
3345 uStackFrame.pu32[3] = pVCpu->cpum.GstCtx.esp;
3346 uStackFrame.pu32[4] = pVCpu->cpum.GstCtx.ss.Sel;
3347 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3348 if (fEfl & X86_EFL_VM)
3349 {
3350 uStackFrame.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel;
3351 uStackFrame.pu32[5] = pVCpu->cpum.GstCtx.es.Sel;
3352 uStackFrame.pu32[6] = pVCpu->cpum.GstCtx.ds.Sel;
3353 uStackFrame.pu32[7] = pVCpu->cpum.GstCtx.fs.Sel;
3354 uStackFrame.pu32[8] = pVCpu->cpum.GstCtx.gs.Sel;
3355 }
3356 }
3357 else
3358 {
3359 if (fFlags & IEM_XCPT_FLAGS_ERR)
3360 *uStackFrame.pu16++ = uErr;
3361 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
3362 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3363 uStackFrame.pu16[2] = fEfl;
3364 uStackFrame.pu16[3] = pVCpu->cpum.GstCtx.sp;
3365 uStackFrame.pu16[4] = pVCpu->cpum.GstCtx.ss.Sel;
3366 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.sp));
3367 if (fEfl & X86_EFL_VM)
3368 {
3369 uStackFrame.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
3370 uStackFrame.pu16[5] = pVCpu->cpum.GstCtx.es.Sel;
3371 uStackFrame.pu16[6] = pVCpu->cpum.GstCtx.ds.Sel;
3372 uStackFrame.pu16[7] = pVCpu->cpum.GstCtx.fs.Sel;
3373 uStackFrame.pu16[8] = pVCpu->cpum.GstCtx.gs.Sel;
3374 }
3375 }
3376 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
3377 if (rcStrict != VINF_SUCCESS)
3378 return rcStrict;
3379
3380 /* Mark the selectors 'accessed' (hope this is the correct time). */
3381        /** @todo testcase: exactly _when_ are the accessed bits set - before or
3382 * after pushing the stack frame? (Write protect the gdt + stack to
3383 * find out.) */
3384 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3385 {
3386 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3387 if (rcStrict != VINF_SUCCESS)
3388 return rcStrict;
3389 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3390 }
3391
3392 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3393 {
3394 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
3395 if (rcStrict != VINF_SUCCESS)
3396 return rcStrict;
3397 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3398 }
3399
3400 /*
3401         * Start committing the register changes (joins with the DPL=CPL branch).
3402 */
3403 pVCpu->cpum.GstCtx.ss.Sel = NewSS;
3404 pVCpu->cpum.GstCtx.ss.ValidSel = NewSS;
3405 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3406 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSS;
3407 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3408 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3409 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
3410 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
3411 * SP is loaded).
3412 * Need to check the other combinations too:
3413 * - 16-bit TSS, 32-bit handler
3414 * - 32-bit TSS, 16-bit handler */
3415 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
3416 pVCpu->cpum.GstCtx.sp = (uint16_t)(uNewEsp - cbStackFrame);
3417 else
3418 pVCpu->cpum.GstCtx.rsp = uNewEsp - cbStackFrame;
3419
3420 if (fEfl & X86_EFL_VM)
3421 {
3422 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.gs);
3423 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.fs);
3424 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.es);
3425 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.ds);
3426 }
3427 }
3428 /*
3429 * Same privilege, no stack change and smaller stack frame.
3430 */
3431 else
3432 {
3433 uint64_t uNewRsp;
3434 RTPTRUNION uStackFrame;
3435 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
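        /* No stack switch here, so only EIP/IP, CS and EFLAGS are pushed (3 entries,
           +1 for an error code), each 2 or 4 bytes wide depending on the gate type. */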
3436 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, f32BitGate ? 3 : 1, &uStackFrame.pv, &uNewRsp);
3437 if (rcStrict != VINF_SUCCESS)
3438 return rcStrict;
3439 void * const pvStackFrame = uStackFrame.pv;
3440
3441 if (f32BitGate)
3442 {
3443 if (fFlags & IEM_XCPT_FLAGS_ERR)
3444 *uStackFrame.pu32++ = uErr;
3445 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3446 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | IEM_GET_CPL(pVCpu);
3447 uStackFrame.pu32[2] = fEfl;
3448 }
3449 else
3450 {
3451 if (fFlags & IEM_XCPT_FLAGS_ERR)
3452 *uStackFrame.pu16++ = uErr;
3453 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3454 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | IEM_GET_CPL(pVCpu);
3455 uStackFrame.pu16[2] = fEfl;
3456 }
3457 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
3458 if (rcStrict != VINF_SUCCESS)
3459 return rcStrict;
3460
3461 /* Mark the CS selector as 'accessed'. */
3462 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3463 {
3464 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3465 if (rcStrict != VINF_SUCCESS)
3466 return rcStrict;
3467 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3468 }
3469
3470 /*
3471 * Start committing the register changes (joins with the other branch).
3472 */
3473 pVCpu->cpum.GstCtx.rsp = uNewRsp;
3474 }
3475
3476 /* ... register committing continues. */
3477 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3478 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3479 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3480 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
3481 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3482 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3483
3484 pVCpu->cpum.GstCtx.rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
3485 fEfl &= ~fEflToClear;
3486 IEMMISC_SET_EFL(pVCpu, fEfl);
3487
3488 if (fFlags & IEM_XCPT_FLAGS_CR2)
3489 pVCpu->cpum.GstCtx.cr2 = uCr2;
3490
3491 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3492 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3493
3494 /* Make sure the execution flags are correct. */
3495 uint32_t const fExecNew = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS);
3496 if (fExecNew != pVCpu->iem.s.fExec)
3497 Log(("iemRaiseXcptOrIntInProtMode: fExec %#x -> %#x (xor %#x)\n",
3498 pVCpu->iem.s.fExec, fExecNew, pVCpu->iem.s.fExec ^ fExecNew));
3499 pVCpu->iem.s.fExec = fExecNew;
3500 Assert(IEM_GET_CPL(pVCpu) == uNewCpl);
3501
3502 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3503}
3504
3505
3506/**
3507 * Implements exceptions and interrupts for long mode.
3508 *
3509 * @returns VBox strict status code.
3510 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3511 * @param cbInstr The number of bytes to offset rIP by in the return
3512 * address.
3513 * @param u8Vector The interrupt / exception vector number.
3514 * @param fFlags The flags.
3515 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3516 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3517 */
3518static VBOXSTRICTRC
3519iemRaiseXcptOrIntInLongMode(PVMCPUCC pVCpu,
3520 uint8_t cbInstr,
3521 uint8_t u8Vector,
3522 uint32_t fFlags,
3523 uint16_t uErr,
3524 uint64_t uCr2) RT_NOEXCEPT
3525{
3526 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3527
3528 /*
3529 * Read the IDT entry.
3530 */
3531 uint16_t offIdt = (uint16_t)u8Vector << 4;
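    /* In long mode each IDT entry is a 16-byte gate descriptor, hence the
       'vector * 16' offset and the two 8-byte fetches below. */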
3532 if (pVCpu->cpum.GstCtx.idtr.cbIdt < offIdt + 7)
3533 {
3534 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3535 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3536 }
3537 X86DESC64 Idte;
3538#ifdef _MSC_VER /* Shut up silly compiler warning. */
3539 Idte.au64[0] = 0;
3540 Idte.au64[1] = 0;
3541#endif
3542 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt);
3543 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3544 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt + 8);
3545 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3546 {
3547 Log(("iemRaiseXcptOrIntInLongMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3548 return rcStrict;
3549 }
3550 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
3551 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3552 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
3553
3554 /*
3555 * Check the descriptor type, DPL and such.
3556 * ASSUMES this is done in the same order as described for call-gate calls.
3557 */
3558 if (Idte.Gate.u1DescType)
3559 {
3560 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3561 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3562 }
3563 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3564 switch (Idte.Gate.u4Type)
3565 {
3566 case AMD64_SEL_TYPE_SYS_INT_GATE:
3567 fEflToClear |= X86_EFL_IF;
3568 break;
3569 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
3570 break;
3571
3572 default:
3573 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3574 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3575 }
3576
3577 /* Check DPL against CPL if applicable. */
3578 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
3579 {
3580 if (IEM_GET_CPL(pVCpu) > Idte.Gate.u2Dpl)
3581 {
3582 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, IEM_GET_CPL(pVCpu), Idte.Gate.u2Dpl));
3583 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3584 }
3585 }
3586
3587 /* Is it there? */
3588 if (!Idte.Gate.u1Present)
3589 {
3590 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
3591 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3592 }
3593
3594 /* A null CS is bad. */
3595 RTSEL NewCS = Idte.Gate.u16Sel;
3596 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3597 {
3598 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3599 return iemRaiseGeneralProtectionFault0(pVCpu);
3600 }
3601
3602 /* Fetch the descriptor for the new CS. */
3603 IEMSELDESC DescCS;
3604 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
3605 if (rcStrict != VINF_SUCCESS)
3606 {
3607 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3608 return rcStrict;
3609 }
3610
3611 /* Must be a 64-bit code segment. */
3612 if (!DescCS.Long.Gen.u1DescType)
3613 {
3614 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3615 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3616 }
3617 if ( !DescCS.Long.Gen.u1Long
3618 || DescCS.Long.Gen.u1DefBig
3619 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
3620 {
3621 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
3622 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
3623 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3624 }
3625
3626 /* Don't allow lowering the privilege level. For non-conforming CS
3627 selectors, the CS.DPL sets the privilege level the trap/interrupt
3628 handler runs at. For conforming CS selectors, the CPL remains
3629 unchanged, but the CS.DPL must be <= CPL. */
3630 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
3631 * when CPU in Ring-0. Result \#GP? */
3632 if (DescCS.Legacy.Gen.u2Dpl > IEM_GET_CPL(pVCpu))
3633 {
3634 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3635 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
3636 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3637 }
3638
3639
3640 /* Make sure the selector is present. */
3641 if (!DescCS.Legacy.Gen.u1Present)
3642 {
3643 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3644 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
3645 }
3646
3647 /* Check that the new RIP is canonical. */
3648 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
3649 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
3650 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
3651 if (!IEM_IS_CANONICAL(uNewRip))
3652 {
3653 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
3654 return iemRaiseGeneralProtectionFault0(pVCpu);
3655 }
3656
3657 /*
3658 * If the privilege level changes or if the IST isn't zero, we need to get
3659 * a new stack from the TSS.
3660 */
3661 uint64_t uNewRsp;
3662 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3663 ? IEM_GET_CPL(pVCpu) : DescCS.Legacy.Gen.u2Dpl;
3664 if ( uNewCpl != IEM_GET_CPL(pVCpu)
3665 || Idte.Gate.u3IST != 0)
3666 {
3667 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
3668 if (rcStrict != VINF_SUCCESS)
3669 return rcStrict;
3670 }
3671 else
3672 uNewRsp = pVCpu->cpum.GstCtx.rsp;
3673 uNewRsp &= ~(uint64_t)0xf;
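    /* In 64-bit mode the CPU aligns the stack pointer down to a 16-byte boundary
       before pushing the interrupt frame, whether or not an IST/CPL switch occurred. */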
3674
3675 /*
3676 * Calc the flag image to push.
3677 */
3678 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3679 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3680 fEfl &= ~X86_EFL_RF;
3681 else
3682 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3683
3684 /*
3685 * Start making changes.
3686 */
3687 /* Set the new CPL so that stack accesses use it. */
3688 uint8_t const uOldCpl = IEM_GET_CPL(pVCpu);
3689 IEM_SET_CPL(pVCpu, uNewCpl);
3690/** @todo Setting CPL this early seems wrong as it would affect any errors we
3691 * raise accessing the stack and (?) GDT/LDT... */
3692
3693 /* Create the stack frame. */
3694 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
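    /* The 64-bit frame is always five qwords - RIP, CS, RFLAGS, RSP and SS - plus an
       optional error code; SS:RSP is pushed even when the privilege level is unchanged. */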
3695 RTPTRUNION uStackFrame;
3696 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
3697 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
3698 if (rcStrict != VINF_SUCCESS)
3699 return rcStrict;
3700 void * const pvStackFrame = uStackFrame.pv;
3701
3702 if (fFlags & IEM_XCPT_FLAGS_ERR)
3703 *uStackFrame.pu64++ = uErr;
3704 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.rip + cbInstr : pVCpu->cpum.GstCtx.rip;
3705 uStackFrame.pu64[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
3706 uStackFrame.pu64[2] = fEfl;
3707 uStackFrame.pu64[3] = pVCpu->cpum.GstCtx.rsp;
3708 uStackFrame.pu64[4] = pVCpu->cpum.GstCtx.ss.Sel;
3709 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
3710 if (rcStrict != VINF_SUCCESS)
3711 return rcStrict;
3712
3713    /* Mark the CS selector 'accessed' (hope this is the correct time). */
3714    /** @todo testcase: exactly _when_ are the accessed bits set - before or
3715 * after pushing the stack frame? (Write protect the gdt + stack to
3716 * find out.) */
3717 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3718 {
3719 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3720 if (rcStrict != VINF_SUCCESS)
3721 return rcStrict;
3722 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3723 }
3724
3725 /*
3726     * Start committing the register changes.
3727 */
3728    /** @todo research/testcase: Figure out what VT-x and AMD-V load into the
3729     *        hidden registers when interrupting 32-bit or 16-bit code! */
3730 if (uNewCpl != uOldCpl)
3731 {
3732 pVCpu->cpum.GstCtx.ss.Sel = 0 | uNewCpl;
3733 pVCpu->cpum.GstCtx.ss.ValidSel = 0 | uNewCpl;
3734 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3735 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
3736 pVCpu->cpum.GstCtx.ss.u64Base = 0;
3737 pVCpu->cpum.GstCtx.ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
3738 }
3739 pVCpu->cpum.GstCtx.rsp = uNewRsp - cbStackFrame;
3740 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3741 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3742 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3743 pVCpu->cpum.GstCtx.cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
3744 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3745 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3746 pVCpu->cpum.GstCtx.rip = uNewRip;
3747
3748 fEfl &= ~fEflToClear;
3749 IEMMISC_SET_EFL(pVCpu, fEfl);
3750
3751 if (fFlags & IEM_XCPT_FLAGS_CR2)
3752 pVCpu->cpum.GstCtx.cr2 = uCr2;
3753
3754 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3755 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3756
3757 iemRecalcExecModeAndCplFlags(pVCpu);
3758
3759 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3760}
3761
3762
3763/**
3764 * Implements exceptions and interrupts.
3765 *
3766 * All exceptions and interrupts go through this function!
3767 *
3768 * @returns VBox strict status code.
3769 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3770 * @param cbInstr The number of bytes to offset rIP by in the return
3771 * address.
3772 * @param u8Vector The interrupt / exception vector number.
3773 * @param fFlags The flags.
3774 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3775 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
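 *
 * @remarks Illustrative call (callers normally use one of the convenience
 *          wrappers further down, e.g. iemRaiseGeneralProtectionFault0):
 * @code
 *          return iemRaiseXcptOrInt(pVCpu, 0 /*cbInstr*/, X86_XCPT_GP,
 *                                   IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
 *                                   0 /*uErr*/, 0 /*uCr2*/);
 * @endcode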
3776 */
3777VBOXSTRICTRC
3778iemRaiseXcptOrInt(PVMCPUCC pVCpu,
3779 uint8_t cbInstr,
3780 uint8_t u8Vector,
3781 uint32_t fFlags,
3782 uint16_t uErr,
3783 uint64_t uCr2) RT_NOEXCEPT
3784{
3785 /*
3786 * Get all the state that we might need here.
3787 */
3788 IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3789 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3790
3791#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
3792 /*
3793 * Flush prefetch buffer
3794 */
3795 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
3796#endif
3797
3798 /*
3799 * Perform the V8086 IOPL check and upgrade the fault without nesting.
3800 */
3801 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM
3802 && pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL != 3
3803 && (fFlags & ( IEM_XCPT_FLAGS_T_SOFT_INT
3804 | IEM_XCPT_FLAGS_BP_INSTR
3805 | IEM_XCPT_FLAGS_ICEBP_INSTR
3806 | IEM_XCPT_FLAGS_OF_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
3807 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
3808 {
3809 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
3810 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
3811 u8Vector = X86_XCPT_GP;
3812 uErr = 0;
3813 }
3814#ifdef DBGFTRACE_ENABLED
3815 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
3816 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
3817 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp);
3818#endif
3819
3820 /*
3821 * Evaluate whether NMI blocking should be in effect.
3822 * Normally, NMI blocking is in effect whenever we inject an NMI.
3823 */
3824 bool fBlockNmi = u8Vector == X86_XCPT_NMI
3825 && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT);
3826
3827#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3828 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3829 {
3830 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEvent(pVCpu, u8Vector, fFlags, uErr, uCr2, cbInstr);
3831 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
3832 return rcStrict0;
3833
3834 /* If virtual-NMI blocking is in effect for the nested-guest, guest NMIs are not blocked. */
3835 if (pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking)
3836 {
3837 Assert(CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_VIRT_NMI));
3838 fBlockNmi = false;
3839 }
3840 }
3841#endif
3842
3843#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
3844 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
3845 {
3846 /*
3847 * If the event is being injected as part of VMRUN, it isn't subject to event
3848 * intercepts in the nested-guest. However, secondary exceptions that occur
3849 * during injection of any event -are- subject to exception intercepts.
3850 *
3851 * See AMD spec. 15.20 "Event Injection".
3852 */
3853 if (!pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents)
3854 pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents = true;
3855 else
3856 {
3857 /*
3858 * Check and handle if the event being raised is intercepted.
3859 */
3860 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
3861 if (rcStrict0 != VINF_SVM_INTERCEPT_NOT_ACTIVE)
3862 return rcStrict0;
3863 }
3864 }
3865#endif
3866
3867 /*
3868 * Set NMI blocking if necessary.
3869 */
3870 if (fBlockNmi)
3871 CPUMSetInterruptInhibitingByNmi(&pVCpu->cpum.GstCtx);
3872
3873 /*
3874 * Do recursion accounting.
3875 */
3876 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
3877 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
3878 if (pVCpu->iem.s.cXcptRecursions == 0)
3879 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
3880 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2));
3881 else
3882 {
3883 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
3884 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
3885 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
3886
3887 if (pVCpu->iem.s.cXcptRecursions >= 4)
3888 {
3889#ifdef DEBUG_bird
3890 AssertFailed();
3891#endif
3892 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
3893 }
3894
3895 /*
3896 * Evaluate the sequence of recurring events.
3897 */
3898 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
3899 NULL /* pXcptRaiseInfo */);
3900 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
3901 { /* likely */ }
3902 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
3903 {
3904 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
3905 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
3906 u8Vector = X86_XCPT_DF;
3907 uErr = 0;
3908#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3909 /* VMX nested-guest #DF intercept needs to be checked here. */
3910 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3911 {
3912 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEventDoubleFault(pVCpu);
3913 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
3914 return rcStrict0;
3915 }
3916#endif
3917 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
3918 if (IEM_SVM_IS_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
3919 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3920 }
3921 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
3922 {
3923 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
3924 return iemInitiateCpuShutdown(pVCpu);
3925 }
3926 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
3927 {
3928 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
3929 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
3930 if ( !CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))
3931 && !CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
3932 return VERR_EM_GUEST_CPU_HANG;
3933 }
3934 else
3935 {
3936 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
3937 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
3938 return VERR_IEM_IPE_9;
3939 }
3940
3941 /*
3942         * The 'EXT' bit is set when an exception occurs during delivery of an external
3943         * event (such as an interrupt or an earlier exception)[1]. The privileged software
3944         * exception (INT1/ICEBP) also sets the EXT bit[2]. For exceptions generated by the
3945         * software interrupt instructions (INT n, INTO, INT3), the 'EXT' bit is not set[3].
3946 *
3947 * [1] - Intel spec. 6.13 "Error Code"
3948 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
3949 * [3] - Intel Instruction reference for INT n.
3950 */
3951 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
3952 && (fFlags & IEM_XCPT_FLAGS_ERR)
3953 && u8Vector != X86_XCPT_PF
3954 && u8Vector != X86_XCPT_DF)
3955 {
3956 uErr |= X86_TRAP_ERR_EXTERNAL;
3957 }
3958 }
3959
3960 pVCpu->iem.s.cXcptRecursions++;
3961 pVCpu->iem.s.uCurXcpt = u8Vector;
3962 pVCpu->iem.s.fCurXcpt = fFlags;
3963 pVCpu->iem.s.uCurXcptErr = uErr;
3964 pVCpu->iem.s.uCurXcptCr2 = uCr2;
3965
3966 /*
3967 * Extensive logging.
3968 */
3969#if defined(LOG_ENABLED) && defined(IN_RING3)
3970 if (LogIs3Enabled())
3971 {
3972 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR_MASK);
3973 PVM pVM = pVCpu->CTX_SUFF(pVM);
3974 char szRegs[4096];
3975 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
3976 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
3977 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
3978 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
3979 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
3980 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
3981 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
3982 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
3983 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
3984 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
3985 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
3986 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
3987 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
3988 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
3989 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
3990 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
3991 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
3992 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
3993 " efer=%016VR{efer}\n"
3994 " pat=%016VR{pat}\n"
3995 " sf_mask=%016VR{sf_mask}\n"
3996 "krnl_gs_base=%016VR{krnl_gs_base}\n"
3997 " lstar=%016VR{lstar}\n"
3998 " star=%016VR{star} cstar=%016VR{cstar}\n"
3999 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4000 );
4001
4002 char szInstr[256];
4003 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4004 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4005 szInstr, sizeof(szInstr), NULL);
4006 Log3(("%s%s\n", szRegs, szInstr));
4007 }
4008#endif /* LOG_ENABLED */
4009
4010 /*
4011 * Stats.
4012 */
4013 if (!(fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT))
4014 STAM_REL_STATS({ pVCpu->iem.s.aStatInts[u8Vector] += 1; });
4015 else if (u8Vector <= X86_XCPT_LAST)
4016 {
4017 STAM_REL_COUNTER_INC(&pVCpu->iem.s.aStatXcpts[u8Vector]);
4018 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector),
4019 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base, ASMReadTSC());
4020 }
4021
4022 /*
4023     * A #PF implies an INVLPG for the CR2 value (see 4.10.1.1 in Intel SDM Vol 3)
4024 * to ensure that a stale TLB or paging cache entry will only cause one
4025 * spurious #PF.
4026 */
4027 if ( u8Vector == X86_XCPT_PF
4028 && (fFlags & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2)) == (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2))
4029 IEMTlbInvalidatePage(pVCpu, uCr2);
4030
4031 /*
4032 * Call the mode specific worker function.
4033 */
4034 VBOXSTRICTRC rcStrict;
4035 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
4036 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4037 else if (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA)
4038 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4039 else
4040 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
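    /* Note: V8086 code (CR0.PE=1, EFLAGS.VM=1) is dispatched via the protected
       mode worker, which contains the VM=1 special handling seen above. */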
4041
4042 /* Flush the prefetch buffer. */
4043#ifdef IEM_WITH_CODE_TLB
4044 pVCpu->iem.s.pbInstrBuf = NULL;
4045#else
4046 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4047#endif
4048
4049 /*
4050 * Unwind.
4051 */
4052 pVCpu->iem.s.cXcptRecursions--;
4053 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
4054 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
4055 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u depth=%d\n",
4056 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel,
4057 pVCpu->cpum.GstCtx.esp, IEM_GET_CPL(pVCpu), pVCpu->iem.s.cXcptRecursions + 1));
4058 return rcStrict;
4059}
4060
4061#ifdef IEM_WITH_SETJMP
4062/**
4063 * See iemRaiseXcptOrInt. Will not return.
4064 */
4065DECL_NO_RETURN(void)
4066iemRaiseXcptOrIntJmp(PVMCPUCC pVCpu,
4067 uint8_t cbInstr,
4068 uint8_t u8Vector,
4069 uint32_t fFlags,
4070 uint16_t uErr,
4071 uint64_t uCr2) IEM_NOEXCEPT_MAY_LONGJMP
4072{
4073 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4074 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
4075}
4076#endif
4077
4078
4079/** \#DE - 00. */
4080VBOXSTRICTRC iemRaiseDivideError(PVMCPUCC pVCpu) RT_NOEXCEPT
4081{
4082 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4083}
4084
4085
4086/** \#DB - 01.
4087 * @note This automatically clears DR7.GD. */
4088VBOXSTRICTRC iemRaiseDebugException(PVMCPUCC pVCpu) RT_NOEXCEPT
4089{
4090 /* This always clears RF (via IEM_XCPT_FLAGS_DRx_INSTR_BP). */
4091 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
4092 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_DRx_INSTR_BP, 0, 0);
4093}
4094
4095
4096/** \#BR - 05. */
4097VBOXSTRICTRC iemRaiseBoundRangeExceeded(PVMCPUCC pVCpu) RT_NOEXCEPT
4098{
4099 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4100}
4101
4102
4103/** \#UD - 06. */
4104VBOXSTRICTRC iemRaiseUndefinedOpcode(PVMCPUCC pVCpu) RT_NOEXCEPT
4105{
4106 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4107}
4108
4109
4110/** \#NM - 07. */
4111VBOXSTRICTRC iemRaiseDeviceNotAvailable(PVMCPUCC pVCpu) RT_NOEXCEPT
4112{
4113 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4114}
4115
4116
4117/** \#TS(err) - 0a. */
4118VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4119{
4120 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4121}
4122
4123
4124/** \#TS(tr) - 0a. */
4125VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPUCC pVCpu) RT_NOEXCEPT
4126{
4127 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4128 pVCpu->cpum.GstCtx.tr.Sel, 0);
4129}
4130
4131
4132/** \#TS(0) - 0a. */
4133VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
4134{
4135 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4136 0, 0);
4137}
4138
4139
4140/** \#TS(sel) - 0a. */
4141VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4142{
4143 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4144 uSel & X86_SEL_MASK_OFF_RPL, 0);
4145}
4146
4147
4148/** \#NP(err) - 0b. */
4149VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4150{
4151 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4152}
4153
4154
4155/** \#NP(sel) - 0b. */
4156VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4157{
4158 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4159 uSel & ~X86_SEL_RPL, 0);
4160}
4161
4162
4163/** \#SS(seg) - 0c. */
4164VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4165{
4166 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4167 uSel & ~X86_SEL_RPL, 0);
4168}
4169
4170
4171/** \#SS(err) - 0c. */
4172VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4173{
4174 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4175}
4176
4177
4178/** \#GP(n) - 0d. */
4179VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4180{
4181 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4182}
4183
4184
4185/** \#GP(0) - 0d. */
4186VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
4187{
4188 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4189}
4190
4191#ifdef IEM_WITH_SETJMP
4192/** \#GP(0) - 0d. */
4193DECL_NO_RETURN(void) iemRaiseGeneralProtectionFault0Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4194{
4195 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4196}
4197#endif
4198
4199
4200/** \#GP(sel) - 0d. */
4201VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4202{
4203 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4204 Sel & ~X86_SEL_RPL, 0);
4205}
4206
4207
4208/** \#GP(0) - 0d. */
4209VBOXSTRICTRC iemRaiseNotCanonical(PVMCPUCC pVCpu) RT_NOEXCEPT
4210{
4211 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4212}
4213
4214
4215/** \#GP(sel) - 0d. */
4216VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4217{
4218 NOREF(iSegReg); NOREF(fAccess);
4219 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4220 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4221}
4222
4223#ifdef IEM_WITH_SETJMP
4224/** \#GP(sel) - 0d, longjmp. */
4225DECL_NO_RETURN(void) iemRaiseSelectorBoundsJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
4226{
4227 NOREF(iSegReg); NOREF(fAccess);
4228 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4229 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4230}
4231#endif
4232
4233/** \#GP(sel) - 0d. */
4234VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4235{
4236 NOREF(Sel);
4237 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4238}
4239
4240#ifdef IEM_WITH_SETJMP
4241/** \#GP(sel) - 0d, longjmp. */
4242DECL_NO_RETURN(void) iemRaiseSelectorBoundsBySelectorJmp(PVMCPUCC pVCpu, RTSEL Sel) IEM_NOEXCEPT_MAY_LONGJMP
4243{
4244 NOREF(Sel);
4245 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4246}
4247#endif
4248
4249
4250/** \#GP(sel) - 0d. */
4251VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4252{
4253 NOREF(iSegReg); NOREF(fAccess);
4254 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4255}
4256
4257#ifdef IEM_WITH_SETJMP
4258/** \#GP(sel) - 0d, longjmp. */
4259DECL_NO_RETURN(void) iemRaiseSelectorInvalidAccessJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
4260{
4261 NOREF(iSegReg); NOREF(fAccess);
4262 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4263}
4264#endif
4265
4266
4267/** \#PF(n) - 0e. */
4268VBOXSTRICTRC iemRaisePageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess, uint32_t fAccess, int rc) RT_NOEXCEPT
4269{
4270 uint16_t uErr;
4271 switch (rc)
4272 {
4273 case VERR_PAGE_NOT_PRESENT:
4274 case VERR_PAGE_TABLE_NOT_PRESENT:
4275 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
4276 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
4277 uErr = 0;
4278 break;
4279
4280 default:
4281 AssertMsgFailed(("%Rrc\n", rc));
4282 RT_FALL_THRU();
4283 case VERR_ACCESS_DENIED:
4284 uErr = X86_TRAP_PF_P;
4285 break;
4286
4287 /** @todo reserved */
4288 }
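    /* The remaining bits of the #PF error code are assembled below: P (bit 0) set
       above for protection violations, W/R (bit 1) for writes, U/S (bit 2) for CPL 3
       accesses and I/D (bit 4) for instruction fetches under NX/PAE paging. */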
4289
4290 if (IEM_GET_CPL(pVCpu) == 3)
4291 uErr |= X86_TRAP_PF_US;
4292
4293 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
4294 && ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
4295 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) ) )
4296 uErr |= X86_TRAP_PF_ID;
4297
4298#if 0 /* This is so much non-sense, really. Why was it done like that? */
4299 /* Note! RW access callers reporting a WRITE protection fault, will clear
4300 the READ flag before calling. So, read-modify-write accesses (RW)
4301 can safely be reported as READ faults. */
4302 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
4303 uErr |= X86_TRAP_PF_RW;
4304#else
4305 if (fAccess & IEM_ACCESS_TYPE_WRITE)
4306 {
4307 /// @todo r=bird: bs3-cpu-basic-2 wants X86_TRAP_PF_RW for xchg and cmpxchg
4308 /// (regardless of outcome of the comparison in the latter case).
4309 //if (!(fAccess & IEM_ACCESS_TYPE_READ))
4310 uErr |= X86_TRAP_PF_RW;
4311 }
4312#endif
4313
4314 /* For FXSAVE and FRSTOR the #PF is typically reported at the max address
4315 of the memory operand rather than at the start of it. (Not sure what
4316       happens if it crosses a page boundary.)  The current heuristic for
4317       this is to report the #PF for the last byte if the access is more than
4318       64 bytes.  This is probably not correct, but we can work that out later;
4319       the main objective for now is to get FXSAVE to behave like real hardware and
4320 make bs3-cpu-basic2 work. */
4321 if (cbAccess <= 64)
4322    { /* likely */ }
4323 else
4324 GCPtrWhere += cbAccess - 1;
4325
4326 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
4327 uErr, GCPtrWhere);
4328}
4329
4330#ifdef IEM_WITH_SETJMP
4331/** \#PF(n) - 0e, longjmp. */
4332DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess,
4333 uint32_t fAccess, int rc) IEM_NOEXCEPT_MAY_LONGJMP
4334{
4335 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, cbAccess, fAccess, rc)));
4336}
4337#endif
4338
4339
4340/** \#MF(0) - 10. */
4341VBOXSTRICTRC iemRaiseMathFault(PVMCPUCC pVCpu) RT_NOEXCEPT
4342{
4343 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_NE)
4344 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4345
4346 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
4347 PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13 /* u8Irq */, 1 /* u8Level */, 0 /* uTagSrc */);
4348 return iemRegUpdateRipAndFinishClearingRF(pVCpu);
4349}
4350
4351
4352/** \#AC(0) - 11. */
4353VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPUCC pVCpu) RT_NOEXCEPT
4354{
4355 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4356}
4357
4358#ifdef IEM_WITH_SETJMP
4359/** \#AC(0) - 11, longjmp. */
4360DECL_NO_RETURN(void) iemRaiseAlignmentCheckExceptionJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4361{
4362 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseAlignmentCheckException(pVCpu)));
4363}
4364#endif
4365
4366
4367/** \#XF(0)/\#XM(0) - 19. */
4368VBOXSTRICTRC iemRaiseSimdFpException(PVMCPUCC pVCpu) RT_NOEXCEPT
4369{
4370 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_XF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4371}
4372
4373
4374/** Accessed via IEMOP_RAISE_DIVIDE_ERROR. */
4375IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
4376{
4377 NOREF(cbInstr);
4378 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4379}
4380
4381
4382/** Accessed via IEMOP_RAISE_INVALID_LOCK_PREFIX. */
4383IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
4384{
4385 NOREF(cbInstr);
4386 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4387}
4388
4389
4390/** Accessed via IEMOP_RAISE_INVALID_OPCODE. */
4391IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
4392{
4393 NOREF(cbInstr);
4394 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4395}
4396
4397
4398/** @} */
4399
4400/** @name Common opcode decoders.
4401 * @{
4402 */
4403//#include <iprt/mem.h>
4404
4405/**
4406 * Used to add extra details about a stub case.
4407 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4408 */
4409void iemOpStubMsg2(PVMCPUCC pVCpu) RT_NOEXCEPT
4410{
4411#if defined(LOG_ENABLED) && defined(IN_RING3)
4412 PVM pVM = pVCpu->CTX_SUFF(pVM);
4413 char szRegs[4096];
4414 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4415 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4416 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4417 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4418 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4419 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4420 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4421 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4422 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4423 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4424 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4425 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4426 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4427 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4428 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4429 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4430 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4431 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4432 " efer=%016VR{efer}\n"
4433 " pat=%016VR{pat}\n"
4434 " sf_mask=%016VR{sf_mask}\n"
4435 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4436 " lstar=%016VR{lstar}\n"
4437 " star=%016VR{star} cstar=%016VR{cstar}\n"
4438 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4439 );
4440
4441 char szInstr[256];
4442 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4443 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4444 szInstr, sizeof(szInstr), NULL);
4445
4446 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
4447#else
4448    RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip);
4449#endif
4450}
4451
4452/** @} */
4453
4454
4455
4456/** @name Register Access.
4457 * @{
4458 */
4459
4460/**
4461 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
4462 *
4463 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4464 * segment limit.
4465 *
4466 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4467 * @param cbInstr Instruction size.
4468 * @param offNextInstr The offset of the next instruction.
4469 * @param enmEffOpSize Effective operand size.
4470 */
4471VBOXSTRICTRC iemRegRipRelativeJumpS8AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
4472 IEMMODE enmEffOpSize) RT_NOEXCEPT
4473{
4474 switch (enmEffOpSize)
4475 {
4476 case IEMMODE_16BIT:
4477 {
4478 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + (int16_t)offNextInstr;
4479 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
4480 || IEM_IS_64BIT_CODE(pVCpu) /* no CS limit checks in 64-bit mode */))
4481 pVCpu->cpum.GstCtx.rip = uNewIp;
4482 else
4483 return iemRaiseGeneralProtectionFault0(pVCpu);
4484 break;
4485 }
4486
4487 case IEMMODE_32BIT:
4488 {
4489 Assert(!IEM_IS_64BIT_CODE(pVCpu));
4490 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
4491
4492 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + (int32_t)offNextInstr;
4493 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
4494 pVCpu->cpum.GstCtx.rip = uNewEip;
4495 else
4496 return iemRaiseGeneralProtectionFault0(pVCpu);
4497 break;
4498 }
4499
4500 case IEMMODE_64BIT:
4501 {
4502 Assert(IEM_IS_64BIT_CODE(pVCpu));
4503
4504 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
4505 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
4506 pVCpu->cpum.GstCtx.rip = uNewRip;
4507 else
4508 return iemRaiseGeneralProtectionFault0(pVCpu);
4509 break;
4510 }
4511
4512 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4513 }
4514
4515#ifndef IEM_WITH_CODE_TLB
4516 /* Flush the prefetch buffer. */
4517 pVCpu->iem.s.cbOpcode = cbInstr;
4518#endif
4519
4520 /*
4521 * Clear RF and finish the instruction (maybe raise #DB).
4522 */
4523 return iemRegFinishClearingRF(pVCpu);
4524}
4525
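/*
 * Worked example for the 16-bit case above (illustrative): uNewIp is a
 * uint16_t, so the target wraps at 64K.  A two byte "jmp short -3" at
 * ip=0xFFFE gives uNewIp = 0xFFFE + 2 + 0xFFFD = 0xFFFD (mod 64K), which is
 * then checked against CS.limit.  The (int16_t)/(int32_t)/(int64_t) casts of
 * offNextInstr sign-extend the 8-bit displacement to the effective operand
 * size before the addition.
 */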
4526
4527/**
4528 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
4529 *
4530 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4531 * segment limit.
4532 *
4533 * @returns Strict VBox status code.
4534 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4535 * @param cbInstr Instruction size.
4536 * @param offNextInstr The offset of the next instruction.
4537 */
4538VBOXSTRICTRC iemRegRipRelativeJumpS16AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int16_t offNextInstr) RT_NOEXCEPT
4539{
4540 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
4541
4542 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + offNextInstr;
4543 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
4544 || IEM_IS_64BIT_CODE(pVCpu) /* no limit checking in 64-bit mode */))
4545 pVCpu->cpum.GstCtx.rip = uNewIp;
4546 else
4547 return iemRaiseGeneralProtectionFault0(pVCpu);
4548
4549#ifndef IEM_WITH_CODE_TLB
4550 /* Flush the prefetch buffer. */
4551 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4552#endif
4553
4554 /*
4555 * Clear RF and finish the instruction (maybe raise #DB).
4556 */
4557 return iemRegFinishClearingRF(pVCpu);
4558}
4559
4560
4561/**
4562 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
4563 *
4564 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4565 * segment limit.
4566 *
4567 * @returns Strict VBox status code.
4568 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4569 * @param cbInstr Instruction size.
4570 * @param offNextInstr The offset of the next instruction.
4571 * @param enmEffOpSize Effective operand size.
4572 */
4573VBOXSTRICTRC iemRegRipRelativeJumpS32AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int32_t offNextInstr,
4574 IEMMODE enmEffOpSize) RT_NOEXCEPT
4575{
4576 if (enmEffOpSize == IEMMODE_32BIT)
4577 {
4578 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(!IEM_IS_64BIT_CODE(pVCpu));
4579
4580 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + offNextInstr;
4581 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
4582 pVCpu->cpum.GstCtx.rip = uNewEip;
4583 else
4584 return iemRaiseGeneralProtectionFault0(pVCpu);
4585 }
4586 else
4587 {
4588 Assert(enmEffOpSize == IEMMODE_64BIT);
4589
4590 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
4591 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
4592 pVCpu->cpum.GstCtx.rip = uNewRip;
4593 else
4594 return iemRaiseGeneralProtectionFault0(pVCpu);
4595 }
4596
4597#ifndef IEM_WITH_CODE_TLB
4598 /* Flush the prefetch buffer. */
4599 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4600#endif
4601
4602 /*
4603 * Clear RF and finish the instruction (maybe raise #DB).
4604 */
4605 return iemRegFinishClearingRF(pVCpu);
4606}
4607
4608
4609/**
4610 * Performs a near jump to the specified address.
4611 *
4612 * May raise a \#GP(0) if the new IP is outside the code segment limit.
4613 *
4614 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4615 * @param uNewIp The new IP value.
4616 */
4617VBOXSTRICTRC iemRegRipJumpU16AndFinishClearningRF(PVMCPUCC pVCpu, uint16_t uNewIp) RT_NOEXCEPT
4618{
4619 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
4620 || IEM_IS_64BIT_CODE(pVCpu) /* no limit checks in 64-bit mode */))
4621 pVCpu->cpum.GstCtx.rip = uNewIp;
4622 else
4623 return iemRaiseGeneralProtectionFault0(pVCpu);
4624 /** @todo Test 16-bit jump in 64-bit mode. */
4625
4626#ifndef IEM_WITH_CODE_TLB
4627 /* Flush the prefetch buffer. */
4628 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4629#endif
4630
4631 /*
4632 * Clear RF and finish the instruction (maybe raise #DB).
4633 */
4634 return iemRegFinishClearingRF(pVCpu);
4635}
4636
4637
4638/**
4639 * Performs a near jump to the specified address.
4640 *
4641 * May raise a \#GP(0) if the new RIP is outside the code segment limit.
4642 *
4643 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4644 * @param uNewEip The new EIP value.
4645 */
4646VBOXSTRICTRC iemRegRipJumpU32AndFinishClearningRF(PVMCPUCC pVCpu, uint32_t uNewEip) RT_NOEXCEPT
4647{
4648 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
4649 Assert(!IEM_IS_64BIT_CODE(pVCpu));
4650
4651 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
4652 pVCpu->cpum.GstCtx.rip = uNewEip;
4653 else
4654 return iemRaiseGeneralProtectionFault0(pVCpu);
4655
4656#ifndef IEM_WITH_CODE_TLB
4657 /* Flush the prefetch buffer. */
4658 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4659#endif
4660
4661 /*
4662 * Clear RF and finish the instruction (maybe raise #DB).
4663 */
4664 return iemRegFinishClearingRF(pVCpu);
4665}
4666
4667
4668/**
4669 * Performs a near jump to the specified address.
4670 *
4671 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4672 * segment limit.
4673 *
4674 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4675 * @param uNewRip The new RIP value.
4676 */
4677VBOXSTRICTRC iemRegRipJumpU64AndFinishClearningRF(PVMCPUCC pVCpu, uint64_t uNewRip) RT_NOEXCEPT
4678{
4679 Assert(IEM_IS_64BIT_CODE(pVCpu));
4680
4681 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
4682 pVCpu->cpum.GstCtx.rip = uNewRip;
4683 else
4684 return iemRaiseGeneralProtectionFault0(pVCpu);
4685
4686#ifndef IEM_WITH_CODE_TLB
4687 /* Flush the prefetch buffer. */
4688 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4689#endif
4690
4691 /*
4692 * Clear RF and finish the instruction (maybe raise #DB).
4693 */
4694 return iemRegFinishClearingRF(pVCpu);
4695}
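
/*
 * Canonical check note (illustrative): on implementations with 48-bit virtual
 * addresses, IEM_IS_CANONICAL() requires bits 63:48 to all equal bit 47, so
 * 0x00007FFFFFFFFFFF and 0xFFFF800000000000 are accepted while
 * 0x0000800000000000 raises \#GP(0) here.
 */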
4696
4697/** @} */
4698
4699
4700/** @name FPU access and helpers.
4701 *
4702 * @{
4703 */
4704
4705/**
4706 * Updates the x87.DS and FPUDP registers.
4707 *
4708 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4709 * @param pFpuCtx The FPU context.
4710 * @param iEffSeg The effective segment register.
4711 * @param GCPtrEff The effective address relative to @a iEffSeg.
4712 */
4713DECLINLINE(void) iemFpuUpdateDP(PVMCPUCC pVCpu, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4714{
4715 RTSEL sel;
4716 switch (iEffSeg)
4717 {
4718 case X86_SREG_DS: sel = pVCpu->cpum.GstCtx.ds.Sel; break;
4719 case X86_SREG_SS: sel = pVCpu->cpum.GstCtx.ss.Sel; break;
4720 case X86_SREG_CS: sel = pVCpu->cpum.GstCtx.cs.Sel; break;
4721 case X86_SREG_ES: sel = pVCpu->cpum.GstCtx.es.Sel; break;
4722 case X86_SREG_FS: sel = pVCpu->cpum.GstCtx.fs.Sel; break;
4723 case X86_SREG_GS: sel = pVCpu->cpum.GstCtx.gs.Sel; break;
4724 default:
4725 AssertMsgFailed(("%d\n", iEffSeg));
4726 sel = pVCpu->cpum.GstCtx.ds.Sel;
4727 }
4728    /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
4729 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
4730 {
4731 pFpuCtx->DS = 0;
4732 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
4733 }
4734 else if (!IEM_IS_LONG_MODE(pVCpu)) /** @todo this is weird. explain. */
4735 {
4736 pFpuCtx->DS = sel;
4737 pFpuCtx->FPUDP = GCPtrEff;
4738 }
4739 else
4740 *(uint64_t *)&pFpuCtx->FPUDP = GCPtrEff;
4741}
4742
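/*
 * Example of the real/V86 mode encoding above (illustrative): FPUDP holds the
 * linear address, i.e. selector * 16 plus the effective offset.  With DS=0x1234
 * and GCPtrEff=0x0010 this gives FPUDP = 0x12340 + 0x10 = 0x12350 and DS=0,
 * whereas in protected (non-long) mode the selector and offset are kept
 * separately, and in long mode the full 64-bit effective address is stored.
 */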
4743
4744/**
4745 * Rotates the stack registers in the push direction.
4746 *
4747 * @param pFpuCtx The FPU context.
4748 * @remarks This is a complete waste of time, but fxsave stores the registers in
4749 * stack order.
4750 */
4751DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
4752{
4753 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
4754 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
4755 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
4756 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
4757 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
4758 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
4759 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
4760 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
4761 pFpuCtx->aRegs[0].r80 = r80Tmp;
4762}
4763
4764
4765/**
4766 * Rotates the stack registers in the pop direction.
4767 *
4768 * @param pFpuCtx The FPU context.
4769 * @remarks This is a complete waste of time, but fxsave stores the registers in
4770 * stack order.
4771 */
4772DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
4773{
4774 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
4775 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
4776 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
4777 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
4778 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
4779 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
4780 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
4781 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
4782 pFpuCtx->aRegs[7].r80 = r80Tmp;
4783}
4784
4785
4786/**
4787 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
4788 * exception prevents it.
4789 *
4790 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4791 * @param pResult The FPU operation result to push.
4792 * @param pFpuCtx The FPU context.
4793 */
4794static void iemFpuMaybePushResult(PVMCPU pVCpu, PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
4795{
4796 /* Update FSW and bail if there are pending exceptions afterwards. */
4797 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
4798 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4799 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4800 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4801 {
4802        if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4803 Log11(("iemFpuMaybePushResult: %04x:%08RX64: FSW %#x -> %#x\n",
4804 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));
4805 pFpuCtx->FSW = fFsw;
4806 return;
4807 }
4808
4809 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
4810 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
4811 {
4812 /* All is fine, push the actual value. */
4813 pFpuCtx->FTW |= RT_BIT(iNewTop);
4814 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
4815 }
4816 else if (pFpuCtx->FCW & X86_FCW_IM)
4817 {
4818 /* Masked stack overflow, push QNaN. */
4819 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
4820 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
4821 }
4822 else
4823 {
4824 /* Raise stack overflow, don't push anything. */
4825 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
4826 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
4827 Log11(("iemFpuMaybePushResult: %04x:%08RX64: stack overflow (FSW=%#x)\n",
4828 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
4829 return;
4830 }
4831
4832 fFsw &= ~X86_FSW_TOP_MASK;
4833 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
4834 pFpuCtx->FSW = fFsw;
4835
4836 iemFpuRotateStackPush(pFpuCtx);
4837 RT_NOREF(pVCpu);
4838}
4839
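/*
 * A note on the TOP arithmetic above (illustrative): a push decrements TOP
 * modulo 8, which is what (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK
 * computes (e.g. TOP=0 -> 7, TOP=3 -> 2).  Since aRegs[] is kept in ST()
 * order (aRegs[0] is always ST(0)), the new value is parked in aRegs[7] and
 * iemFpuRotateStackPush() then rotates it into aRegs[0], making it the new
 * ST(0).  The FTW bit test on iNewTop detects stack overflow: the physical
 * register about to become ST(0) must be marked empty.
 */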
4840
4841/**
4842 * Stores a result in a FPU register and updates the FSW and FTW.
4843 *
4844 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4845 * @param pFpuCtx The FPU context.
4846 * @param pResult The result to store.
4847 * @param iStReg Which FPU register to store it in.
4848 */
4849static void iemFpuStoreResultOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg) RT_NOEXCEPT
4850{
4851 Assert(iStReg < 8);
4852 uint16_t fNewFsw = pFpuCtx->FSW;
4853 uint16_t const iReg = (X86_FSW_TOP_GET(fNewFsw) + iStReg) & X86_FSW_TOP_SMASK;
4854 fNewFsw &= ~X86_FSW_C_MASK;
4855 fNewFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4856 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4857 Log11(("iemFpuStoreResultOnly: %04x:%08RX64: FSW %#x -> %#x\n",
4858 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));
4859 pFpuCtx->FSW = fNewFsw;
4860 pFpuCtx->FTW |= RT_BIT(iReg);
4861 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
4862 RT_NOREF(pVCpu);
4863}
4864
4865
4866/**
4867 * Only updates the FPU status word (FSW) with the result of the current
4868 * instruction.
4869 *
4870 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4871 * @param pFpuCtx The FPU context.
4872 * @param u16FSW The FSW output of the current instruction.
4873 */
4874static void iemFpuUpdateFSWOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint16_t u16FSW) RT_NOEXCEPT
4875{
4876 uint16_t fNewFsw = pFpuCtx->FSW;
4877 fNewFsw &= ~X86_FSW_C_MASK;
4878 fNewFsw |= u16FSW & ~X86_FSW_TOP_MASK;
4879 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4880 Log11(("iemFpuStoreResultOnly: %04x:%08RX64: FSW %#x -> %#x\n",
4881 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));
4882 pFpuCtx->FSW = fNewFsw;
4883 RT_NOREF(pVCpu);
4884}
4885
4886
4887/**
4888 * Pops one item off the FPU stack if no pending exception prevents it.
4889 *
4890 * @param pFpuCtx The FPU context.
4891 */
4892static void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx) RT_NOEXCEPT
4893{
4894 /* Check pending exceptions. */
4895 uint16_t uFSW = pFpuCtx->FSW;
4896 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4897 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4898 return;
4899
4900    /* TOP++ (popping increments TOP). */
4901 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
4902 uFSW &= ~X86_FSW_TOP_MASK;
4903 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
4904 pFpuCtx->FSW = uFSW;
4905
4906 /* Mark the previous ST0 as empty. */
4907 iOldTop >>= X86_FSW_TOP_SHIFT;
4908 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
4909
4910 /* Rotate the registers. */
4911 iemFpuRotateStackPop(pFpuCtx);
4912}
4913
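/*
 * Sketch of the pop above: adding 9 << X86_FSW_TOP_SHIFT and masking with
 * X86_FSW_TOP_MASK increments the 3-bit TOP field modulo 8 (9 mod 8 = 1),
 * i.e. a pop is TOP = (TOP + 1) & 7.  Clearing the FTW bit for the old TOP
 * marks the popped register as empty, and the rotate keeps aRegs[] in ST()
 * order afterwards.
 */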
4914
4915/**
4916 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
4917 *
4918 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4919 * @param pResult The FPU operation result to push.
4920 */
4921void iemFpuPushResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult) RT_NOEXCEPT
4922{
4923 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4924 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4925 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
4926}
4927
4928
4929/**
4930 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
4931 * and sets FPUDP and FPUDS.
4932 *
4933 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4934 * @param pResult The FPU operation result to push.
4935 * @param iEffSeg The effective segment register.
4936 * @param GCPtrEff The effective address relative to @a iEffSeg.
4937 */
4938void iemFpuPushResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
4939{
4940 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4941 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
4942 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4943 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
4944}
4945
4946
4947/**
4948 * Replace ST0 with the first value and push the second onto the FPU stack,
4949 * unless a pending exception prevents it.
4950 *
4951 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4952 * @param pResult The FPU operation result to store and push.
4953 */
4954void iemFpuPushResultTwo(PVMCPUCC pVCpu, PIEMFPURESULTTWO pResult) RT_NOEXCEPT
4955{
4956 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4957 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4958
4959 /* Update FSW and bail if there are pending exceptions afterwards. */
4960 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
4961 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4962 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4963 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4964 {
4965 if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4966 Log11(("iemFpuPushResultTwo: %04x:%08RX64: FSW %#x -> %#x\n",
4967 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));
4968 pFpuCtx->FSW = fFsw;
4969 return;
4970 }
4971
4972 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
4973 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
4974 {
4975 /* All is fine, push the actual value. */
4976 pFpuCtx->FTW |= RT_BIT(iNewTop);
4977 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
4978 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
4979 }
4980 else if (pFpuCtx->FCW & X86_FCW_IM)
4981 {
4982 /* Masked stack overflow, push QNaN. */
4983 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
4984 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
4985 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
4986 }
4987 else
4988 {
4989 /* Raise stack overflow, don't push anything. */
4990 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
4991 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
4992 Log11(("iemFpuPushResultTwo: %04x:%08RX64: stack overflow (FSW=%#x)\n",
4993 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
4994 return;
4995 }
4996
4997 fFsw &= ~X86_FSW_TOP_MASK;
4998 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
4999 pFpuCtx->FSW = fFsw;
5000
5001 iemFpuRotateStackPush(pFpuCtx);
5002}
5003
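/*
 * Layout note for the two-result push above (illustrative): before the
 * rotate, r80Result1 is written to aRegs[0] (the old ST(0)) and r80Result2 to
 * aRegs[7] (the incoming slot).  After iemFpuRotateStackPush() the second
 * result is the new ST(0) and the first one is ST(1), which matches how
 * instructions like fptan and fsincos replace ST(0) and push a second value
 * on top of it.
 */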
5004
5005/**
5006 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5007 * FOP.
5008 *
5009 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5010 * @param pResult The result to store.
5011 * @param iStReg Which FPU register to store it in.
5012 */
5013void iemFpuStoreResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg) RT_NOEXCEPT
5014{
5015 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5016 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5017 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5018}
5019
5020
5021/**
5022 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5023 * FOP, and then pops the stack.
5024 *
5025 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5026 * @param pResult The result to store.
5027 * @param iStReg Which FPU register to store it in.
5028 */
5029void iemFpuStoreResultThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg) RT_NOEXCEPT
5030{
5031 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5032 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5033 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5034 iemFpuMaybePopOne(pFpuCtx);
5035}
5036
5037
5038/**
5039 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5040 * FPUDP, and FPUDS.
5041 *
5042 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5043 * @param pResult The result to store.
5044 * @param iStReg Which FPU register to store it in.
5045 * @param iEffSeg The effective memory operand selector register.
5046 * @param GCPtrEff The effective memory operand offset.
5047 */
5048void iemFpuStoreResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
5049 uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
5050{
5051 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5052 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5053 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5054 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5055}
5056
5057
5058/**
5059 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5060 * FPUDP, and FPUDS, and then pops the stack.
5061 *
5062 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5063 * @param pResult The result to store.
5064 * @param iStReg Which FPU register to store it in.
5065 * @param iEffSeg The effective memory operand selector register.
5066 * @param GCPtrEff The effective memory operand offset.
5067 */
5068void iemFpuStoreResultWithMemOpThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult,
5069 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
5070{
5071 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5072 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5073 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5074 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5075 iemFpuMaybePopOne(pFpuCtx);
5076}
5077
5078
5079/**
5080 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
5081 *
5082 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5083 */
5084void iemFpuUpdateOpcodeAndIp(PVMCPUCC pVCpu) RT_NOEXCEPT
5085{
5086 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5087 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5088}
5089
5090
5091/**
5092 * Updates the FSW, FOP, FPUIP, and FPUCS.
5093 *
5094 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5095 * @param u16FSW The FSW from the current instruction.
5096 */
5097void iemFpuUpdateFSW(PVMCPUCC pVCpu, uint16_t u16FSW) RT_NOEXCEPT
5098{
5099 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5100 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5101 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5102}
5103
5104
5105/**
5106 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
5107 *
5108 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5109 * @param u16FSW The FSW from the current instruction.
5110 */
5111void iemFpuUpdateFSWThenPop(PVMCPUCC pVCpu, uint16_t u16FSW) RT_NOEXCEPT
5112{
5113 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5114 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5115 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5116 iemFpuMaybePopOne(pFpuCtx);
5117}
5118
5119
5120/**
5121 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
5122 *
5123 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5124 * @param u16FSW The FSW from the current instruction.
5125 * @param iEffSeg The effective memory operand selector register.
5126 * @param GCPtrEff The effective memory operand offset.
5127 */
5128void iemFpuUpdateFSWWithMemOp(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
5129{
5130 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5131 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5132 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5133 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5134}
5135
5136
5137/**
5138 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
5139 *
5140 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5141 * @param u16FSW The FSW from the current instruction.
5142 */
5143void iemFpuUpdateFSWThenPopPop(PVMCPUCC pVCpu, uint16_t u16FSW) RT_NOEXCEPT
5144{
5145 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5146 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5147 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5148 iemFpuMaybePopOne(pFpuCtx);
5149 iemFpuMaybePopOne(pFpuCtx);
5150}
5151
5152
5153/**
5154 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
5155 *
5156 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5157 * @param u16FSW The FSW from the current instruction.
5158 * @param iEffSeg The effective memory operand selector register.
5159 * @param GCPtrEff The effective memory operand offset.
5160 */
5161void iemFpuUpdateFSWWithMemOpThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
5162{
5163 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5164 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5165 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5166 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5167 iemFpuMaybePopOne(pFpuCtx);
5168}
5169
5170
5171/**
5172 * Worker routine for raising an FPU stack underflow exception.
5173 *
5174 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5175 * @param pFpuCtx The FPU context.
5176 * @param iStReg The stack register being accessed.
5177 */
5178static void iemFpuStackUnderflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint8_t iStReg)
5179{
5180 Assert(iStReg < 8 || iStReg == UINT8_MAX);
5181 if (pFpuCtx->FCW & X86_FCW_IM)
5182 {
5183 /* Masked underflow. */
5184 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5185 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5186 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5187 if (iStReg != UINT8_MAX)
5188 {
5189 pFpuCtx->FTW |= RT_BIT(iReg);
5190 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
5191 }
5192 }
5193 else
5194 {
5195 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5196 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5197 Log11(("iemFpuStackUnderflowOnly: %04x:%08RX64: underflow (FSW=%#x)\n",
5198 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5199 }
5200 RT_NOREF(pVCpu);
5201}
5202
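/*
 * Summary of the two paths above: with FCW.IM set the underflow is masked, so
 * IE and SF are raised, C1 is cleared (it is part of X86_FSW_C_MASK) and,
 * unless the caller passed UINT8_MAX, the destination register is tagged as
 * used and loaded with a QNaN.  With IM clear the exception stays pending:
 * IE, SF, ES and B are set and TOP and the registers are left untouched so
 * that a later waiting FPU instruction can deliver \#MF.
 */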
5203
5204/**
5205 * Raises a FPU stack underflow exception.
5206 *
5207 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5208 * @param iStReg The destination register that should be loaded
5209 * with QNaN if \#IS is not masked. Specify
5210 * UINT8_MAX if none (like for fcom).
5211 */
5212void iemFpuStackUnderflow(PVMCPUCC pVCpu, uint8_t iStReg) RT_NOEXCEPT
5213{
5214 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5215 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5216 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5217}
5218
5219
5220void iemFpuStackUnderflowWithMemOp(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
5221{
5222 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5223 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5224 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5225 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5226}
5227
5228
5229void iemFpuStackUnderflowThenPop(PVMCPUCC pVCpu, uint8_t iStReg) RT_NOEXCEPT
5230{
5231 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5232 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5233 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5234 iemFpuMaybePopOne(pFpuCtx);
5235}
5236
5237
5238void iemFpuStackUnderflowWithMemOpThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
5239{
5240 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5241 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5242 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5243 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5244 iemFpuMaybePopOne(pFpuCtx);
5245}
5246
5247
5248void iemFpuStackUnderflowThenPopPop(PVMCPUCC pVCpu) RT_NOEXCEPT
5249{
5250 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5251 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5252 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, UINT8_MAX);
5253 iemFpuMaybePopOne(pFpuCtx);
5254 iemFpuMaybePopOne(pFpuCtx);
5255}
5256
5257
5258void iemFpuStackPushUnderflow(PVMCPUCC pVCpu) RT_NOEXCEPT
5259{
5260 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5261 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5262
5263 if (pFpuCtx->FCW & X86_FCW_IM)
5264 {
5265        /* Masked stack underflow - Push QNaN. */
5266 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5267 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5268 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5269 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5270 pFpuCtx->FTW |= RT_BIT(iNewTop);
5271 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5272 iemFpuRotateStackPush(pFpuCtx);
5273 }
5274 else
5275 {
5276 /* Exception pending - don't change TOP or the register stack. */
5277 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5278 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5279 Log11(("iemFpuStackPushUnderflow: %04x:%08RX64: underflow (FSW=%#x)\n",
5280 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5281 }
5282}
5283
5284
5285void iemFpuStackPushUnderflowTwo(PVMCPUCC pVCpu) RT_NOEXCEPT
5286{
5287 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5288 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5289
5290 if (pFpuCtx->FCW & X86_FCW_IM)
5291 {
5292        /* Masked stack underflow - Push QNaN. */
5293 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5294 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5295 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5296 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5297 pFpuCtx->FTW |= RT_BIT(iNewTop);
5298 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5299 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5300 iemFpuRotateStackPush(pFpuCtx);
5301 }
5302 else
5303 {
5304 /* Exception pending - don't change TOP or the register stack. */
5305 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5306 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5307 Log11(("iemFpuStackPushUnderflowTwo: %04x:%08RX64: underflow (FSW=%#x)\n",
5308 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5309 }
5310}
5311
5312
5313/**
5314 * Worker routine for raising an FPU stack overflow exception on a push.
5315 *
5316 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5317 * @param pFpuCtx The FPU context.
5318 */
5319static void iemFpuStackPushOverflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
5320{
5321 if (pFpuCtx->FCW & X86_FCW_IM)
5322 {
5323 /* Masked overflow. */
5324 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5325 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5326 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
5327 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5328 pFpuCtx->FTW |= RT_BIT(iNewTop);
5329 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5330 iemFpuRotateStackPush(pFpuCtx);
5331 }
5332 else
5333 {
5334 /* Exception pending - don't change TOP or the register stack. */
5335 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5336 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5337 Log11(("iemFpuStackPushOverflowOnly: %04x:%08RX64: overflow (FSW=%#x)\n",
5338 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5339 }
5340 RT_NOREF(pVCpu);
5341}
5342
5343
5344/**
5345 * Raises a FPU stack overflow exception on a push.
5346 *
5347 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5348 */
5349void iemFpuStackPushOverflow(PVMCPUCC pVCpu) RT_NOEXCEPT
5350{
5351 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5352 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5353 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);
5354}
5355
5356
5357/**
5358 * Raises a FPU stack overflow exception on a push with a memory operand.
5359 *
5360 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5361 * @param iEffSeg The effective memory operand selector register.
5362 * @param GCPtrEff The effective memory operand offset.
5363 */
5364void iemFpuStackPushOverflowWithMemOp(PVMCPUCC pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
5365{
5366 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5367 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5368 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5369 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);
5370}
5371
5372/** @} */
5373
5374
5375/** @name SSE+AVX SIMD access and helpers.
5376 *
5377 * @{
5378 */
5379/**
5380 * Stores a result in a SIMD XMM register, updates the MXCSR.
5381 *
5382 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5383 * @param pResult The result to store.
5384 * @param iXmmReg Which SIMD XMM register to store the result in.
5385 */
5386void iemSseStoreResult(PVMCPUCC pVCpu, PCIEMSSERESULT pResult, uint8_t iXmmReg) RT_NOEXCEPT
5387{
5388 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5389 pFpuCtx->MXCSR |= pResult->MXCSR & X86_MXCSR_XCPT_FLAGS;
5390
5391 /* The result is only updated if there is no unmasked exception pending. */
5392 if (( ~((pFpuCtx->MXCSR & X86_MXCSR_XCPT_MASK) >> X86_MXCSR_XCPT_MASK_SHIFT)
5393 & (pFpuCtx->MXCSR & X86_MXCSR_XCPT_FLAGS)) == 0)
5394 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXmmReg] = pResult->uResult;
5395}
5396
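/*
 * How the MXCSR check above works (illustrative): the exception mask bits sit
 * X86_MXCSR_XCPT_MASK_SHIFT (7) bits above the corresponding flag bits, so
 * shifting the masks down aligns them with the flags; the result is committed
 * only when no set exception flag has its mask bit clear.  E.g. MXCSR=0x1F80
 * (all exceptions masked, no flags set) always commits, while an unmasked \#I
 * (flag 0x01 set, mask bit 0x80 clear) leaves the destination unchanged.
 */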
5397
5398/**
5399 * Updates the MXCSR.
5400 *
5401 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5402 * @param fMxcsr The new MXCSR value.
5403 */
5404void iemSseUpdateMxcsr(PVMCPUCC pVCpu, uint32_t fMxcsr) RT_NOEXCEPT
5405{
5406 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5407 pFpuCtx->MXCSR |= fMxcsr & X86_MXCSR_XCPT_FLAGS;
5408}
5409/** @} */
5410
5411
5412/** @name Memory access.
5413 *
5414 * @{
5415 */
5416
5417
5418/**
5419 * Updates the IEMCPU::cbWritten counter if applicable.
5420 *
5421 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5422 * @param fAccess The access being accounted for.
5423 * @param cbMem The access size.
5424 */
5425DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPUCC pVCpu, uint32_t fAccess, size_t cbMem)
5426{
5427 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
5428 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
5429 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
5430}
5431
5432
5433/**
5434 * Applies the segment limit, base and attributes.
5435 *
5436 * This may raise a \#GP or \#SS.
5437 *
5438 * @returns VBox strict status code.
5439 *
5440 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5441 * @param fAccess The kind of access which is being performed.
5442 * @param iSegReg The index of the segment register to apply.
5443 * This is UINT8_MAX if none (for IDT, GDT, LDT,
5444 * TSS, ++).
5445 * @param cbMem The access size.
5446 * @param pGCPtrMem Pointer to the guest memory address to apply
5447 * segmentation to. Input and output parameter.
5448 */
5449VBOXSTRICTRC iemMemApplySegment(PVMCPUCC pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem) RT_NOEXCEPT
5450{
5451 if (iSegReg == UINT8_MAX)
5452 return VINF_SUCCESS;
5453
5454 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
5455 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
5456 switch (IEM_GET_CPU_MODE(pVCpu))
5457 {
5458 case IEMMODE_16BIT:
5459 case IEMMODE_32BIT:
5460 {
5461 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
5462 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
5463
5464 if ( pSel->Attr.n.u1Present
5465 && !pSel->Attr.n.u1Unusable)
5466 {
5467 Assert(pSel->Attr.n.u1DescType);
5468 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
5469 {
5470 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5471 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
5472 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5473
5474 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5475 {
5476 /** @todo CPL check. */
5477 }
5478
5479 /*
5480 * There are two kinds of data selectors, normal and expand down.
5481 */
5482 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
5483 {
5484 if ( GCPtrFirst32 > pSel->u32Limit
5485 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5486 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5487 }
5488 else
5489 {
5490 /*
5491 * The upper boundary is defined by the B bit, not the G bit!
5492 */
5493 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
5494 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
5495 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5496 }
5497 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5498 }
5499 else
5500 {
5501 /*
5502              * Code selectors can usually be used to read through; writing is
5503              * only permitted in real and V8086 mode.
5504 */
5505 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5506 || ( (fAccess & IEM_ACCESS_TYPE_READ)
5507 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
5508 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
5509 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5510
5511 if ( GCPtrFirst32 > pSel->u32Limit
5512 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5513 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5514
5515 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5516 {
5517 /** @todo CPL check. */
5518 }
5519
5520 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5521 }
5522 }
5523 else
5524 return iemRaiseGeneralProtectionFault0(pVCpu);
5525 return VINF_SUCCESS;
5526 }
5527
5528 case IEMMODE_64BIT:
5529 {
5530 RTGCPTR GCPtrMem = *pGCPtrMem;
5531 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
5532 *pGCPtrMem = GCPtrMem + pSel->u64Base;
5533
5534 Assert(cbMem >= 1);
5535 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
5536 return VINF_SUCCESS;
5537 /** @todo We should probably raise \#SS(0) here if segment is SS; see AMD spec.
5538 * 4.12.2 "Data Limit Checks in 64-bit Mode". */
5539 return iemRaiseGeneralProtectionFault0(pVCpu);
5540 }
5541
5542 default:
5543 AssertFailedReturn(VERR_IEM_IPE_7);
5544 }
5545}
5546
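/*
 * Expand-down example for the check above (illustrative): for an expand-down
 * data segment the valid offsets are limit+1 up to 0xFFFF (B=0) or 0xFFFFFFFF
 * (B=1).  With u32Limit=0x0FFF and B=0, an access at 0x0800 faults
 * (0x0800 < 0x1000) while one in the 0x1000..0xFFFF range passes, provided
 * the last byte does not exceed the 0xFFFF upper bound.  In 64-bit mode only
 * the FS/GS bases are applied and the address merely has to be canonical.
 */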
5547
5548/**
5549 * Translates a virtual address to a physical address and checks if we
5550 * can access the page as specified.
5551 *
5552 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5553 * @param GCPtrMem The virtual address.
5554 * @param cbAccess The access size, for raising \#PF correctly for
5555 * FXSAVE and such.
5556 * @param fAccess The intended access.
5557 * @param pGCPhysMem Where to return the physical address.
5558 */
5559VBOXSTRICTRC iemMemPageTranslateAndCheckAccess(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t cbAccess,
5560 uint32_t fAccess, PRTGCPHYS pGCPhysMem) RT_NOEXCEPT
5561{
5562 /** @todo Need a different PGM interface here. We're currently using
5563 * generic / REM interfaces. this won't cut it for R0. */
5564 /** @todo If/when PGM handles paged real-mode, we can remove the hack in
5565 * iemSvmWorldSwitch/iemVmxWorldSwitch to work around raising a page-fault
5566 * here. */
5567 PGMPTWALK Walk;
5568 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
5569 if (RT_FAILURE(rc))
5570 {
5571 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
5572 /** @todo Check unassigned memory in unpaged mode. */
5573 /** @todo Reserved bits in page tables. Requires new PGM interface. */
5574#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5575 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5576 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
5577#endif
5578 *pGCPhysMem = NIL_RTGCPHYS;
5579 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess, rc);
5580 }
5581
5582 /* If the page is writable and does not have the no-exec bit set, all
5583 access is allowed. Otherwise we'll have to check more carefully... */
5584 if ((Walk.fEffective & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
5585 {
5586 /* Write to read only memory? */
5587 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5588 && !(Walk.fEffective & X86_PTE_RW)
5589 && ( ( IEM_GET_CPL(pVCpu) == 3
5590 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5591 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
5592 {
5593 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
5594 *pGCPhysMem = NIL_RTGCPHYS;
5595#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5596 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5597 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5598#endif
5599 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
5600 }
5601
5602 /* Kernel memory accessed by userland? */
5603 if ( !(Walk.fEffective & X86_PTE_US)
5604 && IEM_GET_CPL(pVCpu) == 3
5605 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5606 {
5607 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
5608 *pGCPhysMem = NIL_RTGCPHYS;
5609#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5610 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5611 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5612#endif
5613 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess, VERR_ACCESS_DENIED);
5614 }
5615
5616 /* Executing non-executable memory? */
5617 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
5618 && (Walk.fEffective & X86_PTE_PAE_NX)
5619 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) )
5620 {
5621 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
5622 *pGCPhysMem = NIL_RTGCPHYS;
5623#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5624 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5625 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5626#endif
5627 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
5628 VERR_ACCESS_DENIED);
5629 }
5630 }
5631
5632 /*
5633 * Set the dirty / access flags.
5634     * ASSUMES this is set when the address is translated rather than on commit...
5635 */
5636 /** @todo testcase: check when A and D bits are actually set by the CPU. */
5637 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
5638 if ((Walk.fEffective & fAccessedDirty) != fAccessedDirty)
5639 {
5640 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
5641 AssertRC(rc2);
5642 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
5643 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
5644 }
5645
5646 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
5647 *pGCPhysMem = GCPhys;
5648 return VINF_SUCCESS;
5649}
5650
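/*
 * Rough shape of the checks above: a page that is writable, user accessible
 * and not no-execute (RW and US set, NX clear in the effective flags) takes
 * the fast path with no further tests.  Otherwise the slow path raises \#PF
 * for writes to read-only pages (from ring-3, or from any ring when CR0.WP is
 * set), for ring-3 access to supervisor pages, and for instruction fetches
 * from NX pages when EFER.NXE is enabled.  The accessed bit - and the dirty
 * bit for writes - is then set via PGMGstModifyPage() at translation time
 * rather than at commit time.
 */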
5651
5652/**
5653 * Looks up a memory mapping entry.
5654 *
5655 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
5656 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5657 * @param pvMem The memory address.
5658 * @param   fAccess             The access type to match.
5659 */
5660DECLINLINE(int) iemMapLookup(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
5661{
5662 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
5663 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
5664 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
5665 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5666 return 0;
5667 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
5668 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5669 return 1;
5670 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
5671 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5672 return 2;
5673 return VERR_NOT_FOUND;
5674}
5675
5676
5677/**
5678 * Finds a free memmap entry when using iNextMapping doesn't work.
5679 *
5680 * @returns Memory mapping index, 1024 on failure.
5681 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5682 */
5683static unsigned iemMemMapFindFree(PVMCPUCC pVCpu)
5684{
5685 /*
5686 * The easy case.
5687 */
5688 if (pVCpu->iem.s.cActiveMappings == 0)
5689 {
5690 pVCpu->iem.s.iNextMapping = 1;
5691 return 0;
5692 }
5693
5694 /* There should be enough mappings for all instructions. */
5695 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
5696
5697 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
5698 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
5699 return i;
5700
5701 AssertFailedReturn(1024);
5702}
5703
5704
5705/**
5706 * Commits a bounce buffer that needs writing back and unmaps it.
5707 *
5708 * @returns Strict VBox status code.
5709 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5710 * @param iMemMap The index of the buffer to commit.
5711 * @param fPostponeFail Whether we can postpone writer failures to ring-3.
5712 * Always false in ring-3, obviously.
5713 */
5714static VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPUCC pVCpu, unsigned iMemMap, bool fPostponeFail)
5715{
5716 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
5717 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
5718#ifdef IN_RING3
5719 Assert(!fPostponeFail);
5720 RT_NOREF_PV(fPostponeFail);
5721#endif
5722
5723 /*
5724 * Do the writing.
5725 */
5726 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5727 if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)
5728 {
5729 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
5730 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
5731 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
5732 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
5733 {
5734 /*
5735 * Carefully and efficiently dealing with access handler return
5736             * codes makes this a little bloated.
5737 */
5738 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
5739 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
5740 pbBuf,
5741 cbFirst,
5742 PGMACCESSORIGIN_IEM);
5743 if (rcStrict == VINF_SUCCESS)
5744 {
5745 if (cbSecond)
5746 {
5747 rcStrict = PGMPhysWrite(pVM,
5748 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5749 pbBuf + cbFirst,
5750 cbSecond,
5751 PGMACCESSORIGIN_IEM);
5752 if (rcStrict == VINF_SUCCESS)
5753 { /* nothing */ }
5754 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5755 {
5756 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
5757 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5758 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5759 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5760 }
5761#ifndef IN_RING3
5762 else if (fPostponeFail)
5763 {
5764 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5765 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5766 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5767 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
5768 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5769 return iemSetPassUpStatus(pVCpu, rcStrict);
5770 }
5771#endif
5772 else
5773 {
5774 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5775 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5776 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5777 return rcStrict;
5778 }
5779 }
5780 }
5781 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5782 {
5783 if (!cbSecond)
5784 {
5785 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
5786 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
5787 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5788 }
5789 else
5790 {
5791 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
5792 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5793 pbBuf + cbFirst,
5794 cbSecond,
5795 PGMACCESSORIGIN_IEM);
5796 if (rcStrict2 == VINF_SUCCESS)
5797 {
5798 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
5799 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5800 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5801 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5802 }
5803 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
5804 {
5805 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
5806 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5807 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
5808 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
5809 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5810 }
5811#ifndef IN_RING3
5812 else if (fPostponeFail)
5813 {
5814 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5815 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5816 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5817 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
5818 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5819 return iemSetPassUpStatus(pVCpu, rcStrict);
5820 }
5821#endif
5822 else
5823 {
5824 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5825 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5826 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
5827 return rcStrict2;
5828 }
5829 }
5830 }
5831#ifndef IN_RING3
5832 else if (fPostponeFail)
5833 {
5834 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5835 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5836 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5837 if (!cbSecond)
5838 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
5839 else
5840 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
5841 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5842 return iemSetPassUpStatus(pVCpu, rcStrict);
5843 }
5844#endif
5845 else
5846 {
5847 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
5848 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5849 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5850 return rcStrict;
5851 }
5852 }
5853 else
5854 {
5855 /*
5856 * No access handlers, much simpler.
5857 */
5858 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
5859 if (RT_SUCCESS(rc))
5860 {
5861 if (cbSecond)
5862 {
5863 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
5864 if (RT_SUCCESS(rc))
5865 { /* likely */ }
5866 else
5867 {
5868 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5869 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5870 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
5871 return rc;
5872 }
5873 }
5874 }
5875 else
5876 {
5877 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
5878 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
5879 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5880 return rc;
5881 }
5882 }
5883 }
5884
5885#if defined(IEM_LOG_MEMORY_WRITES)
5886 Log(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
5887 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
5888 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
5889 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5890 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
5891 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
5892
5893 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
5894 g_cbIemWrote = cbWrote;
5895 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
5896#endif
5897
5898 /*
5899 * Free the mapping entry.
5900 */
5901 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
5902 Assert(pVCpu->iem.s.cActiveMappings != 0);
5903 pVCpu->iem.s.cActiveMappings--;
5904 return VINF_SUCCESS;
5905}
5906
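/*
 * Note on the status handling above, roughly: informational strict status
 * codes from PGMPhysWrite() are merged via PGM_PHYS_RW_DO_UPDATE_STRICT_RC()
 * and handed to iemSetPassUpStatus() so the caller can return them to ring-3,
 * while hard failures are returned immediately.  Outside ring-3, when the
 * caller allows it via fPostponeFail, a failed write is instead recorded with
 * the IEM_ACCESS_PENDING_R3_WRITE_* flags and VMCPU_FF_IEM so ring-3 can
 * finish the commit later.
 */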
5907
5908/**
5909 * iemMemMap worker that deals with a request crossing pages.
5910 */
5911static VBOXSTRICTRC
5912iemMemBounceBufferMapCrossPage(PVMCPUCC pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
5913{
5914 Assert(cbMem <= GUEST_PAGE_SIZE);
5915
5916 /*
5917 * Do the address translations.
5918 */
5919 uint32_t const cbFirstPage = GUEST_PAGE_SIZE - (uint32_t)(GCPtrFirst & GUEST_PAGE_OFFSET_MASK);
5920 RTGCPHYS GCPhysFirst;
5921 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, cbFirstPage, fAccess, &GCPhysFirst);
5922 if (rcStrict != VINF_SUCCESS)
5923 return rcStrict;
5924 Assert((GCPhysFirst & GUEST_PAGE_OFFSET_MASK) == (GCPtrFirst & GUEST_PAGE_OFFSET_MASK));
5925
5926 uint32_t const cbSecondPage = (uint32_t)cbMem - cbFirstPage;
5927 RTGCPHYS GCPhysSecond;
5928 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK,
5929 cbSecondPage, fAccess, &GCPhysSecond);
5930 if (rcStrict != VINF_SUCCESS)
5931 return rcStrict;
5932 Assert((GCPhysSecond & GUEST_PAGE_OFFSET_MASK) == 0);
5933 GCPhysSecond &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK; /** @todo why? */
5934
5935 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5936
5937 /*
5938 * Read in the current memory content if it's a read, execute or partial
5939 * write access.
5940 */
5941 uint8_t * const pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
5942
5943 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
5944 {
5945 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
5946 {
5947 /*
5948 * Must carefully deal with access handler status codes here,
5949 * makes the code a bit bloated.
5950 */
5951 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
5952 if (rcStrict == VINF_SUCCESS)
5953 {
5954 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
5955 if (rcStrict == VINF_SUCCESS)
5956 { /*likely */ }
5957 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5958 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5959 else
5960 {
5961 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (!!)\n",
5962 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5963 return rcStrict;
5964 }
5965 }
5966 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5967 {
5968 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
5969 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
5970 {
5971 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
5972 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5973 }
5974 else
5975 {
5976 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
5977                      GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
5978 return rcStrict2;
5979 }
5980 }
5981 else
5982 {
5983 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
5984 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
5985 return rcStrict;
5986 }
5987 }
5988 else
5989 {
5990 /*
5991             * No informational status codes here, much more straightforward.
5992 */
5993 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
5994 if (RT_SUCCESS(rc))
5995 {
5996 Assert(rc == VINF_SUCCESS);
5997 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
5998 if (RT_SUCCESS(rc))
5999 Assert(rc == VINF_SUCCESS);
6000 else
6001 {
6002 Log(("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
6003 return rc;
6004 }
6005 }
6006 else
6007 {
6008 Log(("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
6009 return rc;
6010 }
6011 }
6012 }
6013#ifdef VBOX_STRICT
6014 else
6015 memset(pbBuf, 0xcc, cbMem);
6016 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
6017 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
6018#endif
6019 AssertCompileMemberAlignment(VMCPU, iem.s.aBounceBuffers, 64);
6020
6021 /*
6022 * Commit the bounce buffer entry.
6023 */
6024 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6025 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
6026 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
6027 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
6028 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
6029 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
6030 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6031 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6032 pVCpu->iem.s.cActiveMappings++;
6033
6034 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6035 *ppvMem = pbBuf;
6036 return VINF_SUCCESS;
6037}
6038
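/*
 * Worked example of the split above (illustrative only, assuming the usual
 * 4 KiB guest page size): a 4 byte access at GCPtrFirst=0x7ffe has a page
 * offset of 0xffe, giving cbFirstPage = 0x1000 - 0xffe = 2 and
 * cbSecondPage = 4 - 2 = 2, read back to back into the bounce buffer.
 */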
6039
6040/**
6041 * iemMemMap worker that deals with iemMemPageMap failures.
6042 */
6043static VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPUCC pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
6044 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
6045{
6046 /*
6047 * Filter out conditions we can handle and the ones which shouldn't happen.
6048 */
6049 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
6050 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
6051 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
6052 {
6053 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
6054 return rcMap;
6055 }
6056 pVCpu->iem.s.cPotentialExits++;
6057
6058 /*
6059 * Read in the current memory content if it's a read, execute or partial
6060 * write access.
6061 */
6062 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
6063 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6064 {
6065 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
6066 memset(pbBuf, 0xff, cbMem);
6067 else
6068 {
6069 int rc;
6070 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
6071 {
6072 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
6073 if (rcStrict == VINF_SUCCESS)
6074 { /* nothing */ }
6075 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6076 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6077 else
6078 {
6079 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6080 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6081 return rcStrict;
6082 }
6083 }
6084 else
6085 {
6086 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
6087 if (RT_SUCCESS(rc))
6088 { /* likely */ }
6089 else
6090 {
6091 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6092 GCPhysFirst, rc));
6093 return rc;
6094 }
6095 }
6096 }
6097 }
6098#ifdef VBOX_STRICT
6099 else
6100 memset(pbBuf, 0xcc, cbMem);
6101#endif
6102#ifdef VBOX_STRICT
6103 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
6104 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
6105#endif
6106
6107 /*
6108 * Commit the bounce buffer entry.
6109 */
6110 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6111 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
6112 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
6113 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
6114 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
6115 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
6116 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6117 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6118 pVCpu->iem.s.cActiveMappings++;
6119
6120 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6121 *ppvMem = pbBuf;
6122 return VINF_SUCCESS;
6123}
6124
6125
6126
6127/**
6128 * Maps the specified guest memory for the given kind of access.
6129 *
6130 * This may be using bounce buffering of the memory if it's crossing a page
6131 * boundary or if there is an access handler installed for any of it. Because
6132 * of lock prefix guarantees, we're in for some extra clutter when this
6133 * happens.
6134 *
6135 * This may raise a \#GP, \#SS, \#PF or \#AC.
6136 *
6137 * @returns VBox strict status code.
6138 *
6139 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6140 * @param ppvMem Where to return the pointer to the mapped memory.
6141 * @param cbMem The number of bytes to map. This is usually 1, 2, 4, 6,
6142 * 8, 12, 16, 32 or 512. When used by string operations
6143 * it can be up to a page.
6144 * @param iSegReg The index of the segment register to use for this
6145 * access. The base and limits are checked. Use UINT8_MAX
6146 * to indicate that no segmentation is required (for IDT,
6147 * GDT and LDT accesses).
6148 * @param GCPtrMem The address of the guest memory.
6149 * @param fAccess How the memory is being accessed. The
6150 * IEM_ACCESS_TYPE_XXX bit is used to figure out how to map
6151 * the memory, while the IEM_ACCESS_WHAT_XXX bit is used
6152 * when raising exceptions.
6153 * @param uAlignCtl Alignment control:
6154 * - Bits 15:0 is the alignment mask.
6155 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
6156 * IEM_MEMMAP_F_ALIGN_SSE, and
6157 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
6158 * Pass zero to skip alignment.
6159 */
6160VBOXSTRICTRC iemMemMap(PVMCPUCC pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,
6161 uint32_t fAccess, uint32_t uAlignCtl) RT_NOEXCEPT
6162{
6163 /*
6164 * Check the input and figure out which mapping entry to use.
6165 */
6166 Assert(cbMem <= sizeof(pVCpu->iem.s.aBounceBuffers[0]));
6167 Assert( cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 102 || cbMem == 94
6168 || (iSegReg == UINT8_MAX && uAlignCtl == 0 && fAccess == IEM_ACCESS_DATA_R /* for the CPUID logging interface */) );
6169 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
6170 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
6171
6172 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
6173 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6174 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
6175 {
6176 iMemMap = iemMemMapFindFree(pVCpu);
6177 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
6178 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
6179 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
6180 pVCpu->iem.s.aMemMappings[2].fAccess),
6181 VERR_IEM_IPE_9);
6182 }
6183
6184 /*
6185 * Map the memory, checking that we can actually access it. If something
6186 * slightly complicated happens, fall back on bounce buffering.
6187 */
6188 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
6189 if (rcStrict == VINF_SUCCESS)
6190 { /* likely */ }
6191 else
6192 return rcStrict;
6193
6194 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE) /* Crossing a page boundary? */
6195 { /* likely */ }
6196 else
6197 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
6198
6199 /*
6200 * Alignment check.
6201 */
6202 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
6203 { /* likelyish */ }
6204 else
6205 {
6206 /* Misaligned access. */
6207 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6208 {
6209 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
6210 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
6211 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
6212 {
6213 AssertCompile(X86_CR0_AM == X86_EFL_AC);
6214
6215 if (iemMemAreAlignmentChecksEnabled(pVCpu))
6216 return iemRaiseAlignmentCheckException(pVCpu);
6217 }
6218 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
6219 && (GCPtrMem & 3) /* The value 4 matches 10980xe's FXSAVE and helps make bs3-cpu-basic2 work. */
6220 /** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
6221 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. Using 4 for now as
6222 * that's what FXSAVE does on a 10980xe. */
6223 && iemMemAreAlignmentChecksEnabled(pVCpu))
6224 return iemRaiseAlignmentCheckException(pVCpu);
6225 else
6226 return iemRaiseGeneralProtectionFault0(pVCpu);
6227 }
6228 }
6229
6230#ifdef IEM_WITH_DATA_TLB
6231 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
6232
6233 /*
6234 * Get the TLB entry for this page.
6235 */
6236 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
6237 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
6238 if (pTlbe->uTag == uTag)
6239 {
6240# ifdef VBOX_WITH_STATISTICS
6241 pVCpu->iem.s.DataTlb.cTlbHits++;
6242# endif
6243 }
6244 else
6245 {
6246 pVCpu->iem.s.DataTlb.cTlbMisses++;
6247 PGMPTWALK Walk;
6248 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
6249 if (RT_FAILURE(rc))
6250 {
6251 Log(("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
6252# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6253 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6254 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
6255# endif
6256 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, rc);
6257 }
6258
6259 Assert(Walk.fSucceeded);
6260 pTlbe->uTag = uTag;
6261 pTlbe->fFlagsAndPhysRev = ~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A); /* skipping NX */
6262 pTlbe->GCPhys = Walk.GCPhys;
6263 pTlbe->pbMappingR3 = NULL;
6264 }
6265
6266 /*
6267 * Check TLB page table level access flags.
6268 */
6269 /* If the page is either supervisor only or non-writable, we need to do
6270 more careful access checks. */
6271 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_WRITE))
6272 {
6273 /* Write to read only memory? */
6274 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE)
6275 && (fAccess & IEM_ACCESS_TYPE_WRITE)
6276 && ( ( IEM_GET_CPL(pVCpu) == 3
6277 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6278 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
6279 {
6280 Log(("iemMemMap: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6281# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6282 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6283 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6284# endif
6285 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6286 }
6287
6288 /* Kernel memory accessed by userland? */
6289 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER)
6290 && IEM_GET_CPL(pVCpu) == 3
6291 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6292 {
6293 Log(("iemMemMap: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6294# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6295 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6296 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6297# endif
6298 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, VERR_ACCESS_DENIED);
6299 }
6300 }
6301
6302 /*
6303 * Set the dirty / access flags.
6304 * ASSUMES this is set when the address is translated rather than on commit...
6305 */
6306 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6307 uint64_t const fTlbAccessedDirty = (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PT_NO_DIRTY : 0) | IEMTLBE_F_PT_NO_ACCESSED;
6308 if (pTlbe->fFlagsAndPhysRev & fTlbAccessedDirty)
6309 {
6310 uint32_t const fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
6311 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
6312 AssertRC(rc2);
6313 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
6314 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
6315 pTlbe->fFlagsAndPhysRev &= ~fTlbAccessedDirty;
6316 }
6317
6318 /*
6319 * Look up the physical page info if necessary.
6320 */
6321 uint8_t *pbMem = NULL;
6322 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6323# ifdef IN_RING3
6324 pbMem = pTlbe->pbMappingR3;
6325# else
6326 pbMem = NULL;
6327# endif
6328 else
6329 {
6330 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
6331 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
6332 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
6333 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
6334 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
6335 { /* likely */ }
6336 else
6337 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
6338 pTlbe->pbMappingR3 = NULL;
6339 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
6340 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_UNASSIGNED);
6341 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6342 &pbMem, &pTlbe->fFlagsAndPhysRev);
6343 AssertRCReturn(rc, rc);
6344# ifdef IN_RING3
6345 pTlbe->pbMappingR3 = pbMem;
6346# endif
6347 }
6348
6349 /*
6350 * Check the physical page level access and mapping.
6351 */
6352 if ( !(pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))
6353 || !(pTlbe->fFlagsAndPhysRev & ( (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PG_NO_WRITE : 0)
6354 | (fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0))) )
6355 { /* probably likely */ }
6356 else
6357 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem,
6358 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
6359 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
6360 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
6361 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
6362 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
6363
6364 if (pbMem)
6365 {
6366 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
6367 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6368 fAccess |= IEM_ACCESS_NOT_LOCKED;
6369 }
6370 else
6371 {
6372 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
6373 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6374 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6375 if (rcStrict != VINF_SUCCESS)
6376 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
6377 }
6378
6379 void * const pvMem = pbMem;
6380
6381 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6382 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6383 if (fAccess & IEM_ACCESS_TYPE_READ)
6384 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6385
6386#else /* !IEM_WITH_DATA_TLB */
6387
6388 RTGCPHYS GCPhysFirst;
6389 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, &GCPhysFirst);
6390 if (rcStrict != VINF_SUCCESS)
6391 return rcStrict;
6392
6393 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6394 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6395 if (fAccess & IEM_ACCESS_TYPE_READ)
6396 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6397
6398 void *pvMem;
6399 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6400 if (rcStrict != VINF_SUCCESS)
6401 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
6402
6403#endif /* !IEM_WITH_DATA_TLB */
6404
6405 /*
6406 * Fill in the mapping table entry.
6407 */
6408 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
6409 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
6410 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6411 pVCpu->iem.s.cActiveMappings += 1;
6412
6413 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6414 *ppvMem = pvMem;
6415
6416 return VINF_SUCCESS;
6417}
6418
6419
6420/**
6421 * Commits the guest memory if bounce buffered and unmaps it.
6422 *
6423 * @returns Strict VBox status code.
6424 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6425 * @param pvMem The mapping.
6426 * @param fAccess The kind of access.
6427 */
6428VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) RT_NOEXCEPT
6429{
6430 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
6431 AssertReturn(iMemMap >= 0, iMemMap);
6432
6433 /* If it's bounce buffered, we may need to write back the buffer. */
6434 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6435 {
6436 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6437 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
6438 }
6439 /* Otherwise unlock it. */
6440 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6441 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6442
6443 /* Free the entry. */
6444 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6445 Assert(pVCpu->iem.s.cActiveMappings != 0);
6446 pVCpu->iem.s.cActiveMappings--;
6447 return VINF_SUCCESS;
6448}
6449
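/*
 * Illustrative usage sketch (hypothetical caller; X86_SREG_DS and GCPtrEff are
 * stand-ins): a typical caller pairs iemMemMap with iemMemCommitAndUnmap around
 * the actual guest memory access, e.g. for a naturally aligned dword read:
 *
 *      uint32_t const *pu32Src  = NULL;
 *      VBOXSTRICTRC    rcStrict = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_DS,
 *                                           GCPtrEff, IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
 *      if (rcStrict == VINF_SUCCESS)
 *      {
 *          uint32_t const uValue = *pu32Src;   // use the mapping before unmapping it
 *          rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
 *      }
 */
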
6450#ifdef IEM_WITH_SETJMP
6451
6452/**
6453 * Maps the specified guest memory for the given kind of access, longjmp on
6454 * error.
6455 *
6456 * This may be using bounce buffering of the memory if it's crossing a page
6457 * boundary or if there is an access handler installed for any of it. Because
6458 * of lock prefix guarantees, we're in for some extra clutter when this
6459 * happens.
6460 *
6461 * This may raise a \#GP, \#SS, \#PF or \#AC.
6462 *
6463 * @returns Pointer to the mapped memory.
6464 *
6465 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6466 * @param cbMem The number of bytes to map. This is usually 1,
6467 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
6468 * string operations it can be up to a page.
6469 * @param iSegReg The index of the segment register to use for
6470 * this access. The base and limits are checked.
6471 * Use UINT8_MAX to indicate that no segmentation
6472 * is required (for IDT, GDT and LDT accesses).
6473 * @param GCPtrMem The address of the guest memory.
6474 * @param fAccess How the memory is being accessed. The
6475 * IEM_ACCESS_TYPE_XXX bit is used to figure out
6476 * how to map the memory, while the
6477 * IEM_ACCESS_WHAT_XXX bit is used when raising
6478 * exceptions.
6479 * @param uAlignCtl Alignment control:
6480 * - Bits 15:0 is the alignment mask.
6481 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
6482 * IEM_MEMMAP_F_ALIGN_SSE, and
6483 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
6484 * Pass zero to skip alignment.
6485 */
6486void *iemMemMapJmp(PVMCPUCC pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess,
6487 uint32_t uAlignCtl) IEM_NOEXCEPT_MAY_LONGJMP
6488{
6489 /*
6490 * Check the input, check segment access and adjust address
6491 * with segment base.
6492 */
6493 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
6494 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
6495 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
6496
6497 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
6498 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
6499 else IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6500
6501 /*
6502 * Alignment check.
6503 */
6504 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
6505 { /* likelyish */ }
6506 else
6507 {
6508 /* Misaligned access. */
6509 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6510 {
6511 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
6512 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
6513 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
6514 {
6515 AssertCompile(X86_CR0_AM == X86_EFL_AC);
6516
6517 if (iemMemAreAlignmentChecksEnabled(pVCpu))
6518 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
6519 }
6520 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
6521 && (GCPtrMem & 3) /* The value 4 matches 10980xe's FXSAVE and helps make bs3-cpu-basic2 work. */
6522 /** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
6523 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. Using 4 for now as
6524 * that's what FXSAVE does on a 10980xe. */
6525 && iemMemAreAlignmentChecksEnabled(pVCpu))
6526 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
6527 else
6528 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
6529 }
6530 }
6531
6532 /*
6533 * Figure out which mapping entry to use.
6534 */
6535 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
6536 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6537 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
6538 {
6539 iMemMap = iemMemMapFindFree(pVCpu);
6540 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
6541 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
6542 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
6543 pVCpu->iem.s.aMemMappings[2].fAccess),
6544 IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_9));
6545 }
6546
6547 /*
6548 * Crossing a page boundary?
6549 */
6550 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE)
6551 { /* No (likely). */ }
6552 else
6553 {
6554 void *pvMem;
6555 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
6556 if (rcStrict == VINF_SUCCESS)
6557 return pvMem;
6558 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6559 }
6560
6561#ifdef IEM_WITH_DATA_TLB
6562 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
6563
6564 /*
6565 * Get the TLB entry for this page.
6566 */
6567 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
6568 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
6569 if (pTlbe->uTag == uTag)
6570 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
6571 else
6572 {
6573 pVCpu->iem.s.DataTlb.cTlbMisses++;
6574 PGMPTWALK Walk;
6575 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
6576 if (RT_FAILURE(rc))
6577 {
6578 Log(("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
6579# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6580 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6581 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
6582# endif
6583 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, rc);
6584 }
6585
6586 Assert(Walk.fSucceeded);
6587 pTlbe->uTag = uTag;
6588 pTlbe->fFlagsAndPhysRev = ~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A); /* skipping NX */
6589 pTlbe->GCPhys = Walk.GCPhys;
6590 pTlbe->pbMappingR3 = NULL;
6591 }
6592
6593 /*
6594 * Check the flags and physical revision.
6595 */
6596 /** @todo make the caller pass these in with fAccess. */
6597 uint64_t const fNoUser = (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS && IEM_GET_CPL(pVCpu) == 3
6598 ? IEMTLBE_F_PT_NO_USER : 0;
6599 uint64_t const fNoWriteNoDirty = fAccess & IEM_ACCESS_TYPE_WRITE
6600 ? IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PT_NO_DIRTY
6601 | ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)
6602 || (IEM_GET_CPL(pVCpu) == 3 && (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6603 ? IEMTLBE_F_PT_NO_WRITE : 0)
6604 : 0;
6605 uint64_t const fNoRead = fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0;
6606 uint8_t *pbMem = NULL;
6607 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PT_NO_ACCESSED | fNoRead | fNoWriteNoDirty | fNoUser))
6608 == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6609# ifdef IN_RING3
6610 pbMem = pTlbe->pbMappingR3;
6611# else
6612 pbMem = NULL;
6613# endif
6614 else
6615 {
6616 /*
6617 * Okay, something isn't quite right or needs refreshing.
6618 */
6619 /* Write to read only memory? */
6620 if (pTlbe->fFlagsAndPhysRev & fNoWriteNoDirty & IEMTLBE_F_PT_NO_WRITE)
6621 {
6622 Log(("iemMemMapJmp: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6623# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6624 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6625 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6626# endif
6627 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6628 }
6629
6630 /* Kernel memory accessed by userland? */
6631 if (pTlbe->fFlagsAndPhysRev & fNoUser & IEMTLBE_F_PT_NO_USER)
6632 {
6633 Log(("iemMemMapJmp: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6634# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6635 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6636 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6637# endif
6638 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, VERR_ACCESS_DENIED);
6639 }
6640
6641 /* Set the dirty / access flags.
6642 ASSUMES this is set when the address is translated rather than on commit... */
6643 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6644 if (pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED))
6645 {
6646 uint32_t const fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
6647 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
6648 AssertRC(rc2);
6649 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
6650 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
6651 pTlbe->fFlagsAndPhysRev &= ~((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED);
6652 }
6653
6654 /*
6655 * Check if the physical page info needs updating.
6656 */
6657 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6658# ifdef IN_RING3
6659 pbMem = pTlbe->pbMappingR3;
6660# else
6661 pbMem = NULL;
6662# endif
6663 else
6664 {
6665 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
6666 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
6667 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
6668 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
6669 pTlbe->pbMappingR3 = NULL;
6670 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
6671 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_UNASSIGNED);
6672 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6673 &pbMem, &pTlbe->fFlagsAndPhysRev);
6674 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
6675# ifdef IN_RING3
6676 pTlbe->pbMappingR3 = pbMem;
6677# endif
6678 }
6679
6680 /*
6681 * Check the physical page level access and mapping.
6682 */
6683 if (!(pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty | fNoRead) & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))))
6684 { /* probably likely */ }
6685 else
6686 {
6687 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, (void **)&pbMem, cbMem,
6688 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
6689 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
6690 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
6691 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
6692 if (rcStrict == VINF_SUCCESS)
6693 return pbMem;
6694 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6695 }
6696 }
6697 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
6698
6699 if (pbMem)
6700 {
6701 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
6702 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6703 fAccess |= IEM_ACCESS_NOT_LOCKED;
6704 }
6705 else
6706 {
6707 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
6708 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6709 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6710 if (rcStrict == VINF_SUCCESS)
6711 return pbMem;
6712 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6713 }
6714
6715 void * const pvMem = pbMem;
6716
6717 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6718 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6719 if (fAccess & IEM_ACCESS_TYPE_READ)
6720 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6721
6722#else /* !IEM_WITH_DATA_TLB */
6723
6724
6725 RTGCPHYS GCPhysFirst;
6726 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, &GCPhysFirst);
6727 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
6728 else IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6729
6730 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6731 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6732 if (fAccess & IEM_ACCESS_TYPE_READ)
6733 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6734
6735 void *pvMem;
6736 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6737 if (rcStrict == VINF_SUCCESS)
6738 { /* likely */ }
6739 else
6740 {
6741 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
6742 if (rcStrict == VINF_SUCCESS)
6743 return pvMem;
6744 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6745 }
6746
6747#endif /* !IEM_WITH_DATA_TLB */
6748
6749 /*
6750 * Fill in the mapping table entry.
6751 */
6752 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
6753 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
6754 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6755 pVCpu->iem.s.cActiveMappings++;
6756
6757 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6758 return pvMem;
6759}
6760
6761
6762/**
6763 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
6764 *
6765 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6766 * @param pvMem The mapping.
6767 * @param fAccess The kind of access.
6768 */
6769void iemMemCommitAndUnmapJmp(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
6770{
6771 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
6772 AssertStmt(iMemMap >= 0, IEM_DO_LONGJMP(pVCpu, iMemMap));
6773
6774 /* If it's bounce buffered, we may need to write back the buffer. */
6775 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6776 {
6777 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6778 {
6779 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
6780 if (rcStrict == VINF_SUCCESS)
6781 return;
6782 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6783 }
6784 }
6785 /* Otherwise unlock it. */
6786 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6787 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6788
6789 /* Free the entry. */
6790 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6791 Assert(pVCpu->iem.s.cActiveMappings != 0);
6792 pVCpu->iem.s.cActiveMappings--;
6793}
6794
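/*
 * Illustrative sketch of the longjmp flavour (hypothetical caller; same
 * stand-in names as above). Errors are thrown via longjmp, so there is no
 * status code to check:
 *
 *      uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), X86_SREG_DS,
 *                                                               GCPtrEff, IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
 *      uint32_t const  uValue  = *pu32Src;
 *      iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
 */
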
6795#endif /* IEM_WITH_SETJMP */
6796
6797#ifndef IN_RING3
6798/**
6799 * Commits the guest memory if bounce buffered and unmaps it, if any bounce
6800 * buffer part shows trouble it will be postponed to ring-3 (sets FF and stuff).
6801 *
6802 * Allows the instruction to be completed and retired, while the IEM user will
6803 * return to ring-3 immediately afterwards and do the postponed writes there.
6804 *
6805 * @returns VBox status code (no strict statuses). Caller must check
6806 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
6807 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6808 * @param pvMem The mapping.
6809 * @param fAccess The kind of access.
6810 */
6811VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) RT_NOEXCEPT
6812{
6813 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
6814 AssertReturn(iMemMap >= 0, iMemMap);
6815
6816 /* If it's bounce buffered, we may need to write back the buffer. */
6817 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6818 {
6819 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6820 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
6821 }
6822 /* Otherwise unlock it. */
6823 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6824 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6825
6826 /* Free the entry. */
6827 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6828 Assert(pVCpu->iem.s.cActiveMappings != 0);
6829 pVCpu->iem.s.cActiveMappings--;
6830 return VINF_SUCCESS;
6831}
6832#endif
6833
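/*
 * Illustrative sketch (hypothetical ring-0 caller): as noted above, the
 * postponing variant requires checking the IEM force-action flag before
 * repeating string instructions and the like:
 *
 *      VBOXSTRICTRC rcStrict = iemMemCommitAndUnmapPostponeTroubleToR3(pVCpu, pvMem, IEM_ACCESS_DATA_W);
 *      if (   rcStrict == VINF_SUCCESS
 *          && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
 *      {
 *          // okay to iterate the next string element here
 *      }
 */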
6834
6835/**
6836 * Rolls back mappings, releasing page locks and such.
6837 *
6838 * The caller shall only call this after checking cActiveMappings.
6839 *
6840 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6841 */
6842void iemMemRollback(PVMCPUCC pVCpu) RT_NOEXCEPT
6843{
6844 Assert(pVCpu->iem.s.cActiveMappings > 0);
6845
6846 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
6847 while (iMemMap-- > 0)
6848 {
6849 uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
6850 if (fAccess != IEM_ACCESS_INVALID)
6851 {
6852 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
6853 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6854 if (!(fAccess & (IEM_ACCESS_BOUNCE_BUFFERED | IEM_ACCESS_NOT_LOCKED)))
6855 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6856 AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
6857 ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
6858 iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
6859 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
6860 pVCpu->iem.s.cActiveMappings--;
6861 }
6862 }
6863}
6864
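/*
 * Illustrative sketch (hypothetical dispatch caller): per the note above, the
 * rollback is only performed after checking the active mapping count:
 *
 *      if (rcStrict != VINF_SUCCESS && pVCpu->iem.s.cActiveMappings > 0)
 *          iemMemRollback(pVCpu);
 */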
6865
6866/**
6867 * Fetches a data byte.
6868 *
6869 * @returns Strict VBox status code.
6870 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6871 * @param pu8Dst Where to return the byte.
6872 * @param iSegReg The index of the segment register to use for
6873 * this access. The base and limits are checked.
6874 * @param GCPtrMem The address of the guest memory.
6875 */
6876VBOXSTRICTRC iemMemFetchDataU8(PVMCPUCC pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6877{
6878 /* The lazy approach for now... */
6879 uint8_t const *pu8Src;
6880 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 0);
6881 if (rc == VINF_SUCCESS)
6882 {
6883 *pu8Dst = *pu8Src;
6884 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
6885 }
6886 return rc;
6887}
6888
6889
6890#ifdef IEM_WITH_SETJMP
6891/**
6892 * Fetches a data byte, longjmp on error.
6893 *
6894 * @returns The byte.
6895 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6896 * @param iSegReg The index of the segment register to use for
6897 * this access. The base and limits are checked.
6898 * @param GCPtrMem The address of the guest memory.
6899 */
6900uint8_t iemMemFetchDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
6901{
6902 /* The lazy approach for now... */
6903 uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 0);
6904 uint8_t const bRet = *pu8Src;
6905 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
6906 return bRet;
6907}
6908#endif /* IEM_WITH_SETJMP */
6909
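/*
 * Illustrative sketch (hypothetical instruction emulation; GCPtrEff is a
 * stand-in for the decoded effective address): the data fetch helpers take a
 * segment register index plus the guest address and hide the map/unmap dance:
 *
 *      uint8_t      bValue   = 0;
 *      VBOXSTRICTRC rcStrict = iemMemFetchDataU8(pVCpu, &bValue, X86_SREG_DS, GCPtrEff);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 */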
6910
6911/**
6912 * Fetches a data word.
6913 *
6914 * @returns Strict VBox status code.
6915 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6916 * @param pu16Dst Where to return the word.
6917 * @param iSegReg The index of the segment register to use for
6918 * this access. The base and limits are checked.
6919 * @param GCPtrMem The address of the guest memory.
6920 */
6921VBOXSTRICTRC iemMemFetchDataU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6922{
6923 /* The lazy approach for now... */
6924 uint16_t const *pu16Src;
6925 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem,
6926 IEM_ACCESS_DATA_R, sizeof(*pu16Src) - 1);
6927 if (rc == VINF_SUCCESS)
6928 {
6929 *pu16Dst = *pu16Src;
6930 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
6931 }
6932 return rc;
6933}
6934
6935
6936#ifdef IEM_WITH_SETJMP
6937/**
6938 * Fetches a data word, longjmp on error.
6939 *
6940 * @returns The word
6941 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6942 * @param iSegReg The index of the segment register to use for
6943 * this access. The base and limits are checked.
6944 * @param GCPtrMem The address of the guest memory.
6945 */
6946uint16_t iemMemFetchDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
6947{
6948 /* The lazy approach for now... */
6949 uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
6950 sizeof(*pu16Src) - 1);
6951 uint16_t const u16Ret = *pu16Src;
6952 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
6953 return u16Ret;
6954}
6955#endif
6956
6957
6958/**
6959 * Fetches a data dword.
6960 *
6961 * @returns Strict VBox status code.
6962 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6963 * @param pu32Dst Where to return the dword.
6964 * @param iSegReg The index of the segment register to use for
6965 * this access. The base and limits are checked.
6966 * @param GCPtrMem The address of the guest memory.
6967 */
6968VBOXSTRICTRC iemMemFetchDataU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6969{
6970 /* The lazy approach for now... */
6971 uint32_t const *pu32Src;
6972 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem,
6973 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
6974 if (rc == VINF_SUCCESS)
6975 {
6976 *pu32Dst = *pu32Src;
6977 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
6978 }
6979 return rc;
6980}
6981
6982
6983/**
6984 * Fetches a data dword and zero extends it to a qword.
6985 *
6986 * @returns Strict VBox status code.
6987 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6988 * @param pu64Dst Where to return the qword.
6989 * @param iSegReg The index of the segment register to use for
6990 * this access. The base and limits are checked.
6991 * @param GCPtrMem The address of the guest memory.
6992 */
6993VBOXSTRICTRC iemMemFetchDataU32_ZX_U64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6994{
6995 /* The lazy approach for now... */
6996 uint32_t const *pu32Src;
6997 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem,
6998 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
6999 if (rc == VINF_SUCCESS)
7000 {
7001 *pu64Dst = *pu32Src;
7002 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
7003 }
7004 return rc;
7005}
7006
7007
7008#ifdef IEM_WITH_SETJMP
7009
7010/**
7011 * Fetches a data dword, longjmp on error, fallback/safe version.
7012 *
7013 * @returns The dword
7014 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7015 * @param iSegReg The index of the segment register to use for
7016 * this access. The base and limits are checked.
7017 * @param GCPtrMem The address of the guest memory.
7018 */
7019uint32_t iemMemFetchDataU32SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7020{
7021 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7022 sizeof(*pu32Src) - 1);
7023 uint32_t const u32Ret = *pu32Src;
7024 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
7025 return u32Ret;
7026}
7027
7028
7029/**
7030 * Fetches a data dword, longjmp on error.
7031 *
7032 * @returns The dword
7033 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7034 * @param iSegReg The index of the segment register to use for
7035 * this access. The base and limits are checked.
7036 * @param GCPtrMem The address of the guest memory.
7037 */
7038uint32_t iemMemFetchDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7039{
7040# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
7041 /*
7042 * Convert from segmented to flat address and check that it doesn't cross a page boundary.
7043 */
7044 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(uint32_t), GCPtrMem);
7045 if (RT_LIKELY((GCPtrEff & GUEST_PAGE_OFFSET_MASK) <= GUEST_PAGE_SIZE - sizeof(uint32_t)))
7046 {
7047 /*
7048 * TLB lookup.
7049 */
7050 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrEff);
7051 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
7052 if (pTlbe->uTag == uTag)
7053 {
7054 /*
7055 * Check TLB page table level access flags.
7056 */
7057 uint64_t const fNoUser = IEM_GET_CPL(pVCpu) == 3 ? IEMTLBE_F_PT_NO_USER : 0;
7058 if ( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
7059 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_NO_MAPPINGR3 | fNoUser))
7060 == pVCpu->iem.s.DataTlb.uTlbPhysRev)
7061 {
7062 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
7063
7064 /*
7065 * Alignment check:
7066 */
7067 /** @todo check priority \#AC vs \#PF */
7068 if ( !(GCPtrEff & (sizeof(uint32_t) - 1))
7069 || !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_AM)
7070 || !pVCpu->cpum.GstCtx.eflags.Bits.u1AC
7071 || IEM_GET_CPL(pVCpu) != 3)
7072 {
7073 /*
7074 * Fetch and return the dword
7075 */
7076 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
7077 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
7078 return *(uint32_t const *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK];
7079 }
7080 Log10(("iemMemFetchDataU32Jmp: Raising #AC for %RGv\n", GCPtrEff));
7081 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
7082 }
7083 }
7084 }
7085
7086 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
7087 outdated page pointer, or other troubles. */
7088 Log10(("iemMemFetchDataU32Jmp: %u:%RGv fallback\n", iSegReg, GCPtrMem));
7089 return iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
7090
7091# else
7092 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem,
7093 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
7094 uint32_t const u32Ret = *pu32Src;
7095 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
7096 return u32Ret;
7097# endif
7098}
7099#endif
7100
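/*
 * Worked example for the inlined alignment gate above (illustrative only): a
 * dword read at GCPtrEff=0x1003 has (GCPtrEff & 3) = 3, i.e. it is misaligned,
 * yet the direct fetch is still taken unless CR0.AM, EFLAGS.AC and CPL=3 all
 * hold, in which case #AC is raised via the Jmp helper instead.
 */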
7101
7102#ifdef SOME_UNUSED_FUNCTION
7103/**
7104 * Fetches a data dword and sign extends it to a qword.
7105 *
7106 * @returns Strict VBox status code.
7107 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7108 * @param pu64Dst Where to return the sign extended value.
7109 * @param iSegReg The index of the segment register to use for
7110 * this access. The base and limits are checked.
7111 * @param GCPtrMem The address of the guest memory.
7112 */
7113VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7114{
7115 /* The lazy approach for now... */
7116 int32_t const *pi32Src;
7117 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem,
7118 IEM_ACCESS_DATA_R, sizeof(*pi32Src) - 1);
7119 if (rc == VINF_SUCCESS)
7120 {
7121 *pu64Dst = *pi32Src;
7122 rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
7123 }
7124#ifdef __GNUC__ /* warning: GCC may be a royal pain */
7125 else
7126 *pu64Dst = 0;
7127#endif
7128 return rc;
7129}
7130#endif
7131
7132
7133/**
7134 * Fetches a data qword.
7135 *
7136 * @returns Strict VBox status code.
7137 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7138 * @param pu64Dst Where to return the qword.
7139 * @param iSegReg The index of the segment register to use for
7140 * this access. The base and limits are checked.
7141 * @param GCPtrMem The address of the guest memory.
7142 */
7143VBOXSTRICTRC iemMemFetchDataU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7144{
7145 /* The lazy approach for now... */
7146 uint64_t const *pu64Src;
7147 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem,
7148 IEM_ACCESS_DATA_R, sizeof(*pu64Src) - 1);
7149 if (rc == VINF_SUCCESS)
7150 {
7151 *pu64Dst = *pu64Src;
7152 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
7153 }
7154 return rc;
7155}
7156
7157
7158#ifdef IEM_WITH_SETJMP
7159/**
7160 * Fetches a data qword, longjmp on error.
7161 *
7162 * @returns The qword.
7163 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7164 * @param iSegReg The index of the segment register to use for
7165 * this access. The base and limits are checked.
7166 * @param GCPtrMem The address of the guest memory.
7167 */
7168uint64_t iemMemFetchDataU64Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7169{
7170 /* The lazy approach for now... */
7171 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem,
7172 IEM_ACCESS_DATA_R, sizeof(*pu64Src) - 1);
7173 uint64_t const u64Ret = *pu64Src;
7174 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
7175 return u64Ret;
7176}
7177#endif
7178
7179
7180/**
7181 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
7182 *
7183 * @returns Strict VBox status code.
7184 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7185 * @param pu64Dst Where to return the qword.
7186 * @param iSegReg The index of the segment register to use for
7187 * this access. The base and limits are checked.
7188 * @param GCPtrMem The address of the guest memory.
7189 */
7190VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7191{
7192 /* The lazy approach for now... */
7193 uint64_t const *pu64Src;
7194 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem,
7195 IEM_ACCESS_DATA_R, 15 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7196 if (rc == VINF_SUCCESS)
7197 {
7198 *pu64Dst = *pu64Src;
7199 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
7200 }
7201 return rc;
7202}
7203
7204
7205#ifdef IEM_WITH_SETJMP
7206/**
7207 * Fetches a data qword, aligned at a 16 byte boundary (for SSE), longjmp on error.
7208 *
7209 * @returns The qword.
7210 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7211 * @param iSegReg The index of the segment register to use for
7212 * this access. The base and limits are checked.
7213 * @param GCPtrMem The address of the guest memory.
7214 */
7215uint64_t iemMemFetchDataU64AlignedU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7216{
7217 /* The lazy approach for now... */
7218 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7219 15 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7220 uint64_t const u64Ret = *pu64Src;
7221 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
7222 return u64Ret;
7223}
7224#endif
7225
7226
7227/**
7228 * Fetches a data tword.
7229 *
7230 * @returns Strict VBox status code.
7231 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7232 * @param pr80Dst Where to return the tword.
7233 * @param iSegReg The index of the segment register to use for
7234 * this access. The base and limits are checked.
7235 * @param GCPtrMem The address of the guest memory.
7236 */
7237VBOXSTRICTRC iemMemFetchDataR80(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7238{
7239 /* The lazy approach for now... */
7240 PCRTFLOAT80U pr80Src;
7241 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 7);
7242 if (rc == VINF_SUCCESS)
7243 {
7244 *pr80Dst = *pr80Src;
7245 rc = iemMemCommitAndUnmap(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
7246 }
7247 return rc;
7248}
7249
7250
7251#ifdef IEM_WITH_SETJMP
7252/**
7253 * Fetches a data tword, longjmp on error.
7254 *
7255 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7256 * @param pr80Dst Where to return the tword.
7257 * @param iSegReg The index of the segment register to use for
7258 * this access. The base and limits are checked.
7259 * @param GCPtrMem The address of the guest memory.
7260 */
7261void iemMemFetchDataR80Jmp(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7262{
7263 /* The lazy approach for now... */
7264 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 7);
7265 *pr80Dst = *pr80Src;
7266 iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
7267}
7268#endif
7269
7270
7271/**
7272 * Fetches a data decimal tword.
7273 *
7274 * @returns Strict VBox status code.
7275 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7276 * @param pd80Dst Where to return the tword.
7277 * @param iSegReg The index of the segment register to use for
7278 * this access. The base and limits are checked.
7279 * @param GCPtrMem The address of the guest memory.
7280 */
7281VBOXSTRICTRC iemMemFetchDataD80(PVMCPUCC pVCpu, PRTPBCD80U pd80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7282{
7283 /* The lazy approach for now... */
7284 PCRTPBCD80U pd80Src;
7285 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pd80Src, sizeof(*pd80Src), iSegReg, GCPtrMem,
7286 IEM_ACCESS_DATA_R, 7 /** @todo FBLD alignment check */);
7287 if (rc == VINF_SUCCESS)
7288 {
7289 *pd80Dst = *pd80Src;
7290 rc = iemMemCommitAndUnmap(pVCpu, (void *)pd80Src, IEM_ACCESS_DATA_R);
7291 }
7292 return rc;
7293}
7294
7295
7296#ifdef IEM_WITH_SETJMP
7297/**
7298 * Fetches a data decimal tword, longjmp on error.
7299 *
7300 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7301 * @param pd80Dst Where to return the tword.
7302 * @param iSegReg The index of the segment register to use for
7303 * this access. The base and limits are checked.
7304 * @param GCPtrMem The address of the guest memory.
7305 */
7306void iemMemFetchDataD80Jmp(PVMCPUCC pVCpu, PRTPBCD80U pd80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7307{
7308 /* The lazy approach for now... */
7309 PCRTPBCD80U pd80Src = (PCRTPBCD80U)iemMemMapJmp(pVCpu, sizeof(*pd80Src), iSegReg, GCPtrMem,
7310 IEM_ACCESS_DATA_R, 7 /** @todo FBSTP alignment check */);
7311 *pd80Dst = *pd80Src;
7312 iemMemCommitAndUnmapJmp(pVCpu, (void *)pd80Src, IEM_ACCESS_DATA_R);
7313}
7314#endif
7315
7316
7317/**
7318 * Fetches a data dqword (double qword), generally SSE related.
7319 *
7320 * @returns Strict VBox status code.
7321 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7322 * @param pu128Dst Where to return the dqword.
7323 * @param iSegReg The index of the segment register to use for
7324 * this access. The base and limits are checked.
7325 * @param GCPtrMem The address of the guest memory.
7326 */
7327VBOXSTRICTRC iemMemFetchDataU128(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7328{
7329 /* The lazy approach for now... */
7330 PCRTUINT128U pu128Src;
7331 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem,
7332 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7333 if (rc == VINF_SUCCESS)
7334 {
7335 pu128Dst->au64[0] = pu128Src->au64[0];
7336 pu128Dst->au64[1] = pu128Src->au64[1];
7337 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7338 }
7339 return rc;
7340}
7341
7342
7343#ifdef IEM_WITH_SETJMP
7344/**
7345 * Fetches a data dqword (double qword), generally SSE related.
7346 *
7347 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7348 * @param pu128Dst Where to return the dqword.
7349 * @param iSegReg The index of the segment register to use for
7350 * this access. The base and limits are checked.
7351 * @param GCPtrMem The address of the guest memory.
7352 */
7353void iemMemFetchDataU128Jmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7354{
7355 /* The lazy approach for now... */
7356 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem,
7357 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7358 pu128Dst->au64[0] = pu128Src->au64[0];
7359 pu128Dst->au64[1] = pu128Src->au64[1];
7360 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7361}
7362#endif
7363
7364
7365/**
7366 * Fetches a data dqword (double qword) at an aligned address, generally SSE
7367 * related.
7368 *
7369 * Raises \#GP(0) if not aligned.
7370 *
7371 * @returns Strict VBox status code.
7372 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7373 * @param pu128Dst Where to return the dqword.
7374 * @param iSegReg The index of the segment register to use for
7375 * this access. The base and limits are checked.
7376 * @param GCPtrMem The address of the guest memory.
7377 */
7378VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7379{
7380 /* The lazy approach for now... */
7381 PCRTUINT128U pu128Src;
7382 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem,
7383 IEM_ACCESS_DATA_R, (sizeof(*pu128Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7384 if (rc == VINF_SUCCESS)
7385 {
7386 pu128Dst->au64[0] = pu128Src->au64[0];
7387 pu128Dst->au64[1] = pu128Src->au64[1];
7388 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7389 }
7390 return rc;
7391}
7392
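/*
 * Worked example of the alignment control used above (illustrative only): the
 * value (sizeof(*pu128Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE
 * puts the mask 15 into bits 15:0, so any address with one of the low four bits
 * set is misaligned, and the flags in bits 31:16 turn that into #GP(0) rather
 * than #AC, unless MXCSR.MM is set, in which case the normal #AC rules apply.
 */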
7393
7394#ifdef IEM_WITH_SETJMP
7395/**
7396 * Fetches a data dqword (double qword) at an aligned address, generally SSE
7397 * related, longjmp on error.
7398 *
7399 * Raises \#GP(0) if not aligned.
7400 *
7401 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7402 * @param pu128Dst Where to return the dqword.
7403 * @param iSegReg The index of the segment register to use for
7404 * this access. The base and limits are checked.
7405 * @param GCPtrMem The address of the guest memory.
7406 */
7407void iemMemFetchDataU128AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg,
7408 RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7409{
7410 /* The lazy approach for now... */
7411 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7412 (sizeof(*pu128Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7413 pu128Dst->au64[0] = pu128Src->au64[0];
7414 pu128Dst->au64[1] = pu128Src->au64[1];
7415 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7416}
7417#endif
7418
7419
7420/**
7421 * Fetches a data oword (octo word), generally AVX related.
7422 *
7423 * @returns Strict VBox status code.
7424 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7425 * @param pu256Dst Where to return the oword.
7426 * @param iSegReg The index of the segment register to use for
7427 * this access. The base and limits are checked.
7428 * @param GCPtrMem The address of the guest memory.
7429 */
7430VBOXSTRICTRC iemMemFetchDataU256(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7431{
7432 /* The lazy approach for now... */
7433 PCRTUINT256U pu256Src;
7434 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem,
7435 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7436 if (rc == VINF_SUCCESS)
7437 {
7438 pu256Dst->au64[0] = pu256Src->au64[0];
7439 pu256Dst->au64[1] = pu256Src->au64[1];
7440 pu256Dst->au64[2] = pu256Src->au64[2];
7441 pu256Dst->au64[3] = pu256Src->au64[3];
7442 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7443 }
7444 return rc;
7445}
7446
7447
7448#ifdef IEM_WITH_SETJMP
7449/**
7450 * Fetches a data oword (octo word), generally AVX related, longjmp on error.
7451 *
7452 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7453 * @param pu256Dst Where to return the oword.
7454 * @param iSegReg The index of the segment register to use for
7455 * this access. The base and limits are checked.
7456 * @param GCPtrMem The address of the guest memory.
7457 */
7458void iemMemFetchDataU256Jmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7459{
7460 /* The lazy approach for now... */
7461 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem,
7462 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7463 pu256Dst->au64[0] = pu256Src->au64[0];
7464 pu256Dst->au64[1] = pu256Src->au64[1];
7465 pu256Dst->au64[2] = pu256Src->au64[2];
7466 pu256Dst->au64[3] = pu256Src->au64[3];
7467 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7468}
7469#endif
7470
7471
7472/**
7473 * Fetches a data oword (octo word) at an aligned address, generally AVX
7474 * related.
7475 *
7476 * Raises \#GP(0) if not aligned.
7477 *
7478 * @returns Strict VBox status code.
7479 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7480 * @param pu256Dst Where to return the oword.
7481 * @param iSegReg The index of the segment register to use for
7482 * this access. The base and limits are checked.
7483 * @param GCPtrMem The address of the guest memory.
7484 */
7485VBOXSTRICTRC iemMemFetchDataU256AlignedSse(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7486{
7487 /* The lazy approach for now... */
7488 PCRTUINT256U pu256Src;
7489 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem,
7490 IEM_ACCESS_DATA_R, (sizeof(*pu256Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7491 if (rc == VINF_SUCCESS)
7492 {
7493 pu256Dst->au64[0] = pu256Src->au64[0];
7494 pu256Dst->au64[1] = pu256Src->au64[1];
7495 pu256Dst->au64[2] = pu256Src->au64[2];
7496 pu256Dst->au64[3] = pu256Src->au64[3];
7497 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7498 }
7499 return rc;
7500}
7501
7502
7503#ifdef IEM_WITH_SETJMP
7504/**
7505 * Fetches a data oword (octo word) at an aligned address, generally AVX
7506 * related, longjmp on error.
7507 *
7508 * Raises \#GP(0) if not aligned.
7509 *
7510 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7511 * @param pu256Dst Where to return the oword.
7512 * @param iSegReg The index of the segment register to use for
7513 * this access. The base and limits are checked.
7514 * @param GCPtrMem The address of the guest memory.
7515 */
7516void iemMemFetchDataU256AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg,
7517 RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7518{
7519 /* The lazy approach for now... */
7520 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7521 (sizeof(*pu256Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7522 pu256Dst->au64[0] = pu256Src->au64[0];
7523 pu256Dst->au64[1] = pu256Src->au64[1];
7524 pu256Dst->au64[2] = pu256Src->au64[2];
7525 pu256Dst->au64[3] = pu256Src->au64[3];
7526 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7527}
7528#endif
7529
7530
7531
7532/**
7533 * Fetches a descriptor register (lgdt, lidt).
7534 *
7535 * @returns Strict VBox status code.
7536 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7537 * @param pcbLimit Where to return the limit.
7538 * @param pGCPtrBase Where to return the base.
7539 * @param iSegReg The index of the segment register to use for
7540 * this access. The base and limits are checked.
7541 * @param GCPtrMem The address of the guest memory.
7542 * @param enmOpSize The effective operand size.
7543 */
7544VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPUCC pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
7545 RTGCPTR GCPtrMem, IEMMODE enmOpSize) RT_NOEXCEPT
7546{
7547 /*
7548 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
7549 * little special:
7550 * - The two reads are done separately.
7551 * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit code.
7552 * - We suspect the 386 to actually commit the limit before the base in
7553 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
7554 * don't try to emulate this eccentric behavior, because it's not well
7555 * enough understood and rather hard to trigger.
7556 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
7557 */
7558 VBOXSTRICTRC rcStrict;
7559 if (IEM_IS_64BIT_CODE(pVCpu))
7560 {
7561 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7562 if (rcStrict == VINF_SUCCESS)
7563 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
7564 }
7565 else
7566 {
7567 uint32_t uTmp = 0; /* (Initialized to quiet a Visual C++ 'maybe used uninitialized' warning.) */
7568 if (enmOpSize == IEMMODE_32BIT)
7569 {
7570 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
7571 {
7572 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7573 if (rcStrict == VINF_SUCCESS)
7574 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7575 }
7576 else
7577 {
7578 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
7579 if (rcStrict == VINF_SUCCESS)
7580 {
7581 *pcbLimit = (uint16_t)uTmp;
7582 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7583 }
7584 }
7585 if (rcStrict == VINF_SUCCESS)
7586 *pGCPtrBase = uTmp;
7587 }
7588 else
7589 {
7590 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7591 if (rcStrict == VINF_SUCCESS)
7592 {
7593 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7594 if (rcStrict == VINF_SUCCESS)
7595 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
7596 }
7597 }
7598 }
7599 return rcStrict;
7600}
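/*
 * A minimal usage sketch of the above (illustration only; pVCpu, GCPtrMem and
 * the DS segment are assumed here, not taken from real decode state):
 *
 *      uint16_t cbLimit;
 *      RTGCPTR  GCPtrBase;
 *      VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pVCpu, &cbLimit, &GCPtrBase,
 *                                                  X86_SREG_DS, GCPtrMem, IEMMODE_16BIT);
 *      // 16-bit operand size: cbLimit = word at GCPtrMem,
 *      //                      GCPtrBase = dword at GCPtrMem+2 masked to 24 bits.
 *      // 32-bit operand size: same reads, but the base keeps all 32 bits
 *      //                      (a 486 target reads the limit as a dword instead).
 *      // 64-bit code:         word limit followed by a full 64-bit base.
 */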
7601
7602
7603
7604/**
7605 * Stores a data byte.
7606 *
7607 * @returns Strict VBox status code.
7608 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7609 * @param iSegReg The index of the segment register to use for
7610 * this access. The base and limits are checked.
7611 * @param GCPtrMem The address of the guest memory.
7612 * @param u8Value The value to store.
7613 */
7614VBOXSTRICTRC iemMemStoreDataU8(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value) RT_NOEXCEPT
7615{
7616 /* The lazy approach for now... */
7617 uint8_t *pu8Dst;
7618 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W, 0);
7619 if (rc == VINF_SUCCESS)
7620 {
7621 *pu8Dst = u8Value;
7622 rc = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
7623 }
7624 return rc;
7625}
7626
7627
7628#ifdef IEM_WITH_SETJMP
7629/**
7630 * Stores a data byte, longjmp on error.
7631 *
7632 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7633 * @param iSegReg The index of the segment register to use for
7634 * this access. The base and limits are checked.
7635 * @param GCPtrMem The address of the guest memory.
7636 * @param u8Value The value to store.
7637 */
7638void iemMemStoreDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value) IEM_NOEXCEPT_MAY_LONGJMP
7639{
7640 /* The lazy approach for now... */
7641 uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W, 0);
7642 *pu8Dst = u8Value;
7643 iemMemCommitAndUnmapJmp(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
7644}
7645#endif
7646
7647
7648/**
7649 * Stores a data word.
7650 *
7651 * @returns Strict VBox status code.
7652 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7653 * @param iSegReg The index of the segment register to use for
7654 * this access. The base and limits are checked.
7655 * @param GCPtrMem The address of the guest memory.
7656 * @param u16Value The value to store.
7657 */
7658VBOXSTRICTRC iemMemStoreDataU16(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value) RT_NOEXCEPT
7659{
7660 /* The lazy approach for now... */
7661 uint16_t *pu16Dst;
7662 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem,
7663 IEM_ACCESS_DATA_W, sizeof(*pu16Dst) - 1);
7664 if (rc == VINF_SUCCESS)
7665 {
7666 *pu16Dst = u16Value;
7667 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
7668 }
7669 return rc;
7670}
7671
7672
7673#ifdef IEM_WITH_SETJMP
7674/**
7675 * Stores a data word, longjmp on error.
7676 *
7677 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7678 * @param iSegReg The index of the segment register to use for
7679 * this access. The base and limits are checked.
7680 * @param GCPtrMem The address of the guest memory.
7681 * @param u16Value The value to store.
7682 */
7683void iemMemStoreDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value) IEM_NOEXCEPT_MAY_LONGJMP
7684{
7685 /* The lazy approach for now... */
7686 uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem,
7687 IEM_ACCESS_DATA_W, sizeof(*pu16Dst) - 1);
7688 *pu16Dst = u16Value;
7689 iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
7690}
7691#endif
7692
7693
7694/**
7695 * Stores a data dword.
7696 *
7697 * @returns Strict VBox status code.
7698 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7699 * @param iSegReg The index of the segment register to use for
7700 * this access. The base and limits are checked.
7701 * @param GCPtrMem The address of the guest memory.
7702 * @param u32Value The value to store.
7703 */
7704VBOXSTRICTRC iemMemStoreDataU32(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value) RT_NOEXCEPT
7705{
7706 /* The lazy approach for now... */
7707 uint32_t *pu32Dst;
7708 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem,
7709 IEM_ACCESS_DATA_W, sizeof(*pu32Dst) - 1);
7710 if (rc == VINF_SUCCESS)
7711 {
7712 *pu32Dst = u32Value;
7713 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
7714 }
7715 return rc;
7716}
7717
7718
7719#ifdef IEM_WITH_SETJMP
7720/**
7721 * Stores a data dword, longjmp on error.
7722 *
7724 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7725 * @param iSegReg The index of the segment register to use for
7726 * this access. The base and limits are checked.
7727 * @param GCPtrMem The address of the guest memory.
7728 * @param u32Value The value to store.
7729 */
7730void iemMemStoreDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value) IEM_NOEXCEPT_MAY_LONGJMP
7731{
7732 /* The lazy approach for now... */
7733 uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem,
7734 IEM_ACCESS_DATA_W, sizeof(*pu32Dst) - 1);
7735 *pu32Dst = u32Value;
7736 iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
7737}
7738#endif
7739
7740
7741/**
7742 * Stores a data qword.
7743 *
7744 * @returns Strict VBox status code.
7745 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7746 * @param iSegReg The index of the segment register to use for
7747 * this access. The base and limits are checked.
7748 * @param GCPtrMem The address of the guest memory.
7749 * @param u64Value The value to store.
7750 */
7751VBOXSTRICTRC iemMemStoreDataU64(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value) RT_NOEXCEPT
7752{
7753 /* The lazy approach for now... */
7754 uint64_t *pu64Dst;
7755 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem,
7756 IEM_ACCESS_DATA_W, sizeof(*pu64Dst) - 1);
7757 if (rc == VINF_SUCCESS)
7758 {
7759 *pu64Dst = u64Value;
7760 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
7761 }
7762 return rc;
7763}
7764
7765
7766#ifdef IEM_WITH_SETJMP
7767/**
7768 * Stores a data qword, longjmp on error.
7769 *
7770 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7771 * @param iSegReg The index of the segment register to use for
7772 * this access. The base and limits are checked.
7773 * @param GCPtrMem The address of the guest memory.
7774 * @param u64Value The value to store.
7775 */
7776void iemMemStoreDataU64Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value) IEM_NOEXCEPT_MAY_LONGJMP
7777{
7778 /* The lazy approach for now... */
7779 uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem,
7780 IEM_ACCESS_DATA_W, sizeof(*pu64Dst) - 1);
7781 *pu64Dst = u64Value;
7782 iemMemCommitAndUnmapJmp(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
7783}
7784#endif
7785
7786
7787/**
7788 * Stores a data dqword.
7789 *
7790 * @returns Strict VBox status code.
7791 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7792 * @param iSegReg The index of the segment register to use for
7793 * this access. The base and limits are checked.
7794 * @param GCPtrMem The address of the guest memory.
7795 * @param u128Value The value to store.
7796 */
7797VBOXSTRICTRC iemMemStoreDataU128(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7798{
7799 /* The lazy approach for now... */
7800 PRTUINT128U pu128Dst;
7801 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem,
7802 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7803 if (rc == VINF_SUCCESS)
7804 {
7805 pu128Dst->au64[0] = u128Value.au64[0];
7806 pu128Dst->au64[1] = u128Value.au64[1];
7807 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7808 }
7809 return rc;
7810}
7811
7812
7813#ifdef IEM_WITH_SETJMP
7814/**
7815 * Stores a data dqword, longjmp on error.
7816 *
7817 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7818 * @param iSegReg The index of the segment register to use for
7819 * this access. The base and limits are checked.
7820 * @param GCPtrMem The address of the guest memory.
7821 * @param u128Value The value to store.
7822 */
7823void iemMemStoreDataU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP
7824{
7825 /* The lazy approach for now... */
7826 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem,
7827 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7828 pu128Dst->au64[0] = u128Value.au64[0];
7829 pu128Dst->au64[1] = u128Value.au64[1];
7830 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7831}
7832#endif
7833
7834
7835/**
7836 * Stores a data dqword, SSE aligned.
7837 *
7838 * @returns Strict VBox status code.
7839 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7840 * @param iSegReg The index of the segment register to use for
7841 * this access. The base and limits are checked.
7842 * @param GCPtrMem The address of the guest memory.
7843 * @param u128Value The value to store.
7844 */
7845VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7846{
7847 /* The lazy approach for now... */
7848 PRTUINT128U pu128Dst;
7849 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
7850 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7851 if (rc == VINF_SUCCESS)
7852 {
7853 pu128Dst->au64[0] = u128Value.au64[0];
7854 pu128Dst->au64[1] = u128Value.au64[1];
7855 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7856 }
7857 return rc;
7858}
7859
7860
7861#ifdef IEM_WITH_SETJMP
7862/**
7863 * Stores a data dqword, SSE aligned, longjmp on error.
7864 *
7866 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7867 * @param iSegReg The index of the segment register to use for
7868 * this access. The base and limits are checked.
7869 * @param GCPtrMem The address of the guest memory.
7870 * @param u128Value The value to store.
7871 */
7872void iemMemStoreDataU128AlignedSseJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
7873 RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP
7874{
7875 /* The lazy approach for now... */
7876 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
7877 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7878 pu128Dst->au64[0] = u128Value.au64[0];
7879 pu128Dst->au64[1] = u128Value.au64[1];
7880 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7881}
7882#endif
7883
7884
7885/**
7886 * Stores a data oword (octo word).
7887 *
7888 * @returns Strict VBox status code.
7889 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7890 * @param iSegReg The index of the segment register to use for
7891 * this access. The base and limits are checked.
7892 * @param GCPtrMem The address of the guest memory.
7893 * @param pu256Value Pointer to the value to store.
7894 */
7895VBOXSTRICTRC iemMemStoreDataU256(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7896{
7897 /* The lazy approach for now... */
7898 PRTUINT256U pu256Dst;
7899 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7900 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7901 if (rc == VINF_SUCCESS)
7902 {
7903 pu256Dst->au64[0] = pu256Value->au64[0];
7904 pu256Dst->au64[1] = pu256Value->au64[1];
7905 pu256Dst->au64[2] = pu256Value->au64[2];
7906 pu256Dst->au64[3] = pu256Value->au64[3];
7907 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
7908 }
7909 return rc;
7910}
7911
7912
7913#ifdef IEM_WITH_SETJMP
7914/**
7915 * Stores a data oword (octo word), longjmp on error.
7916 *
7917 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7918 * @param iSegReg The index of the segment register to use for
7919 * this access. The base and limits are checked.
7920 * @param GCPtrMem The address of the guest memory.
7921 * @param pu256Value Pointer to the value to store.
7922 */
7923void iemMemStoreDataU256Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP
7924{
7925 /* The lazy approach for now... */
7926 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7927 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7928 pu256Dst->au64[0] = pu256Value->au64[0];
7929 pu256Dst->au64[1] = pu256Value->au64[1];
7930 pu256Dst->au64[2] = pu256Value->au64[2];
7931 pu256Dst->au64[3] = pu256Value->au64[3];
7932 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
7933}
7934#endif
7935
7936
7937/**
7938 * Stores a data oword (octo word), AVX \#GP(0) aligned.
7939 *
7940 * @returns Strict VBox status code.
7941 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7942 * @param iSegReg The index of the segment register to use for
7943 * this access. The base and limits are checked.
7944 * @param GCPtrMem The address of the guest memory.
7945 * @param pu256Value Pointer to the value to store.
7946 */
7947VBOXSTRICTRC iemMemStoreDataU256AlignedAvx(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7948{
7949 /* The lazy approach for now... */
7950 PRTUINT256U pu256Dst;
7951 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7952 IEM_ACCESS_DATA_W, (sizeof(*pu256Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP);
7953 if (rc == VINF_SUCCESS)
7954 {
7955 pu256Dst->au64[0] = pu256Value->au64[0];
7956 pu256Dst->au64[1] = pu256Value->au64[1];
7957 pu256Dst->au64[2] = pu256Value->au64[2];
7958 pu256Dst->au64[3] = pu256Value->au64[3];
7959 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
7960 }
7961 return rc;
7962}
7963
7964
7965#ifdef IEM_WITH_SETJMP
7966/**
7967 * Stores a data oword (octo word), AVX aligned, longjmp on error.
7968 *
7970 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7971 * @param iSegReg The index of the segment register to use for
7972 * this access. The base and limits are checked.
7973 * @param GCPtrMem The address of the guest memory.
7974 * @param pu256Value Pointer to the value to store.
7975 */
7976void iemMemStoreDataU256AlignedAvxJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
7977 PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP
7978{
7979 /* The lazy approach for now... */
7980 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7981 IEM_ACCESS_DATA_W, (sizeof(*pu256Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP);
7982 pu256Dst->au64[0] = pu256Value->au64[0];
7983 pu256Dst->au64[1] = pu256Value->au64[1];
7984 pu256Dst->au64[2] = pu256Value->au64[2];
7985 pu256Dst->au64[3] = pu256Value->au64[3];
7986 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
7987}
7988#endif
7989
7990
7991/**
7992 * Stores a descriptor register (sgdt, sidt).
7993 *
7994 * @returns Strict VBox status code.
7995 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7996 * @param cbLimit The limit.
7997 * @param GCPtrBase The base address.
7998 * @param iSegReg The index of the segment register to use for
7999 * this access. The base and limits are checked.
8000 * @param GCPtrMem The address of the guest memory.
8001 */
8002VBOXSTRICTRC iemMemStoreDataXdtr(PVMCPUCC pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8003{
8004 /*
8005 * The SIDT and SGDT instructions actually store the data using two
8006 * independent writes (see bs3CpuBasic2_sidt_sgdt_One). The instructions
8007 * do not respond to opsize prefixes.
8008 */
8009 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
8010 if (rcStrict == VINF_SUCCESS)
8011 {
8012 if (IEM_IS_16BIT_CODE(pVCpu))
8013 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
8014 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
8015 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
8016 else if (IEM_IS_32BIT_CODE(pVCpu))
8017 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
8018 else
8019 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
8020 }
8021 return rcStrict;
8022}
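/*
 * A sketch of the resulting memory layout (illustration only):
 *
 *      // All modes:    word  at GCPtrMem    = cbLimit
 *      // 16-bit code:  dword at GCPtrMem+2  = base, top byte forced to 0xff on
 *      //               286-class targets as per the code above
 *      // 32-bit code:  dword at GCPtrMem+2  = 32-bit base
 *      // 64-bit code:  qword at GCPtrMem+2  = 64-bit base
 */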
8023
8024
8025/**
8026 * Pushes a word onto the stack.
8027 *
8028 * @returns Strict VBox status code.
8029 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8030 * @param u16Value The value to push.
8031 */
8032VBOXSTRICTRC iemMemStackPushU16(PVMCPUCC pVCpu, uint16_t u16Value) RT_NOEXCEPT
8033{
8034 /* Decrement the stack pointer. */
8035 uint64_t uNewRsp;
8036 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 2, &uNewRsp);
8037
8038 /* Write the word the lazy way. */
8039 uint16_t *pu16Dst;
8040 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop,
8041 IEM_ACCESS_STACK_W, sizeof(*pu16Dst) - 1);
8042 if (rc == VINF_SUCCESS)
8043 {
8044 *pu16Dst = u16Value;
8045 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
8046 }
8047
8048 /* Commit the new RSP value unless an access handler made trouble. */
8049 if (rc == VINF_SUCCESS)
8050 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8051
8052 return rc;
8053}
8054
8055
8056/**
8057 * Pushes a dword onto the stack.
8058 *
8059 * @returns Strict VBox status code.
8060 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8061 * @param u32Value The value to push.
8062 */
8063VBOXSTRICTRC iemMemStackPushU32(PVMCPUCC pVCpu, uint32_t u32Value) RT_NOEXCEPT
8064{
8065 /* Decrement the stack pointer. */
8066 uint64_t uNewRsp;
8067 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
8068
8069 /* Write the dword the lazy way. */
8070 uint32_t *pu32Dst;
8071 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop,
8072 IEM_ACCESS_STACK_W, sizeof(*pu32Dst) - 1);
8073 if (rc == VINF_SUCCESS)
8074 {
8075 *pu32Dst = u32Value;
8076 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
8077 }
8078
8079 /* Commit the new RSP value unless an access handler made trouble. */
8080 if (rc == VINF_SUCCESS)
8081 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8082
8083 return rc;
8084}
8085
8086
8087/**
8088 * Pushes a dword segment register value onto the stack.
8089 *
8090 * @returns Strict VBox status code.
8091 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8092 * @param u32Value The value to push.
8093 */
8094VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPUCC pVCpu, uint32_t u32Value) RT_NOEXCEPT
8095{
8096 /* Decrement the stack pointer. */
8097 uint64_t uNewRsp;
8098 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
8099
8100 /* The Intel docs talk about zero extending the selector register
8101 value. My actual Intel CPU here might be zero extending the value,
8102 but it still only writes the lower word... */
8103 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what
8104 * happens when crossing a page boundary: is the high word checked
8105 * for write accessibility or not? Probably it is. What about segment limits?
8106 * It appears this behavior is also shared with trap error codes.
8107 *
8108 * Docs indicate the behavior changed maybe in Pentium or Pentium Pro. Check
8109 * on ancient hardware to see when it actually changed. */
8110 uint16_t *pu16Dst;
8111 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop,
8112 IEM_ACCESS_STACK_RW, sizeof(*pu16Dst) - 1); /** @todo 2 or 4 alignment check for PUSH SS? */
8113 if (rc == VINF_SUCCESS)
8114 {
8115 *pu16Dst = (uint16_t)u32Value;
8116 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_RW);
8117 }
8118
8119 /* Commit the new RSP value unless an access handler made trouble. */
8120 if (rc == VINF_SUCCESS)
8121 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8122
8123 return rc;
8124}
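/*
 * A sketch of the behaviour implemented above for a 32-bit push of a segment
 * register (illustration only):
 *
 *      // before: RSP = X
 *      // after : RSP = X - 4, word [X-4] = selector value,
 *      //         bytes [X-2..X-1] are mapped read-write but left unwritten.
 */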
8125
8126
8127/**
8128 * Pushes a qword onto the stack.
8129 *
8130 * @returns Strict VBox status code.
8131 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8132 * @param u64Value The value to push.
8133 */
8134VBOXSTRICTRC iemMemStackPushU64(PVMCPUCC pVCpu, uint64_t u64Value) RT_NOEXCEPT
8135{
8136 /* Decrement the stack pointer. */
8137 uint64_t uNewRsp;
8138 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 8, &uNewRsp);
8139
8140 /* Write the qword the lazy way. */
8141 uint64_t *pu64Dst;
8142 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop,
8143 IEM_ACCESS_STACK_W, sizeof(*pu64Dst) - 1);
8144 if (rc == VINF_SUCCESS)
8145 {
8146 *pu64Dst = u64Value;
8147 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
8148 }
8149
8150 /* Commit the new RSP value unless an access handler made trouble. */
8151 if (rc == VINF_SUCCESS)
8152 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8153
8154 return rc;
8155}
8156
8157
8158/**
8159 * Pops a word from the stack.
8160 *
8161 * @returns Strict VBox status code.
8162 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8163 * @param pu16Value Where to store the popped value.
8164 */
8165VBOXSTRICTRC iemMemStackPopU16(PVMCPUCC pVCpu, uint16_t *pu16Value) RT_NOEXCEPT
8166{
8167 /* Increment the stack pointer. */
8168 uint64_t uNewRsp;
8169 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 2, &uNewRsp);
8170
8171 /* Fetch the word the lazy way. */
8172 uint16_t const *pu16Src;
8173 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop,
8174 IEM_ACCESS_STACK_R, sizeof(*pu16Src) - 1);
8175 if (rc == VINF_SUCCESS)
8176 {
8177 *pu16Value = *pu16Src;
8178 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
8179
8180 /* Commit the new RSP value. */
8181 if (rc == VINF_SUCCESS)
8182 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8183 }
8184
8185 return rc;
8186}
8187
8188
8189/**
8190 * Pops a dword from the stack.
8191 *
8192 * @returns Strict VBox status code.
8193 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8194 * @param pu32Value Where to store the popped value.
8195 */
8196VBOXSTRICTRC iemMemStackPopU32(PVMCPUCC pVCpu, uint32_t *pu32Value) RT_NOEXCEPT
8197{
8198 /* Increment the stack pointer. */
8199 uint64_t uNewRsp;
8200 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 4, &uNewRsp);
8201
8202 /* Fetch the dword the lazy way. */
8203 uint32_t const *pu32Src;
8204 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop,
8205 IEM_ACCESS_STACK_R, sizeof(*pu32Src) - 1);
8206 if (rc == VINF_SUCCESS)
8207 {
8208 *pu32Value = *pu32Src;
8209 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
8210
8211 /* Commit the new RSP value. */
8212 if (rc == VINF_SUCCESS)
8213 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8214 }
8215
8216 return rc;
8217}
8218
8219
8220/**
8221 * Pops a qword from the stack.
8222 *
8223 * @returns Strict VBox status code.
8224 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8225 * @param pu64Value Where to store the popped value.
8226 */
8227VBOXSTRICTRC iemMemStackPopU64(PVMCPUCC pVCpu, uint64_t *pu64Value) RT_NOEXCEPT
8228{
8229 /* Increment the stack pointer. */
8230 uint64_t uNewRsp;
8231 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 8, &uNewRsp);
8232
8233 /* Fetch the qword the lazy way. */
8234 uint64_t const *pu64Src;
8235 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop,
8236 IEM_ACCESS_STACK_R, sizeof(*pu64Src) - 1);
8237 if (rc == VINF_SUCCESS)
8238 {
8239 *pu64Value = *pu64Src;
8240 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
8241
8242 /* Commit the new RSP value. */
8243 if (rc == VINF_SUCCESS)
8244 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8245 }
8246
8247 return rc;
8248}
8249
8250
8251/**
8252 * Pushes a word onto the stack, using a temporary stack pointer.
8253 *
8254 * @returns Strict VBox status code.
8255 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8256 * @param u16Value The value to push.
8257 * @param pTmpRsp Pointer to the temporary stack pointer.
8258 */
8259VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPUCC pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8260{
8261 /* Decrement the stack pointer. */
8262 RTUINT64U NewRsp = *pTmpRsp;
8263 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 2);
8264
8265 /* Write the word the lazy way. */
8266 uint16_t *pu16Dst;
8267 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop,
8268 IEM_ACCESS_STACK_W, sizeof(*pu16Dst) - 1);
8269 if (rc == VINF_SUCCESS)
8270 {
8271 *pu16Dst = u16Value;
8272 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
8273 }
8274
8275 /* Commit the new RSP value unless an access handler made trouble. */
8276 if (rc == VINF_SUCCESS)
8277 *pTmpRsp = NewRsp;
8278
8279 return rc;
8280}
8281
8282
8283/**
8284 * Pushes a dword onto the stack, using a temporary stack pointer.
8285 *
8286 * @returns Strict VBox status code.
8287 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8288 * @param u32Value The value to push.
8289 * @param pTmpRsp Pointer to the temporary stack pointer.
8290 */
8291VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPUCC pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8292{
8293 /* Decrement the stack pointer. */
8294 RTUINT64U NewRsp = *pTmpRsp;
8295 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 4);
8296
8297 /* Write the dword the lazy way. */
8298 uint32_t *pu32Dst;
8299 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop,
8300 IEM_ACCESS_STACK_W, sizeof(*pu32Dst) - 1);
8301 if (rc == VINF_SUCCESS)
8302 {
8303 *pu32Dst = u32Value;
8304 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
8305 }
8306
8307 /* Commit the new RSP value unless an access handler made trouble. */
8308 if (rc == VINF_SUCCESS)
8309 *pTmpRsp = NewRsp;
8310
8311 return rc;
8312}
8313
8314
8315/**
8316 * Pushes a qword onto the stack, using a temporary stack pointer.
8317 *
8318 * @returns Strict VBox status code.
8319 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8320 * @param u64Value The value to push.
8321 * @param pTmpRsp Pointer to the temporary stack pointer.
8322 */
8323VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPUCC pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8324{
8325 /* Decrement the stack pointer. */
8326 RTUINT64U NewRsp = *pTmpRsp;
8327 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 8);
8328
8329 /* Write the qword the lazy way. */
8330 uint64_t *pu64Dst;
8331 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop,
8332 IEM_ACCESS_STACK_W, sizeof(*pu64Dst) - 1);
8333 if (rc == VINF_SUCCESS)
8334 {
8335 *pu64Dst = u64Value;
8336 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
8337 }
8338
8339 /* Commit the new RSP value unless an access handler made trouble. */
8340 if (rc == VINF_SUCCESS)
8341 *pTmpRsp = NewRsp;
8342
8343 return rc;
8344}
8345
8346
8347/**
8348 * Pops a word from the stack, using a temporary stack pointer.
8349 *
8350 * @returns Strict VBox status code.
8351 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8352 * @param pu16Value Where to store the popped value.
8353 * @param pTmpRsp Pointer to the temporary stack pointer.
8354 */
8355VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPUCC pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8356{
8357 /* Increment the stack pointer. */
8358 RTUINT64U NewRsp = *pTmpRsp;
8359 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 2);
8360
8361 /* Fetch the word the lazy way. */
8362 uint16_t const *pu16Src;
8363 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop,
8364 IEM_ACCESS_STACK_R, sizeof(*pu16Src) - 1);
8365 if (rc == VINF_SUCCESS)
8366 {
8367 *pu16Value = *pu16Src;
8368 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
8369
8370 /* Commit the new RSP value. */
8371 if (rc == VINF_SUCCESS)
8372 *pTmpRsp = NewRsp;
8373 }
8374
8375 return rc;
8376}
8377
8378
8379/**
8380 * Pops a dword from the stack, using a temporary stack pointer.
8381 *
8382 * @returns Strict VBox status code.
8383 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8384 * @param pu32Value Where to store the popped value.
8385 * @param pTmpRsp Pointer to the temporary stack pointer.
8386 */
8387VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPUCC pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8388{
8389 /* Increment the stack pointer. */
8390 RTUINT64U NewRsp = *pTmpRsp;
8391 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 4);
8392
8393 /* Fetch the dword the lazy way. */
8394 uint32_t const *pu32Src;
8395 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop,
8396 IEM_ACCESS_STACK_R, sizeof(*pu32Src) - 1);
8397 if (rc == VINF_SUCCESS)
8398 {
8399 *pu32Value = *pu32Src;
8400 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
8401
8402 /* Commit the new RSP value. */
8403 if (rc == VINF_SUCCESS)
8404 *pTmpRsp = NewRsp;
8405 }
8406
8407 return rc;
8408}
8409
8410
8411/**
8412 * Pops a qword from the stack, using a temporary stack pointer.
8413 *
8414 * @returns Strict VBox status code.
8415 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8416 * @param pu64Value Where to store the popped value.
8417 * @param pTmpRsp Pointer to the temporary stack pointer.
8418 */
8419VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPUCC pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8420{
8421 /* Increment the stack pointer. */
8422 RTUINT64U NewRsp = *pTmpRsp;
8423 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
8424
8425 /* Fetch the qword the lazy way. */
8426 uint64_t const *pu64Src;
8427 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop,
8428 IEM_ACCESS_STACK_R, sizeof(*pu64Src) - 1);
8429 if (rcStrict == VINF_SUCCESS)
8430 {
8431 *pu64Value = *pu64Src;
8432 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
8433
8434 /* Commit the new RSP value. */
8435 if (rcStrict == VINF_SUCCESS)
8436 *pTmpRsp = NewRsp;
8437 }
8438
8439 return rcStrict;
8440}
8441
8442
8443/**
8444 * Begin a special stack push (used by interrupt, exceptions and such).
8445 *
8446 * This will raise \#SS or \#PF if appropriate.
8447 *
8448 * @returns Strict VBox status code.
8449 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8450 * @param cbMem The number of bytes to push onto the stack.
8451 * @param cbAlign The alignment mask (7, 3, 1).
8452 * @param ppvMem Where to return the pointer to the stack memory.
8453 * As with the other memory functions this could be
8454 * direct access or bounce buffered access, so
8455 * don't commit any register updates until the commit call
8456 * succeeds.
8457 * @param puNewRsp Where to return the new RSP value. This must be
8458 * passed unchanged to
8459 * iemMemStackPushCommitSpecial().
8460 */
8461VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
8462 void **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT
8463{
8464 Assert(cbMem < UINT8_MAX);
8465 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, (uint8_t)cbMem, puNewRsp);
8466 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop,
8467 IEM_ACCESS_STACK_W, cbAlign);
8468}
8469
8470
8471/**
8472 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
8473 *
8474 * This will update the rSP.
8475 *
8476 * @returns Strict VBox status code.
8477 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8478 * @param pvMem The pointer returned by
8479 * iemMemStackPushBeginSpecial().
8480 * @param uNewRsp The new RSP value returned by
8481 * iemMemStackPushBeginSpecial().
8482 */
8483VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, void *pvMem, uint64_t uNewRsp) RT_NOEXCEPT
8484{
8485 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
8486 if (rcStrict == VINF_SUCCESS)
8487 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8488 return rcStrict;
8489}
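/*
 * A minimal usage sketch for the special push pair above (the frame size and
 * alignment mask are example values, not taken from any particular caller):
 *
 *      void *pvStackFrame;
 *      uint64_t uNewRsp;
 *      VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, 8 /*cbMem*/, 7 /*cbAlign*/,
 *                                                          &pvStackFrame, &uNewRsp);
 *      if (rcStrict == VINF_SUCCESS)
 *      {
 *          // ... fill the mapped frame (e.g. an exception stack frame) ...
 *          rcStrict = iemMemStackPushCommitSpecial(pVCpu, pvStackFrame, uNewRsp);
 *      }
 */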
8490
8491
8492/**
8493 * Begin a special stack pop (used by iret, retf and such).
8494 *
8495 * This will raise \#SS or \#PF if appropriate.
8496 *
8497 * @returns Strict VBox status code.
8498 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8499 * @param cbMem The number of bytes to pop from the stack.
8500 * @param cbAlign The alignment mask (7, 3, 1).
8501 * @param ppvMem Where to return the pointer to the stack memory.
8502 * @param puNewRsp Where to return the new RSP value. This must be
8503 * assigned to CPUMCTX::rsp manually some time
8504 * after iemMemStackPopDoneSpecial() has been
8505 * called.
8506 */
8507VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
8508 void const **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT
8509{
8510 Assert(cbMem < UINT8_MAX);
8511 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, (uint8_t)cbMem, puNewRsp);
8512 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R, cbAlign);
8513}
8514
8515
8516/**
8517 * Continue a special stack pop (used by iret and retf), for the purpose of
8518 * retrieving a new stack pointer.
8519 *
8520 * This will raise \#SS or \#PF if appropriate.
8521 *
8522 * @returns Strict VBox status code.
8523 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8524 * @param off Offset from the top of the stack. This is zero
8525 * except in the retf case.
8526 * @param cbMem The number of bytes to pop from the stack.
8527 * @param ppvMem Where to return the pointer to the stack memory.
8528 * @param uCurNewRsp The current uncommitted RSP value. (No need to
8529 * return this because all use of this function is
8530 * to retrieve a new value and anything we return
8531 * here would be discarded.)
8532 */
8533VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPUCC pVCpu, size_t off, size_t cbMem,
8534 void const **ppvMem, uint64_t uCurNewRsp) RT_NOEXCEPT
8535{
8536 Assert(cbMem < UINT8_MAX);
8537
8538 /* The essence of iemRegGetRspForPopEx and friends: */ /** @todo put this into an inlined function? */
8539 RTGCPTR GCPtrTop;
8540 if (IEM_IS_64BIT_CODE(pVCpu))
8541 GCPtrTop = uCurNewRsp;
8542 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
8543 GCPtrTop = (uint32_t)uCurNewRsp;
8544 else
8545 GCPtrTop = (uint16_t)uCurNewRsp;
8546
8547 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop + off, IEM_ACCESS_STACK_R,
8548 0 /* checked in iemMemStackPopBeginSpecial */);
8549}
8550
8551
8552/**
8553 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
8554 * iemMemStackPopContinueSpecial).
8555 *
8556 * The caller will manually commit the rSP.
8557 *
8558 * @returns Strict VBox status code.
8559 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8560 * @param pvMem The pointer returned by
8561 * iemMemStackPopBeginSpecial() or
8562 * iemMemStackPopContinueSpecial().
8563 */
8564VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPUCC pVCpu, void const *pvMem) RT_NOEXCEPT
8565{
8566 return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
8567}
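/*
 * A minimal usage sketch for the special pop helpers above (sizes are example
 * values; an iret/retf style caller may also use iemMemStackPopContinueSpecial
 * in between):
 *
 *      void const *pvFrame;
 *      uint64_t    uNewRsp;
 *      VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pVCpu, 8 /*cbMem*/, 7 /*cbAlign*/,
 *                                                         &pvFrame, &uNewRsp);
 *      if (rcStrict == VINF_SUCCESS)
 *      {
 *          // ... read the return address and friends from pvFrame ...
 *          rcStrict = iemMemStackPopDoneSpecial(pVCpu, pvFrame);
 *          if (rcStrict == VINF_SUCCESS)
 *              pVCpu->cpum.GstCtx.rsp = uNewRsp; // committed manually, as documented above
 *      }
 */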
8568
8569
8570/**
8571 * Fetches a system table byte.
8572 *
8573 * @returns Strict VBox status code.
8574 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8575 * @param pbDst Where to return the byte.
8576 * @param iSegReg The index of the segment register to use for
8577 * this access. The base and limits are checked.
8578 * @param GCPtrMem The address of the guest memory.
8579 */
8580VBOXSTRICTRC iemMemFetchSysU8(PVMCPUCC pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8581{
8582 /* The lazy approach for now... */
8583 uint8_t const *pbSrc;
8584 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8585 if (rc == VINF_SUCCESS)
8586 {
8587 *pbDst = *pbSrc;
8588 rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
8589 }
8590 return rc;
8591}
8592
8593
8594/**
8595 * Fetches a system table word.
8596 *
8597 * @returns Strict VBox status code.
8598 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8599 * @param pu16Dst Where to return the word.
8600 * @param iSegReg The index of the segment register to use for
8601 * this access. The base and limits are checked.
8602 * @param GCPtrMem The address of the guest memory.
8603 */
8604VBOXSTRICTRC iemMemFetchSysU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8605{
8606 /* The lazy approach for now... */
8607 uint16_t const *pu16Src;
8608 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8609 if (rc == VINF_SUCCESS)
8610 {
8611 *pu16Dst = *pu16Src;
8612 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
8613 }
8614 return rc;
8615}
8616
8617
8618/**
8619 * Fetches a system table dword.
8620 *
8621 * @returns Strict VBox status code.
8622 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8623 * @param pu32Dst Where to return the dword.
8624 * @param iSegReg The index of the segment register to use for
8625 * this access. The base and limits are checked.
8626 * @param GCPtrMem The address of the guest memory.
8627 */
8628VBOXSTRICTRC iemMemFetchSysU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8629{
8630 /* The lazy approach for now... */
8631 uint32_t const *pu32Src;
8632 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8633 if (rc == VINF_SUCCESS)
8634 {
8635 *pu32Dst = *pu32Src;
8636 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
8637 }
8638 return rc;
8639}
8640
8641
8642/**
8643 * Fetches a system table qword.
8644 *
8645 * @returns Strict VBox status code.
8646 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8647 * @param pu64Dst Where to return the qword.
8648 * @param iSegReg The index of the segment register to use for
8649 * this access. The base and limits are checked.
8650 * @param GCPtrMem The address of the guest memory.
8651 */
8652VBOXSTRICTRC iemMemFetchSysU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8653{
8654 /* The lazy approach for now... */
8655 uint64_t const *pu64Src;
8656 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8657 if (rc == VINF_SUCCESS)
8658 {
8659 *pu64Dst = *pu64Src;
8660 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
8661 }
8662 return rc;
8663}
8664
8665
8666/**
8667 * Fetches a descriptor table entry with caller specified error code.
8668 *
8669 * @returns Strict VBox status code.
8670 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8671 * @param pDesc Where to return the descriptor table entry.
8672 * @param uSel The selector which table entry to fetch.
8673 * @param uXcpt The exception to raise on table lookup error.
8674 * @param uErrorCode The error code associated with the exception.
8675 */
8676static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
8677 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT
8678{
8679 AssertPtr(pDesc);
8680 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
8681
8682 /** @todo did the 286 require all 8 bytes to be accessible? */
8683 /*
8684 * Get the selector table base and check bounds.
8685 */
8686 RTGCPTR GCPtrBase;
8687 if (uSel & X86_SEL_LDT)
8688 {
8689 if ( !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present
8690 || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit )
8691 {
8692 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
8693 uSel, pVCpu->cpum.GstCtx.ldtr.u32Limit, pVCpu->cpum.GstCtx.ldtr.Sel));
8694 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8695 uErrorCode, 0);
8696 }
8697
8698 Assert(pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present);
8699 GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;
8700 }
8701 else
8702 {
8703 if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)
8704 {
8705 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pVCpu->cpum.GstCtx.gdtr.cbGdt));
8706 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8707 uErrorCode, 0);
8708 }
8709 GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;
8710 }
8711
8712 /*
8713 * Read the legacy descriptor and maybe the long mode extensions if
8714 * required.
8715 */
8716 VBOXSTRICTRC rcStrict;
8717 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
8718 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
8719 else
8720 {
8721 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
8722 if (rcStrict == VINF_SUCCESS)
8723 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
8724 if (rcStrict == VINF_SUCCESS)
8725 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
8726 if (rcStrict == VINF_SUCCESS)
8727 pDesc->Legacy.au16[3] = 0;
8728 else
8729 return rcStrict;
8730 }
8731
8732 if (rcStrict == VINF_SUCCESS)
8733 {
8734 if ( !IEM_IS_LONG_MODE(pVCpu)
8735 || pDesc->Legacy.Gen.u1DescType)
8736 pDesc->Long.au64[1] = 0;
8737 else if ( (uint32_t)(uSel | X86_SEL_RPL_LDT) + 8
8738 <= (uSel & X86_SEL_LDT ? pVCpu->cpum.GstCtx.ldtr.u32Limit : pVCpu->cpum.GstCtx.gdtr.cbGdt))
8739 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
8740 else
8741 {
8742 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
8743 /** @todo is this the right exception? */
8744 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
8745 }
8746 }
8747 return rcStrict;
8748}
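/*
 * Worked example of the long mode offset arithmetic above (values chosen for
 * illustration): for uSel = 0x0010 the descriptor starts at GCPtrBase + 0x10.
 * (uSel | X86_SEL_RPL_LDT) sets the low three bits, giving 0x17, and adding 1
 * yields 0x18, i.e. exactly the second 8 bytes of the 16-byte system
 * descriptor, while the bounds check uses 0x17 + 8 = 0x1f, the offset of the
 * descriptor's last byte, which must not exceed the table limit.
 */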
8749
8750
8751/**
8752 * Fetches a descriptor table entry.
8753 *
8754 * @returns Strict VBox status code.
8755 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8756 * @param pDesc Where to return the descriptor table entry.
8757 * @param uSel The selector which table entry to fetch.
8758 * @param uXcpt The exception to raise on table lookup error.
8759 */
8760VBOXSTRICTRC iemMemFetchSelDesc(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt) RT_NOEXCEPT
8761{
8762 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
8763}
8764
8765
8766/**
8767 * Marks the selector descriptor as accessed (only non-system descriptors).
8768 *
8769 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
8770 * will therefore skip the limit checks.
8771 *
8772 * @returns Strict VBox status code.
8773 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8774 * @param uSel The selector.
8775 */
8776VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
8777{
8778 /*
8779 * Get the selector table base and calculate the entry address.
8780 */
8781 RTGCPTR GCPtr = uSel & X86_SEL_LDT
8782 ? pVCpu->cpum.GstCtx.ldtr.u64Base
8783 : pVCpu->cpum.GstCtx.gdtr.pGdt;
8784 GCPtr += uSel & X86_SEL_MASK;
8785
8786 /*
8787 * ASMAtomicBitSet will assert if the address is misaligned, so do some
8788 * ugly stuff to avoid this. This will make sure it's an atomic access,
8789 * as well as more or less remove any question about 8-bit or 32-bit accesses.
8790 */
8791 VBOXSTRICTRC rcStrict;
8792 uint32_t volatile *pu32;
8793 if ((GCPtr & 3) == 0)
8794 {
8795 /* The normal case: map the 32 bits surrounding the accessed bit (bit 40). */
8796 GCPtr += 2 + 2;
8797 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
8798 if (rcStrict != VINF_SUCCESS)
8799 return rcStrict;
8800 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
8801 }
8802 else
8803 {
8804 /* The misaligned GDT/LDT case, map the whole thing. */
8805 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
8806 if (rcStrict != VINF_SUCCESS)
8807 return rcStrict;
8808 switch ((uintptr_t)pu32 & 3)
8809 {
8810 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
8811 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
8812 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
8813 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
8814 }
8815 }
8816
8817 return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
8818}
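/*
 * A sketch of the bit arithmetic used above: the accessed bit is bit 0 of the
 * type byte at descriptor offset 5, i.e. bit 5*8 = 40 of the 8-byte entry.
 * The aligned path maps bytes 4..7, so within that dword the bit sits at
 * 40 - 4*8 = 8.  The misaligned path maps all 8 bytes and then advances the
 * byte pointer until it is dword aligned, subtracting 8 from the bit index
 * for every byte skipped (hence the 40 - 8/16/24 cases).
 */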
8819
8820/** @} */
8821
8822/** @name Opcode Helpers.
8823 * @{
8824 */
8825
8826/**
8827 * Calculates the effective address of a ModR/M memory operand.
8828 *
8829 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8830 *
8831 * @returns Strict VBox status code.
8832 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8833 * @param bRm The ModRM byte.
8834 * @param cbImmAndRspOffset - First byte: The size of any immediate
8835 * following the effective address opcode bytes
8836 * (only for RIP relative addressing).
8837 * - Second byte: RSP displacement (for POP [ESP]).
8838 * @param pGCPtrEff Where to return the effective address.
8839 */
8840VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff) RT_NOEXCEPT
8841{
8842 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
8843# define SET_SS_DEF() \
8844 do \
8845 { \
8846 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8847 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8848 } while (0)
8849
8850 if (!IEM_IS_64BIT_CODE(pVCpu))
8851 {
8852/** @todo Check the effective address size crap! */
8853 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8854 {
8855 uint16_t u16EffAddr;
8856
8857 /* Handle the disp16 form with no registers first. */
8858 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8859 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8860 else
8861 {
8862 /* Get the displacement. */
8863 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8864 {
8865 case 0: u16EffAddr = 0; break;
8866 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8867 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8868 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
8869 }
8870
8871 /* Add the base and index registers to the disp. */
8872 switch (bRm & X86_MODRM_RM_MASK)
8873 {
8874 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8875 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8876 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8877 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8878 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8879 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8880 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8881 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8882 }
8883 }
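 /* Worked example (sketch): bRm=0x46 gives mod=1, rm=6, so the path above
    reads a disp8, adds BP, and lets SET_SS_DEF() make SS the default
    segment: the classic [bp+disp8] form. */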
8884
8885 *pGCPtrEff = u16EffAddr;
8886 }
8887 else
8888 {
8889 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8890 uint32_t u32EffAddr;
8891
8892 /* Handle the disp32 form with no registers first. */
8893 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8894 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8895 else
8896 {
8897 /* Get the register (or SIB) value. */
8898 switch ((bRm & X86_MODRM_RM_MASK))
8899 {
8900 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8901 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8902 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8903 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8904 case 4: /* SIB */
8905 {
8906 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8907
8908 /* Get the index and scale it. */
8909 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8910 {
8911 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8912 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8913 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8914 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8915 case 4: u32EffAddr = 0; /*none */ break;
8916 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8917 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8918 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8919 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8920 }
8921 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8922
8923 /* add base */
8924 switch (bSib & X86_SIB_BASE_MASK)
8925 {
8926 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8927 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8928 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8929 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8930 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8931 case 5:
8932 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8933 {
8934 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
8935 SET_SS_DEF();
8936 }
8937 else
8938 {
8939 uint32_t u32Disp;
8940 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8941 u32EffAddr += u32Disp;
8942 }
8943 break;
8944 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8945 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8946 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8947 }
8948 break;
8949 }
8950 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8951 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8952 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8953 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8954 }
8955
8956 /* Get and add the displacement. */
8957 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8958 {
8959 case 0:
8960 break;
8961 case 1:
8962 {
8963 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8964 u32EffAddr += i8Disp;
8965 break;
8966 }
8967 case 2:
8968 {
8969 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8970 u32EffAddr += u32Disp;
8971 break;
8972 }
8973 default:
8974 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
8975 }
8976
8977 }
8978 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
8979 *pGCPtrEff = u32EffAddr;
8980 else
8981 {
8982 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
8983 *pGCPtrEff = u32EffAddr & UINT16_MAX;
8984 }
8985 }
8986 }
8987 else
8988 {
8989 uint64_t u64EffAddr;
8990
8991 /* Handle the rip+disp32 form with no registers first. */
8992 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8993 {
8994 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
8995 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
8996 }
8997 else
8998 {
8999 /* Get the register (or SIB) value. */
9000 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
9001 {
9002 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9003 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9004 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9005 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9006 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
9007 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9008 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9009 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9010 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9011 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9012 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9013 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9014 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9015 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9016 /* SIB */
9017 case 4:
9018 case 12:
9019 {
9020 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9021
9022 /* Get the index and scale it. */
9023 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
9024 {
9025 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9026 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9027 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9028 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9029 case 4: u64EffAddr = 0; /*none */ break;
9030 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
9031 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9032 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9033 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9034 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9035 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9036 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9037 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
9038 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9039 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9040 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9041 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9042 }
9043 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9044
9045 /* add base */
9046 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
9047 {
9048 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
9049 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
9050 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
9051 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
9052 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
9053 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
9054 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
9055 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
9056 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
9057 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
9058 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
9059 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
9060 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
9061 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
9062 /* complicated encodings */
9063 case 5:
9064 case 13:
9065 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9066 {
9067 if (!pVCpu->iem.s.uRexB)
9068 {
9069 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
9070 SET_SS_DEF();
9071 }
9072 else
9073 u64EffAddr += pVCpu->cpum.GstCtx.r13;
9074 }
9075 else
9076 {
9077 uint32_t u32Disp;
9078 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9079 u64EffAddr += (int32_t)u32Disp;
9080 }
9081 break;
9082 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9083 }
9084 break;
9085 }
9086 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9087 }
9088
9089 /* Get and add the displacement. */
9090 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9091 {
9092 case 0:
9093 break;
9094 case 1:
9095 {
9096 int8_t i8Disp;
9097 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9098 u64EffAddr += i8Disp;
9099 break;
9100 }
9101 case 2:
9102 {
9103 uint32_t u32Disp;
9104 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9105 u64EffAddr += (int32_t)u32Disp;
9106 break;
9107 }
9108 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
9109 }
9110
9111 }
9112
9113 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
9114 *pGCPtrEff = u64EffAddr;
9115 else
9116 {
9117 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9118 *pGCPtrEff = u64EffAddr & UINT32_MAX;
9119 }
9120 }
9121
9122 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
9123 return VINF_SUCCESS;
9124}
9125
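/*
 * A rough worked example of the 32-bit ModR/M + SIB arithmetic implemented by
 * iemOpHlpCalcRmEffAddr above, assuming bRm=0x44 (mod=1, rm=4: SIB byte plus
 * disp8 follow) and bSib=0x88 (scale=2, index=ECX, base=EAX); bDisp8 is an
 * illustrative name for the fetched displacement byte:
 *
 * @code
 *      uint32_t uEffAddr = pVCpu->cpum.GstCtx.ecx;     // the index register
 *      uEffAddr <<= 2;                                 // scaled by 1 << 2 = 4
 *      uEffAddr += pVCpu->cpum.GstCtx.eax;             // plus the base register
 *      uEffAddr += (int8_t)bDisp8;                     // plus the sign-extended disp8
 * @endcode
 *
 * i.e. EAX + ECX*4 + disp8, which is what the switch statements above compute.
 */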
9126
9127#ifdef IEM_WITH_SETJMP
9128/**
9129 * Calculates the effective address of a ModR/M memory operand.
9130 *
9131 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
9132 *
9133 * May longjmp on internal error.
9134 *
9135 * @return The effective address.
9136 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9137 * @param bRm The ModRM byte.
9138 * @param cbImmAndRspOffset - First byte: The size of any immediate
9139 * following the effective address opcode bytes
9140 * (only for RIP relative addressing).
9141 * - Second byte: RSP displacement (for POP [ESP]).
9142 */
9143RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset) IEM_NOEXCEPT_MAY_LONGJMP
9144{
9145 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
9146# define SET_SS_DEF() \
9147 do \
9148 { \
9149 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
9150 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
9151 } while (0)
9152
9153 if (!IEM_IS_64BIT_CODE(pVCpu))
9154 {
9155/** @todo Check the effective address size crap! */
9156 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
9157 {
9158 uint16_t u16EffAddr;
9159
9160 /* Handle the disp16 form with no registers first. */
9161 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
9162 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
9163 else
9164 {
9165                /* Get the displacement. */
9166 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9167 {
9168 case 0: u16EffAddr = 0; break;
9169 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
9170 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
9171 default: AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_1)); /* (caller checked for these) */
9172 }
9173
9174 /* Add the base and index registers to the disp. */
9175 switch (bRm & X86_MODRM_RM_MASK)
9176 {
9177 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
9178 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
9179 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
9180 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
9181 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
9182 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
9183 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
9184 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
9185 }
9186 }
9187
9188 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
9189 return u16EffAddr;
9190 }
9191
9192 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9193 uint32_t u32EffAddr;
9194
9195 /* Handle the disp32 form with no registers first. */
9196 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9197 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
9198 else
9199 {
9200 /* Get the register (or SIB) value. */
9201 switch ((bRm & X86_MODRM_RM_MASK))
9202 {
9203 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9204 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9205 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9206 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9207 case 4: /* SIB */
9208 {
9209 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9210
9211 /* Get the index and scale it. */
9212 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
9213 {
9214 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9215 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9216 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9217 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9218 case 4: u32EffAddr = 0; /*none */ break;
9219 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
9220 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9221 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9222 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9223 }
9224 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9225
9226 /* add base */
9227 switch (bSib & X86_SIB_BASE_MASK)
9228 {
9229 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
9230 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
9231 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
9232 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
9233 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
9234 case 5:
9235 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9236 {
9237 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
9238 SET_SS_DEF();
9239 }
9240 else
9241 {
9242 uint32_t u32Disp;
9243 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9244 u32EffAddr += u32Disp;
9245 }
9246 break;
9247 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
9248 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
9249 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9250 }
9251 break;
9252 }
9253 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
9254 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9255 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9256 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9257 }
9258
9259 /* Get and add the displacement. */
9260 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9261 {
9262 case 0:
9263 break;
9264 case 1:
9265 {
9266 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9267 u32EffAddr += i8Disp;
9268 break;
9269 }
9270 case 2:
9271 {
9272 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9273 u32EffAddr += u32Disp;
9274 break;
9275 }
9276 default:
9277 AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_2)); /* (caller checked for these) */
9278 }
9279 }
9280
9281 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
9282 {
9283 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
9284 return u32EffAddr;
9285 }
9286 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
9287 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX32\n", u32EffAddr & UINT16_MAX));
9288 return u32EffAddr & UINT16_MAX;
9289 }
9290
9291 uint64_t u64EffAddr;
9292
9293 /* Handle the rip+disp32 form with no registers first. */
9294 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9295 {
9296 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
9297 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
9298 }
9299 else
9300 {
9301 /* Get the register (or SIB) value. */
9302 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
9303 {
9304 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9305 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9306 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9307 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9308 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
9309 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9310 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9311 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9312 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9313 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9314 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9315 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9316 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9317 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9318 /* SIB */
9319 case 4:
9320 case 12:
9321 {
9322 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9323
9324 /* Get the index and scale it. */
9325 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
9326 {
9327 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9328 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9329 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9330 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9331 case 4: u64EffAddr = 0; /*none */ break;
9332 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
9333 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9334 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9335 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9336 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9337 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9338 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9339 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
9340 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9341 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9342 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9343 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9344 }
9345 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9346
9347 /* add base */
9348 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
9349 {
9350 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
9351 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
9352 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
9353 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
9354 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
9355 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
9356 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
9357 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
9358 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
9359 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
9360 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
9361 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
9362 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
9363 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
9364 /* complicated encodings */
9365 case 5:
9366 case 13:
9367 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9368 {
9369 if (!pVCpu->iem.s.uRexB)
9370 {
9371 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
9372 SET_SS_DEF();
9373 }
9374 else
9375 u64EffAddr += pVCpu->cpum.GstCtx.r13;
9376 }
9377 else
9378 {
9379 uint32_t u32Disp;
9380 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9381 u64EffAddr += (int32_t)u32Disp;
9382 }
9383 break;
9384 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9385 }
9386 break;
9387 }
9388 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9389 }
9390
9391 /* Get and add the displacement. */
9392 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9393 {
9394 case 0:
9395 break;
9396 case 1:
9397 {
9398 int8_t i8Disp;
9399 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9400 u64EffAddr += i8Disp;
9401 break;
9402 }
9403 case 2:
9404 {
9405 uint32_t u32Disp;
9406 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9407 u64EffAddr += (int32_t)u32Disp;
9408 break;
9409 }
9410 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
9411 }
9412
9413 }
9414
9415 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
9416 {
9417 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
9418 return u64EffAddr;
9419 }
9420 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9421 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
9422 return u64EffAddr & UINT32_MAX;
9423}
9424#endif /* IEM_WITH_SETJMP */
9425
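/*
 * A minimal usage sketch for the cbImmAndRspOffset parameter taken by the
 * effective address helpers in this file (the Jmp variant above requires
 * IEM_WITH_SETJMP; the variable names here are illustrative).  The low byte
 * holds the size of any immediate following the effective address bytes (only
 * used for RIP relative addressing), while the second byte holds an RSP
 * displacement for the POP [xSP] style cases:
 *
 * @code
 *      // Instruction with a 4 byte immediate and no stack pointer bias:
 *      RTGCPTR GCPtrEff1 = iemOpHlpCalcRmEffAddrJmp(pVCpu, bRm, 4);
 *
 *      // A dword POP [ESP] style access, biasing xSP by 4 via the second byte:
 *      RTGCPTR GCPtrEff2 = iemOpHlpCalcRmEffAddrJmp(pVCpu, bRm, UINT32_C(4) << 8);
 * @endcode
 */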
9426
9427/**
9428 * Calculates the effective address of a ModR/M memory operand, extended version
9429 * for use in the recompilers.
9430 *
9431 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
9432 *
9433 * @return Strict VBox status code.
9434 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9435 * @param bRm The ModRM byte.
9436 * @param cbImmAndRspOffset - First byte: The size of any immediate
9437 * following the effective address opcode bytes
9438 * (only for RIP relative addressing).
9439 * - Second byte: RSP displacement (for POP [ESP]).
9440 * @param pGCPtrEff Where to return the effective address.
9441 * @param puInfo Extra info: 32-bit displacement (bits 31:0) and
9442 * SIB byte (bits 39:32).
9443 */
9444VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff, uint64_t *puInfo) RT_NOEXCEPT
9445{
9446    Log5(("iemOpHlpCalcRmEffAddrEx: bRm=%#x\n", bRm));
9447# define SET_SS_DEF() \
9448 do \
9449 { \
9450 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
9451 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
9452 } while (0)
9453
9454 uint64_t uInfo;
9455 if (!IEM_IS_64BIT_CODE(pVCpu))
9456 {
9457/** @todo Check the effective address size crap! */
9458 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
9459 {
9460 uint16_t u16EffAddr;
9461
9462 /* Handle the disp16 form with no registers first. */
9463 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
9464 {
9465 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
9466 uInfo = u16EffAddr;
9467 }
9468 else
9469 {
9470                /* Get the displacement. */
9471 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9472 {
9473 case 0: u16EffAddr = 0; break;
9474 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
9475 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
9476 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
9477 }
9478 uInfo = u16EffAddr;
9479
9480 /* Add the base and index registers to the disp. */
9481 switch (bRm & X86_MODRM_RM_MASK)
9482 {
9483 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
9484 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
9485 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
9486 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
9487 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
9488 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
9489 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
9490 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
9491 }
9492 }
9493
9494 *pGCPtrEff = u16EffAddr;
9495 }
9496 else
9497 {
9498 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9499 uint32_t u32EffAddr;
9500
9501 /* Handle the disp32 form with no registers first. */
9502 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9503 {
9504 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
9505 uInfo = u32EffAddr;
9506 }
9507 else
9508 {
9509 /* Get the register (or SIB) value. */
9510 uInfo = 0;
9511 switch ((bRm & X86_MODRM_RM_MASK))
9512 {
9513 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9514 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9515 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9516 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9517 case 4: /* SIB */
9518 {
9519 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9520 uInfo = (uint64_t)bSib << 32;
9521
9522 /* Get the index and scale it. */
9523 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
9524 {
9525 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9526 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9527 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9528 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9529 case 4: u32EffAddr = 0; /*none */ break;
9530 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
9531 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9532 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9533 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9534 }
9535 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9536
9537 /* add base */
9538 switch (bSib & X86_SIB_BASE_MASK)
9539 {
9540 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
9541 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
9542 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
9543 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
9544 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
9545 case 5:
9546 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9547 {
9548 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
9549 SET_SS_DEF();
9550 }
9551 else
9552 {
9553 uint32_t u32Disp;
9554 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9555 u32EffAddr += u32Disp;
9556 uInfo |= u32Disp;
9557 }
9558 break;
9559 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
9560 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
9561 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9562 }
9563 break;
9564 }
9565 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
9566 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9567 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9568 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9569 }
9570
9571 /* Get and add the displacement. */
9572 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9573 {
9574 case 0:
9575 break;
9576 case 1:
9577 {
9578 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9579 u32EffAddr += i8Disp;
9580 uInfo |= (uint32_t)(int32_t)i8Disp;
9581 break;
9582 }
9583 case 2:
9584 {
9585 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9586 u32EffAddr += u32Disp;
9587 uInfo |= (uint32_t)u32Disp;
9588 break;
9589 }
9590 default:
9591 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
9592 }
9593
9594 }
9595 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
9596 *pGCPtrEff = u32EffAddr;
9597 else
9598 {
9599 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
9600 *pGCPtrEff = u32EffAddr & UINT16_MAX;
9601 }
9602 }
9603 }
9604 else
9605 {
9606 uint64_t u64EffAddr;
9607
9608 /* Handle the rip+disp32 form with no registers first. */
9609 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9610 {
9611 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
9612 uInfo = (uint32_t)u64EffAddr;
9613 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
9614 }
9615 else
9616 {
9617 /* Get the register (or SIB) value. */
9618 uInfo = 0;
9619 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
9620 {
9621 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9622 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9623 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9624 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9625 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
9626 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9627 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9628 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9629 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9630 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9631 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9632 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9633 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9634 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9635 /* SIB */
9636 case 4:
9637 case 12:
9638 {
9639 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9640 uInfo = (uint64_t)bSib << 32;
9641
9642 /* Get the index and scale it. */
9643 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
9644 {
9645 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9646 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9647 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9648 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9649 case 4: u64EffAddr = 0; /*none */ break;
9650 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
9651 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9652 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9653 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9654 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9655 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9656 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9657 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
9658 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9659 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9660 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9661 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9662 }
9663 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9664
9665 /* add base */
9666 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
9667 {
9668 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
9669 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
9670 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
9671 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
9672 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
9673 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
9674 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
9675 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
9676 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
9677 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
9678 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
9679 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
9680 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
9681 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
9682 /* complicated encodings */
9683 case 5:
9684 case 13:
9685 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9686 {
9687 if (!pVCpu->iem.s.uRexB)
9688 {
9689 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
9690 SET_SS_DEF();
9691 }
9692 else
9693 u64EffAddr += pVCpu->cpum.GstCtx.r13;
9694 }
9695 else
9696 {
9697 uint32_t u32Disp;
9698 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9699 u64EffAddr += (int32_t)u32Disp;
9700 uInfo |= u32Disp;
9701 }
9702 break;
9703 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9704 }
9705 break;
9706 }
9707 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9708 }
9709
9710 /* Get and add the displacement. */
9711 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9712 {
9713 case 0:
9714 break;
9715 case 1:
9716 {
9717 int8_t i8Disp;
9718 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9719 u64EffAddr += i8Disp;
9720 uInfo |= (uint32_t)(int32_t)i8Disp;
9721 break;
9722 }
9723 case 2:
9724 {
9725 uint32_t u32Disp;
9726 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9727 u64EffAddr += (int32_t)u32Disp;
9728 uInfo |= u32Disp;
9729 break;
9730 }
9731 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
9732 }
9733
9734 }
9735
9736 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
9737 *pGCPtrEff = u64EffAddr;
9738 else
9739 {
9740 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9741 *pGCPtrEff = u64EffAddr & UINT32_MAX;
9742 }
9743 }
9744 *puInfo = uInfo;
9745
9746 Log5(("iemOpHlpCalcRmEffAddrEx: EffAddr=%#010RGv uInfo=%RX64\n", *pGCPtrEff, uInfo));
9747 return VINF_SUCCESS;
9748}
9749
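/*
 * Sketch of how a recompiler consumer might take the uInfo value returned by
 * iemOpHlpCalcRmEffAddrEx apart; per the documentation above the 32-bit
 * displacement sits in bits 31:0 and the SIB byte, when present, in bits 39:32
 * (the local names are illustrative):
 *
 * @code
 *      uint32_t const u32Disp = (uint32_t)uInfo;           // displacement, bits 31:0
 *      uint8_t  const bSib    = (uint8_t)(uInfo >> 32);    // SIB byte, bits 39:32
 * @endcode
 */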
9750/** @} */
9751
9752
9753#ifdef LOG_ENABLED
9754/**
9755 * Logs the current instruction.
9756 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9757 * @param fSameCtx Set if we have the same context information as the VMM,
9758 * clear if we may have already executed an instruction in
9759 * our debug context. When clear, we assume IEMCPU holds
9760 * valid CPU mode info.
9761 *
9762 * The @a fSameCtx parameter is now misleading and obsolete.
9763 * @param pszFunction The IEM function doing the execution.
9764 */
9765static void iemLogCurInstr(PVMCPUCC pVCpu, bool fSameCtx, const char *pszFunction) RT_NOEXCEPT
9766{
9767# ifdef IN_RING3
9768 if (LogIs2Enabled())
9769 {
9770 char szInstr[256];
9771 uint32_t cbInstr = 0;
9772 if (fSameCtx)
9773 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
9774 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
9775 szInstr, sizeof(szInstr), &cbInstr);
9776 else
9777 {
9778 uint32_t fFlags = 0;
9779 switch (IEM_GET_CPU_MODE(pVCpu))
9780 {
9781 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
9782 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
9783 case IEMMODE_16BIT:
9784 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) || pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
9785 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
9786 else
9787 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
9788 break;
9789 }
9790 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags,
9791 szInstr, sizeof(szInstr), &cbInstr);
9792 }
9793
9794 PCX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
9795 Log2(("**** %s fExec=%x\n"
9796 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
9797 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
9798 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
9799 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
9800 " %s\n"
9801 , pszFunction, pVCpu->iem.s.fExec,
9802 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
9803 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
9804 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
9805 pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
9806 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
9807 szInstr));
9808
9809 if (LogIs3Enabled())
9810 DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
9811 }
9812 else
9813# endif
9814 LogFlow(("%s: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n", pszFunction, pVCpu->cpum.GstCtx.cs.Sel,
9815 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u));
9816 RT_NOREF_PV(pVCpu); RT_NOREF_PV(fSameCtx);
9817}
9818#endif /* LOG_ENABLED */
9819
9820
9821#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9822/**
9823 * Deals with VMCPU_FF_VMX_APIC_WRITE, VMCPU_FF_VMX_MTF, VMCPU_FF_VMX_NMI_WINDOW,
9824 * VMCPU_FF_VMX_PREEMPT_TIMER and VMCPU_FF_VMX_INT_WINDOW.
9825 *
9826 * @returns Modified rcStrict.
9827 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9828 * @param rcStrict The instruction execution status.
9829 */
9830static VBOXSTRICTRC iemHandleNestedInstructionBoundaryFFs(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict) RT_NOEXCEPT
9831{
9832 Assert(CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)));
9833 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF))
9834 {
9835 /* VMX preemption timer takes priority over NMI-window exits. */
9836 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
9837 {
9838 rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
9839 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER));
9840 }
9841 /*
9842 * Check remaining intercepts.
9843 *
9844 * NMI-window and Interrupt-window VM-exits.
9845 * Interrupt shadow (block-by-STI and Mov SS) inhibits interrupts and may also block NMIs.
9846 * Event injection during VM-entry takes priority over NMI-window and interrupt-window VM-exits.
9847 *
9848 * See Intel spec. 26.7.6 "NMI-Window Exiting".
9849 * See Intel spec. 26.7.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
9850 */
9851 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW)
9852 && !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
9853 && !TRPMHasTrap(pVCpu))
9854 {
9855 Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
9856 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
9857 && CPUMIsGuestVmxVirtNmiBlocking(&pVCpu->cpum.GstCtx))
9858 {
9859 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* u64ExitQual */);
9860 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW));
9861 }
9862 else if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
9863 && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
9864 {
9865 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* u64ExitQual */);
9866 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW));
9867 }
9868 }
9869 }
9870 /* TPR-below threshold/APIC write has the highest priority. */
9871 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
9872 {
9873 rcStrict = iemVmxApicWriteEmulation(pVCpu);
9874 Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
9875 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE));
9876 }
9877 /* MTF takes priority over VMX-preemption timer. */
9878 else
9879 {
9880 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* u64ExitQual */);
9881 Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
9882 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF));
9883 }
9884 return rcStrict;
9885}
9886#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
9887
9888
9889/**
9890 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
9891 * IEMExecOneWithPrefetchedByPC.
9892 *
9893 * Similar code is found in IEMExecLots.
9894 *
9895 * @return Strict VBox status code.
9896 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9897 * @param fExecuteInhibit If set, execute the instruction following CLI,
9898 * POP SS and MOV SS,GR.
9899 * @param pszFunction The calling function name.
9900 */
9901DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPUCC pVCpu, bool fExecuteInhibit, const char *pszFunction)
9902{
9903 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9904 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9905 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9906 RT_NOREF_PV(pszFunction);
9907
9908#ifdef IEM_WITH_SETJMP
9909 VBOXSTRICTRC rcStrict;
9910 IEM_TRY_SETJMP(pVCpu, rcStrict)
9911 {
9912 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9913 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9914 }
9915 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9916 {
9917 pVCpu->iem.s.cLongJumps++;
9918 }
9919 IEM_CATCH_LONGJMP_END(pVCpu);
9920#else
9921 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9922 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9923#endif
9924 if (rcStrict == VINF_SUCCESS)
9925 pVCpu->iem.s.cInstructions++;
9926 if (pVCpu->iem.s.cActiveMappings > 0)
9927 {
9928 Assert(rcStrict != VINF_SUCCESS);
9929 iemMemRollback(pVCpu);
9930 }
9931 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9932 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9933 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9934
9935//#ifdef DEBUG
9936// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
9937//#endif
9938
9939#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9940 /*
9941 * Perform any VMX nested-guest instruction boundary actions.
9942 *
9943 * If any of these causes a VM-exit, we must skip executing the next
9944 * instruction (would run into stale page tables). A VM-exit makes sure
9945     * there is no interrupt-inhibition, so that should ensure we don't go on
9946     * to try executing the next instruction.  Clearing fExecuteInhibit is
9947 * problematic because of the setjmp/longjmp clobbering above.
9948 */
9949 if ( !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9950 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)
9951 || rcStrict != VINF_SUCCESS)
9952 { /* likely */ }
9953 else
9954 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9955#endif
9956
9957 /* Execute the next instruction as well if a cli, pop ss or
9958 mov ss, Gr has just completed successfully. */
9959 if ( fExecuteInhibit
9960 && rcStrict == VINF_SUCCESS
9961 && CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx))
9962 {
9963 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fExec & (IEM_F_BYPASS_HANDLERS | IEM_F_X86_DISREGARD_LOCK));
9964 if (rcStrict == VINF_SUCCESS)
9965 {
9966#ifdef LOG_ENABLED
9967 iemLogCurInstr(pVCpu, false, pszFunction);
9968#endif
9969#ifdef IEM_WITH_SETJMP
9970 IEM_TRY_SETJMP_AGAIN(pVCpu, rcStrict)
9971 {
9972 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9973 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9974 }
9975 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9976 {
9977 pVCpu->iem.s.cLongJumps++;
9978 }
9979 IEM_CATCH_LONGJMP_END(pVCpu);
9980#else
9981 IEM_OPCODE_GET_FIRST_U8(&b);
9982 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9983#endif
9984 if (rcStrict == VINF_SUCCESS)
9985 {
9986 pVCpu->iem.s.cInstructions++;
9987#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9988 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9989 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW))
9990 { /* likely */ }
9991 else
9992 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9993#endif
9994 }
9995 if (pVCpu->iem.s.cActiveMappings > 0)
9996 {
9997 Assert(rcStrict != VINF_SUCCESS);
9998 iemMemRollback(pVCpu);
9999 }
10000 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
10001 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
10002 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
10003 }
10004 else if (pVCpu->iem.s.cActiveMappings > 0)
10005 iemMemRollback(pVCpu);
10006 /** @todo drop this after we bake this change into RIP advancing. */
10007 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx); /* hope this is correct for all exceptional cases... */
10008 }
10009
10010 /*
10011 * Return value fiddling, statistics and sanity assertions.
10012 */
10013 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10014
10015 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
10016 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
10017 return rcStrict;
10018}
10019
10020
10021/**
10022 * Execute one instruction.
10023 *
10024 * @return Strict VBox status code.
10025 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10026 */
10027VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPUCC pVCpu)
10028{
10029    AssertCompile(sizeof(pVCpu->iem.s) <= sizeof(pVCpu->iem.padding)); /* (tstVMStruct can't do its job w/o instruction stats) */
10030#ifdef LOG_ENABLED
10031 iemLogCurInstr(pVCpu, true, "IEMExecOne");
10032#endif
10033
10034 /*
10035 * Do the decoding and emulation.
10036 */
10037 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
10038 if (rcStrict == VINF_SUCCESS)
10039 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOne");
10040 else if (pVCpu->iem.s.cActiveMappings > 0)
10041 iemMemRollback(pVCpu);
10042
10043 if (rcStrict != VINF_SUCCESS)
10044 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
10045 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
10046 return rcStrict;
10047}
10048
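/*
 * A minimal calling sketch for IEMExecOne (illustrative; the surrounding EM
 * loop and status code routing are omitted):
 *
 * @code
 *      VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
 *      if (rcStrict == VINF_SUCCESS)
 *      {   // the guest state in pVCpu->cpum.GstCtx has advanced by one instruction
 *      }
 *      else
 *      {   // informational statuses and errors must be routed back to the caller / EM
 *      }
 * @endcode
 */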
10049
10050VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPUCC pVCpu, uint32_t *pcbWritten)
10051{
10052 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
10053 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
10054 if (rcStrict == VINF_SUCCESS)
10055 {
10056 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneEx");
10057 if (pcbWritten)
10058 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
10059 }
10060 else if (pVCpu->iem.s.cActiveMappings > 0)
10061 iemMemRollback(pVCpu);
10062
10063 return rcStrict;
10064}
10065
10066
10067VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
10068 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
10069{
10070 VBOXSTRICTRC rcStrict;
10071 if ( cbOpcodeBytes
10072 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
10073 {
10074 iemInitDecoder(pVCpu, 0 /*fExecOpts*/);
10075#ifdef IEM_WITH_CODE_TLB
10076 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
10077 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
10078 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
10079 pVCpu->iem.s.offCurInstrStart = 0;
10080 pVCpu->iem.s.offInstrNextByte = 0;
10081 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
10082#else
10083 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
10084 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
10085#endif
10086 rcStrict = VINF_SUCCESS;
10087 }
10088 else
10089 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
10090 if (rcStrict == VINF_SUCCESS)
10091 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneWithPrefetchedByPC");
10092 else if (pVCpu->iem.s.cActiveMappings > 0)
10093 iemMemRollback(pVCpu);
10094
10095 return rcStrict;
10096}
10097
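/*
 * Illustrative call sketch for IEMExecOneWithPrefetchedByPC, assuming the
 * caller already holds the opcode bytes for the current RIP (abInstr and how
 * it gets filled are made up for the example):
 *
 * @code
 *      uint8_t abInstr[16];
 *      // ... copy the bytes at pVCpu->cpum.GstCtx.rip into abInstr ...
 *      VBOXSTRICTRC rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, pVCpu->cpum.GstCtx.rip,
 *                                                           abInstr, sizeof(abInstr));
 * @endcode
 *
 * The prefetched bytes are only used when OpcodeBytesPC matches the guest RIP;
 * otherwise the function falls back to the normal opcode fetching path.
 */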
10098
10099VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPUCC pVCpu, uint32_t *pcbWritten)
10100{
10101 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
10102 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_BYPASS_HANDLERS);
10103 if (rcStrict == VINF_SUCCESS)
10104 {
10105 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassEx");
10106 if (pcbWritten)
10107 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
10108 }
10109 else if (pVCpu->iem.s.cActiveMappings > 0)
10110 iemMemRollback(pVCpu);
10111
10112 return rcStrict;
10113}
10114
10115
10116VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
10117 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
10118{
10119 VBOXSTRICTRC rcStrict;
10120 if ( cbOpcodeBytes
10121 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
10122 {
10123 iemInitDecoder(pVCpu, IEM_F_BYPASS_HANDLERS);
10124#ifdef IEM_WITH_CODE_TLB
10125 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
10126 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
10127 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
10128 pVCpu->iem.s.offCurInstrStart = 0;
10129 pVCpu->iem.s.offInstrNextByte = 0;
10130 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
10131#else
10132 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
10133 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
10134#endif
10135 rcStrict = VINF_SUCCESS;
10136 }
10137 else
10138 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_BYPASS_HANDLERS);
10139 if (rcStrict == VINF_SUCCESS)
10140 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPC");
10141 else if (pVCpu->iem.s.cActiveMappings > 0)
10142 iemMemRollback(pVCpu);
10143
10144 return rcStrict;
10145}
10146
10147
10148/**
10149 * For handling split cacheline lock operations when the host has split-lock
10150 * detection enabled.
10151 *
10152 * This will cause the interpreter to disregard the lock prefix and implicit
10153 * locking (xchg).
10154 *
10155 * @returns Strict VBox status code.
10156 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10157 */
10158VMMDECL(VBOXSTRICTRC) IEMExecOneIgnoreLock(PVMCPUCC pVCpu)
10159{
10160 /*
10161 * Do the decoding and emulation.
10162 */
10163 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_X86_DISREGARD_LOCK);
10164 if (rcStrict == VINF_SUCCESS)
10165 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneIgnoreLock");
10166 else if (pVCpu->iem.s.cActiveMappings > 0)
10167 iemMemRollback(pVCpu);
10168
10169 if (rcStrict != VINF_SUCCESS)
10170 LogFlow(("IEMExecOneIgnoreLock: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
10171 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
10172 return rcStrict;
10173}
10174
10175
10176/**
10177 * Code common to IEMExecLots and IEMExecRecompilerThreaded that attempts to
10178 * inject a pending TRPM trap.
10179 */
10180VBOXSTRICTRC iemExecInjectPendingTrap(PVMCPUCC pVCpu)
10181{
10182 Assert(TRPMHasTrap(pVCpu));
10183
10184 if ( !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
10185 && !CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
10186 {
10187 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
10188#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10189 bool fIntrEnabled = CPUMGetGuestGif(&pVCpu->cpum.GstCtx);
10190 if (fIntrEnabled)
10191 {
10192 if (!CPUMIsGuestInNestedHwvirtMode(IEM_GET_CTX(pVCpu)))
10193 fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
10194 else if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
10195 fIntrEnabled = CPUMIsGuestVmxPhysIntrEnabled(IEM_GET_CTX(pVCpu));
10196 else
10197 {
10198 Assert(CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));
10199 fIntrEnabled = CPUMIsGuestSvmPhysIntrEnabled(pVCpu, IEM_GET_CTX(pVCpu));
10200 }
10201 }
10202#else
10203 bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
10204#endif
10205 if (fIntrEnabled)
10206 {
10207 uint8_t u8TrapNo;
10208 TRPMEVENT enmType;
10209 uint32_t uErrCode;
10210 RTGCPTR uCr2;
10211 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /*pu8InstLen*/, NULL /*fIcebp*/);
10212 AssertRC(rc2);
10213 Assert(enmType == TRPM_HARDWARE_INT);
10214 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /*cbInstr*/);
10215
10216 TRPMResetTrap(pVCpu);
10217
10218#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10219 /* Injecting an event may cause a VM-exit. */
10220 if ( rcStrict != VINF_SUCCESS
10221 && rcStrict != VINF_IEM_RAISED_XCPT)
10222 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
10223#else
10224 NOREF(rcStrict);
10225#endif
10226 }
10227 }
10228
10229 return VINF_SUCCESS;
10230}
10231
10232
10233VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPUCC pVCpu, uint32_t cMaxInstructions, uint32_t cPollRate, uint32_t *pcInstructions)
10234{
10235 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
10236 AssertMsg(RT_IS_POWER_OF_TWO(cPollRate + 1), ("%#x\n", cPollRate));
10237 Assert(cMaxInstructions > 0);
10238
10239 /*
10240 * See if there is an interrupt pending in TRPM, inject it if we can.
10241 */
10242 /** @todo What if we are injecting an exception and not an interrupt? Is that
10243 * possible here? For now we assert it is indeed only an interrupt. */
10244 if (!TRPMHasTrap(pVCpu))
10245 { /* likely */ }
10246 else
10247 {
10248 VBOXSTRICTRC rcStrict = iemExecInjectPendingTrap(pVCpu);
10249 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10250 { /*likely */ }
10251 else
10252 return rcStrict;
10253 }
10254
10255 /*
10256 * Initial decoder init w/ prefetch, then setup setjmp.
10257 */
10258 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
10259 if (rcStrict == VINF_SUCCESS)
10260 {
10261#ifdef IEM_WITH_SETJMP
10262 pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf? */
10263 IEM_TRY_SETJMP(pVCpu, rcStrict)
10264#endif
10265 {
10266 /*
10267             * The run loop.  We limit ourselves to the caller specified maximum number of instructions.
10268 */
10269 uint32_t cMaxInstructionsGccStupidity = cMaxInstructions;
10270 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
10271 for (;;)
10272 {
10273 /*
10274 * Log the state.
10275 */
10276#ifdef LOG_ENABLED
10277 iemLogCurInstr(pVCpu, true, "IEMExecLots");
10278#endif
10279
10280 /*
10281 * Do the decoding and emulation.
10282 */
10283 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
10284 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
10285#ifdef VBOX_STRICT
10286 CPUMAssertGuestRFlagsCookie(pVM, pVCpu);
10287#endif
10288 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10289 {
10290 Assert(pVCpu->iem.s.cActiveMappings == 0);
10291 pVCpu->iem.s.cInstructions++;
10292
10293#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10294 /* Perform any VMX nested-guest instruction boundary actions. */
10295 uint64_t fCpu = pVCpu->fLocalForcedActions;
10296 if (!(fCpu & ( VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
10297 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
10298 { /* likely */ }
10299 else
10300 {
10301 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
10302 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10303 fCpu = pVCpu->fLocalForcedActions;
10304 else
10305 {
10306 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10307 break;
10308 }
10309 }
10310#endif
10311 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
10312 {
10313#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
10314 uint64_t fCpu = pVCpu->fLocalForcedActions;
10315#endif
10316 fCpu &= VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
10317 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
10318 | VMCPU_FF_TLB_FLUSH
10319 | VMCPU_FF_UNHALT );
10320
10321 if (RT_LIKELY( ( !fCpu
10322 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
10323 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF) )
10324 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) ))
10325 {
10326 if (--cMaxInstructionsGccStupidity > 0)
10327 {
10328                            /* Poll timers every now and then according to the caller's specs. */
10329 if ( (cMaxInstructionsGccStupidity & cPollRate) != 0
10330 || !TMTimerPollBool(pVM, pVCpu))
10331 {
10332 Assert(pVCpu->iem.s.cActiveMappings == 0);
10333 iemReInitDecoder(pVCpu);
10334 continue;
10335 }
10336 }
10337 }
10338 }
10339 Assert(pVCpu->iem.s.cActiveMappings == 0);
10340 }
10341 else if (pVCpu->iem.s.cActiveMappings > 0)
10342 iemMemRollback(pVCpu);
10343 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10344 break;
10345 }
10346 }
10347#ifdef IEM_WITH_SETJMP
10348 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
10349 {
10350 if (pVCpu->iem.s.cActiveMappings > 0)
10351 iemMemRollback(pVCpu);
10352# if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10353 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10354# endif
10355 pVCpu->iem.s.cLongJumps++;
10356 }
10357 IEM_CATCH_LONGJMP_END(pVCpu);
10358#endif
10359
10360 /*
10361 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
10362 */
10363 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
10364 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
10365 }
10366 else
10367 {
10368 if (pVCpu->iem.s.cActiveMappings > 0)
10369 iemMemRollback(pVCpu);
10370
10371#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10372 /*
10373         * When a nested-guest causes an exception intercept (e.g. #PF) while fetching
10374         * code as part of instruction execution, we need this to fix up VINF_SVM_VMEXIT.
10375 */
10376 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10377#endif
10378 }
10379
10380 /*
10381 * Maybe re-enter raw-mode and log.
10382 */
10383 if (rcStrict != VINF_SUCCESS)
10384 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
10385 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
10386 if (pcInstructions)
10387 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
10388 return rcStrict;
10389}
10390
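/*
 * Note on the IEMExecLots parameters: cPollRate is used as a mask for the
 * timer polling check, so it must be a power of two minus one, and timers are
 * polled roughly every cPollRate + 1 instructions.  A rough call could look
 * like this (the counts are illustrative only):
 *
 * @code
 *      uint32_t     cInstructions = 0;
 *      VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu, 4096, 511, &cInstructions);
 * @endcode
 */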
10391
10392/**
10393 * Interface used by EMExecuteExec, does exit statistics and limits.
10394 *
10395 * @returns Strict VBox status code.
10396 * @param pVCpu The cross context virtual CPU structure.
10397 * @param fWillExit To be defined.
10398 * @param cMinInstructions Minimum number of instructions to execute before checking for FFs.
10399 * @param cMaxInstructions Maximum number of instructions to execute.
10400 * @param cMaxInstructionsWithoutExits
10401 * The max number of instructions without exits.
10402 * @param pStats Where to return statistics.
10403 */
10404VMMDECL(VBOXSTRICTRC) IEMExecForExits(PVMCPUCC pVCpu, uint32_t fWillExit, uint32_t cMinInstructions, uint32_t cMaxInstructions,
10405 uint32_t cMaxInstructionsWithoutExits, PIEMEXECFOREXITSTATS pStats)
10406{
10407 NOREF(fWillExit); /** @todo define flexible exit crits */
10408
10409 /*
10410 * Initialize return stats.
10411 */
10412 pStats->cInstructions = 0;
10413 pStats->cExits = 0;
10414 pStats->cMaxExitDistance = 0;
10415 pStats->cReserved = 0;
10416
10417 /*
10418 * Initial decoder init w/ prefetch, then setup setjmp.
10419 */
10420 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
10421 if (rcStrict == VINF_SUCCESS)
10422 {
10423#ifdef IEM_WITH_SETJMP
10424 pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf?!? */
10425 IEM_TRY_SETJMP(pVCpu, rcStrict)
10426#endif
10427 {
10428#ifdef IN_RING0
10429 bool const fCheckPreemptionPending = !RTThreadPreemptIsPossible() || !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
10430#endif
10431 uint32_t cInstructionSinceLastExit = 0;
10432
10433 /*
10434             * The run loop.  We limit ourselves to the caller specified maximum number of instructions.
10435 */
10436 PVM pVM = pVCpu->CTX_SUFF(pVM);
10437 for (;;)
10438 {
10439 /*
10440 * Log the state.
10441 */
10442#ifdef LOG_ENABLED
10443 iemLogCurInstr(pVCpu, true, "IEMExecForExits");
10444#endif
10445
10446 /*
10447 * Do the decoding and emulation.
10448 */
10449 uint32_t const cPotentialExits = pVCpu->iem.s.cPotentialExits;
10450
10451 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
10452 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
10453
10454 if ( cPotentialExits != pVCpu->iem.s.cPotentialExits
10455 && cInstructionSinceLastExit > 0 /* don't count the first */ )
10456 {
10457 pStats->cExits += 1;
10458 if (cInstructionSinceLastExit > pStats->cMaxExitDistance)
10459 pStats->cMaxExitDistance = cInstructionSinceLastExit;
10460 cInstructionSinceLastExit = 0;
10461 }
10462
10463 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10464 {
10465 Assert(pVCpu->iem.s.cActiveMappings == 0);
10466 pVCpu->iem.s.cInstructions++;
10467 pStats->cInstructions++;
10468 cInstructionSinceLastExit++;
10469
10470#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10471 /* Perform any VMX nested-guest instruction boundary actions. */
10472 uint64_t fCpu = pVCpu->fLocalForcedActions;
10473 if (!(fCpu & ( VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
10474 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
10475 { /* likely */ }
10476 else
10477 {
10478 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
10479 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10480 fCpu = pVCpu->fLocalForcedActions;
10481 else
10482 {
10483 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10484 break;
10485 }
10486 }
10487#endif
10488 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
10489 {
10490#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
10491 uint64_t fCpu = pVCpu->fLocalForcedActions;
10492#endif
10493 fCpu &= VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
10494 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
10495 | VMCPU_FF_TLB_FLUSH
10496 | VMCPU_FF_UNHALT );
10497 if (RT_LIKELY( ( ( !fCpu
10498 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
10499 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF))
10500 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) )
10501 || pStats->cInstructions < cMinInstructions))
10502 {
10503 if (pStats->cInstructions < cMaxInstructions)
10504 {
10505 if (cInstructionSinceLastExit <= cMaxInstructionsWithoutExits)
10506 {
10507#ifdef IN_RING0
10508 if ( !fCheckPreemptionPending
10509 || !RTThreadPreemptIsPending(NIL_RTTHREAD))
10510#endif
10511 {
10512 Assert(pVCpu->iem.s.cActiveMappings == 0);
10513 iemReInitDecoder(pVCpu);
10514 continue;
10515 }
10516#ifdef IN_RING0
10517 rcStrict = VINF_EM_RAW_INTERRUPT;
10518 break;
10519#endif
10520 }
10521 }
10522 }
10523 Assert(!(fCpu & VMCPU_FF_IEM));
10524 }
10525 Assert(pVCpu->iem.s.cActiveMappings == 0);
10526 }
10527 else if (pVCpu->iem.s.cActiveMappings > 0)
10528 iemMemRollback(pVCpu);
10529 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10530 break;
10531 }
10532 }
10533#ifdef IEM_WITH_SETJMP
10534 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
10535 {
10536 if (pVCpu->iem.s.cActiveMappings > 0)
10537 iemMemRollback(pVCpu);
10538 pVCpu->iem.s.cLongJumps++;
10539 }
10540 IEM_CATCH_LONGJMP_END(pVCpu);
10541#endif
10542
10543 /*
10544 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
10545 */
10546 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
10547 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
10548 }
10549 else
10550 {
10551 if (pVCpu->iem.s.cActiveMappings > 0)
10552 iemMemRollback(pVCpu);
10553
10554#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10555 /*
10556         * When a nested-guest causes an exception intercept (e.g. #PF) while fetching
10557         * code as part of instruction execution, we need this to fix up VINF_SVM_VMEXIT.
10558 */
10559 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10560#endif
10561 }
10562
10563 /*
10564 * Maybe re-enter raw-mode and log.
10565 */
10566 if (rcStrict != VINF_SUCCESS)
10567 LogFlow(("IEMExecForExits: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc; ins=%u exits=%u maxdist=%u\n",
10568 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp,
10569 pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict), pStats->cInstructions, pStats->cExits, pStats->cMaxExitDistance));
10570 return rcStrict;
10571}
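
/**
 * @par Usage sketch (illustrative only)
 * A minimal sketch of how a ring-3 caller that already holds the cross context
 * VCPU structure might drive IEMExecForExits. The surrounding context and the
 * threshold values are assumptions made for illustration, not values used
 * anywhere in the sources.
 * @code
 *     IEMEXECFOREXITSTATS Stats;
 *     VBOXSTRICTRC rcStrict = IEMExecForExits(pVCpu,
 *                                             0,      // fWillExit: no flexible exit criteria yet (see todo above)
 *                                             32,     // cMinInstructions  - made-up tuning value
 *                                             2048,   // cMaxInstructions  - made-up tuning value
 *                                             512,    // cMaxInstructionsWithoutExits - made-up tuning value
 *                                             &Stats);
 *     LogFlow(("ins=%u exits=%u maxdist=%u rc=%Rrc\n",
 *              Stats.cInstructions, Stats.cExits, Stats.cMaxExitDistance, VBOXSTRICTRC_VAL(rcStrict)));
 * @endcode
 */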
10572
10573
10574/**
10575 * Injects a trap, fault, abort, software interrupt or external interrupt.
10576 *
10577 * The parameter list matches TRPMQueryTrapAll pretty closely.
10578 *
10579 * @returns Strict VBox status code.
10580 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10581 * @param u8TrapNo The trap number.
10582 * @param enmType What type is it (trap/fault/abort), software
10583 * interrupt or hardware interrupt.
10584 * @param uErrCode The error code if applicable.
10585 * @param uCr2 The CR2 value if applicable.
10586 * @param cbInstr The instruction length (only relevant for
10587 * software interrupts).
10588 */
10589VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPUCC pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
10590 uint8_t cbInstr)
10591{
10592 iemInitDecoder(pVCpu, 0 /*fExecOpts*/); /** @todo wrong init function! */
10593#ifdef DBGFTRACE_ENABLED
10594 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
10595 u8TrapNo, enmType, uErrCode, uCr2);
10596#endif
10597
10598 uint32_t fFlags;
10599 switch (enmType)
10600 {
10601 case TRPM_HARDWARE_INT:
10602 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
10603 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
10604 uErrCode = uCr2 = 0;
10605 break;
10606
10607 case TRPM_SOFTWARE_INT:
10608 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
10609 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
10610 uErrCode = uCr2 = 0;
10611 break;
10612
10613 case TRPM_TRAP:
10614 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
10615 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
10616 if (u8TrapNo == X86_XCPT_PF)
10617 fFlags |= IEM_XCPT_FLAGS_CR2;
10618 switch (u8TrapNo)
10619 {
10620 case X86_XCPT_DF:
10621 case X86_XCPT_TS:
10622 case X86_XCPT_NP:
10623 case X86_XCPT_SS:
10624 case X86_XCPT_PF:
10625 case X86_XCPT_AC:
10626 case X86_XCPT_GP:
10627 fFlags |= IEM_XCPT_FLAGS_ERR;
10628 break;
10629 }
10630 break;
10631
10632 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10633 }
10634
10635 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
10636
10637 if (pVCpu->iem.s.cActiveMappings > 0)
10638 iemMemRollback(pVCpu);
10639
10640 return rcStrict;
10641}
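
/**
 * @par Usage sketch (illustrative only)
 * A hypothetical example of reflecting a page fault into the guest via this
 * API; uErrCodeXcpt and GCPtrFault are placeholders the caller would have at
 * hand, they are not defined here.
 * @code
 *     VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, X86_XCPT_PF, TRPM_TRAP,
 *                                           uErrCodeXcpt,   // page fault error code
 *                                           GCPtrFault,     // faulting address, ends up in CR2
 *                                           0);             // cbInstr only matters for software interrupts
 * @endcode
 */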
10642
10643
10644/**
10645 * Injects the active TRPM event.
10646 *
10647 * @returns Strict VBox status code.
10648 * @param pVCpu The cross context virtual CPU structure.
10649 */
10650VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPUCC pVCpu)
10651{
10652#ifndef IEM_IMPLEMENTS_TASKSWITCH
10653 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
10654#else
10655 uint8_t u8TrapNo;
10656 TRPMEVENT enmType;
10657 uint32_t uErrCode;
10658 RTGCUINTPTR uCr2;
10659 uint8_t cbInstr;
10660 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr, NULL /* fIcebp */);
10661 if (RT_FAILURE(rc))
10662 return rc;
10663
10664 /** @todo r=ramshankar: Pass ICEBP info. to IEMInjectTrap() below and handle
10665 * ICEBP \#DB injection as a special case. */
10666 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
10667#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
10668 if (rcStrict == VINF_SVM_VMEXIT)
10669 rcStrict = VINF_SUCCESS;
10670#endif
10671#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10672 if (rcStrict == VINF_VMX_VMEXIT)
10673 rcStrict = VINF_SUCCESS;
10674#endif
10675 /** @todo Are there any other codes that imply the event was successfully
10676 * delivered to the guest? See @bugref{6607}. */
10677 if ( rcStrict == VINF_SUCCESS
10678 || rcStrict == VINF_IEM_RAISED_XCPT)
10679 TRPMResetTrap(pVCpu);
10680
10681 return rcStrict;
10682#endif
10683}
10684
10685
10686VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
10687{
10688 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
10689 return VERR_NOT_IMPLEMENTED;
10690}
10691
10692
10693VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
10694{
10695 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
10696 return VERR_NOT_IMPLEMENTED;
10697}
10698
10699
10700/**
10701 * Interface for HM and EM for executing string I/O OUT (write) instructions.
10702 *
10703 * This API ASSUMES that the caller has already verified that the guest code is
10704 * allowed to access the I/O port. (The I/O port is in the DX register in the
10705 * guest state.)
10706 *
10707 * @returns Strict VBox status code.
10708 * @param pVCpu The cross context virtual CPU structure.
10709 * @param cbValue The size of the I/O port access (1, 2, or 4).
10710 * @param enmAddrMode The addressing mode.
10711 * @param fRepPrefix Indicates whether a repeat prefix is used
10712 * (doesn't matter which for this instruction).
10713 * @param cbInstr The instruction length in bytes.
10714 * @param iEffSeg The effective segment register (X86_SREG_XXX).
10715 * @param fIoChecked Whether the access to the I/O port has been
10716 * checked or not. It's typically checked in the
10717 * HM scenario.
10718 */
10719VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
10720 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
10721{
10722 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
10723 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10724
10725 /*
10726 * State init.
10727 */
10728 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10729
10730 /*
10731 * Switch orgy for getting to the right handler.
10732 */
10733 VBOXSTRICTRC rcStrict;
10734 if (fRepPrefix)
10735 {
10736 switch (enmAddrMode)
10737 {
10738 case IEMMODE_16BIT:
10739 switch (cbValue)
10740 {
10741 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10742 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10743 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10744 default:
10745 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10746 }
10747 break;
10748
10749 case IEMMODE_32BIT:
10750 switch (cbValue)
10751 {
10752 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10753 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10754 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10755 default:
10756 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10757 }
10758 break;
10759
10760 case IEMMODE_64BIT:
10761 switch (cbValue)
10762 {
10763 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10764 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10765 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10766 default:
10767 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10768 }
10769 break;
10770
10771 default:
10772 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10773 }
10774 }
10775 else
10776 {
10777 switch (enmAddrMode)
10778 {
10779 case IEMMODE_16BIT:
10780 switch (cbValue)
10781 {
10782 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10783 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10784 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10785 default:
10786 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10787 }
10788 break;
10789
10790 case IEMMODE_32BIT:
10791 switch (cbValue)
10792 {
10793 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10794 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10795 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10796 default:
10797 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10798 }
10799 break;
10800
10801 case IEMMODE_64BIT:
10802 switch (cbValue)
10803 {
10804 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10805 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10806 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10807 default:
10808 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10809 }
10810 break;
10811
10812 default:
10813 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10814 }
10815 }
10816
10817 if (pVCpu->iem.s.cActiveMappings)
10818 iemMemRollback(pVCpu);
10819
10820 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10821}
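
/**
 * @par Usage sketch (illustrative only)
 * A hypothetical example of an HM exit handler forwarding a 'rep outsb' in a
 * 32-bit guest to this API; cbInstr would come from the exit information and
 * is a placeholder here.
 * @code
 *     VBOXSTRICTRC rcStrict = IEMExecStringIoWrite(pVCpu,
 *                                                  1,              // cbValue: byte sized OUT
 *                                                  IEMMODE_32BIT,  // enmAddrMode
 *                                                  true,           // fRepPrefix
 *                                                  cbInstr,        // from the exit info (placeholder)
 *                                                  X86_SREG_DS,    // iEffSeg: default segment, no override
 *                                                  true);          // fIoChecked: intercept already checked it
 * @endcode
 */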
10822
10823
10824/**
10825 * Interface for HM and EM for executing string I/O IN (read) instructions.
10826 *
10827 * This API ASSUMES that the caller has already verified that the guest code is
10828 * allowed to access the I/O port. (The I/O port is in the DX register in the
10829 * guest state.)
10830 *
10831 * @returns Strict VBox status code.
10832 * @param pVCpu The cross context virtual CPU structure.
10833 * @param cbValue The size of the I/O port access (1, 2, or 4).
10834 * @param enmAddrMode The addressing mode.
10835 * @param fRepPrefix Indicates whether a repeat prefix is used
10836 * (doesn't matter which for this instruction).
10837 * @param cbInstr The instruction length in bytes.
10838 * @param fIoChecked Whether the access to the I/O port has been
10839 * checked or not. It's typically checked in the
10840 * HM scenario.
10841 */
10842VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
10843 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
10844{
10845 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10846
10847 /*
10848 * State init.
10849 */
10850 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10851
10852 /*
10853 * Switch orgy for getting to the right handler.
10854 */
10855 VBOXSTRICTRC rcStrict;
10856 if (fRepPrefix)
10857 {
10858 switch (enmAddrMode)
10859 {
10860 case IEMMODE_16BIT:
10861 switch (cbValue)
10862 {
10863 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
10864 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
10865 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
10866 default:
10867 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10868 }
10869 break;
10870
10871 case IEMMODE_32BIT:
10872 switch (cbValue)
10873 {
10874 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
10875 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
10876 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
10877 default:
10878 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10879 }
10880 break;
10881
10882 case IEMMODE_64BIT:
10883 switch (cbValue)
10884 {
10885 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
10886 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
10887 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
10888 default:
10889 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10890 }
10891 break;
10892
10893 default:
10894 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10895 }
10896 }
10897 else
10898 {
10899 switch (enmAddrMode)
10900 {
10901 case IEMMODE_16BIT:
10902 switch (cbValue)
10903 {
10904 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
10905 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
10906 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
10907 default:
10908 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10909 }
10910 break;
10911
10912 case IEMMODE_32BIT:
10913 switch (cbValue)
10914 {
10915 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
10916 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
10917 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
10918 default:
10919 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10920 }
10921 break;
10922
10923 case IEMMODE_64BIT:
10924 switch (cbValue)
10925 {
10926 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
10927 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
10928 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
10929 default:
10930 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10931 }
10932 break;
10933
10934 default:
10935 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10936 }
10937 }
10938
10939 if ( pVCpu->iem.s.cActiveMappings == 0
10940 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
10941 { /* likely */ }
10942 else
10943 {
10944 AssertMsg(!IOM_SUCCESS(rcStrict), ("%#x\n", VBOXSTRICTRC_VAL(rcStrict)));
10945 iemMemRollback(pVCpu);
10946 }
10947 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10948}
10949
10950
10951/**
10952 * Interface for raw-mode to execute an OUT (port write) instruction.
10953 *
10954 * @returns Strict VBox status code.
10955 * @param pVCpu The cross context virtual CPU structure.
10956 * @param cbInstr The instruction length in bytes.
10957 * @param u16Port The port to write to.
10958 * @param fImm Whether the port is specified using an immediate operand or
10959 * using the implicit DX register.
10960 * @param cbReg The register size.
10961 *
10962 * @remarks In ring-0 not all of the state needs to be synced in.
10963 */
10964VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
10965{
10966 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10967 Assert(cbReg <= 4 && cbReg != 3);
10968
10969 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10970 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_out, u16Port, cbReg,
10971 ((uint8_t)fImm << 7) | 0xf /** @todo never worked with intercepts */);
10972 Assert(!pVCpu->iem.s.cActiveMappings);
10973 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10974}
10975
10976
10977/**
10978 * Interface for raw-mode to execute an IN (port read) instruction.
10979 *
10980 * @returns Strict VBox status code.
10981 * @param pVCpu The cross context virtual CPU structure.
10982 * @param cbInstr The instruction length in bytes.
10983 * @param u16Port The port to read from.
10984 * @param fImm Whether the port is specified using an immediate operand or
10985 * using the implicit DX.
10986 * @param cbReg The register size.
10987 */
10988VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
10989{
10990 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10991 Assert(cbReg <= 4 && cbReg != 3);
10992
10993 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10994 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_in, u16Port, cbReg,
10995 ((uint8_t)fImm << 7) | 0xf /** @todo never worked with intercepts */);
10996 Assert(!pVCpu->iem.s.cActiveMappings);
10997 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10998}
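
/**
 * @par Usage sketch (illustrative only)
 * A hypothetical example of handling an intercepted 'in al, dx' (opcode 0xec,
 * one byte); u16Port is a placeholder the caller would take from the exit
 * qualification or the guest DX value.
 * @code
 *     VBOXSTRICTRC rcStrict = IEMExecDecodedIn(pVCpu,
 *                                              1,         // cbInstr: 0xec is a single byte
 *                                              u16Port,   // port number (placeholder)
 *                                              false,     // fImm: port comes from DX, not an immediate
 *                                              1);        // cbReg: AL, i.e. one byte
 * @endcode
 */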
10999
11000
11001/**
11002 * Interface for HM and EM to write to a CRx register.
11003 *
11004 * @returns Strict VBox status code.
11005 * @param pVCpu The cross context virtual CPU structure.
11006 * @param cbInstr The instruction length in bytes.
11007 * @param iCrReg The control register number (destination).
11008 * @param iGReg The general purpose register number (source).
11009 *
11010 * @remarks In ring-0 not all of the state needs to be synced in.
11011 */
11012VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
11013{
11014 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11015 Assert(iCrReg < 16);
11016 Assert(iGReg < 16);
11017
11018 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11019 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
11020 Assert(!pVCpu->iem.s.cActiveMappings);
11021 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11022}
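
/**
 * @par Usage sketch (illustrative only)
 * A hypothetical example of forwarding an intercepted 'mov cr3, rax'
 * (encoding 0f 22 d8, three bytes) to this API.
 * @code
 *     VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxWrite(pVCpu,
 *                                                       3,    // cbInstr: 0f 22 d8
 *                                                       3,    // iCrReg:  CR3 (destination)
 *                                                       0);   // iGReg:   RAX (source)
 * @endcode
 */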
11023
11024
11025/**
11026 * Interface for HM and EM to read from a CRx register.
11027 *
11028 * @returns Strict VBox status code.
11029 * @param pVCpu The cross context virtual CPU structure.
11030 * @param cbInstr The instruction length in bytes.
11031 * @param iGReg The general purpose register number (destination).
11032 * @param iCrReg The control register number (source).
11033 *
11034 * @remarks In ring-0 not all of the state needs to be synced in.
11035 */
11036VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
11037{
11038 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11039 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4
11040 | CPUMCTX_EXTRN_APIC_TPR);
11041 Assert(iCrReg < 16);
11042 Assert(iGReg < 16);
11043
11044 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11045 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
11046 Assert(!pVCpu->iem.s.cActiveMappings);
11047 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11048}
11049
11050
11051/**
11052 * Interface for HM and EM to write to a DRx register.
11053 *
11054 * @returns Strict VBox status code.
11055 * @param pVCpu The cross context virtual CPU structure.
11056 * @param cbInstr The instruction length in bytes.
11057 * @param iDrReg The debug register number (destination).
11058 * @param iGReg The general purpose register number (source).
11059 *
11060 * @remarks In ring-0 not all of the state needs to be synced in.
11061 */
11062VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovDRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iDrReg, uint8_t iGReg)
11063{
11064 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11065 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_DR7);
11066 Assert(iDrReg < 8);
11067 Assert(iGReg < 16);
11068
11069 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11070 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Dd_Rd, iDrReg, iGReg);
11071 Assert(!pVCpu->iem.s.cActiveMappings);
11072 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11073}
11074
11075
11076/**
11077 * Interface for HM and EM to read from a DRx register.
11078 *
11079 * @returns Strict VBox status code.
11080 * @param pVCpu The cross context virtual CPU structure.
11081 * @param cbInstr The instruction length in bytes.
11082 * @param iGReg The general purpose register number (destination).
11083 * @param iDrReg The debug register number (source).
11084 *
11085 * @remarks In ring-0 not all of the state needs to be synced in.
11086 */
11087VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovDRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iDrReg)
11088{
11089 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11090 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_DR7);
11091 Assert(iDrReg < 8);
11092 Assert(iGReg < 16);
11093
11094 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11095 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Dd, iGReg, iDrReg);
11096 Assert(!pVCpu->iem.s.cActiveMappings);
11097 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11098}
11099
11100
11101/**
11102 * Interface for HM and EM to clear the CR0[TS] bit.
11103 *
11104 * @returns Strict VBox status code.
11105 * @param pVCpu The cross context virtual CPU structure.
11106 * @param cbInstr The instruction length in bytes.
11107 *
11108 * @remarks In ring-0 not all of the state needs to be synced in.
11109 */
11110VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPUCC pVCpu, uint8_t cbInstr)
11111{
11112 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11113
11114 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11115 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
11116 Assert(!pVCpu->iem.s.cActiveMappings);
11117 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11118}
11119
11120
11121/**
11122 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
11123 *
11124 * @returns Strict VBox status code.
11125 * @param pVCpu The cross context virtual CPU structure.
11126 * @param cbInstr The instruction length in bytes.
11127 * @param uValue The value to load into CR0.
11128 * @param GCPtrEffDst The guest-linear address if the LMSW instruction has a
11129 * memory operand. Otherwise pass NIL_RTGCPTR.
11130 *
11131 * @remarks In ring-0 not all of the state needs to be synced in.
11132 */
11133VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uValue, RTGCPTR GCPtrEffDst)
11134{
11135 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11136
11137 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11138 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_lmsw, uValue, GCPtrEffDst);
11139 Assert(!pVCpu->iem.s.cActiveMappings);
11140 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11141}
11142
11143
11144/**
11145 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
11146 *
11147 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
11148 *
11149 * @returns Strict VBox status code.
11150 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11151 * @param cbInstr The instruction length in bytes.
11152 * @remarks In ring-0 not all of the state needs to be synced in.
11153 * @thread EMT(pVCpu)
11154 */
11155VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPUCC pVCpu, uint8_t cbInstr)
11156{
11157 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11158
11159 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11160 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
11161 Assert(!pVCpu->iem.s.cActiveMappings);
11162 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11163}
11164
11165
11166/**
11167 * Interface for HM and EM to emulate the WBINVD instruction.
11168 *
11169 * @returns Strict VBox status code.
11170 * @param pVCpu The cross context virtual CPU structure.
11171 * @param cbInstr The instruction length in bytes.
11172 *
11173 * @remarks In ring-0 not all of the state needs to be synced in.
11174 */
11175VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWbinvd(PVMCPUCC pVCpu, uint8_t cbInstr)
11176{
11177 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11178
11179 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11180 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wbinvd);
11181 Assert(!pVCpu->iem.s.cActiveMappings);
11182 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11183}
11184
11185
11186/**
11187 * Interface for HM and EM to emulate the INVD instruction.
11188 *
11189 * @returns Strict VBox status code.
11190 * @param pVCpu The cross context virtual CPU structure.
11191 * @param cbInstr The instruction length in bytes.
11192 *
11193 * @remarks In ring-0 not all of the state needs to be synced in.
11194 */
11195VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvd(PVMCPUCC pVCpu, uint8_t cbInstr)
11196{
11197 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11198
11199 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11200 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invd);
11201 Assert(!pVCpu->iem.s.cActiveMappings);
11202 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11203}
11204
11205
11206/**
11207 * Interface for HM and EM to emulate the INVLPG instruction.
11208 *
11209 * @returns Strict VBox status code.
11210 * @retval VINF_PGM_SYNC_CR3
11211 *
11212 * @param pVCpu The cross context virtual CPU structure.
11213 * @param cbInstr The instruction length in bytes.
11214 * @param GCPtrPage The effective address of the page to invalidate.
11215 *
11216 * @remarks In ring-0 not all of the state needs to be synced in.
11217 */
11218VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPUCC pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
11219{
11220 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11221
11222 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11223 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
11224 Assert(!pVCpu->iem.s.cActiveMappings);
11225 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11226}
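
/**
 * @par Usage sketch (illustrative only)
 * A hypothetical caller forwarding an INVLPG intercept; GCPtrPage and cbInstr
 * are placeholders taken from the decoded exit information.
 * @code
 *     VBOXSTRICTRC rcStrict = IEMExecDecodedInvlpg(pVCpu, cbInstr, GCPtrPage);
 *     if (rcStrict == VINF_PGM_SYNC_CR3)
 *     {
 *         // The caller would typically let PGM resynchronize the paging
 *         // structures before resuming guest execution.
 *     }
 * @endcode
 */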
11227
11228
11229/**
11230 * Interface for HM and EM to emulate the INVPCID instruction.
11231 *
11232 * @returns Strict VBox status code.
11233 * @retval VINF_PGM_SYNC_CR3
11234 *
11235 * @param pVCpu The cross context virtual CPU structure.
11236 * @param cbInstr The instruction length in bytes.
11237 * @param iEffSeg The effective segment register.
11238 * @param GCPtrDesc The effective address of the INVPCID descriptor.
11239 * @param uType The invalidation type.
11240 *
11241 * @remarks In ring-0 not all of the state needs to be synced in.
11242 */
11243VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvpcid(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrDesc,
11244 uint64_t uType)
11245{
11246 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 4);
11247
11248 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11249 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_invpcid, iEffSeg, GCPtrDesc, uType);
11250 Assert(!pVCpu->iem.s.cActiveMappings);
11251 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11252}
11253
11254
11255/**
11256 * Interface for HM and EM to emulate the CPUID instruction.
11257 *
11258 * @returns Strict VBox status code.
11259 *
11260 * @param pVCpu The cross context virtual CPU structure.
11261 * @param cbInstr The instruction length in bytes.
11262 *
11263 * @remarks Not all of the state needs to be synced in; the usual set plus RAX and RCX.
11264 */
11265VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedCpuid(PVMCPUCC pVCpu, uint8_t cbInstr)
11266{
11267 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11268 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX);
11269
11270 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11271 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_cpuid);
11272 Assert(!pVCpu->iem.s.cActiveMappings);
11273 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11274}
11275
11276
11277/**
11278 * Interface for HM and EM to emulate the RDPMC instruction.
11279 *
11280 * @returns Strict VBox status code.
11281 *
11282 * @param pVCpu The cross context virtual CPU structure.
11283 * @param cbInstr The instruction length in bytes.
11284 *
11285 * @remarks Not all of the state needs to be synced in.
11286 */
11287VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdpmc(PVMCPUCC pVCpu, uint8_t cbInstr)
11288{
11289 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11290 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
11291
11292 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11293 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdpmc);
11294 Assert(!pVCpu->iem.s.cActiveMappings);
11295 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11296}
11297
11298
11299/**
11300 * Interface for HM and EM to emulate the RDTSC instruction.
11301 *
11302 * @returns Strict VBox status code.
11303 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
11304 *
11305 * @param pVCpu The cross context virtual CPU structure.
11306 * @param cbInstr The instruction length in bytes.
11307 *
11308 * @remarks Not all of the state needs to be synced in.
11309 */
11310VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtsc(PVMCPUCC pVCpu, uint8_t cbInstr)
11311{
11312 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11313 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
11314
11315 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11316 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtsc);
11317 Assert(!pVCpu->iem.s.cActiveMappings);
11318 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11319}
11320
11321
11322/**
11323 * Interface for HM and EM to emulate the RDTSCP instruction.
11324 *
11325 * @returns Strict VBox status code.
11326 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
11327 *
11328 * @param pVCpu The cross context virtual CPU structure.
11329 * @param cbInstr The instruction length in bytes.
11330 *
11331 * @remarks Not all of the state needs to be synced in. Recommended
11332 * to include CPUMCTX_EXTRN_TSC_AUX to avoid an extra fetch call.
11333 */
11334VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtscp(PVMCPUCC pVCpu, uint8_t cbInstr)
11335{
11336 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11337 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_TSC_AUX);
11338
11339 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11340 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtscp);
11341 Assert(!pVCpu->iem.s.cActiveMappings);
11342 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11343}
11344
11345
11346/**
11347 * Interface for HM and EM to emulate the RDMSR instruction.
11348 *
11349 * @returns Strict VBox status code.
11350 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
11351 *
11352 * @param pVCpu The cross context virtual CPU structure.
11353 * @param cbInstr The instruction length in bytes.
11354 *
11355 * @remarks Not all of the state needs to be synced in. Requires RCX and
11356 * (currently) all MSRs.
11357 */
11358VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
11359{
11360 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11361 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_ALL_MSRS);
11362
11363 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11364 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdmsr);
11365 Assert(!pVCpu->iem.s.cActiveMappings);
11366 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11367}
11368
11369
11370/**
11371 * Interface for HM and EM to emulate the WRMSR instruction.
11372 *
11373 * @returns Strict VBox status code.
11374 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
11375 *
11376 * @param pVCpu The cross context virtual CPU structure.
11377 * @param cbInstr The instruction length in bytes.
11378 *
11379 * @remarks Not all of the state needs to be synced in. Requires RCX, RAX, RDX,
11380 * and (currently) all MSRs.
11381 */
11382VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWrmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
11383{
11384 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11385 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
11386 | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_ALL_MSRS);
11387
11388 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11389 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wrmsr);
11390 Assert(!pVCpu->iem.s.cActiveMappings);
11391 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11392}
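
/**
 * @par Usage sketch (illustrative only)
 * A hypothetical WRMSR intercept handler; WRMSR is always two bytes (0f 30)
 * and the MSR index and value are taken from the guest RCX/RAX/RDX already in
 * the context. Treating VINF_IEM_RAISED_XCPT as handled is one possible
 * caller policy, not a requirement of this API.
 * @code
 *     VBOXSTRICTRC rcStrict = IEMExecDecodedWrmsr(pVCpu, 2);
 *     if (rcStrict == VINF_IEM_RAISED_XCPT)
 *         rcStrict = VINF_SUCCESS;   // e.g. #GP(0) was injected into the guest; nothing more to do
 * @endcode
 */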
11393
11394
11395/**
11396 * Interface for HM and EM to emulate the MONITOR instruction.
11397 *
11398 * @returns Strict VBox status code.
11399 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
11400 *
11401 * @param pVCpu The cross context virtual CPU structure.
11402 * @param cbInstr The instruction length in bytes.
11403 *
11404 * @remarks Not all of the state needs to be synced in.
11405 * @remarks ASSUMES the default segment of DS and no segment override prefixes
11406 * are used.
11407 */
11408VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMonitor(PVMCPUCC pVCpu, uint8_t cbInstr)
11409{
11410 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11411 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
11412
11413 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11414 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_monitor, X86_SREG_DS);
11415 Assert(!pVCpu->iem.s.cActiveMappings);
11416 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11417}
11418
11419
11420/**
11421 * Interface for HM and EM to emulate the MWAIT instruction.
11422 *
11423 * @returns Strict VBox status code.
11424 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
11425 *
11426 * @param pVCpu The cross context virtual CPU structure.
11427 * @param cbInstr The instruction length in bytes.
11428 *
11429 * @remarks Not all of the state needs to be synced in.
11430 */
11431VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMwait(PVMCPUCC pVCpu, uint8_t cbInstr)
11432{
11433 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11434 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX);
11435
11436 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11437 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_mwait);
11438 Assert(!pVCpu->iem.s.cActiveMappings);
11439 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11440}
11441
11442
11443/**
11444 * Interface for HM and EM to emulate the HLT instruction.
11445 *
11446 * @returns Strict VBox status code.
11447 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
11448 *
11449 * @param pVCpu The cross context virtual CPU structure.
11450 * @param cbInstr The instruction length in bytes.
11451 *
11452 * @remarks Not all of the state needs to be synced in.
11453 */
11454VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedHlt(PVMCPUCC pVCpu, uint8_t cbInstr)
11455{
11456 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
11457
11458 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11459 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_hlt);
11460 Assert(!pVCpu->iem.s.cActiveMappings);
11461 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11462}
11463
11464
11465/**
11466 * Checks if IEM is in the process of delivering an event (interrupt or
11467 * exception).
11468 *
11469 * @returns true if we're in the process of raising an interrupt or exception,
11470 * false otherwise.
11471 * @param pVCpu The cross context virtual CPU structure.
11472 * @param puVector Where to store the vector associated with the
11473 * currently delivered event, optional.
11474 * @param pfFlags Where to store the event delivery flags (see
11475 * IEM_XCPT_FLAGS_XXX), optional.
11476 * @param puErr Where to store the error code associated with the
11477 * event, optional.
11478 * @param puCr2 Where to store the CR2 associated with the event,
11479 * optional.
11480 * @remarks The caller should check the flags to determine if the error code and
11481 * CR2 are valid for the event.
11482 */
11483VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPUCC pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
11484{
11485 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
11486 if (fRaisingXcpt)
11487 {
11488 if (puVector)
11489 *puVector = pVCpu->iem.s.uCurXcpt;
11490 if (pfFlags)
11491 *pfFlags = pVCpu->iem.s.fCurXcpt;
11492 if (puErr)
11493 *puErr = pVCpu->iem.s.uCurXcptErr;
11494 if (puCr2)
11495 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
11496 }
11497 return fRaisingXcpt;
11498}
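
/**
 * @par Usage sketch (illustrative only)
 * A minimal example of querying the event currently being delivered, e.g.
 * from an exit handler that needs to know whether it interrupted an ongoing
 * event delivery.
 * @code
 *     uint8_t  uVector;
 *     uint32_t fFlags, uErr;
 *     uint64_t uCr2;
 *     if (IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErr, &uCr2))
 *         Log(("Delivering vector %#x (flags=%#x err=%#x cr2=%RX64)\n", uVector, fFlags, uErr, uCr2));
 * @endcode
 */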
11499
11500#ifdef IN_RING3
11501
11502/**
11503 * Handles the unlikely and probably fatal merge cases.
11504 *
11505 * @returns Merged status code.
11506 * @param rcStrict Current EM status code.
11507 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
11508 * with @a rcStrict.
11509 * @param iMemMap The memory mapping index. For error reporting only.
11510 * @param pVCpu The cross context virtual CPU structure of the calling
11511 * thread, for error reporting only.
11512 */
11513DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
11514 unsigned iMemMap, PVMCPUCC pVCpu)
11515{
11516 if (RT_FAILURE_NP(rcStrict))
11517 return rcStrict;
11518
11519 if (RT_FAILURE_NP(rcStrictCommit))
11520 return rcStrictCommit;
11521
11522 if (rcStrict == rcStrictCommit)
11523 return rcStrictCommit;
11524
11525 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
11526 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
11527 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
11528 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
11529 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
11530 return VERR_IOM_FF_STATUS_IPE;
11531}
11532
11533
11534/**
11535 * Helper for IOMR3ProcessForceFlag.
11536 *
11537 * @returns Merged status code.
11538 * @param rcStrict Current EM status code.
11539 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
11540 * with @a rcStrict.
11541 * @param iMemMap The memory mapping index. For error reporting only.
11542 * @param pVCpu The cross context virtual CPU structure of the calling
11543 * thread, for error reporting only.
11544 */
11545DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPUCC pVCpu)
11546{
11547 /* Simple. */
11548 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
11549 return rcStrictCommit;
11550
11551 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
11552 return rcStrict;
11553
11554 /* EM scheduling status codes. */
11555 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
11556 && rcStrict <= VINF_EM_LAST))
11557 {
11558 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
11559 && rcStrictCommit <= VINF_EM_LAST))
11560 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
11561 }
11562
11563 /* Unlikely */
11564 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
11565}
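
/**
 * @par Merge behaviour (informational)
 * A couple of concrete outcomes of the rules above, derived directly from the
 * code; rcCommit stands for an arbitrary commit status.
 * @code
 *     iemR3MergeStatus(VINF_SUCCESS,      rcCommit, 0, pVCpu);  // -> rcCommit
 *     iemR3MergeStatus(VINF_EM_RAW_TO_R3, rcCommit, 0, pVCpu);  // -> rcCommit
 *     // When both codes are EM scheduling statuses, the numerically lower
 *     // (i.e. higher priority) one wins.
 * @endcode
 */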
11566
11567
11568/**
11569 * Called by force-flag handling code when VMCPU_FF_IEM is set.
11570 *
11571 * @returns Merge between @a rcStrict and what the commit operation returned.
11572 * @param pVM The cross context VM structure.
11573 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11574 * @param rcStrict The status code returned by ring-0 or raw-mode.
11575 */
11576VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
11577{
11578 /*
11579 * Reset the pending commit.
11580 */
11581 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
11582 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
11583 ("%#x %#x %#x\n",
11584 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
11585 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
11586
11587 /*
11588 * Commit the pending bounce buffers (usually just one).
11589 */
11590 unsigned cBufs = 0;
11591 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
11592 while (iMemMap-- > 0)
11593 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
11594 {
11595 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
11596 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
11597 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
11598
11599 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
11600 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
11601 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
11602
11603 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
11604 {
11605 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
11606 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
11607 pbBuf,
11608 cbFirst,
11609 PGMACCESSORIGIN_IEM);
11610 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
11611 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
11612 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
11613 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
11614 }
11615
11616 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
11617 {
11618 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
11619 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
11620 pbBuf + cbFirst,
11621 cbSecond,
11622 PGMACCESSORIGIN_IEM);
11623 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
11624 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
11625 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
11626 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
11627 }
11628 cBufs++;
11629 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
11630 }
11631
11632 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
11633 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
11634 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
11635 pVCpu->iem.s.cActiveMappings = 0;
11636 return rcStrict;
11637}
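
/**
 * @par Usage sketch (illustrative only)
 * A hypothetical ring-3 dispatch site, e.g. somewhere in an EM force-flag
 * handling loop, committing the pending bounce-buffer writes when the flag is
 * set.
 * @code
 *     if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
 *         rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
 * @endcode
 */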
11638
11639#endif /* IN_RING3 */
11640